Implement SPCI_RUN directly rather than by encoding hf_vcpu_run_return.

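With this change api.c builds the struct spci_value handed back to the
primary VM's scheduler directly, instead of constructing an
hf_vcpu_run_return and converting it in the handler with
hf_vcpu_run_return_encode(). The encode helper and its tests are
removed; the decode side stays for the primary VM, and SPCI_ERROR codes
other than SPCI_ABORTED now decode as an indefinite wait-for-interrupt
to keep the existing behaviour. api_vcpu_run() becomes api_spci_run()
and reports SPCI_INVALID_PARAMETERS, SPCI_DENIED or SPCI_BUSY instead
of silently defaulting to wait-for-interrupt.

For illustration only, a minimal sketch of the 16-bit packing assumed
by the shifts in this change (these helper names are hypothetical and
not part of the tree): SPCI_RUN takes its target in w1 as
(VM ID << 16) | vCPU index, while the wait-for-interrupt,
wait-for-message, yield and wake-up return values carry
(vCPU index << 16) | VM ID in w1.

    #include <stdint.h>

    /* Hypothetical helpers, shown only to illustrate the bit layout. */

    /* Argument of the SPCI_RUN call: VM ID in [31:16], vCPU in [15:0]. */
    static inline uint64_t spci_run_arg1(uint16_t vm_id, uint16_t vcpu_idx)
    {
        return ((uint32_t)vm_id << 16) | vcpu_idx;
    }

    /* Wait/wake-up return values: vCPU in [31:16], VM ID in [15:0]. */
    static inline uint16_t run_return_vm_id(uint64_t arg1)
    {
        return arg1 & 0xffff;
    }

    static inline uint16_t run_return_vcpu_index(uint64_t arg1)
    {
        return (arg1 >> 16) & 0xffff;
    }
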
Bug: 141469322
Change-Id: I34d53898acb1e09b37dd9ff314a8883e32abda98
diff --git a/inc/hf/api.h b/inc/hf/api.h
index 024973e..1854e9f 100644
--- a/inc/hf/api.h
+++ b/inc/hf/api.h
@@ -28,10 +28,6 @@
 spci_vcpu_count_t api_vcpu_get_count(spci_vm_id_t vm_id,
 				     const struct vcpu *current);
 void api_regs_state_saved(struct vcpu *vcpu);
-struct hf_vcpu_run_return api_vcpu_run(spci_vm_id_t vm_id,
-				       spci_vcpu_index_t vcpu_idx,
-				       const struct vcpu *current,
-				       struct vcpu **next);
 int64_t api_vm_configure(ipaddr_t send, ipaddr_t recv, struct vcpu *current,
 			 struct vcpu **next);
 int64_t api_mailbox_clear(struct vcpu *current, struct vcpu **next);
@@ -67,3 +63,5 @@
 	struct spci_memory_region *memory_region, uint32_t memory_to_attributes,
 	enum spci_memory_share share);
 struct spci_value api_spci_features(uint32_t function_id);
+struct spci_value api_spci_run(spci_vm_id_t vm_id, spci_vcpu_index_t vcpu_idx,
+			       const struct vcpu *current, struct vcpu **next);
diff --git a/inc/vmapi/hf/abi.h b/inc/vmapi/hf/abi.h
index 6936dcc..ea23c31 100644
--- a/inc/vmapi/hf/abi.h
+++ b/inc/vmapi/hf/abi.h
@@ -146,67 +146,6 @@
 	HF_MEMORY_SHARE,
 };
 
-/**
- * Encode an hf_vcpu_run_return struct in the SPCI ABI.
- */
-static inline struct spci_value hf_vcpu_run_return_encode(
-	struct hf_vcpu_run_return res, spci_vm_id_t vm_id,
-	spci_vcpu_index_t vcpu_index)
-{
-	struct spci_value ret = {0};
-
-	switch (res.code) {
-	case HF_VCPU_RUN_PREEMPTED:
-		ret.func = SPCI_INTERRUPT_32;
-		ret.arg1 = (uint32_t)vm_id << 16 | vcpu_index;
-		break;
-	case HF_VCPU_RUN_YIELD:
-		ret.func = SPCI_YIELD_32;
-		ret.arg1 = (uint32_t)vcpu_index << 16 | vm_id;
-		break;
-	case HF_VCPU_RUN_WAIT_FOR_INTERRUPT:
-		ret.func = HF_SPCI_RUN_WAIT_FOR_INTERRUPT;
-		ret.arg1 = (uint32_t)vcpu_index << 16 | vm_id;
-		if (res.sleep.ns == HF_SLEEP_INDEFINITE) {
-			ret.arg2 = SPCI_SLEEP_INDEFINITE;
-		} else if (res.sleep.ns == SPCI_SLEEP_INDEFINITE) {
-			ret.arg2 = 1;
-		} else {
-			ret.arg2 = res.sleep.ns;
-		}
-		break;
-	case HF_VCPU_RUN_WAIT_FOR_MESSAGE:
-		ret.func = SPCI_MSG_WAIT_32;
-		ret.arg1 = (uint32_t)vcpu_index << 16 | vm_id;
-		if (res.sleep.ns == HF_SLEEP_INDEFINITE) {
-			ret.arg2 = SPCI_SLEEP_INDEFINITE;
-		} else if (res.sleep.ns == SPCI_SLEEP_INDEFINITE) {
-			ret.arg2 = 1;
-		} else {
-			ret.arg2 = res.sleep.ns;
-		}
-		break;
-	case HF_VCPU_RUN_WAKE_UP:
-		ret.func = HF_SPCI_RUN_WAKE_UP;
-		ret.arg1 = (uint32_t)res.wake_up.vcpu << 16 | res.wake_up.vm_id;
-		break;
-	case HF_VCPU_RUN_MESSAGE:
-		ret.func = SPCI_MSG_SEND_32;
-		ret.arg1 = (uint32_t)vm_id << 16 | res.message.vm_id;
-		ret.arg3 = res.message.size;
-		break;
-	case HF_VCPU_RUN_NOTIFY_WAITERS:
-		ret.func = SPCI_RX_RELEASE_32;
-		break;
-	case HF_VCPU_RUN_ABORTED:
-		ret.func = SPCI_ERROR_32;
-		ret.arg2 = SPCI_ABORTED;
-		break;
-	}
-
-	return ret;
-}
-
 static spci_vm_id_t wake_up_get_vm_id(struct spci_value v)
 {
 	return v.arg1 & 0xffff;
@@ -263,7 +202,16 @@
 		ret.code = HF_VCPU_RUN_NOTIFY_WAITERS;
 		break;
 	case SPCI_ERROR_32:
-		ret.code = HF_VCPU_RUN_ABORTED;
+		if (res.arg2 == SPCI_ABORTED) {
+			ret.code = HF_VCPU_RUN_ABORTED;
+		} else {
+			/*
+			 * Treat other errors as suspending the vCPU
+			 * indefinitely, to maintain existing behaviour.
+			 */
+			ret.code = HF_VCPU_RUN_WAIT_FOR_INTERRUPT;
+			ret.sleep.ns = HF_SLEEP_INDEFINITE;
+		}
 		break;
 	default:
 		ret.code = HF_VCPU_RUN_ABORTED;
diff --git a/src/abi_test.cc b/src/abi_test.cc
index fb30f57..a698840 100644
--- a/src/abi_test.cc
+++ b/src/abi_test.cc
@@ -27,18 +27,6 @@
 using ::testing::Eq;
 
 /**
- * Simulate an uninitialized hf_vcpu_run_return so it can be detected if any
- * uninitialized fields make their way into the encoded form which would
- * indicate a data leak.
- */
-struct hf_vcpu_run_return dirty_vcpu_run_return()
-{
-	struct hf_vcpu_run_return res;
-	memset(&res, 0xc5, sizeof(res));
-	return res;
-}
-
-/**
  * Simulate an uninitialized spci_value so it can be detected if any
  * uninitialized fields make their way into the encoded form which would
  * indicate a data leak.
@@ -50,30 +38,6 @@
 	return res;
 }
 
-bool operator==(const spci_value a, const spci_value b)
-{
-	return a.func == b.func && a.arg1 == b.arg1 && a.arg2 == b.arg2 &&
-	       a.arg3 == b.arg3 && a.arg4 == b.arg4 && a.arg5 == b.arg5 &&
-	       a.arg6 == b.arg6 && a.arg7 == b.arg7;
-}
-
-MATCHER_P(SpciEq, expected, "")
-{
-	return arg == expected;
-}
-
-/**
- * Encode a preempted response without leaking.
- */
-TEST(abi, hf_vcpu_run_return_encode_preempted)
-{
-	struct hf_vcpu_run_return res = dirty_vcpu_run_return();
-	res.code = HF_VCPU_RUN_PREEMPTED;
-	EXPECT_THAT(hf_vcpu_run_return_encode(res, 0x1111, 0x2222),
-		    SpciEq((struct spci_value){.func = SPCI_INTERRUPT_32,
-					       .arg1 = 0x11112222}));
-}
-
 /**
  * Decode a preempted response ignoring the irrelevant bits.
  */
@@ -86,18 +50,6 @@
 }
 
 /**
- * Encode a yield response without leaking.
- */
-TEST(abi, hf_vcpu_run_return_encode_yield)
-{
-	struct hf_vcpu_run_return res = dirty_vcpu_run_return();
-	res.code = HF_VCPU_RUN_YIELD;
-	EXPECT_THAT(hf_vcpu_run_return_encode(res, 0x1111, 0x2222),
-		    SpciEq((struct spci_value){.func = SPCI_YIELD_32,
-					       .arg1 = 0x22221111}));
-}
-
-/**
  * Decode a yield response ignoring the irrelevant bits.
  */
 TEST(abi, hf_vcpu_run_return_decode_yield)
@@ -109,53 +61,6 @@
 }
 
 /**
- * Encode wait-for-interrupt response without leaking.
- */
-TEST(abi, hf_vcpu_run_return_encode_wait_for_interrupt)
-{
-	struct hf_vcpu_run_return res = dirty_vcpu_run_return();
-	res.code = HF_VCPU_RUN_WAIT_FOR_INTERRUPT;
-	res.sleep.ns = HF_SLEEP_INDEFINITE;
-	EXPECT_THAT(hf_vcpu_run_return_encode(res, 0x1111, 0x2222),
-		    SpciEq((struct spci_value){
-			    .func = HF_SPCI_RUN_WAIT_FOR_INTERRUPT,
-			    .arg1 = 0x22221111,
-			    .arg2 = SPCI_SLEEP_INDEFINITE}));
-}
-
-/**
- * Encoding wait-for-interrupt response with large sleep duration won't drop the
- * top octet.
- */
-TEST(abi, hf_vcpu_run_return_encode_wait_for_interrupt_sleep_long)
-{
-	struct hf_vcpu_run_return res = dirty_vcpu_run_return();
-	res.code = HF_VCPU_RUN_WAIT_FOR_INTERRUPT;
-	res.sleep.ns = 0xcc22888888888888;
-	EXPECT_THAT(hf_vcpu_run_return_encode(res, 0x1111, 0x2222),
-		    SpciEq((struct spci_value){
-			    .func = HF_SPCI_RUN_WAIT_FOR_INTERRUPT,
-			    .arg1 = 0x22221111,
-			    .arg2 = 0xcc22888888888888}));
-}
-
-/**
- * Encoding wait-for-interrupt response with zero sleep duration will become
- * non-zero for SPCI compatibility.
- */
-TEST(abi, hf_vcpu_run_return_encode_wait_for_interrupt_sleep_zero)
-{
-	struct hf_vcpu_run_return res = dirty_vcpu_run_return();
-	res.code = HF_VCPU_RUN_WAIT_FOR_INTERRUPT;
-	res.sleep.ns = 0;
-	EXPECT_THAT(hf_vcpu_run_return_encode(res, 0x1111, 0x2222),
-		    SpciEq((struct spci_value){
-			    .func = HF_SPCI_RUN_WAIT_FOR_INTERRUPT,
-			    .arg1 = 0x22221111,
-			    .arg2 = 1}));
-}
-
-/**
  * Decode a wait-for-interrupt response ignoring the irrelevant bits.
  */
 TEST(abi, hf_vcpu_run_return_decode_wait_for_interrupt)
@@ -182,50 +87,6 @@
 }
 
 /**
- * Encode wait-for-message response without leaking.
- */
-TEST(abi, hf_vcpu_run_return_encode_wait_for_message)
-{
-	struct hf_vcpu_run_return res = dirty_vcpu_run_return();
-	res.code = HF_VCPU_RUN_WAIT_FOR_MESSAGE;
-	res.sleep.ns = HF_SLEEP_INDEFINITE;
-	EXPECT_THAT(hf_vcpu_run_return_encode(res, 0x1111, 0x2222),
-		    SpciEq((struct spci_value){.func = SPCI_MSG_WAIT_32,
-					       .arg1 = 0x22221111,
-					       .arg2 = SPCI_SLEEP_INDEFINITE}));
-}
-
-/**
- * Encoding wait-for-message response with large sleep duration won't drop
- * the top octet.
- */
-TEST(abi, hf_vcpu_run_return_encode_wait_for_message_sleep_long)
-{
-	struct hf_vcpu_run_return res = dirty_vcpu_run_return();
-	res.code = HF_VCPU_RUN_WAIT_FOR_MESSAGE;
-	res.sleep.ns = 0xaa99777777777777;
-	EXPECT_THAT(hf_vcpu_run_return_encode(res, 0x1111, 0x2222),
-		    SpciEq((struct spci_value){.func = SPCI_MSG_WAIT_32,
-					       .arg1 = 0x22221111,
-					       .arg2 = 0xaa99777777777777}));
-}
-
-/**
- * Encoding wait-for-message response with zero sleep duration will become
- * non-zero for SPCI compatibility.
- */
-TEST(abi, hf_vcpu_run_return_encode_wait_for_message_sleep_zero)
-{
-	struct hf_vcpu_run_return res = dirty_vcpu_run_return();
-	res.code = HF_VCPU_RUN_WAIT_FOR_MESSAGE;
-	res.sleep.ns = 0;
-	EXPECT_THAT(hf_vcpu_run_return_encode(res, 0x1111, 0x2222),
-		    SpciEq((struct spci_value){.func = SPCI_MSG_WAIT_32,
-					       .arg1 = 0x22221111,
-					       .arg2 = 1}));
-}
-
-/**
  * Decode a wait-for-message response ignoring the irrelevant bits.
  */
 TEST(abi, hf_vcpu_run_return_decode_wait_for_message)
@@ -252,20 +113,6 @@
 }
 
 /**
- * Encode wake up response without leaking.
- */
-TEST(abi, hf_vcpu_run_return_encode_wake_up)
-{
-	struct hf_vcpu_run_return res = dirty_vcpu_run_return();
-	res.code = HF_VCPU_RUN_WAKE_UP;
-	res.wake_up.vm_id = 0x1234;
-	res.wake_up.vcpu = 0xabcd;
-	EXPECT_THAT(hf_vcpu_run_return_encode(res, 0x1111, 0x2222),
-		    SpciEq((struct spci_value){.func = HF_SPCI_RUN_WAKE_UP,
-					       .arg1 = 0xabcd1234}));
-}
-
-/**
  * Decode a wake up response ignoring the irrelevant bits.
  */
 TEST(abi, hf_vcpu_run_return_decode_wake_up)
@@ -280,21 +127,6 @@
 }
 
 /**
- * Encode message response without leaking.
- */
-TEST(abi, hf_vcpu_run_return_encode_message)
-{
-	struct hf_vcpu_run_return res = dirty_vcpu_run_return();
-	res.code = HF_VCPU_RUN_MESSAGE;
-	res.message.vm_id = 0xf007;
-	res.message.size = 0xcafe1971;
-	EXPECT_THAT(hf_vcpu_run_return_encode(res, 0x1111, 0x2222),
-		    SpciEq((struct spci_value){.func = SPCI_MSG_SEND_32,
-					       .arg1 = 0x1111f007,
-					       .arg3 = 0xcafe1971}));
-}
-
-/**
  * Decode a wake up response ignoring the irrelevant bits.
  */
 TEST(abi, hf_vcpu_run_return_decode_message)
@@ -310,17 +142,6 @@
 }
 
 /**
- * Encode a 'notify waiters' response without leaking.
- */
-TEST(abi, hf_vcpu_run_return_encode_notify_waiters)
-{
-	struct hf_vcpu_run_return res = dirty_vcpu_run_return();
-	res.code = HF_VCPU_RUN_NOTIFY_WAITERS;
-	EXPECT_THAT(hf_vcpu_run_return_encode(res, 0x1111, 0x2222),
-		    SpciEq((struct spci_value){.func = SPCI_RX_RELEASE_32}));
-}
-
-/**
  * Decode a 'notify waiters' response ignoring the irrelevant bits.
  */
 TEST(abi, hf_vcpu_run_return_decode_notify_waiters)
@@ -332,19 +153,6 @@
 }
 
 /**
- * Encode an aborted response without leaking.
- */
-TEST(abi, hf_vcpu_run_return_encode_aborted)
-{
-	struct hf_vcpu_run_return res = dirty_vcpu_run_return();
-	res.code = HF_VCPU_RUN_ABORTED;
-	EXPECT_THAT(
-		hf_vcpu_run_return_encode(res, 0x1111, 0x2222),
-		SpciEq((struct spci_value){.func = SPCI_ERROR_32,
-					   .arg2 = (uint64_t)SPCI_ABORTED}));
-}
-
-/**
  * Decode an aborted response ignoring the irrelevant bits.
  */
 TEST(abi, hf_vcpu_run_return_decode_aborted)
diff --git a/src/api.c b/src/api.c
index f20dc19..6f118a2 100644
--- a/src/api.c
+++ b/src/api.c
@@ -65,7 +65,7 @@
  * to cause SPCI_RUN to return and the primary VM to regain control of the CPU.
  */
 static struct vcpu *api_switch_to_primary(struct vcpu *current,
-					  struct hf_vcpu_run_return primary_ret,
+					  struct spci_value primary_ret,
 					  enum vcpu_state secondary_state)
 {
 	struct vm *primary = vm_find(HF_PRIMARY_VM_ID);
@@ -75,14 +75,32 @@
 	 * If the secondary is blocked but has a timer running, sleep until the
 	 * timer fires rather than indefinitely.
 	 */
-	switch (primary_ret.code) {
-	case HF_VCPU_RUN_WAIT_FOR_INTERRUPT:
-	case HF_VCPU_RUN_WAIT_FOR_MESSAGE:
-		primary_ret.sleep.ns =
-			arch_timer_enabled_current()
-				? arch_timer_remaining_ns_current()
-				: HF_SLEEP_INDEFINITE;
+	switch (primary_ret.func) {
+	case HF_SPCI_RUN_WAIT_FOR_INTERRUPT:
+	case SPCI_MSG_WAIT_32: {
+		if (arch_timer_enabled_current()) {
+			uint64_t remaining_ns =
+				arch_timer_remaining_ns_current();
+
+			if (remaining_ns == 0) {
+				/*
+				 * Timer is pending, so the current vCPU should
+				 * be run again right away.
+				 */
+				primary_ret.func = SPCI_INTERRUPT_32;
+				/*
+				 * primary_ret.arg1 should already be set to the
+				 * current VM ID and vCPU ID.
+				 */
+				primary_ret.arg2 = 0;
+			} else {
+				primary_ret.arg2 = remaining_ns;
+			}
+		} else {
+			primary_ret.arg2 = SPCI_SLEEP_INDEFINITE;
+		}
 		break;
+	}
 
 	default:
 		/* Do nothing. */
@@ -90,9 +108,7 @@
 	}
 
 	/* Set the return value for the primary VM's call to HF_VCPU_RUN. */
-	arch_regs_set_retval(&next->regs, hf_vcpu_run_return_encode(
-						  primary_ret, current->vm->id,
-						  vcpu_index(current)));
+	arch_regs_set_retval(&next->regs, primary_ret);
 
 	/* Mark the current vcpu as waiting. */
 	sl_lock(&current->lock);
@@ -107,8 +123,9 @@
  */
 struct vcpu *api_preempt(struct vcpu *current)
 {
-	struct hf_vcpu_run_return ret = {
-		.code = HF_VCPU_RUN_PREEMPTED,
+	struct spci_value ret = {
+		.func = SPCI_INTERRUPT_32,
+		.arg1 = ((uint32_t)current->vm->id << 16) | vcpu_index(current),
 	};
 
 	return api_switch_to_primary(current, ret, VCPU_STATE_READY);
@@ -120,8 +137,9 @@
  */
 struct vcpu *api_wait_for_interrupt(struct vcpu *current)
 {
-	struct hf_vcpu_run_return ret = {
-		.code = HF_VCPU_RUN_WAIT_FOR_INTERRUPT,
+	struct spci_value ret = {
+		.func = HF_SPCI_RUN_WAIT_FOR_INTERRUPT,
+		.arg1 = ((uint32_t)vcpu_index(current) << 16) | current->vm->id,
 	};
 
 	return api_switch_to_primary(current, ret,
@@ -133,8 +151,9 @@
  */
 struct vcpu *api_vcpu_off(struct vcpu *current)
 {
-	struct hf_vcpu_run_return ret = {
-		.code = HF_VCPU_RUN_WAIT_FOR_INTERRUPT,
+	struct spci_value ret = {
+		.func = HF_SPCI_RUN_WAIT_FOR_INTERRUPT,
+		.arg1 = ((uint32_t)vcpu_index(current) << 16) | current->vm->id,
 	};
 
 	/*
@@ -153,8 +172,9 @@
  */
 void api_yield(struct vcpu *current, struct vcpu **next)
 {
-	struct hf_vcpu_run_return primary_ret = {
-		.code = HF_VCPU_RUN_YIELD,
+	struct spci_value primary_ret = {
+		.func = SPCI_YIELD_32,
+		.arg1 = ((uint32_t)vcpu_index(current) << 16) | current->vm->id,
 	};
 
 	if (current->vm->id == HF_PRIMARY_VM_ID) {
@@ -171,10 +191,10 @@
  */
 struct vcpu *api_wake_up(struct vcpu *current, struct vcpu *target_vcpu)
 {
-	struct hf_vcpu_run_return ret = {
-		.code = HF_VCPU_RUN_WAKE_UP,
-		.wake_up.vm_id = target_vcpu->vm->id,
-		.wake_up.vcpu = vcpu_index(target_vcpu),
+	struct spci_value ret = {
+		.func = HF_SPCI_RUN_WAKE_UP,
+		.arg1 = ((uint32_t)vcpu_index(target_vcpu) << 16) |
+			target_vcpu->vm->id,
 	};
 	return api_switch_to_primary(current, ret, VCPU_STATE_READY);
 }
@@ -184,9 +204,7 @@
  */
 struct vcpu *api_abort(struct vcpu *current)
 {
-	struct hf_vcpu_run_return ret = {
-		.code = HF_VCPU_RUN_ABORTED,
-	};
+	struct spci_value ret = spci_error(SPCI_ABORTED);
 
 	dlog("Aborting VM %u vCPU %u\n", current->vm->id, vcpu_index(current));
 
@@ -361,7 +379,7 @@
  * value needs to be forced onto the vCPU.
  */
 static bool api_vcpu_prepare_run(const struct vcpu *current, struct vcpu *vcpu,
-				 struct hf_vcpu_run_return *run_ret)
+				 struct spci_value *run_ret)
 {
 	bool need_vm_lock;
 	bool ret;
@@ -396,13 +414,12 @@
 			/*
 			 * vCPU is running on another pCPU.
 			 *
-			 * It's ok not to return the sleep duration here because
-			 * the other physical CPU that is currently running this
-			 * vCPU will return the sleep duration if needed. The
-			 * default return value is
-			 * HF_VCPU_RUN_WAIT_FOR_INTERRUPT, so no need to set it
-			 * explicitly.
+			 * It's okay not to return the sleep duration here
+			 * because the other physical CPU that is currently
+			 * running this vCPU will return the sleep duration if
+			 * needed.
 			 */
+			*run_ret = spci_error(SPCI_BUSY);
 			ret = false;
 			goto out;
 		}
@@ -458,12 +475,18 @@
 		 * the primary which called vcpu_run.
 		 */
 		if (arch_timer_enabled(&vcpu->regs)) {
-			run_ret->code =
+			run_ret->func =
 				vcpu->state == VCPU_STATE_BLOCKED_MAILBOX
-					? HF_VCPU_RUN_WAIT_FOR_MESSAGE
-					: HF_VCPU_RUN_WAIT_FOR_INTERRUPT;
-			run_ret->sleep.ns =
-				arch_timer_remaining_ns(&vcpu->regs);
+					? SPCI_MSG_WAIT_32
+					: HF_SPCI_RUN_WAIT_FOR_INTERRUPT;
+			run_ret->arg1 = ((uint32_t)vcpu_index(vcpu) << 16) |
+					vcpu->vm->id;
+			/*
+			 * arch_timer_remaining_ns should never return 0: if
+			 * it did, arch_timer_pending would have returned true
+			 * earlier and we would not have reached this point.
+			 */
+			run_ret->arg2 = arch_timer_remaining_ns(&vcpu->regs);
 		}
 
 		ret = false;
@@ -495,23 +518,16 @@
 	return ret;
 }
 
-/**
- * Runs the given vcpu of the given vm.
- */
-struct hf_vcpu_run_return api_vcpu_run(spci_vm_id_t vm_id,
-				       spci_vcpu_index_t vcpu_idx,
-				       const struct vcpu *current,
-				       struct vcpu **next)
+struct spci_value api_spci_run(spci_vm_id_t vm_id, spci_vcpu_index_t vcpu_idx,
+			       const struct vcpu *current, struct vcpu **next)
 {
 	struct vm *vm;
 	struct vcpu *vcpu;
-	struct hf_vcpu_run_return ret = {
-		.code = HF_VCPU_RUN_WAIT_FOR_INTERRUPT,
-		.sleep.ns = HF_SLEEP_INDEFINITE,
-	};
+	struct spci_value ret = spci_error(SPCI_INVALID_PARAMETERS);
 
 	/* Only the primary VM can switch vcpus. */
 	if (current->vm->id != HF_PRIMARY_VM_ID) {
+		ret.arg2 = SPCI_DENIED;
 		goto out;
 	}
 
@@ -565,7 +581,9 @@
 	 * Set a placeholder return code to the scheduler. This will be
 	 * overwritten when the switch back to the primary occurs.
 	 */
-	ret.code = HF_VCPU_RUN_PREEMPTED;
+	ret.func = SPCI_INTERRUPT_32;
+	ret.arg1 = ((uint32_t)vm_id << 16) | vcpu_idx;
+	ret.arg2 = 0;
 
 out:
 	return ret;
@@ -589,8 +607,8 @@
 				 struct vcpu *current, struct vcpu **next)
 {
 	struct vm *vm = locked_vm.vm;
-	struct hf_vcpu_run_return ret = {
-		.code = HF_VCPU_RUN_NOTIFY_WAITERS,
+	struct spci_value ret = {
+		.func = SPCI_RX_RELEASE_32,
 	};
 
 	if (list_empty(&vm->mailbox.waiter_list)) {
@@ -876,19 +894,18 @@
 static void deliver_msg(struct vm_locked to, struct vm_locked from,
 			uint32_t size, struct vcpu *current, struct vcpu **next)
 {
-	struct hf_vcpu_run_return primary_ret = {
-		.code = HF_VCPU_RUN_MESSAGE,
+	struct spci_value primary_ret = {
+		.func = SPCI_MSG_SEND_32,
+		.arg1 = ((uint32_t)from.vm->id << 16) | to.vm->id,
 	};
 
-	primary_ret.message.vm_id = to.vm->id;
-
 	/* Messages for the primary VM are delivered directly. */
 	if (to.vm->id == HF_PRIMARY_VM_ID) {
 		/*
 		 * Only tell the primary VM the size if the message is for it,
 		 * to avoid leaking data about messages for other VMs.
 		 */
-		primary_ret.message.size = size;
+		primary_ret.arg3 = size;
 
 		to.vm->mailbox.state = MAILBOX_STATE_READ;
 		*next = api_switch_to_primary(current, primary_ret,
@@ -1114,8 +1131,9 @@
 
 	/* Switch back to primary vm to block. */
 	{
-		struct hf_vcpu_run_return run_return = {
-			.code = HF_VCPU_RUN_WAIT_FOR_MESSAGE,
+		struct spci_value run_return = {
+			.func = SPCI_MSG_WAIT_32,
+			.arg1 = ((uint32_t)vcpu_index(current) << 16) | vm->id,
 		};
 
 		*next = api_switch_to_primary(current, run_return,
diff --git a/src/arch/aarch64/hypervisor/handler.c b/src/arch/aarch64/hypervisor/handler.c
index 885a383..f976eee 100644
--- a/src/arch/aarch64/hypervisor/handler.c
+++ b/src/arch/aarch64/hypervisor/handler.c
@@ -344,15 +344,11 @@
 	case SPCI_MSG_POLL_32:
 		*args = api_spci_msg_recv(false, current(), next);
 		return true;
-	case SPCI_RUN_32: {
-		struct vcpu *vcpu = current();
-		*args = hf_vcpu_run_return_encode(
-			api_vcpu_run((args->arg1 >> 16) & 0xffff,
-				     args->arg1 & 0xffff, vcpu, next),
-			vcpu->vm->id, vcpu_index(vcpu));
+	case SPCI_RUN_32:
+		*args = api_spci_run((args->arg1 >> 16) & 0xffff,
+				     args->arg1 & 0xffff, current(), next);
 		return true;
 	}
-	}
 
 	return false;
 }