Encode run return values in an SPCI compatible way.
Bug: 141469322
Change-Id: I269a7797b72e1e7d36c153cd41050584db8415b3
diff --git a/inc/vmapi/hf/abi.h b/inc/vmapi/hf/abi.h
index 14c3fc6..6936dcc 100644
--- a/inc/vmapi/hf/abi.h
+++ b/inc/vmapi/hf/abi.h
@@ -19,6 +19,30 @@
#include "hf/spci.h"
#include "hf/types.h"
+/* Keep macro alignment */
+/* clang-format off */
+
+/* TODO: Define constants below according to spec. */
+#define HF_VM_GET_COUNT 0xff01
+#define HF_VCPU_GET_COUNT 0xff02
+#define HF_VM_CONFIGURE 0xff03
+#define HF_MAILBOX_CLEAR 0xff04
+#define HF_MAILBOX_WRITABLE_GET 0xff05
+#define HF_MAILBOX_WAITER_GET 0xff06
+#define HF_INTERRUPT_ENABLE 0xff07
+#define HF_INTERRUPT_GET 0xff08
+#define HF_INTERRUPT_INJECT 0xff09
+#define HF_SHARE_MEMORY 0xff0a
+
+/* Custom SPCI-like calls returned from SPCI_RUN. */
+#define HF_SPCI_RUN_WAIT_FOR_INTERRUPT 0xff0b
+#define HF_SPCI_RUN_WAKE_UP 0xff0c
+
+/* This matches what Trusty and its ATF module currently use. */
+#define HF_DEBUG_LOG 0xbd000000
+
+/* clang-format on */
+
enum hf_vcpu_run_code {
/**
* The vCPU has been preempted but still has work to do. If the
@@ -123,56 +147,126 @@
};
/**
- * Encode an hf_vcpu_run_return struct in the 64-bit packing ABI.
+ * Encode an hf_vcpu_run_return struct in the SPCI ABI.
*/
-static inline uint64_t hf_vcpu_run_return_encode(struct hf_vcpu_run_return res)
+static inline struct spci_value hf_vcpu_run_return_encode(
+ struct hf_vcpu_run_return res, spci_vm_id_t vm_id,
+ spci_vcpu_index_t vcpu_index)
{
- uint64_t ret = res.code & 0xff;
+ struct spci_value ret = {0};
switch (res.code) {
- case HF_VCPU_RUN_WAKE_UP:
- ret |= (uint64_t)res.wake_up.vm_id << 32;
- ret |= (uint64_t)res.wake_up.vcpu << 16;
+ case HF_VCPU_RUN_PREEMPTED:
+ ret.func = SPCI_INTERRUPT_32;
+ ret.arg1 = (uint32_t)vm_id << 16 | vcpu_index;
break;
- case HF_VCPU_RUN_MESSAGE:
- ret |= (uint64_t)res.message.size << 32;
- ret |= res.message.vm_id << 8;
+ case HF_VCPU_RUN_YIELD:
+ ret.func = SPCI_YIELD_32;
+ ret.arg1 = (uint32_t)vcpu_index << 16 | vm_id;
break;
case HF_VCPU_RUN_WAIT_FOR_INTERRUPT:
- case HF_VCPU_RUN_WAIT_FOR_MESSAGE:
- ret |= res.sleep.ns << 8;
+ ret.func = HF_SPCI_RUN_WAIT_FOR_INTERRUPT;
+ ret.arg1 = (uint32_t)vcpu_index << 16 | vm_id;
+ if (res.sleep.ns == HF_SLEEP_INDEFINITE) {
+ ret.arg2 = SPCI_SLEEP_INDEFINITE;
+ } else if (res.sleep.ns == SPCI_SLEEP_INDEFINITE) {
+ ret.arg2 = 1;
+ } else {
+ ret.arg2 = res.sleep.ns;
+ }
break;
- default:
+ case HF_VCPU_RUN_WAIT_FOR_MESSAGE:
+ ret.func = SPCI_MSG_WAIT_32;
+ ret.arg1 = (uint32_t)vcpu_index << 16 | vm_id;
+ if (res.sleep.ns == HF_SLEEP_INDEFINITE) {
+ ret.arg2 = SPCI_SLEEP_INDEFINITE;
+ } else if (res.sleep.ns == SPCI_SLEEP_INDEFINITE) {
+ ret.arg2 = 1;
+ } else {
+ ret.arg2 = res.sleep.ns;
+ }
+ break;
+ case HF_VCPU_RUN_WAKE_UP:
+ ret.func = HF_SPCI_RUN_WAKE_UP;
+ ret.arg1 = (uint32_t)res.wake_up.vcpu << 16 | res.wake_up.vm_id;
+ break;
+ case HF_VCPU_RUN_MESSAGE:
+ ret.func = SPCI_MSG_SEND_32;
+ ret.arg1 = (uint32_t)vm_id << 16 | res.message.vm_id;
+ ret.arg3 = res.message.size;
+ break;
+ case HF_VCPU_RUN_NOTIFY_WAITERS:
+ ret.func = SPCI_RX_RELEASE_32;
+ break;
+ case HF_VCPU_RUN_ABORTED:
+ ret.func = SPCI_ERROR_32;
+ ret.arg2 = SPCI_ABORTED;
break;
}
return ret;
}
+static inline spci_vm_id_t wake_up_get_vm_id(struct spci_value v)
+{
+ return v.arg1 & 0xffff;
+}
+
+static inline spci_vcpu_index_t wake_up_get_vcpu(struct spci_value v)
+{
+ return (v.arg1 >> 16) & 0xffff;
+}
+
/**
* Decode an hf_vcpu_run_return struct from the 64-bit packing ABI.
*/
-static inline struct hf_vcpu_run_return hf_vcpu_run_return_decode(uint64_t res)
+static inline struct hf_vcpu_run_return hf_vcpu_run_return_decode(
+ struct spci_value res)
{
- struct hf_vcpu_run_return ret = {
- .code = (enum hf_vcpu_run_code)(res & 0xff),
- };
+ struct hf_vcpu_run_return ret = {.code = HF_VCPU_RUN_PREEMPTED};
/* Some codes include more data. */
- switch (ret.code) {
- case HF_VCPU_RUN_WAKE_UP:
- ret.wake_up.vm_id = res >> 32;
- ret.wake_up.vcpu = (res >> 16) & 0xffff;
+ switch (res.func) {
+ case SPCI_INTERRUPT_32:
+ ret.code = HF_VCPU_RUN_PREEMPTED;
break;
- case HF_VCPU_RUN_MESSAGE:
- ret.message.size = res >> 32;
- ret.message.vm_id = (res >> 8) & 0xffff;
+ case SPCI_YIELD_32:
+ ret.code = HF_VCPU_RUN_YIELD;
break;
- case HF_VCPU_RUN_WAIT_FOR_INTERRUPT:
- case HF_VCPU_RUN_WAIT_FOR_MESSAGE:
- ret.sleep.ns = res >> 8;
+ case HF_SPCI_RUN_WAIT_FOR_INTERRUPT:
+ ret.code = HF_VCPU_RUN_WAIT_FOR_INTERRUPT;
+ if (res.arg2 == SPCI_SLEEP_INDEFINITE) {
+ ret.sleep.ns = HF_SLEEP_INDEFINITE;
+ } else {
+ ret.sleep.ns = res.arg2;
+ }
+ break;
+ case SPCI_MSG_WAIT_32:
+ ret.code = HF_VCPU_RUN_WAIT_FOR_MESSAGE;
+ if (res.arg2 == SPCI_SLEEP_INDEFINITE) {
+ ret.sleep.ns = HF_SLEEP_INDEFINITE;
+ } else {
+ ret.sleep.ns = res.arg2;
+ }
+ break;
+ case HF_SPCI_RUN_WAKE_UP:
+ ret.code = HF_VCPU_RUN_WAKE_UP;
+ ret.wake_up.vcpu = wake_up_get_vcpu(res);
+ ret.wake_up.vm_id = wake_up_get_vm_id(res);
+ break;
+ case SPCI_MSG_SEND_32:
+ ret.code = HF_VCPU_RUN_MESSAGE;
+ ret.message.vm_id = res.arg1 & 0xffff;
+ ret.message.size = res.arg3;
+ break;
+ case SPCI_RX_RELEASE_32:
+ ret.code = HF_VCPU_RUN_NOTIFY_WAITERS;
+ break;
+ case SPCI_ERROR_32:
+ ret.code = HF_VCPU_RUN_ABORTED;
break;
default:
+ ret.code = HF_VCPU_RUN_ABORTED;
break;
}
diff --git a/inc/vmapi/hf/call.h b/inc/vmapi/hf/call.h
index 52396aa..dd39c26 100644
--- a/inc/vmapi/hf/call.h
+++ b/inc/vmapi/hf/call.h
@@ -20,27 +20,6 @@
#include "hf/spci.h"
#include "hf/types.h"
-/* Keep macro alignment */
-/* clang-format off */
-
-/* TODO: Define constants below according to spec. */
-#define HF_VM_GET_COUNT 0xff01
-#define HF_VCPU_GET_COUNT 0xff02
-#define HF_VCPU_RUN 0xff03
-#define HF_VM_CONFIGURE 0xff05
-#define HF_MAILBOX_CLEAR 0xff08
-#define HF_MAILBOX_WRITABLE_GET 0xff09
-#define HF_MAILBOX_WAITER_GET 0xff0a
-#define HF_INTERRUPT_ENABLE 0xff0b
-#define HF_INTERRUPT_GET 0xff0c
-#define HF_INTERRUPT_INJECT 0xff0d
-#define HF_SHARE_MEMORY 0xff0e
-
-/* This matches what Trusty and its ATF module currently use. */
-#define HF_DEBUG_LOG 0xbd000000
-
-/* clang-format on */
-
/**
* This function must be implemented to trigger the architecture specific
* mechanism to call to the hypervisor.
@@ -88,8 +67,8 @@
static inline struct hf_vcpu_run_return hf_vcpu_run(spci_vm_id_t vm_id,
spci_vcpu_index_t vcpu_idx)
{
- return hf_vcpu_run_return_decode(
- hf_call(HF_VCPU_RUN, vm_id, vcpu_idx, 0));
+ return hf_vcpu_run_return_decode(spci_call((struct spci_value){
+ .func = SPCI_RUN_32, .arg1 = (uint32_t)vm_id << 16 | vcpu_idx}));
}
/**
diff --git a/inc/vmapi/hf/spci.h b/inc/vmapi/hf/spci.h
index 8a58227..d5d55ff 100644
--- a/inc/vmapi/hf/spci.h
+++ b/inc/vmapi/hf/spci.h
@@ -52,6 +52,7 @@
#define SPCI_INTERRUPTED INT32_C(-5)
#define SPCI_DENIED INT32_C(-6)
#define SPCI_RETRY INT32_C(-7)
+#define SPCI_ABORTED INT32_C(-8)
/* Architected memory sharing message IDs. */
enum spci_memory_share {
@@ -70,6 +71,8 @@
#define SPCI_MSG_SEND_LEGACY_MEMORY 0x2
#define SPCI_MSG_SEND_LEGACY_MEMORY_MASK 0x2
+#define SPCI_SLEEP_INDEFINITE 0
+
/* The maximum length possible for a single message. */
#define SPCI_MSG_PAYLOAD_MAX HF_MAILBOX_SIZE
diff --git a/inc/vmapi/hf/types.h b/inc/vmapi/hf/types.h
index 687dafa..1f0d5d4 100644
--- a/inc/vmapi/hf/types.h
+++ b/inc/vmapi/hf/types.h
@@ -51,7 +51,7 @@
#define HF_PRIMARY_VM_ID (HF_VM_ID_OFFSET + HF_PRIMARY_VM_INDEX)
/** Sleep value for an indefinite period of time. */
-#define HF_SLEEP_INDEFINITE 0xffffffffffffff
+#define HF_SLEEP_INDEFINITE 0xffffffffffffffff
/** The amount of data that can be sent to a mailbox. */
#define HF_MAILBOX_SIZE 4096
diff --git a/src/abi_test.cc b/src/abi_test.cc
index 0668ce0..fb30f57 100644
--- a/src/abi_test.cc
+++ b/src/abi_test.cc
@@ -16,6 +16,8 @@
extern "C" {
#include "vmapi/hf/abi.h"
+
+#include "vmapi/hf/spci.h"
}
#include <gmock/gmock.h>
@@ -37,13 +39,39 @@
}
/**
+ * Simulate an uninitialized spci_value so it can be detected if any
+ * uninitialized fields make their way into the encoded form which would
+ * indicate a data leak.
+ */
+struct spci_value dirty_spci_value()
+{
+ struct spci_value res;
+ memset(&res, 0xc5, sizeof(res));
+ return res;
+}
+
+bool operator==(const spci_value a, const spci_value b)
+{
+ return a.func == b.func && a.arg1 == b.arg1 && a.arg2 == b.arg2 &&
+ a.arg3 == b.arg3 && a.arg4 == b.arg4 && a.arg5 == b.arg5 &&
+ a.arg6 == b.arg6 && a.arg7 == b.arg7;
+}
+
+MATCHER_P(SpciEq, expected, "")
+{
+ return arg == expected;
+}
+
+/**
* Encode a preempted response without leaking.
*/
TEST(abi, hf_vcpu_run_return_encode_preempted)
{
struct hf_vcpu_run_return res = dirty_vcpu_run_return();
res.code = HF_VCPU_RUN_PREEMPTED;
- EXPECT_THAT(hf_vcpu_run_return_encode(res), Eq(0));
+ EXPECT_THAT(hf_vcpu_run_return_encode(res, 0x1111, 0x2222),
+ SpciEq((struct spci_value){.func = SPCI_INTERRUPT_32,
+ .arg1 = 0x11112222}));
}
/**
@@ -51,8 +79,9 @@
*/
TEST(abi, hf_vcpu_run_return_decode_preempted)
{
- struct hf_vcpu_run_return res =
- hf_vcpu_run_return_decode(0x1a1a1a1a2b2b2b00);
+ struct spci_value v = dirty_spci_value();
+ v.func = SPCI_INTERRUPT_32;
+ struct hf_vcpu_run_return res = hf_vcpu_run_return_decode(v);
EXPECT_THAT(res.code, Eq(HF_VCPU_RUN_PREEMPTED));
}
@@ -63,7 +92,9 @@
{
struct hf_vcpu_run_return res = dirty_vcpu_run_return();
res.code = HF_VCPU_RUN_YIELD;
- EXPECT_THAT(hf_vcpu_run_return_encode(res), Eq(1));
+ EXPECT_THAT(hf_vcpu_run_return_encode(res, 0x1111, 0x2222),
+ SpciEq((struct spci_value){.func = SPCI_YIELD_32,
+ .arg1 = 0x22221111}));
}
/**
@@ -71,8 +102,9 @@
*/
TEST(abi, hf_vcpu_run_return_decode_yield)
{
- struct hf_vcpu_run_return res =
- hf_vcpu_run_return_decode(0x1a1a1a1a2b2b2b01);
+ struct spci_value v = dirty_spci_value();
+ v.func = SPCI_YIELD_32;
+ struct hf_vcpu_run_return res = hf_vcpu_run_return_decode(v);
EXPECT_THAT(res.code, Eq(HF_VCPU_RUN_YIELD));
}
@@ -84,19 +116,43 @@
struct hf_vcpu_run_return res = dirty_vcpu_run_return();
res.code = HF_VCPU_RUN_WAIT_FOR_INTERRUPT;
res.sleep.ns = HF_SLEEP_INDEFINITE;
- EXPECT_THAT(hf_vcpu_run_return_encode(res), Eq(0xffffffffffffff02));
+ EXPECT_THAT(hf_vcpu_run_return_encode(res, 0x1111, 0x2222),
+ SpciEq((struct spci_value){
+ .func = HF_SPCI_RUN_WAIT_FOR_INTERRUPT,
+ .arg1 = 0x22221111,
+ .arg2 = SPCI_SLEEP_INDEFINITE}));
}
/**
- * Encoding wait-for-interrupt response with too large sleep duration will drop
- * the top octet.
+ * Encoding wait-for-interrupt response with large sleep duration won't drop the
+ * top octet.
*/
-TEST(abi, hf_vcpu_run_return_encode_wait_for_interrupt_sleep_too_long)
+TEST(abi, hf_vcpu_run_return_encode_wait_for_interrupt_sleep_long)
{
struct hf_vcpu_run_return res = dirty_vcpu_run_return();
res.code = HF_VCPU_RUN_WAIT_FOR_INTERRUPT;
res.sleep.ns = 0xcc22888888888888;
- EXPECT_THAT(hf_vcpu_run_return_encode(res), Eq(0x2288888888888802));
+ EXPECT_THAT(hf_vcpu_run_return_encode(res, 0x1111, 0x2222),
+ SpciEq((struct spci_value){
+ .func = HF_SPCI_RUN_WAIT_FOR_INTERRUPT,
+ .arg1 = 0x22221111,
+ .arg2 = 0xcc22888888888888}));
+}
+
+/**
+ * Encoding wait-for-interrupt response with zero sleep duration will become
+ * non-zero for SPCI compatibility.
+ */
+TEST(abi, hf_vcpu_run_return_encode_wait_for_interrupt_sleep_zero)
+{
+ struct hf_vcpu_run_return res = dirty_vcpu_run_return();
+ res.code = HF_VCPU_RUN_WAIT_FOR_INTERRUPT;
+ res.sleep.ns = 0;
+ EXPECT_THAT(hf_vcpu_run_return_encode(res, 0x1111, 0x2222),
+ SpciEq((struct spci_value){
+ .func = HF_SPCI_RUN_WAIT_FOR_INTERRUPT,
+ .arg1 = 0x22221111,
+ .arg2 = 1}));
}
/**
@@ -104,13 +160,28 @@
*/
TEST(abi, hf_vcpu_run_return_decode_wait_for_interrupt)
{
- struct hf_vcpu_run_return res =
- hf_vcpu_run_return_decode(0x1234abcdbadb0102);
+ struct spci_value v = dirty_spci_value();
+ v.func = HF_SPCI_RUN_WAIT_FOR_INTERRUPT;
+ v.arg2 = 0x1234abcdbadb01;
+ struct hf_vcpu_run_return res = hf_vcpu_run_return_decode(v);
EXPECT_THAT(res.code, Eq(HF_VCPU_RUN_WAIT_FOR_INTERRUPT));
EXPECT_THAT(res.sleep.ns, Eq(0x1234abcdbadb01));
}
/**
+ * Decode a wait-for-interrupt response waiting indefinitely.
+ */
+TEST(abi, hf_vcpu_run_return_decode_wait_for_interrupt_indefinite)
+{
+ struct spci_value v = dirty_spci_value();
+ v.func = HF_SPCI_RUN_WAIT_FOR_INTERRUPT;
+ v.arg2 = SPCI_SLEEP_INDEFINITE;
+ struct hf_vcpu_run_return res = hf_vcpu_run_return_decode(v);
+ EXPECT_THAT(res.code, Eq(HF_VCPU_RUN_WAIT_FOR_INTERRUPT));
+ EXPECT_THAT(res.sleep.ns, Eq(HF_SLEEP_INDEFINITE));
+}
+
+/**
* Encode wait-for-message response without leaking.
*/
TEST(abi, hf_vcpu_run_return_encode_wait_for_message)
@@ -118,19 +189,40 @@
struct hf_vcpu_run_return res = dirty_vcpu_run_return();
res.code = HF_VCPU_RUN_WAIT_FOR_MESSAGE;
res.sleep.ns = HF_SLEEP_INDEFINITE;
- EXPECT_THAT(hf_vcpu_run_return_encode(res), Eq(0xffffffffffffff03));
+ EXPECT_THAT(hf_vcpu_run_return_encode(res, 0x1111, 0x2222),
+ SpciEq((struct spci_value){.func = SPCI_MSG_WAIT_32,
+ .arg1 = 0x22221111,
+ .arg2 = SPCI_SLEEP_INDEFINITE}));
}
/**
- * Encoding wait-for-message response with too large sleep duration will drop
+ * Encoding wait-for-message response with large sleep duration won't drop
* the top octet.
*/
-TEST(abi, hf_vcpu_run_return_encode_wait_for_message_sleep_too_long)
+TEST(abi, hf_vcpu_run_return_encode_wait_for_message_sleep_long)
{
struct hf_vcpu_run_return res = dirty_vcpu_run_return();
res.code = HF_VCPU_RUN_WAIT_FOR_MESSAGE;
res.sleep.ns = 0xaa99777777777777;
- EXPECT_THAT(hf_vcpu_run_return_encode(res), Eq(0x9977777777777703));
+ EXPECT_THAT(hf_vcpu_run_return_encode(res, 0x1111, 0x2222),
+ SpciEq((struct spci_value){.func = SPCI_MSG_WAIT_32,
+ .arg1 = 0x22221111,
+ .arg2 = 0xaa99777777777777}));
+}
+
+/**
+ * Encoding wait-for-message response with zero sleep duration will become
+ * non-zero for SPCI compatibility.
+ */
+TEST(abi, hf_vcpu_run_return_encode_wait_for_message_sleep_zero)
+{
+ struct hf_vcpu_run_return res = dirty_vcpu_run_return();
+ res.code = HF_VCPU_RUN_WAIT_FOR_MESSAGE;
+ res.sleep.ns = 0;
+ EXPECT_THAT(hf_vcpu_run_return_encode(res, 0x1111, 0x2222),
+ SpciEq((struct spci_value){.func = SPCI_MSG_WAIT_32,
+ .arg1 = 0x22221111,
+ .arg2 = 1}));
}
/**
@@ -138,13 +230,28 @@
*/
TEST(abi, hf_vcpu_run_return_decode_wait_for_message)
{
- struct hf_vcpu_run_return res =
- hf_vcpu_run_return_decode(0x12347654badb0103);
+ struct spci_value v = dirty_spci_value();
+ v.func = SPCI_MSG_WAIT_32;
+ v.arg2 = 0x12347654badb01;
+ struct hf_vcpu_run_return res = hf_vcpu_run_return_decode(v);
EXPECT_THAT(res.code, Eq(HF_VCPU_RUN_WAIT_FOR_MESSAGE));
EXPECT_THAT(res.sleep.ns, Eq(0x12347654badb01));
}
/**
+ * Decode a wait-for-message response waiting indefinitely.
+ */
+TEST(abi, hf_vcpu_run_return_decode_wait_for_message_indefinite)
+{
+ struct spci_value v = dirty_spci_value();
+ v.func = SPCI_MSG_WAIT_32;
+ v.arg2 = SPCI_SLEEP_INDEFINITE;
+ struct hf_vcpu_run_return res = hf_vcpu_run_return_decode(v);
+ EXPECT_THAT(res.code, Eq(HF_VCPU_RUN_WAIT_FOR_MESSAGE));
+ EXPECT_THAT(res.sleep.ns, Eq(HF_SLEEP_INDEFINITE));
+}
+
+/**
* Encode wake up response without leaking.
*/
TEST(abi, hf_vcpu_run_return_encode_wake_up)
@@ -153,7 +260,9 @@
res.code = HF_VCPU_RUN_WAKE_UP;
res.wake_up.vm_id = 0x1234;
res.wake_up.vcpu = 0xabcd;
- EXPECT_THAT(hf_vcpu_run_return_encode(res), Eq(0x1234abcd0004));
+ EXPECT_THAT(hf_vcpu_run_return_encode(res, 0x1111, 0x2222),
+ SpciEq((struct spci_value){.func = HF_SPCI_RUN_WAKE_UP,
+ .arg1 = 0xabcd1234}));
}
/**
@@ -161,8 +270,10 @@
*/
TEST(abi, hf_vcpu_run_return_decode_wake_up)
{
- struct hf_vcpu_run_return res =
- hf_vcpu_run_return_decode(0xbeeff00daf04);
+ struct spci_value v = dirty_spci_value();
+ v.func = HF_SPCI_RUN_WAKE_UP;
+ v.arg1 = 0x88888888f00dbeef;
+ struct hf_vcpu_run_return res = hf_vcpu_run_return_decode(v);
EXPECT_THAT(res.code, Eq(HF_VCPU_RUN_WAKE_UP));
EXPECT_THAT(res.wake_up.vm_id, Eq(0xbeef));
EXPECT_THAT(res.wake_up.vcpu, Eq(0xf00d));
@@ -177,7 +288,10 @@
res.code = HF_VCPU_RUN_MESSAGE;
res.message.vm_id = 0xf007;
res.message.size = 0xcafe1971;
- EXPECT_THAT(hf_vcpu_run_return_encode(res), Eq(0xcafe197100f00705));
+ EXPECT_THAT(hf_vcpu_run_return_encode(res, 0x1111, 0x2222),
+ SpciEq((struct spci_value){.func = SPCI_MSG_SEND_32,
+ .arg1 = 0x1111f007,
+ .arg3 = 0xcafe1971}));
}
/**
@@ -185,8 +299,11 @@
*/
TEST(abi, hf_vcpu_run_return_decode_message)
{
- struct hf_vcpu_run_return res =
- hf_vcpu_run_return_decode(0x1123581314916205);
+ struct spci_value v = dirty_spci_value();
+ v.func = SPCI_MSG_SEND_32;
+ v.arg1 = 0x1111222233339162;
+ v.arg3 = 0x11235813;
+ struct hf_vcpu_run_return res = hf_vcpu_run_return_decode(v);
EXPECT_THAT(res.code, Eq(HF_VCPU_RUN_MESSAGE));
EXPECT_THAT(res.message.vm_id, Eq(0x9162));
EXPECT_THAT(res.message.size, Eq(0x11235813));
@@ -199,7 +316,8 @@
{
struct hf_vcpu_run_return res = dirty_vcpu_run_return();
res.code = HF_VCPU_RUN_NOTIFY_WAITERS;
- EXPECT_THAT(hf_vcpu_run_return_encode(res), Eq(6));
+ EXPECT_THAT(hf_vcpu_run_return_encode(res, 0x1111, 0x2222),
+ SpciEq((struct spci_value){.func = SPCI_RX_RELEASE_32}));
}
/**
@@ -207,8 +325,9 @@
*/
TEST(abi, hf_vcpu_run_return_decode_notify_waiters)
{
- struct hf_vcpu_run_return res =
- hf_vcpu_run_return_decode(0x1a1a1a1a2b2b2b06);
+ struct spci_value v = dirty_spci_value();
+ v.func = SPCI_RX_RELEASE_32;
+ struct hf_vcpu_run_return res = hf_vcpu_run_return_decode(v);
EXPECT_THAT(res.code, Eq(HF_VCPU_RUN_NOTIFY_WAITERS));
}
@@ -219,7 +338,10 @@
{
struct hf_vcpu_run_return res = dirty_vcpu_run_return();
res.code = HF_VCPU_RUN_ABORTED;
- EXPECT_THAT(hf_vcpu_run_return_encode(res), Eq(7));
+ EXPECT_THAT(
+ hf_vcpu_run_return_encode(res, 0x1111, 0x2222),
+ SpciEq((struct spci_value){.func = SPCI_ERROR_32,
+ .arg2 = (uint64_t)SPCI_ABORTED}));
}
/**
@@ -227,8 +349,10 @@
*/
TEST(abi, hf_vcpu_run_return_decode_aborted)
{
- struct hf_vcpu_run_return res =
- hf_vcpu_run_return_decode(0x31dbac4810fbc507);
+ struct spci_value v = dirty_spci_value();
+ v.func = SPCI_ERROR_32;
+ v.arg2 = SPCI_ABORTED;
+ struct hf_vcpu_run_return res = hf_vcpu_run_return_decode(v);
EXPECT_THAT(res.code, Eq(HF_VCPU_RUN_ABORTED));
}
diff --git a/src/api.c b/src/api.c
index d009538..f20dc19 100644
--- a/src/api.c
+++ b/src/api.c
@@ -62,8 +62,7 @@
* Switches the physical CPU back to the corresponding vcpu of the primary VM.
*
* This triggers the scheduling logic to run. Run in the context of secondary VM
- * to cause HF_VCPU_RUN to return and the primary VM to regain control of the
- * cpu.
+ * to cause SPCI_RUN to return and the primary VM to regain control of the CPU.
*/
static struct vcpu *api_switch_to_primary(struct vcpu *current,
struct hf_vcpu_run_return primary_ret,
@@ -91,10 +90,9 @@
}
- /* Set the return value for the primary VM's call to HF_VCPU_RUN. */
+ /* Set the return value for the primary VM's call to SPCI_RUN. */
- arch_regs_set_retval(
- &next->regs,
- (struct spci_value){
- .func = hf_vcpu_run_return_encode(primary_ret)});
+ arch_regs_set_retval(&next->regs, hf_vcpu_run_return_encode(
+ primary_ret, current->vm->id,
+ vcpu_index(current)));
/* Mark the current vcpu as waiting. */
sl_lock(¤t->lock);
diff --git a/src/arch/aarch64/hypervisor/handler.c b/src/arch/aarch64/hypervisor/handler.c
index 77b10e3..885a383 100644
--- a/src/arch/aarch64/hypervisor/handler.c
+++ b/src/arch/aarch64/hypervisor/handler.c
@@ -344,6 +344,14 @@
case SPCI_MSG_POLL_32:
*args = api_spci_msg_recv(false, current(), next);
return true;
+ case SPCI_RUN_32: {
+ struct vcpu *vcpu = current();
+ *args = hf_vcpu_run_return_encode(
+ api_vcpu_run((args->arg1 >> 16) & 0xffff,
+ args->arg1 & 0xffff, vcpu, next),
+ vcpu->vm->id, vcpu_index(vcpu));
+ return true;
+ }
}
return false;
@@ -441,11 +449,6 @@
vcpu->regs.r[0] = api_vcpu_get_count(args.arg1, vcpu);
break;
- case HF_VCPU_RUN:
- vcpu->regs.r[0] = hf_vcpu_run_return_encode(
- api_vcpu_run(args.arg1, args.arg2, vcpu, &next));
- break;
-
case HF_VM_CONFIGURE:
vcpu->regs.r[0] = api_vm_configure(
ipa_init(args.arg1), ipa_init(args.arg2), vcpu, &next);