Update spci_msg_send to new SPCI beta API.
This removes the header in the message buffers, as the header data is now
passed in the SPCI_MSG_SEND parameters.
Bug: 141469322
Change-Id: I3a61f5470fd95ba2d47df33f5c96466ba286af85
diff --git a/.vscode/settings.json b/.vscode/settings.json
index b85c5f5..88f2c18 100644
--- a/.vscode/settings.json
+++ b/.vscode/settings.json
@@ -15,7 +15,8 @@
"spinlock.h": "c",
"offsets.h": "c",
"barriers.h": "c",
- "spci.h": "c"
+ "spci.h": "c",
+ "spci_internal.h": "c"
},
"C_Cpp.errorSquiggles": "Disabled"
}
diff --git a/driver/linux b/driver/linux
index b331fa9..cafe017 160000
--- a/driver/linux
+++ b/driver/linux
@@ -1 +1 @@
-Subproject commit b331fa985153f8184530a4758ef289dd16923448
+Subproject commit cafe017d8863e5326c3f1c660ce71d5a82da7b19
diff --git a/inc/hf/api.h b/inc/hf/api.h
index ebc6b20..8c6328c 100644
--- a/inc/hf/api.h
+++ b/inc/hf/api.h
@@ -54,14 +54,15 @@
spci_vcpu_index_t target_vcpu_idx, uint32_t intid,
struct vcpu *current, struct vcpu **next);
-spci_return_t api_spci_msg_send(uint32_t attributes, struct vcpu *current,
- struct vcpu **next);
+struct spci_value api_spci_msg_send(spci_vm_id_t sender_vm_id,
+ spci_vm_id_t receiver_vm_id, uint32_t size,
+ uint32_t attributes, struct vcpu *current,
+ struct vcpu **next);
struct spci_value api_spci_msg_recv(bool block, struct vcpu *current,
struct vcpu **next);
void api_yield(struct vcpu *current, struct vcpu **next);
struct spci_value api_spci_version(void);
-spci_return_t api_spci_share_memory(struct vm_locked to_locked,
- struct vm_locked from_locked,
- struct spci_memory_region *memory_region,
- uint32_t memory_to_attributes,
- enum spci_memory_share share);
+struct spci_value api_spci_share_memory(
+ struct vm_locked to_locked, struct vm_locked from_locked,
+ struct spci_memory_region *memory_region, uint32_t memory_to_attributes,
+ enum spci_memory_share share);
diff --git a/inc/hf/spci_internal.h b/inc/hf/spci_internal.h
index 8f2a500..e046233 100644
--- a/inc/hf/spci_internal.h
+++ b/inc/hf/spci_internal.h
@@ -68,11 +68,11 @@
return (struct spci_value){.func = SPCI_ERROR_32, .arg1 = error_code};
}
-spci_return_t spci_msg_handle_architected_message(
+struct spci_value spci_msg_handle_architected_message(
struct vm_locked to_locked, struct vm_locked from_locked,
const struct spci_architected_message_header
*architected_message_replica,
- struct spci_message *from_msg_replica, struct spci_message *to_msg);
+ uint32_t size);
bool spci_msg_check_transition(struct vm *to, struct vm *from,
enum spci_memory_share share,
diff --git a/inc/hf/vm.h b/inc/hf/vm.h
index 7ea1e12..4762a2e 100644
--- a/inc/hf/vm.h
+++ b/inc/hf/vm.h
@@ -60,8 +60,17 @@
struct mailbox {
enum mailbox_state state;
- struct spci_message *recv;
- const struct spci_message *send;
+ void *recv;
+ const void *send;
+
+ /** The ID of the VM which sent the message currently in `recv`. */
+ spci_vm_id_t recv_sender;
+
+ /** The size of the message currently in `recv`. */
+ uint32_t recv_size;
+
+ /** The attributes of the message currently in `recv`. */
+ uint32_t recv_attributes;
/**
* List of wait_entry structs representing VMs that want to be notified
diff --git a/inc/vmapi/hf/call.h b/inc/vmapi/hf/call.h
index a920a58..b3db3fc 100644
--- a/inc/vmapi/hf/call.h
+++ b/inc/vmapi/hf/call.h
@@ -115,14 +115,26 @@
* If the recipient's receive buffer is busy, it can optionally register the
* caller to be notified when the recipient's receive buffer becomes available.
*
- * Returns SPCI_SUCCESS if the message is sent, an error code otherwise:
- * - INVALID_PARAMETER: one or more of the parameters do not conform.
+ * Attributes may include:
+ * - SPCI_MSG_SEND_NOTIFY, to notify the caller when it should try again.
+ * - SPCI_MSG_SEND_LEGACY_MEMORY, to send a legacy architected memory sharing
+ * message.
+ *
+ * Returns SPCI_SUCCESS if the message is sent, or an error code otherwise:
+ * - INVALID_PARAMETERS: one or more of the parameters do not conform.
* - BUSY: the message could not be delivered either because the mailbox
- * was full or the target VM does not yet exist.
+ * was full or the target VM is not yet set up.
*/
-static inline int64_t spci_msg_send(uint32_t attributes)
+static inline struct spci_value spci_msg_send(spci_vm_id_t sender_vm_id,
+ spci_vm_id_t target_vm_id,
+ uint32_t size,
+ uint32_t attributes)
{
- return hf_call(SPCI_MSG_SEND_32, attributes, 0, 0);
+ return spci_call((struct spci_value){
+ .func = SPCI_MSG_SEND_32,
+ .arg1 = ((uint64_t)sender_vm_id << 16) | target_vm_id,
+ .arg3 = size,
+ .arg4 = attributes});
}
/**
diff --git a/inc/vmapi/hf/spci.h b/inc/vmapi/hf/spci.h
index 125380b..e878ffe 100644
--- a/inc/vmapi/hf/spci.h
+++ b/inc/vmapi/hf/spci.h
@@ -61,23 +61,16 @@
};
/* SPCI function specific constants. */
+#define SPCI_MSG_RECV_BLOCK 0x1
#define SPCI_MSG_RECV_BLOCK_MASK 0x1
-#define SPCI_MSG_SEND_NOTIFY_MASK 0x1
-
-#define SPCI_MESSAGE_ARCHITECTED 0x0
-#define SPCI_MESSAGE_IMPDEF 0x1
-#define SPCI_MESSAGE_IMPDEF_MASK 0x1
#define SPCI_MSG_SEND_NOTIFY 0x1
-#define SPCI_MSG_RECV_BLOCK 0x1
+#define SPCI_MSG_SEND_NOTIFY_MASK 0x1
+#define SPCI_MSG_SEND_LEGACY_MEMORY 0x2
+#define SPCI_MSG_SEND_LEGACY_MEMORY_MASK 0x2
/* The maximum length possible for a single message. */
-#define SPCI_MSG_PAYLOAD_MAX (HF_MAILBOX_SIZE - sizeof(struct spci_message))
-
-#define spci_get_lend_descriptor(message)\
- ((struct spci_memory_lend *)(((uint8_t *) message)\
- + sizeof(struct spci_message)\
- + sizeof(struct spci_architected_message_header)))
+#define SPCI_MSG_PAYLOAD_MAX HF_MAILBOX_SIZE
enum spci_lend_access {
SPCI_LEND_RO_NX,
@@ -208,42 +201,10 @@
return args.arg3;
}
-/** SPCI common message header. */
-struct spci_message {
- /*
- * TODO: version is part of SPCI alpha2 but will be
- * removed in the next spec revision hence we are not
- * including it in the header.
- */
-
- /**
- * flags[0]:
- * 0: Architected message payload;
- * 1: Implementation defined message payload.
- * flags[15:1] reserved (MBZ).
- */
- uint16_t flags;
-
- /*
- * TODO: Padding is present to ensure controlled offset
- * of the length field. SPCI spec must be updated
- * to reflect this (TBD).
- */
- uint16_t reserved_1;
-
- uint32_t length;
- spci_vm_id_t target_vm_id;
- spci_vm_id_t source_vm_id;
-
- /*
- * TODO: Padding is present to ensure that the field
- * payload alignment is 64B. SPCI spec must be updated
- * to reflect this.
- */
- uint32_t reserved_2;
-
- uint8_t payload[];
-};
+static inline uint32_t spci_msg_send_attributes(struct spci_value args)
+{
+ return args.arg4;
+}
struct spci_architected_message_header {
uint16_t type;
@@ -281,73 +242,23 @@
};
/* TODO: Move all the functions below this line to a support library. */
-/**
- * Fill all the fields, except for the flags, in the SPCI message common header.
- */
-static inline void spci_common_header_init(struct spci_message *message,
- uint32_t message_length,
- spci_vm_id_t target_vm_id,
- spci_vm_id_t source_vm_id)
+
+static inline struct spci_memory_lend *spci_get_lend_descriptor(void *message)
{
- message->length = message_length;
- message->target_vm_id = target_vm_id;
- message->source_vm_id = source_vm_id;
-
- /*
- * TODO: Reserved fields in the common message header will be
- * defined as MBZ in next SPCI spec updates.
- */
- message->reserved_1 = 0;
- message->reserved_2 = 0;
-}
-
-/**
- * Set the SPCI implementation defined message header fields.
- */
-static inline void spci_message_init(struct spci_message *message,
- uint32_t message_length,
- spci_vm_id_t target_vm_id,
- spci_vm_id_t source_vm_id)
-{
- spci_common_header_init(message, message_length, target_vm_id,
- source_vm_id);
-
- message->flags = SPCI_MESSAGE_IMPDEF;
-}
-
-/**
- * Obtain a pointer to the architected header in the spci_message.
- *
- * Note: the argument "message" has const qualifier. This qualifier
- * is meant to forbid changes in information enclosed in the
- * struct spci_message. The spci_architected_message_header, for which
- * a pointer is returned in this function, is not part of spci_message.
- * Its information is meant to be changed and hence the returned pointer
- * does not have const type qualifier.
- */
-static inline struct spci_architected_message_header *
-spci_get_architected_message_header(const struct spci_message *message)
-{
- return (struct spci_architected_message_header *)message->payload;
+ return (struct spci_memory_lend
+ *)((struct spci_architected_message_header *)message)
+ ->payload;
}
/**
* Helper method to fill in the information about the architected message.
*/
-static inline void spci_architected_message_init(struct spci_message *message,
- uint32_t message_length,
- spci_vm_id_t target_vm_id,
- spci_vm_id_t source_vm_id,
+static inline void spci_architected_message_init(void *message,
enum spci_memory_share type)
{
- struct spci_architected_message_header *architected_header;
-
- spci_common_header_init(message, message_length, target_vm_id,
- source_vm_id);
- message->flags = SPCI_MESSAGE_ARCHITECTED;
-
/* Fill the architected header. */
- architected_header = spci_get_architected_message_header(message);
+ struct spci_architected_message_header *architected_header =
+ (struct spci_architected_message_header *)message;
architected_header->type = type;
architected_header->reserved[0] = 0;
architected_header->reserved[1] = 0;
@@ -356,10 +267,10 @@
/** Obtain a pointer to the start of the memory region in the donate message. */
static inline struct spci_memory_region *spci_get_donated_memory_region(
- struct spci_message *message)
+ void *message)
{
struct spci_architected_message_header *architected_header =
- spci_get_architected_message_header(message);
+ (struct spci_architected_message_header *)message;
return (struct spci_memory_region *)architected_header->payload;
}
@@ -396,64 +307,61 @@
}
/** Construct the SPCI donate memory region message. */
-static inline void spci_memory_donate(
- struct spci_message *message, spci_vm_id_t target_vm_id,
- spci_vm_id_t source_vm_id,
+static inline uint32_t spci_memory_donate_init(
+ void *message,
struct spci_memory_region_constituent *region_constituents,
uint32_t num_elements, uint32_t handle)
{
- int32_t message_length;
+ uint32_t message_length;
struct spci_memory_region *memory_region =
spci_get_donated_memory_region(message);
message_length = sizeof(struct spci_architected_message_header);
/* Fill in the details on the common message header. */
- spci_architected_message_init(message, message_length, target_vm_id,
- source_vm_id, SPCI_MEMORY_DONATE);
+ spci_architected_message_init(message, SPCI_MEMORY_DONATE);
/* Create single memory region. */
- message->length += spci_memory_region_add(
+ message_length += spci_memory_region_add(
memory_region, handle, region_constituents, num_elements);
+ return message_length;
}
/**
* Construct the SPCI memory region relinquish message.
* A set of memory regions can be given back to the owner.
*/
-static inline void spci_memory_relinquish(
- struct spci_message *message, spci_vm_id_t target_vm_id,
- spci_vm_id_t source_vm_id,
+static inline uint32_t spci_memory_relinquish_init(
+ void *message,
struct spci_memory_region_constituent *region_constituents,
uint64_t num_elements, uint32_t handle)
{
- int32_t message_length;
+ uint32_t message_length;
struct spci_memory_region *memory_region =
spci_get_donated_memory_region(message);
message_length = sizeof(struct spci_architected_message_header);
/* Fill in the details on the common message header. */
- spci_architected_message_init(message, message_length, target_vm_id,
- source_vm_id, SPCI_MEMORY_RELINQUISH);
+ spci_architected_message_init(message, SPCI_MEMORY_RELINQUISH);
/* Create single memory region. */
- message->length += spci_memory_region_add(
+ message_length += spci_memory_region_add(
memory_region, handle, region_constituents, num_elements);
+ return message_length;
}
/**
* Construct the SPCI memory region lend message.
*/
-static inline void spci_memory_lend(
- struct spci_message *message, spci_vm_id_t target_vm_id,
- spci_vm_id_t source_vm_id,
+static inline uint32_t spci_memory_lend_init(
+ void *message,
struct spci_memory_region_constituent *region_constituents,
uint64_t num_elements, uint32_t handle, enum spci_lend_access access,
enum spci_lend_type type, enum spci_lend_cacheability cacheability,
enum spci_lend_shareability shareability)
{
- int32_t message_length;
+ uint32_t message_length;
struct spci_memory_region *memory_region;
const struct spci_memory_lend lend_init = {0};
@@ -469,8 +377,7 @@
sizeof(struct spci_memory_lend);
/* Fill in the details on the common message header. */
- spci_architected_message_init(message, message_length, target_vm_id,
- source_vm_id, SPCI_MEMORY_LEND);
+ spci_architected_message_init(message, SPCI_MEMORY_LEND);
lend_descriptor->flags = SPCI_LEND_KEEP_MAPPED;
@@ -484,6 +391,7 @@
shareability);
/* Create single memory region. */
- message->length += spci_memory_region_add(
+ message_length += spci_memory_region_add(
memory_region, handle, region_constituents, num_elements);
+ return message_length;
}
diff --git a/inc/vmapi/hf/types.h b/inc/vmapi/hf/types.h
index 1612099..687dafa 100644
--- a/inc/vmapi/hf/types.h
+++ b/inc/vmapi/hf/types.h
@@ -21,6 +21,8 @@
#include <linux/types.h>
+#define INT32_C(c) c
+
typedef phys_addr_t hf_ipaddr_t;
#else
diff --git a/src/BUILD.gn b/src/BUILD.gn
index 1b81c4a..16e8197 100644
--- a/src/BUILD.gn
+++ b/src/BUILD.gn
@@ -158,7 +158,6 @@
"manifest_test.cc",
"mm_test.cc",
"mpool_test.cc",
- "spci_test.cc",
"string_test.cc",
]
sources += [ "layout_fake.c" ]
diff --git a/src/api.c b/src/api.c
index 2034ae9..d11b305 100644
--- a/src/api.c
+++ b/src/api.c
@@ -352,9 +352,9 @@
{
return (struct spci_value){
.func = SPCI_MSG_SEND_32,
- .arg1 = receiver->mailbox.recv->source_vm_id << 16 |
- receiver->id,
- .arg3 = receiver->mailbox.recv->length};
+ .arg1 = (receiver->mailbox.recv_sender << 16) | receiver->id,
+ .arg3 = receiver->mailbox.recv_size,
+ .arg4 = receiver->mailbox.recv_attributes};
}
/**
@@ -846,8 +846,10 @@
* If the recipient's receive buffer is busy, it can optionally register the
* caller to be notified when the recipient's receive buffer becomes available.
*/
-spci_return_t api_spci_msg_send(uint32_t attributes, struct vcpu *current,
- struct vcpu **next)
+struct spci_value api_spci_msg_send(spci_vm_id_t sender_vm_id,
+ spci_vm_id_t receiver_vm_id, uint32_t size,
+ uint32_t attributes, struct vcpu *current,
+ struct vcpu **next)
{
struct vm *from = current->vm;
struct vm *to;
@@ -857,59 +859,49 @@
struct hf_vcpu_run_return primary_ret = {
.code = HF_VCPU_RUN_MESSAGE,
};
- struct spci_message from_msg_replica;
- struct spci_message *to_msg;
- const struct spci_message *from_msg;
+ const void *from_msg;
- uint32_t size;
-
- int64_t ret;
+ struct spci_value ret;
bool notify = (attributes & SPCI_MSG_SEND_NOTIFY_MASK) ==
SPCI_MSG_SEND_NOTIFY;
+ /* Ensure sender VM ID corresponds to the current VM. */
+ if (sender_vm_id != from->id) {
+ return spci_error(SPCI_INVALID_PARAMETERS);
+ }
+
+ /* Disallow reflexive requests as this suggests an error in the VM. */
+ if (receiver_vm_id == from->id) {
+ return spci_error(SPCI_INVALID_PARAMETERS);
+ }
+
+ /* Limit the size of transfer. */
+ if (size > SPCI_MSG_PAYLOAD_MAX) {
+ return spci_error(SPCI_INVALID_PARAMETERS);
+ }
+
/*
- * Check that the sender has configured its send buffer. Copy the
- * message header. If the tx mailbox at from_msg is configured (i.e.
- * from_msg != NULL) then it can be safely accessed after releasing the
- * lock since the tx mailbox address can only be configured once.
+ * Check that the sender has configured its send buffer. If the tx
+ * mailbox at from_msg is configured (i.e. from_msg != NULL) then it can
+ * be safely accessed after releasing the lock since the tx mailbox
+ * address can only be configured once.
*/
sl_lock(&from->lock);
from_msg = from->mailbox.send;
sl_unlock(&from->lock);
if (from_msg == NULL) {
- return SPCI_INVALID_PARAMETERS;
+ return spci_error(SPCI_INVALID_PARAMETERS);
}
- /*
- * Note that the payload is not copied when the message header is.
- */
- from_msg_replica = *from_msg;
-
- /* Ensure source VM id corresponds to the current VM. */
- if (from_msg_replica.source_vm_id != from->id) {
- return SPCI_INVALID_PARAMETERS;
- }
-
- size = from_msg_replica.length;
- /* Limit the size of transfer. */
- if (size > SPCI_MSG_PAYLOAD_MAX) {
- return SPCI_INVALID_PARAMETERS;
- }
-
- /* Disallow reflexive requests as this suggests an error in the VM. */
- if (from_msg_replica.target_vm_id == from->id) {
- return SPCI_INVALID_PARAMETERS;
- }
-
- /* Ensure the target VM exists. */
- to = vm_find(from_msg_replica.target_vm_id);
+ /* Ensure the receiver VM exists. */
+ to = vm_find(receiver_vm_id);
if (to == NULL) {
- return SPCI_INVALID_PARAMETERS;
+ return spci_error(SPCI_INVALID_PARAMETERS);
}
/*
- * Hf needs to hold the lock on <to> before the mailbox state is
+ * Hafnium needs to hold the lock on <to> before the mailbox state is
* checked. The lock on <to> must be held until the information is
* copied to <to> Rx buffer. Since in
* spci_msg_handle_architected_message we may call api_spci_share_memory
@@ -921,13 +913,12 @@
if (to->mailbox.state != MAILBOX_STATE_EMPTY ||
to->mailbox.recv == NULL) {
/*
- * Fail if the target isn't currently ready to receive data,
+ * Fail if the receiver isn't currently ready to receive data,
* setting up for notification if requested.
*/
if (notify) {
struct wait_entry *entry =
- ¤t->vm->wait_entries
- [from_msg_replica.target_vm_id];
+ &from->wait_entries[receiver_vm_id];
/* Append waiter only if it's not there yet. */
if (list_empty(&entry->wait_links)) {
@@ -936,72 +927,68 @@
}
}
- ret = SPCI_BUSY;
+ ret = spci_error(SPCI_BUSY);
goto out;
}
- to_msg = to->mailbox.recv;
-
- /* Handle architected messages. */
- if ((from_msg_replica.flags & SPCI_MESSAGE_IMPDEF_MASK) !=
- SPCI_MESSAGE_IMPDEF) {
+ /* Handle legacy memory sharing messages. */
+ if ((attributes & SPCI_MSG_SEND_LEGACY_MEMORY_MASK) ==
+ SPCI_MSG_SEND_LEGACY_MEMORY) {
/*
* Buffer holding the internal copy of the shared memory
* regions.
*/
- uint8_t *message_buffer = cpu_get_buffer(current->cpu->id);
+ struct spci_architected_message_header
+ *architected_message_replica =
+ (struct spci_architected_message_header *)
+ cpu_get_buffer(current->cpu->id);
uint32_t message_buffer_size =
cpu_get_buffer_size(current->cpu->id);
struct spci_architected_message_header *architected_header =
- spci_get_architected_message_header(from->mailbox.send);
+ (struct spci_architected_message_header *)from_msg;
- const struct spci_architected_message_header
- *architected_message_replica;
-
- if (from_msg_replica.length > message_buffer_size) {
- ret = SPCI_INVALID_PARAMETERS;
+ if (size > message_buffer_size) {
+ ret = spci_error(SPCI_INVALID_PARAMETERS);
goto out;
}
- if (from_msg_replica.length <
- sizeof(struct spci_architected_message_header)) {
- ret = SPCI_INVALID_PARAMETERS;
+ if (size < sizeof(struct spci_architected_message_header)) {
+ ret = spci_error(SPCI_INVALID_PARAMETERS);
goto out;
}
- /* Copy the architected message into an internal buffer. */
- memcpy_s(message_buffer, message_buffer_size,
- architected_header, from_msg_replica.length);
-
- architected_message_replica =
- (struct spci_architected_message_header *)
- message_buffer;
+ /* Copy the architected message into the internal buffer. */
+ memcpy_s(architected_message_replica, message_buffer_size,
+ architected_header, size);
/*
- * Note that message_buffer is passed as the third parameter to
- * spci_msg_handle_architected_message. The execution flow
- * commencing at spci_msg_handle_architected_message will make
- * several accesses to fields in message_buffer. The memory area
- * message_buffer must be exclusively owned by Hf so that TOCTOU
- * issues do not arise.
+ * Note that architected_message_replica is passed as the third
+ * parameter to spci_msg_handle_architected_message. The
+ * execution flow commencing at
+ * spci_msg_handle_architected_message will make several
+ * accesses to fields in architected_message_replica. The memory
+ * area architected_message_replica must be exclusively owned by
+ * Hafnium so that TOCTOU issues do not arise.
*/
ret = spci_msg_handle_architected_message(
vm_from_to_lock.vm1, vm_from_to_lock.vm2,
- architected_message_replica, &from_msg_replica, to_msg);
+ architected_message_replica, size);
- if (ret != SPCI_SUCCESS) {
+ if (ret.func != SPCI_SUCCESS_32) {
goto out;
}
} else {
/* Copy data. */
- memcpy_s(to_msg->payload, SPCI_MSG_PAYLOAD_MAX,
- from->mailbox.send->payload, size);
- *to_msg = from_msg_replica;
+ memcpy_s(to->mailbox.recv, SPCI_MSG_PAYLOAD_MAX, from_msg,
+ size);
+ to->mailbox.recv_size = size;
+ to->mailbox.recv_sender = sender_vm_id;
+ to->mailbox.recv_attributes = 0;
+ ret = (struct spci_value){.func = SPCI_SUCCESS_32};
}
primary_ret.message.vm_id = to->id;
- ret = SPCI_SUCCESS;
/* Messages for the primary VM are delivered directly. */
if (to->id == HF_PRIMARY_VM_ID) {
@@ -1429,11 +1416,10 @@
* the request.
* Success is indicated by SPCI_SUCCESS.
*/
-spci_return_t api_spci_share_memory(struct vm_locked to_locked,
- struct vm_locked from_locked,
- struct spci_memory_region *memory_region,
- uint32_t memory_to_attributes,
- enum spci_memory_share share)
+struct spci_value api_spci_share_memory(
+ struct vm_locked to_locked, struct vm_locked from_locked,
+ struct spci_memory_region *memory_region, uint32_t memory_to_attributes,
+ enum spci_memory_share share)
{
struct vm *to = to_locked.vm;
struct vm *from = from_locked.vm;
@@ -1441,7 +1427,7 @@
int from_mode;
int to_mode;
struct mpool local_page_pool;
- int64_t ret;
+ struct spci_value ret;
paddr_t pa_begin;
paddr_t pa_end;
ipaddr_t begin;
@@ -1451,7 +1437,7 @@
/* Disallow reflexive shares as this suggests an error in the VM. */
if (to == from) {
- return SPCI_INVALID_PARAMETERS;
+ return spci_error(SPCI_INVALID_PARAMETERS);
}
/*
@@ -1475,7 +1461,7 @@
if (!spci_msg_check_transition(to, from, share, &orig_from_mode, begin,
end, memory_to_attributes, &from_mode,
&to_mode)) {
- return SPCI_INVALID_PARAMETERS;
+ return spci_error(SPCI_INVALID_PARAMETERS);
}
pa_begin = pa_from_ipa(begin);
@@ -1487,7 +1473,7 @@
*/
if (!mm_vm_identity_map(&from->ptable, pa_begin, pa_end, from_mode,
NULL, &local_page_pool)) {
- ret = SPCI_NO_MEMORY;
+ ret = spci_error(SPCI_NO_MEMORY);
goto out;
}
@@ -1498,7 +1484,7 @@
/* Recover any memory consumed in failed mapping. */
mm_vm_defrag(&from->ptable, &local_page_pool);
- ret = SPCI_NO_MEMORY;
+ ret = spci_error(SPCI_NO_MEMORY);
CHECK(mm_vm_identity_map(&from->ptable, pa_begin, pa_end,
orig_from_mode, NULL,
@@ -1507,10 +1493,9 @@
goto out;
}
- ret = SPCI_SUCCESS;
+ ret = (struct spci_value){.func = SPCI_SUCCESS_32};
out:
-
mpool_fini(&local_page_pool);
return ret;
diff --git a/src/arch/aarch64/hypervisor/handler.c b/src/arch/aarch64/hypervisor/handler.c
index 00ba80b..c4719ab 100644
--- a/src/arch/aarch64/hypervisor/handler.c
+++ b/src/arch/aarch64/hypervisor/handler.c
@@ -324,7 +324,11 @@
return true;
case SPCI_MSG_SEND_32:
- args->func = api_spci_msg_send(args->arg1, current(), next);
+ *args = api_spci_msg_send(spci_msg_send_sender(*args),
+ spci_msg_send_receiver(*args),
+ spci_msg_send_size(*args),
+ spci_msg_send_attributes(*args),
+ current(), next);
return true;
case SPCI_MSG_WAIT_32:
*args = api_spci_msg_recv(true, current(), next);
diff --git a/src/spci_architected_message.c b/src/spci_architected_message.c
index 28d5fc7..ec2041e 100644
--- a/src/spci_architected_message.c
+++ b/src/spci_architected_message.c
@@ -23,7 +23,7 @@
* Check if the message length and the number of memory region constituents
* match, if the check is correct call the memory sharing routine.
*/
-static spci_return_t spci_validate_call_share_memory(
+static struct spci_value spci_validate_call_share_memory(
struct vm_locked to_locked, struct vm_locked from_locked,
struct spci_memory_region *memory_region, uint32_t memory_share_size,
uint32_t memory_to_attributes, enum spci_memory_share share)
@@ -38,7 +38,7 @@
sizeof(struct spci_memory_region) +
(sizeof(struct spci_memory_region_constituent) *
max_count)) {
- return SPCI_INVALID_PARAMETERS;
+ return spci_error(SPCI_INVALID_PARAMETERS);
}
return api_spci_share_memory(to_locked, from_locked, memory_region,
@@ -50,13 +50,13 @@
* corresponding api functions implementing the functionality requested
* in the architected message.
*/
-spci_return_t spci_msg_handle_architected_message(
+struct spci_value spci_msg_handle_architected_message(
struct vm_locked to_locked, struct vm_locked from_locked,
const struct spci_architected_message_header
*architected_message_replica,
- struct spci_message *from_msg_replica, struct spci_message *to_msg)
+ uint32_t size)
{
- int64_t ret;
+ struct spci_value ret;
struct spci_memory_region *memory_region;
uint32_t to_mode;
uint32_t message_type;
@@ -70,8 +70,7 @@
architected_message_replica->payload;
memory_share_size =
- from_msg_replica->length -
- sizeof(struct spci_architected_message_header);
+ size - sizeof(struct spci_architected_message_header);
/* TODO: Add memory attributes. */
to_mode = MM_MODE_R | MM_MODE_W | MM_MODE_X;
@@ -87,8 +86,7 @@
architected_message_replica->payload;
memory_share_size =
- from_msg_replica->length -
- sizeof(struct spci_architected_message_header);
+ size - sizeof(struct spci_architected_message_header);
to_mode = MM_MODE_R | MM_MODE_W | MM_MODE_X;
@@ -111,8 +109,7 @@
memory_region =
(struct spci_memory_region *)lend_descriptor->payload;
memory_share_size =
- from_msg_replica->length -
- sizeof(struct spci_architected_message_header) -
+ size - sizeof(struct spci_architected_message_header) -
sizeof(struct spci_memory_lend);
to_mode = spci_memory_attrs_to_mode(borrower_attributes);
@@ -126,7 +123,7 @@
default:
dlog("Invalid memory sharing message.\n");
- return SPCI_INVALID_PARAMETERS;
+ return spci_error(SPCI_INVALID_PARAMETERS);
}
/* Copy data to the destination Rx. */
@@ -138,11 +135,14 @@
* in the destination Rx buffer. This mechanism will be defined at the
* spec level.
*/
- if (ret == SPCI_SUCCESS) {
- memcpy_s(to_msg->payload, SPCI_MSG_PAYLOAD_MAX,
- architected_message_replica, from_msg_replica->length);
+ if (ret.func == SPCI_SUCCESS_32) {
+ memcpy_s(to_locked.vm->mailbox.recv, SPCI_MSG_PAYLOAD_MAX,
+ architected_message_replica, size);
+ to_locked.vm->mailbox.recv_size = size;
+ to_locked.vm->mailbox.recv_sender = from_locked.vm->id;
+ to_locked.vm->mailbox.recv_attributes =
+ SPCI_MSG_SEND_LEGACY_MEMORY;
}
- *to_msg = *from_msg_replica;
return ret;
}
diff --git a/src/spci_test.cc b/src/spci_test.cc
deleted file mode 100644
index a3191cc..0000000
--- a/src/spci_test.cc
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
- * Copyright 2019 The Hafnium Authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * https://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-extern "C" {
-#include "vmapi/hf/spci.h"
-}
-
-#include <gmock/gmock.h>
-
-namespace
-{
-using ::testing::Eq;
-
-/**
- * Ensure that spci_message_init is correctly setting the expected fields in the
- * SPCI common message header.
- */
-TEST(spci, spci_message_init)
-{
- spci_message header;
- spci_message compare_header = {
- .flags = SPCI_MESSAGE_IMPDEF_MASK,
- .length = 1,
- .target_vm_id = 2,
- .source_vm_id = 3,
- };
-
- memset(&header, 0xff, sizeof(header));
- spci_message_init(&header, 1, 2, 3);
-
- EXPECT_THAT(memcmp(&header, &compare_header, sizeof(header)), 0);
-}
-} /* namespace */
diff --git a/test/hftest/inc/hftest_impl.h b/test/hftest/inc/hftest_impl.h
index f3eb407..9598b12 100644
--- a/test/hftest/inc/hftest_impl.h
+++ b/test/hftest/inc/hftest_impl.h
@@ -140,8 +140,8 @@
const struct fdt_header *fdt;
/* These are used in services. */
- struct spci_message *send;
- struct spci_message *recv;
+ void *send;
+ void *recv;
size_t memory_size;
};
@@ -291,12 +291,12 @@
ASSERT_EQ(run_res.sleep.ns, HF_SLEEP_INDEFINITE); \
\
/* Send the selected service to run and let it be handled. */ \
- memcpy_s(send_buffer->payload, SPCI_MSG_PAYLOAD_MAX, service, \
+ memcpy_s(send_buffer, SPCI_MSG_PAYLOAD_MAX, service, \
msg_length); \
- spci_message_init(send_buffer, msg_length, vm_id, \
- hf_vm_get_id()); \
\
- ASSERT_EQ(spci_msg_send(0), 0); \
+ ASSERT_EQ(spci_msg_send(hf_vm_get_id(), vm_id, msg_length, 0) \
+ .func, \
+ SPCI_SUCCESS_32); \
run_res = hf_vcpu_run(vm_id, 0); \
ASSERT_EQ(run_res.code, HF_VCPU_RUN_YIELD); \
} while (0)
diff --git a/test/hftest/service.c b/test/hftest/service.c
index c04c253..ed35e74 100644
--- a/test/hftest/service.c
+++ b/test/hftest/service.c
@@ -95,8 +95,6 @@
}
}
- struct spci_message *recv_msg = (struct spci_message *)recv;
-
/* Prepare the context. */
/* Set up the mailbox. */
@@ -104,7 +102,8 @@
/* Receive the name of the service to run. */
ret = spci_msg_wait();
- memiter_init(&args, recv_msg->payload, spci_msg_send_size(ret));
+ ASSERT_EQ(ret.func, SPCI_MSG_SEND_32);
+ memiter_init(&args, recv, spci_msg_send_size(ret));
service = find_service(&args);
hf_mailbox_clear();
@@ -122,8 +121,8 @@
ctx = hftest_get_context();
memset_s(ctx, sizeof(*ctx), 0, sizeof(*ctx));
ctx->abort = abort;
- ctx->send = (struct spci_message *)send;
- ctx->recv = (struct spci_message *)recv;
+ ctx->send = send;
+ ctx->recv = recv;
ctx->memory_size = memory_size;
/* Pause so the next time cycles are given the service will be run. */
diff --git a/test/linux/hftest_socket.c b/test/linux/hftest_socket.c
index ae69abb..460d1f1 100644
--- a/test/linux/hftest_socket.c
+++ b/test/linux/hftest_socket.c
@@ -74,14 +74,12 @@
ctx = hftest_get_context();
memset_s(ctx, sizeof(*ctx), 0, sizeof(*ctx));
ctx->abort = abort;
- ctx->send = (struct spci_message *)send;
- ctx->recv = (struct spci_message *)recv;
+ ctx->send = send;
+ ctx->recv = recv;
ctx->memory_size = memory_size;
for (;;) {
struct spci_value ret;
- struct spci_message *send_buf = (struct spci_message *)send;
- struct spci_message *recv_buf = (struct spci_message *)recv;
/* Receive the packet. */
ret = spci_msg_wait();
@@ -89,21 +87,21 @@
EXPECT_LE(spci_msg_send_size(ret), SPCI_MSG_PAYLOAD_MAX);
/* Echo the message back to the sender. */
- memcpy_s(send_buf->payload, SPCI_MSG_PAYLOAD_MAX,
- recv_buf->payload, spci_msg_send_size(ret));
+ memcpy_s(send, SPCI_MSG_PAYLOAD_MAX, recv,
+ spci_msg_send_size(ret));
/* Swap the socket's source and destination ports */
- struct hf_msg_hdr *hdr = (struct hf_msg_hdr *)send_buf->payload;
+ struct hf_msg_hdr *hdr = (struct hf_msg_hdr *)send;
swap(&(hdr->src_port), &(hdr->dst_port));
/* Swap the destination and source ids. */
spci_vm_id_t dst_id = spci_msg_send_sender(ret);
spci_vm_id_t src_id = spci_msg_send_receiver(ret);
- spci_message_init(send_buf, spci_msg_send_size(ret), dst_id,
- src_id);
-
hf_mailbox_clear();
- EXPECT_EQ(spci_msg_send(0), SPCI_SUCCESS);
+ EXPECT_EQ(spci_msg_send(src_id, dst_id, spci_msg_send_size(ret),
+ 0)
+ .func,
+ SPCI_SUCCESS_32);
}
}
diff --git a/test/vmapi/arch/aarch64/gicv3/busy_secondary.c b/test/vmapi/arch/aarch64/gicv3/busy_secondary.c
index 2f0766a..1ed1031 100644
--- a/test/vmapi/arch/aarch64/gicv3/busy_secondary.c
+++ b/test/vmapi/arch/aarch64/gicv3/busy_secondary.c
@@ -79,10 +79,11 @@
/* Let secondary start looping. */
dlog("Telling secondary to loop.\n");
- memcpy_s(send_buffer->payload, SPCI_MSG_PAYLOAD_MAX, message,
- sizeof(message));
- spci_message_init(send_buffer, 0, SERVICE_VM0, HF_PRIMARY_VM_ID);
- EXPECT_EQ(spci_msg_send(0), 0);
+ memcpy_s(send_buffer, SPCI_MSG_PAYLOAD_MAX, message, sizeof(message));
+ EXPECT_EQ(
+ spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM0, sizeof(message), 0)
+ .func,
+ SPCI_SUCCESS_32);
run_res = hf_vcpu_run(SERVICE_VM0, 0);
EXPECT_EQ(run_res.code, HF_VCPU_RUN_PREEMPTED);
@@ -136,10 +137,11 @@
/* Let secondary start looping. */
dlog("Telling secondary to loop.\n");
- memcpy_s(send_buffer->payload, SPCI_MSG_PAYLOAD_MAX, message,
- sizeof(message));
- spci_message_init(send_buffer, 0, SERVICE_VM0, HF_PRIMARY_VM_ID);
- EXPECT_EQ(spci_msg_send(0), 0);
+ memcpy_s(send_buffer, SPCI_MSG_PAYLOAD_MAX, message, sizeof(message));
+ EXPECT_EQ(
+ spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM0, sizeof(message), 0)
+ .func,
+ SPCI_SUCCESS_32);
run_res = hf_vcpu_run(SERVICE_VM0, 0);
EXPECT_EQ(run_res.code, HF_VCPU_RUN_PREEMPTED);
diff --git a/test/vmapi/arch/aarch64/gicv3/gicv3.c b/test/vmapi/arch/aarch64/gicv3/gicv3.c
index e6e7435..9c144b0 100644
--- a/test/vmapi/arch/aarch64/gicv3/gicv3.c
+++ b/test/vmapi/arch/aarch64/gicv3/gicv3.c
@@ -34,8 +34,8 @@
hf_ipaddr_t send_page_addr = (hf_ipaddr_t)send_page;
hf_ipaddr_t recv_page_addr = (hf_ipaddr_t)recv_page;
-struct spci_message *send_buffer = (struct spci_message *)send_page;
-struct spci_message *recv_buffer = (struct spci_message *)recv_page;
+void *send_buffer = send_page;
+void *recv_buffer = recv_page;
volatile uint32_t last_interrupt_id = 0;
diff --git a/test/vmapi/arch/aarch64/gicv3/inc/gicv3.h b/test/vmapi/arch/aarch64/gicv3/inc/gicv3.h
index d51398f..7308233 100644
--- a/test/vmapi/arch/aarch64/gicv3/inc/gicv3.h
+++ b/test/vmapi/arch/aarch64/gicv3/inc/gicv3.h
@@ -33,8 +33,8 @@
extern hf_ipaddr_t send_page_addr;
extern hf_ipaddr_t recv_page_addr;
-extern struct spci_message *send_buffer;
-extern struct spci_message *recv_buffer;
+extern void *send_buffer;
+extern void *recv_buffer;
extern volatile uint32_t last_interrupt_id;
diff --git a/test/vmapi/arch/aarch64/gicv3/services/timer.c b/test/vmapi/arch/aarch64/gicv3/services/timer.c
index 5d3dd2d..fc52ab1 100644
--- a/test/vmapi/arch/aarch64/gicv3/services/timer.c
+++ b/test/vmapi/arch/aarch64/gicv3/services/timer.c
@@ -46,11 +46,8 @@
}
buffer[8] = '0' + interrupt_id / 10;
buffer[9] = '0' + interrupt_id % 10;
- memcpy_s(SERVICE_SEND_BUFFER()->payload, SPCI_MSG_PAYLOAD_MAX, buffer,
- size);
- spci_message_init(SERVICE_SEND_BUFFER(), size, HF_PRIMARY_VM_ID,
- hf_vm_get_id());
- spci_msg_send(0);
+ memcpy_s(SERVICE_SEND_BUFFER(), SPCI_MSG_PAYLOAD_MAX, buffer, size);
+ spci_msg_send(hf_vm_get_id(), HF_PRIMARY_VM_ID, size, 0);
dlog("secondary IRQ %d ended\n", interrupt_id);
event_send_local();
}
@@ -63,8 +60,7 @@
for (;;) {
const char timer_wfi_message[] = "**** xxxxxxx";
- struct spci_message *message_header = SERVICE_RECV_BUFFER();
- uint8_t *message;
+ uint8_t *message = (uint8_t *)SERVICE_RECV_BUFFER();
bool wfi, wfe, receive;
bool disable_interrupts;
uint32_t ticks;
@@ -77,8 +73,6 @@
spci_msg_send_size(ret));
}
- message = message_header->payload;
-
/*
* Start a timer to send the message back: enable it and
* set it for the requested number of ticks.
diff --git a/test/vmapi/arch/aarch64/gicv3/timer_secondary.c b/test/vmapi/arch/aarch64/gicv3/timer_secondary.c
index b21bd83..e38a2fc 100644
--- a/test/vmapi/arch/aarch64/gicv3/timer_secondary.c
+++ b/test/vmapi/arch/aarch64/gicv3/timer_secondary.c
@@ -49,11 +49,11 @@
EXPECT_EQ(run_res.sleep.ns, HF_SLEEP_INDEFINITE);
/* Send the message for the secondary to set a timer. */
- memcpy_s(send_buffer->payload, SPCI_MSG_PAYLOAD_MAX, message,
- sizeof(message));
- spci_message_init(send_buffer, sizeof(message), SERVICE_VM0,
- HF_PRIMARY_VM_ID);
- EXPECT_EQ(spci_msg_send(0), 0);
+ memcpy_s(send_buffer, SPCI_MSG_PAYLOAD_MAX, message, sizeof(message));
+ EXPECT_EQ(
+ spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM0, sizeof(message), 0)
+ .func,
+ SPCI_SUCCESS_32);
/*
* Let the secondary handle the message and set the timer. It will loop
@@ -75,7 +75,7 @@
run_res = hf_vcpu_run(SERVICE_VM0, 0);
EXPECT_EQ(run_res.code, HF_VCPU_RUN_MESSAGE);
EXPECT_EQ(run_res.message.size, sizeof(expected_response));
- EXPECT_EQ(memcmp(recv_buffer->payload, expected_response,
+ EXPECT_EQ(memcmp(recv_buffer, expected_response,
sizeof(expected_response)),
0);
EXPECT_EQ(hf_mailbox_clear(), 0);
@@ -108,11 +108,11 @@
EXPECT_EQ(run_res.sleep.ns, HF_SLEEP_INDEFINITE);
/* Send the message for the secondary to set a timer. */
- memcpy_s(send_buffer->payload, SPCI_MSG_PAYLOAD_MAX, message,
- message_length);
- spci_message_init(send_buffer, message_length, SERVICE_VM0,
- HF_PRIMARY_VM_ID);
- EXPECT_EQ(spci_msg_send(0), 0);
+ memcpy_s(send_buffer, SPCI_MSG_PAYLOAD_MAX, message, message_length);
+ EXPECT_EQ(
+ spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM0, message_length, 0)
+ .func,
+ SPCI_SUCCESS_32);
/* Let the secondary handle the message and set the timer. */
last_interrupt_id = 0;
@@ -173,7 +173,7 @@
/* Once we wake it up it should get the timer interrupt and respond. */
EXPECT_EQ(run_res.code, HF_VCPU_RUN_MESSAGE);
EXPECT_EQ(run_res.message.size, sizeof(expected_response));
- EXPECT_EQ(memcmp(recv_buffer->payload, expected_response,
+ EXPECT_EQ(memcmp(recv_buffer, expected_response,
sizeof(expected_response)),
0);
EXPECT_EQ(hf_mailbox_clear(), 0);
@@ -261,11 +261,11 @@
EXPECT_EQ(run_res.sleep.ns, HF_SLEEP_INDEFINITE);
/* Send the message for the secondary to set a timer. */
- memcpy_s(send_buffer->payload, SPCI_MSG_PAYLOAD_MAX, message,
- message_length);
- spci_message_init(send_buffer, message_length, SERVICE_VM0,
- HF_PRIMARY_VM_ID);
- EXPECT_EQ(spci_msg_send(0), 0);
+ memcpy_s(send_buffer, SPCI_MSG_PAYLOAD_MAX, message, message_length);
+ EXPECT_EQ(
+ spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM0, message_length, 0)
+ .func,
+ SPCI_SUCCESS_32);
/*
* Let the secondary handle the message and set the timer.
diff --git a/test/vmapi/primary_with_secondaries/BUILD.gn b/test/vmapi/primary_with_secondaries/BUILD.gn
index 7302db0..86092cc 100644
--- a/test/vmapi/primary_with_secondaries/BUILD.gn
+++ b/test/vmapi/primary_with_secondaries/BUILD.gn
@@ -38,14 +38,24 @@
"spci.c",
]
- sources += [ "util.c" ]
-
deps = [
+ ":util",
"//src/arch/aarch64/hftest:registers",
"//test/hftest:hftest_primary_vm",
]
}
+source_set("util") {
+ testonly = true
+ public_configs = [
+ ":config",
+ "//test/hftest:hftest_config",
+ ]
+ sources = [
+ "util.c",
+ ]
+}
+
initrd("primary_with_secondaries_test") {
testonly = true
diff --git a/test/vmapi/primary_with_secondaries/inc/util.h b/test/vmapi/primary_with_secondaries/inc/util.h
index eca641f..845a4b4 100644
--- a/test/vmapi/primary_with_secondaries/inc/util.h
+++ b/test/vmapi/primary_with_secondaries/inc/util.h
@@ -18,9 +18,15 @@
#include "vmapi/hf/spci.h"
+#define EXPECT_SPCI_ERROR(value, spci_error) \
+ do { \
+ EXPECT_EQ(value.func, SPCI_ERROR_32); \
+ EXPECT_EQ(value.arg1, spci_error); \
+ } while (0)
+
struct mailbox_buffers {
- struct spci_message *send;
- struct spci_message *recv;
+ void *send;
+ void *recv;
};
struct mailbox_buffers set_up_mailbox(void);
diff --git a/test/vmapi/primary_with_secondaries/interrupts.c b/test/vmapi/primary_with_secondaries/interrupts.c
index ae081d4..31db0b0 100644
--- a/test/vmapi/primary_with_secondaries/interrupts.c
+++ b/test/vmapi/primary_with_secondaries/interrupts.c
@@ -42,16 +42,15 @@
EXPECT_EQ(run_res.sleep.ns, HF_SLEEP_INDEFINITE);
/* Set the message, echo it and wait for a response. */
- memcpy_s(mb.send->payload, SPCI_MSG_PAYLOAD_MAX, message,
- sizeof(message));
- spci_message_init(mb.send, sizeof(message), SERVICE_VM0,
- HF_PRIMARY_VM_ID);
- EXPECT_EQ(spci_msg_send(0), 0);
+ memcpy_s(mb.send, SPCI_MSG_PAYLOAD_MAX, message, sizeof(message));
+ EXPECT_EQ(
+ spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM0, sizeof(message), 0)
+ .func,
+ SPCI_SUCCESS_32);
run_res = hf_vcpu_run(SERVICE_VM0, 0);
EXPECT_EQ(run_res.code, HF_VCPU_RUN_MESSAGE);
EXPECT_EQ(run_res.message.size, sizeof(expected_response));
- EXPECT_EQ(memcmp(mb.recv->payload, expected_response,
- sizeof(expected_response)),
+ EXPECT_EQ(memcmp(mb.recv, expected_response, sizeof(expected_response)),
0);
EXPECT_EQ(hf_mailbox_clear(), 0);
}
@@ -78,8 +77,7 @@
run_res = hf_vcpu_run(SERVICE_VM0, 0);
EXPECT_EQ(run_res.code, HF_VCPU_RUN_MESSAGE);
EXPECT_EQ(run_res.message.size, sizeof(expected_response));
- EXPECT_EQ(memcmp(mb.recv->payload, expected_response,
- sizeof(expected_response)),
+ EXPECT_EQ(memcmp(mb.recv, expected_response, sizeof(expected_response)),
0);
EXPECT_EQ(hf_mailbox_clear(), 0);
@@ -88,8 +86,7 @@
run_res = hf_vcpu_run(SERVICE_VM0, 0);
EXPECT_EQ(run_res.code, HF_VCPU_RUN_MESSAGE);
EXPECT_EQ(run_res.message.size, sizeof(expected_response));
- EXPECT_EQ(memcmp(mb.recv->payload, expected_response,
- sizeof(expected_response)),
+ EXPECT_EQ(memcmp(mb.recv, expected_response, sizeof(expected_response)),
0);
EXPECT_EQ(hf_mailbox_clear(), 0);
}
@@ -116,8 +113,7 @@
run_res = hf_vcpu_run(SERVICE_VM0, 0);
EXPECT_EQ(run_res.code, HF_VCPU_RUN_MESSAGE);
EXPECT_EQ(run_res.message.size, sizeof(expected_response));
- EXPECT_EQ(memcmp(mb.recv->payload, expected_response,
- sizeof(expected_response)),
+ EXPECT_EQ(memcmp(mb.recv, expected_response, sizeof(expected_response)),
0);
EXPECT_EQ(hf_mailbox_clear(), 0);
@@ -126,7 +122,7 @@
run_res = hf_vcpu_run(SERVICE_VM0, 0);
EXPECT_EQ(run_res.code, HF_VCPU_RUN_MESSAGE);
EXPECT_EQ(run_res.message.size, sizeof(expected_response_2));
- EXPECT_EQ(memcmp(mb.recv->payload, expected_response_2,
+ EXPECT_EQ(memcmp(mb.recv, expected_response_2,
sizeof(expected_response_2)),
0);
EXPECT_EQ(hf_mailbox_clear(), 0);
@@ -156,8 +152,7 @@
run_res = hf_vcpu_run(SERVICE_VM0, 0);
EXPECT_EQ(run_res.code, HF_VCPU_RUN_MESSAGE);
EXPECT_EQ(run_res.message.size, sizeof(expected_response));
- EXPECT_EQ(memcmp(mb.recv->payload, expected_response,
- sizeof(expected_response)),
+ EXPECT_EQ(memcmp(mb.recv, expected_response, sizeof(expected_response)),
0);
EXPECT_EQ(hf_mailbox_clear(), 0);
@@ -166,15 +161,15 @@
EXPECT_EQ(run_res.sleep.ns, HF_SLEEP_INDEFINITE);
/* Now send a message to the secondary. */
- memcpy_s(mb.send->payload, SPCI_MSG_PAYLOAD_MAX, message,
- sizeof(message));
- spci_message_init(mb.send, sizeof(message), SERVICE_VM0,
- HF_PRIMARY_VM_ID);
- EXPECT_EQ(spci_msg_send(0), 0);
+ memcpy_s(mb.send, SPCI_MSG_PAYLOAD_MAX, message, sizeof(message));
+ EXPECT_EQ(
+ spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM0, sizeof(message), 0)
+ .func,
+ SPCI_SUCCESS_32);
run_res = hf_vcpu_run(SERVICE_VM0, 0);
EXPECT_EQ(run_res.code, HF_VCPU_RUN_MESSAGE);
EXPECT_EQ(run_res.message.size, sizeof(expected_response_2));
- EXPECT_EQ(memcmp(mb.recv->payload, expected_response_2,
+ EXPECT_EQ(memcmp(mb.recv, expected_response_2,
sizeof(expected_response_2)),
0);
EXPECT_EQ(hf_mailbox_clear(), 0);
@@ -204,16 +199,15 @@
* Now send a message to the secondary to enable the interrupt ID, and
* expect the response from the interrupt we sent before.
*/
- memcpy_s(mb.send->payload, SPCI_MSG_PAYLOAD_MAX, message,
- sizeof(message));
- spci_message_init(mb.send, sizeof(message), SERVICE_VM0,
- HF_PRIMARY_VM_ID);
- EXPECT_EQ(spci_msg_send(0), 0);
+ memcpy_s(mb.send, SPCI_MSG_PAYLOAD_MAX, message, sizeof(message));
+ EXPECT_EQ(
+ spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM0, sizeof(message), 0)
+ .func,
+ SPCI_SUCCESS_32);
run_res = hf_vcpu_run(SERVICE_VM0, 0);
EXPECT_EQ(run_res.code, HF_VCPU_RUN_MESSAGE);
EXPECT_EQ(run_res.message.size, sizeof(expected_response));
- EXPECT_EQ(memcmp(mb.recv->payload, expected_response,
- sizeof(expected_response)),
+ EXPECT_EQ(memcmp(mb.recv, expected_response, sizeof(expected_response)),
0);
EXPECT_EQ(hf_mailbox_clear(), 0);
}
@@ -240,8 +234,7 @@
run_res = hf_vcpu_run(SERVICE_VM0, 0);
EXPECT_EQ(run_res.code, HF_VCPU_RUN_MESSAGE);
EXPECT_EQ(run_res.message.size, sizeof(expected_response));
- EXPECT_EQ(memcmp(mb.recv->payload, expected_response,
- sizeof(expected_response)),
+ EXPECT_EQ(memcmp(mb.recv, expected_response, sizeof(expected_response)),
0);
EXPECT_EQ(hf_mailbox_clear(), 0);
}
@@ -268,8 +261,7 @@
run_res = hf_vcpu_run(SERVICE_VM0, 0);
EXPECT_EQ(run_res.code, HF_VCPU_RUN_MESSAGE);
EXPECT_EQ(run_res.message.size, sizeof(expected_response));
- EXPECT_EQ(memcmp(mb.recv->payload, expected_response,
- sizeof(expected_response)),
+ EXPECT_EQ(memcmp(mb.recv, expected_response, sizeof(expected_response)),
0);
EXPECT_EQ(hf_mailbox_clear(), 0);
}
@@ -290,15 +282,15 @@
EXPECT_EQ(run_res.code, HF_VCPU_RUN_WAIT_FOR_MESSAGE);
EXPECT_EQ(run_res.sleep.ns, HF_SLEEP_INDEFINITE);
- memcpy_s(mb.send->payload, SPCI_MSG_PAYLOAD_MAX, message,
- sizeof(message));
- spci_message_init(mb.send, sizeof(message), SERVICE_VM0,
- HF_PRIMARY_VM_ID);
- EXPECT_EQ(spci_msg_send(0), SPCI_SUCCESS);
+ memcpy_s(mb.send, SPCI_MSG_PAYLOAD_MAX, message, sizeof(message));
+ EXPECT_EQ(
+ spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM0, sizeof(message), 0)
+ .func,
+ SPCI_SUCCESS_32);
hf_interrupt_inject(SERVICE_VM0, 0, EXTERNAL_INTERRUPT_ID_A);
run_res = hf_vcpu_run(SERVICE_VM0, 0);
EXPECT_EQ(run_res.code, HF_VCPU_RUN_MESSAGE);
EXPECT_EQ(run_res.message.size, sizeof(message));
- EXPECT_EQ(memcmp(mb.recv->payload, message, sizeof(message)), 0);
+ EXPECT_EQ(memcmp(mb.recv, message, sizeof(message)), 0);
EXPECT_EQ(hf_mailbox_clear(), 0);
}
diff --git a/test/vmapi/primary_with_secondaries/mailbox.c b/test/vmapi/primary_with_secondaries/mailbox.c
index ca06bff..d83beaf 100644
--- a/test/vmapi/primary_with_secondaries/mailbox.c
+++ b/test/vmapi/primary_with_secondaries/mailbox.c
@@ -88,15 +88,15 @@
EXPECT_EQ(run_res.sleep.ns, HF_SLEEP_INDEFINITE);
/* Set the message, echo it and check it didn't change. */
- memcpy_s(mb.send->payload, SPCI_MSG_PAYLOAD_MAX, message,
- sizeof(message));
- spci_message_init(mb.send, sizeof(message), SERVICE_VM0,
- HF_PRIMARY_VM_ID);
- EXPECT_EQ(spci_msg_send(0), 0);
+ memcpy_s(mb.send, SPCI_MSG_PAYLOAD_MAX, message, sizeof(message));
+ EXPECT_EQ(
+ spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM0, sizeof(message), 0)
+ .func,
+ SPCI_SUCCESS_32);
run_res = hf_vcpu_run(SERVICE_VM0, 0);
EXPECT_EQ(run_res.code, HF_VCPU_RUN_MESSAGE);
EXPECT_EQ(run_res.message.size, sizeof(message));
- EXPECT_EQ(memcmp(mb.send->payload, message, sizeof(message)), 0);
+ EXPECT_EQ(memcmp(mb.send, message, sizeof(message)), 0);
EXPECT_EQ(hf_mailbox_clear(), 0);
}
@@ -120,16 +120,16 @@
/* Set the message, echo it and check it didn't change. */
next_permutation(message, sizeof(message) - 1);
- memcpy_s(mb.send->payload, SPCI_MSG_PAYLOAD_MAX, message,
+ memcpy_s(mb.send, SPCI_MSG_PAYLOAD_MAX, message,
sizeof(message));
- spci_message_init(mb.send, sizeof(message), SERVICE_VM0,
- HF_PRIMARY_VM_ID);
- EXPECT_EQ(spci_msg_send(0), 0);
+ EXPECT_EQ(spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM0,
+ sizeof(message), 0)
+ .func,
+ SPCI_SUCCESS_32);
run_res = hf_vcpu_run(SERVICE_VM0, 0);
EXPECT_EQ(run_res.code, HF_VCPU_RUN_MESSAGE);
EXPECT_EQ(run_res.message.size, sizeof(message));
- EXPECT_EQ(memcmp(mb.recv->payload, message, sizeof(message)),
- 0);
+ EXPECT_EQ(memcmp(mb.recv, message, sizeof(message)), 0);
EXPECT_EQ(hf_mailbox_clear(), 0);
}
}
@@ -159,17 +159,19 @@
* SERVICE_VM0, then to SERVICE_VM1 and finally back to here.
*/
{
- spci_vm_id_t *chain = (spci_vm_id_t *)mb.send->payload;
+ spci_vm_id_t *chain = (spci_vm_id_t *)mb.send;
*chain++ = htole32(SERVICE_VM1);
*chain++ = htole32(HF_PRIMARY_VM_ID);
memcpy_s(chain,
SPCI_MSG_PAYLOAD_MAX - (2 * sizeof(spci_vm_id_t)),
message, sizeof(message));
- spci_message_init(mb.send,
- sizeof(message) + (2 * sizeof(spci_vm_id_t)),
- SERVICE_VM0, HF_PRIMARY_VM_ID);
- EXPECT_EQ(spci_msg_send(0), 0);
+ EXPECT_EQ(
+ spci_msg_send(
+ HF_PRIMARY_VM_ID, SERVICE_VM0,
+ sizeof(message) + (2 * sizeof(spci_vm_id_t)), 0)
+ .func,
+ SPCI_SUCCESS_32);
}
/* Let SERVICE_VM0 forward the message. */
@@ -185,7 +187,7 @@
/* Ensure the message is intact. */
EXPECT_EQ(run_res.message.vm_id, HF_PRIMARY_VM_ID);
EXPECT_EQ(run_res.message.size, sizeof(message));
- EXPECT_EQ(memcmp(mb.recv->payload, message, sizeof(message)), 0);
+ EXPECT_EQ(memcmp(mb.recv, message, sizeof(message)), 0);
EXPECT_EQ(hf_mailbox_clear(), 0);
}
@@ -196,17 +198,17 @@
TEST(mailbox, no_primary_to_secondary_notification_on_configure)
{
struct hf_vcpu_run_return run_res;
+ set_up_mailbox();
- struct mailbox_buffers mb = set_up_mailbox();
- spci_message_init(mb.send, 0, SERVICE_VM0, HF_PRIMARY_VM_ID);
- EXPECT_EQ(spci_msg_send(0), SPCI_BUSY);
+ EXPECT_SPCI_ERROR(spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM0, 0, 0),
+ SPCI_BUSY);
run_res = hf_vcpu_run(SERVICE_VM0, 0);
- spci_message_init(mb.send, 0, SERVICE_VM0, HF_PRIMARY_VM_ID);
EXPECT_EQ(run_res.code, HF_VCPU_RUN_WAIT_FOR_MESSAGE);
EXPECT_EQ(run_res.sleep.ns, HF_SLEEP_INDEFINITE);
- EXPECT_EQ(spci_msg_send(0), SPCI_SUCCESS);
+ EXPECT_EQ(spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM0, 0, 0).func,
+ SPCI_SUCCESS_32);
}
/**
@@ -216,11 +218,11 @@
TEST(mailbox, secondary_to_primary_notification_on_configure)
{
struct hf_vcpu_run_return run_res;
+ set_up_mailbox();
- struct mailbox_buffers mb = set_up_mailbox();
-
- spci_message_init(mb.send, 0, SERVICE_VM0, HF_PRIMARY_VM_ID);
- EXPECT_EQ(spci_msg_send(SPCI_MSG_SEND_NOTIFY), SPCI_BUSY);
+ EXPECT_SPCI_ERROR(spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM0, 0,
+ SPCI_MSG_SEND_NOTIFY),
+ SPCI_BUSY);
/*
* Run first VM for it to configure itself. It should result in
@@ -234,7 +236,8 @@
EXPECT_EQ(hf_mailbox_waiter_get(SERVICE_VM0), -1);
/* Send should now succeed. */
- EXPECT_EQ(spci_msg_send(0), SPCI_SUCCESS);
+ EXPECT_EQ(spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM0, 0, 0).func,
+ SPCI_SUCCESS_32);
}
/**
@@ -255,15 +258,15 @@
EXPECT_EQ(run_res.sleep.ns, HF_SLEEP_INDEFINITE);
/* Send a message to echo service, and get response back. */
- memcpy_s(mb.send->payload, SPCI_MSG_PAYLOAD_MAX, message,
- sizeof(message));
- spci_message_init(mb.send, sizeof(message), SERVICE_VM0,
- HF_PRIMARY_VM_ID);
- EXPECT_EQ(spci_msg_send(0), 0);
+ memcpy_s(mb.send, SPCI_MSG_PAYLOAD_MAX, message, sizeof(message));
+ EXPECT_EQ(
+ spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM0, sizeof(message), 0)
+ .func,
+ SPCI_SUCCESS_32);
run_res = hf_vcpu_run(SERVICE_VM0, 0);
EXPECT_EQ(run_res.code, HF_VCPU_RUN_MESSAGE);
EXPECT_EQ(run_res.message.size, sizeof(message));
- EXPECT_EQ(memcmp(mb.recv->payload, message, sizeof(message)), 0);
+ EXPECT_EQ(memcmp(mb.recv, message, sizeof(message)), 0);
/* Let secondary VM continue running so that it will wait again. */
run_res = hf_vcpu_run(SERVICE_VM0, 0);
@@ -272,13 +275,13 @@
/* Without clearing our mailbox, send message again. */
reverse(message, strnlen_s(message, sizeof(message)));
- memcpy_s(mb.send->payload, SPCI_MSG_PAYLOAD_MAX, message,
- sizeof(message));
- spci_message_init(mb.send, sizeof(message), SERVICE_VM0,
- HF_PRIMARY_VM_ID);
+ memcpy_s(mb.send, SPCI_MSG_PAYLOAD_MAX, message, sizeof(message));
/* Message should be dropped since the mailbox was not cleared. */
- EXPECT_EQ(spci_msg_send(0), 0);
+ EXPECT_EQ(
+ spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM0, sizeof(message), 0)
+ .func,
+ SPCI_SUCCESS_32);
run_res = hf_vcpu_run(SERVICE_VM0, 0);
EXPECT_EQ(run_res.code, HF_VCPU_RUN_WAIT_FOR_INTERRUPT);
EXPECT_EQ(run_res.sleep.ns, HF_SLEEP_INDEFINITE);
@@ -300,7 +303,7 @@
run_res = hf_vcpu_run(SERVICE_VM0, 0);
EXPECT_EQ(run_res.code, HF_VCPU_RUN_MESSAGE);
EXPECT_EQ(run_res.message.size, sizeof(message));
- EXPECT_EQ(memcmp(mb.recv->payload, message, sizeof(message)), 0);
+ EXPECT_EQ(memcmp(mb.recv, message, sizeof(message)), 0);
}
/**
@@ -321,18 +324,21 @@
EXPECT_EQ(run_res.sleep.ns, HF_SLEEP_INDEFINITE);
/* Send a message to echo service twice. The second should fail. */
- memcpy_s(mb.send->payload, SPCI_MSG_PAYLOAD_MAX, message,
- sizeof(message));
- spci_message_init(mb.send, sizeof(message), SERVICE_VM0,
- HF_PRIMARY_VM_ID);
- EXPECT_EQ(spci_msg_send(0), SPCI_SUCCESS);
- EXPECT_EQ(spci_msg_send(SPCI_MSG_SEND_NOTIFY), SPCI_BUSY);
+ memcpy_s(mb.send, SPCI_MSG_PAYLOAD_MAX, message, sizeof(message));
+ EXPECT_EQ(
+ spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM0, sizeof(message), 0)
+ .func,
+ SPCI_SUCCESS_32);
+	EXPECT_SPCI_ERROR(spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM0,
+					sizeof(message),
+					SPCI_MSG_SEND_NOTIFY),
+			  SPCI_BUSY);
/* Receive a reply for the first message. */
run_res = hf_vcpu_run(SERVICE_VM0, 0);
EXPECT_EQ(run_res.code, HF_VCPU_RUN_MESSAGE);
EXPECT_EQ(run_res.message.size, sizeof(message));
- EXPECT_EQ(memcmp(mb.recv->payload, message, sizeof(message)), 0);
+ EXPECT_EQ(memcmp(mb.recv, message, sizeof(message)), 0);
/* Run VM again so that it clears its mailbox. */
run_res = hf_vcpu_run(SERVICE_VM0, 0);
@@ -343,5 +349,8 @@
EXPECT_EQ(hf_mailbox_waiter_get(SERVICE_VM0), -1);
/* Send should now succeed. */
- EXPECT_EQ(spci_msg_send(0), SPCI_SUCCESS);
+ EXPECT_EQ(
+ spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM0, sizeof(message), 0)
+ .func,
+ SPCI_SUCCESS_32);
}
diff --git a/test/vmapi/primary_with_secondaries/memory_sharing.c b/test/vmapi/primary_with_secondaries/memory_sharing.c
index 5de2649..b97dfab 100644
--- a/test/vmapi/primary_with_secondaries/memory_sharing.c
+++ b/test/vmapi/primary_with_secondaries/memory_sharing.c
@@ -84,16 +84,23 @@
for (m = 0;
m < ARRAY_SIZE(lend_shareability);
++m) {
- spci_memory_lend(
- mb.send, vms[i],
- HF_PRIMARY_VM_ID,
- constituents, 1, 0,
- lend_access[j],
- lend_type[k],
- lend_cacheability[l],
- lend_shareability[m]);
- EXPECT_EQ(
- spci_msg_send(0),
+ uint32_t msg_size =
+ spci_memory_lend_init(
+ mb.send,
+ constituents, 1,
+ 0,
+ lend_access[j],
+ lend_type[k],
+ lend_cacheability
+ [l],
+ lend_shareability
+ [m]);
+ EXPECT_SPCI_ERROR(
+ spci_msg_send(
+ HF_PRIMARY_VM_ID,
+ vms[i],
+ msg_size,
+ SPCI_MSG_SEND_LEGACY_MEMORY),
SPCI_INVALID_PARAMETERS);
}
}
@@ -116,13 +123,17 @@
int i;
for (i = 0; i < ARRAY_SIZE(vms); ++i) {
+ uint32_t msg_size;
/* Optionally skip one VM as the donate would succeed. */
if (vms[i] == avoid_vm) {
continue;
}
- spci_memory_donate(mb.send, vms[i], HF_PRIMARY_VM_ID,
- constituents, num_elements, 0);
- EXPECT_EQ(spci_msg_send(0), SPCI_INVALID_PARAMETERS);
+ msg_size = spci_memory_donate_init(mb.send, constituents,
+ num_elements, 0);
+ EXPECT_SPCI_ERROR(
+ spci_msg_send(HF_PRIMARY_VM_ID, vms[i], msg_size,
+ SPCI_MSG_SEND_LEGACY_MEMORY),
+ SPCI_INVALID_PARAMETERS);
}
}
@@ -140,9 +151,12 @@
int j;
for (i = 0; i < ARRAY_SIZE(vms); ++i) {
for (j = 0; j < ARRAY_SIZE(vms); ++j) {
- spci_memory_relinquish(mb.send, vms[i], vms[j],
- constituents, num_elements, 0);
- EXPECT_EQ(spci_msg_send(0), SPCI_INVALID_PARAMETERS);
+ uint32_t msg_size = spci_memory_relinquish_init(
+ mb.send, constituents, num_elements, 0);
+ EXPECT_SPCI_ERROR(
+ spci_msg_send(vms[j], vms[i], msg_size,
+ SPCI_MSG_SEND_LEGACY_MEMORY),
+ SPCI_INVALID_PARAMETERS);
}
}
}
@@ -210,9 +224,10 @@
* API is still to be agreed on so the address is passed
* explicitly to test the mechanism.
*/
- memcpy_s(mb.send->payload, SPCI_MSG_PAYLOAD_MAX, &ptr, sizeof(ptr));
- spci_message_init(mb.send, sizeof(ptr), SERVICE_VM0, HF_PRIMARY_VM_ID);
- EXPECT_EQ(spci_msg_send(0), SPCI_SUCCESS);
+ memcpy_s(mb.send, SPCI_MSG_PAYLOAD_MAX, &ptr, sizeof(ptr));
+ EXPECT_EQ(spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM0, sizeof(ptr), 0)
+ .func,
+ SPCI_SUCCESS_32);
run_res = hf_vcpu_run(SERVICE_VM0, 0);
EXPECT_EQ(run_res.code, HF_VCPU_RUN_YIELD);
@@ -253,9 +268,10 @@
* API is still to be agreed on so the address is passed
* explicitly to test the mechanism.
*/
- memcpy_s(mb.send->payload, SPCI_MSG_PAYLOAD_MAX, &ptr, sizeof(ptr));
- spci_message_init(mb.send, sizeof(ptr), SERVICE_VM0, HF_PRIMARY_VM_ID);
- EXPECT_EQ(spci_msg_send(0), SPCI_SUCCESS);
+ memcpy_s(mb.send, SPCI_MSG_PAYLOAD_MAX, &ptr, sizeof(ptr));
+ EXPECT_EQ(spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM0, sizeof(ptr), 0)
+ .func,
+ SPCI_SUCCESS_32);
/* Let the memory be returned. */
run_res = hf_vcpu_run(SERVICE_VM0, 0);
@@ -295,6 +311,7 @@
struct hf_vcpu_run_return run_res;
struct mailbox_buffers mb = set_up_mailbox();
uint8_t *ptr = page;
+ uint32_t msg_size;
SERVICE_SELECT(SERVICE_VM0, "spci_memory_return", mb.send);
@@ -306,10 +323,12 @@
{.address = (uint64_t)page, .page_count = 1},
};
- spci_memory_donate(mb.send, SERVICE_VM0, HF_PRIMARY_VM_ID, constituents,
- 1, 0);
+ msg_size = spci_memory_donate_init(mb.send, constituents, 1, 0);
- EXPECT_EQ(spci_msg_send(0), SPCI_SUCCESS);
+ EXPECT_EQ(spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM0, msg_size,
+ SPCI_MSG_SEND_LEGACY_MEMORY)
+ .func,
+ SPCI_SUCCESS_32);
run_res = hf_vcpu_run(SERVICE_VM0, 0);
/* Let the memory be returned. */
@@ -333,6 +352,7 @@
struct hf_vcpu_run_return run_res;
struct mailbox_buffers mb = set_up_mailbox();
uint8_t *ptr = page;
+ uint32_t msg_size;
SERVICE_SELECT(SERVICE_VM0, "spci_memory_lend_relinquish", mb.send);
@@ -343,11 +363,15 @@
{.address = (uint64_t)page, .page_count = 1},
};
- spci_memory_lend(mb.send, SERVICE_VM0, HF_PRIMARY_VM_ID, constituents,
- 1, 0, SPCI_LEND_RW_X, SPCI_LEND_NORMAL_MEM,
- SPCI_LEND_CACHE_WRITE_BACK, SPCI_LEND_OUTER_SHAREABLE);
+ msg_size = spci_memory_lend_init(mb.send, constituents, 1, 0,
+ SPCI_LEND_RW_X, SPCI_LEND_NORMAL_MEM,
+ SPCI_LEND_CACHE_WRITE_BACK,
+ SPCI_LEND_OUTER_SHAREABLE);
- EXPECT_EQ(spci_msg_send(0), SPCI_SUCCESS);
+ EXPECT_EQ(spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM0, msg_size,
+ SPCI_MSG_SEND_LEGACY_MEMORY)
+ .func,
+ SPCI_SUCCESS_32);
run_res = hf_vcpu_run(SERVICE_VM0, 0);
/* Let the memory be returned. */
@@ -385,9 +409,10 @@
* API is still to be agreed on so the address is passed
* explicitly to test the mechanism.
*/
- memcpy_s(mb.send->payload, SPCI_MSG_PAYLOAD_MAX, &ptr, sizeof(ptr));
- spci_message_init(mb.send, sizeof(ptr), SERVICE_VM0, HF_PRIMARY_VM_ID);
- EXPECT_EQ(spci_msg_send(0), SPCI_SUCCESS);
+ memcpy_s(mb.send, SPCI_MSG_PAYLOAD_MAX, &ptr, sizeof(ptr));
+ EXPECT_EQ(spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM0, sizeof(ptr), 0)
+ .func,
+ SPCI_SUCCESS_32);
/* Let the memory be returned. */
run_res = hf_vcpu_run(SERVICE_VM0, 0);
@@ -423,9 +448,10 @@
* API is still to be agreed on so the address is passed
* explicitly to test the mechanism.
*/
- memcpy_s(mb.send->payload, SPCI_MSG_PAYLOAD_MAX, &ptr, sizeof(ptr));
- spci_message_init(mb.send, sizeof(ptr), SERVICE_VM0, HF_PRIMARY_VM_ID);
- EXPECT_EQ(spci_msg_send(0), SPCI_SUCCESS);
+ memcpy_s(mb.send, SPCI_MSG_PAYLOAD_MAX, &ptr, sizeof(ptr));
+ EXPECT_EQ(spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM0, sizeof(ptr), 0)
+ .func,
+ SPCI_SUCCESS_32);
/* Let the memory be returned. */
run_res = hf_vcpu_run(SERVICE_VM0, 0);
@@ -460,9 +486,10 @@
* API is still to be agreed on so the address is passed
* explicitly to test the mechanism.
*/
- memcpy_s(mb.send->payload, SPCI_MSG_PAYLOAD_MAX, &ptr, sizeof(ptr));
- spci_message_init(mb.send, sizeof(ptr), SERVICE_VM0, HF_PRIMARY_VM_ID);
- EXPECT_EQ(spci_msg_send(0), SPCI_SUCCESS);
+ memcpy_s(mb.send, SPCI_MSG_PAYLOAD_MAX, &ptr, sizeof(ptr));
+ EXPECT_EQ(spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM0, sizeof(ptr), 0)
+ .func,
+ SPCI_SUCCESS_32);
/* Let the memory be returned. */
run_res = hf_vcpu_run(SERVICE_VM0, 0);
@@ -500,9 +527,10 @@
* API is still to be agreed on so the address is passed
* explicitly to test the mechanism.
*/
- memcpy_s(mb.send->payload, SPCI_MSG_PAYLOAD_MAX, &ptr, sizeof(ptr));
- spci_message_init(mb.send, sizeof(ptr), SERVICE_VM0, HF_PRIMARY_VM_ID);
- EXPECT_EQ(spci_msg_send(0), SPCI_SUCCESS);
+ memcpy_s(mb.send, SPCI_MSG_PAYLOAD_MAX, &ptr, sizeof(ptr));
+ EXPECT_EQ(spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM0, sizeof(ptr), 0)
+ .func,
+ SPCI_SUCCESS_32);
/* Let the memory be returned. */
run_res = hf_vcpu_run(SERVICE_VM0, 0);
@@ -534,7 +562,7 @@
EXPECT_EQ(run_res.code, HF_VCPU_RUN_MESSAGE);
/* Check the memory was cleared. */
- ptr = *(uint8_t **)mb.recv->payload;
+ ptr = *(uint8_t **)mb.recv;
for (int i = 0; i < PAGE_SIZE; ++i) {
ASSERT_EQ(ptr[i], 0);
}
@@ -560,7 +588,7 @@
EXPECT_EQ(run_res.code, HF_VCPU_RUN_MESSAGE);
/* Check the memory was cleared. */
- ptr = *(uint8_t **)mb.recv->payload;
+ ptr = *(uint8_t **)mb.recv;
for (int i = 0; i < PAGE_SIZE; ++i) {
ASSERT_EQ(ptr[i], 0);
}
@@ -578,6 +606,7 @@
struct hf_vcpu_run_return run_res;
struct mailbox_buffers mb = set_up_mailbox();
uint8_t *ptr = page;
+ uint32_t msg_size;
SERVICE_SELECT(SERVICE_VM0, "spci_donate_check_upper_bound", mb.send);
@@ -588,9 +617,11 @@
{.address = (uint64_t)page, .page_count = 1},
};
- spci_memory_donate(mb.send, SERVICE_VM0, HF_PRIMARY_VM_ID, constituents,
- 1, 0);
- EXPECT_EQ(spci_msg_send(0), SPCI_SUCCESS);
+ msg_size = spci_memory_donate_init(mb.send, constituents, 1, 0);
+ EXPECT_EQ(spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM0, msg_size,
+ SPCI_MSG_SEND_LEGACY_MEMORY)
+ .func,
+ SPCI_SUCCESS_32);
/* Observe the service faulting when accessing the memory. */
run_res = hf_vcpu_run(SERVICE_VM0, 0);
@@ -605,6 +636,7 @@
struct hf_vcpu_run_return run_res;
struct mailbox_buffers mb = set_up_mailbox();
uint8_t *ptr = page;
+ uint32_t msg_size;
SERVICE_SELECT(SERVICE_VM0, "spci_donate_check_lower_bound", mb.send);
@@ -615,9 +647,11 @@
{.address = (uint64_t)page, .page_count = 1},
};
- spci_memory_donate(mb.send, SERVICE_VM0, HF_PRIMARY_VM_ID, constituents,
- 1, 0);
- EXPECT_EQ(spci_msg_send(0), SPCI_SUCCESS);
+ msg_size = spci_memory_donate_init(mb.send, constituents, 1, 0);
+ EXPECT_EQ(spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM0, msg_size,
+ SPCI_MSG_SEND_LEGACY_MEMORY)
+ .func,
+ SPCI_SUCCESS_32);
/* Observe the service faulting when accessing the memory. */
run_res = hf_vcpu_run(SERVICE_VM0, 0);
@@ -633,6 +667,7 @@
struct hf_vcpu_run_return run_res;
struct mailbox_buffers mb = set_up_mailbox();
uint8_t *ptr = page;
+ uint32_t msg_size;
SERVICE_SELECT(SERVICE_VM0, "spci_memory_return", mb.send);
SERVICE_SELECT(SERVICE_VM1, "spci_memory_return", mb.send);
@@ -644,19 +679,23 @@
{.address = (uint64_t)page, .page_count = 1},
};
- spci_memory_donate(mb.send, SERVICE_VM0, HF_PRIMARY_VM_ID, constituents,
- 1, 0);
+ msg_size = spci_memory_donate_init(mb.send, constituents, 1, 0);
- EXPECT_EQ(spci_msg_send(0), SPCI_SUCCESS);
+ EXPECT_EQ(spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM0, msg_size,
+ SPCI_MSG_SEND_LEGACY_MEMORY)
+ .func,
+ SPCI_SUCCESS_32);
run_res = hf_vcpu_run(SERVICE_VM0, 0);
/* Let the memory be returned. */
EXPECT_EQ(run_res.code, HF_VCPU_RUN_MESSAGE);
/* Share the memory with another VM. */
- spci_memory_donate(mb.send, SERVICE_VM1, HF_PRIMARY_VM_ID, constituents,
- 1, 0);
- EXPECT_EQ(spci_msg_send(0), SPCI_SUCCESS);
+ msg_size = spci_memory_donate_init(mb.send, constituents, 1, 0);
+ EXPECT_EQ(spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM1, msg_size,
+ SPCI_MSG_SEND_LEGACY_MEMORY)
+ .func,
+ SPCI_SUCCESS_32);
/* Observe the original service faulting when accessing the memory. */
run_res = hf_vcpu_run(SERVICE_VM0, 0);
@@ -672,6 +711,7 @@
struct hf_vcpu_run_return run_res;
struct mailbox_buffers mb = set_up_mailbox();
uint8_t *ptr = page;
+ uint32_t msg_size;
SERVICE_SELECT(SERVICE_VM0, "spci_donate_secondary_and_fault", mb.send);
SERVICE_SELECT(SERVICE_VM1, "spci_memory_receive", mb.send);
@@ -688,9 +728,11 @@
EXPECT_EQ(run_res.code, HF_VCPU_RUN_WAIT_FOR_MESSAGE);
/* Donate memory. */
- spci_memory_donate(mb.send, SERVICE_VM0, HF_PRIMARY_VM_ID, constituents,
- 1, 0);
- EXPECT_EQ(spci_msg_send(0), SPCI_SUCCESS);
+ msg_size = spci_memory_donate_init(mb.send, constituents, 1, 0);
+ EXPECT_EQ(spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM0, msg_size,
+ SPCI_MSG_SEND_LEGACY_MEMORY)
+ .func,
+ SPCI_SUCCESS_32);
/* Let the memory be sent from VM0 to VM1. */
run_res = hf_vcpu_run(SERVICE_VM0, 0);
@@ -717,6 +759,7 @@
struct hf_vcpu_run_return run_res;
struct mailbox_buffers mb = set_up_mailbox();
uint8_t *ptr = page;
+ uint32_t msg_size;
SERVICE_SELECT(SERVICE_VM0, "spci_donate_twice", mb.send);
SERVICE_SELECT(SERVICE_VM1, "spci_memory_receive", mb.send);
@@ -729,9 +772,11 @@
};
/* Donate memory to VM0. */
- spci_memory_donate(mb.send, SERVICE_VM0, HF_PRIMARY_VM_ID, constituents,
- 1, 0);
- EXPECT_EQ(spci_msg_send(0), SPCI_SUCCESS);
+ msg_size = spci_memory_donate_init(mb.send, constituents, 1, 0);
+ EXPECT_EQ(spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM0, msg_size,
+ SPCI_MSG_SEND_LEGACY_MEMORY)
+ .func,
+ SPCI_SUCCESS_32);
/* Let the memory be received. */
run_res = hf_vcpu_run(SERVICE_VM0, 0);
@@ -761,6 +806,7 @@
{
struct mailbox_buffers mb = set_up_mailbox();
uint8_t *ptr = page;
+ uint32_t msg_size;
/* Initialise the memory before giving it. */
memset_s(ptr, sizeof(page), 'b', PAGE_SIZE);
@@ -768,10 +814,11 @@
{.address = (uint64_t)page, .page_count = 1},
};
- spci_memory_donate(mb.send, HF_PRIMARY_VM_ID, HF_PRIMARY_VM_ID,
- constituents, 1, 0);
+ msg_size = spci_memory_donate_init(mb.send, constituents, 1, 0);
- EXPECT_EQ(spci_msg_send(0), SPCI_INVALID_PARAMETERS);
+ EXPECT_SPCI_ERROR(spci_msg_send(HF_PRIMARY_VM_ID, HF_PRIMARY_VM_ID,
+ msg_size, SPCI_MSG_SEND_LEGACY_MEMORY),
+ SPCI_INVALID_PARAMETERS);
}
/**
@@ -781,6 +828,7 @@
{
struct mailbox_buffers mb = set_up_mailbox();
uint8_t *ptr = page;
+ uint32_t msg_size;
/* Initialise the memory before giving it. */
memset_s(ptr, sizeof(page), 'b', PAGE_SIZE);
@@ -788,11 +836,13 @@
{.address = (uint64_t)page, .page_count = 1},
};
- spci_memory_lend(mb.send, HF_PRIMARY_VM_ID, HF_PRIMARY_VM_ID,
- constituents, 1, 0, SPCI_LEND_RW_X,
- SPCI_LEND_NORMAL_MEM, SPCI_LEND_CACHE_WRITE_BACK,
- SPCI_LEND_OUTER_SHAREABLE);
- EXPECT_EQ(spci_msg_send(0), SPCI_INVALID_PARAMETERS);
+ msg_size = spci_memory_lend_init(mb.send, constituents, 1, 0,
+ SPCI_LEND_RW_X, SPCI_LEND_NORMAL_MEM,
+ SPCI_LEND_CACHE_WRITE_BACK,
+ SPCI_LEND_OUTER_SHAREABLE);
+ EXPECT_SPCI_ERROR(spci_msg_send(HF_PRIMARY_VM_ID, HF_PRIMARY_VM_ID,
+ msg_size, SPCI_MSG_SEND_LEGACY_MEMORY),
+ SPCI_INVALID_PARAMETERS);
}
/**
@@ -803,6 +853,7 @@
struct hf_vcpu_run_return run_res;
struct mailbox_buffers mb = set_up_mailbox();
uint8_t *ptr = page;
+ uint32_t msg_size;
SERVICE_SELECT(SERVICE_VM0, "spci_donate_invalid_source", mb.send);
SERVICE_SELECT(SERVICE_VM1, "spci_memory_receive", mb.send);
@@ -814,22 +865,27 @@
};
/* Try invalid configurations. */
- spci_memory_donate(mb.send, HF_PRIMARY_VM_ID, SERVICE_VM0, constituents,
- 1, 0);
- EXPECT_EQ(spci_msg_send(0), SPCI_INVALID_PARAMETERS);
+ msg_size = spci_memory_donate_init(mb.send, constituents, 1, 0);
+ EXPECT_SPCI_ERROR(spci_msg_send(SERVICE_VM0, HF_PRIMARY_VM_ID, msg_size,
+ SPCI_MSG_SEND_LEGACY_MEMORY),
+ SPCI_INVALID_PARAMETERS);
- spci_memory_donate(mb.send, SERVICE_VM0, SERVICE_VM0, constituents, 1,
- 0);
- EXPECT_EQ(spci_msg_send(0), SPCI_INVALID_PARAMETERS);
+ msg_size = spci_memory_donate_init(mb.send, constituents, 1, 0);
+ EXPECT_SPCI_ERROR(spci_msg_send(SERVICE_VM0, SERVICE_VM0, msg_size,
+ SPCI_MSG_SEND_LEGACY_MEMORY),
+ SPCI_INVALID_PARAMETERS);
- spci_memory_donate(mb.send, SERVICE_VM0, SERVICE_VM1, constituents, 1,
- 0);
- EXPECT_EQ(spci_msg_send(0), SPCI_INVALID_PARAMETERS);
+ msg_size = spci_memory_donate_init(mb.send, constituents, 1, 0);
+ EXPECT_SPCI_ERROR(spci_msg_send(SERVICE_VM1, SERVICE_VM0, msg_size,
+ SPCI_MSG_SEND_LEGACY_MEMORY),
+ SPCI_INVALID_PARAMETERS);
/* Successfully donate to VM0. */
- spci_memory_donate(mb.send, SERVICE_VM0, HF_PRIMARY_VM_ID, constituents,
- 1, 0);
- EXPECT_EQ(spci_msg_send(0), SPCI_SUCCESS);
+ msg_size = spci_memory_donate_init(mb.send, constituents, 1, 0);
+ EXPECT_EQ(spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM0, msg_size,
+ SPCI_MSG_SEND_LEGACY_MEMORY)
+ .func,
+ SPCI_SUCCESS_32);
/* Receive and return memory from VM0. */
run_res = hf_vcpu_run(SERVICE_VM0, 0);
@@ -853,14 +909,20 @@
struct spci_memory_region_constituent constituents[] = {
{.address = (uint64_t)page + i, .page_count = 1},
};
- spci_memory_donate(mb.send, SERVICE_VM0, HF_PRIMARY_VM_ID,
- constituents, 1, 0);
- EXPECT_EQ(spci_msg_send(0), SPCI_INVALID_PARAMETERS);
- spci_memory_lend(
- mb.send, SERVICE_VM0, HF_PRIMARY_VM_ID, constituents, 1,
- 0, SPCI_LEND_RW_X, SPCI_LEND_NORMAL_MEM,
- SPCI_LEND_CACHE_WRITE_BACK, SPCI_LEND_OUTER_SHAREABLE);
- EXPECT_EQ(spci_msg_send(0), SPCI_INVALID_PARAMETERS);
+ uint32_t msg_size =
+ spci_memory_donate_init(mb.send, constituents, 1, 0);
+ EXPECT_SPCI_ERROR(
+ spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM0, msg_size,
+ SPCI_MSG_SEND_LEGACY_MEMORY),
+ SPCI_INVALID_PARAMETERS);
+ msg_size = spci_memory_lend_init(
+ mb.send, constituents, 1, 0, SPCI_LEND_RW_X,
+ SPCI_LEND_NORMAL_MEM, SPCI_LEND_CACHE_WRITE_BACK,
+ SPCI_LEND_OUTER_SHAREABLE);
+ EXPECT_SPCI_ERROR(
+ spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM0, msg_size,
+ SPCI_MSG_SEND_LEGACY_MEMORY),
+ SPCI_INVALID_PARAMETERS);
}
}
@@ -872,6 +934,7 @@
struct hf_vcpu_run_return run_res;
struct mailbox_buffers mb = set_up_mailbox();
uint8_t *ptr = page;
+ uint32_t msg_size;
SERVICE_SELECT(SERVICE_VM0, "spci_lend_invalid_source", mb.send);
@@ -882,16 +945,23 @@
};
/* Check cannot swap VM IDs. */
- spci_memory_lend(mb.send, HF_PRIMARY_VM_ID, SERVICE_VM0, constituents,
- 1, 0, SPCI_LEND_RW_X, SPCI_LEND_NORMAL_MEM,
- SPCI_LEND_CACHE_WRITE_BACK, SPCI_LEND_OUTER_SHAREABLE);
- EXPECT_EQ(spci_msg_send(0), SPCI_INVALID_PARAMETERS);
+ msg_size = spci_memory_lend_init(mb.send, constituents, 1, 0,
+ SPCI_LEND_RW_X, SPCI_LEND_NORMAL_MEM,
+ SPCI_LEND_CACHE_WRITE_BACK,
+ SPCI_LEND_OUTER_SHAREABLE);
+ EXPECT_SPCI_ERROR(spci_msg_send(SERVICE_VM0, HF_PRIMARY_VM_ID, msg_size,
+ SPCI_MSG_SEND_LEGACY_MEMORY),
+ SPCI_INVALID_PARAMETERS);
/* Lend memory to VM0. */
- spci_memory_lend(mb.send, SERVICE_VM0, HF_PRIMARY_VM_ID, constituents,
- 1, 0, SPCI_LEND_RW_X, SPCI_LEND_NORMAL_MEM,
- SPCI_LEND_CACHE_WRITE_BACK, SPCI_LEND_OUTER_SHAREABLE);
- EXPECT_EQ(spci_msg_send(0), SPCI_SUCCESS);
+ msg_size = spci_memory_lend_init(mb.send, constituents, 1, 0,
+ SPCI_LEND_RW_X, SPCI_LEND_NORMAL_MEM,
+ SPCI_LEND_CACHE_WRITE_BACK,
+ SPCI_LEND_OUTER_SHAREABLE);
+ EXPECT_EQ(spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM0, msg_size,
+ SPCI_MSG_SEND_LEGACY_MEMORY)
+ .func,
+ SPCI_SUCCESS_32);
/* Receive and return memory from VM0. */
run_res = hf_vcpu_run(SERVICE_VM0, 0);
@@ -911,6 +981,7 @@
struct hf_vcpu_run_return run_res;
struct mailbox_buffers mb = set_up_mailbox();
uint8_t *ptr = page;
+ uint32_t msg_size;
SERVICE_SELECT(SERVICE_VM0, "spci_memory_lend_relinquish_RW", mb.send);
@@ -921,11 +992,15 @@
{.address = (uint64_t)page, .page_count = 1},
};
- spci_memory_lend(mb.send, SERVICE_VM0, HF_PRIMARY_VM_ID, constituents,
- 1, 0, SPCI_LEND_RW_X, SPCI_LEND_NORMAL_MEM,
- SPCI_LEND_CACHE_WRITE_BACK, SPCI_LEND_OUTER_SHAREABLE);
+ msg_size = spci_memory_lend_init(mb.send, constituents, 1, 0,
+ SPCI_LEND_RW_X, SPCI_LEND_NORMAL_MEM,
+ SPCI_LEND_CACHE_WRITE_BACK,
+ SPCI_LEND_OUTER_SHAREABLE);
- EXPECT_EQ(spci_msg_send(0), SPCI_SUCCESS);
+ EXPECT_EQ(spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM0, msg_size,
+ SPCI_MSG_SEND_LEGACY_MEMORY)
+ .func,
+ SPCI_SUCCESS_32);
/* Let the memory be accessed. */
run_res = hf_vcpu_run(SERVICE_VM0, 0);
@@ -944,11 +1019,15 @@
/* Re-initialise the memory before giving it. */
memset_s(ptr, sizeof(page), 'b', PAGE_SIZE);
- spci_memory_lend(mb.send, SERVICE_VM0, HF_PRIMARY_VM_ID, constituents,
- 1, 0, SPCI_LEND_RO_X, SPCI_LEND_NORMAL_MEM,
- SPCI_LEND_CACHE_WRITE_BACK, SPCI_LEND_OUTER_SHAREABLE);
+ msg_size = spci_memory_lend_init(mb.send, constituents, 1, 0,
+ SPCI_LEND_RO_X, SPCI_LEND_NORMAL_MEM,
+ SPCI_LEND_CACHE_WRITE_BACK,
+ SPCI_LEND_OUTER_SHAREABLE);
- EXPECT_EQ(spci_msg_send(0), SPCI_SUCCESS);
+ EXPECT_EQ(spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM0, msg_size,
+ SPCI_MSG_SEND_LEGACY_MEMORY)
+ .func,
+ SPCI_SUCCESS_32);
/* Let the memory be accessed. */
run_res = hf_vcpu_run(SERVICE_VM0, 0);
@@ -974,6 +1053,7 @@
struct hf_vcpu_run_return run_res;
struct mailbox_buffers mb = set_up_mailbox();
uint8_t *ptr = page;
+ uint32_t msg_size;
SERVICE_SELECT(SERVICE_VM0, "spci_memory_lend_relinquish_RW", mb.send);
@@ -984,11 +1064,15 @@
{.address = (uint64_t)page, .page_count = 1},
};
- spci_memory_lend(mb.send, SERVICE_VM0, HF_PRIMARY_VM_ID, constituents,
- 1, 0, SPCI_LEND_RW_NX, SPCI_LEND_NORMAL_MEM,
- SPCI_LEND_CACHE_WRITE_BACK, SPCI_LEND_OUTER_SHAREABLE);
+ msg_size = spci_memory_lend_init(mb.send, constituents, 1, 0,
+ SPCI_LEND_RW_NX, SPCI_LEND_NORMAL_MEM,
+ SPCI_LEND_CACHE_WRITE_BACK,
+ SPCI_LEND_OUTER_SHAREABLE);
- EXPECT_EQ(spci_msg_send(0), SPCI_SUCCESS);
+ EXPECT_EQ(spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM0, msg_size,
+ SPCI_MSG_SEND_LEGACY_MEMORY)
+ .func,
+ SPCI_SUCCESS_32);
/* Let the memory be accessed. */
run_res = hf_vcpu_run(SERVICE_VM0, 0);
@@ -1006,11 +1090,15 @@
/* Re-initialise the memory before giving it. */
memset_s(ptr, sizeof(page), 'b', PAGE_SIZE);
- spci_memory_lend(mb.send, SERVICE_VM0, HF_PRIMARY_VM_ID, constituents,
- 1, 0, SPCI_LEND_RO_NX, SPCI_LEND_NORMAL_MEM,
- SPCI_LEND_CACHE_WRITE_BACK, SPCI_LEND_OUTER_SHAREABLE);
+ msg_size = spci_memory_lend_init(mb.send, constituents, 1, 0,
+ SPCI_LEND_RO_NX, SPCI_LEND_NORMAL_MEM,
+ SPCI_LEND_CACHE_WRITE_BACK,
+ SPCI_LEND_OUTER_SHAREABLE);
- EXPECT_EQ(spci_msg_send(0), SPCI_SUCCESS);
+ EXPECT_EQ(spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM0, msg_size,
+ SPCI_MSG_SEND_LEGACY_MEMORY)
+ .func,
+ SPCI_SUCCESS_32);
/* Let the memory be accessed. */
run_res = hf_vcpu_run(SERVICE_VM0, 0);
@@ -1035,6 +1123,7 @@
struct hf_vcpu_run_return run_res;
struct mailbox_buffers mb = set_up_mailbox();
uint8_t *ptr = page;
+ uint32_t msg_size;
SERVICE_SELECT(SERVICE_VM0, "spci_memory_lend_relinquish_X", mb.send);
@@ -1049,19 +1138,27 @@
{.address = (uint64_t)page, .page_count = 1},
};
- spci_memory_lend(mb.send, SERVICE_VM0, HF_PRIMARY_VM_ID, constituents,
- 1, 0, SPCI_LEND_RW_X, SPCI_LEND_NORMAL_MEM,
- SPCI_LEND_CACHE_WRITE_BACK, SPCI_LEND_OUTER_SHAREABLE);
- EXPECT_EQ(spci_msg_send(0), SPCI_SUCCESS);
+ msg_size = spci_memory_lend_init(mb.send, constituents, 1, 0,
+ SPCI_LEND_RW_X, SPCI_LEND_NORMAL_MEM,
+ SPCI_LEND_CACHE_WRITE_BACK,
+ SPCI_LEND_OUTER_SHAREABLE);
+ EXPECT_EQ(spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM0, msg_size,
+ SPCI_MSG_SEND_LEGACY_MEMORY)
+ .func,
+ SPCI_SUCCESS_32);
/* Attempt to execute from memory. */
run_res = hf_vcpu_run(SERVICE_VM0, 0);
EXPECT_EQ(run_res.code, HF_VCPU_RUN_MESSAGE);
- spci_memory_lend(mb.send, SERVICE_VM0, HF_PRIMARY_VM_ID, constituents,
- 1, 0, SPCI_LEND_RW_NX, SPCI_LEND_NORMAL_MEM,
- SPCI_LEND_CACHE_WRITE_BACK, SPCI_LEND_OUTER_SHAREABLE);
- EXPECT_EQ(spci_msg_send(0), SPCI_SUCCESS);
+ msg_size = spci_memory_lend_init(mb.send, constituents, 1, 0,
+ SPCI_LEND_RW_NX, SPCI_LEND_NORMAL_MEM,
+ SPCI_LEND_CACHE_WRITE_BACK,
+ SPCI_LEND_OUTER_SHAREABLE);
+ EXPECT_EQ(spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM0, msg_size,
+ SPCI_MSG_SEND_LEGACY_MEMORY)
+ .func,
+ SPCI_SUCCESS_32);
/* Try and fail to execute from the memory region. */
run_res = hf_vcpu_run(SERVICE_VM0, 0);
@@ -1076,6 +1173,7 @@
struct hf_vcpu_run_return run_res;
struct mailbox_buffers mb = set_up_mailbox();
uint8_t *ptr = page;
+ uint32_t msg_size;
SERVICE_SELECT(SERVICE_VM0, "spci_memory_lend_relinquish_X", mb.send);
@@ -1090,19 +1188,27 @@
{.address = (uint64_t)page, .page_count = 1},
};
- spci_memory_lend(mb.send, SERVICE_VM0, HF_PRIMARY_VM_ID, constituents,
- 1, 0, SPCI_LEND_RO_X, SPCI_LEND_NORMAL_MEM,
- SPCI_LEND_CACHE_WRITE_BACK, SPCI_LEND_OUTER_SHAREABLE);
- EXPECT_EQ(spci_msg_send(0), SPCI_SUCCESS);
+ msg_size = spci_memory_lend_init(mb.send, constituents, 1, 0,
+ SPCI_LEND_RO_X, SPCI_LEND_NORMAL_MEM,
+ SPCI_LEND_CACHE_WRITE_BACK,
+ SPCI_LEND_OUTER_SHAREABLE);
+ EXPECT_EQ(spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM0, msg_size,
+ SPCI_MSG_SEND_LEGACY_MEMORY)
+ .func,
+ SPCI_SUCCESS_32);
/* Attempt to execute from memory. */
run_res = hf_vcpu_run(SERVICE_VM0, 0);
EXPECT_EQ(run_res.code, HF_VCPU_RUN_MESSAGE);
- spci_memory_lend(mb.send, SERVICE_VM0, HF_PRIMARY_VM_ID, constituents,
- 1, 0, SPCI_LEND_RO_NX, SPCI_LEND_NORMAL_MEM,
- SPCI_LEND_CACHE_WRITE_BACK, SPCI_LEND_OUTER_SHAREABLE);
- EXPECT_EQ(spci_msg_send(0), SPCI_SUCCESS);
+ msg_size = spci_memory_lend_init(mb.send, constituents, 1, 0,
+ SPCI_LEND_RO_NX, SPCI_LEND_NORMAL_MEM,
+ SPCI_LEND_CACHE_WRITE_BACK,
+ SPCI_LEND_OUTER_SHAREABLE);
+ EXPECT_EQ(spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM0, msg_size,
+ SPCI_MSG_SEND_LEGACY_MEMORY)
+ .func,
+ SPCI_SUCCESS_32);
/* Try and fail to execute from the memory region. */
run_res = hf_vcpu_run(SERVICE_VM0, 0);
@@ -1117,6 +1223,7 @@
struct hf_vcpu_run_return run_res;
struct mailbox_buffers mb = set_up_mailbox();
uint8_t *ptr = page;
+ uint32_t msg_size;
SERVICE_SELECT(SERVICE_VM0, "spci_memory_lend_relinquish_RW", mb.send);
SERVICE_SELECT(SERVICE_VM1, "spci_memory_lend_relinquish_RW", mb.send);
@@ -1128,11 +1235,15 @@
{.address = (uint64_t)page, .page_count = 2},
};
- spci_memory_lend(mb.send, SERVICE_VM0, HF_PRIMARY_VM_ID, constituents,
- 1, 0, SPCI_LEND_RO_X, SPCI_LEND_NORMAL_MEM,
- SPCI_LEND_CACHE_WRITE_BACK, SPCI_LEND_OUTER_SHAREABLE);
+ msg_size = spci_memory_lend_init(mb.send, constituents, 1, 0,
+ SPCI_LEND_RO_X, SPCI_LEND_NORMAL_MEM,
+ SPCI_LEND_CACHE_WRITE_BACK,
+ SPCI_LEND_OUTER_SHAREABLE);
- EXPECT_EQ(spci_msg_send(0), SPCI_SUCCESS);
+ EXPECT_EQ(spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM0, msg_size,
+ SPCI_MSG_SEND_LEGACY_MEMORY)
+ .func,
+ SPCI_SUCCESS_32);
/* Let the memory be accessed. */
run_res = hf_vcpu_run(SERVICE_VM0, 0);
@@ -1142,15 +1253,19 @@
constituents[0].page_count = 1;
for (int i = 1; i < PAGE_SIZE * 2; i++) {
constituents[0].address = (uint64_t)page + PAGE_SIZE;
- spci_memory_donate(mb.send, SERVICE_VM1, HF_PRIMARY_VM_ID,
- constituents, 1, 0);
- EXPECT_EQ(spci_msg_send(0), SPCI_INVALID_PARAMETERS);
+ msg_size = spci_memory_donate_init(mb.send, constituents, 1, 0);
+ EXPECT_SPCI_ERROR(
+ spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM1, msg_size,
+ SPCI_MSG_SEND_LEGACY_MEMORY),
+ SPCI_INVALID_PARAMETERS);
}
/* Ensure we can donate to the only borrower. */
- spci_memory_donate(mb.send, SERVICE_VM0, HF_PRIMARY_VM_ID, constituents,
- 1, 0);
- EXPECT_EQ(spci_msg_send(0), SPCI_SUCCESS);
+ msg_size = spci_memory_donate_init(mb.send, constituents, 1, 0);
+ EXPECT_EQ(spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM0, msg_size,
+ SPCI_MSG_SEND_LEGACY_MEMORY)
+ .func,
+ SPCI_SUCCESS_32);
}
/**
@@ -1161,6 +1276,7 @@
struct hf_vcpu_run_return run_res;
struct mailbox_buffers mb = set_up_mailbox();
uint8_t *ptr = page;
+ uint32_t msg_size;
SERVICE_SELECT(SERVICE_VM0, "spci_memory_lend_twice", mb.send);
SERVICE_SELECT(SERVICE_VM1, "spci_memory_lend_twice", mb.send);
@@ -1172,11 +1288,15 @@
{.address = (uint64_t)page, .page_count = 2},
};
- spci_memory_lend(mb.send, SERVICE_VM0, HF_PRIMARY_VM_ID, constituents,
- 1, 0, SPCI_LEND_RO_X, SPCI_LEND_NORMAL_MEM,
- SPCI_LEND_CACHE_WRITE_BACK, SPCI_LEND_OUTER_SHAREABLE);
+ msg_size = spci_memory_lend_init(mb.send, constituents, 1, 0,
+ SPCI_LEND_RO_X, SPCI_LEND_NORMAL_MEM,
+ SPCI_LEND_CACHE_WRITE_BACK,
+ SPCI_LEND_OUTER_SHAREABLE);
- EXPECT_EQ(spci_msg_send(0), SPCI_SUCCESS);
+ EXPECT_EQ(spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM0, msg_size,
+ SPCI_MSG_SEND_LEGACY_MEMORY)
+ .func,
+ SPCI_SUCCESS_32);
/* Let the memory be accessed. */
run_res = hf_vcpu_run(SERVICE_VM0, 0);
@@ -1193,10 +1313,13 @@
constituents[0].page_count = 1;
for (int i = 1; i < PAGE_SIZE * 2; i++) {
constituents[0].address = (uint64_t)page + PAGE_SIZE;
- spci_memory_lend(
- mb.send, SERVICE_VM1, HF_PRIMARY_VM_ID, constituents, 1,
- 0, SPCI_LEND_RO_X, SPCI_LEND_NORMAL_MEM,
- SPCI_LEND_CACHE_WRITE_BACK, SPCI_LEND_OUTER_SHAREABLE);
- EXPECT_EQ(spci_msg_send(0), SPCI_INVALID_PARAMETERS);
+ msg_size = spci_memory_lend_init(
+ mb.send, constituents, 1, 0, SPCI_LEND_RO_X,
+ SPCI_LEND_NORMAL_MEM, SPCI_LEND_CACHE_WRITE_BACK,
+ SPCI_LEND_OUTER_SHAREABLE);
+ EXPECT_SPCI_ERROR(
+ spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM1, msg_size,
+ SPCI_MSG_SEND_LEGACY_MEMORY),
+ SPCI_INVALID_PARAMETERS);
}
}
diff --git a/test/vmapi/primary_with_secondaries/run_race.c b/test/vmapi/primary_with_secondaries/run_race.c
index 7ba4d74..3689825 100644
--- a/test/vmapi/primary_with_secondaries/run_race.c
+++ b/test/vmapi/primary_with_secondaries/run_race.c
@@ -56,7 +56,7 @@
/* Copies the contents of the received boolean to the return value. */
if (run_res.message.size == sizeof(ok)) {
- ok = *(bool *)mb->recv->payload;
+ ok = *(bool *)mb->recv;
}
hf_mailbox_clear();
diff --git a/test/vmapi/primary_with_secondaries/services/BUILD.gn b/test/vmapi/primary_with_secondaries/services/BUILD.gn
index 6694a93..4dbe074 100644
--- a/test/vmapi/primary_with_secondaries/services/BUILD.gn
+++ b/test/vmapi/primary_with_secondaries/services/BUILD.gn
@@ -93,6 +93,9 @@
"..:config",
"//test/hftest:hftest_config",
]
+ deps = [
+ "//test/vmapi/primary_with_secondaries:util",
+ ]
sources = [
"memory.c",
@@ -150,6 +153,7 @@
]
deps = [
"//src/arch/aarch64:arch",
+ "//test/vmapi/primary_with_secondaries:util",
]
}
@@ -194,6 +198,9 @@
"..:config",
"//test/hftest:hftest_config",
]
+ deps = [
+ "//test/vmapi/primary_with_secondaries:util",
+ ]
sources = [
"spci_check.c",
diff --git a/test/vmapi/primary_with_secondaries/services/check_state.c b/test/vmapi/primary_with_secondaries/services/check_state.c
index a1805f9..49939dd 100644
--- a/test/vmapi/primary_with_secondaries/services/check_state.c
+++ b/test/vmapi/primary_with_secondaries/services/check_state.c
@@ -22,13 +22,14 @@
#include "hftest.h"
-void send_with_retry()
+void send_with_retry(spci_vm_id_t sender_vm_id, spci_vm_id_t target_vm_id,
+ uint32_t size)
{
- int64_t res;
+ struct spci_value res;
do {
- res = spci_msg_send(0);
- } while (res != SPCI_SUCCESS);
+ res = spci_msg_send(sender_vm_id, target_vm_id, size, 0);
+ } while (res.func != SPCI_SUCCESS_32);
}
/**
@@ -49,9 +50,6 @@
static volatile uintptr_t expected;
static volatile uintptr_t actual;
- spci_message_init(SERVICE_SEND_BUFFER(), 0, HF_PRIMARY_VM_ID,
- hf_vm_get_id());
-
for (i = 0; i < 100000; i++) {
/*
* We store the expected/actual values in volatile static
@@ -60,16 +58,13 @@
*/
expected = i;
per_cpu_ptr_set(expected);
- send_with_retry();
+ send_with_retry(hf_vm_get_id(), HF_PRIMARY_VM_ID, 0);
actual = per_cpu_ptr_get();
ok &= expected == actual;
}
/* Send two replies, one for each physical CPU. */
- memcpy_s(SERVICE_SEND_BUFFER()->payload, SPCI_MSG_PAYLOAD_MAX, &ok,
- sizeof(ok));
- spci_message_init(SERVICE_SEND_BUFFER(), sizeof(ok), HF_PRIMARY_VM_ID,
- hf_vm_get_id());
- send_with_retry();
- send_with_retry();
+ memcpy_s(SERVICE_SEND_BUFFER(), SPCI_MSG_PAYLOAD_MAX, &ok, sizeof(ok));
+ send_with_retry(hf_vm_get_id(), HF_PRIMARY_VM_ID, sizeof(ok));
+ send_with_retry(hf_vm_get_id(), HF_PRIMARY_VM_ID, sizeof(ok));
}
diff --git a/test/vmapi/primary_with_secondaries/services/echo.c b/test/vmapi/primary_with_secondaries/services/echo.c
index 87695a8..0578710 100644
--- a/test/vmapi/primary_with_secondaries/services/echo.c
+++ b/test/vmapi/primary_with_secondaries/services/echo.c
@@ -28,17 +28,15 @@
struct spci_value ret = spci_msg_wait();
spci_vm_id_t target_vm_id = spci_msg_send_receiver(ret);
spci_vm_id_t source_vm_id = spci_msg_send_sender(ret);
- struct spci_message *send_buf = SERVICE_SEND_BUFFER();
- struct spci_message *recv_buf = SERVICE_RECV_BUFFER();
+ void *send_buf = SERVICE_SEND_BUFFER();
+ void *recv_buf = SERVICE_RECV_BUFFER();
ASSERT_EQ(ret.func, SPCI_MSG_SEND_32);
- memcpy_s(send_buf->payload, SPCI_MSG_PAYLOAD_MAX,
- recv_buf->payload, spci_msg_send_size(ret));
- spci_message_init(SERVICE_SEND_BUFFER(),
- spci_msg_send_size(ret), source_vm_id,
- target_vm_id);
+ memcpy_s(send_buf, SPCI_MSG_PAYLOAD_MAX, recv_buf,
+ spci_msg_send_size(ret));
hf_mailbox_clear();
- spci_msg_send(0);
+ spci_msg_send(target_vm_id, source_vm_id,
+ spci_msg_send_size(ret), 0);
}
}
diff --git a/test/vmapi/primary_with_secondaries/services/echo_with_notification.c b/test/vmapi/primary_with_secondaries/services/echo_with_notification.c
index ffd1e12..a4954d9 100644
--- a/test/vmapi/primary_with_secondaries/services/echo_with_notification.c
+++ b/test/vmapi/primary_with_secondaries/services/echo_with_notification.c
@@ -53,18 +53,19 @@
/* Loop, echo messages back to the sender. */
for (;;) {
- struct spci_message *send_buf = SERVICE_SEND_BUFFER();
- struct spci_message *recv_buf = SERVICE_RECV_BUFFER();
+ void *send_buf = SERVICE_SEND_BUFFER();
+ void *recv_buf = SERVICE_RECV_BUFFER();
struct spci_value ret = spci_msg_wait();
spci_vm_id_t target_vm_id = spci_msg_send_receiver(ret);
spci_vm_id_t source_vm_id = spci_msg_send_sender(ret);
- memcpy_s(send_buf->payload, SPCI_MSG_PAYLOAD_MAX,
- recv_buf->payload, spci_msg_send_size(ret));
- spci_message_init(send_buf, spci_msg_send_size(ret),
- source_vm_id, target_vm_id);
+ memcpy_s(send_buf, SPCI_MSG_PAYLOAD_MAX, recv_buf,
+ spci_msg_send_size(ret));
- while (spci_msg_send(SPCI_MSG_SEND_NOTIFY) != SPCI_SUCCESS) {
+ while (spci_msg_send(target_vm_id, source_vm_id,
+ spci_msg_send_size(ret),
+ SPCI_MSG_SEND_NOTIFY)
+ .func != SPCI_SUCCESS_32) {
wait_for_vm(source_vm_id);
}
diff --git a/test/vmapi/primary_with_secondaries/services/interruptible.c b/test/vmapi/primary_with_secondaries/services/interruptible.c
index 94ac007..04ea783 100644
--- a/test/vmapi/primary_with_secondaries/services/interruptible.c
+++ b/test/vmapi/primary_with_secondaries/services/interruptible.c
@@ -39,11 +39,8 @@
dlog("secondary IRQ %d from current\n", interrupt_id);
buffer[8] = '0' + interrupt_id / 10;
buffer[9] = '0' + interrupt_id % 10;
- memcpy_s(SERVICE_SEND_BUFFER()->payload, SPCI_MSG_PAYLOAD_MAX, buffer,
- size);
- spci_message_init(SERVICE_SEND_BUFFER(), size, HF_PRIMARY_VM_ID,
- hf_vm_get_id());
- spci_msg_send(0);
+ memcpy_s(SERVICE_SEND_BUFFER(), SPCI_MSG_PAYLOAD_MAX, buffer, size);
+ spci_msg_send(hf_vm_get_id(), HF_PRIMARY_VM_ID, size, 0);
dlog("secondary IRQ %d ended\n", interrupt_id);
}
@@ -66,7 +63,7 @@
TEST_SERVICE(interruptible)
{
spci_vm_id_t this_vm_id = hf_vm_get_id();
- struct spci_message *recv_buf = SERVICE_RECV_BUFFER();
+ void *recv_buf = SERVICE_RECV_BUFFER();
exception_setup(irq);
hf_interrupt_enable(SELF_INTERRUPT_ID, true);
@@ -83,13 +80,12 @@
ASSERT_EQ(ret.func, SPCI_MSG_SEND_32);
if (spci_msg_send_sender(ret) == HF_PRIMARY_VM_ID &&
spci_msg_send_size(ret) == sizeof(ping_message) &&
- memcmp(recv_buf->payload, ping_message,
- sizeof(ping_message)) == 0) {
+ memcmp(recv_buf, ping_message, sizeof(ping_message)) == 0) {
/* Interrupt ourselves */
hf_interrupt_inject(this_vm_id, 0, SELF_INTERRUPT_ID);
} else if (spci_msg_send_sender(ret) == HF_PRIMARY_VM_ID &&
spci_msg_send_size(ret) == sizeof(enable_message) &&
- memcmp(recv_buf->payload, enable_message,
+ memcmp(recv_buf, enable_message,
sizeof(enable_message)) == 0) {
/* Enable interrupt ID C. */
hf_interrupt_enable(EXTERNAL_INTERRUPT_ID_C, true);
diff --git a/test/vmapi/primary_with_secondaries/services/interruptible_echo.c b/test/vmapi/primary_with_secondaries/services/interruptible_echo.c
index 560c47e..814cce1 100644
--- a/test/vmapi/primary_with_secondaries/services/interruptible_echo.c
+++ b/test/vmapi/primary_with_secondaries/services/interruptible_echo.c
@@ -39,8 +39,8 @@
for (;;) {
struct spci_value res = spci_msg_wait();
- struct spci_message *message = SERVICE_SEND_BUFFER();
- struct spci_message *recv_message = SERVICE_RECV_BUFFER();
+ void *message = SERVICE_SEND_BUFFER();
+ void *recv_message = SERVICE_RECV_BUFFER();
/* Retry if interrupted but made visible with the yield. */
while (res.func == SPCI_ERROR_32 &&
@@ -50,12 +50,11 @@
}
ASSERT_EQ(res.func, SPCI_MSG_SEND_32);
- memcpy_s(message->payload, SPCI_MSG_PAYLOAD_MAX,
- recv_message->payload, spci_msg_send_size(res));
- spci_message_init(message, spci_msg_send_size(res),
- HF_PRIMARY_VM_ID, SERVICE_VM0);
+ memcpy_s(message, SPCI_MSG_PAYLOAD_MAX, recv_message,
+ spci_msg_send_size(res));
hf_mailbox_clear();
- spci_msg_send(0);
+ spci_msg_send(SERVICE_VM0, HF_PRIMARY_VM_ID,
+ spci_msg_send_size(res), 0);
}
}
diff --git a/test/vmapi/primary_with_secondaries/services/memory.c b/test/vmapi/primary_with_secondaries/services/memory.c
index bd12409..cf15554 100644
--- a/test/vmapi/primary_with_secondaries/services/memory.c
+++ b/test/vmapi/primary_with_secondaries/services/memory.c
@@ -21,6 +21,7 @@
#include "hftest.h"
#include "primary_with_secondary.h"
+#include "util.h"
alignas(PAGE_SIZE) static uint8_t page[PAGE_SIZE];
@@ -35,10 +36,8 @@
EXPECT_EQ(ret.func, SPCI_MSG_SEND_32);
/* Check the memory was cleared. */
- struct spci_message *recv_buf = SERVICE_RECV_BUFFER();
- ptr = *(uint8_t **)recv_buf->payload;
- spci_message_init(SERVICE_SEND_BUFFER(), sizeof(ptr),
- spci_msg_send_sender(ret), hf_vm_get_id());
+ void *recv_buf = SERVICE_RECV_BUFFER();
+ ptr = *(uint8_t **)recv_buf;
for (int i = 0; i < PAGE_SIZE; ++i) {
ASSERT_EQ(ptr[i], 0);
@@ -54,7 +53,8 @@
/* Signal completion and reset. */
hf_mailbox_clear();
- spci_msg_send(0);
+ spci_msg_send(hf_vm_get_id(), spci_msg_send_sender(ret),
+ sizeof(ptr), 0);
}
}
@@ -64,11 +64,14 @@
for (;;) {
struct spci_value ret = spci_msg_wait();
uint8_t *ptr;
+ uint32_t msg_size;
EXPECT_EQ(ret.func, SPCI_MSG_SEND_32);
+ EXPECT_EQ(spci_msg_send_attributes(ret),
+ SPCI_MSG_SEND_LEGACY_MEMORY);
- struct spci_message *recv_buf = SERVICE_RECV_BUFFER();
- struct spci_message *send_buf = SERVICE_SEND_BUFFER();
+ void *recv_buf = SERVICE_RECV_BUFFER();
+ void *send_buf = SERVICE_SEND_BUFFER();
struct spci_memory_region *memory_region =
(struct spci_memory_region *)(spci_get_lend_descriptor(
recv_buf)
@@ -85,10 +88,11 @@
hf_mailbox_clear();
/* Give the memory back and notify the sender. */
- spci_memory_relinquish(
- send_buf, HF_PRIMARY_VM_ID, spci_msg_send_receiver(ret),
- memory_region->constituents, memory_region->count, 0);
- spci_msg_send(0);
+ msg_size = spci_memory_relinquish_init(
+ send_buf, memory_region->constituents,
+ memory_region->count, 0);
+ spci_msg_send(spci_msg_send_receiver(ret), HF_PRIMARY_VM_ID,
+ msg_size, SPCI_MSG_SEND_LEGACY_MEMORY);
/*
* Try and access the memory which will cause a fault unless the
@@ -108,10 +112,8 @@
EXPECT_EQ(ret.func, SPCI_MSG_SEND_32);
/* Check the memory was cleared. */
- struct spci_message *recv_buf = SERVICE_RECV_BUFFER();
- ptr = *(uint8_t **)recv_buf->payload;
- spci_message_init(SERVICE_SEND_BUFFER(), sizeof(ptr),
- spci_msg_send_sender(ret), hf_vm_get_id());
+ void *recv_buf = SERVICE_RECV_BUFFER();
+ ptr = *(uint8_t **)recv_buf;
for (int i = 0; i < PAGE_SIZE; ++i) {
ASSERT_EQ(ptr[i], 0);
@@ -123,7 +125,8 @@
HF_MEMORY_GIVE),
0);
hf_mailbox_clear();
- spci_msg_send(0);
+ spci_msg_send(hf_vm_get_id(), spci_msg_send_sender(ret),
+ sizeof(ptr), 0);
/*
* Try and access the memory which will cause a fault unless the
@@ -147,11 +150,12 @@
* API is still to be agreed on so the address is passed
* explicitly to test the mechanism.
*/
- memcpy_s(SERVICE_SEND_BUFFER()->payload, SPCI_MSG_PAYLOAD_MAX, &ptr,
+ memcpy_s(SERVICE_SEND_BUFFER(), SPCI_MSG_PAYLOAD_MAX, &ptr,
sizeof(ptr));
- spci_message_init(SERVICE_SEND_BUFFER(), sizeof(ptr), HF_PRIMARY_VM_ID,
- hf_vm_get_id());
- EXPECT_EQ(spci_msg_send(0), 0);
+ EXPECT_EQ(
+ spci_msg_send(hf_vm_get_id(), HF_PRIMARY_VM_ID, sizeof(ptr), 0)
+ .func,
+ SPCI_SUCCESS_32);
/* Try using the memory that isn't valid unless it's been returned. */
page[16] = 123;
@@ -171,11 +175,12 @@
* API is still to be agreed on so the address is passed
* explicitly to test the mechanism.
*/
- memcpy_s(SERVICE_SEND_BUFFER()->payload, SPCI_MSG_PAYLOAD_MAX, &ptr,
+ memcpy_s(SERVICE_SEND_BUFFER(), SPCI_MSG_PAYLOAD_MAX, &ptr,
sizeof(ptr));
- spci_message_init(SERVICE_SEND_BUFFER(), sizeof(ptr), HF_PRIMARY_VM_ID,
- hf_vm_get_id());
- EXPECT_EQ(spci_msg_send(0), 0);
+ EXPECT_EQ(
+ spci_msg_send(hf_vm_get_id(), HF_PRIMARY_VM_ID, sizeof(ptr), 0)
+ .func,
+ SPCI_SUCCESS_32);
/* Try using the memory that isn't valid unless it's been returned. */
page[633] = 180;
@@ -187,12 +192,15 @@
for (;;) {
struct spci_value ret = spci_msg_wait();
uint8_t *ptr;
- struct spci_message *recv_buf = SERVICE_RECV_BUFFER();
- struct spci_message *send_buf = SERVICE_SEND_BUFFER();
+ uint32_t msg_size;
+ void *recv_buf = SERVICE_RECV_BUFFER();
+ void *send_buf = SERVICE_SEND_BUFFER();
struct spci_memory_region *memory_region =
spci_get_donated_memory_region(recv_buf);
EXPECT_EQ(ret.func, SPCI_MSG_SEND_32);
+ EXPECT_EQ(spci_msg_send_attributes(ret),
+ SPCI_MSG_SEND_LEGACY_MEMORY);
hf_mailbox_clear();
ptr = (uint8_t *)memory_region->constituents[0].address;
@@ -203,10 +211,11 @@
}
/* Give the memory back and notify the sender. */
- spci_memory_donate(
- send_buf, HF_PRIMARY_VM_ID, spci_msg_send_receiver(ret),
- memory_region->constituents, memory_region->count, 0);
- spci_msg_send(0);
+ msg_size = spci_memory_donate_init(send_buf,
+ memory_region->constituents,
+ memory_region->count, 0);
+ spci_msg_send(spci_msg_send_receiver(ret), HF_PRIMARY_VM_ID,
+ msg_size, SPCI_MSG_SEND_LEGACY_MEMORY);
/*
* Try and access the memory which will cause a fault unless the
@@ -220,11 +229,12 @@
{
struct spci_value ret = spci_msg_wait();
uint8_t *ptr;
- struct spci_message *recv_buf = SERVICE_RECV_BUFFER();
+ void *recv_buf = SERVICE_RECV_BUFFER();
struct spci_memory_region *memory_region =
spci_get_donated_memory_region(recv_buf);
EXPECT_EQ(ret.func, SPCI_MSG_SEND_32);
+ EXPECT_EQ(spci_msg_send_attributes(ret), SPCI_MSG_SEND_LEGACY_MEMORY);
hf_mailbox_clear();
ptr = (uint8_t *)memory_region->constituents[0].address;
@@ -237,11 +247,12 @@
{
struct spci_value ret = spci_msg_wait();
uint8_t *ptr;
- struct spci_message *recv_buf = SERVICE_RECV_BUFFER();
+ void *recv_buf = SERVICE_RECV_BUFFER();
struct spci_memory_region *memory_region =
spci_get_donated_memory_region(recv_buf);
EXPECT_EQ(ret.func, SPCI_MSG_SEND_32);
+ EXPECT_EQ(spci_msg_send_attributes(ret), SPCI_MSG_SEND_LEGACY_MEMORY);
hf_mailbox_clear();
ptr = (uint8_t *)memory_region->constituents[0].address;
@@ -257,21 +268,25 @@
{
struct spci_value ret = spci_msg_wait();
uint8_t *ptr;
- struct spci_message *recv_buf = SERVICE_RECV_BUFFER();
- struct spci_message *send_buf = SERVICE_SEND_BUFFER();
+ uint32_t msg_size;
+ void *recv_buf = SERVICE_RECV_BUFFER();
+ void *send_buf = SERVICE_SEND_BUFFER();
struct spci_memory_region *memory_region =
spci_get_donated_memory_region(recv_buf);
EXPECT_EQ(ret.func, SPCI_MSG_SEND_32);
+ EXPECT_EQ(spci_msg_send_attributes(ret), SPCI_MSG_SEND_LEGACY_MEMORY);
hf_mailbox_clear();
ptr = (uint8_t *)memory_region->constituents[0].address;
/* Donate memory to next VM. */
- spci_memory_donate(send_buf, SERVICE_VM1, spci_msg_send_receiver(ret),
- memory_region->constituents, memory_region->count,
- 0);
- EXPECT_EQ(spci_msg_send(0), SPCI_SUCCESS);
+ msg_size = spci_memory_donate_init(
+ send_buf, memory_region->constituents, memory_region->count, 0);
+ EXPECT_EQ(spci_msg_send(spci_msg_send_receiver(ret), SERVICE_VM1,
+ msg_size, SPCI_MSG_SEND_LEGACY_MEMORY)
+ .func,
+ SPCI_SUCCESS_32);
/* Ensure that we are unable to modify memory any more. */
ptr[0] = 'c';
@@ -284,29 +299,37 @@
*/
TEST_SERVICE(spci_donate_twice)
{
+ uint32_t msg_size;
struct spci_value ret = spci_msg_wait();
- struct spci_message *recv_buf = SERVICE_RECV_BUFFER();
- struct spci_message *send_buf = SERVICE_SEND_BUFFER();
+ void *recv_buf = SERVICE_RECV_BUFFER();
+ void *send_buf = SERVICE_SEND_BUFFER();
struct spci_memory_region *memory_region =
spci_get_donated_memory_region(recv_buf);
struct spci_memory_region_constituent constituent =
memory_region->constituents[0];
EXPECT_EQ(ret.func, SPCI_MSG_SEND_32);
+ EXPECT_EQ(spci_msg_send_attributes(ret), SPCI_MSG_SEND_LEGACY_MEMORY);
hf_mailbox_clear();
/* Yield to allow attempt to re donate from primary. */
spci_yield();
/* Give the memory back and notify the sender. */
- spci_memory_donate(send_buf, HF_PRIMARY_VM_ID, SERVICE_VM0,
- &constituent, memory_region->count, 0);
- EXPECT_EQ(spci_msg_send(0), SPCI_SUCCESS);
+ msg_size = spci_memory_donate_init(send_buf, &constituent,
+ memory_region->count, 0);
+ EXPECT_EQ(spci_msg_send(SERVICE_VM0, HF_PRIMARY_VM_ID, msg_size,
+ SPCI_MSG_SEND_LEGACY_MEMORY)
+ .func,
+ SPCI_SUCCESS_32);
/* Attempt to donate the memory to another VM. */
- spci_memory_donate(send_buf, SERVICE_VM1, spci_msg_send_receiver(ret),
- &constituent, memory_region->count, 0);
- EXPECT_EQ(spci_msg_send(0), SPCI_INVALID_PARAMETERS);
+ msg_size = spci_memory_donate_init(send_buf, &constituent,
+ memory_region->count, 0);
+ EXPECT_SPCI_ERROR(
+ spci_msg_send(spci_msg_send_receiver(ret), SERVICE_VM1,
+ msg_size, SPCI_MSG_SEND_LEGACY_MEMORY),
+ SPCI_INVALID_PARAMETERS);
spci_yield();
}
@@ -320,11 +343,13 @@
for (;;) {
struct spci_value ret = spci_msg_wait();
uint8_t *ptr;
- struct spci_message *recv_buf = SERVICE_RECV_BUFFER();
+ void *recv_buf = SERVICE_RECV_BUFFER();
struct spci_memory_region *memory_region =
spci_get_donated_memory_region(recv_buf);
EXPECT_EQ(ret.func, SPCI_MSG_SEND_32);
+ EXPECT_EQ(spci_msg_send_attributes(ret),
+ SPCI_MSG_SEND_LEGACY_MEMORY);
hf_mailbox_clear();
ptr = (uint8_t *)memory_region->constituents[0].address;
@@ -342,26 +367,31 @@
*/
TEST_SERVICE(spci_donate_invalid_source)
{
+ uint32_t msg_size;
struct spci_value ret = spci_msg_wait();
- struct spci_message *recv_buf = SERVICE_RECV_BUFFER();
- struct spci_message *send_buf = SERVICE_SEND_BUFFER();
+ void *recv_buf = SERVICE_RECV_BUFFER();
+ void *send_buf = SERVICE_SEND_BUFFER();
struct spci_memory_region *memory_region =
spci_get_donated_memory_region(recv_buf);
EXPECT_EQ(ret.func, SPCI_MSG_SEND_32);
+ EXPECT_EQ(spci_msg_send_attributes(ret), SPCI_MSG_SEND_LEGACY_MEMORY);
hf_mailbox_clear();
/* Give the memory back and notify the sender. */
- spci_memory_donate(
- send_buf, HF_PRIMARY_VM_ID, spci_msg_send_receiver(ret),
- memory_region->constituents, memory_region->count, 0);
- EXPECT_EQ(spci_msg_send(0), SPCI_SUCCESS);
+ msg_size = spci_memory_donate_init(
+ send_buf, memory_region->constituents, memory_region->count, 0);
+ EXPECT_EQ(spci_msg_send(spci_msg_send_receiver(ret), HF_PRIMARY_VM_ID,
+ msg_size, SPCI_MSG_SEND_LEGACY_MEMORY)
+ .func,
+ SPCI_SUCCESS_32);
/* Fail to donate the memory from the primary to VM1. */
- spci_memory_donate(send_buf, SERVICE_VM1, HF_PRIMARY_VM_ID,
- memory_region->constituents, memory_region->count,
- 0);
- EXPECT_EQ(spci_msg_send(0), SPCI_INVALID_PARAMETERS);
+ msg_size = spci_memory_donate_init(
+ send_buf, memory_region->constituents, memory_region->count, 0);
+ EXPECT_SPCI_ERROR(spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM1, msg_size,
+ SPCI_MSG_SEND_LEGACY_MEMORY),
+ SPCI_INVALID_PARAMETERS);
spci_yield();
}
@@ -371,15 +401,18 @@
for (;;) {
struct spci_value ret = spci_msg_wait();
uint8_t *ptr;
+ uint32_t msg_size;
- struct spci_message *recv_buf = SERVICE_RECV_BUFFER();
- struct spci_message *send_buf = SERVICE_SEND_BUFFER();
+ void *recv_buf = SERVICE_RECV_BUFFER();
+ void *send_buf = SERVICE_SEND_BUFFER();
struct spci_memory_region *memory_region =
(struct spci_memory_region *)(spci_get_lend_descriptor(
recv_buf)
->payload);
EXPECT_EQ(ret.func, SPCI_MSG_SEND_32);
+ EXPECT_EQ(spci_msg_send_attributes(ret),
+ SPCI_MSG_SEND_LEGACY_MEMORY);
ptr = (uint8_t *)memory_region->constituents[0].address;
/* Relevant information read, mailbox can be cleared. */
hf_mailbox_clear();
@@ -391,10 +424,11 @@
hf_mailbox_clear();
/* Give the memory back and notify the sender. */
- spci_memory_relinquish(
- send_buf, HF_PRIMARY_VM_ID, spci_msg_send_receiver(ret),
- memory_region->constituents, memory_region->count, 0);
- spci_msg_send(0);
+ msg_size = spci_memory_relinquish_init(
+ send_buf, memory_region->constituents,
+ memory_region->count, 0);
+ spci_msg_send(spci_msg_send_receiver(ret), HF_PRIMARY_VM_ID,
+ msg_size, SPCI_MSG_SEND_LEGACY_MEMORY);
/*
* Try and access the memory which will cause a fault unless the
@@ -412,13 +446,16 @@
for (;;) {
struct spci_value ret = spci_msg_wait();
uint8_t *ptr;
+ uint32_t msg_size;
- struct spci_message *recv_buf = SERVICE_RECV_BUFFER();
- struct spci_message *send_buf = SERVICE_SEND_BUFFER();
+ void *recv_buf = SERVICE_RECV_BUFFER();
+ void *send_buf = SERVICE_SEND_BUFFER();
struct spci_memory_region *memory_region =
spci_get_donated_memory_region(recv_buf);
EXPECT_EQ(ret.func, SPCI_MSG_SEND_32);
+ EXPECT_EQ(spci_msg_send_attributes(ret),
+ SPCI_MSG_SEND_LEGACY_MEMORY);
hf_mailbox_clear();
ptr = (uint8_t *)memory_region->constituents[0].address;
@@ -428,10 +465,13 @@
ptr[i]++;
}
/* Give the memory back and notify the sender. */
- spci_memory_relinquish(
- send_buf, HF_PRIMARY_VM_ID, spci_msg_send_receiver(ret),
- memory_region->constituents, memory_region->count, 0);
- EXPECT_EQ(spci_msg_send(0), SPCI_INVALID_PARAMETERS);
+ msg_size = spci_memory_relinquish_init(
+ send_buf, memory_region->constituents,
+ memory_region->count, 0);
+ EXPECT_SPCI_ERROR(spci_msg_send(spci_msg_send_receiver(ret),
+ HF_PRIMARY_VM_ID, msg_size,
+ SPCI_MSG_SEND_LEGACY_MEMORY),
+ SPCI_INVALID_PARAMETERS);
/* Ensure we still have access to the memory. */
ptr[0] = 123;
@@ -445,35 +485,43 @@
*/
TEST_SERVICE(spci_lend_invalid_source)
{
+ uint32_t msg_size;
struct spci_value ret = spci_msg_wait();
- struct spci_message *recv_buf = SERVICE_RECV_BUFFER();
- struct spci_message *send_buf = SERVICE_SEND_BUFFER();
+ void *recv_buf = SERVICE_RECV_BUFFER();
+ void *send_buf = SERVICE_SEND_BUFFER();
struct spci_memory_region *memory_region =
(struct spci_memory_region *)(spci_get_lend_descriptor(recv_buf)
->payload);
EXPECT_EQ(ret.func, SPCI_MSG_SEND_32);
+ EXPECT_EQ(spci_msg_send_attributes(ret), SPCI_MSG_SEND_LEGACY_MEMORY);
hf_mailbox_clear();
/* Attempt to relinquish from primary VM. */
- spci_memory_relinquish(send_buf, spci_msg_send_receiver(ret),
- HF_PRIMARY_VM_ID, memory_region->constituents,
- memory_region->count, 0);
- EXPECT_EQ(spci_msg_send(0), SPCI_INVALID_PARAMETERS);
+ msg_size = spci_memory_relinquish_init(
+ send_buf, memory_region->constituents, memory_region->count, 0);
+ EXPECT_SPCI_ERROR(
+ spci_msg_send(HF_PRIMARY_VM_ID, spci_msg_send_receiver(ret),
+ msg_size, SPCI_MSG_SEND_LEGACY_MEMORY),
+ SPCI_INVALID_PARAMETERS);
/* Give the memory back and notify the sender. */
- spci_memory_relinquish(
- send_buf, HF_PRIMARY_VM_ID, spci_msg_send_receiver(ret),
- memory_region->constituents, memory_region->count, 0);
- EXPECT_EQ(spci_msg_send(0), SPCI_SUCCESS);
+ msg_size = spci_memory_relinquish_init(
+ send_buf, memory_region->constituents, memory_region->count, 0);
+ EXPECT_EQ(spci_msg_send(spci_msg_send_receiver(ret), HF_PRIMARY_VM_ID,
+ msg_size, SPCI_MSG_SEND_LEGACY_MEMORY)
+ .func,
+ SPCI_SUCCESS_32);
/* Ensure we cannot lend from the primary to another secondary. */
- spci_memory_lend(send_buf, SERVICE_VM1, HF_PRIMARY_VM_ID,
- memory_region->constituents, memory_region->count, 0,
- SPCI_LEND_RW_X, SPCI_LEND_NORMAL_MEM,
- SPCI_LEND_CACHE_WRITE_BACK, SPCI_LEND_OUTER_SHAREABLE);
- EXPECT_EQ(spci_msg_send(0), SPCI_INVALID_PARAMETERS);
+ msg_size = spci_memory_lend_init(
+ send_buf, memory_region->constituents, memory_region->count, 0,
+ SPCI_LEND_RW_X, SPCI_LEND_NORMAL_MEM,
+ SPCI_LEND_CACHE_WRITE_BACK, SPCI_LEND_OUTER_SHAREABLE);
+ EXPECT_SPCI_ERROR(spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM1, msg_size,
+ SPCI_MSG_SEND_LEGACY_MEMORY),
+ SPCI_INVALID_PARAMETERS);
spci_yield();
}
@@ -485,15 +533,18 @@
for (;;) {
struct spci_value ret = spci_msg_wait();
uint64_t *ptr;
+ uint32_t msg_size;
- struct spci_message *recv_buf = SERVICE_RECV_BUFFER();
- struct spci_message *send_buf = SERVICE_SEND_BUFFER();
+ void *recv_buf = SERVICE_RECV_BUFFER();
+ void *send_buf = SERVICE_SEND_BUFFER();
struct spci_memory_region *memory_region =
(struct spci_memory_region *)(spci_get_lend_descriptor(
recv_buf)
->payload);
EXPECT_EQ(ret.func, SPCI_MSG_SEND_32);
+ EXPECT_EQ(spci_msg_send_attributes(ret),
+ SPCI_MSG_SEND_LEGACY_MEMORY);
hf_mailbox_clear();
ptr = (uint64_t *)memory_region->constituents[0].address;
@@ -506,10 +557,14 @@
__asm__ volatile("blr %0" ::"r"(ptr));
/* Release the memory again. */
- spci_memory_relinquish(
- send_buf, HF_PRIMARY_VM_ID, spci_msg_send_receiver(ret),
- memory_region->constituents, memory_region->count, 0);
- EXPECT_EQ(spci_msg_send(0), SPCI_SUCCESS);
+ msg_size = spci_memory_relinquish_init(
+ send_buf, memory_region->constituents,
+ memory_region->count, 0);
+ EXPECT_EQ(spci_msg_send(spci_msg_send_receiver(ret),
+ HF_PRIMARY_VM_ID, msg_size,
+ SPCI_MSG_SEND_LEGACY_MEMORY)
+ .func,
+ SPCI_SUCCESS_32);
}
}
@@ -521,15 +576,18 @@
for (;;) {
struct spci_value ret = spci_msg_wait();
uint8_t *ptr;
+ uint32_t msg_size;
- struct spci_message *recv_buf = SERVICE_RECV_BUFFER();
- struct spci_message *send_buf = SERVICE_SEND_BUFFER();
+ void *recv_buf = SERVICE_RECV_BUFFER();
+ void *send_buf = SERVICE_SEND_BUFFER();
struct spci_memory_region *memory_region =
(struct spci_memory_region *)(spci_get_lend_descriptor(
recv_buf)
->payload);
EXPECT_EQ(ret.func, SPCI_MSG_SEND_32);
+ EXPECT_EQ(spci_msg_send_attributes(ret),
+ SPCI_MSG_SEND_LEGACY_MEMORY);
hf_mailbox_clear();
ptr = (uint8_t *)memory_region->constituents[0].address;
@@ -547,10 +605,14 @@
ptr[i]++;
}
- spci_memory_relinquish(
- send_buf, HF_PRIMARY_VM_ID, spci_msg_send_receiver(ret),
- memory_region->constituents, memory_region->count, 0);
- EXPECT_EQ(spci_msg_send(0), SPCI_SUCCESS);
+ msg_size = spci_memory_relinquish_init(
+ send_buf, memory_region->constituents,
+ memory_region->count, 0);
+ EXPECT_EQ(spci_msg_send(spci_msg_send_receiver(ret),
+ HF_PRIMARY_VM_ID, msg_size,
+ SPCI_MSG_SEND_LEGACY_MEMORY)
+ .func,
+ SPCI_SUCCESS_32);
}
}
@@ -562,12 +624,13 @@
struct spci_value ret = spci_msg_wait();
uint8_t *ptr;
- struct spci_message *recv_buf = SERVICE_RECV_BUFFER();
+ void *recv_buf = SERVICE_RECV_BUFFER();
struct spci_memory_region *memory_region =
(struct spci_memory_region *)(spci_get_lend_descriptor(recv_buf)
->payload);
EXPECT_EQ(ret.func, SPCI_MSG_SEND_32);
+ EXPECT_EQ(spci_msg_send_attributes(ret), SPCI_MSG_SEND_LEGACY_MEMORY);
hf_mailbox_clear();
ptr = (uint8_t *)memory_region->constituents[0].address;
@@ -584,12 +647,13 @@
struct spci_value ret = spci_msg_wait();
uint8_t *ptr;
- struct spci_message *recv_buf = SERVICE_RECV_BUFFER();
+ void *recv_buf = SERVICE_RECV_BUFFER();
struct spci_memory_region *memory_region =
(struct spci_memory_region *)(spci_get_lend_descriptor(recv_buf)
->payload);
EXPECT_EQ(ret.func, SPCI_MSG_SEND_32);
+ EXPECT_EQ(spci_msg_send_attributes(ret), SPCI_MSG_SEND_LEGACY_MEMORY);
hf_mailbox_clear();
ptr = (uint8_t *)memory_region->constituents[0].address;
@@ -602,14 +666,16 @@
{
struct spci_value ret = spci_msg_wait();
uint8_t *ptr;
+ uint32_t msg_size;
- struct spci_message *recv_buf = SERVICE_RECV_BUFFER();
- struct spci_message *send_buf = SERVICE_SEND_BUFFER();
+ void *recv_buf = SERVICE_RECV_BUFFER();
+ void *send_buf = SERVICE_SEND_BUFFER();
struct spci_memory_region *memory_region =
(struct spci_memory_region *)(spci_get_lend_descriptor(recv_buf)
->payload);
EXPECT_EQ(ret.func, SPCI_MSG_SEND_32);
+ EXPECT_EQ(spci_msg_send_attributes(ret), SPCI_MSG_SEND_LEGACY_MEMORY);
hf_mailbox_clear();
ptr = (uint8_t *)memory_region->constituents[0].address;
@@ -631,16 +697,21 @@
memory_region->constituents[0].address = (uint64_t)ptr + i;
/* Fail to lend the memory back to the primary. */
- spci_memory_lend(
- send_buf, SERVICE_VM1, HF_PRIMARY_VM_ID,
- memory_region->constituents, memory_region->count, 0,
- SPCI_LEND_RW_X, SPCI_LEND_NORMAL_MEM,
- SPCI_LEND_CACHE_WRITE_BACK, SPCI_LEND_OUTER_SHAREABLE);
- EXPECT_EQ(spci_msg_send(0), SPCI_INVALID_PARAMETERS);
+ msg_size = spci_memory_lend_init(
+ send_buf, memory_region->constituents,
+ memory_region->count, 0, SPCI_LEND_RW_X,
+ SPCI_LEND_NORMAL_MEM, SPCI_LEND_CACHE_WRITE_BACK,
+ SPCI_LEND_OUTER_SHAREABLE);
+ EXPECT_SPCI_ERROR(
+ spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM1, msg_size,
+ SPCI_MSG_SEND_LEGACY_MEMORY),
+ SPCI_INVALID_PARAMETERS);
}
- spci_memory_relinquish(
- send_buf, HF_PRIMARY_VM_ID, spci_msg_send_receiver(ret),
- memory_region->constituents, memory_region->count, 0);
- EXPECT_EQ(spci_msg_send(0), SPCI_SUCCESS);
+ msg_size = spci_memory_relinquish_init(
+ send_buf, memory_region->constituents, memory_region->count, 0);
+ EXPECT_EQ(spci_msg_send(spci_msg_send_receiver(ret), HF_PRIMARY_VM_ID,
+ msg_size, SPCI_MSG_SEND_LEGACY_MEMORY)
+ .func,
+ SPCI_SUCCESS_32);
}
diff --git a/test/vmapi/primary_with_secondaries/services/receive_block.c b/test/vmapi/primary_with_secondaries/services/receive_block.c
index 666754f..c8c4de2 100644
--- a/test/vmapi/primary_with_secondaries/services/receive_block.c
+++ b/test/vmapi/primary_with_secondaries/services/receive_block.c
@@ -24,6 +24,7 @@
#include "hftest.h"
#include "primary_with_secondary.h"
+#include "util.h"
/*
* Secondary VM that enables an interrupt, disables interrupts globally, and
@@ -47,14 +48,11 @@
for (i = 0; i < 10; ++i) {
struct spci_value res = spci_msg_wait();
- EXPECT_EQ(res.func, SPCI_ERROR_32);
- EXPECT_EQ(res.arg1, SPCI_INTERRUPTED);
+ EXPECT_SPCI_ERROR(res, SPCI_INTERRUPTED);
}
- memcpy_s(SERVICE_SEND_BUFFER()->payload, SPCI_MSG_PAYLOAD_MAX, message,
+ memcpy_s(SERVICE_SEND_BUFFER(), SPCI_MSG_PAYLOAD_MAX, message,
sizeof(message));
- spci_message_init(SERVICE_SEND_BUFFER(), sizeof(message),
- HF_PRIMARY_VM_ID, hf_vm_get_id());
- spci_msg_send(0);
+ spci_msg_send(hf_vm_get_id(), HF_PRIMARY_VM_ID, sizeof(message), 0);
}
diff --git a/test/vmapi/primary_with_secondaries/services/relay.c b/test/vmapi/primary_with_secondaries/services/relay.c
index 1003699..61e26b9 100644
--- a/test/vmapi/primary_with_secondaries/services/relay.c
+++ b/test/vmapi/primary_with_secondaries/services/relay.c
@@ -40,23 +40,21 @@
ASSERT_EQ(ret.func, SPCI_MSG_SEND_32);
/* Prepare to relay the message. */
- struct spci_message *recv_buf = SERVICE_RECV_BUFFER();
- struct spci_message *send_buf = SERVICE_SEND_BUFFER();
+ void *recv_buf = SERVICE_RECV_BUFFER();
+ void *send_buf = SERVICE_SEND_BUFFER();
ASSERT_GE(spci_msg_send_size(ret), sizeof(spci_vm_id_t));
- chain = (spci_vm_id_t *)recv_buf->payload;
+ chain = (spci_vm_id_t *)recv_buf;
next_vm_id = le16toh(*chain);
next_message = chain + 1;
next_message_size =
spci_msg_send_size(ret) - sizeof(spci_vm_id_t);
/* Send the message to the next stage. */
- memcpy_s(send_buf->payload, SPCI_MSG_PAYLOAD_MAX, next_message,
+ memcpy_s(send_buf, SPCI_MSG_PAYLOAD_MAX, next_message,
next_message_size);
- spci_message_init(send_buf, next_message_size, next_vm_id,
- hf_vm_get_id());
hf_mailbox_clear();
- spci_msg_send(0);
+ spci_msg_send(hf_vm_get_id(), next_vm_id, next_message_size, 0);
}
}
diff --git a/test/vmapi/primary_with_secondaries/services/smp.c b/test/vmapi/primary_with_secondaries/services/smp.c
index 5ad31d0..cd8fe9e 100644
--- a/test/vmapi/primary_with_secondaries/services/smp.c
+++ b/test/vmapi/primary_with_secondaries/services/smp.c
@@ -41,12 +41,10 @@
/** Send a message back to the primary. */
void send_message(const char *message, uint32_t size)
{
- memcpy_s(SERVICE_SEND_BUFFER()->payload, SPCI_MSG_PAYLOAD_MAX, message,
- size);
- spci_message_init(SERVICE_SEND_BUFFER(), size, HF_PRIMARY_VM_ID,
- hf_vm_get_id());
+ memcpy_s(SERVICE_SEND_BUFFER(), SPCI_MSG_PAYLOAD_MAX, message, size);
- ASSERT_EQ(spci_msg_send(0), SPCI_SUCCESS);
+ ASSERT_EQ(spci_msg_send(hf_vm_get_id(), HF_PRIMARY_VM_ID, size, 0).func,
+ SPCI_SUCCESS_32);
}
/**
diff --git a/test/vmapi/primary_with_secondaries/services/spci_check.c b/test/vmapi/primary_with_secondaries/services/spci_check.c
index 3ce9a1f..0cb1cc0 100644
--- a/test/vmapi/primary_with_secondaries/services/spci_check.c
+++ b/test/vmapi/primary_with_secondaries/services/spci_check.c
@@ -21,24 +21,12 @@
#include "hftest.h"
#include "primary_with_secondary.h"
+#include "util.h"
TEST_SERVICE(spci_check)
{
- struct spci_message *recv_buf = SERVICE_RECV_BUFFER();
+ void *recv_buf = SERVICE_RECV_BUFFER();
const char message[] = "spci_msg_send";
- struct spci_message expected_message = {
- .flags = SPCI_MESSAGE_IMPDEF_MASK,
- .length = sizeof(message),
- .target_vm_id = hf_vm_get_id(),
- .source_vm_id = HF_PRIMARY_VM_ID,
-
- /*
- * TODO: Padding fields may be set to MBZ in the next SPCI spec
- * versions.
- */
- .reserved_1 = 0,
- .reserved_2 = 0,
- };
/* Wait for single message to be sent by the primary VM. */
struct spci_value ret = spci_msg_wait();
@@ -49,18 +37,16 @@
EXPECT_EQ(spci_msg_send_size(ret), sizeof(message));
EXPECT_EQ(spci_msg_send_receiver(ret), hf_vm_get_id());
EXPECT_EQ(spci_msg_send_sender(ret), HF_PRIMARY_VM_ID);
- EXPECT_EQ(memcmp(recv_buf, &expected_message, sizeof(expected_message)),
- 0);
/* Ensure that the payload was correctly transmitted. */
- EXPECT_EQ(memcmp(recv_buf->payload, message, sizeof(message)), 0);
+ EXPECT_EQ(memcmp(recv_buf, message, sizeof(message)), 0);
spci_yield();
}
TEST_SERVICE(spci_length)
{
- struct spci_message *recv_buf = SERVICE_RECV_BUFFER();
+ void *recv_buf = SERVICE_RECV_BUFFER();
const char message[] = "this should be truncated";
/* Wait for single message to be sent by the primary VM. */
@@ -72,9 +58,8 @@
EXPECT_EQ(16, spci_msg_send_size(ret));
/* Check only part of the message is sent correctly. */
- EXPECT_NE(memcmp(recv_buf->payload, message, sizeof(message)), 0);
- EXPECT_EQ(memcmp(recv_buf->payload, message, spci_msg_send_size(ret)),
- 0);
+ EXPECT_NE(memcmp(recv_buf, message, sizeof(message)), 0);
+ EXPECT_EQ(memcmp(recv_buf, message, spci_msg_send_size(ret)), 0);
spci_yield();
}
@@ -84,8 +69,7 @@
/* Wait for single message to be sent by the primary VM. */
struct spci_value ret = spci_msg_poll();
- EXPECT_EQ(ret.func, SPCI_ERROR_32);
- EXPECT_EQ(ret.arg1, SPCI_RETRY);
+ EXPECT_SPCI_ERROR(ret, SPCI_RETRY);
spci_yield();
}
diff --git a/test/vmapi/primary_with_secondaries/services/wfi.c b/test/vmapi/primary_with_secondaries/services/wfi.c
index 6935107..7dbd372 100644
--- a/test/vmapi/primary_with_secondaries/services/wfi.c
+++ b/test/vmapi/primary_with_secondaries/services/wfi.c
@@ -48,10 +48,8 @@
interrupt_wait();
}
- memcpy_s(SERVICE_SEND_BUFFER()->payload, SPCI_MSG_PAYLOAD_MAX, message,
+ memcpy_s(SERVICE_SEND_BUFFER(), SPCI_MSG_PAYLOAD_MAX, message,
sizeof(message));
- spci_message_init(SERVICE_SEND_BUFFER(), sizeof(message),
- HF_PRIMARY_VM_ID, hf_vm_get_id());
- spci_msg_send(0);
+ spci_msg_send(hf_vm_get_id(), HF_PRIMARY_VM_ID, sizeof(message), 0);
}
diff --git a/test/vmapi/primary_with_secondaries/smp.c b/test/vmapi/primary_with_secondaries/smp.c
index de91d78..af5c637 100644
--- a/test/vmapi/primary_with_secondaries/smp.c
+++ b/test/vmapi/primary_with_secondaries/smp.c
@@ -48,7 +48,7 @@
run_res = hf_vcpu_run(SERVICE_VM2, 1);
EXPECT_EQ(run_res.code, HF_VCPU_RUN_MESSAGE);
EXPECT_EQ(run_res.message.size, sizeof(expected_response_1));
- EXPECT_EQ(memcmp(mb.recv->payload, expected_response_1,
+ EXPECT_EQ(memcmp(mb.recv, expected_response_1,
sizeof(expected_response_1)),
0);
EXPECT_EQ(hf_mailbox_clear(), 0);
@@ -58,7 +58,7 @@
run_res = hf_vcpu_run(SERVICE_VM2, 0);
EXPECT_EQ(run_res.code, HF_VCPU_RUN_MESSAGE);
EXPECT_EQ(run_res.message.size, sizeof(expected_response_0));
- EXPECT_EQ(memcmp(mb.recv->payload, expected_response_0,
+ EXPECT_EQ(memcmp(mb.recv, expected_response_0,
sizeof(expected_response_0)),
0);
EXPECT_EQ(hf_mailbox_clear(), 0);
diff --git a/test/vmapi/primary_with_secondaries/spci.c b/test/vmapi/primary_with_secondaries/spci.c
index ac54da7..f551830 100644
--- a/test/vmapi/primary_with_secondaries/spci.c
+++ b/test/vmapi/primary_with_secondaries/spci.c
@@ -39,11 +39,11 @@
SERVICE_SELECT(SERVICE_VM0, "spci_check", mb.send);
/* Set the payload, init the message header and send the message. */
- memcpy_s(mb.send->payload, SPCI_MSG_PAYLOAD_MAX, message,
- sizeof(message));
- spci_message_init(mb.send, sizeof(message), SERVICE_VM0,
- HF_PRIMARY_VM_ID);
- EXPECT_EQ(spci_msg_send(0), 0);
+ memcpy_s(mb.send, SPCI_MSG_PAYLOAD_MAX, message, sizeof(message));
+ EXPECT_EQ(
+ spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM0, sizeof(message), 0)
+ .func,
+ SPCI_SUCCESS_32);
run_res = hf_vcpu_run(SERVICE_VM0, 0);
EXPECT_EQ(run_res.code, HF_VCPU_RUN_YIELD);
@@ -60,10 +60,10 @@
SERVICE_SELECT(SERVICE_VM0, "spci_check", mb.send);
/* Set the payload, init the message header and send the message. */
- memcpy_s(mb.send->payload, SPCI_MSG_PAYLOAD_MAX, message,
- sizeof(message));
- spci_message_init(mb.send, sizeof(message), SERVICE_VM0, SERVICE_VM1);
- EXPECT_EQ(spci_msg_send(0), SPCI_INVALID_PARAMETERS);
+ memcpy_s(mb.send, SPCI_MSG_PAYLOAD_MAX, message, sizeof(message));
+ EXPECT_SPCI_ERROR(
+ spci_msg_send(SERVICE_VM1, SERVICE_VM0, sizeof(message), 0),
+ SPCI_INVALID_PARAMETERS);
}
/**
@@ -76,10 +76,10 @@
SERVICE_SELECT(SERVICE_VM0, "spci_check", mb.send);
/* Set the payload, init the message header and send the message. */
- memcpy_s(mb.send->payload, SPCI_MSG_PAYLOAD_MAX, message,
- sizeof(message));
- spci_message_init(mb.send, sizeof(message), -1, HF_PRIMARY_VM_ID);
- EXPECT_EQ(spci_msg_send(0), SPCI_INVALID_PARAMETERS);
+ memcpy_s(mb.send, SPCI_MSG_PAYLOAD_MAX, message, sizeof(message));
+ EXPECT_SPCI_ERROR(
+ spci_msg_send(HF_PRIMARY_VM_ID, -1, sizeof(message), 0),
+ SPCI_INVALID_PARAMETERS);
}
/**
@@ -94,12 +94,10 @@
SERVICE_SELECT(SERVICE_VM0, "spci_length", mb.send);
/* Send the message and compare if truncated. */
- memcpy_s(mb.send->payload, SPCI_MSG_PAYLOAD_MAX, message,
- sizeof(message));
+ memcpy_s(mb.send, SPCI_MSG_PAYLOAD_MAX, message, sizeof(message));
/* Hard code incorrect length. */
- spci_message_init(mb.send, 16, SERVICE_VM0, HF_PRIMARY_VM_ID);
-
- EXPECT_EQ(spci_msg_send(0), SPCI_SUCCESS);
+ EXPECT_EQ(spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM0, 16, 0).func,
+ SPCI_SUCCESS_32);
run_res = hf_vcpu_run(SERVICE_VM0, 0);
EXPECT_EQ(run_res.code, HF_VCPU_RUN_YIELD);
}
@@ -112,11 +110,11 @@
const char message[] = "fail to send";
struct mailbox_buffers mb = set_up_mailbox();
- memcpy_s(mb.send->payload, SPCI_MSG_PAYLOAD_MAX, message,
- sizeof(message));
+ memcpy_s(mb.send, SPCI_MSG_PAYLOAD_MAX, message, sizeof(message));
/* Send a message that is larger than the mailbox supports (4KB). */
- spci_message_init(mb.send, 4 * 1024, SERVICE_VM0, HF_PRIMARY_VM_ID);
- EXPECT_EQ(spci_msg_send(0), SPCI_INVALID_PARAMETERS);
+ EXPECT_SPCI_ERROR(
+ spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM0, 4 * 1024 + 1, 0),
+ SPCI_INVALID_PARAMETERS);
}
/**
diff --git a/test/vmapi/primary_with_secondaries/util.c b/test/vmapi/primary_with_secondaries/util.c
index 082daf6..419ec6c 100644
--- a/test/vmapi/primary_with_secondaries/util.c
+++ b/test/vmapi/primary_with_secondaries/util.c
@@ -36,7 +36,7 @@
{
ASSERT_EQ(hf_vm_configure(send_page_addr, recv_page_addr), 0);
return (struct mailbox_buffers){
- .send = ((struct spci_message *)send_page),
- .recv = ((struct spci_message *)recv_page),
+ .send = send_page,
+ .recv = recv_page,
};
}