Update SPCI memory sharing to match latest FF-A spec 1.0 EAC.
SPCI is now called PSA FF-A. Symbols will be renamed in a later change.
Disabled checkpatch SPACING because it disagrees with Clang format.
Bug: 132420445
Change-Id: I41c6cc7ddad136ed7c4797dfa1204718a66ddfce
diff --git a/Makefile b/Makefile
index 3c2bded..ebea697 100644
--- a/Makefile
+++ b/Makefile
@@ -43,7 +43,7 @@
CHECKPATCH := $(CURDIR)/third_party/linux/scripts/checkpatch.pl \
- --ignore BRACES,SPDX_LICENSE_TAG,VOLATILE,SPLIT_STRING,AVOID_EXTERNS,USE_SPINLOCK_T,NEW_TYPEDEFS,INITIALISED_STATIC,FILE_PATH_CHANGES,EMBEDDED_FUNCTION_NAME,SINGLE_STATEMENT_DO_WHILE_MACRO,MACRO_WITH_FLOW_CONTROL,PREFER_PACKED,PREFER_ALIGNED,INDENTED_LABEL --quiet
+ --ignore BRACES,SPDX_LICENSE_TAG,VOLATILE,SPLIT_STRING,AVOID_EXTERNS,USE_SPINLOCK_T,NEW_TYPEDEFS,INITIALISED_STATIC,FILE_PATH_CHANGES,EMBEDDED_FUNCTION_NAME,SINGLE_STATEMENT_DO_WHILE_MACRO,MACRO_WITH_FLOW_CONTROL,PREFER_PACKED,PREFER_ALIGNED,INDENTED_LABEL,SPACING --quiet
# Specifies the grep pattern for ignoring specific files in checkpatch.
# C++ headers, *.hh, are automatically excluded.
diff --git a/inc/hf/api.h b/inc/hf/api.h
index ebc533b..07b9061 100644
--- a/inc/hf/api.h
+++ b/inc/hf/api.h
@@ -60,14 +60,16 @@
struct spci_value api_spci_features(uint32_t function_id);
struct spci_value api_spci_run(spci_vm_id_t vm_id, spci_vcpu_index_t vcpu_idx,
const struct vcpu *current, struct vcpu **next);
-struct spci_value api_spci_mem_send(uint32_t share_func, ipaddr_t address,
- uint32_t page_count,
- uint32_t fragment_length, uint32_t length,
- spci_cookie_t cookie, struct vcpu *current,
+struct spci_value api_spci_mem_send(uint32_t share_func, uint32_t length,
+ uint32_t fragment_length, ipaddr_t address,
+ uint32_t page_count, struct vcpu *current,
struct vcpu **next);
-struct spci_value api_spci_mem_retrieve_req(
- ipaddr_t address, uint32_t page_count, uint32_t fragment_length,
- uint32_t length, spci_cookie_t cookie, struct vcpu *current);
+struct spci_value api_spci_mem_retrieve_req(uint32_t length,
+ uint32_t fragment_length,
+ ipaddr_t address,
+ uint32_t page_count,
+ struct vcpu *current);
struct spci_value api_spci_mem_relinquish(struct vcpu *current);
-struct spci_value api_spci_mem_reclaim(uint32_t handle, uint32_t flags,
+struct spci_value api_spci_mem_reclaim(spci_memory_handle_t handle,
+ spci_memory_region_flags_t flags,
struct vcpu *current);
diff --git a/inc/hf/spci_memory.h b/inc/hf/spci_memory.h
index 023a8ce..6956102 100644
--- a/inc/hf/spci_memory.h
+++ b/inc/hf/spci_memory.h
@@ -27,8 +27,7 @@
uint32_t share_func,
struct mpool *page_pool);
struct spci_value spci_memory_retrieve(
- struct vm_locked to_locked,
- struct spci_memory_retrieve_request *retrieve_request,
+ struct vm_locked to_locked, struct spci_memory_region *retrieve_request,
uint32_t retrieve_request_size, struct mpool *page_pool);
struct spci_value spci_memory_relinquish(
struct vm_locked from_locked,
diff --git a/inc/vmapi/hf/call.h b/inc/vmapi/hf/call.h
index d4119d6..1fa31cf 100644
--- a/inc/vmapi/hf/call.h
+++ b/inc/vmapi/hf/call.h
@@ -131,44 +131,36 @@
.arg4 = attributes});
}
-static inline struct spci_value spci_mem_donate(uint32_t fragment_length,
- uint32_t length,
- spci_cookie_t cookie)
+static inline struct spci_value spci_mem_donate(uint32_t length,
+ uint32_t fragment_length)
{
return spci_call((struct spci_value){.func = SPCI_MEM_DONATE_32,
- .arg3 = fragment_length,
- .arg4 = length,
- .arg5 = cookie});
+ .arg1 = length,
+ .arg2 = fragment_length});
}
-static inline struct spci_value spci_mem_lend(uint32_t fragment_length,
- uint32_t length,
- spci_cookie_t cookie)
+static inline struct spci_value spci_mem_lend(uint32_t length,
+ uint32_t fragment_length)
{
return spci_call((struct spci_value){.func = SPCI_MEM_LEND_32,
- .arg3 = fragment_length,
- .arg4 = length,
- .arg5 = cookie});
+ .arg1 = length,
+ .arg2 = fragment_length});
}
-static inline struct spci_value spci_mem_share(uint32_t fragment_length,
- uint32_t length,
- spci_cookie_t cookie)
+static inline struct spci_value spci_mem_share(uint32_t length,
+ uint32_t fragment_length)
{
return spci_call((struct spci_value){.func = SPCI_MEM_SHARE_32,
- .arg3 = fragment_length,
- .arg4 = length,
- .arg5 = cookie});
+ .arg1 = length,
+ .arg2 = fragment_length});
}
-static inline struct spci_value spci_mem_retrieve_req(uint32_t fragment_length,
- uint32_t length,
- spci_cookie_t cookie)
+static inline struct spci_value spci_mem_retrieve_req(uint32_t length,
+ uint32_t fragment_length)
{
return spci_call((struct spci_value){.func = SPCI_MEM_RETRIEVE_REQ_32,
- .arg3 = fragment_length,
- .arg4 = length,
- .arg5 = cookie});
+ .arg1 = length,
+ .arg2 = fragment_length});
}
static inline struct spci_value spci_mem_relinquish(void)
@@ -176,11 +168,13 @@
return spci_call((struct spci_value){.func = SPCI_MEM_RELINQUISH_32});
}
-static inline struct spci_value spci_mem_reclaim(uint32_t handle,
- uint32_t flags)
+static inline struct spci_value spci_mem_reclaim(
+ spci_memory_handle_t handle, spci_memory_region_flags_t flags)
{
- return spci_call((struct spci_value){
- .func = SPCI_MEM_RECLAIM_32, .arg1 = handle, .arg2 = flags});
+ return spci_call((struct spci_value){.func = SPCI_MEM_RECLAIM_32,
+ .arg1 = (uint32_t)handle,
+ .arg2 = (uint32_t)(handle >> 32),
+ .arg3 = flags});
}
/**
diff --git a/inc/vmapi/hf/spci.h b/inc/vmapi/hf/spci.h
index 9c10acf..887b745 100644
--- a/inc/vmapi/hf/spci.h
+++ b/inc/vmapi/hf/spci.h
@@ -62,9 +62,11 @@
#define SPCI_RETRY INT32_C(-7)
#define SPCI_ABORTED INT32_C(-8)
+/* clang-format on */
+
/* SPCI function specific constants. */
-#define SPCI_MSG_RECV_BLOCK 0x1
-#define SPCI_MSG_RECV_BLOCK_MASK 0x1
+#define SPCI_MSG_RECV_BLOCK 0x1
+#define SPCI_MSG_RECV_BLOCK_MASK 0x1
#define SPCI_MSG_SEND_NOTIFY 0x1
#define SPCI_MSG_SEND_NOTIFY_MASK 0x1
@@ -83,14 +85,22 @@
/* The maximum length possible for a single message. */
#define SPCI_MSG_PAYLOAD_MAX HF_MAILBOX_SIZE
-enum spci_memory_access {
- SPCI_MEMORY_RO_NX,
- SPCI_MEMORY_RO_X,
- SPCI_MEMORY_RW_NX,
- SPCI_MEMORY_RW_X,
+enum spci_data_access {
+ SPCI_DATA_ACCESS_NOT_SPECIFIED,
+ SPCI_DATA_ACCESS_RO,
+ SPCI_DATA_ACCESS_RW,
+ SPCI_DATA_ACCESS_RESERVED,
+};
+
+enum spci_instruction_access {
+ SPCI_INSTRUCTION_ACCESS_NOT_SPECIFIED,
+ SPCI_INSTRUCTION_ACCESS_NX,
+ SPCI_INSTRUCTION_ACCESS_X,
+ SPCI_INSTRUCTION_ACCESS_RESERVED,
};
enum spci_memory_type {
+ SPCI_MEMORY_NOT_SPECIFIED_MEM,
SPCI_MEMORY_DEVICE_MEM,
SPCI_MEMORY_NORMAL_MEM,
};
@@ -98,8 +108,8 @@
enum spci_memory_cacheability {
SPCI_MEMORY_CACHE_RESERVED = 0x0,
SPCI_MEMORY_CACHE_NON_CACHEABLE = 0x1,
- SPCI_MEMORY_CACHE_WRITE_THROUGH = 0x2,
- SPCI_MEMORY_CACHE_WRITE_BACK = 0x4,
+ SPCI_MEMORY_CACHE_RESERVED_1 = 0x2,
+ SPCI_MEMORY_CACHE_WRITE_BACK = 0x3,
SPCI_MEMORY_DEV_NGNRNE = 0x0,
SPCI_MEMORY_DEV_NGNRE = 0x1,
SPCI_MEMORY_DEV_NGRE = 0x2,
@@ -108,64 +118,83 @@
enum spci_memory_shareability {
SPCI_MEMORY_SHARE_NON_SHAREABLE,
- SPCI_MEMORY_RESERVED,
+ SPCI_MEMORY_SHARE_RESERVED,
SPCI_MEMORY_OUTER_SHAREABLE,
SPCI_MEMORY_INNER_SHAREABLE,
};
-#define SPCI_MEMORY_ACCESS_OFFSET (0x5U)
-#define SPCI_MEMORY_ACCESS_MASK ((0x3U) << SPCI_MEMORY_ACCESS_OFFSET)
+typedef uint8_t spci_memory_access_permissions_t;
+
+/**
+ * This corresponds to table 44 of the FF-A 1.0 EAC specification, "Memory
+ * region attributes descriptor".
+ */
+typedef uint8_t spci_memory_attributes_t;
+
+#define SPCI_DATA_ACCESS_OFFSET (0x0U)
+#define SPCI_DATA_ACCESS_MASK ((0x3U) << SPCI_DATA_ACCESS_OFFSET)
+
+#define SPCI_INSTRUCTION_ACCESS_OFFSET (0x2U)
+#define SPCI_INSTRUCTION_ACCESS_MASK ((0x3U) << SPCI_INSTRUCTION_ACCESS_OFFSET)
#define SPCI_MEMORY_TYPE_OFFSET (0x4U)
-#define SPCI_MEMORY_TYPE_MASK ((0x1U) << SPCI_MEMORY_TYPE_OFFSET)
+#define SPCI_MEMORY_TYPE_MASK ((0x3U) << SPCI_MEMORY_TYPE_OFFSET)
#define SPCI_MEMORY_CACHEABILITY_OFFSET (0x2U)
-#define SPCI_MEMORY_CACHEABILITY_MASK ((0x3U) <<\
- SPCI_MEMORY_CACHEABILITY_OFFSET)
+#define SPCI_MEMORY_CACHEABILITY_MASK \
+ ((0x3U) << SPCI_MEMORY_CACHEABILITY_OFFSET)
#define SPCI_MEMORY_SHAREABILITY_OFFSET (0x0U)
-#define SPCI_MEMORY_SHAREABILITY_MASK ((0x3U) <<\
- SPCI_MEMORY_SHAREABILITY_OFFSET)
+#define SPCI_MEMORY_SHAREABILITY_MASK \
+ ((0x3U) << SPCI_MEMORY_SHAREABILITY_OFFSET)
-#define LEND_ATTR_FUNCTION_SET(name, offset, mask) \
-static inline void spci_set_memory_##name##_attr(uint16_t *attr,\
- const enum spci_memory_##name perm)\
-{\
- *attr = (*attr & ~(mask)) | ((perm << offset) & mask);\
-}
+#define ATTR_FUNCTION_SET(name, container_type, offset, mask) \
+ static inline void spci_set_##name##_attr(container_type *attr, \
+ const enum spci_##name perm) \
+ { \
+ *attr = (*attr & ~(mask)) | ((perm << offset) & mask); \
+ }
-#define LEND_ATTR_FUNCTION_GET(name, offset, mask) \
-static inline enum spci_memory_##name spci_get_memory_##name##_attr(\
- uint16_t attr)\
-{\
- return (enum spci_memory_##name)((attr & mask) >> offset);\
-}
+#define ATTR_FUNCTION_GET(name, container_type, offset, mask) \
+ static inline enum spci_##name spci_get_##name##_attr( \
+ container_type attr) \
+ { \
+ return (enum spci_##name)((attr & mask) >> offset); \
+ }
-LEND_ATTR_FUNCTION_SET(access, SPCI_MEMORY_ACCESS_OFFSET,
- SPCI_MEMORY_ACCESS_MASK)
-LEND_ATTR_FUNCTION_GET(access, SPCI_MEMORY_ACCESS_OFFSET,
- SPCI_MEMORY_ACCESS_MASK)
+ATTR_FUNCTION_SET(data_access, spci_memory_access_permissions_t,
+ SPCI_DATA_ACCESS_OFFSET, SPCI_DATA_ACCESS_MASK)
+ATTR_FUNCTION_GET(data_access, spci_memory_access_permissions_t,
+ SPCI_DATA_ACCESS_OFFSET, SPCI_DATA_ACCESS_MASK)
-LEND_ATTR_FUNCTION_SET(type, SPCI_MEMORY_TYPE_OFFSET, SPCI_MEMORY_TYPE_MASK)
-LEND_ATTR_FUNCTION_GET(type, SPCI_MEMORY_TYPE_OFFSET, SPCI_MEMORY_TYPE_MASK)
+ATTR_FUNCTION_SET(instruction_access, spci_memory_access_permissions_t,
+ SPCI_INSTRUCTION_ACCESS_OFFSET, SPCI_INSTRUCTION_ACCESS_MASK)
+ATTR_FUNCTION_GET(instruction_access, spci_memory_access_permissions_t,
+ SPCI_INSTRUCTION_ACCESS_OFFSET, SPCI_INSTRUCTION_ACCESS_MASK)
-LEND_ATTR_FUNCTION_SET(cacheability, SPCI_MEMORY_CACHEABILITY_OFFSET,
- SPCI_MEMORY_CACHEABILITY_MASK)
+ATTR_FUNCTION_SET(memory_type, spci_memory_attributes_t,
+ SPCI_MEMORY_TYPE_OFFSET, SPCI_MEMORY_TYPE_MASK)
+ATTR_FUNCTION_GET(memory_type, spci_memory_attributes_t,
+ SPCI_MEMORY_TYPE_OFFSET, SPCI_MEMORY_TYPE_MASK)
-LEND_ATTR_FUNCTION_GET(cacheability, SPCI_MEMORY_CACHEABILITY_OFFSET,
- SPCI_MEMORY_CACHEABILITY_MASK)
+ATTR_FUNCTION_SET(memory_cacheability, spci_memory_attributes_t,
+ SPCI_MEMORY_CACHEABILITY_OFFSET,
+ SPCI_MEMORY_CACHEABILITY_MASK)
+ATTR_FUNCTION_GET(memory_cacheability, spci_memory_attributes_t,
+ SPCI_MEMORY_CACHEABILITY_OFFSET,
+ SPCI_MEMORY_CACHEABILITY_MASK)
-LEND_ATTR_FUNCTION_SET(shareability, SPCI_MEMORY_SHAREABILITY_OFFSET,
- SPCI_MEMORY_SHAREABILITY_MASK)
+ATTR_FUNCTION_SET(memory_shareability, spci_memory_attributes_t,
+ SPCI_MEMORY_SHAREABILITY_OFFSET,
+ SPCI_MEMORY_SHAREABILITY_MASK)
+ATTR_FUNCTION_GET(memory_shareability, spci_memory_attributes_t,
+ SPCI_MEMORY_SHAREABILITY_OFFSET,
+ SPCI_MEMORY_SHAREABILITY_MASK)
-LEND_ATTR_FUNCTION_GET(shareability, SPCI_MEMORY_SHAREABILITY_OFFSET,
- SPCI_MEMORY_SHAREABILITY_MASK)
-
-/* clang-format on */
-
-#define SPCI_MEMORY_HANDLE_ALLOCATOR_MASK ((spci_memory_handle_t)(1U << 31))
+#define SPCI_MEMORY_HANDLE_ALLOCATOR_MASK \
+ ((spci_memory_handle_t)(UINT64_C(1) << 63))
#define SPCI_MEMORY_HANDLE_ALLOCATOR_HYPERVISOR \
- ((spci_memory_handle_t)(1U << 31))
+ ((spci_memory_handle_t)(UINT64_C(1) << 63))
/** The ID of a VM. These are assigned sequentially starting with an offset. */
typedef uint16_t spci_vm_id_t;
@@ -174,13 +203,7 @@
* A globally-unique ID assigned by the hypervisor for a region of memory being
* sent between VMs.
*/
-typedef uint32_t spci_memory_handle_t;
-
-/**
- * A unique-per-VM ID used to associate fragments of a memory sharing message,
- * assigned by the sender of the message.
- */
-typedef uint32_t spci_cookie_t;
+typedef uint64_t spci_memory_handle_t;
/**
* A count of VMs. This has the same range as the VM IDs but we give it a
@@ -251,158 +274,28 @@
return ((uint32_t)vm_id << 16) | vcpu_index;
}
+/**
+ * A set of contiguous pages which is part of a memory region. This corresponds
+ * to table 40 of the FF-A 1.0 EAC specification, "Constituent memory region
+ * descriptor".
+ */
struct spci_memory_region_constituent {
/**
* The base IPA of the constituent memory region, aligned to 4 kiB page
* size granularity.
*/
- uint32_t address_low;
- uint32_t address_high;
+ uint64_t address;
/** The number of 4 kiB pages in the constituent memory region. */
uint32_t page_count;
/** Reserved field, must be 0. */
uint32_t reserved;
};
-struct spci_memory_region_attributes {
- /** The ID of the VM to which the memory is being given or shared. */
- spci_vm_id_t receiver;
- /**
- * The attributes with which the memory region should be mapped in the
- * receiver's page table.
- */
- uint16_t memory_attributes;
- /** Reserved field, must be 0. */
- uint32_t reserved_0;
- /** Reserved field, must be 0. */
- uint64_t reserved_1;
-};
-
-/** Flags to control the behaviour of a memory sharing transaction. */
-typedef uint32_t spci_memory_region_flags_t;
-
/**
- * Clear memory region contents after unmapping it from the sender and before
- * mapping it for any receiver.
+ * A set of pages comprising a memory region. This corresponds to table 39 of
+ * the FF-A 1.0 EAC specification, "Composite memory region descriptor".
*/
-#define SPCI_MEMORY_REGION_FLAG_CLEAR 0x1
-
-struct spci_memory_region {
- /**
- * An implementation defined value associated with the receiver and the
- * memory region.
- */
- uint32_t tag;
- /** Flags to control behaviour of the transaction. */
- spci_memory_region_flags_t flags;
- /** Sender VM ID. */
- spci_vm_id_t sender;
- /** Reserved field, must be 0. */
- uint16_t reserved_0;
- /**
- * The total number of 4 kiB pages included in this memory region. This
- * must be equal to the sum of page counts specified in each
- * `spci_memory_region_constituent`.
- */
- uint32_t page_count;
- /**
- * The number of constituents (`spci_memory_region_constituent`)
- * included in this memory region.
- */
- uint32_t constituent_count;
- /**
- * The offset in bytes from the base address of this
- * `spci_memory_region` to the start of the first
- * `spci_memory_region_constituent`.
- */
- uint32_t constituent_offset;
- /**
- * The number of `spci_memory_region_attributes` entries included in
- * this memory region.
- */
- uint32_t attribute_count;
- /** Reserved field, must be 0. */
- uint32_t reserved_1;
- /**
- * An array of `attribute_count` memory region attribute descriptors.
- * Each one specifies an endpoint and the attributes with which this
- * memory region should be mapped in that endpoint's page table.
- */
- struct spci_memory_region_attributes attributes[];
-};
-
-/**
- * Retrieval attributes for a single receiver. This corresponds to table 138 of
- * the SPCI beta specification, "Descriptor with properties to retrieve a memory
- * region".
- */
-struct spci_memory_retrieve_properties {
- struct spci_memory_region_attributes attributes;
- uint32_t page_count;
- uint32_t constituent_count;
- /** Reserved field, must be 0. */
- uint32_t reserved;
- struct spci_memory_region_constituent constituents[];
-};
-
-/**
- * Descriptor used for SPCI_MEM_RETRIEVE_REQ. This corresponds to table 137 of
- * the SPCI beta specification, "Descriptor to retrieve a donated, lent or
- * shared memory region".
- */
-struct spci_memory_retrieve_request {
- /** Globally unique handle to identify the memory transaction. */
- spci_memory_handle_t handle;
- /** ID of the VM which sent the memory. */
- spci_vm_id_t sender;
- /** Reserved field, must be 0. */
- uint16_t reserved_0;
- /** Function ID of the memory sharing operation. */
- uint32_t share_func;
- /**
- * An implementation defined value associated with the receiver and the
- * memory region.
- */
- uint32_t tag;
- /**
- * The number of descriptors to specify the attributes with which the
- * memory region will be mapped in the other recipients. Hafnium doesn't
- * support multi-way memory sharing so this should always be 0.
- */
- uint32_t attribute_count;
- /**
- * The offset in bytes from the base address of this
- * `spci_memory_retrieve_request` to the start of the first attribute
- * descriptor.
- */
- uint32_t attribute_offset;
- /**
- * The number of `spci_memory_retrieve_properties` entries included
- * in this retrieve request, i.e. the number of receivers including the
- * caller and any stream endpoints for which the caller is a proxy.
- * Hafnium doesn't support stream endpoints, so this should always be 1.
- */
- uint32_t retrieve_properties_count;
- /** Reserved field, must be 0. */
- uint32_t reserved_1;
- /*
- * 'Array' of `struct spci_memory_retrieve_properties` goes here, but it
- * is not included in the struct as the entries are variable length. Use
- * `spci_memory_retrieve_request_first_retrieve_properties` to get the
- * first one.
- */
-};
-
-/**
- * Receiver address range descriptor. This corresponds to table 144 of the SPCI
- * beta specification, "Descriptor with address ranges of retrieved memory
- * region", and is included as part of the `spci_retrieved_memory_region`.
- */
-struct spci_receiver_address_range {
- /** Receiver VM ID. */
- spci_vm_id_t receiver;
- /** Reserved field, must be 0. */
- uint16_t reserved_0;
+struct spci_composite_memory_region {
/**
* The total number of 4 kiB pages included in this memory region. This
* must be equal to the sum of page counts specified in each
@@ -415,101 +308,155 @@
*/
uint32_t constituent_count;
/** Reserved field, must be 0. */
- uint32_t reserved_1;
+ uint64_t reserved_0;
/** An array of `constituent_count` memory region constituents. */
struct spci_memory_region_constituent constituents[];
};
+/** Flags to indicate properties of receivers during memory region retrieval. */
+typedef uint8_t spci_memory_receiver_flags_t;
+
/**
- * Descriptor used for SPCI_MEM_RETRIEVE_RESP. This corresponds to table 143 of
- * the SPCI beta specification, "Encoding of mapped address ranges of retrieved
- * memory region".
+ * This corresponds to table 41 of the FF-A 1.0 EAC specification, "Memory
+ * access permissions descriptor".
*/
-struct spci_retrieved_memory_region {
+struct spci_memory_region_attributes {
+ /** The ID of the VM to which the memory is being given or shared. */
+ spci_vm_id_t receiver;
/**
- * The number of `spci_receiver_address_range` entries included
- * in this memory region.
+ * The permissions with which the memory region should be mapped in the
+ * receiver's page table.
+ */
+ spci_memory_access_permissions_t permissions;
+ /**
+ * Flags used during SPCI_MEM_RETRIEVE_REQ and SPCI_MEM_RETRIEVE_RESP
+ * for memory regions with multiple borrowers.
+ */
+ spci_memory_receiver_flags_t flags;
+};
+
+/** Flags to control the behaviour of a memory sharing transaction. */
+typedef uint32_t spci_memory_region_flags_t;
+
+/**
+ * Clear memory region contents after unmapping it from the sender and before
+ * mapping it for any receiver.
+ */
+#define SPCI_MEMORY_REGION_FLAG_CLEAR 0x1
+
+/**
+ * Whether the hypervisor may time slice the memory sharing or retrieval
+ * operation.
+ */
+#define SPCI_MEMORY_REGION_FLAG_TIME_SLICE 0x2
+
+/**
+ * Whether the hypervisor should clear the memory region after the receiver
+ * relinquishes it or is aborted.
+ */
+#define SPCI_MEMORY_REGION_FLAG_CLEAR_RELINQUISH 0x4
+
+#define SPCI_MEMORY_REGION_TRANSACTION_TYPE_MASK ((0x3U) << 3)
+#define SPCI_MEMORY_REGION_TRANSACTION_TYPE_UNSPECIFIED ((0x0U) << 3)
+#define SPCI_MEMORY_REGION_TRANSACTION_TYPE_SHARE ((0x1U) << 3)
+#define SPCI_MEMORY_REGION_TRANSACTION_TYPE_LEND ((0x2U) << 3)
+#define SPCI_MEMORY_REGION_TRANSACTION_TYPE_DONATE ((0x3U) << 3)
+
+/**
+ * This corresponds to table 42 of the FF-A 1.0 EAC specification, "Endpoint
+ * memory access descriptor".
+ */
+struct spci_memory_access {
+ struct spci_memory_region_attributes receiver_permissions;
+ /**
+ * Offset in bytes from the start of the outer `spci_memory_region` to
+ * an `spci_composite_memory_region` struct.
+ */
+ uint32_t composite_memory_region_offset;
+ uint64_t reserved_0;
+};
+
+/**
+ * Information about a set of pages which are being shared. This corresponds to
+ * table 45 of the FF-A 1.0 EAC specification, "Lend, donate or share memory
+ * transaction descriptor". Note that it is also used for retrieve requests and
+ * responses.
+ */
+struct spci_memory_region {
+ /**
+ * The ID of the VM which originally sent the memory region, i.e. the
+ * owner.
+ */
+ spci_vm_id_t sender;
+ spci_memory_attributes_t attributes;
+ /** Reserved field, must be 0. */
+ uint8_t reserved_0;
+ /** Flags to control behaviour of the transaction. */
+ spci_memory_region_flags_t flags;
+ spci_memory_handle_t handle;
+ /**
+ * An implementation defined value associated with the receiver and the
+ * memory region.
+ */
+ uint64_t tag;
+ /** Reserved field, must be 0. */
+ uint32_t reserved_1;
+ /**
+ * The number of `spci_memory_access` entries included in this
+ * transaction.
*/
uint32_t receiver_count;
- /** Reserved field, must be 0. */
- uint32_t reserved_0;
- /** Reserved field, must be 0. */
- uint64_t reserved_1;
- /*
- * 'Array' of `struct spci_receiver_address_range` goes here, but it is
- * not included in the struct as the entries are variable length. Use
- * `spci_retrieved_memory_region_first_receiver_range` to get the first
- * one.
+ /**
+	 * An array of `receiver_count` endpoint memory access descriptors.
+ * Each one specifies a memory region offset, an endpoint and the
+ * attributes with which this memory region should be mapped in that
+ * endpoint's page table.
*/
+ struct spci_memory_access receivers[];
};
/**
* Descriptor used for SPCI_MEM_RELINQUISH requests. This corresponds to table
- * 146 of the SPCI beta specification, "Descriptor to relinquish a memory
+ * 150 of the FF-A 1.0 EAC specification, "Descriptor to relinquish a memory
* region".
*/
struct spci_mem_relinquish {
spci_memory_handle_t handle;
- uint32_t flags;
- spci_vm_id_t sender;
- uint16_t reserved;
+ spci_memory_region_flags_t flags;
uint32_t endpoint_count;
spci_vm_id_t endpoints[];
};
-static inline struct spci_memory_region_constituent
-spci_memory_region_constituent_init(uint64_t address, uint32_t page_count)
-{
- return (struct spci_memory_region_constituent){
- .address_high = (uint32_t)(address >> 32),
- .address_low = (uint32_t)address,
- .page_count = page_count,
- };
-}
-
-static inline uint64_t spci_memory_region_constituent_get_address(
- struct spci_memory_region_constituent *constituent)
-{
- return (uint64_t)constituent->address_high << 32 |
- constituent->address_low;
-}
-
/**
- * Gets the constituent array for an `spci_memory_region`.
+ * Gets the `spci_composite_memory_region` for the given receiver from an
+ * `spci_memory_region`, or NULL if it is not valid.
*/
-static inline struct spci_memory_region_constituent *
-spci_memory_region_get_constituents(struct spci_memory_region *memory_region)
+static inline struct spci_composite_memory_region *
+spci_memory_region_get_composite(struct spci_memory_region *memory_region,
+ uint32_t receiver_index)
{
- return (struct spci_memory_region_constituent
- *)((uint8_t *)memory_region +
- memory_region->constituent_offset);
+ uint32_t offset = memory_region->receivers[receiver_index]
+ .composite_memory_region_offset;
+
+ if (offset == 0) {
+ return NULL;
+ }
+
+ return (struct spci_composite_memory_region *)((uint8_t *)
+ memory_region +
+ offset);
}
-/**
- * Gets the first descriptor with address ranges of a retrieved memory region.
- *
- * Note that getting the second requires parsing the first, as they are variable
- * length due to the variable number of constituents.
- */
-static inline struct spci_receiver_address_range *
-spci_retrieved_memory_region_first_receiver_range(
- struct spci_retrieved_memory_region *memory_region)
+static inline uint32_t spci_mem_relinquish_init(
+ struct spci_mem_relinquish *relinquish_request,
+ spci_memory_handle_t handle, spci_memory_region_flags_t flags,
+ spci_vm_id_t sender)
{
- return (struct spci_receiver_address_range *)(memory_region + 1);
-}
-
-/**
- * Gets the first retrieval attributes descriptor of a memory region retrieval
- * request.
- *
- * Note that getting the second requires parsing the first, as they are variable
- * length due to the variable number of constituents.
- */
-static inline struct spci_memory_retrieve_properties *
-spci_memory_retrieve_request_first_retrieve_properties(
- struct spci_memory_retrieve_request *retrieve_request)
-{
- return (struct spci_memory_retrieve_properties *)(retrieve_request + 1);
+ relinquish_request->handle = handle;
+ relinquish_request->flags = flags;
+ relinquish_request->endpoint_count = 1;
+ relinquish_request->endpoints[0] = sender;
+ return sizeof(struct spci_mem_relinquish) + sizeof(spci_vm_id_t);
}
uint32_t spci_memory_region_init(
@@ -517,18 +464,21 @@
spci_vm_id_t receiver,
const struct spci_memory_region_constituent constituents[],
uint32_t constituent_count, uint32_t tag,
- spci_memory_region_flags_t flags, enum spci_memory_access access,
+ spci_memory_region_flags_t flags, enum spci_data_access data_access,
+ enum spci_instruction_access instruction_access,
enum spci_memory_type type, enum spci_memory_cacheability cacheability,
enum spci_memory_shareability shareability);
uint32_t spci_memory_retrieve_request_init(
- struct spci_memory_retrieve_request *request,
- spci_memory_handle_t handle, spci_vm_id_t sender, spci_vm_id_t receiver,
- uint32_t share_func, uint32_t tag, uint32_t page_count,
- enum spci_memory_access access, enum spci_memory_type type,
- enum spci_memory_cacheability cacheability,
+ struct spci_memory_region *memory_region, spci_memory_handle_t handle,
+ spci_vm_id_t sender, spci_vm_id_t receiver, uint32_t tag,
+ spci_memory_region_flags_t flags, enum spci_data_access data_access,
+ enum spci_instruction_access instruction_access,
+ enum spci_memory_type type, enum spci_memory_cacheability cacheability,
enum spci_memory_shareability shareability);
uint32_t spci_retrieved_memory_region_init(
- struct spci_retrieved_memory_region *response, size_t response_max_size,
- spci_vm_id_t receiver,
+ struct spci_memory_region *response, size_t response_max_size,
+ spci_vm_id_t sender, spci_memory_attributes_t attributes,
+ spci_memory_region_flags_t flags, spci_memory_handle_t handle,
+ spci_vm_id_t receiver, spci_memory_access_permissions_t permissions,
const struct spci_memory_region_constituent constituents[],
- uint32_t constituent_count, uint32_t page_count);
+ uint32_t constituent_count);
diff --git a/src/api.c b/src/api.c
index 46aaefa..4b84c91 100644
--- a/src/api.c
+++ b/src/api.c
@@ -386,8 +386,8 @@
case SPCI_MEM_LEND_32:
case SPCI_MEM_SHARE_32:
return (struct spci_value){.func = receiver->mailbox.recv_func,
- .arg3 = receiver->mailbox.recv_size,
- .arg4 = receiver->mailbox.recv_size};
+ .arg1 = receiver->mailbox.recv_size,
+ .arg2 = receiver->mailbox.recv_size};
default:
/* This should never be reached, but return an error in case. */
dlog_error("Tried to return an invalid message function %#x\n",
@@ -1461,10 +1461,9 @@
}
}
-struct spci_value api_spci_mem_send(uint32_t share_func, ipaddr_t address,
- uint32_t page_count,
- uint32_t fragment_length, uint32_t length,
- spci_cookie_t cookie, struct vcpu *current,
+struct spci_value api_spci_mem_send(uint32_t share_func, uint32_t length,
+ uint32_t fragment_length, ipaddr_t address,
+ uint32_t page_count, struct vcpu *current,
struct vcpu **next)
{
struct vm *from = current->vm;
@@ -1481,8 +1480,8 @@
return spci_error(SPCI_INVALID_PARAMETERS);
}
- if ((cookie == 0) != (fragment_length == length)) {
- /* Cookie is required iff there are multiple fragments. */
+ if (fragment_length != length) {
+ dlog_verbose("Fragmentation not yet supported.\n");
return spci_error(SPCI_INVALID_PARAMETERS);
}
@@ -1523,12 +1522,12 @@
goto out;
}
- if (memory_region->attribute_count != 1) {
+ if (memory_region->receiver_count != 1) {
/* Hafnium doesn't support multi-way memory sharing for now. */
dlog_verbose(
"Multi-way memory sharing not supported (got %d "
- "attribute descriptors, expected 0).\n",
- memory_region->attribute_count);
+ "endpoint memory access descriptors, expected 1).\n",
+ memory_region->receiver_count);
ret = spci_error(SPCI_INVALID_PARAMETERS);
goto out;
}
@@ -1536,7 +1535,7 @@
/*
* Ensure that the receiver VM exists and isn't the same as the sender.
*/
- to = vm_find(memory_region->attributes[0].receiver);
+ to = vm_find(memory_region->receivers[0].receiver_permissions.receiver);
if (to == NULL || to == from) {
dlog_verbose("Invalid receiver.\n");
ret = spci_error(SPCI_INVALID_PARAMETERS);
@@ -1593,14 +1592,16 @@
return ret;
}
-struct spci_value api_spci_mem_retrieve_req(
- ipaddr_t address, uint32_t page_count, uint32_t fragment_length,
- uint32_t length, spci_cookie_t cookie, struct vcpu *current)
+struct spci_value api_spci_mem_retrieve_req(uint32_t length,
+ uint32_t fragment_length,
+ ipaddr_t address,
+ uint32_t page_count,
+ struct vcpu *current)
{
struct vm *to = current->vm;
struct vm_locked to_locked;
const void *to_msg;
- struct spci_memory_retrieve_request *retrieve_request;
+ struct spci_memory_region *retrieve_request;
uint32_t message_buffer_size;
struct spci_value ret;
@@ -1612,15 +1613,13 @@
return spci_error(SPCI_INVALID_PARAMETERS);
}
- if (fragment_length == length && cookie != 0) {
- /* Cookie is only allowed if there are multiple fragments. */
- dlog_verbose("Unexpected cookie %d.\n", cookie);
+ if (fragment_length != length) {
+ dlog_verbose("Fragmentation not yet supported.\n");
return spci_error(SPCI_INVALID_PARAMETERS);
}
retrieve_request =
- (struct spci_memory_retrieve_request *)cpu_get_buffer(
- current->cpu);
+ (struct spci_memory_region *)cpu_get_buffer(current->cpu);
message_buffer_size = cpu_get_buffer_size(current->cpu);
if (length > HF_MAILBOX_SIZE || length > message_buffer_size) {
dlog_verbose("Retrieve request too long.\n");
@@ -1718,7 +1717,8 @@
return ret;
}
-struct spci_value api_spci_mem_reclaim(uint32_t handle, uint32_t flags,
+struct spci_value api_spci_mem_reclaim(spci_memory_handle_t handle,
+ spci_memory_region_flags_t flags,
struct vcpu *current)
{
struct vm *to = current->vm;
diff --git a/src/arch/aarch64/hypervisor/handler.c b/src/arch/aarch64/hypervisor/handler.c
index 924d024..d3a4a75 100644
--- a/src/arch/aarch64/hypervisor/handler.c
+++ b/src/arch/aarch64/hypervisor/handler.c
@@ -366,20 +366,22 @@
case SPCI_MEM_DONATE_32:
case SPCI_MEM_LEND_32:
case SPCI_MEM_SHARE_32:
- *args = api_spci_mem_send(func, ipa_init(args->arg1),
- args->arg2, args->arg3, args->arg4,
- args->arg5, current(), next);
+ *args = api_spci_mem_send(func, args->arg1, args->arg2,
+ ipa_init(args->arg3), args->arg4,
+ current(), next);
return true;
case SPCI_MEM_RETRIEVE_REQ_32:
- *args = api_spci_mem_retrieve_req(
- ipa_init(args->arg1), args->arg2, args->arg3,
- args->arg4, args->arg5, current());
+ *args = api_spci_mem_retrieve_req(args->arg1, args->arg2,
+ ipa_init(args->arg3),
+ args->arg4, current());
return true;
case SPCI_MEM_RELINQUISH_32:
*args = api_spci_mem_relinquish(current());
return true;
case SPCI_MEM_RECLAIM_32:
- *args = api_spci_mem_reclaim(args->arg1, args->arg2, current());
+ *args = api_spci_mem_reclaim(
+ (args->arg1 & 0xffffffff) | args->arg2 << 32,
+ args->arg3, current());
return true;
}
diff --git a/src/spci_memory.c b/src/spci_memory.c
index eeafc05..5da022c 100644
--- a/src/spci_memory.c
+++ b/src/spci_memory.c
@@ -37,22 +37,17 @@
static_assert(sizeof(struct spci_memory_region_constituent) % 16 == 0,
"struct spci_memory_region_constituent must be a multiple of 16 "
"bytes long.");
-static_assert(sizeof(struct spci_memory_region_attributes) % 16 == 0,
- "struct spci_memory_region_attributes must be a multiple of 16 "
+static_assert(sizeof(struct spci_composite_memory_region) % 16 == 0,
+ "struct spci_composite_memory_region must be a multiple of 16 "
"bytes long.");
+static_assert(sizeof(struct spci_memory_region_attributes) == 4,
+ "struct spci_memory_region_attributes must be 4 bytes long.");
+static_assert(sizeof(struct spci_memory_access) % 16 == 0,
+ "struct spci_memory_access must be a multiple of 16 bytes long.");
static_assert(sizeof(struct spci_memory_region) % 16 == 0,
"struct spci_memory_region must be a multiple of 16 bytes long.");
-static_assert(sizeof(struct spci_receiver_address_range) % 16 == 0,
- "struct spci_receiver_address_range must be a multiple of 16 "
- "bytes long.");
-static_assert(sizeof(struct spci_retrieved_memory_region) % 16 == 0,
- "struct spci_retrieved_memory_region must be a multiple of 16 "
- "bytes long.");
-static_assert(sizeof(struct spci_memory_retrieve_properties) % 16 == 0,
- "struct spci_memory_retrieve_properties must be a multiple of 16 "
- "bytes long.");
-static_assert(sizeof(struct spci_memory_retrieve_request) % 16 == 0,
- "struct spci_memory_retrieve_request must be a multiple of 16 "
+static_assert(sizeof(struct spci_mem_relinquish) % 16 == 0,
+ "struct spci_mem_relinquish must be a multiple of 16 "
"bytes long.");
struct spci_memory_share_state {
@@ -101,7 +96,7 @@
struct spci_memory_region *memory_region,
spci_memory_handle_t *handle)
{
- uint32_t i;
+ uint64_t i;
CHECK(memory_region != NULL);
@@ -208,17 +203,21 @@
return;
}
- dlog("from VM %d, tag %d, flags %#x, %d total pages in %d constituents "
- "to %d recipients [",
- memory_region->sender, memory_region->tag, memory_region->flags,
- memory_region->page_count, memory_region->constituent_count,
- memory_region->attribute_count);
- for (i = 0; i < memory_region->attribute_count; ++i) {
+ dlog("from VM %d, attributes %#x, flags %#x, handle %#x, tag %d, to %d "
+ "recipients [",
+ memory_region->sender, memory_region->attributes,
+ memory_region->flags, memory_region->handle, memory_region->tag,
+ memory_region->receiver_count);
+ for (i = 0; i < memory_region->receiver_count; ++i) {
if (i != 0) {
dlog(", ");
}
- dlog("VM %d: %#x", memory_region->attributes[i].receiver,
- memory_region->attributes[i].memory_attributes);
+ dlog("VM %d: %#x (offset %d)",
+ memory_region->receivers[i].receiver_permissions.receiver,
+ memory_region->receivers[i]
+ .receiver_permissions.permissions,
+ memory_region->receivers[i]
+ .composite_memory_region_offset);
}
dlog("]");
}
@@ -264,23 +263,32 @@
}
/* TODO: Add device attributes: GRE, cacheability, shareability. */
-static inline uint32_t spci_memory_attrs_to_mode(uint16_t memory_attributes)
+static inline uint32_t spci_memory_permissions_to_mode(
+ spci_memory_access_permissions_t permissions)
{
uint32_t mode = 0;
- switch (spci_get_memory_access_attr(memory_attributes)) {
- case SPCI_MEMORY_RO_NX:
+ switch (spci_get_data_access_attr(permissions)) {
+ case SPCI_DATA_ACCESS_RO:
mode = MM_MODE_R;
break;
- case SPCI_MEMORY_RO_X:
- mode = MM_MODE_R | MM_MODE_X;
- break;
- case SPCI_MEMORY_RW_NX:
+ case SPCI_DATA_ACCESS_RW:
+ case SPCI_DATA_ACCESS_NOT_SPECIFIED:
mode = MM_MODE_R | MM_MODE_W;
break;
- case SPCI_MEMORY_RW_X:
- mode = MM_MODE_R | MM_MODE_W | MM_MODE_X;
+ case SPCI_DATA_ACCESS_RESERVED:
+ panic("Tried to convert SPCI_DATA_ACCESS_RESERVED.");
+ }
+
+ switch (spci_get_instruction_access_attr(permissions)) {
+ case SPCI_INSTRUCTION_ACCESS_NX:
break;
+ case SPCI_INSTRUCTION_ACCESS_X:
+ case SPCI_INSTRUCTION_ACCESS_NOT_SPECIFIED:
+ mode |= MM_MODE_X;
+ break;
+ case SPCI_INSTRUCTION_ACCESS_RESERVED:
+ panic("Tried to convert SPCI_INSTRUCTION_ACCESS_RESERVED.");
}
return mode;
@@ -289,9 +297,9 @@
/**
* Get the current mode in the stage-2 page table of the given vm of all the
* pages in the given constituents, if they all have the same mode, or return
- * false if not.
+ * an appropriate SPCI error if not.
*/
-static bool constituents_get_mode(
+static struct spci_value constituents_get_mode(
struct vm_locked vm, uint32_t *orig_mode,
struct spci_memory_region_constituent *constituents,
uint32_t constituent_count)
@@ -303,13 +311,11 @@
* Fail if there are no constituents. Otherwise we would get an
* uninitialised *orig_mode.
*/
- return false;
+ return spci_error(SPCI_INVALID_PARAMETERS);
}
for (i = 0; i < constituent_count; ++i) {
- ipaddr_t begin =
- ipa_init(spci_memory_region_constituent_get_address(
- &constituents[i]));
+ ipaddr_t begin = ipa_init(constituents[i].address);
size_t size = constituents[i].page_count * PAGE_SIZE;
ipaddr_t end = ipa_add(begin, size);
uint32_t current_mode;
@@ -317,7 +323,7 @@
/* Fail if addresses are not page-aligned. */
if (!is_aligned(ipa_addr(begin), PAGE_SIZE) ||
!is_aligned(ipa_addr(end), PAGE_SIZE)) {
- return false;
+ return spci_error(SPCI_INVALID_PARAMETERS);
}
/*
@@ -326,7 +332,7 @@
*/
if (!mm_vm_get_mode(&vm.vm->ptable, begin, end,
¤t_mode)) {
- return false;
+ return spci_error(SPCI_DENIED);
}
/*
@@ -335,11 +341,11 @@
if (i == 0) {
*orig_mode = current_mode;
} else if (current_mode != *orig_mode) {
- return false;
+ return spci_error(SPCI_DENIED);
}
}
- return true;
+ return (struct spci_value){.func = SPCI_SUCCESS_32};
}
/**
@@ -348,32 +354,37 @@
* to the sending VM.
*
* Returns:
- * The error code false indicates that:
- * 1) a state transition was not found;
- * 2) the pages being shared do not have the same mode within the <from> VM;
- * 3) The beginning and end IPAs are not page aligned;
- * 4) The requested share type was not handled.
- * Success is indicated by true.
- *
+ * 1) SPCI_DENIED if a state transition was not found;
+ * 2) SPCI_DENIED if the pages being shared do not have the same mode within
+ * the <from> VM;
+ * 3) SPCI_INVALID_PARAMETERS if the beginning and end IPAs are not page
+ * aligned;
+ * 4) SPCI_INVALID_PARAMETERS if the requested share type was not handled.
+ * Or SPCI_SUCCESS on success.
*/
-static bool spci_send_check_transition(
- struct vm_locked from, uint32_t share_func, uint32_t *orig_from_mode,
+static struct spci_value spci_send_check_transition(
+ struct vm_locked from, uint32_t share_func,
+ spci_memory_access_permissions_t permissions, uint32_t *orig_from_mode,
struct spci_memory_region_constituent *constituents,
uint32_t constituent_count, uint32_t *from_mode)
{
const uint32_t state_mask =
MM_MODE_INVALID | MM_MODE_UNOWNED | MM_MODE_SHARED;
+ const uint32_t required_from_mode =
+ spci_memory_permissions_to_mode(permissions);
+ struct spci_value ret;
- if (!constituents_get_mode(from, orig_from_mode, constituents,
- constituent_count)) {
- return false;
+ ret = constituents_get_mode(from, orig_from_mode, constituents,
+ constituent_count);
+ if (ret.func != SPCI_SUCCESS_32) {
+ return ret;
}
/* Ensure the address range is normal memory and not a device. */
if (*orig_from_mode & MM_MODE_D) {
dlog_verbose("Can't share device memory (mode is %#x).\n",
*orig_from_mode);
- return false;
+ return spci_error(SPCI_DENIED);
}
/*
@@ -381,7 +392,15 @@
* memory.
*/
if ((*orig_from_mode & state_mask) != 0) {
- return false;
+ return spci_error(SPCI_DENIED);
+ }
+
+ if ((*orig_from_mode & required_from_mode) != required_from_mode) {
+ dlog_verbose(
+ "Sender tried to send memory with permissions which "
+ "required mode %#x but only had %#x itself.\n",
+ required_from_mode, *orig_from_mode);
+ return spci_error(SPCI_DENIED);
}
/* Find the appropriate new mode. */
@@ -400,13 +419,13 @@
break;
default:
- return false;
+ return spci_error(SPCI_INVALID_PARAMETERS);
}
- return true;
+ return (struct spci_value){.func = SPCI_SUCCESS_32};
}
-static bool spci_relinquish_check_transition(
+static struct spci_value spci_relinquish_check_transition(
struct vm_locked from, uint32_t *orig_from_mode,
struct spci_memory_region_constituent *constituents,
uint32_t constituent_count, uint32_t *from_mode)
@@ -414,17 +433,19 @@
const uint32_t state_mask =
MM_MODE_INVALID | MM_MODE_UNOWNED | MM_MODE_SHARED;
uint32_t orig_from_state;
+ struct spci_value ret;
- if (!constituents_get_mode(from, orig_from_mode, constituents,
- constituent_count)) {
- return false;
+ ret = constituents_get_mode(from, orig_from_mode, constituents,
+ constituent_count);
+ if (ret.func != SPCI_SUCCESS_32) {
+ return ret;
}
/* Ensure the address range is normal memory and not a device. */
if (*orig_from_mode & MM_MODE_D) {
dlog_verbose("Can't relinquish device memory (mode is %#x).\n",
*orig_from_mode);
- return false;
+ return spci_error(SPCI_DENIED);
}
/*
@@ -438,13 +459,13 @@
"but "
"should be %#x).\n",
*orig_from_mode, orig_from_state, MM_MODE_UNOWNED);
- return false;
+ return spci_error(SPCI_DENIED);
}
/* Find the appropriate new mode. */
*from_mode = (~state_mask & *orig_from_mode) | MM_MODE_UNMAPPED_MASK;
- return true;
+ return (struct spci_value){.func = SPCI_SUCCESS_32};
}
/**
@@ -453,24 +474,27 @@
* to the retrieving VM.
*
* Returns:
- * The error code false indicates that:
- * 1) a state transition was not found;
- * 2) the pages being shared do not have the same mode within the <to> VM;
- * 3) The beginning and end IPAs are not page aligned;
- * 4) The requested share type was not handled.
- * Success is indicated by true.
+ * 1) SPCI_DENIED if a state transition was not found;
+ * 2) SPCI_DENIED if the pages being shared do not have the same mode within
+ * the <to> VM;
+ * 3) SPCI_INVALID_PARAMETERS if the beginning and end IPAs are not page
+ * aligned;
+ * 4) SPCI_INVALID_PARAMETERS if the requested share type was not handled.
+ * Or SPCI_SUCCESS on success.
*/
-static bool spci_retrieve_check_transition(
+static struct spci_value spci_retrieve_check_transition(
struct vm_locked to, uint32_t share_func,
struct spci_memory_region_constituent *constituents,
uint32_t constituent_count, uint32_t memory_to_attributes,
uint32_t *to_mode)
{
uint32_t orig_to_mode;
+ struct spci_value ret;
- if (!constituents_get_mode(to, &orig_to_mode, constituents,
- constituent_count)) {
- return false;
+ ret = constituents_get_mode(to, &orig_to_mode, constituents,
+ constituent_count);
+ if (ret.func != SPCI_SUCCESS_32) {
+ return ret;
}
if (share_func == SPCI_MEM_RECLAIM_32) {
@@ -480,7 +504,7 @@
if (orig_to_state != MM_MODE_INVALID &&
orig_to_state != MM_MODE_SHARED) {
- return false;
+ return spci_error(SPCI_DENIED);
}
} else {
/*
@@ -490,7 +514,7 @@
*/
if ((orig_to_mode & MM_MODE_UNMAPPED_MASK) !=
MM_MODE_UNMAPPED_MASK) {
- return false;
+ return spci_error(SPCI_DENIED);
}
}
@@ -514,10 +538,10 @@
break;
default:
- return false;
+ return spci_error(SPCI_INVALID_PARAMETERS);
}
- return true;
+ return (struct spci_value){.func = SPCI_SUCCESS_32};
}
/**
@@ -546,9 +570,8 @@
/* Iterate over the memory region constituents. */
for (uint32_t index = 0; index < constituent_count; index++) {
size_t size = constituents[index].page_count * PAGE_SIZE;
- paddr_t pa_begin = pa_from_ipa(
- ipa_init(spci_memory_region_constituent_get_address(
- &constituents[index])));
+ paddr_t pa_begin =
+ pa_from_ipa(ipa_init(constituents[index].address));
paddr_t pa_end = pa_add(pa_begin, size);
if (commit) {
@@ -626,9 +649,7 @@
/* Iterate over the memory region constituents. */
for (uint32_t i = 0; i < constituent_count; ++i) {
size_t size = constituents[i].page_count * PAGE_SIZE;
- paddr_t begin = pa_from_ipa(
- ipa_init(spci_memory_region_constituent_get_address(
- &constituents[i])));
+ paddr_t begin = pa_from_ipa(ipa_init(constituents[i].address));
paddr_t end = pa_add(begin, size);
if (!clear_memory(begin, end, &local_page_pool)) {
@@ -666,13 +687,16 @@
* erroneous;
* 2) SPCI_NO_MEMORY - Hafnium did not have sufficient memory to complete
* the request.
+ * 3) SPCI_DENIED - The sender doesn't have sufficient access to send the
+ * memory with the given permissions.
* Success is indicated by SPCI_SUCCESS.
*/
static struct spci_value spci_send_memory(
struct vm_locked from_locked,
struct spci_memory_region_constituent *constituents,
uint32_t constituent_count, uint32_t share_func,
- struct mpool *page_pool, bool clear)
+ spci_memory_access_permissions_t permissions, struct mpool *page_pool,
+ bool clear)
{
struct vm *from = from_locked.vm;
uint32_t orig_from_mode;
@@ -681,10 +705,10 @@
struct spci_value ret;
/*
- * Make sure constituents are properly aligned to a 32-bit boundary. If
- * not we would get alignment faults trying to read (32-bit) values.
+ * Make sure constituents are properly aligned to a 64-bit boundary. If
+ * not we would get alignment faults trying to read (64-bit) values.
*/
- if (!is_aligned(constituents, 4)) {
+ if (!is_aligned(constituents, 8)) {
return spci_error(SPCI_INVALID_PARAMETERS);
}
@@ -693,10 +717,11 @@
* all constituents of a memory region being shared are at the same
* state.
*/
- if (!spci_send_check_transition(from_locked, share_func,
- &orig_from_mode, constituents,
- constituent_count, &from_mode)) {
- return spci_error(SPCI_INVALID_PARAMETERS);
+ ret = spci_send_check_transition(from_locked, share_func, permissions,
+ &orig_from_mode, constituents,
+ constituent_count, &from_mode);
+ if (ret.func != SPCI_SUCCESS_32) {
+ return ret;
}
/*
@@ -798,11 +823,12 @@
* that all constituents of the memory region being retrieved are at the
* same state.
*/
- if (!spci_retrieve_check_transition(to_locked, share_func, constituents,
- constituent_count,
- memory_to_attributes, &to_mode)) {
+ ret = spci_retrieve_check_transition(to_locked, share_func,
+ constituents, constituent_count,
+ memory_to_attributes, &to_mode);
+ if (ret.func != SPCI_SUCCESS_32) {
dlog_verbose("Invalid transition.\n");
- return spci_error(SPCI_INVALID_PARAMETERS);
+ return ret;
}
/*
@@ -868,11 +894,12 @@
struct mpool local_page_pool;
struct spci_value ret;
- if (!spci_relinquish_check_transition(from_locked, &orig_from_mode,
- constituents, constituent_count,
- &from_mode)) {
+ ret = spci_relinquish_check_transition(from_locked, &orig_from_mode,
+ constituents, constituent_count,
+ &from_mode);
+ if (ret.func != SPCI_SUCCESS_32) {
dlog_verbose("Invalid transition.\n");
- return spci_error(SPCI_INVALID_PARAMETERS);
+ return ret;
}
/*
@@ -937,6 +964,154 @@
}
/**
+ * Check that the given `memory_region` represents a valid memory send request
+ * of the given `share_func` type, return the clear flag and permissions via the
+ * respective output parameters, and update the permissions if necessary.
+ * Returns SPCI_SUCCESS if the request was valid, or the relevant SPCI_ERROR if
+ * not.
+ */
+static struct spci_value spci_memory_send_validate(
+ struct vm *to, struct vm_locked from_locked,
+ struct spci_memory_region *memory_region, uint32_t memory_share_size,
+ uint32_t share_func, bool *clear,
+ spci_memory_access_permissions_t *permissions)
+{
+ struct spci_composite_memory_region *composite;
+ uint32_t receivers_size;
+ uint32_t constituents_size;
+ enum spci_data_access data_access;
+ enum spci_instruction_access instruction_access;
+
+ CHECK(clear != NULL);
+ CHECK(permissions != NULL);
+
+ /* The sender must match the message sender. */
+ if (memory_region->sender != from_locked.vm->id) {
+ dlog_verbose("Invalid sender %d.\n", memory_region->sender);
+ return spci_error(SPCI_INVALID_PARAMETERS);
+ }
+
+ /* We only support a single recipient. */
+ if (memory_region->receiver_count != 1) {
+ dlog_verbose("Multiple recipients not supported.\n");
+ return spci_error(SPCI_INVALID_PARAMETERS);
+ }
+
+ /*
+ * Ensure that the composite header is within the memory bounds and
+ * doesn't overlap the first part of the message.
+ */
+ receivers_size = sizeof(struct spci_memory_access) *
+ memory_region->receiver_count;
+ if (memory_region->receivers[0].composite_memory_region_offset <
+ sizeof(struct spci_memory_region) + receivers_size ||
+ memory_region->receivers[0].composite_memory_region_offset +
+ sizeof(struct spci_composite_memory_region) >=
+ memory_share_size) {
+ dlog_verbose(
+ "Invalid composite memory region descriptor offset.\n");
+ return spci_error(SPCI_INVALID_PARAMETERS);
+ }
+
+ composite = spci_memory_region_get_composite(memory_region, 0);
+
+ /*
+ * Ensure the number of constituents are within the memory
+ * bounds.
+ */
+ constituents_size = sizeof(struct spci_memory_region_constituent) *
+ composite->constituent_count;
+ if (memory_share_size !=
+ memory_region->receivers[0].composite_memory_region_offset +
+ sizeof(struct spci_composite_memory_region) +
+ constituents_size) {
+ dlog_verbose("Invalid size %d or constituent offset %d.\n",
+ memory_share_size,
+ memory_region->receivers[0]
+ .composite_memory_region_offset);
+ return spci_error(SPCI_INVALID_PARAMETERS);
+ }
+
+ /* The recipient must match the message recipient. */
+ if (memory_region->receivers[0].receiver_permissions.receiver !=
+ to->id) {
+ return spci_error(SPCI_INVALID_PARAMETERS);
+ }
+
+ *clear = memory_region->flags & SPCI_MEMORY_REGION_FLAG_CLEAR;
+ /*
+ * Clear is not allowed for memory sharing, as the sender still has
+ * access to the memory.
+ */
+ if (*clear && share_func == SPCI_MEM_SHARE_32) {
+ dlog_verbose("Memory can't be cleared while being shared.\n");
+ return spci_error(SPCI_INVALID_PARAMETERS);
+ }
+
+ /* No other flags are allowed/supported here. */
+ if (memory_region->flags & ~SPCI_MEMORY_REGION_FLAG_CLEAR) {
+ dlog_verbose("Invalid flags %#x.\n", memory_region->flags);
+ return spci_error(SPCI_INVALID_PARAMETERS);
+ }
+
+ /* Check that the permissions are valid. */
+ *permissions =
+ memory_region->receivers[0].receiver_permissions.permissions;
+ data_access = spci_get_data_access_attr(*permissions);
+ instruction_access = spci_get_instruction_access_attr(*permissions);
+ if (data_access == SPCI_DATA_ACCESS_RESERVED ||
+ instruction_access == SPCI_INSTRUCTION_ACCESS_RESERVED) {
+ dlog_verbose("Reserved value for receiver permissions %#x.\n",
+ *permissions);
+ return spci_error(SPCI_INVALID_PARAMETERS);
+ }
+ if (instruction_access != SPCI_INSTRUCTION_ACCESS_NOT_SPECIFIED) {
+ dlog_verbose(
+ "Invalid instruction access permissions %#x for "
+ "sending memory.\n",
+ *permissions);
+ return spci_error(SPCI_INVALID_PARAMETERS);
+ }
+ if (share_func == SPCI_MEM_SHARE_32) {
+ if (data_access == SPCI_DATA_ACCESS_NOT_SPECIFIED) {
+ dlog_verbose(
+ "Invalid data access permissions %#x for "
+ "sharing memory.\n",
+ *permissions);
+ return spci_error(SPCI_INVALID_PARAMETERS);
+ }
+ /*
+ * According to section 6.11.3 of the FF-A spec NX is required
+ * for share operations (but must not be specified by the
+ * sender) so set it in the copy that we store, ready to be
+ * returned to the retriever.
+ */
+ spci_set_instruction_access_attr(permissions,
+ SPCI_INSTRUCTION_ACCESS_NX);
+ memory_region->receivers[0].receiver_permissions.permissions =
+ *permissions;
+ }
+ if (share_func == SPCI_MEM_LEND_32 &&
+ data_access == SPCI_DATA_ACCESS_NOT_SPECIFIED) {
+ dlog_verbose(
+ "Invalid data access permissions %#x for lending "
+ "memory.\n",
+ *permissions);
+ return spci_error(SPCI_INVALID_PARAMETERS);
+ }
+ if (share_func == SPCI_MEM_DONATE_32 &&
+ data_access != SPCI_DATA_ACCESS_NOT_SPECIFIED) {
+ dlog_verbose(
+ "Invalid data access permissions %#x for donating "
+ "memory.\n",
+ *permissions);
+ return spci_error(SPCI_INVALID_PARAMETERS);
+ }
+
+ return (struct spci_value){.func = SPCI_SUCCESS_32};
+}
+
+/**
* Validates a call to donate, lend or share memory and then updates the stage-2
* page tables. Specifically, check if the message length and number of memory
* region constituents match, and if the transition is valid for the type of
@@ -955,62 +1130,39 @@
uint32_t memory_share_size,
uint32_t share_func, struct mpool *page_pool)
{
- struct spci_memory_region_constituent *constituents =
- spci_memory_region_get_constituents(memory_region);
- uint32_t constituent_count = memory_region->constituent_count;
- uint32_t attributes_size;
- uint32_t constituents_size;
+ struct spci_composite_memory_region *composite;
bool clear;
+ spci_memory_access_permissions_t permissions;
struct spci_value ret;
spci_memory_handle_t handle;
/*
- * Ensure the number of constituents are within the memory
- * bounds.
+ * If there is an error validating the `memory_region` then we need to
+ * free it because we own it but we won't be storing it in a share state
+ * after all.
*/
- attributes_size = sizeof(struct spci_memory_region_attributes) *
- memory_region->attribute_count;
- constituents_size = sizeof(struct spci_memory_region_constituent) *
- constituent_count;
- if (memory_region->constituent_offset <
- sizeof(struct spci_memory_region) + attributes_size ||
- memory_share_size !=
- memory_region->constituent_offset + constituents_size) {
- dlog_verbose("Invalid size %d or constituent offset %d.\n",
- memory_share_size,
- memory_region->constituent_offset);
+ ret = spci_memory_send_validate(to, from_locked, memory_region,
+ memory_share_size, share_func, &clear,
+ &permissions);
+ if (ret.func != SPCI_SUCCESS_32) {
mpool_free(page_pool, memory_region);
- return spci_error(SPCI_INVALID_PARAMETERS);
+ return ret;
}
- /* The sender must match the message sender. */
- if (memory_region->sender != from_locked.vm->id) {
- dlog_verbose("Invalid sender %d.\n", memory_region->sender);
- mpool_free(page_pool, memory_region);
- return spci_error(SPCI_INVALID_PARAMETERS);
- }
-
- /* We only support a single recipient. */
- if (memory_region->attribute_count != 1) {
- dlog_verbose("Multiple recipients not supported.\n");
- mpool_free(page_pool, memory_region);
- return spci_error(SPCI_NOT_SUPPORTED);
- }
-
- /* The recipient must match the message recipient. */
- if (memory_region->attributes[0].receiver != to->id) {
- mpool_free(page_pool, memory_region);
- return spci_error(SPCI_INVALID_PARAMETERS);
- }
-
- clear = memory_region->flags & SPCI_MEMORY_REGION_FLAG_CLEAR;
- /*
- * Clear is not allowed for memory sharing, as the sender still has
- * access to the memory.
- */
- if (clear && share_func == SPCI_MEM_SHARE_32) {
- dlog_verbose("Memory can't be cleared while being shared.\n");
- return spci_error(SPCI_INVALID_PARAMETERS);
+ /* Set flag for share function, ready to be retrieved later. */
+ switch (share_func) {
+ case SPCI_MEM_SHARE_32:
+ memory_region->flags |=
+ SPCI_MEMORY_REGION_TRANSACTION_TYPE_SHARE;
+ break;
+ case SPCI_MEM_LEND_32:
+ memory_region->flags |=
+ SPCI_MEMORY_REGION_TRANSACTION_TYPE_LEND;
+ break;
+ case SPCI_MEM_DONATE_32:
+ memory_region->flags |=
+ SPCI_MEMORY_REGION_TRANSACTION_TYPE_DONATE;
+ break;
}
/*
@@ -1029,8 +1181,10 @@
dump_share_states();
/* Check that state is valid in sender page table and update. */
- ret = spci_send_memory(from_locked, constituents, constituent_count,
- share_func, page_pool, clear);
+ composite = spci_memory_region_get_composite(memory_region, 0);
+ ret = spci_send_memory(from_locked, composite->constituents,
+ composite->constituent_count, share_func,
+ permissions, page_pool, clear);
if (ret.func != SPCI_SUCCESS_32) {
if (to->id != HF_TEE_VM_ID) {
/* Free share state. */
@@ -1043,7 +1197,7 @@
}
if (to->id == HF_TEE_VM_ID) {
- /* Return directly, no need to allocate share state. */
+ /* No share state allocated here so no handle to return. */
return (struct spci_value){.func = SPCI_SUCCESS_32};
}
@@ -1051,20 +1205,27 @@
}
struct spci_value spci_memory_retrieve(
- struct vm_locked to_locked,
- struct spci_memory_retrieve_request *retrieve_request,
+ struct vm_locked to_locked, struct spci_memory_region *retrieve_request,
uint32_t retrieve_request_size, struct mpool *page_pool)
{
uint32_t expected_retrieve_request_size =
- sizeof(struct spci_memory_retrieve_request) +
- retrieve_request->retrieve_properties_count *
- sizeof(struct spci_memory_retrieve_properties);
+ sizeof(struct spci_memory_region) +
+ retrieve_request->receiver_count *
+ sizeof(struct spci_memory_access);
spci_memory_handle_t handle = retrieve_request->handle;
+ spci_memory_region_flags_t transaction_type =
+ retrieve_request->flags &
+ SPCI_MEMORY_REGION_TRANSACTION_TYPE_MASK;
struct spci_memory_region *memory_region;
- struct spci_memory_retrieve_properties *retrieve_properties;
+ spci_memory_access_permissions_t sent_permissions;
+ enum spci_data_access sent_data_access;
+ enum spci_instruction_access sent_instruction_access;
+ spci_memory_access_permissions_t requested_permissions;
+ enum spci_data_access requested_data_access;
+ enum spci_instruction_access requested_instruction_access;
+ spci_memory_access_permissions_t permissions;
uint32_t memory_to_attributes;
- struct spci_memory_region_constituent *constituents;
- uint32_t constituent_count;
+ struct spci_composite_memory_region *composite;
struct share_states_locked share_states;
struct spci_memory_share_state *share_state;
struct spci_value ret;
@@ -1080,6 +1241,15 @@
return spci_error(SPCI_INVALID_PARAMETERS);
}
+ if (retrieve_request->receiver_count != 1) {
+ dlog_verbose(
+ "Multi-way memory sharing not supported (got %d "
"receiver descriptors on SPCI_MEM_RETRIEVE_REQ, "
+ "expected 1).\n",
+ retrieve_request->receiver_count);
+ return spci_error(SPCI_INVALID_PARAMETERS);
+ }
+
share_states = share_states_lock();
if (!get_share_state(share_states, handle, &share_state)) {
dlog_verbose("Invalid handle %#x for SPCI_MEM_RETRIEVE_REQ.\n",
@@ -1088,19 +1258,28 @@
goto out;
}
- if (retrieve_request->share_func != share_state->share_func) {
+ memory_region = share_state->memory_region;
+ CHECK(memory_region != NULL);
+
+ /*
+ * Check that the transaction type expected by the receiver is correct,
+ * if it has been specified.
+ */
+ if (transaction_type !=
+ SPCI_MEMORY_REGION_TRANSACTION_TYPE_UNSPECIFIED &&
+ transaction_type != (memory_region->flags &
+ SPCI_MEMORY_REGION_TRANSACTION_TYPE_MASK)) {
dlog_verbose(
"Incorrect transaction type %#x for "
"SPCI_MEM_RETRIEVE_REQ, expected %#x for handle %#x.\n",
- retrieve_request->share_func, share_state->share_func,
+ transaction_type,
+ memory_region->flags &
+ SPCI_MEMORY_REGION_TRANSACTION_TYPE_MASK,
handle);
ret = spci_error(SPCI_INVALID_PARAMETERS);
goto out;
}
- memory_region = share_state->memory_region;
- CHECK(memory_region != NULL);
-
if (retrieve_request->sender != memory_region->sender) {
dlog_verbose(
"Incorrect sender ID %d for SPCI_MEM_RETRIEVE_REQ, "
@@ -1120,11 +1299,25 @@
goto out;
}
- if (memory_region->attributes[0].receiver != to_locked.vm->id) {
+ if (retrieve_request->receivers[0].receiver_permissions.receiver !=
+ to_locked.vm->id) {
+ dlog_verbose(
+ "Retrieve request receiver VM ID %d didn't match "
+ "caller of SPCI_MEM_RETRIEVE_REQ.\n",
+ retrieve_request->receivers[0]
+ .receiver_permissions.receiver);
+ ret = spci_error(SPCI_INVALID_PARAMETERS);
+ goto out;
+ }
+
+ if (memory_region->receivers[0].receiver_permissions.receiver !=
+ to_locked.vm->id) {
dlog_verbose(
"Incorrect receiver VM ID %d for "
"SPCI_MEM_RETRIEVE_REQ, expected %d for handle %#x.\n",
- to_locked.vm->id, memory_region->attributes[0].receiver,
+ to_locked.vm->id,
+ memory_region->receivers[0]
+ .receiver_permissions.receiver,
handle);
ret = spci_error(SPCI_INVALID_PARAMETERS);
goto out;
@@ -1137,64 +1330,97 @@
goto out;
}
- if (retrieve_request->attribute_count != 0) {
- dlog_verbose(
- "Multi-way memory sharing not supported (got %d "
- "attribute descriptors on SPCI_MEM_RETRIEVE_REQ, "
- "expected 0).\n",
- retrieve_request->attribute_count);
- ret = spci_error(SPCI_NOT_SUPPORTED);
- goto out;
- }
-
- if (retrieve_request->retrieve_properties_count != 1) {
- dlog_verbose(
- "Stream endpoints not supported (got %d retrieve "
- "properties descriptors on SPCI_MEM_RETRIEVE_REQ, "
- "expected 1).\n",
- retrieve_request->retrieve_properties_count);
- ret = spci_error(SPCI_INVALID_PARAMETERS);
- goto out;
- }
-
- retrieve_properties =
- spci_memory_retrieve_request_first_retrieve_properties(
- retrieve_request);
-
- if (retrieve_properties->attributes.receiver != to_locked.vm->id) {
- dlog_verbose(
- "Retrieve properties receiver VM ID %d didn't match "
- "caller of SPCI_MEM_RETRIEVE_REQ.\n",
- retrieve_properties->attributes.receiver);
- ret = spci_error(SPCI_INVALID_PARAMETERS);
- goto out;
- }
-
- if (retrieve_properties->page_count != memory_region->page_count) {
- dlog_verbose(
- "Incorrect page count %d for "
- "SPCI_MEM_RETRIEVE_REQ, expected %d for handle %#x.\n",
- retrieve_properties->page_count,
- memory_region->page_count, handle);
- ret = spci_error(SPCI_INVALID_PARAMETERS);
- goto out;
- }
-
- if (retrieve_properties->constituent_count != 0) {
+ if (retrieve_request->receivers[0].composite_memory_region_offset !=
+ 0) {
dlog_verbose(
"Retriever specified address ranges not supported (got "
+ "offset "
"%d).\n",
- retrieve_properties->constituent_count);
+ retrieve_request->receivers[0]
+ .composite_memory_region_offset);
ret = spci_error(SPCI_INVALID_PARAMETERS);
goto out;
}
- memory_to_attributes = spci_memory_attrs_to_mode(
- memory_region->attributes[0].memory_attributes);
+ /*
+ * Check permissions from sender against permissions requested by
+ * receiver.
+ */
+ /* TODO: Check attributes too. */
+ sent_permissions =
+ memory_region->receivers[0].receiver_permissions.permissions;
+ sent_data_access = spci_get_data_access_attr(sent_permissions);
+ sent_instruction_access =
+ spci_get_instruction_access_attr(sent_permissions);
+ requested_permissions =
+ retrieve_request->receivers[0].receiver_permissions.permissions;
+ requested_data_access =
+ spci_get_data_access_attr(requested_permissions);
+ requested_instruction_access =
+ spci_get_instruction_access_attr(requested_permissions);
+ permissions = 0;
+ switch (sent_data_access) {
+ case SPCI_DATA_ACCESS_NOT_SPECIFIED:
+ case SPCI_DATA_ACCESS_RW:
+ if (requested_data_access == SPCI_DATA_ACCESS_NOT_SPECIFIED ||
+ requested_data_access == SPCI_DATA_ACCESS_RW) {
+ spci_set_data_access_attr(&permissions,
+ SPCI_DATA_ACCESS_RW);
+ break;
+ }
+ /* Intentional fall-through. */
+ case SPCI_DATA_ACCESS_RO:
+ if (requested_data_access == SPCI_DATA_ACCESS_NOT_SPECIFIED ||
+ requested_data_access == SPCI_DATA_ACCESS_RO) {
+ spci_set_data_access_attr(&permissions,
+ SPCI_DATA_ACCESS_RO);
+ break;
+ }
+ dlog_verbose(
+ "Invalid data access requested; sender specified "
+ "permissions %#x but receiver requested %#x.\n",
+ sent_permissions, requested_permissions);
+ ret = spci_error(SPCI_DENIED);
+ goto out;
+ case SPCI_DATA_ACCESS_RESERVED:
+ panic("Got unexpected SPCI_DATA_ACCESS_RESERVED. Should be "
+ "checked before this point.");
+ }
+ switch (sent_instruction_access) {
+ case SPCI_INSTRUCTION_ACCESS_NOT_SPECIFIED:
+ case SPCI_INSTRUCTION_ACCESS_X:
+ if (requested_instruction_access ==
+ SPCI_INSTRUCTION_ACCESS_NOT_SPECIFIED ||
+ requested_instruction_access == SPCI_INSTRUCTION_ACCESS_X) {
+ spci_set_instruction_access_attr(
+ &permissions, SPCI_INSTRUCTION_ACCESS_X);
+ break;
+ }
+ case SPCI_INSTRUCTION_ACCESS_NX:
+ if (requested_instruction_access ==
+ SPCI_INSTRUCTION_ACCESS_NOT_SPECIFIED ||
+ requested_instruction_access ==
+ SPCI_INSTRUCTION_ACCESS_NX) {
+ spci_set_instruction_access_attr(
+ &permissions, SPCI_INSTRUCTION_ACCESS_NX);
+ break;
+ }
+ dlog_verbose(
+ "Invalid instruction access requested; sender "
+ "specified "
+ "permissions %#x but receiver requested %#x.\n",
+ sent_permissions, requested_permissions);
+ ret = spci_error(SPCI_DENIED);
+ goto out;
+ case SPCI_INSTRUCTION_ACCESS_RESERVED:
+ panic("Got unexpected SPCI_INSTRUCTION_ACCESS_RESERVED. Should "
+ "be checked before this point.");
+ }
+ memory_to_attributes = spci_memory_permissions_to_mode(permissions);
- constituents = spci_memory_region_get_constituents(memory_region);
- constituent_count = memory_region->constituent_count;
- ret = spci_retrieve_memory(to_locked, constituents, constituent_count,
+ composite = spci_memory_region_get_composite(memory_region, 0);
+ ret = spci_retrieve_memory(to_locked, composite->constituents,
+ composite->constituent_count,
memory_to_attributes,
share_state->share_func, false, page_pool);
if (ret.func != SPCI_SUCCESS_32) {
@@ -1205,9 +1431,12 @@
* Copy response to RX buffer of caller and deliver the message. This
* must be done before the share_state is (possibly) freed.
*/
+ /* TODO: combine attributes from sender and request. */
response_size = spci_retrieved_memory_region_init(
- to_locked.vm->mailbox.recv, HF_MAILBOX_SIZE, to_locked.vm->id,
- constituents, constituent_count, memory_region->page_count);
+ to_locked.vm->mailbox.recv, HF_MAILBOX_SIZE,
+ memory_region->sender, memory_region->attributes,
+ memory_region->flags, handle, to_locked.vm->id, permissions,
+ composite->constituents, composite->constituent_count);
to_locked.vm->mailbox.recv_size = response_size;
to_locked.vm->mailbox.recv_sender = HF_HYPERVISOR_VM_ID;
to_locked.vm->mailbox.recv_func = SPCI_MEM_RETRIEVE_RESP_32;
@@ -1225,8 +1454,8 @@
}
ret = (struct spci_value){.func = SPCI_MEM_RETRIEVE_RESP_32,
- .arg3 = response_size,
- .arg4 = response_size};
+ .arg1 = response_size,
+ .arg2 = response_size};
out:
share_states_unlock(&share_states);
@@ -1243,23 +1472,22 @@
struct spci_memory_share_state *share_state;
struct spci_memory_region *memory_region;
bool clear;
- struct spci_memory_region_constituent *constituents;
- uint32_t constituent_count;
+ struct spci_composite_memory_region *composite;
struct spci_value ret;
- if (relinquish_request->endpoint_count != 0) {
+ if (relinquish_request->endpoint_count != 1) {
dlog_verbose(
- "Stream endpoints not supported (got %d extra "
- "endpoints on SPCI_MEM_RELINQUISH, expected 0).\n",
+ "Stream endpoints not supported (got %d endpoints on "
+ "SPCI_MEM_RELINQUISH, expected 1).\n",
relinquish_request->endpoint_count);
return spci_error(SPCI_INVALID_PARAMETERS);
}
- if (relinquish_request->sender != from_locked.vm->id) {
+ if (relinquish_request->endpoints[0] != from_locked.vm->id) {
dlog_verbose(
"VM ID %d in relinquish message doesn't match calling "
"VM ID %d.\n",
- relinquish_request->sender, from_locked.vm->id);
+ relinquish_request->endpoints[0], from_locked.vm->id);
return spci_error(SPCI_INVALID_PARAMETERS);
}
@@ -1276,12 +1504,14 @@
memory_region = share_state->memory_region;
CHECK(memory_region != NULL);
- if (memory_region->attributes[0].receiver != from_locked.vm->id) {
+ if (memory_region->receivers[0].receiver_permissions.receiver !=
+ from_locked.vm->id) {
dlog_verbose(
"VM ID %d tried to relinquish memory region with "
"handle %#x but receiver was %d.\n",
from_locked.vm->id, handle,
- memory_region->attributes[0].receiver);
+ memory_region->receivers[0]
+ .receiver_permissions.receiver);
ret = spci_error(SPCI_INVALID_PARAMETERS);
goto out;
}
@@ -1307,10 +1537,10 @@
goto out;
}
- constituents = spci_memory_region_get_constituents(memory_region);
- constituent_count = memory_region->constituent_count;
- ret = spci_relinquish_memory(from_locked, constituents,
- constituent_count, page_pool, clear);
+ composite = spci_memory_region_get_composite(memory_region, 0);
+ ret = spci_relinquish_memory(from_locked, composite->constituents,
+ composite->constituent_count, page_pool,
+ clear);
if (ret.func == SPCI_SUCCESS_32) {
/*
@@ -1338,8 +1568,7 @@
struct share_states_locked share_states;
struct spci_memory_share_state *share_state;
struct spci_memory_region *memory_region;
- struct spci_memory_region_constituent *constituents;
- uint32_t constituent_count;
+ struct spci_composite_memory_region *composite;
uint32_t memory_to_attributes = MM_MODE_R | MM_MODE_W | MM_MODE_X;
struct spci_value ret;
@@ -1374,9 +1603,9 @@
goto out;
}
- constituents = spci_memory_region_get_constituents(memory_region);
- constituent_count = memory_region->constituent_count;
- ret = spci_retrieve_memory(to_locked, constituents, constituent_count,
+ composite = spci_memory_region_get_composite(memory_region, 0);
+ ret = spci_retrieve_memory(to_locked, composite->constituents,
+ composite->constituent_count,
memory_to_attributes, SPCI_MEM_RECLAIM_32,
clear, page_pool);
diff --git a/test/inc/test/vmapi/spci.h b/test/inc/test/vmapi/spci.h
index 24e9fc5..bab7f81 100644
--- a/test/inc/test/vmapi/spci.h
+++ b/test/inc/test/vmapi/spci.h
@@ -36,7 +36,14 @@
spci_vm_id_t recipient,
struct spci_memory_region_constituent constituents[],
uint32_t constituent_count, spci_memory_region_flags_t flags,
- enum spci_memory_access access);
+ enum spci_data_access send_data_access,
+ enum spci_data_access retrieve_data_access,
+ enum spci_instruction_access send_instruction_access,
+ enum spci_instruction_access retrieve_instruction_access);
spci_vm_id_t retrieve_memory_from_message(void *recv_buf, void *send_buf,
struct spci_value msg_ret,
spci_memory_handle_t *handle);
+spci_vm_id_t retrieve_memory_from_message_expect_fail(void *recv_buf,
+ void *send_buf,
+ struct spci_value msg_ret,
+ int32_t expected_error);
diff --git a/test/vmapi/common/spci.c b/test/vmapi/common/spci.c
index c3176b3..0bc3380 100644
--- a/test/vmapi/common/spci.c
+++ b/test/vmapi/common/spci.c
@@ -51,28 +51,30 @@
spci_vm_id_t recipient,
struct spci_memory_region_constituent constituents[],
uint32_t constituent_count, spci_memory_region_flags_t flags,
- enum spci_memory_access access)
+ enum spci_data_access send_data_access,
+ enum spci_data_access retrieve_data_access,
+ enum spci_instruction_access send_instruction_access,
+ enum spci_instruction_access retrieve_instruction_access)
{
uint32_t msg_size;
struct spci_value ret;
- uint32_t page_count = 0;
- uint32_t handle;
- uint32_t i;
+ spci_memory_handle_t handle;
/* Send the memory. */
msg_size = spci_memory_region_init(
tx_buffer, sender, recipient, constituents, constituent_count,
- 0, flags, access, SPCI_MEMORY_NORMAL_MEM,
- SPCI_MEMORY_CACHE_WRITE_BACK, SPCI_MEMORY_OUTER_SHAREABLE);
+ 0, flags, send_data_access, send_instruction_access,
+ SPCI_MEMORY_NORMAL_MEM, SPCI_MEMORY_CACHE_WRITE_BACK,
+ SPCI_MEMORY_OUTER_SHAREABLE);
switch (share_func) {
case SPCI_MEM_DONATE_32:
- ret = spci_mem_donate(msg_size, msg_size, 0);
+ ret = spci_mem_donate(msg_size, msg_size);
break;
case SPCI_MEM_LEND_32:
- ret = spci_mem_lend(msg_size, msg_size, 0);
+ ret = spci_mem_lend(msg_size, msg_size);
break;
case SPCI_MEM_SHARE_32:
- ret = spci_mem_share(msg_size, msg_size, 0);
+ ret = spci_mem_share(msg_size, msg_size);
break;
default:
FAIL("Invalid share_func %#x.\n", share_func);
@@ -82,18 +84,14 @@
EXPECT_EQ(ret.func, SPCI_SUCCESS_32);
handle = spci_mem_success_handle(ret);
- /* Count pages. */
- for (i = 0; i < constituent_count; ++i) {
- page_count += constituents[i].page_count;
- }
-
/*
* Send the appropriate retrieve request to the VM so that it can use it
* to retrieve the memory.
*/
msg_size = spci_memory_retrieve_request_init(
- tx_buffer, handle, sender, recipient, share_func, 0, page_count,
- access, SPCI_MEMORY_NORMAL_MEM, SPCI_MEMORY_CACHE_WRITE_BACK,
+ tx_buffer, handle, sender, recipient, 0, 0,
+ retrieve_data_access, retrieve_instruction_access,
+ SPCI_MEMORY_NORMAL_MEM, SPCI_MEMORY_CACHE_WRITE_BACK,
SPCI_MEMORY_OUTER_SHAREABLE);
EXPECT_EQ(spci_msg_send(sender, recipient, msg_size, 0).func,
SPCI_SUCCESS_32);
@@ -102,7 +100,7 @@
}
/*
- * Use the retrieve request from the recieve buffer to retrieve a memory region
+ * Use the retrieve request from the receive buffer to retrieve a memory region
* which has been sent to us. Returns the sender, and the handle via a return
* parameter.
*/
@@ -112,7 +110,7 @@
{
uint32_t msg_size;
struct spci_value ret;
- struct spci_retrieved_memory_region *memory_region;
+ struct spci_memory_region *memory_region;
spci_vm_id_t sender;
EXPECT_EQ(msg_ret.func, SPCI_MSG_SEND_32);
@@ -120,16 +118,44 @@
sender = spci_msg_send_sender(msg_ret);
if (handle != NULL) {
- struct spci_memory_retrieve_request *retrieve_request =
- (struct spci_memory_retrieve_request *)recv_buf;
+ struct spci_memory_region *retrieve_request =
+ (struct spci_memory_region *)recv_buf;
*handle = retrieve_request->handle;
}
memcpy_s(send_buf, HF_MAILBOX_SIZE, recv_buf, msg_size);
spci_rx_release();
- ret = spci_mem_retrieve_req(msg_size, msg_size, 0);
+ ret = spci_mem_retrieve_req(msg_size, msg_size);
EXPECT_EQ(ret.func, SPCI_MEM_RETRIEVE_RESP_32);
- memory_region = (struct spci_retrieved_memory_region *)recv_buf;
+ memory_region = (struct spci_memory_region *)recv_buf;
EXPECT_EQ(memory_region->receiver_count, 1);
+ EXPECT_EQ(memory_region->receivers[0].receiver_permissions.receiver,
+ hf_vm_get_id());
+
+ return sender;
+}
+
+/*
+ * Use the retrieve request from the receive buffer to retrieve a memory region
+ * which has been sent to us, expecting it to fail with the given error code.
+ * Returns the sender.
+ */
+spci_vm_id_t retrieve_memory_from_message_expect_fail(void *recv_buf,
+ void *send_buf,
+ struct spci_value msg_ret,
+ int32_t expected_error)
+{
+ uint32_t msg_size;
+ struct spci_value ret;
+ spci_vm_id_t sender;
+
+ EXPECT_EQ(msg_ret.func, SPCI_MSG_SEND_32);
+ msg_size = spci_msg_send_size(msg_ret);
+ sender = spci_msg_send_sender(msg_ret);
+
+ memcpy_s(send_buf, HF_MAILBOX_SIZE, recv_buf, msg_size);
+ spci_rx_release();
+ ret = spci_mem_retrieve_req(msg_size, msg_size);
+ EXPECT_SPCI_ERROR(ret, expected_error);
return sender;
}
diff --git a/test/vmapi/primary_with_secondaries/memory_sharing.c b/test/vmapi/primary_with_secondaries/memory_sharing.c
index 5050f11..f1a0aae 100644
--- a/test/vmapi/primary_with_secondaries/memory_sharing.c
+++ b/test/vmapi/primary_with_secondaries/memory_sharing.c
@@ -33,22 +33,26 @@
*/
static void check_cannot_send_memory(
struct mailbox_buffers mb,
- struct spci_value (*send_function)(uint32_t, uint32_t, uint32_t),
+ struct spci_value (*send_function)(uint32_t, uint32_t),
struct spci_memory_region_constituent constituents[],
int constituent_count, int32_t avoid_vm)
{
- enum spci_memory_access access[] = {SPCI_MEMORY_RO_NX, SPCI_MEMORY_RO_X,
- SPCI_MEMORY_RW_NX,
- SPCI_MEMORY_RW_X};
+ enum spci_data_access data_access[] = {
+ SPCI_DATA_ACCESS_NOT_SPECIFIED, SPCI_DATA_ACCESS_RO,
+ SPCI_DATA_ACCESS_RW, SPCI_DATA_ACCESS_RESERVED};
+ enum spci_instruction_access instruction_access[] = {
+ SPCI_INSTRUCTION_ACCESS_NOT_SPECIFIED,
+ SPCI_INSTRUCTION_ACCESS_NX, SPCI_INSTRUCTION_ACCESS_X,
+ SPCI_INSTRUCTION_ACCESS_RESERVED};
enum spci_memory_cacheability cacheability[] = {
- SPCI_MEMORY_CACHE_NON_CACHEABLE,
- SPCI_MEMORY_CACHE_WRITE_THROUGH, SPCI_MEMORY_CACHE_WRITE_BACK};
+ SPCI_MEMORY_CACHE_RESERVED, SPCI_MEMORY_CACHE_NON_CACHEABLE,
+ SPCI_MEMORY_CACHE_RESERVED_1, SPCI_MEMORY_CACHE_WRITE_BACK};
enum spci_memory_cacheability device[] = {
SPCI_MEMORY_DEV_NGNRNE, SPCI_MEMORY_DEV_NGNRE,
SPCI_MEMORY_DEV_NGRE, SPCI_MEMORY_DEV_GRE};
enum spci_memory_shareability shareability[] = {
- SPCI_MEMORY_SHARE_NON_SHAREABLE, SPCI_MEMORY_RESERVED,
+ SPCI_MEMORY_SHARE_NON_SHAREABLE, SPCI_MEMORY_SHARE_RESERVED,
SPCI_MEMORY_OUTER_SHAREABLE, SPCI_MEMORY_INNER_SHAREABLE};
uint32_t vms[] = {HF_PRIMARY_VM_ID, SERVICE_VM1, SERVICE_VM2};
@@ -56,45 +60,75 @@
size_t j = 0;
size_t k = 0;
size_t l = 0;
+ size_t m = 0;
for (i = 0; i < ARRAY_SIZE(vms); ++i) {
/* Optionally skip one VM as the send would succeed. */
if (vms[i] == avoid_vm) {
continue;
}
- for (j = 0; j < ARRAY_SIZE(access); ++j) {
- for (k = 0; k < ARRAY_SIZE(shareability); ++k) {
- for (l = 0; l < ARRAY_SIZE(cacheability); ++l) {
- uint32_t msg_size =
- spci_memory_region_init(
- mb.send,
- HF_PRIMARY_VM_ID,
- vms[i], constituents,
- constituent_count, 0, 0,
- access[j],
- SPCI_MEMORY_NORMAL_MEM,
- cacheability[l],
- shareability[k]);
- EXPECT_SPCI_ERROR(
- send_function(msg_size,
- msg_size, 0),
- SPCI_INVALID_PARAMETERS);
- }
- for (l = 0; l < ARRAY_SIZE(device); ++l) {
- uint32_t msg_size =
- spci_memory_region_init(
- mb.send,
- HF_PRIMARY_VM_ID,
- vms[i], constituents,
- constituent_count, 0, 0,
- access[j],
- SPCI_MEMORY_DEVICE_MEM,
- device[l],
- shareability[k]);
- EXPECT_SPCI_ERROR(
- send_function(msg_size,
- msg_size, 0),
- SPCI_INVALID_PARAMETERS);
+ for (j = 0; j < ARRAY_SIZE(data_access); ++j) {
+ for (k = 0; k < ARRAY_SIZE(instruction_access); ++k) {
+ for (l = 0; l < ARRAY_SIZE(shareability); ++l) {
+ for (m = 0;
+ m < ARRAY_SIZE(cacheability);
+ ++m) {
+ uint32_t msg_size =
+ spci_memory_region_init(
+ mb.send,
+ HF_PRIMARY_VM_ID,
+ vms[i],
+ constituents,
+ constituent_count,
+ 0, 0,
+ data_access[j],
+ instruction_access
+ [k],
+ SPCI_MEMORY_NORMAL_MEM,
+ cacheability[m],
+ shareability
+ [l]);
+ struct spci_value ret =
+ send_function(msg_size,
+ msg_size);
+
+ EXPECT_EQ(ret.func,
+ SPCI_ERROR_32);
+ EXPECT_TRUE(
+ ret.arg2 ==
+ SPCI_DENIED ||
+ ret.arg2 ==
+ SPCI_INVALID_PARAMETERS);
+ }
+ for (m = 0; m < ARRAY_SIZE(device);
+ ++m) {
+ uint32_t msg_size =
+ spci_memory_region_init(
+ mb.send,
+ HF_PRIMARY_VM_ID,
+ vms[i],
+ constituents,
+ constituent_count,
+ 0, 0,
+ data_access[j],
+ instruction_access
+ [k],
+ SPCI_MEMORY_DEVICE_MEM,
+ device[m],
+ shareability
+ [l]);
+ struct spci_value ret =
+ send_function(msg_size,
+ msg_size);
+
+ EXPECT_EQ(ret.func,
+ SPCI_ERROR_32);
+ EXPECT_TRUE(
+ ret.arg2 ==
+ SPCI_DENIED ||
+ ret.arg2 ==
+ SPCI_INVALID_PARAMETERS);
+ }
}
}
}
@@ -142,17 +176,21 @@
size_t i;
for (i = 0; i < ARRAY_SIZE(vms); ++i) {
uint32_t msg_size;
+ struct spci_value ret;
/* Optionally skip one VM as the donate would succeed. */
if (vms[i] == avoid_vm) {
continue;
}
msg_size = spci_memory_region_init(
mb.send, HF_PRIMARY_VM_ID, vms[i], constituents,
- constituent_count, 0, 0, SPCI_MEMORY_RW_X,
+ constituent_count, 0, 0, SPCI_DATA_ACCESS_NOT_SPECIFIED,
+ SPCI_INSTRUCTION_ACCESS_NOT_SPECIFIED,
SPCI_MEMORY_NORMAL_MEM, SPCI_MEMORY_CACHE_WRITE_BACK,
SPCI_MEMORY_OUTER_SHAREABLE);
- EXPECT_SPCI_ERROR(spci_mem_donate(msg_size, msg_size, 0),
- SPCI_INVALID_PARAMETERS);
+ ret = spci_mem_donate(msg_size, msg_size);
+ EXPECT_EQ(ret.func, SPCI_ERROR_32);
+ EXPECT_TRUE(ret.arg2 == SPCI_DENIED ||
+ ret.arg2 == SPCI_INVALID_PARAMETERS);
}
}
@@ -167,9 +205,12 @@
size_t i;
for (i = 0; i < ARRAY_SIZE(vms); ++i) {
- *(struct spci_mem_relinquish *)mb.send =
- (struct spci_mem_relinquish){.handle = handle,
- .sender = vms[i]};
+ struct spci_mem_relinquish *relinquish_req =
+ (struct spci_mem_relinquish *)mb.send;
+
+ *relinquish_req = (struct spci_mem_relinquish){
+ .handle = handle, .endpoint_count = 1};
+ relinquish_req->endpoints[0] = vms[i];
EXPECT_SPCI_ERROR(spci_mem_relinquish(),
SPCI_INVALID_PARAMETERS);
}
@@ -190,7 +231,7 @@
struct mailbox_buffers mb = set_up_mailbox();
uint8_t *ptr = pages;
struct spci_memory_region_constituent constituents[] = {
- spci_memory_region_constituent_init((uint64_t)pages, 1),
+ {.address = (uint64_t)pages, .page_count = 1},
};
SERVICE_SELECT(SERVICE_VM1, "memory_increment", mb.send);
@@ -199,7 +240,9 @@
send_memory_and_retrieve_request(
SPCI_MEM_SHARE_32, mb.send, HF_PRIMARY_VM_ID, SERVICE_VM1,
- constituents, ARRAY_SIZE(constituents), 0, SPCI_MEMORY_RW_X);
+ constituents, ARRAY_SIZE(constituents), 0, SPCI_DATA_ACCESS_RW,
+ SPCI_DATA_ACCESS_RW, SPCI_INSTRUCTION_ACCESS_NOT_SPECIFIED,
+ SPCI_INSTRUCTION_ACCESS_NX);
run_res = spci_run(SERVICE_VM1, 0);
EXPECT_EQ(run_res.func, SPCI_YIELD_32);
@@ -229,7 +272,7 @@
struct mailbox_buffers mb = set_up_mailbox();
uint8_t *ptr = pages;
struct spci_memory_region_constituent constituents[] = {
- spci_memory_region_constituent_init((uint64_t)pages, 1),
+ {.address = (uint64_t)pages, .page_count = 1},
};
SERVICE_SELECT(SERVICE_VM1, "spci_memory_lend_relinquish", mb.send);
@@ -239,7 +282,9 @@
handle = send_memory_and_retrieve_request(
SPCI_MEM_SHARE_32, mb.send, HF_PRIMARY_VM_ID, SERVICE_VM1,
- constituents, ARRAY_SIZE(constituents), 0, SPCI_MEMORY_RW_X);
+ constituents, ARRAY_SIZE(constituents), 0, SPCI_DATA_ACCESS_RW,
+ SPCI_DATA_ACCESS_RW, SPCI_INSTRUCTION_ACCESS_NOT_SPECIFIED,
+ SPCI_INSTRUCTION_ACCESS_NX);
/* Let the memory be returned. */
run_res = spci_run(SERVICE_VM1, 0);
@@ -262,7 +307,7 @@
{
struct mailbox_buffers mb = set_up_mailbox();
struct spci_memory_region_constituent constituents[] = {
- spci_memory_region_constituent_init((uint64_t)PAGE_SIZE, 1),
+ {.address = (uint64_t)PAGE_SIZE, .page_count = 1},
};
SERVICE_SELECT(SERVICE_VM1, "spci_memory_return", mb.send);
@@ -292,14 +337,15 @@
memset_s(ptr, sizeof(pages), 'b', PAGE_SIZE);
struct spci_memory_region_constituent constituents[] = {
- spci_memory_region_constituent_init((uint64_t)pages, 1),
- spci_memory_region_constituent_init((uint64_t)pages + PAGE_SIZE,
- 2),
+ {.address = (uint64_t)pages, .page_count = 1},
+ {.address = (uint64_t)pages + PAGE_SIZE, .page_count = 2},
};
handle = send_memory_and_retrieve_request(
SPCI_MEM_LEND_32, mb.send, HF_PRIMARY_VM_ID, SERVICE_VM1,
- constituents, ARRAY_SIZE(constituents), 0, SPCI_MEMORY_RW_X);
+ constituents, ARRAY_SIZE(constituents), 0, SPCI_DATA_ACCESS_RW,
+ SPCI_DATA_ACCESS_RW, SPCI_INSTRUCTION_ACCESS_NOT_SPECIFIED,
+ SPCI_INSTRUCTION_ACCESS_X);
run_res = spci_run(SERVICE_VM1, 0);
@@ -333,14 +379,16 @@
memset_s(ptr, sizeof(pages), 'b', PAGE_SIZE);
struct spci_memory_region_constituent constituents[] = {
- spci_memory_region_constituent_init((uint64_t)pages, 1),
- spci_memory_region_constituent_init((uint64_t)pages + PAGE_SIZE,
- 2),
+ {.address = (uint64_t)pages, .page_count = 1},
+ {.address = (uint64_t)pages + PAGE_SIZE, .page_count = 2},
};
send_memory_and_retrieve_request(
SPCI_MEM_DONATE_32, mb.send, HF_PRIMARY_VM_ID, SERVICE_VM1,
- constituents, ARRAY_SIZE(constituents), 0, SPCI_MEMORY_RW_X);
+ constituents, ARRAY_SIZE(constituents), 0,
+ SPCI_DATA_ACCESS_NOT_SPECIFIED, SPCI_DATA_ACCESS_RW,
+ SPCI_INSTRUCTION_ACCESS_NOT_SPECIFIED,
+ SPCI_INSTRUCTION_ACCESS_X);
/*
* Let the service access the memory, and try and fail to relinquish it.
@@ -358,7 +406,7 @@
struct mailbox_buffers mb = set_up_mailbox();
uint8_t *ptr = pages;
struct spci_memory_region_constituent constituents[] = {
- spci_memory_region_constituent_init((uint64_t)pages, 1),
+ {.address = (uint64_t)pages, .page_count = 1},
};
SERVICE_SELECT(SERVICE_VM1, "spci_memory_return", mb.send);
@@ -368,7 +416,10 @@
send_memory_and_retrieve_request(
SPCI_MEM_DONATE_32, mb.send, HF_PRIMARY_VM_ID, SERVICE_VM1,
- constituents, ARRAY_SIZE(constituents), 0, SPCI_MEMORY_RW_X);
+ constituents, ARRAY_SIZE(constituents), 0,
+ SPCI_DATA_ACCESS_NOT_SPECIFIED, SPCI_DATA_ACCESS_RW,
+ SPCI_INSTRUCTION_ACCESS_NOT_SPECIFIED,
+ SPCI_INSTRUCTION_ACCESS_X);
/* Let the memory be returned, and retrieve it. */
run_res = spci_run(SERVICE_VM1, 0);
@@ -395,7 +446,7 @@
struct mailbox_buffers mb = set_up_mailbox();
uint8_t *ptr = pages;
struct spci_memory_region_constituent constituents[] = {
- spci_memory_region_constituent_init((uint64_t)pages, 1),
+ {.address = (uint64_t)pages, .page_count = 1},
};
SERVICE_SELECT(SERVICE_VM1, "spci_memory_lend_relinquish", mb.send);
@@ -405,7 +456,9 @@
handle = send_memory_and_retrieve_request(
SPCI_MEM_LEND_32, mb.send, HF_PRIMARY_VM_ID, SERVICE_VM1,
- constituents, ARRAY_SIZE(constituents), 0, SPCI_MEMORY_RW_X);
+ constituents, ARRAY_SIZE(constituents), 0, SPCI_DATA_ACCESS_RW,
+ SPCI_DATA_ACCESS_RW, SPCI_INSTRUCTION_ACCESS_NOT_SPECIFIED,
+ SPCI_INSTRUCTION_ACCESS_X);
/* Let the memory be returned. */
run_res = spci_run(SERVICE_VM1, 0);
@@ -430,7 +483,7 @@
struct spci_value run_res;
struct mailbox_buffers mb = set_up_mailbox();
struct spci_memory_region_constituent constituents[] = {
- spci_memory_region_constituent_init((uint64_t)pages, 1),
+ {.address = (uint64_t)pages, .page_count = 1},
};
SERVICE_SELECT(SERVICE_VM1, "spci_memory_lend_relinquish", mb.send);
@@ -438,7 +491,9 @@
/* Lend the memory initially. */
handle = send_memory_and_retrieve_request(
SPCI_MEM_LEND_32, mb.send, HF_PRIMARY_VM_ID, SERVICE_VM1,
- constituents, ARRAY_SIZE(constituents), 0, SPCI_MEMORY_RW_X);
+ constituents, ARRAY_SIZE(constituents), 0, SPCI_DATA_ACCESS_RW,
+ SPCI_DATA_ACCESS_RW, SPCI_INSTRUCTION_ACCESS_NOT_SPECIFIED,
+ SPCI_INSTRUCTION_ACCESS_X);
/* Let the memory be returned. */
run_res = spci_run(SERVICE_VM1, 0);
@@ -449,7 +504,9 @@
/* Lend the memory again after it has been returned. */
handle = send_memory_and_retrieve_request(
SPCI_MEM_LEND_32, mb.send, HF_PRIMARY_VM_ID, SERVICE_VM1,
- constituents, ARRAY_SIZE(constituents), 0, SPCI_MEMORY_RW_X);
+ constituents, ARRAY_SIZE(constituents), 0, SPCI_DATA_ACCESS_RW,
+ SPCI_DATA_ACCESS_RW, SPCI_INSTRUCTION_ACCESS_NOT_SPECIFIED,
+ SPCI_INSTRUCTION_ACCESS_X);
/* Observe the service doesn't fault when accessing the memory. */
run_res = spci_run(SERVICE_VM1, 0);
@@ -467,7 +524,7 @@
struct spci_value run_res;
struct mailbox_buffers mb = set_up_mailbox();
struct spci_memory_region_constituent constituents[] = {
- spci_memory_region_constituent_init((uint64_t)pages, 1),
+ {.address = (uint64_t)pages, .page_count = 1},
};
SERVICE_SELECT(SERVICE_VM1, "spci_memory_lend_relinquish", mb.send);
@@ -476,7 +533,9 @@
/* Lend the memory initially. */
handle = send_memory_and_retrieve_request(
SPCI_MEM_LEND_32, mb.send, HF_PRIMARY_VM_ID, SERVICE_VM1,
- constituents, ARRAY_SIZE(constituents), 0, SPCI_MEMORY_RW_X);
+ constituents, ARRAY_SIZE(constituents), 0, SPCI_DATA_ACCESS_RW,
+ SPCI_DATA_ACCESS_RW, SPCI_INSTRUCTION_ACCESS_NOT_SPECIFIED,
+ SPCI_INSTRUCTION_ACCESS_X);
/* Let the memory be returned. */
run_res = spci_run(SERVICE_VM1, 0);
@@ -487,7 +546,9 @@
/* Share the memory with a different VM after it has been returned. */
send_memory_and_retrieve_request(
SPCI_MEM_LEND_32, mb.send, HF_PRIMARY_VM_ID, SERVICE_VM2,
- constituents, ARRAY_SIZE(constituents), 0, SPCI_MEMORY_RW_X);
+ constituents, ARRAY_SIZE(constituents), 0, SPCI_DATA_ACCESS_RW,
+ SPCI_DATA_ACCESS_RW, SPCI_INSTRUCTION_ACCESS_NOT_SPECIFIED,
+ SPCI_INSTRUCTION_ACCESS_X);
run_res = spci_run(SERVICE_VM1, 0);
EXPECT_EQ(exception_handler_receive_exception_count(&run_res, mb.recv),
@@ -501,8 +562,8 @@
{
struct spci_value run_res;
struct mailbox_buffers mb = set_up_mailbox();
- struct spci_retrieved_memory_region *memory_region;
- struct spci_receiver_address_range *range;
+ struct spci_memory_region *memory_region;
+ struct spci_composite_memory_region *composite;
uint8_t *ptr;
SERVICE_SELECT(SERVICE_VM1, "give_memory_and_fault", mb.send);
@@ -513,11 +574,12 @@
SERVICE_VM1);
/* Check the memory was cleared. */
- memory_region = (struct spci_retrieved_memory_region *)mb.recv;
- range = spci_retrieved_memory_region_first_receiver_range(
- memory_region);
- ptr = (uint8_t *)spci_memory_region_constituent_get_address(
- &range->constituents[0]);
+ memory_region = (struct spci_memory_region *)mb.recv;
+ ASSERT_EQ(memory_region->receiver_count, 1);
+ ASSERT_NE(memory_region->receivers[0].composite_memory_region_offset,
+ 0);
+ composite = spci_memory_region_get_composite(memory_region, 0);
+ ptr = (uint8_t *)composite->constituents[0].address;
for (int i = 0; i < PAGE_SIZE; ++i) {
ASSERT_EQ(ptr[i], 0);
}
@@ -535,8 +597,8 @@
{
struct spci_value run_res;
struct mailbox_buffers mb = set_up_mailbox();
- struct spci_retrieved_memory_region *memory_region;
- struct spci_receiver_address_range *range;
+ struct spci_memory_region *memory_region;
+ struct spci_composite_memory_region *composite;
uint8_t *ptr;
SERVICE_SELECT(SERVICE_VM1, "lend_memory_and_fault", mb.send);
@@ -547,11 +609,12 @@
SERVICE_VM1);
/* Check the memory was cleared. */
- memory_region = (struct spci_retrieved_memory_region *)mb.recv;
- range = spci_retrieved_memory_region_first_receiver_range(
- memory_region);
- ptr = (uint8_t *)spci_memory_region_constituent_get_address(
- &range->constituents[0]);
+ memory_region = (struct spci_memory_region *)mb.recv;
+ ASSERT_EQ(memory_region->receiver_count, 1);
+ ASSERT_NE(memory_region->receivers[0].composite_memory_region_offset,
+ 0);
+ composite = spci_memory_region_get_composite(memory_region, 0);
+ ptr = (uint8_t *)composite->constituents[0].address;
for (int i = 0; i < PAGE_SIZE; ++i) {
ASSERT_EQ(ptr[i], 0);
}
@@ -570,7 +633,6 @@
struct spci_value run_res;
struct mailbox_buffers mb = set_up_mailbox();
uint8_t *ptr = pages;
- uint64_t address;
SERVICE_SELECT(SERVICE_VM1, "spci_check_upper_bound", mb.send);
SERVICE_SELECT(SERVICE_VM2, "spci_check_upper_bound", mb.send);
@@ -580,9 +642,8 @@
/* Specify non-contiguous memory regions. */
struct spci_memory_region_constituent constituents[] = {
- spci_memory_region_constituent_init((uint64_t)pages, 1),
- spci_memory_region_constituent_init(
- (uint64_t)pages + PAGE_SIZE * 2, 1),
+ {.address = (uint64_t)pages, .page_count = 1},
+ {.address = (uint64_t)pages + PAGE_SIZE * 2, .page_count = 1},
};
/*
@@ -593,19 +654,18 @@
send_memory_and_retrieve_request(
SPCI_MEM_DONATE_32, mb.send, HF_PRIMARY_VM_ID, SERVICE_VM1,
- constituents, ARRAY_SIZE(constituents), 0, SPCI_MEMORY_RW_X);
+ constituents, ARRAY_SIZE(constituents), 0,
+ SPCI_DATA_ACCESS_NOT_SPECIFIED, SPCI_DATA_ACCESS_RW,
+ SPCI_INSTRUCTION_ACCESS_NOT_SPECIFIED,
+ SPCI_INSTRUCTION_ACCESS_X);
run_res = spci_run(SERVICE_VM1, 0);
EXPECT_EQ(exception_handler_receive_exception_count(&run_res, mb.recv),
1);
/* Use different memory regions for verifying the second constituent. */
- address = (uint64_t)pages + PAGE_SIZE * 1;
- constituents[0].address_high = address << 32;
- constituents[0].address_low = (uint32_t)address;
- address = (uint64_t)pages + PAGE_SIZE * 3;
- constituents[1].address_high = address << 32;
- constituents[1].address_low = (uint32_t)address;
+ constituents[0].address = (uint64_t)pages + PAGE_SIZE * 1;
+ constituents[1].address = (uint64_t)pages + PAGE_SIZE * 3;
/*
* Specify that we now want to test the second constituent of the
@@ -619,7 +679,10 @@
*/
send_memory_and_retrieve_request(
SPCI_MEM_DONATE_32, mb.send, HF_PRIMARY_VM_ID, SERVICE_VM2,
- constituents, ARRAY_SIZE(constituents), 0, SPCI_MEMORY_RW_X);
+ constituents, ARRAY_SIZE(constituents), 0,
+ SPCI_DATA_ACCESS_NOT_SPECIFIED, SPCI_DATA_ACCESS_RW,
+ SPCI_INSTRUCTION_ACCESS_NOT_SPECIFIED,
+ SPCI_INSTRUCTION_ACCESS_X);
run_res = spci_run(SERVICE_VM2, 0);
EXPECT_EQ(exception_handler_receive_exception_count(&run_res, mb.recv),
@@ -634,7 +697,6 @@
struct spci_value run_res;
struct mailbox_buffers mb = set_up_mailbox();
uint8_t *ptr = pages;
- uint64_t address;
SERVICE_SELECT(SERVICE_VM1, "spci_check_lower_bound", mb.send);
SERVICE_SELECT(SERVICE_VM2, "spci_check_lower_bound", mb.send);
@@ -644,9 +706,8 @@
/* Specify non-contiguous memory regions. */
struct spci_memory_region_constituent constituents[] = {
- spci_memory_region_constituent_init((uint64_t)pages, 1),
- spci_memory_region_constituent_init(
- (uint64_t)pages + PAGE_SIZE * 2, 1),
+ {.address = (uint64_t)pages, .page_count = 1},
+ {.address = (uint64_t)pages + PAGE_SIZE * 2, .page_count = 1},
};
/*
@@ -657,19 +718,18 @@
send_memory_and_retrieve_request(
SPCI_MEM_DONATE_32, mb.send, HF_PRIMARY_VM_ID, SERVICE_VM1,
- constituents, ARRAY_SIZE(constituents), 0, SPCI_MEMORY_RW_X);
+ constituents, ARRAY_SIZE(constituents), 0,
+ SPCI_DATA_ACCESS_NOT_SPECIFIED, SPCI_DATA_ACCESS_RW,
+ SPCI_INSTRUCTION_ACCESS_NOT_SPECIFIED,
+ SPCI_INSTRUCTION_ACCESS_X);
run_res = spci_run(SERVICE_VM1, 0);
EXPECT_EQ(exception_handler_receive_exception_count(&run_res, mb.recv),
1);
/* Use different memory regions for verifying the second constituent. */
- address = (uint64_t)pages + PAGE_SIZE * 1;
- constituents[0].address_high = address << 32;
- constituents[0].address_low = (uint32_t)address;
- address = (uint64_t)pages + PAGE_SIZE * 3;
- constituents[1].address_high = address << 32;
- constituents[1].address_low = (uint32_t)address;
+ constituents[0].address = (uint64_t)pages + PAGE_SIZE * 1;
+ constituents[1].address = (uint64_t)pages + PAGE_SIZE * 3;
/*
* Specify that we now want to test the second constituent of the
@@ -683,7 +743,10 @@
*/
send_memory_and_retrieve_request(
SPCI_MEM_DONATE_32, mb.send, HF_PRIMARY_VM_ID, SERVICE_VM2,
- constituents, ARRAY_SIZE(constituents), 0, SPCI_MEMORY_RW_X);
+ constituents, ARRAY_SIZE(constituents), 0,
+ SPCI_DATA_ACCESS_NOT_SPECIFIED, SPCI_DATA_ACCESS_RW,
+ SPCI_INSTRUCTION_ACCESS_NOT_SPECIFIED,
+ SPCI_INSTRUCTION_ACCESS_X);
run_res = spci_run(SERVICE_VM2, 0);
EXPECT_EQ(exception_handler_receive_exception_count(&run_res, mb.recv),
@@ -707,12 +770,15 @@
memset_s(ptr, sizeof(pages), 'b', 1 * PAGE_SIZE);
struct spci_memory_region_constituent constituents[] = {
- spci_memory_region_constituent_init((uint64_t)pages, 1),
+ {.address = (uint64_t)pages, .page_count = 1},
};
send_memory_and_retrieve_request(
SPCI_MEM_DONATE_32, mb.send, HF_PRIMARY_VM_ID, SERVICE_VM1,
- constituents, ARRAY_SIZE(constituents), 0, SPCI_MEMORY_RW_X);
+ constituents, ARRAY_SIZE(constituents), 0,
+ SPCI_DATA_ACCESS_NOT_SPECIFIED, SPCI_DATA_ACCESS_RW,
+ SPCI_INSTRUCTION_ACCESS_NOT_SPECIFIED,
+ SPCI_INSTRUCTION_ACCESS_X);
run_res = spci_run(SERVICE_VM1, 0);
@@ -724,7 +790,10 @@
/* Share the memory with another VM. */
send_memory_and_retrieve_request(
SPCI_MEM_DONATE_32, mb.send, HF_PRIMARY_VM_ID, SERVICE_VM2,
- constituents, ARRAY_SIZE(constituents), 0, SPCI_MEMORY_RW_X);
+ constituents, ARRAY_SIZE(constituents), 0,
+ SPCI_DATA_ACCESS_NOT_SPECIFIED, SPCI_DATA_ACCESS_RW,
+ SPCI_INSTRUCTION_ACCESS_NOT_SPECIFIED,
+ SPCI_INSTRUCTION_ACCESS_X);
run_res = spci_run(SERVICE_VM1, 0);
EXPECT_EQ(exception_handler_receive_exception_count(&run_res, mb.recv),
@@ -748,7 +817,7 @@
memset_s(ptr, sizeof(pages), 'b', 1 * PAGE_SIZE);
struct spci_memory_region_constituent constituents[] = {
- spci_memory_region_constituent_init((uint64_t)pages, 1),
+ {.address = (uint64_t)pages, .page_count = 1},
};
/* Set up VM2 to wait for message. */
@@ -758,7 +827,10 @@
/* Donate memory. */
send_memory_and_retrieve_request(
SPCI_MEM_DONATE_32, mb.send, HF_PRIMARY_VM_ID, SERVICE_VM1,
- constituents, ARRAY_SIZE(constituents), 0, SPCI_MEMORY_RW_X);
+ constituents, ARRAY_SIZE(constituents), 0,
+ SPCI_DATA_ACCESS_NOT_SPECIFIED, SPCI_DATA_ACCESS_RW,
+ SPCI_INSTRUCTION_ACCESS_NOT_SPECIFIED,
+ SPCI_INSTRUCTION_ACCESS_X);
/* Let the memory be sent from VM1 to VM2. */
run_res = spci_run(SERVICE_VM1, 0);
@@ -796,13 +868,16 @@
memset_s(ptr, sizeof(pages), 'b', 1 * PAGE_SIZE);
struct spci_memory_region_constituent constituents[] = {
- spci_memory_region_constituent_init((uint64_t)pages, 1),
+ {.address = (uint64_t)pages, .page_count = 1},
};
/* Donate memory to VM1. */
handle = send_memory_and_retrieve_request(
SPCI_MEM_DONATE_32, mb.send, HF_PRIMARY_VM_ID, SERVICE_VM1,
- constituents, ARRAY_SIZE(constituents), 0, SPCI_MEMORY_RW_X);
+ constituents, ARRAY_SIZE(constituents), 0,
+ SPCI_DATA_ACCESS_NOT_SPECIFIED, SPCI_DATA_ACCESS_RW,
+ SPCI_INSTRUCTION_ACCESS_NOT_SPECIFIED,
+ SPCI_INSTRUCTION_ACCESS_X);
/* Let the memory be received. */
run_res = spci_run(SERVICE_VM1, 0);
@@ -844,16 +919,16 @@
/* Initialise the memory before giving it. */
memset_s(ptr, sizeof(pages), 'b', PAGE_SIZE);
struct spci_memory_region_constituent constituents[] = {
- spci_memory_region_constituent_init((uint64_t)pages, 1),
+ {.address = (uint64_t)pages, .page_count = 1},
};
msg_size = spci_memory_region_init(
mb.send, HF_PRIMARY_VM_ID, HF_PRIMARY_VM_ID, constituents,
- ARRAY_SIZE(constituents), 0, 0, SPCI_MEMORY_RW_X,
- SPCI_MEMORY_NORMAL_MEM, SPCI_MEMORY_CACHE_WRITE_BACK,
- SPCI_MEMORY_OUTER_SHAREABLE);
+ ARRAY_SIZE(constituents), 0, 0, SPCI_DATA_ACCESS_NOT_SPECIFIED,
+ SPCI_INSTRUCTION_ACCESS_NOT_SPECIFIED, SPCI_MEMORY_NORMAL_MEM,
+ SPCI_MEMORY_CACHE_WRITE_BACK, SPCI_MEMORY_OUTER_SHAREABLE);
- EXPECT_SPCI_ERROR(spci_mem_donate(msg_size, msg_size, 0),
+ EXPECT_SPCI_ERROR(spci_mem_donate(msg_size, msg_size),
SPCI_INVALID_PARAMETERS);
}
@@ -869,15 +944,15 @@
/* Initialise the memory before giving it. */
memset_s(ptr, sizeof(pages), 'b', PAGE_SIZE);
struct spci_memory_region_constituent constituents[] = {
- spci_memory_region_constituent_init((uint64_t)pages, 1),
+ {.address = (uint64_t)pages, .page_count = 1},
};
msg_size = spci_memory_region_init(
mb.send, HF_PRIMARY_VM_ID, HF_PRIMARY_VM_ID, constituents,
- ARRAY_SIZE(constituents), 0, 0, SPCI_MEMORY_RW_X,
- SPCI_MEMORY_NORMAL_MEM, SPCI_MEMORY_CACHE_WRITE_BACK,
- SPCI_MEMORY_OUTER_SHAREABLE);
- EXPECT_SPCI_ERROR(spci_mem_lend(msg_size, msg_size, 0),
+ ARRAY_SIZE(constituents), 0, 0, SPCI_DATA_ACCESS_RW,
+ SPCI_INSTRUCTION_ACCESS_NOT_SPECIFIED, SPCI_MEMORY_NORMAL_MEM,
+ SPCI_MEMORY_CACHE_WRITE_BACK, SPCI_MEMORY_OUTER_SHAREABLE);
+ EXPECT_SPCI_ERROR(spci_mem_lend(msg_size, msg_size),
SPCI_INVALID_PARAMETERS);
}
@@ -893,15 +968,15 @@
/* Initialise the memory before giving it. */
memset_s(ptr, sizeof(pages), 'b', PAGE_SIZE);
struct spci_memory_region_constituent constituents[] = {
- spci_memory_region_constituent_init((uint64_t)pages, 1),
+ {.address = (uint64_t)pages, .page_count = 1},
};
msg_size = spci_memory_region_init(
mb.send, HF_PRIMARY_VM_ID, HF_PRIMARY_VM_ID, constituents,
- ARRAY_SIZE(constituents), 0, 0, SPCI_MEMORY_RW_X,
- SPCI_MEMORY_NORMAL_MEM, SPCI_MEMORY_CACHE_WRITE_BACK,
- SPCI_MEMORY_OUTER_SHAREABLE);
- EXPECT_SPCI_ERROR(spci_mem_share(msg_size, msg_size, 0),
+ ARRAY_SIZE(constituents), 0, 0, SPCI_DATA_ACCESS_RW,
+ SPCI_INSTRUCTION_ACCESS_NOT_SPECIFIED, SPCI_MEMORY_NORMAL_MEM,
+ SPCI_MEMORY_CACHE_WRITE_BACK, SPCI_MEMORY_OUTER_SHAREABLE);
+ EXPECT_SPCI_ERROR(spci_mem_share(msg_size, msg_size),
SPCI_INVALID_PARAMETERS);
}
@@ -921,38 +996,41 @@
/* Initialise the memory before giving it. */
memset_s(ptr, sizeof(pages), 'b', PAGE_SIZE);
struct spci_memory_region_constituent constituents[] = {
- spci_memory_region_constituent_init((uint64_t)pages, 1),
+ {.address = (uint64_t)pages, .page_count = 1},
};
/* Try invalid configurations. */
msg_size = spci_memory_region_init(
mb.send, SERVICE_VM1, HF_PRIMARY_VM_ID, constituents,
- ARRAY_SIZE(constituents), 0, 0, SPCI_MEMORY_RW_X,
- SPCI_MEMORY_NORMAL_MEM, SPCI_MEMORY_CACHE_WRITE_BACK,
- SPCI_MEMORY_OUTER_SHAREABLE);
- EXPECT_SPCI_ERROR(spci_mem_donate(msg_size, msg_size, 0),
+ ARRAY_SIZE(constituents), 0, 0, SPCI_DATA_ACCESS_NOT_SPECIFIED,
+ SPCI_INSTRUCTION_ACCESS_NOT_SPECIFIED, SPCI_MEMORY_NORMAL_MEM,
+ SPCI_MEMORY_CACHE_WRITE_BACK, SPCI_MEMORY_OUTER_SHAREABLE);
+ EXPECT_SPCI_ERROR(spci_mem_donate(msg_size, msg_size),
SPCI_INVALID_PARAMETERS);
msg_size = spci_memory_region_init(
mb.send, SERVICE_VM1, SERVICE_VM1, constituents,
- ARRAY_SIZE(constituents), 0, 0, SPCI_MEMORY_RW_X,
- SPCI_MEMORY_NORMAL_MEM, SPCI_MEMORY_CACHE_WRITE_BACK,
- SPCI_MEMORY_OUTER_SHAREABLE);
- EXPECT_SPCI_ERROR(spci_mem_donate(msg_size, msg_size, 0),
+ ARRAY_SIZE(constituents), 0, 0, SPCI_DATA_ACCESS_NOT_SPECIFIED,
+ SPCI_INSTRUCTION_ACCESS_NOT_SPECIFIED, SPCI_MEMORY_NORMAL_MEM,
+ SPCI_MEMORY_CACHE_WRITE_BACK, SPCI_MEMORY_OUTER_SHAREABLE);
+ EXPECT_SPCI_ERROR(spci_mem_donate(msg_size, msg_size),
SPCI_INVALID_PARAMETERS);
msg_size = spci_memory_region_init(
mb.send, SERVICE_VM2, SERVICE_VM1, constituents,
- ARRAY_SIZE(constituents), 0, 0, SPCI_MEMORY_RW_X,
- SPCI_MEMORY_NORMAL_MEM, SPCI_MEMORY_CACHE_WRITE_BACK,
- SPCI_MEMORY_OUTER_SHAREABLE);
- EXPECT_SPCI_ERROR(spci_mem_donate(msg_size, msg_size, 0),
+ ARRAY_SIZE(constituents), 0, 0, SPCI_DATA_ACCESS_NOT_SPECIFIED,
+ SPCI_INSTRUCTION_ACCESS_NOT_SPECIFIED, SPCI_MEMORY_NORMAL_MEM,
+ SPCI_MEMORY_CACHE_WRITE_BACK, SPCI_MEMORY_OUTER_SHAREABLE);
+ EXPECT_SPCI_ERROR(spci_mem_donate(msg_size, msg_size),
SPCI_INVALID_PARAMETERS);
/* Successfully donate to VM1. */
send_memory_and_retrieve_request(
SPCI_MEM_DONATE_32, mb.send, HF_PRIMARY_VM_ID, SERVICE_VM1,
- constituents, ARRAY_SIZE(constituents), 0, SPCI_MEMORY_RW_X);
+ constituents, ARRAY_SIZE(constituents), 0,
+ SPCI_DATA_ACCESS_NOT_SPECIFIED, SPCI_DATA_ACCESS_RW,
+ SPCI_INSTRUCTION_ACCESS_NOT_SPECIFIED,
+ SPCI_INSTRUCTION_ACCESS_X);
/* Receive and return memory from VM1. */
run_res = spci_run(SERVICE_VM1, 0);
@@ -982,27 +1060,30 @@
continue;
}
struct spci_memory_region_constituent constituents[] = {
- spci_memory_region_constituent_init(
- (uint64_t)pages + i, 1),
- spci_memory_region_constituent_init(
- (uint64_t)pages + PAGE_SIZE + j, 1),
+ {.address = (uint64_t)pages + i,
+ .page_count = 1},
+ {.address = (uint64_t)pages + PAGE_SIZE + j,
+ .page_count = 1},
};
uint32_t msg_size = spci_memory_region_init(
mb.send, HF_PRIMARY_VM_ID, SERVICE_VM1,
constituents, ARRAY_SIZE(constituents), 0, 0,
- SPCI_MEMORY_RW_X, SPCI_MEMORY_NORMAL_MEM,
+ SPCI_DATA_ACCESS_NOT_SPECIFIED,
+ SPCI_INSTRUCTION_ACCESS_NOT_SPECIFIED,
+ SPCI_MEMORY_NORMAL_MEM,
SPCI_MEMORY_CACHE_WRITE_BACK,
SPCI_MEMORY_OUTER_SHAREABLE);
- EXPECT_SPCI_ERROR(
- spci_mem_donate(msg_size, msg_size, 0),
- SPCI_INVALID_PARAMETERS);
+ EXPECT_SPCI_ERROR(spci_mem_donate(msg_size, msg_size),
+ SPCI_INVALID_PARAMETERS);
msg_size = spci_memory_region_init(
mb.send, HF_PRIMARY_VM_ID, SERVICE_VM1,
constituents, ARRAY_SIZE(constituents), 0, 0,
- SPCI_MEMORY_RW_X, SPCI_MEMORY_NORMAL_MEM,
+ SPCI_DATA_ACCESS_RW,
+ SPCI_INSTRUCTION_ACCESS_NOT_SPECIFIED,
+ SPCI_MEMORY_NORMAL_MEM,
SPCI_MEMORY_CACHE_WRITE_BACK,
SPCI_MEMORY_OUTER_SHAREABLE);
- EXPECT_SPCI_ERROR(spci_mem_lend(msg_size, msg_size, 0),
+ EXPECT_SPCI_ERROR(spci_mem_lend(msg_size, msg_size),
SPCI_INVALID_PARAMETERS);
}
}
@@ -1024,22 +1105,24 @@
/* Initialise the memory before giving it. */
memset_s(ptr, sizeof(pages), 'b', PAGE_SIZE);
struct spci_memory_region_constituent constituents[] = {
- spci_memory_region_constituent_init((uint64_t)pages, 1),
+ {.address = (uint64_t)pages, .page_count = 1},
};
/* Check cannot swap VM IDs. */
msg_size = spci_memory_region_init(
mb.send, SERVICE_VM1, HF_PRIMARY_VM_ID, constituents,
- ARRAY_SIZE(constituents), 0, 0, SPCI_MEMORY_RW_X,
- SPCI_MEMORY_NORMAL_MEM, SPCI_MEMORY_CACHE_WRITE_BACK,
- SPCI_MEMORY_OUTER_SHAREABLE);
- EXPECT_SPCI_ERROR(spci_mem_lend(msg_size, msg_size, 0),
+ ARRAY_SIZE(constituents), 0, 0, SPCI_DATA_ACCESS_RW,
+ SPCI_INSTRUCTION_ACCESS_NOT_SPECIFIED, SPCI_MEMORY_NORMAL_MEM,
+ SPCI_MEMORY_CACHE_WRITE_BACK, SPCI_MEMORY_OUTER_SHAREABLE);
+ EXPECT_SPCI_ERROR(spci_mem_lend(msg_size, msg_size),
SPCI_INVALID_PARAMETERS);
/* Lend memory to VM1. */
handle = send_memory_and_retrieve_request(
SPCI_MEM_LEND_32, mb.send, HF_PRIMARY_VM_ID, SERVICE_VM1,
- constituents, ARRAY_SIZE(constituents), 0, SPCI_MEMORY_RW_X);
+ constituents, ARRAY_SIZE(constituents), 0, SPCI_DATA_ACCESS_RW,
+ SPCI_DATA_ACCESS_RW, SPCI_INSTRUCTION_ACCESS_NOT_SPECIFIED,
+ SPCI_INSTRUCTION_ACCESS_X);
/* Receive and return memory from VM1. */
run_res = spci_run(SERVICE_VM1, 0);
@@ -1069,12 +1152,14 @@
memset_s(ptr, sizeof(pages), 'b', PAGE_SIZE);
struct spci_memory_region_constituent constituents[] = {
- spci_memory_region_constituent_init((uint64_t)pages, 1),
+ {.address = (uint64_t)pages, .page_count = 1},
};
handle = send_memory_and_retrieve_request(
SPCI_MEM_LEND_32, mb.send, HF_PRIMARY_VM_ID, SERVICE_VM1,
- constituents, ARRAY_SIZE(constituents), 0, SPCI_MEMORY_RW_X);
+ constituents, ARRAY_SIZE(constituents), 0, SPCI_DATA_ACCESS_RW,
+ SPCI_DATA_ACCESS_RW, SPCI_INSTRUCTION_ACCESS_NOT_SPECIFIED,
+ SPCI_INSTRUCTION_ACCESS_X);
/* Let the memory be accessed. */
run_res = spci_run(SERVICE_VM1, 0);
@@ -1091,7 +1176,9 @@
send_memory_and_retrieve_request(
SPCI_MEM_LEND_32, mb.send, HF_PRIMARY_VM_ID, SERVICE_VM1,
- constituents, ARRAY_SIZE(constituents), 0, SPCI_MEMORY_RO_X);
+ constituents, ARRAY_SIZE(constituents), 0, SPCI_DATA_ACCESS_RO,
+ SPCI_DATA_ACCESS_RO, SPCI_INSTRUCTION_ACCESS_NOT_SPECIFIED,
+ SPCI_INSTRUCTION_ACCESS_X);
/* Let the memory be accessed. */
run_res = spci_run(SERVICE_VM1, 0);
@@ -1103,32 +1190,35 @@
}
/**
- * Memory can be shared with executable permissions.
+ * Memory cannot be shared with executable permissions.
* Check RO and RW permissions.
*/
-TEST(memory_sharing, share_relinquish_X_RW)
+TEST(memory_sharing, share_X_RW)
{
spci_memory_handle_t handle;
struct spci_value run_res;
struct mailbox_buffers mb = set_up_mailbox();
uint8_t *ptr = pages;
- SERVICE_SELECT(SERVICE_VM1, "spci_memory_lend_relinquish_RW", mb.send);
+ SERVICE_SELECT(SERVICE_VM1, "spci_memory_share_fail", mb.send);
/* Initialise the memory before giving it. */
memset_s(ptr, sizeof(pages), 'b', PAGE_SIZE);
struct spci_memory_region_constituent constituents[] = {
- spci_memory_region_constituent_init((uint64_t)pages, 1),
+ {.address = (uint64_t)pages, .page_count = 1},
};
handle = send_memory_and_retrieve_request(
SPCI_MEM_SHARE_32, mb.send, HF_PRIMARY_VM_ID, SERVICE_VM1,
- constituents, ARRAY_SIZE(constituents), 0, SPCI_MEMORY_RW_X);
+ constituents, ARRAY_SIZE(constituents), 0, SPCI_DATA_ACCESS_RW,
+ SPCI_DATA_ACCESS_RW, SPCI_INSTRUCTION_ACCESS_NOT_SPECIFIED,
+ SPCI_INSTRUCTION_ACCESS_X);
- /* Let the memory be accessed. */
+ /* Let the secondary VM fail to retrieve the memory. */
run_res = spci_run(SERVICE_VM1, 0);
- EXPECT_EQ(run_res.func, SPCI_YIELD_32);
+ EXPECT_EQ(run_res.func, SPCI_MSG_SEND_32);
+ EXPECT_EQ(spci_rx_release().func, SPCI_SUCCESS_32);
/* Ensure we still have access. */
for (int i = 0; i < PAGE_SIZE; ++i) {
@@ -1136,10 +1226,7 @@
ptr[i]++;
}
- /* Let service write to and return memory. */
- run_res = spci_run(SERVICE_VM1, 0);
- EXPECT_EQ(run_res.func, SPCI_MSG_SEND_32);
- EXPECT_EQ(spci_rx_release().func, SPCI_SUCCESS_32);
+ /* Reclaim the memory. */
EXPECT_EQ(spci_mem_reclaim(handle, 0).func, SPCI_SUCCESS_32);
/* Re-initialise the memory before giving it. */
@@ -1147,11 +1234,14 @@
send_memory_and_retrieve_request(
SPCI_MEM_SHARE_32, mb.send, HF_PRIMARY_VM_ID, SERVICE_VM1,
- constituents, ARRAY_SIZE(constituents), 0, SPCI_MEMORY_RO_X);
+ constituents, ARRAY_SIZE(constituents), 0, SPCI_DATA_ACCESS_RO,
+ SPCI_DATA_ACCESS_RO, SPCI_INSTRUCTION_ACCESS_NOT_SPECIFIED,
+ SPCI_INSTRUCTION_ACCESS_X);
- /* Let the memory be accessed. */
+ /* Let the secondary VM fail to retrieve the memory. */
run_res = spci_run(SERVICE_VM1, 0);
- EXPECT_EQ(run_res.func, SPCI_YIELD_32);
+ EXPECT_EQ(run_res.func, SPCI_MSG_SEND_32);
+ EXPECT_EQ(spci_rx_release().func, SPCI_SUCCESS_32);
/* Ensure we still have access. */
for (int i = 0; i < PAGE_SIZE; ++i) {
@@ -1159,9 +1249,8 @@
ptr[i]++;
}
- run_res = spci_run(SERVICE_VM1, 0);
- EXPECT_EQ(exception_handler_receive_exception_count(&run_res, mb.recv),
- 1);
+ /* Reclaim the memory. */
+ EXPECT_EQ(spci_mem_reclaim(handle, 0).func, SPCI_SUCCESS_32);
}
/**
@@ -1181,12 +1270,14 @@
memset_s(ptr, sizeof(pages), 'b', PAGE_SIZE);
struct spci_memory_region_constituent constituents[] = {
- spci_memory_region_constituent_init((uint64_t)pages, 1),
+ {.address = (uint64_t)pages, .page_count = 1},
};
handle = send_memory_and_retrieve_request(
SPCI_MEM_SHARE_32, mb.send, HF_PRIMARY_VM_ID, SERVICE_VM1,
- constituents, ARRAY_SIZE(constituents), 0, SPCI_MEMORY_RW_NX);
+ constituents, ARRAY_SIZE(constituents), 0, SPCI_DATA_ACCESS_RW,
+ SPCI_DATA_ACCESS_RW, SPCI_INSTRUCTION_ACCESS_NOT_SPECIFIED,
+ SPCI_INSTRUCTION_ACCESS_NX);
/* Let the memory be accessed. */
run_res = spci_run(SERVICE_VM1, 0);
@@ -1208,7 +1299,9 @@
send_memory_and_retrieve_request(
SPCI_MEM_SHARE_32, mb.send, HF_PRIMARY_VM_ID, SERVICE_VM1,
- constituents, ARRAY_SIZE(constituents), 0, SPCI_MEMORY_RO_NX);
+ constituents, ARRAY_SIZE(constituents), 0, SPCI_DATA_ACCESS_RO,
+ SPCI_DATA_ACCESS_RO, SPCI_INSTRUCTION_ACCESS_NOT_SPECIFIED,
+ SPCI_INSTRUCTION_ACCESS_NX);
/* Let the memory be accessed. */
run_res = spci_run(SERVICE_VM1, 0);
@@ -1243,12 +1336,14 @@
memset_s(ptr, sizeof(pages) * 2, 'b', PAGE_SIZE * 2);
struct spci_memory_region_constituent constituents[] = {
- spci_memory_region_constituent_init((uint64_t)pages, 2),
+ {.address = (uint64_t)pages, .page_count = 2},
};
handle = send_memory_and_retrieve_request(
SPCI_MEM_SHARE_32, mb.send, HF_PRIMARY_VM_ID, SERVICE_VM1,
- constituents, ARRAY_SIZE(constituents), 0, SPCI_MEMORY_RW_NX);
+ constituents, ARRAY_SIZE(constituents), 0, SPCI_DATA_ACCESS_RW,
+ SPCI_DATA_ACCESS_RW, SPCI_INSTRUCTION_ACCESS_NOT_SPECIFIED,
+ SPCI_INSTRUCTION_ACCESS_NX);
/* Let the memory be received, fail to be cleared, and then returned. */
run_res = spci_run(SERVICE_VM1, 0);
@@ -1282,12 +1377,14 @@
*ptr2 = 0xD65F03C0;
struct spci_memory_region_constituent constituents[] = {
- spci_memory_region_constituent_init((uint64_t)pages, 1),
+ {.address = (uint64_t)pages, .page_count = 1},
};
handle = send_memory_and_retrieve_request(
SPCI_MEM_LEND_32, mb.send, HF_PRIMARY_VM_ID, SERVICE_VM1,
- constituents, ARRAY_SIZE(constituents), 0, SPCI_MEMORY_RW_X);
+ constituents, ARRAY_SIZE(constituents), 0, SPCI_DATA_ACCESS_RW,
+ SPCI_DATA_ACCESS_RW, SPCI_INSTRUCTION_ACCESS_NOT_SPECIFIED,
+ SPCI_INSTRUCTION_ACCESS_X);
/* Attempt to execute from memory. */
run_res = spci_run(SERVICE_VM1, 0);
@@ -1297,7 +1394,9 @@
send_memory_and_retrieve_request(
SPCI_MEM_LEND_32, mb.send, HF_PRIMARY_VM_ID, SERVICE_VM1,
- constituents, ARRAY_SIZE(constituents), 0, SPCI_MEMORY_RW_NX);
+ constituents, ARRAY_SIZE(constituents), 0, SPCI_DATA_ACCESS_RW,
+ SPCI_DATA_ACCESS_RW, SPCI_INSTRUCTION_ACCESS_NOT_SPECIFIED,
+ SPCI_INSTRUCTION_ACCESS_NX);
run_res = spci_run(SERVICE_VM1, 0);
EXPECT_EQ(exception_handler_receive_exception_count(&run_res, mb.recv),
@@ -1324,12 +1423,14 @@
*ptr2 = 0xD65F03C0;
struct spci_memory_region_constituent constituents[] = {
- spci_memory_region_constituent_init((uint64_t)pages, 1),
+ {.address = (uint64_t)pages, .page_count = 1},
};
handle = send_memory_and_retrieve_request(
SPCI_MEM_LEND_32, mb.send, HF_PRIMARY_VM_ID, SERVICE_VM1,
- constituents, ARRAY_SIZE(constituents), 0, SPCI_MEMORY_RO_X);
+ constituents, ARRAY_SIZE(constituents), 0, SPCI_DATA_ACCESS_RO,
+ SPCI_DATA_ACCESS_RO, SPCI_INSTRUCTION_ACCESS_NOT_SPECIFIED,
+ SPCI_INSTRUCTION_ACCESS_X);
/* Attempt to execute from memory. */
run_res = spci_run(SERVICE_VM1, 0);
@@ -1339,7 +1440,9 @@
send_memory_and_retrieve_request(
SPCI_MEM_LEND_32, mb.send, HF_PRIMARY_VM_ID, SERVICE_VM1,
- constituents, ARRAY_SIZE(constituents), 0, SPCI_MEMORY_RO_NX);
+ constituents, ARRAY_SIZE(constituents), 0, SPCI_DATA_ACCESS_RO,
+ SPCI_DATA_ACCESS_RO, SPCI_INSTRUCTION_ACCESS_NOT_SPECIFIED,
+ SPCI_INSTRUCTION_ACCESS_NX);
run_res = spci_run(SERVICE_VM1, 0);
EXPECT_EQ(exception_handler_receive_exception_count(&run_res, mb.recv),
@@ -1363,13 +1466,15 @@
memset_s(ptr, sizeof(pages) * 2, 'b', PAGE_SIZE * 2);
struct spci_memory_region_constituent constituents[] = {
- spci_memory_region_constituent_init((uint64_t)pages, 2),
+ {.address = (uint64_t)pages, .page_count = 2},
};
/* Lend memory to VM1. */
send_memory_and_retrieve_request(
SPCI_MEM_LEND_32, mb.send, HF_PRIMARY_VM_ID, SERVICE_VM1,
- constituents, ARRAY_SIZE(constituents), 0, SPCI_MEMORY_RO_X);
+ constituents, ARRAY_SIZE(constituents), 0, SPCI_DATA_ACCESS_RO,
+ SPCI_DATA_ACCESS_RO, SPCI_INSTRUCTION_ACCESS_NOT_SPECIFIED,
+ SPCI_INSTRUCTION_ACCESS_X);
/* Let the memory be accessed. */
run_res = spci_run(SERVICE_VM1, 0);
@@ -1378,26 +1483,25 @@
/* Ensure we can't donate any sub section of memory to another VM. */
constituents[0].page_count = 1;
for (int i = 1; i < PAGE_SIZE * 2; i++) {
- uint64_t address = (uint64_t)pages + PAGE_SIZE;
- constituents[0].address_high = address << 32;
- constituents[0].address_low = (uint32_t)address;
+ constituents[0].address = (uint64_t)pages + PAGE_SIZE;
msg_size = spci_memory_region_init(
mb.send, HF_PRIMARY_VM_ID, SERVICE_VM2, constituents,
- ARRAY_SIZE(constituents), 0, 0, SPCI_MEMORY_RW_X,
+ ARRAY_SIZE(constituents), 0, 0,
+ SPCI_DATA_ACCESS_NOT_SPECIFIED,
+ SPCI_INSTRUCTION_ACCESS_NOT_SPECIFIED,
SPCI_MEMORY_NORMAL_MEM, SPCI_MEMORY_CACHE_WRITE_BACK,
SPCI_MEMORY_OUTER_SHAREABLE);
- EXPECT_SPCI_ERROR(spci_mem_donate(msg_size, msg_size, 0),
- SPCI_INVALID_PARAMETERS);
+ EXPECT_SPCI_ERROR(spci_mem_donate(msg_size, msg_size),
+ SPCI_DENIED);
}
/* Ensure we can't donate to the only borrower. */
msg_size = spci_memory_region_init(
mb.send, HF_PRIMARY_VM_ID, SERVICE_VM1, constituents,
- ARRAY_SIZE(constituents), 0, 0, SPCI_MEMORY_RW_X,
- SPCI_MEMORY_NORMAL_MEM, SPCI_MEMORY_CACHE_WRITE_BACK,
- SPCI_MEMORY_OUTER_SHAREABLE);
- EXPECT_SPCI_ERROR(spci_mem_donate(msg_size, msg_size, 0),
- SPCI_INVALID_PARAMETERS);
+ ARRAY_SIZE(constituents), 0, 0, SPCI_DATA_ACCESS_NOT_SPECIFIED,
+ SPCI_INSTRUCTION_ACCESS_NOT_SPECIFIED, SPCI_MEMORY_NORMAL_MEM,
+ SPCI_MEMORY_CACHE_WRITE_BACK, SPCI_MEMORY_OUTER_SHAREABLE);
+ EXPECT_SPCI_ERROR(spci_mem_donate(msg_size, msg_size), SPCI_DENIED);
}
/**
@@ -1417,14 +1521,15 @@
memset_s(ptr, sizeof(pages), 'b', PAGE_SIZE * 4);
struct spci_memory_region_constituent constituents[] = {
- spci_memory_region_constituent_init((uint64_t)pages, 2),
- spci_memory_region_constituent_init(
- (uint64_t)pages + PAGE_SIZE * 2, 2),
+ {.address = (uint64_t)pages, .page_count = 2},
+ {.address = (uint64_t)pages + PAGE_SIZE * 2, .page_count = 2},
};
send_memory_and_retrieve_request(
SPCI_MEM_SHARE_32, mb.send, HF_PRIMARY_VM_ID, SERVICE_VM1,
- constituents, ARRAY_SIZE(constituents), 0, SPCI_MEMORY_RO_X);
+ constituents, ARRAY_SIZE(constituents), 0, SPCI_DATA_ACCESS_RO,
+ SPCI_DATA_ACCESS_RO, SPCI_INSTRUCTION_ACCESS_NOT_SPECIFIED,
+ SPCI_INSTRUCTION_ACCESS_NX);
/* Let the memory be accessed. */
run_res = spci_run(SERVICE_VM1, 0);
@@ -1437,26 +1542,25 @@
/* Ensure we can't donate any sub section of memory to another VM. */
constituents[0].page_count = 1;
for (int i = 1; i < PAGE_SIZE * 2; i++) {
- uint64_t address = (uint64_t)pages + PAGE_SIZE;
- constituents[0].address_high = address << 32;
- constituents[0].address_low = (uint32_t)address;
+ constituents[0].address = (uint64_t)pages + PAGE_SIZE;
msg_size = spci_memory_region_init(
mb.send, HF_PRIMARY_VM_ID, SERVICE_VM2, constituents,
- ARRAY_SIZE(constituents), 0, 0, SPCI_MEMORY_RW_X,
+ ARRAY_SIZE(constituents), 0, 0,
+ SPCI_DATA_ACCESS_NOT_SPECIFIED,
+ SPCI_INSTRUCTION_ACCESS_NOT_SPECIFIED,
SPCI_MEMORY_NORMAL_MEM, SPCI_MEMORY_CACHE_WRITE_BACK,
SPCI_MEMORY_OUTER_SHAREABLE);
- EXPECT_SPCI_ERROR(spci_mem_donate(msg_size, msg_size, 0),
- SPCI_INVALID_PARAMETERS);
+ EXPECT_SPCI_ERROR(spci_mem_donate(msg_size, msg_size),
+ SPCI_DENIED);
}
/* Ensure we can't donate to the only borrower. */
msg_size = spci_memory_region_init(
mb.send, HF_PRIMARY_VM_ID, SERVICE_VM1, constituents,
- ARRAY_SIZE(constituents), 0, 0, SPCI_MEMORY_RW_X,
- SPCI_MEMORY_NORMAL_MEM, SPCI_MEMORY_CACHE_WRITE_BACK,
- SPCI_MEMORY_OUTER_SHAREABLE);
- EXPECT_SPCI_ERROR(spci_mem_donate(msg_size, msg_size, 0),
- SPCI_INVALID_PARAMETERS);
+ ARRAY_SIZE(constituents), 0, 0, SPCI_DATA_ACCESS_NOT_SPECIFIED,
+ SPCI_INSTRUCTION_ACCESS_NOT_SPECIFIED, SPCI_MEMORY_NORMAL_MEM,
+ SPCI_MEMORY_CACHE_WRITE_BACK, SPCI_MEMORY_OUTER_SHAREABLE);
+ EXPECT_SPCI_ERROR(spci_mem_donate(msg_size, msg_size), SPCI_DENIED);
}
/**
@@ -1477,15 +1581,16 @@
memset_s(ptr, sizeof(pages), 'b', PAGE_SIZE * 4);
struct spci_memory_region_constituent constituents[] = {
- spci_memory_region_constituent_init((uint64_t)pages, 2),
- spci_memory_region_constituent_init(
- (uint64_t)pages + PAGE_SIZE * 3, 1),
+ {.address = (uint64_t)pages, .page_count = 2},
+ {.address = (uint64_t)pages + PAGE_SIZE * 3, .page_count = 1},
};
/* Lend memory to VM1. */
handle = send_memory_and_retrieve_request(
SPCI_MEM_LEND_32, mb.send, HF_PRIMARY_VM_ID, SERVICE_VM1,
- constituents, ARRAY_SIZE(constituents), 0, SPCI_MEMORY_RW_X);
+ constituents, ARRAY_SIZE(constituents), 0, SPCI_DATA_ACCESS_RW,
+ SPCI_DATA_ACCESS_RW, SPCI_INSTRUCTION_ACCESS_NOT_SPECIFIED,
+ SPCI_INSTRUCTION_ACCESS_X);
/* Let the memory be accessed. */
run_res = spci_run(SERVICE_VM1, 0);
@@ -1505,8 +1610,7 @@
/* Now attempt to share only a portion of the same area of memory. */
struct spci_memory_region_constituent constituents_subsection[] = {
- spci_memory_region_constituent_init(
- (uint64_t)pages + PAGE_SIZE * 3, 1),
+ {.address = (uint64_t)pages + PAGE_SIZE * 3, .page_count = 1},
};
check_cannot_lend_memory(mb, constituents_subsection,
ARRAY_SIZE(constituents_subsection), -1);
@@ -1517,16 +1621,15 @@
/* Attempt to lend again with different permissions. */
constituents[0].page_count = 1;
for (int i = 0; i < 2; i++) {
- uint64_t address = (uint64_t)pages + i * PAGE_SIZE;
- constituents[0].address_high = address << 32;
- constituents[0].address_low = (uint32_t)address;
+ constituents[0].address = (uint64_t)pages + i * PAGE_SIZE;
msg_size = spci_memory_region_init(
mb.send, HF_PRIMARY_VM_ID, SERVICE_VM2, constituents,
- ARRAY_SIZE(constituents), 0, 0, SPCI_MEMORY_RO_X,
+ ARRAY_SIZE(constituents), 0, 0, SPCI_DATA_ACCESS_RO,
+ SPCI_INSTRUCTION_ACCESS_NOT_SPECIFIED,
SPCI_MEMORY_NORMAL_MEM, SPCI_MEMORY_CACHE_WRITE_BACK,
SPCI_MEMORY_OUTER_SHAREABLE);
- EXPECT_SPCI_ERROR(spci_mem_lend(msg_size, msg_size, 0),
- SPCI_INVALID_PARAMETERS);
+ EXPECT_SPCI_ERROR(spci_mem_lend(msg_size, msg_size),
+ SPCI_DENIED);
}
}
@@ -1548,12 +1651,14 @@
memset_s(ptr, sizeof(pages) * 2, 'b', PAGE_SIZE * 2);
struct spci_memory_region_constituent constituents[] = {
- spci_memory_region_constituent_init((uint64_t)pages, 2),
+ {.address = (uint64_t)pages, .page_count = 2},
};
handle = send_memory_and_retrieve_request(
SPCI_MEM_SHARE_32, mb.send, HF_PRIMARY_VM_ID, SERVICE_VM1,
- constituents, ARRAY_SIZE(constituents), 0, SPCI_MEMORY_RW_X);
+ constituents, ARRAY_SIZE(constituents), 0, SPCI_DATA_ACCESS_RW,
+ SPCI_DATA_ACCESS_RW, SPCI_INSTRUCTION_ACCESS_NOT_SPECIFIED,
+ SPCI_INSTRUCTION_ACCESS_NX);
/* Let the memory be accessed. */
run_res = spci_run(SERVICE_VM1, 0);
@@ -1576,16 +1681,15 @@
/* Attempt to share again with different permissions. */
constituents[0].page_count = 1;
for (int i = 0; i < 2; i++) {
- uint64_t address = (uint64_t)pages + i * PAGE_SIZE;
- constituents[0].address_high = address << 32;
- constituents[0].address_low = (uint32_t)address;
+ constituents[0].address = (uint64_t)pages + i * PAGE_SIZE;
msg_size = spci_memory_region_init(
mb.send, HF_PRIMARY_VM_ID, SERVICE_VM2, constituents,
- ARRAY_SIZE(constituents), 0, 0, SPCI_MEMORY_RO_X,
+ ARRAY_SIZE(constituents), 0, 0, SPCI_DATA_ACCESS_RO,
+ SPCI_INSTRUCTION_ACCESS_NOT_SPECIFIED,
SPCI_MEMORY_NORMAL_MEM, SPCI_MEMORY_CACHE_WRITE_BACK,
SPCI_MEMORY_OUTER_SHAREABLE);
- EXPECT_SPCI_ERROR(spci_mem_share(msg_size, msg_size, 0),
- SPCI_INVALID_PARAMETERS);
+ EXPECT_SPCI_ERROR(spci_mem_share(msg_size, msg_size),
+ SPCI_DENIED);
}
}
@@ -1605,14 +1709,16 @@
memset_s(ptr, sizeof(pages) * 2, 'b', PAGE_SIZE * 2);
struct spci_memory_region_constituent constituents[] = {
- spci_memory_region_constituent_init((uint64_t)pages, 2),
+ {.address = (uint64_t)pages, .page_count = 2},
};
/* Lend memory with clear flag. */
handle = send_memory_and_retrieve_request(
SPCI_MEM_LEND_32, mb.send, HF_PRIMARY_VM_ID, SERVICE_VM1,
constituents, ARRAY_SIZE(constituents),
- SPCI_MEMORY_REGION_FLAG_CLEAR, SPCI_MEMORY_RO_X);
+ SPCI_MEMORY_REGION_FLAG_CLEAR, SPCI_DATA_ACCESS_RO,
+ SPCI_DATA_ACCESS_RO, SPCI_INSTRUCTION_ACCESS_NOT_SPECIFIED,
+ SPCI_INSTRUCTION_ACCESS_X);
/* Take it back again. */
spci_mem_reclaim(handle, 0);
@@ -1638,15 +1744,16 @@
memset_s(ptr, sizeof(pages) * 2, 'b', PAGE_SIZE * 2);
struct spci_memory_region_constituent constituents[] = {
- spci_memory_region_constituent_init((uint64_t)pages, 2),
+ {.address = (uint64_t)pages, .page_count = 2},
};
msg_size = spci_memory_region_init(
mb.send, HF_PRIMARY_VM_ID, SERVICE_VM1, constituents,
ARRAY_SIZE(constituents), 0, SPCI_MEMORY_REGION_FLAG_CLEAR,
- SPCI_MEMORY_RO_X, SPCI_MEMORY_NORMAL_MEM,
- SPCI_MEMORY_CACHE_WRITE_BACK, SPCI_MEMORY_OUTER_SHAREABLE);
- EXPECT_SPCI_ERROR(spci_mem_share(msg_size, msg_size, 0),
+ SPCI_DATA_ACCESS_RO, SPCI_INSTRUCTION_ACCESS_NOT_SPECIFIED,
+ SPCI_MEMORY_NORMAL_MEM, SPCI_MEMORY_CACHE_WRITE_BACK,
+ SPCI_MEMORY_OUTER_SHAREABLE);
+ EXPECT_SPCI_ERROR(spci_mem_share(msg_size, msg_size),
SPCI_INVALID_PARAMETERS);
/* Check that it has not been cleared. */
@@ -1663,7 +1770,6 @@
struct spci_value run_res;
struct mailbox_buffers mb = set_up_mailbox();
uint8_t *ptr = pages;
- uint64_t address;
SERVICE_SELECT(SERVICE_VM1, "spci_check_upper_bound", mb.send);
SERVICE_SELECT(SERVICE_VM2, "spci_check_upper_bound", mb.send);
@@ -1673,9 +1779,8 @@
/* Specify non-contiguous memory regions. */
struct spci_memory_region_constituent constituents[] = {
- spci_memory_region_constituent_init((uint64_t)pages, 1),
- spci_memory_region_constituent_init(
- (uint64_t)pages + PAGE_SIZE * 2, 1),
+ {.address = (uint64_t)pages, .page_count = 1},
+ {.address = (uint64_t)pages + PAGE_SIZE * 2, .page_count = 1},
};
/*
@@ -1686,19 +1791,17 @@
send_memory_and_retrieve_request(
SPCI_MEM_LEND_32, mb.send, HF_PRIMARY_VM_ID, SERVICE_VM1,
- constituents, ARRAY_SIZE(constituents), 0, SPCI_MEMORY_RW_X);
+ constituents, ARRAY_SIZE(constituents), 0, SPCI_DATA_ACCESS_RW,
+ SPCI_DATA_ACCESS_RW, SPCI_INSTRUCTION_ACCESS_NOT_SPECIFIED,
+ SPCI_INSTRUCTION_ACCESS_X);
run_res = spci_run(SERVICE_VM1, 0);
EXPECT_EQ(exception_handler_receive_exception_count(&run_res, mb.recv),
1);
/* Use different memory regions for verifying the second constituent. */
- address = (uint64_t)pages + PAGE_SIZE * 1;
- constituents[0].address_high = address << 32;
- constituents[0].address_low = (uint32_t)address;
- address = (uint64_t)pages + PAGE_SIZE * 3;
- constituents[1].address_high = address << 32;
- constituents[1].address_low = (uint32_t)address;
+ constituents[0].address = (uint64_t)pages + PAGE_SIZE * 1;
+ constituents[1].address = (uint64_t)pages + PAGE_SIZE * 3;
/*
* Specify that we now want to test the second constituent of the
@@ -1712,7 +1815,9 @@
*/
send_memory_and_retrieve_request(
SPCI_MEM_LEND_32, mb.send, HF_PRIMARY_VM_ID, SERVICE_VM2,
- constituents, ARRAY_SIZE(constituents), 0, SPCI_MEMORY_RW_X);
+ constituents, ARRAY_SIZE(constituents), 0, SPCI_DATA_ACCESS_RW,
+ SPCI_DATA_ACCESS_RW, SPCI_INSTRUCTION_ACCESS_NOT_SPECIFIED,
+ SPCI_INSTRUCTION_ACCESS_X);
run_res = spci_run(SERVICE_VM2, 0);
EXPECT_EQ(exception_handler_receive_exception_count(&run_res, mb.recv),
@@ -1727,7 +1832,6 @@
struct spci_value run_res;
struct mailbox_buffers mb = set_up_mailbox();
uint8_t *ptr = pages;
- uint64_t address;
SERVICE_SELECT(SERVICE_VM1, "spci_check_lower_bound", mb.send);
SERVICE_SELECT(SERVICE_VM2, "spci_check_lower_bound", mb.send);
@@ -1737,9 +1841,8 @@
/* Specify non-contiguous memory regions. */
struct spci_memory_region_constituent constituents[] = {
- spci_memory_region_constituent_init((uint64_t)pages, 1),
- spci_memory_region_constituent_init(
- (uint64_t)pages + PAGE_SIZE * 2, 1),
+ {.address = (uint64_t)pages, .page_count = 1},
+ {.address = (uint64_t)pages + PAGE_SIZE * 2, .page_count = 1},
};
/*
@@ -1750,19 +1853,17 @@
send_memory_and_retrieve_request(
SPCI_MEM_LEND_32, mb.send, HF_PRIMARY_VM_ID, SERVICE_VM1,
- constituents, ARRAY_SIZE(constituents), 0, SPCI_MEMORY_RW_X);
+ constituents, ARRAY_SIZE(constituents), 0, SPCI_DATA_ACCESS_RW,
+ SPCI_DATA_ACCESS_RW, SPCI_INSTRUCTION_ACCESS_NOT_SPECIFIED,
+ SPCI_INSTRUCTION_ACCESS_X);
run_res = spci_run(SERVICE_VM1, 0);
EXPECT_EQ(exception_handler_receive_exception_count(&run_res, mb.recv),
1);
/* Use different memory regions for verifying the second constituent. */
- address = (uint64_t)pages + PAGE_SIZE * 1;
- constituents[0].address_high = address << 32;
- constituents[0].address_low = (uint32_t)address;
- address = (uint64_t)pages + PAGE_SIZE * 3;
- constituents[1].address_high = address << 32;
- constituents[1].address_low = (uint32_t)address;
+ constituents[0].address = (uint64_t)pages + PAGE_SIZE * 1;
+ constituents[1].address = (uint64_t)pages + PAGE_SIZE * 3;
/*
* Specify that we now want to test the second constituent of the
@@ -1776,7 +1877,9 @@
*/
send_memory_and_retrieve_request(
SPCI_MEM_LEND_32, mb.send, HF_PRIMARY_VM_ID, SERVICE_VM2,
- constituents, ARRAY_SIZE(constituents), 0, SPCI_MEMORY_RW_X);
+ constituents, ARRAY_SIZE(constituents), 0, SPCI_DATA_ACCESS_RW,
+ SPCI_DATA_ACCESS_RW, SPCI_INSTRUCTION_ACCESS_NOT_SPECIFIED,
+ SPCI_INSTRUCTION_ACCESS_X);
run_res = spci_run(SERVICE_VM2, 0);
EXPECT_EQ(exception_handler_receive_exception_count(&run_res, mb.recv),
diff --git a/test/vmapi/primary_with_secondaries/services/memory.c b/test/vmapi/primary_with_secondaries/services/memory.c
index 0a1c561..2c07835 100644
--- a/test/vmapi/primary_with_secondaries/services/memory.c
+++ b/test/vmapi/primary_with_secondaries/services/memory.c
@@ -16,6 +16,7 @@
#include "hf/arch/vm/interrupts.h"
+#include "hf/check.h"
#include "hf/mm.h"
#include "hf/std.h"
@@ -39,14 +40,16 @@
struct spci_value ret = spci_msg_wait();
spci_vm_id_t sender = retrieve_memory_from_message(
recv_buf, send_buf, ret, NULL);
- struct spci_retrieved_memory_region *memory_region =
- (struct spci_retrieved_memory_region *)recv_buf;
- struct spci_receiver_address_range *range =
- spci_retrieved_memory_region_first_receiver_range(
- memory_region);
- uint8_t *ptr =
- (uint8_t *)spci_memory_region_constituent_get_address(
- &range->constituents[0]);
+ struct spci_memory_region *memory_region =
+ (struct spci_memory_region *)recv_buf;
+ struct spci_composite_memory_region *composite =
+ spci_memory_region_get_composite(memory_region, 0);
+ uint8_t *ptr = (uint8_t *)composite->constituents[0].address;
+
+ ASSERT_EQ(memory_region->receiver_count, 1);
+ ASSERT_NE(memory_region->receivers[0]
+ .composite_memory_region_offset,
+ 0);
/* Allow the memory to be populated. */
EXPECT_EQ(spci_yield().func, SPCI_SUCCESS_32);
@@ -66,14 +69,16 @@
{
void *send_buf = SERVICE_SEND_BUFFER();
struct spci_memory_region_constituent constituents[] = {
- spci_memory_region_constituent_init((uint64_t)&page, 1),
+ {.address = (uint64_t)&page, .page_count = 1},
};
/* Give memory to the primary. */
send_memory_and_retrieve_request(
SPCI_MEM_DONATE_32, send_buf, hf_vm_get_id(), HF_PRIMARY_VM_ID,
constituents, ARRAY_SIZE(constituents),
- SPCI_MEMORY_REGION_FLAG_CLEAR, SPCI_MEMORY_RW_X);
+ SPCI_MEMORY_REGION_FLAG_CLEAR, SPCI_DATA_ACCESS_NOT_SPECIFIED,
+ SPCI_DATA_ACCESS_RW, SPCI_INSTRUCTION_ACCESS_NOT_SPECIFIED,
+ SPCI_INSTRUCTION_ACCESS_X);
exception_setup(NULL, exception_handler_yield_data_abort);
@@ -87,14 +92,16 @@
{
void *send_buf = SERVICE_SEND_BUFFER();
struct spci_memory_region_constituent constituents[] = {
- spci_memory_region_constituent_init((uint64_t)&page, 1),
+ {.address = (uint64_t)&page, .page_count = 1},
};
/* Lend memory to the primary. */
send_memory_and_retrieve_request(
SPCI_MEM_LEND_32, send_buf, hf_vm_get_id(), HF_PRIMARY_VM_ID,
constituents, ARRAY_SIZE(constituents),
- SPCI_MEMORY_REGION_FLAG_CLEAR, SPCI_MEMORY_RW_X);
+ SPCI_MEMORY_REGION_FLAG_CLEAR, SPCI_DATA_ACCESS_RW,
+ SPCI_DATA_ACCESS_RW, SPCI_INSTRUCTION_ACCESS_NOT_SPECIFIED,
+ SPCI_INSTRUCTION_ACCESS_X);
exception_setup(NULL, exception_handler_yield_data_abort);
@@ -116,13 +123,12 @@
spci_vm_id_t sender =
retrieve_memory_from_message(recv_buf, send_buf, ret, NULL);
- struct spci_retrieved_memory_region *memory_region =
- (struct spci_retrieved_memory_region *)recv_buf;
- struct spci_receiver_address_range *range =
- spci_retrieved_memory_region_first_receiver_range(
- memory_region);
- ptr = (uint8_t *)spci_memory_region_constituent_get_address(
- &range->constituents[0]);
+ struct spci_memory_region *memory_region =
+ (struct spci_memory_region *)recv_buf;
+ struct spci_composite_memory_region *composite =
+ spci_memory_region_get_composite(memory_region, 0);
+
+ ptr = (uint8_t *)composite->constituents[0].address;
/* Check that one has access to the shared region. */
for (i = 0; i < PAGE_SIZE; ++i) {
@@ -132,8 +138,10 @@
/* Give the memory back and notify the sender. */
send_memory_and_retrieve_request(
SPCI_MEM_DONATE_32, send_buf, hf_vm_get_id(), sender,
- range->constituents, range->constituent_count, 0,
- SPCI_MEMORY_RW_X);
+ composite->constituents, composite->constituent_count, 0,
+ SPCI_DATA_ACCESS_NOT_SPECIFIED, SPCI_DATA_ACCESS_RW,
+ SPCI_INSTRUCTION_ACCESS_NOT_SPECIFIED,
+ SPCI_INSTRUCTION_ACCESS_X);
EXPECT_EQ(spci_rx_release().func, SPCI_SUCCESS_32);
/*
@@ -150,8 +158,8 @@
*/
TEST_SERVICE(spci_check_upper_bound)
{
- struct spci_retrieved_memory_region *memory_region;
- struct spci_receiver_address_range *range;
+ struct spci_memory_region *memory_region;
+ struct spci_composite_memory_region *composite;
uint8_t *ptr;
uint8_t index;
@@ -162,15 +170,12 @@
exception_setup(NULL, exception_handler_yield_data_abort);
retrieve_memory_from_message(recv_buf, send_buf, ret, NULL);
- memory_region = (struct spci_retrieved_memory_region *)recv_buf;
- range = spci_retrieved_memory_region_first_receiver_range(
- memory_region);
+ memory_region = (struct spci_memory_region *)recv_buf;
+ composite = spci_memory_region_get_composite(memory_region, 0);
/* Choose which constituent we want to test. */
- index = *(uint8_t *)spci_memory_region_constituent_get_address(
- &range->constituents[0]);
- ptr = (uint8_t *)spci_memory_region_constituent_get_address(
- &range->constituents[index]);
+ index = *(uint8_t *)composite->constituents[0].address;
+ ptr = (uint8_t *)composite->constituents[index].address;
EXPECT_EQ(spci_rx_release().func, SPCI_SUCCESS_32);
/*
@@ -187,8 +192,8 @@
*/
TEST_SERVICE(spci_check_lower_bound)
{
- struct spci_retrieved_memory_region *memory_region;
- struct spci_receiver_address_range *range;
+ struct spci_memory_region *memory_region;
+ struct spci_composite_memory_region *composite;
uint8_t *ptr;
uint8_t index;
@@ -199,15 +204,12 @@
exception_setup(NULL, exception_handler_yield_data_abort);
retrieve_memory_from_message(recv_buf, send_buf, ret, NULL);
- memory_region = (struct spci_retrieved_memory_region *)recv_buf;
- range = spci_retrieved_memory_region_first_receiver_range(
- memory_region);
+ memory_region = (struct spci_memory_region *)recv_buf;
+ composite = spci_memory_region_get_composite(memory_region, 0);
/* Choose which constituent we want to test. */
- index = *(uint8_t *)spci_memory_region_constituent_get_address(
- &range->constituents[0]);
- ptr = (uint8_t *)spci_memory_region_constituent_get_address(
- &range->constituents[index]);
+ index = *(uint8_t *)composite->constituents[0].address;
+ ptr = (uint8_t *)composite->constituents[index].address;
EXPECT_EQ(spci_rx_release().func, SPCI_SUCCESS_32);
/*
@@ -231,23 +233,23 @@
struct spci_value ret = spci_msg_wait();
spci_vm_id_t sender =
retrieve_memory_from_message(recv_buf, send_buf, ret, NULL);
- struct spci_retrieved_memory_region *memory_region =
- (struct spci_retrieved_memory_region *)recv_buf;
- struct spci_receiver_address_range *range =
- spci_retrieved_memory_region_first_receiver_range(
- memory_region);
+ struct spci_memory_region *memory_region =
+ (struct spci_memory_region *)recv_buf;
+ struct spci_composite_memory_region *composite =
+ spci_memory_region_get_composite(memory_region, 0);
ASSERT_EQ(sender, HF_PRIMARY_VM_ID);
exception_setup(NULL, exception_handler_yield_data_abort);
- ptr = (uint8_t *)spci_memory_region_constituent_get_address(
- &range->constituents[0]);
+ ptr = (uint8_t *)composite->constituents[0].address;
/* Donate memory to next VM. */
send_memory_and_retrieve_request(
SPCI_MEM_DONATE_32, send_buf, hf_vm_get_id(), SERVICE_VM2,
- range->constituents, range->constituent_count, 0,
- SPCI_MEMORY_RW_X);
+ composite->constituents, composite->constituent_count, 0,
+ SPCI_DATA_ACCESS_NOT_SPECIFIED, SPCI_DATA_ACCESS_RW,
+ SPCI_INSTRUCTION_ACCESS_NOT_SPECIFIED,
+ SPCI_INSTRUCTION_ACCESS_X);
EXPECT_EQ(spci_rx_release().func, SPCI_SUCCESS_32);
/* Ensure that we are unable to modify memory any more. */
@@ -268,13 +270,12 @@
struct spci_value ret = spci_msg_wait();
spci_vm_id_t sender =
retrieve_memory_from_message(recv_buf, send_buf, ret, NULL);
- struct spci_retrieved_memory_region *memory_region =
- (struct spci_retrieved_memory_region *)recv_buf;
- struct spci_receiver_address_range *range =
- spci_retrieved_memory_region_first_receiver_range(
- memory_region);
+ struct spci_memory_region *memory_region =
+ (struct spci_memory_region *)recv_buf;
+ struct spci_composite_memory_region *composite =
+ spci_memory_region_get_composite(memory_region, 0);
struct spci_memory_region_constituent constituent =
- range->constituents[0];
+ composite->constituents[0];
EXPECT_EQ(spci_rx_release().func, SPCI_SUCCESS_32);
@@ -282,17 +283,19 @@
spci_yield();
/* Give the memory back and notify the sender. */
- send_memory_and_retrieve_request(SPCI_MEM_DONATE_32, send_buf,
- hf_vm_get_id(), sender, &constituent,
- 1, 0, SPCI_MEMORY_RW_X);
+ send_memory_and_retrieve_request(
+ SPCI_MEM_DONATE_32, send_buf, hf_vm_get_id(), sender,
+ &constituent, 1, 0, SPCI_DATA_ACCESS_NOT_SPECIFIED,
+ SPCI_DATA_ACCESS_RW, SPCI_INSTRUCTION_ACCESS_NOT_SPECIFIED,
+ SPCI_INSTRUCTION_ACCESS_X);
/* Attempt to donate the memory to another VM. */
msg_size = spci_memory_region_init(
send_buf, hf_vm_get_id(), SERVICE_VM2, &constituent, 1, 0, 0,
- SPCI_MEMORY_RW_X, SPCI_MEMORY_NORMAL_MEM,
+ SPCI_DATA_ACCESS_NOT_SPECIFIED,
+ SPCI_INSTRUCTION_ACCESS_NOT_SPECIFIED, SPCI_MEMORY_NORMAL_MEM,
SPCI_MEMORY_CACHE_WRITE_BACK, SPCI_MEMORY_OUTER_SHAREABLE);
- EXPECT_SPCI_ERROR(spci_mem_donate(msg_size, msg_size, 0),
- SPCI_INVALID_PARAMETERS);
+ EXPECT_SPCI_ERROR(spci_mem_donate(msg_size, msg_size), SPCI_DENIED);
spci_yield();
}
@@ -308,16 +311,14 @@
for (;;) {
struct spci_value ret = spci_msg_wait();
- struct spci_retrieved_memory_region *memory_region;
- struct spci_receiver_address_range *range;
+ struct spci_memory_region *memory_region;
+ struct spci_composite_memory_region *composite;
uint8_t *ptr;
retrieve_memory_from_message(recv_buf, send_buf, ret, NULL);
- memory_region = (struct spci_retrieved_memory_region *)recv_buf;
- range = spci_retrieved_memory_region_first_receiver_range(
- memory_region);
- ptr = (uint8_t *)spci_memory_region_constituent_get_address(
- &range->constituents[0]);
+ memory_region = (struct spci_memory_region *)recv_buf;
+ composite = spci_memory_region_get_composite(memory_region, 0);
+ ptr = (uint8_t *)composite->constituents[0].address;
EXPECT_EQ(spci_rx_release().func, SPCI_SUCCESS_32);
ptr[0] = 'd';
@@ -341,26 +342,28 @@
struct spci_value ret = spci_msg_wait();
spci_vm_id_t sender =
retrieve_memory_from_message(recv_buf, send_buf, ret, NULL);
- struct spci_retrieved_memory_region *memory_region =
- (struct spci_retrieved_memory_region *)recv_buf;
- struct spci_receiver_address_range *range =
- spci_retrieved_memory_region_first_receiver_range(
- memory_region);
+ struct spci_memory_region *memory_region =
+ (struct spci_memory_region *)recv_buf;
+ struct spci_composite_memory_region *composite =
+ spci_memory_region_get_composite(memory_region, 0);
/* Give the memory back and notify the sender. */
send_memory_and_retrieve_request(
SPCI_MEM_DONATE_32, send_buf, hf_vm_get_id(), sender,
- range->constituents, range->constituent_count, 0,
- SPCI_MEMORY_RW_X);
+ composite->constituents, composite->constituent_count, 0,
+ SPCI_DATA_ACCESS_NOT_SPECIFIED, SPCI_DATA_ACCESS_RW,
+ SPCI_INSTRUCTION_ACCESS_NOT_SPECIFIED,
+ SPCI_INSTRUCTION_ACCESS_X);
/* Fail to donate the memory from the primary to VM2. */
msg_size = spci_memory_region_init(
- send_buf, HF_PRIMARY_VM_ID, SERVICE_VM2, range->constituents,
- range->constituent_count, 0, 0, SPCI_MEMORY_RW_X,
- SPCI_MEMORY_NORMAL_MEM, SPCI_MEMORY_CACHE_WRITE_BACK,
- SPCI_MEMORY_OUTER_SHAREABLE);
+ send_buf, HF_PRIMARY_VM_ID, SERVICE_VM2,
+ composite->constituents, composite->constituent_count, 0, 0,
+ SPCI_DATA_ACCESS_NOT_SPECIFIED,
+ SPCI_INSTRUCTION_ACCESS_NOT_SPECIFIED, SPCI_MEMORY_NORMAL_MEM,
+ SPCI_MEMORY_CACHE_WRITE_BACK, SPCI_MEMORY_OUTER_SHAREABLE);
EXPECT_EQ(spci_rx_release().func, SPCI_SUCCESS_32);
- EXPECT_SPCI_ERROR(spci_mem_donate(msg_size, msg_size, 0),
+ EXPECT_SPCI_ERROR(spci_mem_donate(msg_size, msg_size),
SPCI_INVALID_PARAMETERS);
spci_yield();
}
@@ -384,19 +387,19 @@
struct spci_value ret = spci_msg_wait();
spci_vm_id_t sender = retrieve_memory_from_message(
recv_buf, send_buf, ret, &handle);
- struct spci_retrieved_memory_region *memory_region =
- (struct spci_retrieved_memory_region *)recv_buf;
- struct spci_receiver_address_range *range =
- spci_retrieved_memory_region_first_receiver_range(
- memory_region);
+ struct spci_memory_region *memory_region =
+ (struct spci_memory_region *)recv_buf;
+ struct spci_composite_memory_region *composite =
+ spci_memory_region_get_composite(memory_region, 0);
struct spci_memory_region_constituent *constituents =
- range->constituents;
+ composite->constituents;
- ptr = (uint8_t *)spci_memory_region_constituent_get_address(
- &constituents[0]);
+ /* ASSERT_TRUE isn't enough for clang-analyze. */
+ CHECK(composite != NULL);
+
+ ptr = (uint8_t *)constituents[0].address;
count = constituents[0].page_count;
- ptr2 = (uint8_t *)spci_memory_region_constituent_get_address(
- &constituents[1]);
+ ptr2 = (uint8_t *)constituents[1].address;
count2 = constituents[1].page_count;
/* Relevant information read, mailbox can be cleared. */
EXPECT_EQ(spci_rx_release().func, SPCI_SUCCESS_32);
@@ -410,9 +413,7 @@
}
/* Give the memory back and notify the sender. */
- *(struct spci_mem_relinquish *)send_buf =
- (struct spci_mem_relinquish){.handle = handle,
- .sender = hf_vm_get_id()};
+ spci_mem_relinquish_init(send_buf, handle, 0, hf_vm_get_id());
EXPECT_EQ(spci_mem_relinquish().func, SPCI_SUCCESS_32);
EXPECT_EQ(spci_msg_send(hf_vm_get_id(), sender, 0, 0).func,
SPCI_SUCCESS_32);
@@ -433,8 +434,8 @@
for (;;) {
size_t i;
spci_memory_handle_t handle;
- struct spci_retrieved_memory_region *memory_region;
- struct spci_receiver_address_range *range;
+ struct spci_memory_region *memory_region;
+ struct spci_composite_memory_region *composite;
uint8_t *ptr;
void *recv_buf = SERVICE_RECV_BUFFER();
@@ -442,12 +443,10 @@
struct spci_value ret = spci_msg_wait();
retrieve_memory_from_message(recv_buf, send_buf, ret, &handle);
- memory_region = (struct spci_retrieved_memory_region *)recv_buf;
- range = spci_retrieved_memory_region_first_receiver_range(
- memory_region);
+ memory_region = (struct spci_memory_region *)recv_buf;
+ composite = spci_memory_region_get_composite(memory_region, 0);
- ptr = (uint8_t *)spci_memory_region_constituent_get_address(
- &range->constituents[0]);
+ ptr = (uint8_t *)composite->constituents[0].address;
/* Check that we have access to the shared region. */
for (i = 0; i < PAGE_SIZE; ++i) {
@@ -458,9 +457,7 @@
* Attempt to relinquish the memory, which should fail because
* it was donated not lent.
*/
- *(struct spci_mem_relinquish *)send_buf =
- (struct spci_mem_relinquish){.handle = handle,
- .sender = hf_vm_get_id()};
+ spci_mem_relinquish_init(send_buf, handle, 0, hf_vm_get_id());
EXPECT_EQ(spci_rx_release().func, SPCI_SUCCESS_32);
EXPECT_SPCI_ERROR(spci_mem_relinquish(),
SPCI_INVALID_PARAMETERS);
@@ -498,18 +495,14 @@
EXPECT_EQ(spci_rx_release().func, SPCI_SUCCESS_32);
/* Trying to relinquish the memory and clear it should fail. */
- *(struct spci_mem_relinquish *)send_buf =
- (struct spci_mem_relinquish){
- .handle = handle,
- .sender = hf_vm_get_id(),
- .flags = SPCI_MEMORY_REGION_FLAG_CLEAR};
+ spci_mem_relinquish_init(send_buf, handle,
+ SPCI_MEMORY_REGION_FLAG_CLEAR,
+ hf_vm_get_id());
EXPECT_SPCI_ERROR(spci_mem_relinquish(),
SPCI_INVALID_PARAMETERS);
/* Give the memory back and notify the sender. */
- *(struct spci_mem_relinquish *)send_buf =
- (struct spci_mem_relinquish){.handle = handle,
- .sender = hf_vm_get_id()};
+ spci_mem_relinquish_init(send_buf, handle, 0, hf_vm_get_id());
EXPECT_EQ(spci_mem_relinquish().func, SPCI_SUCCESS_32);
EXPECT_EQ(spci_msg_send(hf_vm_get_id(), sender, 0, 0).func,
SPCI_SUCCESS_32);
@@ -529,36 +522,36 @@
struct spci_value ret = spci_msg_wait();
spci_vm_id_t sender =
retrieve_memory_from_message(recv_buf, send_buf, ret, &handle);
- struct spci_retrieved_memory_region *memory_region =
- (struct spci_retrieved_memory_region *)recv_buf;
- struct spci_receiver_address_range *range =
- spci_retrieved_memory_region_first_receiver_range(
- memory_region);
+ struct spci_memory_region *memory_region =
+ (struct spci_memory_region *)recv_buf;
+ struct spci_composite_memory_region *composite =
+ spci_memory_region_get_composite(memory_region, 0);
/* Give the memory back and notify the sender. */
- *(struct spci_mem_relinquish *)send_buf = (struct spci_mem_relinquish){
- .handle = handle, .sender = hf_vm_get_id()};
+ spci_mem_relinquish_init(send_buf, handle, 0, hf_vm_get_id());
EXPECT_EQ(spci_mem_relinquish().func, SPCI_SUCCESS_32);
EXPECT_EQ(spci_msg_send(hf_vm_get_id(), sender, 0, 0).func,
SPCI_SUCCESS_32);
/* Ensure we cannot lend from the primary to another secondary. */
msg_size = spci_memory_region_init(
- send_buf, HF_PRIMARY_VM_ID, SERVICE_VM2, range->constituents,
- range->constituent_count, 0, 0, SPCI_MEMORY_RW_X,
+ send_buf, HF_PRIMARY_VM_ID, SERVICE_VM2,
+ composite->constituents, composite->constituent_count, 0, 0,
+ SPCI_DATA_ACCESS_RW, SPCI_INSTRUCTION_ACCESS_X,
SPCI_MEMORY_NORMAL_MEM, SPCI_MEMORY_CACHE_WRITE_BACK,
SPCI_MEMORY_OUTER_SHAREABLE);
- EXPECT_SPCI_ERROR(spci_mem_lend(msg_size, msg_size, 0),
+ EXPECT_SPCI_ERROR(spci_mem_lend(msg_size, msg_size),
SPCI_INVALID_PARAMETERS);
/* Ensure we cannot share from the primary to another secondary. */
msg_size = spci_memory_region_init(
- send_buf, HF_PRIMARY_VM_ID, SERVICE_VM2, range->constituents,
- range->constituent_count, 0, 0, SPCI_MEMORY_RW_X,
+ send_buf, HF_PRIMARY_VM_ID, SERVICE_VM2,
+ composite->constituents, composite->constituent_count, 0, 0,
+ SPCI_DATA_ACCESS_RW, SPCI_INSTRUCTION_ACCESS_X,
SPCI_MEMORY_NORMAL_MEM, SPCI_MEMORY_CACHE_WRITE_BACK,
SPCI_MEMORY_OUTER_SHAREABLE);
EXPECT_EQ(spci_rx_release().func, SPCI_SUCCESS_32);
- EXPECT_SPCI_ERROR(spci_mem_share(msg_size, msg_size, 0),
+ EXPECT_SPCI_ERROR(spci_mem_share(msg_size, msg_size),
SPCI_INVALID_PARAMETERS);
spci_yield();
@@ -578,16 +571,18 @@
struct spci_value ret = spci_msg_wait();
spci_vm_id_t sender = retrieve_memory_from_message(
recv_buf, send_buf, ret, &handle);
- struct spci_retrieved_memory_region *memory_region =
- (struct spci_retrieved_memory_region *)recv_buf;
- struct spci_receiver_address_range *range =
- spci_retrieved_memory_region_first_receiver_range(
- memory_region);
- struct spci_memory_region_constituent *constituents =
- range->constituents;
- uint64_t *ptr =
- (uint64_t *)spci_memory_region_constituent_get_address(
- &constituents[0]);
+ struct spci_memory_region *memory_region =
+ (struct spci_memory_region *)recv_buf;
+ struct spci_composite_memory_region *composite =
+ spci_memory_region_get_composite(memory_region, 0);
+ struct spci_memory_region_constituent *constituents;
+ uint64_t *ptr;
+
+ /* ASSERT_TRUE isn't enough for clang-analyze. */
+ CHECK(composite != NULL);
+
+ constituents = composite->constituents;
+ ptr = (uint64_t *)constituents[0].address;
EXPECT_EQ(spci_rx_release().func, SPCI_SUCCESS_32);
@@ -600,9 +595,7 @@
__asm__ volatile("blr %0" ::"r"(ptr));
/* Release the memory again. */
- *(struct spci_mem_relinquish *)send_buf =
- (struct spci_mem_relinquish){.handle = handle,
- .sender = hf_vm_get_id()};
+ spci_mem_relinquish_init(send_buf, handle, 0, hf_vm_get_id());
EXPECT_EQ(spci_mem_relinquish().func, SPCI_SUCCESS_32);
EXPECT_EQ(spci_msg_send(hf_vm_get_id(), sender, 0, 0).func,
SPCI_SUCCESS_32);
@@ -610,6 +603,24 @@
}
/**
+ * Attempt to retrieve a shared page but expect to fail.
+ */
+TEST_SERVICE(spci_memory_share_fail)
+{
+ for (;;) {
+ void *recv_buf = SERVICE_RECV_BUFFER();
+ void *send_buf = SERVICE_SEND_BUFFER();
+ struct spci_value ret = spci_msg_wait();
+ spci_vm_id_t sender = retrieve_memory_from_message_expect_fail(
+ recv_buf, send_buf, ret, SPCI_DENIED);
+
+ /* Return control to primary. */
+ EXPECT_EQ(spci_msg_send(hf_vm_get_id(), sender, 0, 0).func,
+ SPCI_SUCCESS_32);
+ }
+}
+
+/**
* Attempt to read and write to a shared page.
*/
TEST_SERVICE(spci_memory_lend_relinquish_RW)
@@ -626,18 +637,16 @@
struct spci_value ret = spci_msg_wait();
spci_vm_id_t sender = retrieve_memory_from_message(
recv_buf, send_buf, ret, &handle);
- struct spci_retrieved_memory_region *memory_region =
- (struct spci_retrieved_memory_region *)recv_buf;
- struct spci_receiver_address_range *range =
- spci_retrieved_memory_region_first_receiver_range(
- memory_region);
+ struct spci_memory_region *memory_region =
+ (struct spci_memory_region *)recv_buf;
+ struct spci_composite_memory_region *composite =
+ spci_memory_region_get_composite(memory_region, 0);
struct spci_memory_region_constituent constituent_copy =
- range->constituents[0];
+ composite->constituents[0];
EXPECT_EQ(spci_rx_release().func, SPCI_SUCCESS_32);
- ptr = (uint8_t *)spci_memory_region_constituent_get_address(
- &constituent_copy);
+ ptr = (uint8_t *)constituent_copy.address;
/* Check that we have read access. */
for (i = 0; i < PAGE_SIZE; ++i) {
@@ -653,9 +662,7 @@
}
/* Give the memory back and notify the sender. */
- *(struct spci_mem_relinquish *)send_buf =
- (struct spci_mem_relinquish){.handle = handle,
- .sender = hf_vm_get_id()};
+ spci_mem_relinquish_init(send_buf, handle, 0, hf_vm_get_id());
EXPECT_EQ(spci_mem_relinquish().func, SPCI_SUCCESS_32);
EXPECT_EQ(spci_msg_send(hf_vm_get_id(), sender, 0, 0).func,
SPCI_SUCCESS_32);
@@ -671,20 +678,18 @@
void *recv_buf = SERVICE_RECV_BUFFER();
void *send_buf = SERVICE_SEND_BUFFER();
- struct spci_retrieved_memory_region *memory_region;
- struct spci_receiver_address_range *range;
+ struct spci_memory_region *memory_region;
+ struct spci_composite_memory_region *composite;
struct spci_memory_region_constituent constituent_copy;
retrieve_memory_from_message(recv_buf, send_buf, ret, NULL);
- memory_region = (struct spci_retrieved_memory_region *)recv_buf;
- range = spci_retrieved_memory_region_first_receiver_range(
- memory_region);
- constituent_copy = range->constituents[0];
+ memory_region = (struct spci_memory_region *)recv_buf;
+ composite = spci_memory_region_get_composite(memory_region, 0);
+ constituent_copy = composite->constituents[0];
EXPECT_EQ(spci_rx_release().func, SPCI_SUCCESS_32);
- ptr = (uint8_t *)spci_memory_region_constituent_get_address(
- &constituent_copy);
+ ptr = (uint8_t *)constituent_copy.address;
/* Check that we have read access. */
for (i = 0; i < PAGE_SIZE; ++i) {
@@ -697,24 +702,24 @@
}
for (i = 1; i < PAGE_SIZE * 2; i++) {
- uint64_t address = (uint64_t)ptr + i;
- constituent_copy.address_high = address << 32;
- constituent_copy.address_low = (uint32_t)address;
+ constituent_copy.address = (uint64_t)ptr + i;
/* Fail to lend or share the memory from the primary. */
msg_size = spci_memory_region_init(
send_buf, HF_PRIMARY_VM_ID, SERVICE_VM2,
- &constituent_copy, 1, 0, 0, SPCI_MEMORY_RW_X,
- SPCI_MEMORY_NORMAL_MEM, SPCI_MEMORY_CACHE_WRITE_BACK,
+ &constituent_copy, 1, 0, 0, SPCI_DATA_ACCESS_RW,
+ SPCI_INSTRUCTION_ACCESS_X, SPCI_MEMORY_NORMAL_MEM,
+ SPCI_MEMORY_CACHE_WRITE_BACK,
SPCI_MEMORY_OUTER_SHAREABLE);
- EXPECT_SPCI_ERROR(spci_mem_lend(msg_size, msg_size, 0),
+ EXPECT_SPCI_ERROR(spci_mem_lend(msg_size, msg_size),
SPCI_INVALID_PARAMETERS);
msg_size = spci_memory_region_init(
send_buf, HF_PRIMARY_VM_ID, SERVICE_VM2,
- &constituent_copy, 1, 0, 0, SPCI_MEMORY_RW_X,
- SPCI_MEMORY_NORMAL_MEM, SPCI_MEMORY_CACHE_WRITE_BACK,
+ &constituent_copy, 1, 0, 0, SPCI_DATA_ACCESS_RW,
+ SPCI_INSTRUCTION_ACCESS_X, SPCI_MEMORY_NORMAL_MEM,
+ SPCI_MEMORY_CACHE_WRITE_BACK,
SPCI_MEMORY_OUTER_SHAREABLE);
- EXPECT_SPCI_ERROR(spci_mem_share(msg_size, msg_size, 0),
+ EXPECT_SPCI_ERROR(spci_mem_share(msg_size, msg_size),
SPCI_INVALID_PARAMETERS);
}
diff --git a/test/vmapi/primary_with_secondaries/services/unmapped.c b/test/vmapi/primary_with_secondaries/services/unmapped.c
index b4a07a4..30d9f07 100644
--- a/test/vmapi/primary_with_secondaries/services/unmapped.c
+++ b/test/vmapi/primary_with_secondaries/services/unmapped.c
@@ -41,17 +41,16 @@
void *send_buf = SERVICE_SEND_BUFFER();
/* Give some memory to the primary VM so that it's unmapped. */
struct spci_memory_region_constituent constituents[] = {
- spci_memory_region_constituent_init(
- (uint64_t)(&pages[PAGE_SIZE]), 1),
+ {.address = (uint64_t)(&pages[PAGE_SIZE]), .page_count = 1},
};
uint32_t msg_size = spci_memory_region_init(
send_buf, hf_vm_get_id(), HF_PRIMARY_VM_ID, constituents,
- ARRAY_SIZE(constituents), 0, 0, SPCI_MEMORY_RW_X,
- SPCI_MEMORY_NORMAL_MEM, SPCI_MEMORY_CACHE_WRITE_BACK,
- SPCI_MEMORY_OUTER_SHAREABLE);
+ ARRAY_SIZE(constituents), 0, 0, SPCI_DATA_ACCESS_NOT_SPECIFIED,
+ SPCI_INSTRUCTION_ACCESS_NOT_SPECIFIED, SPCI_MEMORY_NORMAL_MEM,
+ SPCI_MEMORY_CACHE_WRITE_BACK, SPCI_MEMORY_OUTER_SHAREABLE);
exception_setup(NULL, exception_handler_yield_data_abort);
- EXPECT_EQ(spci_mem_donate(msg_size, msg_size, 0).func, SPCI_SUCCESS_32);
+ EXPECT_EQ(spci_mem_donate(msg_size, msg_size).func, SPCI_SUCCESS_32);
*(volatile uint64_t *)(&pages[PAGE_SIZE - 6]);
FAIL("Exception not generated by invalid access.");
diff --git a/vmlib/spci.c b/vmlib/spci.c
index 55347cf..c404e2f 100644
--- a/vmlib/spci.c
+++ b/vmlib/spci.c
@@ -24,19 +24,6 @@
#include <linux/kernel.h>
#include <linux/string.h>
-/* Linux doesn't have a checked memcpy, so just use the unchecked version. */
-void memcpy_s(void *dest, size_t destsz, const void *src, size_t count)
-{
- memcpy(dest, src, count);
-}
-
-/*
- * Use macro from Linux because we can't include Hafnium internal headers here.
- */
-#ifndef align_up
-#define align_up(v, a) ALIGN((v), (a))
-#endif
-
#else
#include "hf/std.h"
#endif
@@ -46,127 +33,156 @@
* information to it. Returns the length in bytes occupied by the data copied to
* `memory_region` (attributes, constituents and memory region header size).
*/
+static uint32_t spci_memory_region_init_internal(
+ struct spci_memory_region *memory_region, spci_vm_id_t sender,
+ spci_memory_attributes_t attributes, spci_memory_region_flags_t flags,
+ spci_memory_handle_t handle, uint32_t tag, spci_vm_id_t receiver,
+ spci_memory_access_permissions_t permissions,
+ const struct spci_memory_region_constituent constituents[],
+ uint32_t constituent_count)
+{
+ struct spci_composite_memory_region *composite_memory_region;
+ uint32_t index;
+ uint32_t constituents_length =
+ constituent_count *
+ sizeof(struct spci_memory_region_constituent);
+
+ memory_region->sender = sender;
+ memory_region->attributes = attributes;
+ memory_region->reserved_0 = 0;
+ memory_region->flags = flags;
+ memory_region->handle = handle;
+ memory_region->tag = tag;
+ memory_region->reserved_1 = 0;
+ memory_region->receiver_count = 1;
+ memory_region->receivers[0].receiver_permissions.receiver = receiver;
+ memory_region->receivers[0].receiver_permissions.permissions =
+ permissions;
+ memory_region->receivers[0].receiver_permissions.flags = 0;
+ /*
+	 * Note that `sizeof(struct spci_memory_region)` and `sizeof(struct
+	 * spci_memory_access)` must both be multiples of 16 (as verified by the
+	 * asserts in `spci_memory.c`), so it is guaranteed that the offset we
+ * calculate here is aligned to a 64-bit boundary and so 64-bit values
+ * can be copied without alignment faults.
+ */
+ memory_region->receivers[0].composite_memory_region_offset =
+ sizeof(struct spci_memory_region) +
+ memory_region->receiver_count *
+ sizeof(struct spci_memory_access);
+ memory_region->receivers[0].reserved_0 = 0;
+
+ composite_memory_region =
+ spci_memory_region_get_composite(memory_region, 0);
+
+ composite_memory_region->page_count = 0;
+ composite_memory_region->constituent_count = constituent_count;
+ composite_memory_region->reserved_0 = 0;
+
+ for (index = 0; index < constituent_count; index++) {
+ composite_memory_region->constituents[index] =
+ constituents[index];
+ composite_memory_region->page_count +=
+ constituents[index].page_count;
+ }
+
+ /*
+ * TODO: Add assert ensuring that the specified message
+ * length is not greater than SPCI_MSG_PAYLOAD_MAX.
+ */
+
+ return memory_region->receivers[0].composite_memory_region_offset +
+ sizeof(struct spci_composite_memory_region) +
+ constituents_length;
+}
+
+/**
+ * Initialises the given `spci_memory_region` and copies the constituent
+ * information to it. Returns the length in bytes occupied by the data copied to
+ * `memory_region` (attributes, constituents and memory region header size).
+ */
uint32_t spci_memory_region_init(
struct spci_memory_region *memory_region, spci_vm_id_t sender,
spci_vm_id_t receiver,
const struct spci_memory_region_constituent constituents[],
uint32_t constituent_count, uint32_t tag,
- spci_memory_region_flags_t flags, enum spci_memory_access access,
+ spci_memory_region_flags_t flags, enum spci_data_access data_access,
+ enum spci_instruction_access instruction_access,
enum spci_memory_type type, enum spci_memory_cacheability cacheability,
enum spci_memory_shareability shareability)
{
- uint32_t constituents_length =
- constituent_count *
- sizeof(struct spci_memory_region_constituent);
- uint32_t index;
- struct spci_memory_region_constituent *region_constituents;
- uint16_t attributes = 0;
+ spci_memory_access_permissions_t permissions = 0;
+ spci_memory_attributes_t attributes = 0;
+
+ /* Set memory region's permissions. */
+ spci_set_data_access_attr(&permissions, data_access);
+ spci_set_instruction_access_attr(&permissions, instruction_access);
/* Set memory region's page attributes. */
- spci_set_memory_access_attr(&attributes, access);
spci_set_memory_type_attr(&attributes, type);
spci_set_memory_cacheability_attr(&attributes, cacheability);
spci_set_memory_shareability_attr(&attributes, shareability);
- memory_region->tag = tag;
- memory_region->flags = flags;
- memory_region->sender = sender;
- memory_region->reserved_0 = 0;
- memory_region->reserved_1 = 0;
- memory_region->page_count = 0;
- memory_region->constituent_count = constituent_count;
- memory_region->attribute_count = 1;
- memory_region->attributes[0].receiver = receiver;
- memory_region->attributes[0].memory_attributes = attributes;
- memory_region->attributes[0].reserved_0 = 0;
- memory_region->attributes[0].reserved_1 = 0;
-
- /*
- * Constituent offset must be aligned to a 32-bit boundary so that
- * 32-bit values can be copied without alignment faults.
- */
- memory_region->constituent_offset = align_up(
- sizeof(struct spci_memory_region) +
- memory_region->attribute_count *
- sizeof(struct spci_memory_region_attributes),
- 4);
- region_constituents =
- spci_memory_region_get_constituents(memory_region);
-
- for (index = 0; index < constituent_count; index++) {
- region_constituents[index] = constituents[index];
- memory_region->page_count += constituents[index].page_count;
- }
-
- /*
- * TODO: Add assert ensuring that the specified message
- * length is not greater than SPCI_MSG_PAYLOAD_MAX.
- */
-
- return memory_region->constituent_offset + constituents_length;
+ return spci_memory_region_init_internal(
+ memory_region, sender, attributes, flags, 0, tag, receiver,
+ permissions, constituents, constituent_count);
}
uint32_t spci_memory_retrieve_request_init(
- struct spci_memory_retrieve_request *request,
- spci_memory_handle_t handle, spci_vm_id_t sender, spci_vm_id_t receiver,
- uint32_t share_func, uint32_t tag, uint32_t page_count,
- enum spci_memory_access access, enum spci_memory_type type,
- enum spci_memory_cacheability cacheability,
+ struct spci_memory_region *memory_region, spci_memory_handle_t handle,
+ spci_vm_id_t sender, spci_vm_id_t receiver, uint32_t tag,
+ spci_memory_region_flags_t flags, enum spci_data_access data_access,
+ enum spci_instruction_access instruction_access,
+ enum spci_memory_type type, enum spci_memory_cacheability cacheability,
enum spci_memory_shareability shareability)
{
- struct spci_memory_retrieve_properties *retrieve_properties =
- spci_memory_retrieve_request_first_retrieve_properties(request);
- uint16_t attributes = 0;
+ spci_memory_access_permissions_t permissions = 0;
+ spci_memory_attributes_t attributes = 0;
+
+ /* Set memory region's permissions. */
+ spci_set_data_access_attr(&permissions, data_access);
+ spci_set_instruction_access_attr(&permissions, instruction_access);
/* Set memory region's page attributes. */
- spci_set_memory_access_attr(&attributes, access);
spci_set_memory_type_attr(&attributes, type);
spci_set_memory_cacheability_attr(&attributes, cacheability);
spci_set_memory_shareability_attr(&attributes, shareability);
- request->reserved_0 = 0;
- request->reserved_1 = 0;
- request->handle = handle;
- request->sender = sender;
- request->share_func = share_func;
- request->tag = tag;
- request->attribute_count = 0;
- request->attribute_offset = 0;
- request->retrieve_properties_count = 1;
+ memory_region->sender = sender;
+ memory_region->attributes = attributes;
+ memory_region->reserved_0 = 0;
+ memory_region->flags = flags;
+ memory_region->reserved_1 = 0;
+ memory_region->handle = handle;
+ memory_region->tag = tag;
+ memory_region->receiver_count = 1;
+ memory_region->receivers[0].receiver_permissions.receiver = receiver;
+ memory_region->receivers[0].receiver_permissions.permissions =
+ permissions;
+ memory_region->receivers[0].receiver_permissions.flags = 0;
+ /*
+ * Offset 0 in this case means that the hypervisor should allocate the
+ * address ranges. This is the only configuration supported by Hafnium,
+ * as it enforces 1:1 mappings in the stage 2 page tables.
+ */
+ memory_region->receivers[0].composite_memory_region_offset = 0;
+ memory_region->receivers[0].reserved_0 = 0;
- retrieve_properties->attributes.receiver = receiver;
- retrieve_properties->attributes.memory_attributes = attributes;
- retrieve_properties->page_count = page_count;
- retrieve_properties->constituent_count = 0;
- retrieve_properties->reserved = 0;
-
- return sizeof(struct spci_memory_retrieve_request) +
- sizeof(struct spci_memory_retrieve_properties);
+ return sizeof(struct spci_memory_region) +
+ memory_region->receiver_count *
+ sizeof(struct spci_memory_access);
}
uint32_t spci_retrieved_memory_region_init(
- struct spci_retrieved_memory_region *response, size_t response_max_size,
- spci_vm_id_t receiver,
+ struct spci_memory_region *response, size_t response_max_size,
+ spci_vm_id_t sender, spci_memory_attributes_t attributes,
+ spci_memory_region_flags_t flags, spci_memory_handle_t handle,
+ spci_vm_id_t receiver, spci_memory_access_permissions_t permissions,
const struct spci_memory_region_constituent constituents[],
- uint32_t constituent_count, uint32_t page_count)
+ uint32_t constituent_count)
{
- struct spci_receiver_address_range *response_range =
- spci_retrieved_memory_region_first_receiver_range(response);
-
- response->receiver_count = 1;
- response_range->receiver = receiver;
- response_range->page_count = page_count;
- response_range->constituent_count = constituent_count;
- memcpy_s(response_range->constituents,
- response_max_size -
- sizeof(struct spci_retrieved_memory_region) -
- sizeof(struct spci_receiver_address_range),
- constituents,
- constituent_count *
- sizeof(struct spci_memory_region_constituent));
-
- return sizeof(struct spci_retrieved_memory_region) +
- sizeof(struct spci_receiver_address_range) +
- constituent_count *
- sizeof(struct spci_memory_region_constituent);
+ /* TODO: Check against response_max_size first. */
+ return spci_memory_region_init_internal(
+ response, sender, attributes, flags, handle, 0, receiver,
+ permissions, constituents, constituent_count);
}