Support exclusive lending as well as sharing memory.
Bug: 132420445
Change-Id: I3a79633427f5043d767aa8466b5c9325ba397ebe
diff --git a/inc/vmapi/hf/spci.h b/inc/vmapi/hf/spci.h
index 6977de8..8a58227 100644
--- a/inc/vmapi/hf/spci.h
+++ b/inc/vmapi/hf/spci.h
@@ -55,9 +55,10 @@
/* Architected memory sharing message IDs. */
enum spci_memory_share {
- SPCI_MEMORY_LEND = 0x0,
- SPCI_MEMORY_RELINQUISH = 0x1,
- SPCI_MEMORY_DONATE = 0x2,
+ SPCI_MEMORY_DONATE = 0x0,
+ SPCI_MEMORY_LEND = 0x1,
+ SPCI_MEMORY_SHARE = 0x2,
+ SPCI_MEMORY_RELINQUISH = 0x3,
};
/* SPCI function specific constants. */
@@ -366,9 +367,12 @@
return memory_region->constituent_offset + constituents_length;
}
-/** Constructs an SPCI donate memory region message. */
-static inline uint32_t spci_memory_donate_init(
- void *message, spci_vm_id_t receiver,
+/**
+ * Constructs an 'architected message' for SPCI memory sharing of the given
+ * type.
+ */
+static inline uint32_t spci_memory_init(
+ void *message, enum spci_memory_share share_type, spci_vm_id_t receiver,
struct spci_memory_region_constituent *region_constituents,
uint32_t constituent_count, uint32_t tag,
enum spci_memory_access access, enum spci_memory_type type,
@@ -381,7 +385,7 @@
spci_get_memory_region(message);
/* Fill in the details on the common message header. */
- spci_architected_message_init(message, SPCI_MEMORY_DONATE);
+ spci_architected_message_init(message, share_type);
/* Fill in memory region. */
message_length += spci_memory_region_init(
@@ -390,29 +394,18 @@
return message_length;
}
-/**
- * Constructs an SPCI memory region relinquish message.
- * A set of memory regions can be given back to the owner.
- */
-static inline uint32_t spci_memory_relinquish_init(
+/** Constructs an SPCI donate memory region message. */
+static inline uint32_t spci_memory_donate_init(
void *message, spci_vm_id_t receiver,
struct spci_memory_region_constituent *region_constituents,
- uint32_t constituent_count, uint32_t tag)
+ uint32_t constituent_count, uint32_t tag,
+ enum spci_memory_access access, enum spci_memory_type type,
+ enum spci_memory_cacheability cacheability,
+ enum spci_memory_shareability shareability)
{
- uint32_t message_length =
- sizeof(struct spci_architected_message_header);
- struct spci_memory_region *memory_region =
- spci_get_memory_region(message);
-
- /* Fill in the details on the common message header. */
- spci_architected_message_init(message, SPCI_MEMORY_RELINQUISH);
-
- /* Fill in memory region. */
- message_length += spci_memory_region_init(
- memory_region, receiver, region_constituents, constituent_count,
- tag, SPCI_MEMORY_RW_X, SPCI_MEMORY_DEVICE_MEM,
- SPCI_MEMORY_DEV_NGNRNE, SPCI_MEMORY_SHARE_NON_SHAREABLE);
- return message_length;
+ return spci_memory_init(message, SPCI_MEMORY_DONATE, receiver,
+ region_constituents, constituent_count, tag,
+ access, type, cacheability, shareability);
}
/**
@@ -426,17 +419,39 @@
enum spci_memory_cacheability cacheability,
enum spci_memory_shareability shareability)
{
- uint32_t message_length =
- sizeof(struct spci_architected_message_header);
- struct spci_memory_region *memory_region =
- spci_get_memory_region(message);
+ return spci_memory_init(message, SPCI_MEMORY_LEND, receiver,
+ region_constituents, constituent_count, tag,
+ access, type, cacheability, shareability);
+}
- /* Fill in the details on the common message header. */
- spci_architected_message_init(message, SPCI_MEMORY_LEND);
+/**
+ * Constructs an SPCI memory region share message.
+ */
+static inline uint32_t spci_memory_share_init(
+ void *message, spci_vm_id_t receiver,
+ struct spci_memory_region_constituent *region_constituents,
+ uint32_t constituent_count, uint32_t tag,
+ enum spci_memory_access access, enum spci_memory_type type,
+ enum spci_memory_cacheability cacheability,
+ enum spci_memory_shareability shareability)
+{
+ return spci_memory_init(message, SPCI_MEMORY_SHARE, receiver,
+ region_constituents, constituent_count, tag,
+ access, type, cacheability, shareability);
+}
- /* Fill in memory region. */
- message_length += spci_memory_region_init(
- memory_region, receiver, region_constituents, constituent_count,
- tag, access, type, cacheability, shareability);
- return message_length;
+/**
+ * Constructs an SPCI memory region relinquish message.
+ * A set of memory regions can be given back to the owner.
+ */
+static inline uint32_t spci_memory_relinquish_init(
+ void *message, spci_vm_id_t receiver,
+ struct spci_memory_region_constituent *region_constituents,
+ uint32_t constituent_count, uint32_t tag)
+{
+ return spci_memory_init(message, SPCI_MEMORY_RELINQUISH, receiver,
+ region_constituents, constituent_count, tag,
+ SPCI_MEMORY_RW_X, SPCI_MEMORY_DEVICE_MEM,
+ SPCI_MEMORY_DEV_NGNRNE,
+ SPCI_MEMORY_SHARE_NON_SHAREABLE);
}
diff --git a/src/spci_architected_message.c b/src/spci_architected_message.c
index 7d05ef5..f359b08 100644
--- a/src/spci_architected_message.c
+++ b/src/spci_architected_message.c
@@ -55,6 +55,7 @@
switch (share) {
case SPCI_MEMORY_DONATE:
case SPCI_MEMORY_LEND:
+ case SPCI_MEMORY_SHARE:
memory_to_attributes = spci_memory_attrs_to_mode(
memory_region->attributes[0].memory_attributes);
break;
@@ -221,35 +222,44 @@
static const uint32_t size_donate_transitions =
ARRAY_SIZE(donate_transitions);
- static const struct spci_mem_transitions relinquish_transitions[] = {
+ /*
+ * This data structure holds the allowed state transitions for the
+ * "lend" state machine. In this state machine the owner keeps ownership
+ * but loses access to the lent pages.
+ */
+ static const struct spci_mem_transitions lend_transitions[] = {
{
- /* 1) {!O-EA, O-NA} -> {!O-NA, O-EA} */
- .orig_from_mode = MM_MODE_UNOWNED,
- .orig_to_mode = MM_MODE_INVALID,
- .from_mode = MM_MODE_INVALID | MM_MODE_UNOWNED |
- MM_MODE_SHARED,
- .to_mode = 0,
+ /* 1) {O-EA, !O-NA} -> {O-NA, !O-EA} */
+ .orig_from_mode = 0,
+ .orig_to_mode = MM_MODE_INVALID | MM_MODE_UNOWNED |
+ MM_MODE_SHARED,
+ .from_mode = MM_MODE_INVALID,
+ .to_mode = MM_MODE_UNOWNED,
},
{
- /* 2) {!O-SA, O-SA} -> {!O-NA, O-EA} */
- .orig_from_mode = MM_MODE_UNOWNED | MM_MODE_SHARED,
- .orig_to_mode = MM_MODE_SHARED,
- .from_mode = MM_MODE_INVALID | MM_MODE_UNOWNED |
- MM_MODE_SHARED,
- .to_mode = 0,
+ /*
+ * Duplicate of 1) in order to cater for an alternative
+ * representation of !O-NA:
+ * (INVALID | UNOWNED | SHARED) and (INVALID | UNOWNED)
+ * are both alternate representations of !O-NA.
+ */
+ /* 2) {O-EA, !O-NA} -> {O-NA, !O-EA} */
+ .orig_from_mode = 0,
+ .orig_to_mode = MM_MODE_INVALID | MM_MODE_UNOWNED,
+ .from_mode = MM_MODE_INVALID,
+ .to_mode = MM_MODE_UNOWNED,
},
};
- static const uint32_t size_relinquish_transitions =
- ARRAY_SIZE(relinquish_transitions);
+ static const uint32_t size_lend_transitions =
+ ARRAY_SIZE(lend_transitions);
/*
- * This data structure holds the allowed state transitions for the "lend
- * with shared access" state machine. In this state machine the owner
- * keeps the lent pages mapped on its stage2 table and keeps access as
- * well.
+ * This data structure holds the allowed state transitions for the
+ * "share" state machine. In this state machine the owner keeps the
+ * shared pages mapped on its stage2 table and keeps access as well.
*/
- static const struct spci_mem_transitions shared_lend_transitions[] = {
+ static const struct spci_mem_transitions share_transitions[] = {
{
/* 1) {O-EA, !O-NA} -> {O-SA, !O-SA} */
.orig_from_mode = 0,
@@ -273,8 +283,30 @@
},
};
- static const uint32_t size_shared_lend_transitions =
- ARRAY_SIZE(shared_lend_transitions);
+ static const uint32_t size_share_transitions =
+ ARRAY_SIZE(share_transitions);
+
+ static const struct spci_mem_transitions relinquish_transitions[] = {
+ {
+ /* 1) {!O-EA, O-NA} -> {!O-NA, O-EA} */
+ .orig_from_mode = MM_MODE_UNOWNED,
+ .orig_to_mode = MM_MODE_INVALID,
+ .from_mode = MM_MODE_INVALID | MM_MODE_UNOWNED |
+ MM_MODE_SHARED,
+ .to_mode = 0,
+ },
+ {
+ /* 2) {!O-SA, O-SA} -> {!O-NA, O-EA} */
+ .orig_from_mode = MM_MODE_UNOWNED | MM_MODE_SHARED,
+ .orig_to_mode = MM_MODE_SHARED,
+ .from_mode = MM_MODE_INVALID | MM_MODE_UNOWNED |
+ MM_MODE_SHARED,
+ .to_mode = 0,
+ },
+ };
+
+ static const uint32_t size_relinquish_transitions =
+ ARRAY_SIZE(relinquish_transitions);
/* Fail if addresses are not page-aligned. */
if (!is_aligned(ipa_addr(begin), PAGE_SIZE) ||
@@ -299,16 +331,21 @@
transition_table_size = size_donate_transitions;
break;
+ case SPCI_MEMORY_LEND:
+ mem_transition_table = lend_transitions;
+ transition_table_size = size_lend_transitions;
+ break;
+
+ case SPCI_MEMORY_SHARE:
+ mem_transition_table = share_transitions;
+ transition_table_size = size_share_transitions;
+ break;
+
case SPCI_MEMORY_RELINQUISH:
mem_transition_table = relinquish_transitions;
transition_table_size = size_relinquish_transitions;
break;
- case SPCI_MEMORY_LEND:
- mem_transition_table = shared_lend_transitions;
- transition_table_size = size_shared_lend_transitions;
- break;
-
default:
return false;
}
diff --git a/test/vmapi/primary_with_secondaries/memory_sharing.c b/test/vmapi/primary_with_secondaries/memory_sharing.c
index 42f79d6..61505d0 100644
--- a/test/vmapi/primary_with_secondaries/memory_sharing.c
+++ b/test/vmapi/primary_with_secondaries/memory_sharing.c
@@ -49,9 +49,8 @@
}
/**
- * Helper function to test lend memory in the different configurations.
+ * Helper function to test lending memory in the different configurations.
*/
-
static void spci_check_cannot_lend_memory(
struct mailbox_buffers mb,
struct spci_memory_region_constituent constituents[])
@@ -118,6 +117,74 @@
}
/**
+ * Helper function to test sharing memory in the different configurations.
+ */
+static void spci_check_cannot_share_memory(
+ struct mailbox_buffers mb,
+ struct spci_memory_region_constituent constituents[])
+
+{
+ enum spci_memory_access lend_access[] = {
+ SPCI_MEMORY_RO_NX, SPCI_MEMORY_RO_X, SPCI_MEMORY_RW_NX,
+ SPCI_MEMORY_RW_X};
+ enum spci_memory_cacheability lend_cacheability[] = {
+ SPCI_MEMORY_CACHE_NON_CACHEABLE,
+ SPCI_MEMORY_CACHE_WRITE_THROUGH, SPCI_MEMORY_CACHE_WRITE_BACK};
+ enum spci_memory_cacheability lend_device[] = {
+ SPCI_MEMORY_DEV_NGNRNE, SPCI_MEMORY_DEV_NGNRE,
+ SPCI_MEMORY_DEV_NGRE, SPCI_MEMORY_DEV_GRE};
+ enum spci_memory_shareability lend_shareability[] = {
+ SPCI_MEMORY_SHARE_NON_SHAREABLE, SPCI_MEMORY_RESERVED,
+ SPCI_MEMORY_OUTER_SHAREABLE, SPCI_MEMORY_INNER_SHAREABLE};
+ uint32_t vms[] = {HF_PRIMARY_VM_ID, SERVICE_VM0, SERVICE_VM1};
+
+ int i = 0;
+ int j = 0;
+ int k = 0;
+ int l = 0;
+
+ for (i = 0; i < ARRAY_SIZE(vms); ++i) {
+ for (j = 0; j < ARRAY_SIZE(lend_access); ++j) {
+ for (k = 0; k < ARRAY_SIZE(lend_shareability); ++k) {
+ for (l = 0; l < ARRAY_SIZE(lend_cacheability);
+ ++l) {
+ uint32_t msg_size =
+ spci_memory_share_init(
+ mb.send, vms[i],
+ constituents, 1, 0,
+ lend_access[j],
+ SPCI_MEMORY_NORMAL_MEM,
+ lend_cacheability[l],
+ lend_shareability[k]);
+ EXPECT_SPCI_ERROR(
+ spci_msg_send(
+ HF_PRIMARY_VM_ID,
+ vms[i], msg_size,
+ SPCI_MSG_SEND_LEGACY_MEMORY),
+ SPCI_INVALID_PARAMETERS);
+ }
+ for (l = 0; l < ARRAY_SIZE(lend_device); ++l) {
+ uint32_t msg_size =
+ spci_memory_share_init(
+ mb.send, vms[i],
+ constituents, 1, 0,
+ lend_access[j],
+ SPCI_MEMORY_DEVICE_MEM,
+ lend_device[l],
+ lend_shareability[k]);
+ EXPECT_SPCI_ERROR(
+ spci_msg_send(
+ HF_PRIMARY_VM_ID,
+ vms[i], msg_size,
+ SPCI_MSG_SEND_LEGACY_MEMORY),
+ SPCI_INVALID_PARAMETERS);
+ }
+ }
+ }
+ }
+}
+
+/**
* Tries donating memory in available modes with different VMs and asserts that
* it will fail to all except the supplied VM ID as this would succeed if it
* is the only borrower.
@@ -310,6 +377,7 @@
SERVICE_SELECT(SERVICE_VM1, "spci_memory_return", mb.send);
spci_check_cannot_lend_memory(mb, constituents);
+ spci_check_cannot_share_memory(mb, constituents);
spci_check_cannot_donate_memory(mb, constituents, 1, -1);
}
@@ -881,6 +949,30 @@
}
/**
+ * SPCI: Check cannot share to self.
+ */
+TEST(memory_sharing, spci_share_to_self)
+{
+ struct mailbox_buffers mb = set_up_mailbox();
+ uint8_t *ptr = page;
+ uint32_t msg_size;
+
+ /* Initialise the memory before giving it. */
+ memset_s(ptr, sizeof(page), 'b', PAGE_SIZE);
+ struct spci_memory_region_constituent constituents[] = {
+ {.address = (uint64_t)page, .page_count = 1},
+ };
+
+ msg_size = spci_memory_share_init(
+ mb.send, HF_PRIMARY_VM_ID, constituents, 1, 0, SPCI_MEMORY_RW_X,
+ SPCI_MEMORY_NORMAL_MEM, SPCI_MEMORY_CACHE_WRITE_BACK,
+ SPCI_MEMORY_OUTER_SHAREABLE);
+ EXPECT_SPCI_ERROR(spci_msg_send(HF_PRIMARY_VM_ID, HF_PRIMARY_VM_ID,
+ msg_size, SPCI_MSG_SEND_LEGACY_MEMORY),
+ SPCI_INVALID_PARAMETERS);
+}
+
+/**
* SPCI: Check cannot donate from alternative VM.
*/
TEST(memory_sharing, spci_donate_invalid_source)
@@ -1057,6 +1149,66 @@
run_res = hf_vcpu_run(SERVICE_VM0, 0);
EXPECT_EQ(run_res.code, HF_VCPU_RUN_YIELD);
+ /* Let service write to and return memory. */
+ run_res = hf_vcpu_run(SERVICE_VM0, 0);
+ EXPECT_EQ(run_res.code, HF_VCPU_RUN_MESSAGE);
+
+ /* Re-initialise the memory before giving it. */
+ memset_s(ptr, sizeof(page), 'b', PAGE_SIZE);
+
+ msg_size = spci_memory_lend_init(
+ mb.send, SERVICE_VM0, constituents, 1, 0, SPCI_MEMORY_RO_X,
+ SPCI_MEMORY_NORMAL_MEM, SPCI_MEMORY_CACHE_WRITE_BACK,
+ SPCI_MEMORY_OUTER_SHAREABLE);
+
+ EXPECT_EQ(spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM0, msg_size,
+ SPCI_MSG_SEND_LEGACY_MEMORY)
+ .func,
+ SPCI_SUCCESS_32);
+
+ /* Let the memory be accessed. */
+ run_res = hf_vcpu_run(SERVICE_VM0, 0);
+ EXPECT_EQ(run_res.code, HF_VCPU_RUN_YIELD);
+
+ /* Observe the service faulting when writing to the memory. */
+ run_res = hf_vcpu_run(SERVICE_VM0, 0);
+ EXPECT_EQ(run_res.code, HF_VCPU_RUN_ABORTED);
+}
+
+/**
+ * SPCI: Memory can be shared with executable permissions.
+ * Check RO and RW permissions.
+ */
+TEST(memory_sharing, spci_share_relinquish_X_RW)
+{
+ struct hf_vcpu_run_return run_res;
+ struct mailbox_buffers mb = set_up_mailbox();
+ uint8_t *ptr = page;
+ uint32_t msg_size;
+
+ SERVICE_SELECT(SERVICE_VM0, "spci_memory_lend_relinquish_RW", mb.send);
+
+ /* Initialise the memory before giving it. */
+ memset_s(ptr, sizeof(page), 'b', PAGE_SIZE);
+
+ struct spci_memory_region_constituent constituents[] = {
+ {.address = (uint64_t)page, .page_count = 1},
+ };
+
+ msg_size = spci_memory_share_init(
+ mb.send, SERVICE_VM0, constituents, 1, 0, SPCI_MEMORY_RW_X,
+ SPCI_MEMORY_NORMAL_MEM, SPCI_MEMORY_CACHE_WRITE_BACK,
+ SPCI_MEMORY_OUTER_SHAREABLE);
+
+ EXPECT_EQ(spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM0, msg_size,
+ SPCI_MSG_SEND_LEGACY_MEMORY)
+ .func,
+ SPCI_SUCCESS_32);
+
+ /* Let the memory be accessed. */
+ run_res = hf_vcpu_run(SERVICE_VM0, 0);
+ EXPECT_EQ(run_res.code, HF_VCPU_RUN_YIELD);
+
/* Ensure we still have access. */
for (int i = 0; i < PAGE_SIZE; ++i) {
ASSERT_EQ(ptr[i], 'b');
@@ -1070,7 +1222,7 @@
/* Re-initialise the memory before giving it. */
memset_s(ptr, sizeof(page), 'b', PAGE_SIZE);
- msg_size = spci_memory_lend_init(
+ msg_size = spci_memory_share_init(
mb.send, SERVICE_VM0, constituents, 1, 0, SPCI_MEMORY_RO_X,
SPCI_MEMORY_NORMAL_MEM, SPCI_MEMORY_CACHE_WRITE_BACK,
SPCI_MEMORY_OUTER_SHAREABLE);
@@ -1096,10 +1248,10 @@
}
/**
- * SPCI: Memory can be lent without executable permissions.
+ * SPCI: Memory can be shared without executable permissions.
* Check RO and RW permissions.
*/
-TEST(memory_sharing, spci_lend_relinquish_NX_RW)
+TEST(memory_sharing, spci_share_relinquish_NX_RW)
{
struct hf_vcpu_run_return run_res;
struct mailbox_buffers mb = set_up_mailbox();
@@ -1115,7 +1267,7 @@
{.address = (uint64_t)page, .page_count = 1},
};
- msg_size = spci_memory_lend_init(
+ msg_size = spci_memory_share_init(
mb.send, SERVICE_VM0, constituents, 1, 0, SPCI_MEMORY_RW_NX,
SPCI_MEMORY_NORMAL_MEM, SPCI_MEMORY_CACHE_WRITE_BACK,
SPCI_MEMORY_OUTER_SHAREABLE);
@@ -1141,7 +1293,7 @@
/* Re-initialise the memory before giving it. */
memset_s(ptr, sizeof(page), 'b', PAGE_SIZE);
- msg_size = spci_memory_lend_init(
+ msg_size = spci_memory_share_init(
mb.send, SERVICE_VM0, constituents, 1, 0, SPCI_MEMORY_RO_NX,
SPCI_MEMORY_NORMAL_MEM, SPCI_MEMORY_CACHE_WRITE_BACK,
SPCI_MEMORY_OUTER_SHAREABLE);
@@ -1327,6 +1479,66 @@
}
/**
+ * SPCI: Memory can be shared, but then no part can be donated.
+ */
+TEST(memory_sharing, spci_share_donate)
+{
+ struct hf_vcpu_run_return run_res;
+ struct mailbox_buffers mb = set_up_mailbox();
+ uint8_t *ptr = page;
+ uint32_t msg_size;
+
+ SERVICE_SELECT(SERVICE_VM0, "spci_memory_lend_relinquish_RW", mb.send);
+ SERVICE_SELECT(SERVICE_VM1, "spci_memory_lend_relinquish_RW", mb.send);
+
+ /* Initialise the memory before giving it. */
+ memset_s(ptr, sizeof(page) * 2, 'b', PAGE_SIZE * 2);
+
+ struct spci_memory_region_constituent constituents[] = {
+ {.address = (uint64_t)page, .page_count = 2},
+ };
+
+ msg_size = spci_memory_share_init(
+ mb.send, SERVICE_VM0, constituents, 1, 0, SPCI_MEMORY_RO_X,
+ SPCI_MEMORY_NORMAL_MEM, SPCI_MEMORY_CACHE_WRITE_BACK,
+ SPCI_MEMORY_OUTER_SHAREABLE);
+
+ EXPECT_EQ(spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM0, msg_size,
+ SPCI_MSG_SEND_LEGACY_MEMORY)
+ .func,
+ SPCI_SUCCESS_32);
+
+ /* Let the memory be accessed. */
+ run_res = hf_vcpu_run(SERVICE_VM0, 0);
+ EXPECT_EQ(run_res.code, HF_VCPU_RUN_YIELD);
+
+	/*
+	 * Ensure we can't donate any sub section of memory to another VM.
+	 * NOTE(review): loop index i is unused here — every iteration retries
+	 * the fixed address page + PAGE_SIZE (the post-loop donate depends on
+	 * that value); confirm whether the address was meant to vary with i.
+	 */
+ constituents[0].page_count = 1;
+ for (int i = 1; i < PAGE_SIZE * 2; i++) {
+ constituents[0].address = (uint64_t)page + PAGE_SIZE;
+ msg_size = spci_memory_donate_init(
+ mb.send, SERVICE_VM1, constituents, 1, 0,
+ SPCI_MEMORY_RW_X, SPCI_MEMORY_NORMAL_MEM,
+ SPCI_MEMORY_CACHE_WRITE_BACK,
+ SPCI_MEMORY_OUTER_SHAREABLE);
+ EXPECT_SPCI_ERROR(
+ spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM1, msg_size,
+ SPCI_MSG_SEND_LEGACY_MEMORY),
+ SPCI_INVALID_PARAMETERS);
+ }
+
+ /* Ensure we can donate to the only borrower. */
+ msg_size = spci_memory_donate_init(
+ mb.send, SERVICE_VM0, constituents, 1, 0, SPCI_MEMORY_RW_X,
+ SPCI_MEMORY_NORMAL_MEM, SPCI_MEMORY_CACHE_WRITE_BACK,
+ SPCI_MEMORY_OUTER_SHAREABLE);
+ EXPECT_EQ(spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM0, msg_size,
+ SPCI_MSG_SEND_LEGACY_MEMORY)
+ .func,
+ SPCI_SUCCESS_32);
+}
+
+/**
* SPCI: Memory can be lent, but then no part can be lent again.
*/
TEST(memory_sharing, spci_lend_twice)
@@ -1362,6 +1574,8 @@
/* Attempt to lend the same area of memory. */
spci_check_cannot_lend_memory(mb, constituents);
+ /* Attempt to share the same area of memory. */
+ spci_check_cannot_share_memory(mb, constituents);
/* Fail to donate to VM apart from VM0. */
spci_check_cannot_donate_memory(mb, constituents, 1, SERVICE_VM0);
/* Fail to relinquish from any VM. */
@@ -1382,3 +1596,62 @@
SPCI_INVALID_PARAMETERS);
}
}
+
+/**
+ * SPCI: Memory can be shared, but then no part can be shared again.
+ */
+TEST(memory_sharing, spci_share_twice)
+{
+ struct hf_vcpu_run_return run_res;
+ struct mailbox_buffers mb = set_up_mailbox();
+ uint8_t *ptr = page;
+ uint32_t msg_size;
+
+ SERVICE_SELECT(SERVICE_VM0, "spci_memory_lend_twice", mb.send);
+ SERVICE_SELECT(SERVICE_VM1, "spci_memory_lend_twice", mb.send);
+
+ /* Initialise the memory before giving it. */
+ memset_s(ptr, sizeof(page) * 2, 'b', PAGE_SIZE * 2);
+
+ struct spci_memory_region_constituent constituents[] = {
+ {.address = (uint64_t)page, .page_count = 2},
+ };
+
+ msg_size = spci_memory_share_init(
+ mb.send, SERVICE_VM0, constituents, 1, 0, SPCI_MEMORY_RO_X,
+ SPCI_MEMORY_NORMAL_MEM, SPCI_MEMORY_CACHE_WRITE_BACK,
+ SPCI_MEMORY_OUTER_SHAREABLE);
+
+ EXPECT_EQ(spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM0, msg_size,
+ SPCI_MSG_SEND_LEGACY_MEMORY)
+ .func,
+ SPCI_SUCCESS_32);
+
+ /* Let the memory be accessed. */
+ run_res = hf_vcpu_run(SERVICE_VM0, 0);
+ EXPECT_EQ(run_res.code, HF_VCPU_RUN_YIELD);
+
+ /* Attempt to share the same area of memory. */
+ spci_check_cannot_share_memory(mb, constituents);
+ /* Attempt to lend the same area of memory. */
+ spci_check_cannot_lend_memory(mb, constituents);
+ /* Fail to donate to VM apart from VM0. */
+ spci_check_cannot_donate_memory(mb, constituents, 1, SERVICE_VM0);
+ /* Fail to relinquish from any VM. */
+ spci_check_cannot_relinquish_memory(mb, constituents, 1);
+
+	/* Attempt to share parts of the same area of memory again. */
+	constituents[0].page_count = 1;
+	for (int i = 1; i < PAGE_SIZE * 2; i++) {
+		constituents[0].address = (uint64_t)page + i;
+		msg_size = spci_memory_share_init(
+			mb.send, SERVICE_VM0, constituents, 1, 0,
+			SPCI_MEMORY_RO_X, SPCI_MEMORY_NORMAL_MEM,
+			SPCI_MEMORY_CACHE_WRITE_BACK,
+			SPCI_MEMORY_OUTER_SHAREABLE);
+		EXPECT_SPCI_ERROR(
+			spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM0, msg_size,
+				      SPCI_MSG_SEND_LEGACY_MEMORY),
+			SPCI_INVALID_PARAMETERS);
+	}
+}
diff --git a/test/vmapi/primary_with_secondaries/services/memory.c b/test/vmapi/primary_with_secondaries/services/memory.c
index ca72bf2..67c275d 100644
--- a/test/vmapi/primary_with_secondaries/services/memory.c
+++ b/test/vmapi/primary_with_secondaries/services/memory.c
@@ -557,6 +557,17 @@
EXPECT_SPCI_ERROR(spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM1, msg_size,
SPCI_MSG_SEND_LEGACY_MEMORY),
SPCI_INVALID_PARAMETERS);
+
+ /* Ensure we cannot share from the primary to another secondary. */
+ msg_size = spci_memory_share_init(
+ send_buf, SERVICE_VM1, constituents,
+ memory_region->constituent_count, 0, SPCI_MEMORY_RW_X,
+ SPCI_MEMORY_NORMAL_MEM, SPCI_MEMORY_CACHE_WRITE_BACK,
+ SPCI_MEMORY_OUTER_SHAREABLE);
+ EXPECT_SPCI_ERROR(spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM1, msg_size,
+ SPCI_MSG_SEND_LEGACY_MEMORY),
+ SPCI_INVALID_PARAMETERS);
+
spci_yield();
}
@@ -734,7 +745,7 @@
for (int i = 1; i < PAGE_SIZE * 2; i++) {
constituents[0].address = (uint64_t)ptr + i;
- /* Fail to lend the memory back to the primary. */
+ /* Fail to lend or share the memory back to the primary. */
msg_size = spci_memory_lend_init(
send_buf, SERVICE_VM1, constituents,
memory_region->constituent_count, 0, SPCI_MEMORY_RW_X,
@@ -744,6 +755,15 @@
spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM1, msg_size,
SPCI_MSG_SEND_LEGACY_MEMORY),
SPCI_INVALID_PARAMETERS);
+ msg_size = spci_memory_share_init(
+ send_buf, SERVICE_VM1, constituents,
+ memory_region->constituent_count, 0, SPCI_MEMORY_RW_X,
+ SPCI_MEMORY_NORMAL_MEM, SPCI_MEMORY_CACHE_WRITE_BACK,
+ SPCI_MEMORY_OUTER_SHAREABLE);
+ EXPECT_SPCI_ERROR(
+ spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM1, msg_size,
+ SPCI_MSG_SEND_LEGACY_MEMORY),
+ SPCI_INVALID_PARAMETERS);
}
msg_size = spci_memory_relinquish_init(