SPCI Lend and Relinquish: Add initial tests to exercise functionality

Add tests to verify core functionality of `spci_memory_lend`,
`spci_memory_relinquish` and their interactions with
`spci_memory_donate`.
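
The tests exercise the lend access, memory type, cacheability and
shareability attributes, executable and write permissions, bounds
checks on lent regions, and lending with invalid VM IDs.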

Change-Id: I440d0a385d27f0aad370eb555e2d20e3be726328
diff --git a/test/vmapi/primary_with_secondaries/memory_sharing.c b/test/vmapi/primary_with_secondaries/memory_sharing.c
index 30f4fc3..b094a7a 100644
--- a/test/vmapi/primary_with_secondaries/memory_sharing.c
+++ b/test/vmapi/primary_with_secondaries/memory_sharing.c
@@ -49,24 +49,99 @@
 }
 
 /**
- * Tries sharing memory in available modes with different VMs and asserts that
+ * Tries lending memory in all available configurations to different VMs and
+ * asserts that each attempt fails.
+ */
+static void spci_check_cannot_lend_memory(
+	struct mailbox_buffers mb,
+	struct spci_memory_region_constituent constituents[])
+{
+	enum spci_lend_access lend_access[] = {SPCI_LEND_RO_NX, SPCI_LEND_RO_X,
+					       SPCI_LEND_RW_NX, SPCI_LEND_RW_X};
+	enum spci_lend_type lend_type[] = {
+		SPCI_LEND_NORMAL_MEM, SPCI_LEND_DEV_NGNRNE, SPCI_LEND_DEV_NGNRE,
+		SPCI_LEND_DEV_NGRE, SPCI_LEND_DEV_GRE};
+	enum spci_lend_cacheability lend_cacheability[] = {
+		SPCI_LEND_CACHE_NON_CACHEABLE, SPCI_LEND_CACHE_WRITE_THROUGH,
+		SPCI_LEND_CACHE_WRITE_BACK};
+	enum spci_lend_shareability lend_shareability[] = {
+		SPCI_LEND_SHARE_NON_SHAREABLE, SPCI_LEND_RESERVED,
+		SPCI_LEND_OUTER_SHAREABLE, SPCI_LEND_INNER_SHAREABLE};
+	uint32_t vms[] = {HF_PRIMARY_VM_ID, SERVICE_VM0, SERVICE_VM1};
+
+	int i;
+	int j;
+	int k;
+	int l;
+	int m;
+
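+	/*
+	 * Try every combination of lend attributes for each VM and check
+	 * that every attempt is rejected.
+	 */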
+	for (i = 0; i < ARRAY_SIZE(vms); ++i) {
+		for (j = 0; j < ARRAY_SIZE(lend_access); ++j) {
+			for (k = 0; k < ARRAY_SIZE(lend_type); ++k) {
+				for (l = 0; l < ARRAY_SIZE(lend_cacheability);
+				     ++l) {
+					for (m = 0;
+					     m < ARRAY_SIZE(lend_shareability);
+					     ++m) {
+						spci_memory_lend(
+							mb.send, vms[i],
+							HF_PRIMARY_VM_ID,
+							constituents, 1, 0,
+							lend_access[j],
+							lend_type[k],
+							lend_cacheability[l],
+							lend_shareability[m]);
+						EXPECT_EQ(
+							spci_msg_send(0),
+							SPCI_INVALID_PARAMETERS);
+					}
+				}
+			}
+		}
+	}
+}
+
+/**
+ * Tries donating memory in available modes to different VMs and asserts that
+ * it will fail for all of them except the supplied VM ID, for which the
+ * donation would succeed if it is the only borrower.
+ */
+static void spci_check_cannot_donate_memory(
+	struct mailbox_buffers mb,
+	struct spci_memory_region_constituent constituents[], int num_elements,
+	int32_t avoid_vm)
+{
+	uint32_t vms[] = {HF_PRIMARY_VM_ID, SERVICE_VM0, SERVICE_VM1};
+
+	int i;
+	for (i = 0; i < ARRAY_SIZE(vms); ++i) {
+		/* Optionally skip one VM as the donate would succeed. */
+		if (vms[i] == avoid_vm) {
+			continue;
+		}
+		spci_memory_donate(mb.send, vms[i], HF_PRIMARY_VM_ID,
+				   constituents, num_elements, 0);
+		EXPECT_EQ(spci_msg_send(0), SPCI_INVALID_PARAMETERS);
+	}
+}
+
+/**
+ * Tries relinquishing memory with different VMs and asserts that
  * it will fail.
  */
-static void spci_check_cannot_share_memory(
+static void spci_check_cannot_relinquish_memory(
 	struct mailbox_buffers mb,
 	struct spci_memory_region_constituent constituents[], int num_elements)
 {
 	uint32_t vms[] = {HF_PRIMARY_VM_ID, SERVICE_VM0, SERVICE_VM1};
-	void (*modes[])(struct spci_message *, spci_vm_id_t, spci_vm_id_t,
-			struct spci_memory_region_constituent *, uint32_t,
-			uint32_t) = {spci_memory_donate};
+
 	int i;
 	int j;
-
-	for (j = 0; j < ARRAY_SIZE(modes); ++j) {
-		for (i = 0; i < ARRAY_SIZE(vms); ++i) {
-			modes[j](mb.send, vms[i], HF_PRIMARY_VM_ID,
-				 constituents, num_elements, 0);
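+	/* Try every pair of source and destination VM IDs. */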
+	for (i = 0; i < ARRAY_SIZE(vms); ++i) {
+		for (j = 0; j < ARRAY_SIZE(vms); ++j) {
+			spci_memory_relinquish(mb.send, vms[i], vms[j],
+					       constituents, num_elements, 0);
 			EXPECT_EQ(spci_msg_send(0), SPCI_INVALID_PARAMETERS);
 		}
 	}
@@ -226,7 +301,7 @@
 }
 
 /**
- * SPCI Memory given away can be given back.
+ * SPCI: Check that memory can be lent and is accessible by both parties.
  */
 TEST(memory_sharing, spci_lend_relinquish)
 {
@@ -234,7 +309,7 @@
 	struct mailbox_buffers mb = set_up_mailbox();
 	uint8_t *ptr = page;
 
-	SERVICE_SELECT(SERVICE_VM0, "memory_lend_relinquish_spci", mb.send);
+	SERVICE_SELECT(SERVICE_VM0, "spci_memory_lend_relinquish", mb.send);
 
 	/* Initialise the memory before giving it. */
 	memset_s(ptr, sizeof(page), 'b', PAGE_SIZE);
@@ -637,8 +712,10 @@
 	run_res = hf_vcpu_run(SERVICE_VM0, 0);
 	EXPECT_EQ(run_res.code, HF_VCPU_RUN_YIELD);
 
-	/* Fail to share memory again with either VM0 or VM1. */
-	spci_check_cannot_share_memory(mb, constituents, 1);
+	/* Fail to donate the memory again to any VM. */
+	spci_check_cannot_donate_memory(mb, constituents, 1, -1);
+	/* Fail to relinquish memory from any VM. */
+	spci_check_cannot_relinquish_memory(mb, constituents, 1);
 
 	/* Let the memory be sent from VM0 to PRIMARY (returned). */
 	run_res = hf_vcpu_run(SERVICE_VM0, 0);
@@ -673,6 +750,27 @@
 }
 
 /**
+ * SPCI: Check cannot lend to self.
+ */
+TEST(memory_sharing, spci_lend_to_self)
+{
+	struct mailbox_buffers mb = set_up_mailbox();
+	uint8_t *ptr = page;
+
+	/* Initialise the memory before giving it. */
+	memset_s(ptr, sizeof(page), 'b', PAGE_SIZE);
+	struct spci_memory_region_constituent constituents[] = {
+		{.address = (uint64_t)page, .page_count = 1},
+	};
+
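+	/* Attempt to lend the memory to its current owner. */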
+	spci_memory_lend(mb.send, HF_PRIMARY_VM_ID, HF_PRIMARY_VM_ID,
+			 constituents, 1, 0, SPCI_LEND_RW_X,
+			 SPCI_LEND_NORMAL_MEM, SPCI_LEND_CACHE_WRITE_BACK,
+			 SPCI_LEND_OUTER_SHAREABLE);
+	EXPECT_EQ(spci_msg_send(0), SPCI_INVALID_PARAMETERS);
+}
+
+/**
  * SPCI: Check cannot donate from alternative VM.
  */
 TEST(memory_sharing, spci_donate_invalid_source)
@@ -718,7 +816,7 @@
 }
 
 /**
- * SPCI: Check that unaligned addresses can not be donated.
+ * SPCI: Check that unaligned addresses cannot be shared.
  */
 TEST(memory_sharing, spci_give_and_get_back_unaligned)
 {
@@ -733,5 +831,347 @@
 		spci_memory_donate(mb.send, SERVICE_VM0, HF_PRIMARY_VM_ID,
 				   constituents, 1, 0);
 		EXPECT_EQ(spci_msg_send(0), SPCI_INVALID_PARAMETERS);
+		spci_memory_lend(
+			mb.send, SERVICE_VM0, HF_PRIMARY_VM_ID, constituents, 1,
+			0, SPCI_LEND_RW_X, SPCI_LEND_NORMAL_MEM,
+			SPCI_LEND_CACHE_WRITE_BACK, SPCI_LEND_OUTER_SHAREABLE);
+		EXPECT_EQ(spci_msg_send(0), SPCI_INVALID_PARAMETERS);
+	}
+}
+
+/**
+ * SPCI: Check cannot lend from alternative VM.
+ */
+TEST(memory_sharing, spci_lend_invalid_source)
+{
+	struct hf_vcpu_run_return run_res;
+	struct mailbox_buffers mb = set_up_mailbox();
+	uint8_t *ptr = page;
+
+	SERVICE_SELECT(SERVICE_VM0, "spci_lend_invalid_source", mb.send);
+
+	/* Initialise the memory before giving it. */
+	memset_s(ptr, sizeof(page), 'b', PAGE_SIZE);
+	struct spci_memory_region_constituent constituents[] = {
+		{.address = (uint64_t)page, .page_count = 1},
+	};
+
+	/* Check cannot swap VM IDs. */
+	spci_memory_lend(mb.send, HF_PRIMARY_VM_ID, SERVICE_VM0, constituents,
+			 1, 0, SPCI_LEND_RW_X, SPCI_LEND_NORMAL_MEM,
+			 SPCI_LEND_CACHE_WRITE_BACK, SPCI_LEND_OUTER_SHAREABLE);
+	EXPECT_EQ(spci_msg_send(0), SPCI_INVALID_PARAMETERS);
+
+	/* Lend memory to VM0. */
+	spci_memory_lend(mb.send, SERVICE_VM0, HF_PRIMARY_VM_ID, constituents,
+			 1, 0, SPCI_LEND_RW_X, SPCI_LEND_NORMAL_MEM,
+			 SPCI_LEND_CACHE_WRITE_BACK, SPCI_LEND_OUTER_SHAREABLE);
+	EXPECT_EQ(spci_msg_send(0), SPCI_SUCCESS);
+
+	/* Receive the memory back from VM0. */
+	run_res = hf_vcpu_run(SERVICE_VM0, 0);
+	EXPECT_EQ(run_res.code, HF_VCPU_RUN_MESSAGE);
+
+	/* Let VM0 fail to lend the memory on behalf of the primary. */
+	run_res = hf_vcpu_run(SERVICE_VM0, 0);
+	EXPECT_EQ(run_res.code, HF_VCPU_RUN_YIELD);
+}
+
+/**
+ * SPCI: Memory can be lent with executable permissions.
+ * Check RO and RW permissions.
+ */
+TEST(memory_sharing, spci_lend_relinquish_X_RW)
+{
+	struct hf_vcpu_run_return run_res;
+	struct mailbox_buffers mb = set_up_mailbox();
+	uint8_t *ptr = page;
+
+	SERVICE_SELECT(SERVICE_VM0, "spci_memory_lend_relinquish_RW", mb.send);
+
+	/* Initialise the memory before giving it. */
+	memset_s(ptr, sizeof(page), 'b', PAGE_SIZE);
+
+	struct spci_memory_region_constituent constituents[] = {
+		{.address = (uint64_t)page, .page_count = 1},
+	};
+
+	spci_memory_lend(mb.send, SERVICE_VM0, HF_PRIMARY_VM_ID, constituents,
+			 1, 0, SPCI_LEND_RW_X, SPCI_LEND_NORMAL_MEM,
+			 SPCI_LEND_CACHE_WRITE_BACK, SPCI_LEND_OUTER_SHAREABLE);
+
+	EXPECT_EQ(spci_msg_send(0), SPCI_SUCCESS);
+
+	/* Let the memory be accessed. */
+	run_res = hf_vcpu_run(SERVICE_VM0, 0);
+	EXPECT_EQ(run_res.code, HF_VCPU_RUN_YIELD);
+
+	/* Ensure we still have access. */
+	for (int i = 0; i < PAGE_SIZE; ++i) {
+		ASSERT_EQ(ptr[i], 'b');
+		ptr[i]++;
+	}
+
+	/* Let service write to and return memory. */
+	run_res = hf_vcpu_run(SERVICE_VM0, 0);
+	EXPECT_EQ(run_res.code, HF_VCPU_RUN_MESSAGE);
+
+	/* Re-initialise the memory before giving it. */
+	memset_s(ptr, sizeof(page), 'b', PAGE_SIZE);
+
+	spci_memory_lend(mb.send, SERVICE_VM0, HF_PRIMARY_VM_ID, constituents,
+			 1, 0, SPCI_LEND_RO_X, SPCI_LEND_NORMAL_MEM,
+			 SPCI_LEND_CACHE_WRITE_BACK, SPCI_LEND_OUTER_SHAREABLE);
+
+	EXPECT_EQ(spci_msg_send(0), SPCI_SUCCESS);
+
+	/* Let the memory be accessed. */
+	run_res = hf_vcpu_run(SERVICE_VM0, 0);
+	EXPECT_EQ(run_res.code, HF_VCPU_RUN_YIELD);
+
+	/* Ensure we still have access. */
+	for (int i = 0; i < PAGE_SIZE; ++i) {
+		ASSERT_EQ(ptr[i], 'b');
+		ptr[i]++;
+	}
+
+	/* Observe the service faulting when writing to the memory. */
+	run_res = hf_vcpu_run(SERVICE_VM0, 0);
+	EXPECT_EQ(run_res.code, HF_VCPU_RUN_ABORTED);
+}
+
+/**
+ * SPCI: Memory can be lent without executable permissions.
+ * Check RO and RW permissions.
+ */
+TEST(memory_sharing, spci_lend_relinquish_NX_RW)
+{
+	struct hf_vcpu_run_return run_res;
+	struct mailbox_buffers mb = set_up_mailbox();
+	uint8_t *ptr = page;
+
+	SERVICE_SELECT(SERVICE_VM0, "spci_memory_lend_relinquish_RW", mb.send);
+
+	/* Initialise the memory before giving it. */
+	memset_s(ptr, sizeof(page), 'b', PAGE_SIZE);
+
+	struct spci_memory_region_constituent constituents[] = {
+		{.address = (uint64_t)page, .page_count = 1},
+	};
+
+	spci_memory_lend(mb.send, SERVICE_VM0, HF_PRIMARY_VM_ID, constituents,
+			 1, 0, SPCI_LEND_RW_NX, SPCI_LEND_NORMAL_MEM,
+			 SPCI_LEND_CACHE_WRITE_BACK, SPCI_LEND_OUTER_SHAREABLE);
+
+	EXPECT_EQ(spci_msg_send(0), SPCI_SUCCESS);
+
+	/* Let the memory be accessed. */
+	run_res = hf_vcpu_run(SERVICE_VM0, 0);
+	EXPECT_EQ(run_res.code, HF_VCPU_RUN_YIELD);
+
+	/* Ensure we still have access. */
+	for (int i = 0; i < PAGE_SIZE; ++i) {
+		ASSERT_EQ(ptr[i], 'b');
+	}
+
+	/* Let service write to and return memory. */
+	run_res = hf_vcpu_run(SERVICE_VM0, 0);
+	EXPECT_EQ(run_res.code, HF_VCPU_RUN_MESSAGE);
+
+	/* Re-initialise the memory before giving it. */
+	memset_s(ptr, sizeof(page), 'b', PAGE_SIZE);
+
+	spci_memory_lend(mb.send, SERVICE_VM0, HF_PRIMARY_VM_ID, constituents,
+			 1, 0, SPCI_LEND_RO_NX, SPCI_LEND_NORMAL_MEM,
+			 SPCI_LEND_CACHE_WRITE_BACK, SPCI_LEND_OUTER_SHAREABLE);
+
+	EXPECT_EQ(spci_msg_send(0), SPCI_SUCCESS);
+
+	/* Let the memory be accessed. */
+	run_res = hf_vcpu_run(SERVICE_VM0, 0);
+	EXPECT_EQ(run_res.code, HF_VCPU_RUN_YIELD);
+
+	/* Ensure we still have access. */
+	for (int i = 0; i < PAGE_SIZE; ++i) {
+		ASSERT_EQ(ptr[i], 'b');
+		ptr[i]++;
+	}
+
+	/* Observe the service faulting when writing to the memory. */
+	run_res = hf_vcpu_run(SERVICE_VM0, 0);
+	EXPECT_EQ(run_res.code, HF_VCPU_RUN_ABORTED);
+}
+
+/**
+ * SPCI: Exercise execution permissions for lending memory.
+ */
+TEST(memory_sharing, spci_lend_relinquish_RW_X)
+{
+	struct hf_vcpu_run_return run_res;
+	struct mailbox_buffers mb = set_up_mailbox();
+	uint8_t *ptr = page;
+
+	SERVICE_SELECT(SERVICE_VM0, "spci_memory_lend_relinquish_X", mb.send);
+
+	/* Initialise the memory before giving it. */
+	memset_s(ptr, sizeof(page), 0, PAGE_SIZE);
+
+	uint64_t *ptr2 = (uint64_t *)page;
+	/* Set memory to contain the RET instruction to attempt to execute. */
+	*ptr2 = 0xD65F03C0;
+
+	struct spci_memory_region_constituent constituents[] = {
+		{.address = (uint64_t)page, .page_count = 1},
+	};
+
+	spci_memory_lend(mb.send, SERVICE_VM0, HF_PRIMARY_VM_ID, constituents,
+			 1, 0, SPCI_LEND_RW_X, SPCI_LEND_NORMAL_MEM,
+			 SPCI_LEND_CACHE_WRITE_BACK, SPCI_LEND_OUTER_SHAREABLE);
+	EXPECT_EQ(spci_msg_send(0), SPCI_SUCCESS);
+
+	/* Attempt to execute from memory. */
+	run_res = hf_vcpu_run(SERVICE_VM0, 0);
+	EXPECT_EQ(run_res.code, HF_VCPU_RUN_MESSAGE);
+
+	spci_memory_lend(mb.send, SERVICE_VM0, HF_PRIMARY_VM_ID, constituents,
+			 1, 0, SPCI_LEND_RW_NX, SPCI_LEND_NORMAL_MEM,
+			 SPCI_LEND_CACHE_WRITE_BACK, SPCI_LEND_OUTER_SHAREABLE);
+	EXPECT_EQ(spci_msg_send(0), SPCI_SUCCESS);
+
+	/* Try and fail to execute from the memory region. */
+	run_res = hf_vcpu_run(SERVICE_VM0, 0);
+	EXPECT_EQ(run_res.code, HF_VCPU_RUN_ABORTED);
+}
+
+/**
+ * SPCI: Exercise execution permissions for lending memory without write
+ * access.
+ */
+TEST(memory_sharing, spci_lend_relinquish_RO_X)
+{
+	struct hf_vcpu_run_return run_res;
+	struct mailbox_buffers mb = set_up_mailbox();
+	uint8_t *ptr = page;
+
+	SERVICE_SELECT(SERVICE_VM0, "spci_memory_lend_relinquish_X", mb.send);
+
+	/* Initialise the memory before giving it. */
+	memset_s(ptr, sizeof(page), 0, PAGE_SIZE);
+
+	uint64_t *ptr2 = (uint64_t *)page;
+	/* Set memory to contain the RET instruction to attempt to execute. */
+	*ptr2 = 0xD65F03C0;
+
+	struct spci_memory_region_constituent constituents[] = {
+		{.address = (uint64_t)page, .page_count = 1},
+	};
+
+	spci_memory_lend(mb.send, SERVICE_VM0, HF_PRIMARY_VM_ID, constituents,
+			 1, 0, SPCI_LEND_RO_X, SPCI_LEND_NORMAL_MEM,
+			 SPCI_LEND_CACHE_WRITE_BACK, SPCI_LEND_OUTER_SHAREABLE);
+	EXPECT_EQ(spci_msg_send(0), SPCI_SUCCESS);
+
+	/* Attempt to execute from memory. */
+	run_res = hf_vcpu_run(SERVICE_VM0, 0);
+	EXPECT_EQ(run_res.code, HF_VCPU_RUN_MESSAGE);
+
+	spci_memory_lend(mb.send, SERVICE_VM0, HF_PRIMARY_VM_ID, constituents,
+			 1, 0, SPCI_LEND_RO_NX, SPCI_LEND_NORMAL_MEM,
+			 SPCI_LEND_CACHE_WRITE_BACK, SPCI_LEND_OUTER_SHAREABLE);
+	EXPECT_EQ(spci_msg_send(0), SPCI_SUCCESS);
+
+	/* Try and fail to execute from the memory region. */
+	run_res = hf_vcpu_run(SERVICE_VM0, 0);
+	EXPECT_EQ(run_res.code, HF_VCPU_RUN_ABORTED);
+}
+
+/**
+ * SPCI: Memory can be lent, but no part can then be donated to a third party.
+ */
+TEST(memory_sharing, spci_lend_donate)
+{
+	struct hf_vcpu_run_return run_res;
+	struct mailbox_buffers mb = set_up_mailbox();
+	uint8_t *ptr = page;
+
+	SERVICE_SELECT(SERVICE_VM0, "spci_memory_lend_relinquish_RW", mb.send);
+	SERVICE_SELECT(SERVICE_VM1, "spci_memory_lend_relinquish_RW", mb.send);
+
+	/* Initialise the memory before giving it. */
+	memset_s(ptr, sizeof(page) * 2, 'b', PAGE_SIZE * 2);
+
+	struct spci_memory_region_constituent constituents[] = {
+		{.address = (uint64_t)page, .page_count = 2},
+	};
+
+	spci_memory_lend(mb.send, SERVICE_VM0, HF_PRIMARY_VM_ID, constituents,
+			 1, 0, SPCI_LEND_RO_X, SPCI_LEND_NORMAL_MEM,
+			 SPCI_LEND_CACHE_WRITE_BACK, SPCI_LEND_OUTER_SHAREABLE);
+
+	EXPECT_EQ(spci_msg_send(0), SPCI_SUCCESS);
+
+	/* Let the memory be accessed. */
+	run_res = hf_vcpu_run(SERVICE_VM0, 0);
+	EXPECT_EQ(run_res.code, HF_VCPU_RUN_YIELD);
+
+	/* Ensure we can't donate any sub section of memory to another VM. */
+	constituents[0].page_count = 1;
+	for (int i = 1; i < PAGE_SIZE * 2; i++) {
+		constituents[0].address = (uint64_t)page + i;
+		spci_memory_donate(mb.send, SERVICE_VM1, HF_PRIMARY_VM_ID,
+				   constituents, 1, 0);
+		EXPECT_EQ(spci_msg_send(0), SPCI_INVALID_PARAMETERS);
+	}
+
+	/* Ensure we can donate to the only borrower. */
+	constituents[0].address = (uint64_t)page + PAGE_SIZE;
+	spci_memory_donate(mb.send, SERVICE_VM0, HF_PRIMARY_VM_ID, constituents,
+			   1, 0);
+	EXPECT_EQ(spci_msg_send(0), SPCI_SUCCESS);
+}
+
+/**
+ * SPCI: Memory can be lent, but then no part can be lent again.
+ */
+TEST(memory_sharing, spci_lend_twice)
+{
+	struct hf_vcpu_run_return run_res;
+	struct mailbox_buffers mb = set_up_mailbox();
+	uint8_t *ptr = page;
+
+	SERVICE_SELECT(SERVICE_VM0, "spci_memory_lend_twice", mb.send);
+	SERVICE_SELECT(SERVICE_VM1, "spci_memory_lend_twice", mb.send);
+
+	/* Initialise the memory before giving it. */
+	memset_s(ptr, sizeof(page) * 2, 'b', PAGE_SIZE * 2);
+
+	struct spci_memory_region_constituent constituents[] = {
+		{.address = (uint64_t)page, .page_count = 2},
+	};
+
+	spci_memory_lend(mb.send, SERVICE_VM0, HF_PRIMARY_VM_ID, constituents,
+			 1, 0, SPCI_LEND_RO_X, SPCI_LEND_NORMAL_MEM,
+			 SPCI_LEND_CACHE_WRITE_BACK, SPCI_LEND_OUTER_SHAREABLE);
+
+	EXPECT_EQ(spci_msg_send(0), SPCI_SUCCESS);
+
+	/* Let the memory be accessed. */
+	run_res = hf_vcpu_run(SERVICE_VM0, 0);
+	EXPECT_EQ(run_res.code, HF_VCPU_RUN_YIELD);
+
+	/* Attempt to lend the same area of memory. */
+	spci_check_cannot_lend_memory(mb, constituents);
+	/* Fail to donate to any VM other than VM0. */
+	spci_check_cannot_donate_memory(mb, constituents, 1, SERVICE_VM0);
+	/* Fail to relinquish from any VM. */
+	spci_check_cannot_relinquish_memory(mb, constituents, 1);
+
+	/* Attempt to lend sub sections of the memory to another VM. */
+	constituents[0].page_count = 1;
+	for (int i = 1; i < PAGE_SIZE * 2; i++) {
+		constituents[0].address = (uint64_t)page + i;
+		spci_memory_lend(
+			mb.send, SERVICE_VM1, HF_PRIMARY_VM_ID, constituents, 1,
+			0, SPCI_LEND_RO_X, SPCI_LEND_NORMAL_MEM,
+			SPCI_LEND_CACHE_WRITE_BACK, SPCI_LEND_OUTER_SHAREABLE);
+		EXPECT_EQ(spci_msg_send(0), SPCI_INVALID_PARAMETERS);
 	}
 }
diff --git a/test/vmapi/primary_with_secondaries/services/memory.c b/test/vmapi/primary_with_secondaries/services/memory.c
index c640797..cfd05f6 100644
--- a/test/vmapi/primary_with_secondaries/services/memory.c
+++ b/test/vmapi/primary_with_secondaries/services/memory.c
@@ -147,7 +147,7 @@
 			  hf_vm_get_id());
 	EXPECT_EQ(spci_msg_send(0), 0);
 
-	/* Try using the memory that isn't valid unless it's been returned.  */
+	/* Try using the memory that isn't valid unless it's been returned. */
 	page[16] = 123;
 }
 
@@ -171,7 +171,7 @@
 			  hf_vm_get_id());
 	EXPECT_EQ(spci_msg_send(0), 0);
 
-	/* Try using the memory that isn't valid unless it's been returned.  */
+	/* Try using the memory that isn't valid unless it's been returned. */
 	page[633] = 180;
 }
 
@@ -280,21 +280,21 @@
 	struct spci_message *send_buf = SERVICE_SEND_BUFFER();
 	struct spci_memory_region *memory_region =
 		spci_get_donated_memory_region(recv_buf);
+	struct spci_memory_region_constituent constituent =
+		memory_region->constituents[0];
 	hf_mailbox_clear();
 
 	/* Yield to allow attempt to re donate from primary. */
 	spci_yield();
 
 	/* Give the memory back and notify the sender. */
-	spci_memory_donate(send_buf, HF_PRIMARY_VM_ID, recv_buf->target_vm_id,
-			   memory_region->constituents, memory_region->count,
-			   0);
+	spci_memory_donate(send_buf, HF_PRIMARY_VM_ID, SERVICE_VM0,
+			   &constituent, memory_region->count, 0);
 	EXPECT_EQ(spci_msg_send(0), SPCI_SUCCESS);
 
 	/* Attempt to donate the memory to another VM. */
 	spci_memory_donate(send_buf, SERVICE_VM1, recv_buf->target_vm_id,
-			   memory_region->constituents, memory_region->count,
-			   0);
+			   &constituent, memory_region->count, 0);
 	EXPECT_EQ(spci_msg_send(0), SPCI_INVALID_PARAMETERS);
 
 	spci_yield();
@@ -351,3 +351,268 @@
 	EXPECT_EQ(spci_msg_send(0), SPCI_INVALID_PARAMETERS);
 	spci_yield();
 }
+
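+/**
+ * SPCI: Receive lent memory, use it, and relinquish it back to the sender.
+ */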
+TEST_SERVICE(spci_memory_lend_relinquish)
+{
+	/* Loop, giving memory back to the sender. */
+	for (;;) {
+		EXPECT_EQ(spci_msg_recv(SPCI_MSG_RECV_BLOCK), SPCI_SUCCESS);
+		uint8_t *ptr;
+
+		struct spci_message *recv_buf = SERVICE_RECV_BUFFER();
+		struct spci_message *send_buf = SERVICE_SEND_BUFFER();
+		struct spci_memory_region *memory_region =
+			(struct spci_memory_region *)(spci_get_lend_descriptor(
+							      recv_buf)
+							      ->payload);
+
+		ptr = (uint8_t *)memory_region->constituents[0].address;
+		/* Relevant information read, mailbox can be cleared. */
+		hf_mailbox_clear();
+
+		/* Check that one has access to the shared region. */
+		for (int i = 0; i < PAGE_SIZE; ++i) {
+			ptr[i]++;
+		}
+
+		/* Give the memory back and notify the sender. */
+		spci_memory_relinquish(
+			send_buf, HF_PRIMARY_VM_ID, recv_buf->target_vm_id,
+			memory_region->constituents, memory_region->count, 0);
+		spci_msg_send(0);
+
+		/*
+		 * Try and access the memory which will cause a fault unless the
+		 * memory has been shared back again.
+		 */
+		ptr[0] = 123;
+	}
+}
+
+/**
+ * SPCI: Ensure that we can't relinquish donated memory.
+ */
+TEST_SERVICE(spci_memory_donate_relinquish)
+{
+	for (;;) {
+		EXPECT_EQ(spci_msg_recv(SPCI_MSG_RECV_BLOCK), SPCI_SUCCESS);
+		uint8_t *ptr;
+
+		struct spci_message *recv_buf = SERVICE_RECV_BUFFER();
+		struct spci_message *send_buf = SERVICE_SEND_BUFFER();
+		struct spci_memory_region *memory_region =
+			spci_get_donated_memory_region(recv_buf);
+		hf_mailbox_clear();
+
+		ptr = (uint8_t *)memory_region->constituents[0].address;
+
+		/* Check that one has access to the shared region. */
+		for (int i = 0; i < PAGE_SIZE; ++i) {
+			ptr[i]++;
+		}
+		/* Attempt to relinquish the donated memory; this must fail. */
+		spci_memory_relinquish(
+			send_buf, HF_PRIMARY_VM_ID, recv_buf->target_vm_id,
+			memory_region->constituents, memory_region->count, 0);
+		EXPECT_EQ(spci_msg_send(0), SPCI_INVALID_PARAMETERS);
+
+		/* Ensure we still have access to the memory. */
+		ptr[0] = 123;
+
+		spci_yield();
+	}
+}
+
+/**
+ * SPCI: Receive lent memory, then attempt to relinquish and lend it on
+ * behalf of the primary VM.
+ */
+TEST_SERVICE(spci_lend_invalid_source)
+{
+	EXPECT_EQ(spci_msg_recv(SPCI_MSG_RECV_BLOCK), SPCI_SUCCESS);
+
+	struct spci_message *recv_buf = SERVICE_RECV_BUFFER();
+	struct spci_message *send_buf = SERVICE_SEND_BUFFER();
+	struct spci_memory_region *memory_region =
+		(struct spci_memory_region *)(spci_get_lend_descriptor(recv_buf)
+						      ->payload);
+	hf_mailbox_clear();
+
+	/* Attempt to relinquish the memory on behalf of the primary VM. */
+	spci_memory_relinquish(send_buf, recv_buf->target_vm_id,
+			       HF_PRIMARY_VM_ID, memory_region->constituents,
+			       memory_region->count, 0);
+	EXPECT_EQ(spci_msg_send(0), SPCI_INVALID_PARAMETERS);
+
+	/* Give the memory back and notify the sender. */
+	spci_memory_relinquish(
+		send_buf, HF_PRIMARY_VM_ID, recv_buf->target_vm_id,
+		memory_region->constituents, memory_region->count, 0);
+	EXPECT_EQ(spci_msg_send(0), SPCI_SUCCESS);
+
+	/* Ensure we cannot lend from the primary to another secondary. */
+	spci_memory_lend(send_buf, SERVICE_VM1, HF_PRIMARY_VM_ID,
+			 memory_region->constituents, memory_region->count, 0,
+			 SPCI_LEND_RW_X, SPCI_LEND_NORMAL_MEM,
+			 SPCI_LEND_CACHE_WRITE_BACK, SPCI_LEND_OUTER_SHAREABLE);
+	EXPECT_EQ(spci_msg_send(0), SPCI_INVALID_PARAMETERS);
+	spci_yield();
+}
+
+/**
+ * SPCI: Attempt to execute an instruction from the lent memory.
+ */
+TEST_SERVICE(spci_memory_lend_relinquish_X)
+{
+	for (;;) {
+		EXPECT_EQ(spci_msg_recv(SPCI_MSG_RECV_BLOCK), SPCI_SUCCESS);
+		uint64_t *ptr;
+
+		struct spci_message *recv_buf = SERVICE_RECV_BUFFER();
+		struct spci_message *send_buf = SERVICE_SEND_BUFFER();
+		struct spci_memory_region *memory_region =
+			(struct spci_memory_region *)(spci_get_lend_descriptor(
+							      recv_buf)
+							      ->payload);
+		hf_mailbox_clear();
+
+		ptr = (uint64_t *)memory_region->constituents[0].address;
+		/*
+		 * Verify that the instruction in memory is the encoded RET
+		 * instruction.
+		 */
+		EXPECT_EQ(*ptr, 0xD65F03C0);
+		/* Try to execute instruction from the shared memory region. */
+		__asm__ volatile("blr %0" ::"r"(ptr));
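+		/*
+		 * If the memory was lent without executable permission, the
+		 * branch above faults and the primary observes the abort.
+		 */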
+
+		/* Release the memory again. */
+		spci_memory_relinquish(
+			send_buf, HF_PRIMARY_VM_ID, recv_buf->target_vm_id,
+			memory_region->constituents, memory_region->count, 0);
+		EXPECT_EQ(spci_msg_send(0), SPCI_SUCCESS);
+	}
+}
+
+/**
+ * SPCI: Attempt to read and write to a shared page.
+ */
+TEST_SERVICE(spci_memory_lend_relinquish_RW)
+{
+	for (;;) {
+		EXPECT_EQ(spci_msg_recv(SPCI_MSG_RECV_BLOCK), SPCI_SUCCESS);
+		uint8_t *ptr;
+
+		struct spci_message *recv_buf = SERVICE_RECV_BUFFER();
+		struct spci_message *send_buf = SERVICE_SEND_BUFFER();
+		struct spci_memory_region *memory_region =
+			(struct spci_memory_region *)(spci_get_lend_descriptor(
+							      recv_buf)
+							      ->payload);
+		hf_mailbox_clear();
+
+		ptr = (uint8_t *)memory_region->constituents[0].address;
+
+		/* Check that we have read access. */
+		for (int i = 0; i < PAGE_SIZE; ++i) {
+			EXPECT_EQ(ptr[i], 'b');
+		}
+
+		/* Return control to primary, to verify shared access. */
+		spci_yield();
+
+		/* Attempt to modify the memory. */
+		for (int i = 0; i < PAGE_SIZE; ++i) {
+			ptr[i]++;
+		}
+
+		spci_memory_relinquish(
+			send_buf, HF_PRIMARY_VM_ID, recv_buf->target_vm_id,
+			memory_region->constituents, memory_region->count, 0);
+		EXPECT_EQ(spci_msg_send(0), SPCI_SUCCESS);
+	}
+}
+
+/**
+ * SPCI: Attempt to modify below the lower bound for the lent memory.
+ */
+TEST_SERVICE(spci_lend_check_lower_bound)
+{
+	EXPECT_EQ(spci_msg_recv(SPCI_MSG_RECV_BLOCK), SPCI_SUCCESS);
+	uint8_t *ptr;
+
+	struct spci_message *recv_buf = SERVICE_RECV_BUFFER();
+	struct spci_memory_region *memory_region =
+		(struct spci_memory_region *)(spci_get_lend_descriptor(recv_buf)
+						      ->payload);
+	hf_mailbox_clear();
+
+	ptr = (uint8_t *)memory_region->constituents[0].address;
+
+	/* Check that one cannot access before the lent region. */
+	ptr[-1]++;
+}
+
+/**
+ * SPCI: Attempt to modify above the upper bound for the lent memory.
+ */
+TEST_SERVICE(spci_lend_check_upper_bound)
+{
+	EXPECT_EQ(spci_msg_recv(SPCI_MSG_RECV_BLOCK), SPCI_SUCCESS);
+	uint8_t *ptr;
+
+	struct spci_message *recv_buf = SERVICE_RECV_BUFFER();
+	struct spci_memory_region *memory_region =
+		(struct spci_memory_region *)(spci_get_lend_descriptor(recv_buf)
+						      ->payload);
+	hf_mailbox_clear();
+
+	ptr = (uint8_t *)memory_region->constituents[0].address;
+
+	/* Check that one cannot access after the lent region. */
+	ptr[PAGE_SIZE]++;
+}
+
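+/**
+ * SPCI: Receive lent memory, check access to it, then attempt to lend sub
+ * sections of it on to another VM.
+ */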
+TEST_SERVICE(spci_memory_lend_twice)
+{
+	EXPECT_EQ(spci_msg_recv(SPCI_MSG_RECV_BLOCK), SPCI_SUCCESS);
+	uint8_t *ptr;
+
+	struct spci_message *recv_buf = SERVICE_RECV_BUFFER();
+	struct spci_message *send_buf = SERVICE_SEND_BUFFER();
+	struct spci_memory_region *memory_region =
+		(struct spci_memory_region *)(spci_get_lend_descriptor(recv_buf)
+						      ->payload);
+	hf_mailbox_clear();
+
+	ptr = (uint8_t *)memory_region->constituents[0].address;
+
+	/* Check that we have read access. */
+	for (int i = 0; i < PAGE_SIZE; ++i) {
+		EXPECT_EQ(ptr[i], 'b');
+	}
+
+	/* Return control to primary. */
+	spci_yield();
+
+	/* Attempt to modify the memory. */
+	for (int i = 0; i < PAGE_SIZE; ++i) {
+		ptr[i]++;
+	}
+
+	for (int i = 1; i < PAGE_SIZE * 2; i++) {
+		memory_region->constituents[0].address = (uint64_t)ptr + i;
+
+		/* Fail to lend the memory on to another VM. */
+		spci_memory_lend(
+			send_buf, SERVICE_VM1, HF_PRIMARY_VM_ID,
+			memory_region->constituents, memory_region->count, 0,
+			SPCI_LEND_RW_X, SPCI_LEND_NORMAL_MEM,
+			SPCI_LEND_CACHE_WRITE_BACK, SPCI_LEND_OUTER_SHAREABLE);
+		EXPECT_EQ(spci_msg_send(0), SPCI_INVALID_PARAMETERS);
+	}
+
+	/* Restore the original address before returning the memory. */
+	memory_region->constituents[0].address = (uint64_t)ptr;
+	spci_memory_relinquish(
+		send_buf, HF_PRIMARY_VM_ID, recv_buf->target_vm_id,
+		memory_region->constituents, memory_region->count, 0);
+	EXPECT_EQ(spci_msg_send(0), SPCI_SUCCESS);
+}