Remove legacy memory sharing API.
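
The api_share_memory handler, the hf_share_memory wrapper, the
HF_SHARE_MEMORY ABI constant and the hf_share enum are removed; tests
and test services are migrated to the SPCI memory sharing messages
(donate, lend, share and relinquish), sent with the
SPCI_MSG_SEND_LEGACY_MEMORY attribute.

As a rough sketch of the migration, mirroring the updated tests (names
such as mb.send, page and SERVICE_VM1 come from the tests, and the
attribute values shown are just the ones the tests use), the sender
side becomes:

    struct spci_memory_region_constituent constituents[] = {
        {.address = (uint64_t)page, .page_count = 1},
    };
    /* Build the memory sharing message in the sender's TX buffer. */
    uint32_t msg_size = spci_memory_share_init(
        mb.send, SERVICE_VM1, constituents, ARRAY_SIZE(constituents),
        0, SPCI_MEMORY_RW_X, SPCI_MEMORY_NORMAL_MEM,
        SPCI_MEMORY_CACHE_WRITE_BACK, SPCI_MEMORY_OUTER_SHAREABLE);
    spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM1, msg_size,
                  SPCI_MSG_SEND_LEGACY_MEMORY);

and the receiver reads the constituents from its RX buffer instead of
a raw pointer, returning lent memory via spci_memory_relinquish_init:

    /* Parse the memory region description out of the RX buffer. */
    struct spci_memory_region *memory_region =
        spci_get_memory_region(recv_buf);
    struct spci_memory_region_constituent *constituents =
        spci_memory_region_get_constituents(memory_region);
    uint8_t *ptr = (uint8_t *)constituents[0].address;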

Bug: 132420445
Change-Id: I3e0b50ce86363acb48e51ac76e0f9cab32818ff9
diff --git a/inc/hf/api.h b/inc/hf/api.h
index 292f036..74860a3 100644
--- a/inc/hf/api.h
+++ b/inc/hf/api.h
@@ -30,8 +30,6 @@
 void api_regs_state_saved(struct vcpu *vcpu);
 int64_t api_mailbox_writable_get(const struct vcpu *current);
 int64_t api_mailbox_waiter_get(spci_vm_id_t vm_id, const struct vcpu *current);
-int64_t api_share_memory(spci_vm_id_t vm_id, ipaddr_t addr, size_t size,
-			 enum hf_share share, struct vcpu *current);
 int64_t api_debug_log(char c, struct vcpu *current);
 
 struct vcpu *api_preempt(struct vcpu *current);
diff --git a/inc/vmapi/hf/abi.h b/inc/vmapi/hf/abi.h
index ac6fe4b..ed004b7 100644
--- a/inc/vmapi/hf/abi.h
+++ b/inc/vmapi/hf/abi.h
@@ -30,7 +30,6 @@
 #define HF_INTERRUPT_ENABLE            0xff05
 #define HF_INTERRUPT_GET               0xff06
 #define HF_INTERRUPT_INJECT            0xff07
-#define HF_SHARE_MEMORY                0xff08
 
 /* Custom SPCI-like calls returned from SPCI_RUN. */
 #define HF_SPCI_RUN_WAIT_FOR_INTERRUPT 0xff09
@@ -40,23 +39,3 @@
 #define HF_DEBUG_LOG            0xbd000000
 
 /* clang-format on */
-
-enum hf_share {
-	/**
-	 * Relinquish ownership and access to the memory and pass them to the
-	 * recipient.
-	 */
-	HF_MEMORY_GIVE,
-
-	/**
-	 * Retain ownership of the memory but relinquish access to the
-	 * recipient.
-	 */
-	HF_MEMORY_LEND,
-
-	/**
-	 * Retain ownership and access but additionally allow access to the
-	 * recipient.
-	 */
-	HF_MEMORY_SHARE,
-};
diff --git a/inc/vmapi/hf/call.h b/inc/vmapi/hf/call.h
index 6ec1694..e9af0e7 100644
--- a/inc/vmapi/hf/call.h
+++ b/inc/vmapi/hf/call.h
@@ -257,21 +257,6 @@
 }
 
 /**
- * Shares a region of memory with another VM.
- *
- * Returns 0 on success or -1 if the sharing was not allowed or failed.
- *
- * TODO: replace this with a better API once we have decided what that should
- *       look like.
- */
-static inline int64_t hf_share_memory(spci_vm_id_t vm_id, hf_ipaddr_t addr,
-				      size_t size, enum hf_share share)
-{
-	return hf_call(HF_SHARE_MEMORY, (((uint64_t)vm_id) << 32) | share, addr,
-		       size);
-}
-
-/**
  * Sends a character to the debug log for the VM.
  *
  * Returns 0 on success, or -1 if it failed for some reason.
diff --git a/src/api.c b/src/api.c
index c748fb5..4db97dd 100644
--- a/src/api.c
+++ b/src/api.c
@@ -1581,167 +1581,6 @@
 	return ret;
 }
 
-/**
- * Shares memory from the calling VM with another. The memory can be shared in
- * different modes.
- *
- * TODO: the interface for sharing memory will need to be enhanced to allow
- *       sharing with different modes e.g. read-only, informing the recipient
- *       of the memory they have been given, opting to not wipe the memory and
- *       possibly allowing multiple blocks to be transferred. What this will
- *       look like is TBD.
- */
-int64_t api_share_memory(spci_vm_id_t vm_id, ipaddr_t addr, size_t size,
-			 enum hf_share share, struct vcpu *current)
-{
-	struct vm *from = current->vm;
-	struct vm *to;
-	uint32_t orig_from_mode;
-	uint32_t from_mode;
-	uint32_t to_mode;
-	ipaddr_t begin;
-	ipaddr_t end;
-	paddr_t pa_begin;
-	paddr_t pa_end;
-	struct mpool local_page_pool;
-	int64_t ret;
-
-	/* Disallow reflexive shares as this suggests an error in the VM. */
-	if (vm_id == from->id) {
-		return -1;
-	}
-
-	/* Ensure the target VM exists. */
-	to = vm_find(vm_id);
-	if (to == NULL) {
-		return -1;
-	}
-
-	begin = addr;
-	end = ipa_add(addr, size);
-
-	/* Fail if addresses are not page-aligned. */
-	if (!is_aligned(ipa_addr(begin), PAGE_SIZE) ||
-	    !is_aligned(ipa_addr(end), PAGE_SIZE)) {
-		return -1;
-	}
-
-	/* Convert the sharing request to memory management modes. */
-	switch (share) {
-	case HF_MEMORY_GIVE:
-		from_mode = MM_MODE_INVALID | MM_MODE_UNOWNED;
-		to_mode = MM_MODE_R | MM_MODE_W | MM_MODE_X;
-		break;
-
-	case HF_MEMORY_LEND:
-		from_mode = MM_MODE_INVALID;
-		to_mode = MM_MODE_R | MM_MODE_W | MM_MODE_X | MM_MODE_UNOWNED;
-		break;
-
-	case HF_MEMORY_SHARE:
-		from_mode = MM_MODE_R | MM_MODE_W | MM_MODE_X | MM_MODE_SHARED;
-		to_mode = MM_MODE_R | MM_MODE_W | MM_MODE_X | MM_MODE_UNOWNED |
-			  MM_MODE_SHARED;
-		break;
-
-	default:
-		/* The input is untrusted so might not be a valid value. */
-		return -1;
-	}
-
-	/*
-	 * Create a local pool so any freed memory can't be used by another
-	 * thread. This is to ensure the original mapping can be restored if any
-	 * stage of the process fails.
-	 */
-	mpool_init_with_fallback(&local_page_pool, &api_page_pool);
-
-	sl_lock_both(&from->lock, &to->lock);
-
-	/*
-	 * Ensure that the memory range is mapped with the same mode so that
-	 * changes can be reverted if the process fails.
-	 */
-	if (!mm_vm_get_mode(&from->ptable, begin, end, &orig_from_mode)) {
-		goto fail;
-	}
-
-	/* Ensure the address range is normal memory and not a device. */
-	if (orig_from_mode & MM_MODE_D) {
-		goto fail;
-	}
-
-	/*
-	 * Ensure the memory range is valid for the sender. If it isn't, the
-	 * sender has either shared it with another VM already or has no claim
-	 * to the memory.
-	 */
-	if (orig_from_mode & MM_MODE_INVALID) {
-		goto fail;
-	}
-
-	/*
-	 * The sender must own the memory and have exclusive access to it in
-	 * order to share it. Alternatively, it is giving memory back to the
-	 * owning VM.
-	 */
-	if (orig_from_mode & MM_MODE_UNOWNED) {
-		uint32_t orig_to_mode;
-
-		if (share != HF_MEMORY_GIVE ||
-		    !mm_vm_get_mode(&to->ptable, begin, end, &orig_to_mode) ||
-		    orig_to_mode & MM_MODE_UNOWNED) {
-			goto fail;
-		}
-	} else if (orig_from_mode & MM_MODE_SHARED) {
-		goto fail;
-	}
-
-	pa_begin = pa_from_ipa(begin);
-	pa_end = pa_from_ipa(end);
-
-	/*
-	 * First update the mapping for the sender so there is not overlap with
-	 * the recipient.
-	 */
-	if (!mm_vm_identity_map(&from->ptable, pa_begin, pa_end, from_mode,
-				NULL, &local_page_pool)) {
-		goto fail;
-	}
-
-	/* Clear the memory so no VM or device can see the previous contents. */
-	if (!api_clear_memory(pa_begin, pa_end, &local_page_pool)) {
-		goto fail_return_to_sender;
-	}
-
-	/* Complete the transfer by mapping the memory into the recipient. */
-	if (!mm_vm_identity_map(&to->ptable, pa_begin, pa_end, to_mode, NULL,
-				&local_page_pool)) {
-		/* TODO: partial defrag of failed range. */
-		/* Recover any memory consumed in failed mapping. */
-		mm_vm_defrag(&from->ptable, &local_page_pool);
-		goto fail_return_to_sender;
-	}
-
-	ret = 0;
-	goto out;
-
-fail_return_to_sender:
-	CHECK(mm_vm_identity_map(&from->ptable, pa_begin, pa_end,
-				 orig_from_mode, NULL, &local_page_pool));
-
-fail:
-	ret = -1;
-
-out:
-	sl_unlock(&from->lock);
-	sl_unlock(&to->lock);
-
-	mpool_fini(&local_page_pool);
-
-	return ret;
-}
-
 /** Returns the version of the implemented SPCI specification. */
 struct spci_value api_spci_version(void)
 {
diff --git a/src/arch/aarch64/hypervisor/handler.c b/src/arch/aarch64/hypervisor/handler.c
index 4852f69..0e75b97 100644
--- a/src/arch/aarch64/hypervisor/handler.c
+++ b/src/arch/aarch64/hypervisor/handler.c
@@ -485,12 +485,6 @@
 						       args.arg3, vcpu, &next);
 		break;
 
-	case HF_SHARE_MEMORY:
-		vcpu->regs.r[0] = api_share_memory(
-			args.arg1 >> 32, ipa_init(args.arg2), args.arg3,
-			args.arg1 & 0xffffffff, vcpu);
-		break;
-
 	case HF_DEBUG_LOG:
 		vcpu->regs.r[0] = api_debug_log(args.arg1, vcpu);
 		break;
diff --git a/test/vmapi/primary_with_secondaries/abort.c b/test/vmapi/primary_with_secondaries/abort.c
index e8559f8..3b6b995 100644
--- a/test/vmapi/primary_with_secondaries/abort.c
+++ b/test/vmapi/primary_with_secondaries/abort.c
@@ -44,6 +44,12 @@
 
 	SERVICE_SELECT(SERVICE_VM1, "straddling_data_abort", mb.send);
 
+	/*
+	 * First we get a message about the memory being donated to us, then we
+	 * get the abort.
+	 */
+	run_res = spci_run(SERVICE_VM1, 0);
+	EXPECT_EQ(run_res.func, SPCI_MSG_SEND_32);
 	run_res = spci_run(SERVICE_VM1, 0);
 	EXPECT_SPCI_ERROR(run_res, SPCI_ABORTED);
 }
@@ -72,6 +78,12 @@
 
 	SERVICE_SELECT(SERVICE_VM1, "straddling_instruction_abort", mb.send);
 
+	/*
+	 * First we get a message about the memory being donated to us, then we
+	 * get the abort.
+	 */
+	run_res = spci_run(SERVICE_VM1, 0);
+	EXPECT_EQ(run_res.func, SPCI_MSG_SEND_32);
 	run_res = spci_run(SERVICE_VM1, 0);
 	EXPECT_SPCI_ERROR(run_res, SPCI_ABORTED);
 }
diff --git a/test/vmapi/primary_with_secondaries/memory_sharing.c b/test/vmapi/primary_with_secondaries/memory_sharing.c
index 108bbb9..bbd1c92 100644
--- a/test/vmapi/primary_with_secondaries/memory_sharing.c
+++ b/test/vmapi/primary_with_secondaries/memory_sharing.c
@@ -28,30 +28,9 @@
 alignas(PAGE_SIZE) static uint8_t page[PAGE_SIZE];
 
 /**
- * Tries sharing memory in different modes with different VMs and asserts that
- * it will fail.
- */
-void check_cannot_share_memory(void *ptr, size_t size)
-{
-	uint32_t vms[] = {SERVICE_VM1, SERVICE_VM2};
-	enum hf_share modes[] = {HF_MEMORY_GIVE, HF_MEMORY_LEND,
-				 HF_MEMORY_SHARE};
-	size_t i;
-	size_t j;
-
-	for (i = 0; i < ARRAY_SIZE(vms); ++i) {
-		for (j = 0; j < ARRAY_SIZE(modes); ++j) {
-			ASSERT_EQ(hf_share_memory(vms[i], (hf_ipaddr_t)ptr,
-						  size, modes[j]),
-				  -1);
-		}
-	}
-}
-
-/**
  * Helper function to test sending memory in the different configurations.
  */
-static void spci_check_cannot_send_memory(
+static void check_cannot_send_memory(
 	struct mailbox_buffers mb, enum spci_memory_share mode,
 	struct spci_memory_region_constituent constituents[],
 	int num_constituents, int32_t avoid_vm)
@@ -120,27 +99,27 @@
 /**
  * Helper function to test lending memory in the different configurations.
  */
-static void spci_check_cannot_lend_memory(
+static void check_cannot_lend_memory(
 	struct mailbox_buffers mb,
 	struct spci_memory_region_constituent constituents[],
 	int num_constituents, int32_t avoid_vm)
 
 {
-	spci_check_cannot_send_memory(mb, SPCI_MEMORY_LEND, constituents,
-				      num_constituents, avoid_vm);
+	check_cannot_send_memory(mb, SPCI_MEMORY_LEND, constituents,
+				 num_constituents, avoid_vm);
 }
 
 /**
  * Helper function to test sharing memory in the different configurations.
  */
-static void spci_check_cannot_share_memory(
+static void check_cannot_share_memory(
 	struct mailbox_buffers mb,
 	struct spci_memory_region_constituent constituents[],
 	int num_constituents, int32_t avoid_vm)
 
 {
-	spci_check_cannot_send_memory(mb, SPCI_MEMORY_SHARE, constituents,
-				      num_constituents, avoid_vm);
+	check_cannot_send_memory(mb, SPCI_MEMORY_SHARE, constituents,
+				 num_constituents, avoid_vm);
 }
 
 /**
@@ -148,7 +127,7 @@
  * it will fail to all except the supplied VM ID as this would succeed if it
  * is the only borrower.
  */
-static void spci_check_cannot_donate_memory(
+static void check_cannot_donate_memory(
 	struct mailbox_buffers mb,
 	struct spci_memory_region_constituent constituents[],
 	int num_constituents, int32_t avoid_vm)
@@ -178,7 +157,7 @@
  * Tries relinquishing memory with different VMs and asserts that
  * it will fail.
  */
-static void spci_check_cannot_relinquish_memory(
+static void check_cannot_relinquish_memory(
 	struct mailbox_buffers mb,
 	struct spci_memory_region_constituent constituents[],
 	int num_constituents)
@@ -201,47 +180,6 @@
 }
 
 /**
- * Device address space cannot be shared, only normal memory.
- */
-TEST(memory_sharing, cannot_share_device_memory)
-{
-	check_cannot_share_memory((void *)PAGE_SIZE, PAGE_SIZE);
-}
-
-/**
- * After memory has been shared concurrently, it can't be shared again.
- */
-TEST(memory_sharing, cannot_share_concurrent_memory_twice)
-{
-	ASSERT_EQ(hf_share_memory(SERVICE_VM1, (hf_ipaddr_t)&page, PAGE_SIZE,
-				  HF_MEMORY_SHARE),
-		  0);
-	check_cannot_share_memory(page, PAGE_SIZE);
-}
-
-/**
- * After memory has been given away, it can't be shared again.
- */
-TEST(memory_sharing, cannot_share_given_memory_twice)
-{
-	ASSERT_EQ(hf_share_memory(SERVICE_VM1, (hf_ipaddr_t)&page, PAGE_SIZE,
-				  HF_MEMORY_GIVE),
-		  0);
-	check_cannot_share_memory(page, PAGE_SIZE);
-}
-
-/**
- * After memory has been lent, it can't be shared again.
- */
-TEST(memory_sharing, cannot_share_lent_memory_twice)
-{
-	ASSERT_EQ(hf_share_memory(SERVICE_VM1, (hf_ipaddr_t)&page, PAGE_SIZE,
-				  HF_MEMORY_LEND),
-		  0);
-	check_cannot_share_memory(page, PAGE_SIZE);
-}
-
-/**
  * Sharing memory concurrently gives both VMs access to the memory so it can be
  * used for communication.
  */
@@ -250,21 +188,23 @@
 	struct spci_value run_res;
 	struct mailbox_buffers mb = set_up_mailbox();
 	uint8_t *ptr = page;
+	uint32_t msg_size;
+	struct spci_memory_region_constituent constituents[] = {
+		{.address = (uint64_t)page, .page_count = 1},
+	};
 
 	SERVICE_SELECT(SERVICE_VM1, "memory_increment", mb.send);
 
 	memset_s(ptr, sizeof(page), 'a', PAGE_SIZE);
-	ASSERT_EQ(hf_share_memory(SERVICE_VM1, (hf_ipaddr_t)&page, PAGE_SIZE,
-				  HF_MEMORY_SHARE),
-		  0);
 
-	/*
-	 * TODO: the address of the memory will be part of the proper API. That
-	 *       API is still to be agreed on so the address is passed
-	 *       explicitly to test the mechanism.
-	 */
-	memcpy_s(mb.send, SPCI_MSG_PAYLOAD_MAX, &ptr, sizeof(ptr));
-	EXPECT_EQ(spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM1, sizeof(ptr), 0)
+	msg_size = spci_memory_init(
+		mb.send, SPCI_MEMORY_SHARE, SERVICE_VM1, constituents,
+		ARRAY_SIZE(constituents), 0, SPCI_MEMORY_REGION_FLAG_CLEAR,
+		SPCI_MEMORY_RW_X, SPCI_MEMORY_NORMAL_MEM,
+		SPCI_MEMORY_CACHE_WRITE_BACK, SPCI_MEMORY_OUTER_SHAREABLE);
+
+	EXPECT_EQ(spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM1, msg_size,
+				SPCI_MSG_SEND_LEGACY_MEMORY)
 			  .func,
 		  SPCI_SUCCESS_32);
 
@@ -293,94 +233,28 @@
 	struct spci_value run_res;
 	struct mailbox_buffers mb = set_up_mailbox();
 	uint8_t *ptr = page;
-
-	SERVICE_SELECT(SERVICE_VM1, "memory_return", mb.send);
-
-	/* Dirty the memory before sharing it. */
-	memset_s(ptr, sizeof(page), 'b', PAGE_SIZE);
-	ASSERT_EQ(hf_share_memory(SERVICE_VM1, (hf_ipaddr_t)&page, PAGE_SIZE,
-				  HF_MEMORY_SHARE),
-		  0);
-
-	/*
-	 * TODO: the address of the memory will be part of the proper API. That
-	 *       API is still to be agreed on so the address is passed
-	 *       explicitly to test the mechanism.
-	 */
-	memcpy_s(mb.send, SPCI_MSG_PAYLOAD_MAX, &ptr, sizeof(ptr));
-	EXPECT_EQ(spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM1, sizeof(ptr), 0)
-			  .func,
-		  SPCI_SUCCESS_32);
-
-	/* Let the memory be returned. */
-	run_res = spci_run(SERVICE_VM1, 0);
-	EXPECT_EQ(run_res.func, SPCI_MSG_SEND_32);
-	for (int i = 0; i < PAGE_SIZE; ++i) {
-		ASSERT_EQ(ptr[i], 0);
-	}
-
-	/* Observe the service faulting when accessing the memory. */
-	run_res = spci_run(SERVICE_VM1, 0);
-	EXPECT_SPCI_ERROR(run_res, SPCI_ABORTED);
-}
-
-/**
- * Device address space cannot be shared, only normal memory.
- */
-TEST(memory_sharing, spci_cannot_share_device_memory)
-{
-	struct mailbox_buffers mb = set_up_mailbox();
-	struct spci_memory_region_constituent constituents[] = {
-		{.address = PAGE_SIZE, .page_count = 1},
-	};
-
-	SERVICE_SELECT(SERVICE_VM1, "spci_memory_return", mb.send);
-	SERVICE_SELECT(SERVICE_VM2, "spci_memory_return", mb.send);
-
-	spci_check_cannot_lend_memory(mb, constituents,
-				      ARRAY_SIZE(constituents), -1);
-	spci_check_cannot_share_memory(mb, constituents,
-				       ARRAY_SIZE(constituents), -1);
-	spci_check_cannot_donate_memory(mb, constituents,
-					ARRAY_SIZE(constituents), -1);
-}
-
-/**
- * SPCI Memory given away can be given back.
- * Employing SPCI donate architected messages.
- */
-TEST(memory_sharing, spci_give_and_get_back)
-{
-	struct spci_value run_res;
-	struct mailbox_buffers mb = set_up_mailbox();
-	uint8_t *ptr = page;
 	uint32_t msg_size;
-
-	SERVICE_SELECT(SERVICE_VM1, "spci_memory_return", mb.send);
-
-	/* Initialise the memory before giving it. */
-	memset_s(ptr, sizeof(page), 'b', PAGE_SIZE);
-
-	/* Can only donate single constituent memory region. */
 	struct spci_memory_region_constituent constituents[] = {
 		{.address = (uint64_t)page, .page_count = 1},
 	};
 
-	msg_size = spci_memory_donate_init(
+	SERVICE_SELECT(SERVICE_VM1, "spci_memory_lend_relinquish", mb.send);
+
+	/* Dirty the memory before sharing it. */
+	memset_s(ptr, sizeof(page), 'b', PAGE_SIZE);
+
+	msg_size = spci_memory_share_init(
 		mb.send, SERVICE_VM1, constituents, ARRAY_SIZE(constituents), 0,
 		SPCI_MEMORY_RW_X, SPCI_MEMORY_NORMAL_MEM,
 		SPCI_MEMORY_CACHE_WRITE_BACK, SPCI_MEMORY_OUTER_SHAREABLE);
-
 	EXPECT_EQ(spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM1, msg_size,
 				SPCI_MSG_SEND_LEGACY_MEMORY)
 			  .func,
 		  SPCI_SUCCESS_32);
-	run_res = spci_run(SERVICE_VM1, 0);
 
 	/* Let the memory be returned. */
+	run_res = spci_run(SERVICE_VM1, 0);
 	EXPECT_EQ(run_res.func, SPCI_MSG_SEND_32);
-
-	/* Ensure that the secondary VM accessed the region. */
 	for (int i = 0; i < PAGE_SIZE; ++i) {
 		ASSERT_EQ(ptr[i], 'c');
 	}
@@ -391,9 +265,30 @@
 }
 
 /**
- * SPCI: Check that memory can be lent and is accessible by both parties.
+ * Device address space cannot be shared, only normal memory.
  */
-TEST(memory_sharing, spci_lend_relinquish)
+TEST(memory_sharing, cannot_share_device_memory)
+{
+	struct mailbox_buffers mb = set_up_mailbox();
+	struct spci_memory_region_constituent constituents[] = {
+		{.address = PAGE_SIZE, .page_count = 1},
+	};
+
+	SERVICE_SELECT(SERVICE_VM1, "spci_memory_return", mb.send);
+	SERVICE_SELECT(SERVICE_VM2, "spci_memory_return", mb.send);
+
+	check_cannot_lend_memory(mb, constituents, ARRAY_SIZE(constituents),
+				 -1);
+	check_cannot_share_memory(mb, constituents, ARRAY_SIZE(constituents),
+				  -1);
+	check_cannot_donate_memory(mb, constituents, ARRAY_SIZE(constituents),
+				   -1);
+}
+
+/**
+ * Check that memory can be lent and is accessible by both parties.
+ */
+TEST(memory_sharing, lend_relinquish)
 {
 	struct spci_value run_res;
 	struct mailbox_buffers mb = set_up_mailbox();
@@ -441,22 +336,22 @@
 	struct spci_value run_res;
 	struct mailbox_buffers mb = set_up_mailbox();
 	uint8_t *ptr = page;
+	uint32_t msg_size;
+	struct spci_memory_region_constituent constituents[] = {
+		{.address = (uint64_t)page, .page_count = 1},
+	};
 
-	SERVICE_SELECT(SERVICE_VM1, "memory_return", mb.send);
+	SERVICE_SELECT(SERVICE_VM1, "spci_memory_return", mb.send);
 
 	/* Dirty the memory before giving it. */
 	memset_s(ptr, sizeof(page), 'b', PAGE_SIZE);
-	ASSERT_EQ(hf_share_memory(SERVICE_VM1, (hf_ipaddr_t)&page, PAGE_SIZE,
-				  HF_MEMORY_GIVE),
-		  0);
 
-	/*
-	 * TODO: the address of the memory will be part of the proper API. That
-	 *       API is still to be agreed on so the address is passed
-	 *       explicitly to test the mechanism.
-	 */
-	memcpy_s(mb.send, SPCI_MSG_PAYLOAD_MAX, &ptr, sizeof(ptr));
-	EXPECT_EQ(spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM1, sizeof(ptr), 0)
+	msg_size = spci_memory_donate_init(
+		mb.send, SERVICE_VM1, constituents, ARRAY_SIZE(constituents), 0,
+		SPCI_MEMORY_RW_X, SPCI_MEMORY_NORMAL_MEM,
+		SPCI_MEMORY_CACHE_WRITE_BACK, SPCI_MEMORY_OUTER_SHAREABLE);
+	EXPECT_EQ(spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM1, msg_size,
+				SPCI_MSG_SEND_LEGACY_MEMORY)
 			  .func,
 		  SPCI_SUCCESS_32);
 
@@ -464,7 +359,7 @@
 	run_res = spci_run(SERVICE_VM1, 0);
 	EXPECT_EQ(run_res.func, SPCI_MSG_SEND_32);
 	for (int i = 0; i < PAGE_SIZE; ++i) {
-		ASSERT_EQ(ptr[i], 0);
+		ASSERT_EQ(ptr[i], 'c');
 	}
 
 	/* Observe the service faulting when accessing the memory. */
@@ -480,22 +375,22 @@
 	struct spci_value run_res;
 	struct mailbox_buffers mb = set_up_mailbox();
 	uint8_t *ptr = page;
+	uint32_t msg_size;
+	struct spci_memory_region_constituent constituents[] = {
+		{.address = (uint64_t)page, .page_count = 1},
+	};
 
-	SERVICE_SELECT(SERVICE_VM1, "memory_return", mb.send);
+	SERVICE_SELECT(SERVICE_VM1, "spci_memory_lend_relinquish", mb.send);
 
 	/* Dirty the memory before lending it. */
 	memset_s(ptr, sizeof(page), 'c', PAGE_SIZE);
-	ASSERT_EQ(hf_share_memory(SERVICE_VM1, (hf_ipaddr_t)&page, PAGE_SIZE,
-				  HF_MEMORY_LEND),
-		  0);
 
-	/*
-	 * TODO: the address of the memory will be part of the proper API. That
-	 *       API is still to be agreed on so the address is passed
-	 *       explicitly to test the mechanism.
-	 */
-	memcpy_s(mb.send, SPCI_MSG_PAYLOAD_MAX, &ptr, sizeof(ptr));
-	EXPECT_EQ(spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM1, sizeof(ptr), 0)
+	msg_size = spci_memory_lend_init(
+		mb.send, SERVICE_VM1, constituents, ARRAY_SIZE(constituents), 0,
+		SPCI_MEMORY_RW_X, SPCI_MEMORY_NORMAL_MEM,
+		SPCI_MEMORY_CACHE_WRITE_BACK, SPCI_MEMORY_OUTER_SHAREABLE);
+	EXPECT_EQ(spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM1, msg_size,
+				SPCI_MSG_SEND_LEGACY_MEMORY)
 			  .func,
 		  SPCI_SUCCESS_32);
 
@@ -503,7 +398,7 @@
 	run_res = spci_run(SERVICE_VM1, 0);
 	EXPECT_EQ(run_res.func, SPCI_MSG_SEND_32);
 	for (int i = 0; i < PAGE_SIZE; ++i) {
-		ASSERT_EQ(ptr[i], 0);
+		ASSERT_EQ(ptr[i], 'd');
 	}
 
 	/* Observe the service faulting when accessing the memory. */
@@ -518,38 +413,41 @@
 {
 	struct spci_value run_res;
 	struct mailbox_buffers mb = set_up_mailbox();
-	uint8_t *ptr = page;
+	uint32_t msg_size;
+	struct spci_memory_region_constituent constituents[] = {
+		{.address = (uint64_t)page, .page_count = 1},
+	};
 
-	SERVICE_SELECT(SERVICE_VM1, "memory_return", mb.send);
+	SERVICE_SELECT(SERVICE_VM1, "spci_memory_lend_relinquish", mb.send);
 
 	/* Share the memory initially. */
-	ASSERT_EQ(hf_share_memory(SERVICE_VM1, (hf_ipaddr_t)&page, PAGE_SIZE,
-				  HF_MEMORY_LEND),
-		  0);
-
-	/*
-	 * TODO: the address of the memory will be part of the proper API. That
-	 *       API is still to be agreed on so the address is passed
-	 *       explicitly to test the mechanism.
-	 */
-	memcpy_s(mb.send, SPCI_MSG_PAYLOAD_MAX, &ptr, sizeof(ptr));
-	EXPECT_EQ(spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM1, sizeof(ptr), 0)
+	msg_size = spci_memory_lend_init(
+		mb.send, SERVICE_VM1, constituents, ARRAY_SIZE(constituents), 0,
+		SPCI_MEMORY_RW_X, SPCI_MEMORY_NORMAL_MEM,
+		SPCI_MEMORY_CACHE_WRITE_BACK, SPCI_MEMORY_OUTER_SHAREABLE);
+	EXPECT_EQ(spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM1, msg_size,
+				SPCI_MSG_SEND_LEGACY_MEMORY)
 			  .func,
 		  SPCI_SUCCESS_32);
 
 	/* Let the memory be returned. */
 	run_res = spci_run(SERVICE_VM1, 0);
 	EXPECT_EQ(run_res.func, SPCI_MSG_SEND_32);
+	spci_rx_release();
 
 	/* Share the memory again after it has been returned. */
-	ASSERT_EQ(hf_share_memory(SERVICE_VM1, (hf_ipaddr_t)&page, PAGE_SIZE,
-				  HF_MEMORY_LEND),
-		  0);
+	msg_size = spci_memory_lend_init(
+		mb.send, SERVICE_VM1, constituents, ARRAY_SIZE(constituents), 0,
+		SPCI_MEMORY_RW_X, SPCI_MEMORY_NORMAL_MEM,
+		SPCI_MEMORY_CACHE_WRITE_BACK, SPCI_MEMORY_OUTER_SHAREABLE);
+	EXPECT_EQ(spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM1, msg_size,
+				SPCI_MSG_SEND_LEGACY_MEMORY)
+			  .func,
+		  SPCI_SUCCESS_32);
 
 	/* Observe the service doesn't fault when accessing the memory. */
 	run_res = spci_run(SERVICE_VM1, 0);
-	EXPECT_EQ(run_res.func, SPCI_MSG_WAIT_32);
-	EXPECT_EQ(run_res.arg2, SPCI_SLEEP_INDEFINITE);
+	EXPECT_EQ(run_res.func, SPCI_MSG_SEND_32);
 }
 
 /**
@@ -559,22 +457,21 @@
 {
 	struct spci_value run_res;
 	struct mailbox_buffers mb = set_up_mailbox();
-	uint8_t *ptr = page;
+	uint32_t msg_size;
+	struct spci_memory_region_constituent constituents[] = {
+		{.address = (uint64_t)page, .page_count = 1},
+	};
 
-	SERVICE_SELECT(SERVICE_VM1, "memory_return", mb.send);
+	SERVICE_SELECT(SERVICE_VM1, "spci_memory_lend_relinquish", mb.send);
+	SERVICE_SELECT(SERVICE_VM2, "spci_memory_lend_relinquish", mb.send);
 
 	/* Share the memory initially. */
-	ASSERT_EQ(hf_share_memory(SERVICE_VM1, (hf_ipaddr_t)&page, PAGE_SIZE,
-				  HF_MEMORY_LEND),
-		  0);
-
-	/*
-	 * TODO: the address of the memory will be part of the proper API. That
-	 *       API is still to be agreed on so the address is passed
-	 *       explicitly to test the mechanism.
-	 */
-	memcpy_s(mb.send, SPCI_MSG_PAYLOAD_MAX, &ptr, sizeof(ptr));
-	EXPECT_EQ(spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM1, sizeof(ptr), 0)
+	msg_size = spci_memory_lend_init(
+		mb.send, SERVICE_VM1, constituents, ARRAY_SIZE(constituents), 0,
+		SPCI_MEMORY_RW_X, SPCI_MEMORY_NORMAL_MEM,
+		SPCI_MEMORY_CACHE_WRITE_BACK, SPCI_MEMORY_OUTER_SHAREABLE);
+	EXPECT_EQ(spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM1, msg_size,
+				SPCI_MSG_SEND_LEGACY_MEMORY)
 			  .func,
 		  SPCI_SUCCESS_32);
 
@@ -582,10 +479,15 @@
 	run_res = spci_run(SERVICE_VM1, 0);
 	EXPECT_EQ(run_res.func, SPCI_MSG_SEND_32);
 
-	/* Share the memory with a differnt VM after it has been returned. */
-	ASSERT_EQ(hf_share_memory(SERVICE_VM2, (hf_ipaddr_t)&page, PAGE_SIZE,
-				  HF_MEMORY_LEND),
-		  0);
+	/* Share the memory with a different VM after it has been returned. */
+	msg_size = spci_memory_lend_init(
+		mb.send, SERVICE_VM2, constituents, ARRAY_SIZE(constituents), 0,
+		SPCI_MEMORY_RW_X, SPCI_MEMORY_NORMAL_MEM,
+		SPCI_MEMORY_CACHE_WRITE_BACK, SPCI_MEMORY_OUTER_SHAREABLE);
+	EXPECT_EQ(spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM2, msg_size,
+				SPCI_MSG_SEND_LEGACY_MEMORY)
+			  .func,
+		  SPCI_SUCCESS_32);
 
 	/* Observe the service faulting when accessing the memory. */
 	run_res = spci_run(SERVICE_VM1, 0);
@@ -599,6 +501,8 @@
 {
 	struct spci_value run_res;
 	struct mailbox_buffers mb = set_up_mailbox();
+	struct spci_memory_region *memory_region;
+	struct spci_memory_region_constituent *constituents;
 	uint8_t *ptr;
 
 	SERVICE_SELECT(SERVICE_VM1, "give_memory_and_fault", mb.send);
@@ -608,7 +512,9 @@
 	EXPECT_EQ(run_res.func, SPCI_MSG_SEND_32);
 
 	/* Check the memory was cleared. */
-	ptr = *(uint8_t **)mb.recv;
+	memory_region = spci_get_memory_region(mb.recv);
+	constituents = spci_memory_region_get_constituents(memory_region);
+	ptr = (uint8_t *)constituents[0].address;
 	for (int i = 0; i < PAGE_SIZE; ++i) {
 		ASSERT_EQ(ptr[i], 0);
 	}
@@ -625,6 +531,8 @@
 {
 	struct spci_value run_res;
 	struct mailbox_buffers mb = set_up_mailbox();
+	struct spci_memory_region *memory_region;
+	struct spci_memory_region_constituent *constituents;
 	uint8_t *ptr;
 
 	SERVICE_SELECT(SERVICE_VM1, "lend_memory_and_fault", mb.send);
@@ -634,7 +542,9 @@
 	EXPECT_EQ(run_res.func, SPCI_MSG_SEND_32);
 
 	/* Check the memory was cleared. */
-	ptr = *(uint8_t **)mb.recv;
+	memory_region = spci_get_memory_region(mb.recv);
+	constituents = spci_memory_region_get_constituents(memory_region);
+	ptr = (uint8_t *)constituents[0].address;
 	for (int i = 0; i < PAGE_SIZE; ++i) {
 		ASSERT_EQ(ptr[i], 0);
 	}
@@ -645,9 +555,9 @@
 }
 
 /**
- * SPCI: Verify past the upper bound of the donated region cannot be accessed.
+ * Verify memory past the upper bound of the donated region cannot be accessed.
  */
-TEST(memory_sharing, spci_donate_check_upper_bounds)
+TEST(memory_sharing, donate_check_upper_bounds)
 {
 	struct spci_value run_res;
 	struct mailbox_buffers mb = set_up_mailbox();
@@ -678,9 +588,9 @@
 }
 
 /**
- * SPCI: Verify past the lower bound of the donated region cannot be accessed.
+ * Verify memory past the lower bound of the donated region cannot be accessed.
  */
-TEST(memory_sharing, spci_donate_check_lower_bounds)
+TEST(memory_sharing, donate_check_lower_bounds)
 {
 	struct spci_value run_res;
 	struct mailbox_buffers mb = set_up_mailbox();
@@ -711,10 +621,10 @@
 }
 
 /**
- * SPCI: After memory has been returned, it is free to be shared with another
+ * After memory has been returned, it is free to be shared with another
  * VM.
  */
-TEST(memory_sharing, spci_donate_elsewhere_after_return)
+TEST(memory_sharing, donate_elsewhere_after_return)
 {
 	struct spci_value run_res;
 	struct mailbox_buffers mb = set_up_mailbox();
@@ -761,10 +671,10 @@
 }
 
 /**
- * SPCI: Check if memory can be donated between secondary VMs.
+ * Check if memory can be donated between secondary VMs.
  * Ensure that the memory can no longer be accessed by the first VM.
  */
-TEST(memory_sharing, spci_donate_vms)
+TEST(memory_sharing, donate_vms)
 {
 	struct spci_value run_res;
 	struct mailbox_buffers mb = set_up_mailbox();
@@ -813,9 +723,9 @@
 }
 
 /**
- * SPCI: Check that memory is unable to be donated to multiple parties.
+ * Check that memory is unable to be donated to multiple parties.
  */
-TEST(memory_sharing, spci_donate_twice)
+TEST(memory_sharing, donate_twice)
 {
 	struct spci_value run_res;
 	struct mailbox_buffers mb = set_up_mailbox();
@@ -847,15 +757,15 @@
 	EXPECT_EQ(run_res.func, SPCI_YIELD_32);
 
 	/* Fail to share memory again with any VM. */
-	spci_check_cannot_share_memory(mb, constituents,
-				       ARRAY_SIZE(constituents), -1);
-	spci_check_cannot_lend_memory(mb, constituents,
-				      ARRAY_SIZE(constituents), -1);
-	spci_check_cannot_donate_memory(mb, constituents,
-					ARRAY_SIZE(constituents), -1);
+	check_cannot_share_memory(mb, constituents, ARRAY_SIZE(constituents),
+				  -1);
+	check_cannot_lend_memory(mb, constituents, ARRAY_SIZE(constituents),
+				 -1);
+	check_cannot_donate_memory(mb, constituents, ARRAY_SIZE(constituents),
+				   -1);
 	/* Fail to relinquish memory from any VM. */
-	spci_check_cannot_relinquish_memory(mb, constituents,
-					    ARRAY_SIZE(constituents));
+	check_cannot_relinquish_memory(mb, constituents,
+				       ARRAY_SIZE(constituents));
 
 	/* Let the memory be sent from VM1 to PRIMARY (returned). */
 	run_res = spci_run(SERVICE_VM1, 0);
@@ -870,9 +780,9 @@
 }
 
 /**
- * SPCI: Check cannot donate to self.
+ * Check cannot donate to self.
  */
-TEST(memory_sharing, spci_donate_to_self)
+TEST(memory_sharing, donate_to_self)
 {
 	struct mailbox_buffers mb = set_up_mailbox();
 	uint8_t *ptr = page;
@@ -896,9 +806,9 @@
 }
 
 /**
- * SPCI: Check cannot lend to self.
+ * Check cannot lend to self.
  */
-TEST(memory_sharing, spci_lend_to_self)
+TEST(memory_sharing, lend_to_self)
 {
 	struct mailbox_buffers mb = set_up_mailbox();
 	uint8_t *ptr = page;
@@ -921,9 +831,9 @@
 }
 
 /**
- * SPCI: Check cannot share to self.
+ * Check cannot share to self.
  */
-TEST(memory_sharing, spci_share_to_self)
+TEST(memory_sharing, share_to_self)
 {
 	struct mailbox_buffers mb = set_up_mailbox();
 	uint8_t *ptr = page;
@@ -946,9 +856,9 @@
 }
 
 /**
- * SPCI: Check cannot donate from alternative VM.
+ * Check cannot donate from alternative VM.
  */
-TEST(memory_sharing, spci_donate_invalid_source)
+TEST(memory_sharing, donate_invalid_source)
 {
 	struct spci_value run_res;
 	struct mailbox_buffers mb = set_up_mailbox();
@@ -1010,9 +920,9 @@
 }
 
 /**
- * SPCI: Check that unaligned addresses can not be shared.
+ * Check that unaligned addresses cannot be shared.
  */
-TEST(memory_sharing, spci_give_and_get_back_unaligned)
+TEST(memory_sharing, give_and_get_back_unaligned)
 {
 	struct mailbox_buffers mb = set_up_mailbox();
 
@@ -1044,9 +954,9 @@
 }
 
 /**
- * SPCI: Check cannot lend from alternative VM.
+ * Check cannot lend from alternative VM.
  */
-TEST(memory_sharing, spci_lend_invalid_source)
+TEST(memory_sharing, lend_invalid_source)
 {
 	struct spci_value run_res;
 	struct mailbox_buffers mb = set_up_mailbox();
@@ -1090,10 +1000,10 @@
 }
 
 /**
- * SPCI: Memory can be lent with executable permissions.
+ * Memory can be lent with executable permissions.
  * Check RO and RW permissions.
  */
-TEST(memory_sharing, spci_lend_relinquish_X_RW)
+TEST(memory_sharing, lend_relinquish_X_RW)
 {
 	struct spci_value run_res;
 	struct mailbox_buffers mb = set_up_mailbox();
@@ -1150,10 +1060,10 @@
 }
 
 /**
- * SPCI: Memory can be shared with executable permissions.
+ * Memory can be shared with executable permissions.
  * Check RO and RW permissions.
  */
-TEST(memory_sharing, spci_share_relinquish_X_RW)
+TEST(memory_sharing, share_relinquish_X_RW)
 {
 	struct spci_value run_res;
 	struct mailbox_buffers mb = set_up_mailbox();
@@ -1222,10 +1132,10 @@
 }
 
 /**
- * SPCI: Memory can be shared without executable permissions.
+ * Memory can be shared without executable permissions.
  * Check RO and RW permissions.
  */
-TEST(memory_sharing, spci_share_relinquish_NX_RW)
+TEST(memory_sharing, share_relinquish_NX_RW)
 {
 	struct spci_value run_res;
 	struct mailbox_buffers mb = set_up_mailbox();
@@ -1293,9 +1203,9 @@
 }
 
 /**
- * SPCI: Exercise execution permissions for lending memory.
+ * Exercise execution permissions for lending memory.
  */
-TEST(memory_sharing, spci_lend_relinquish_RW_X)
+TEST(memory_sharing, lend_relinquish_RW_X)
 {
 	struct spci_value run_res;
 	struct mailbox_buffers mb = set_up_mailbox();
@@ -1343,9 +1253,9 @@
 }
 
 /**
- * SPCI: Exercise execution permissions for lending memory without write access.
+ * Exercise execution permissions for lending memory without write access.
  */
-TEST(memory_sharing, spci_lend_relinquish_RO_X)
+TEST(memory_sharing, lend_relinquish_RO_X)
 {
 	struct spci_value run_res;
 	struct mailbox_buffers mb = set_up_mailbox();
@@ -1393,9 +1303,9 @@
 }
 
 /**
- * SPCI: Memory can be lent, but then no part can be donated.
+ * Memory can be lent, but then no part can be donated.
  */
-TEST(memory_sharing, spci_lend_donate)
+TEST(memory_sharing, lend_donate)
 {
 	struct spci_value run_res;
 	struct mailbox_buffers mb = set_up_mailbox();
@@ -1453,9 +1363,9 @@
 }
 
 /**
- * SPCI: Memory can be shared, but then no part can be donated.
+ * Memory can be shared, but then no part can be donated.
  */
-TEST(memory_sharing, spci_share_donate)
+TEST(memory_sharing, share_donate)
 {
 	struct spci_value run_res;
 	struct mailbox_buffers mb = set_up_mailbox();
@@ -1487,8 +1397,8 @@
 	EXPECT_EQ(run_res.func, SPCI_YIELD_32);
 
 	/* Attempt to share the same area of memory. */
-	spci_check_cannot_share_memory(mb, constituents,
-				       ARRAY_SIZE(constituents), SERVICE_VM1);
+	check_cannot_share_memory(mb, constituents, ARRAY_SIZE(constituents),
+				  SERVICE_VM1);
 
 	/* Ensure we can't donate any sub section of memory to another VM. */
 	constituents[0].page_count = 1;
@@ -1517,9 +1427,9 @@
 }
 
 /**
- * SPCI: Memory can be lent, but then no part can be lent again.
+ * Memory can be lent, but then no part can be lent again.
  */
-TEST(memory_sharing, spci_lend_twice)
+TEST(memory_sharing, lend_twice)
 {
 	struct spci_value run_res;
 	struct mailbox_buffers mb = set_up_mailbox();
@@ -1551,17 +1461,17 @@
 	EXPECT_EQ(run_res.func, SPCI_YIELD_32);
 
 	/* Attempt to lend the same area of memory. */
-	spci_check_cannot_lend_memory(mb, constituents,
-				      ARRAY_SIZE(constituents), -1);
+	check_cannot_lend_memory(mb, constituents, ARRAY_SIZE(constituents),
+				 -1);
 	/* Attempt to share the same area of memory. */
-	spci_check_cannot_share_memory(mb, constituents,
-				       ARRAY_SIZE(constituents), -1);
+	check_cannot_share_memory(mb, constituents, ARRAY_SIZE(constituents),
+				  -1);
 	/* Fail to donate to VM apart from VM1. */
-	spci_check_cannot_donate_memory(mb, constituents,
-					ARRAY_SIZE(constituents), SERVICE_VM1);
+	check_cannot_donate_memory(mb, constituents, ARRAY_SIZE(constituents),
+				   SERVICE_VM1);
 	/* Fail to relinquish from any VM. */
-	spci_check_cannot_relinquish_memory(mb, constituents,
-					    ARRAY_SIZE(constituents));
+	check_cannot_relinquish_memory(mb, constituents,
+				       ARRAY_SIZE(constituents));
 
 	/* Attempt to lend again with different permissions. */
 	constituents[0].page_count = 1;
@@ -1580,9 +1490,9 @@
 }
 
 /**
- * SPCI: Memory can be shared, but then no part can be shared again.
+ * Memory can be shared, but then no part can be shared again.
  */
-TEST(memory_sharing, spci_share_twice)
+TEST(memory_sharing, share_twice)
 {
 	struct spci_value run_res;
 	struct mailbox_buffers mb = set_up_mailbox();
@@ -1617,16 +1527,16 @@
 	 * Attempting to share or lend the same area of memory with any VM
 	 * should fail.
 	 */
-	spci_check_cannot_share_memory(mb, constituents,
-				       ARRAY_SIZE(constituents), -1);
-	spci_check_cannot_lend_memory(mb, constituents,
-				      ARRAY_SIZE(constituents), -1);
+	check_cannot_share_memory(mb, constituents, ARRAY_SIZE(constituents),
+				  -1);
+	check_cannot_lend_memory(mb, constituents, ARRAY_SIZE(constituents),
+				 -1);
 	/* Fail to donate to VM apart from VM1. */
-	spci_check_cannot_donate_memory(mb, constituents,
-					ARRAY_SIZE(constituents), SERVICE_VM1);
+	check_cannot_donate_memory(mb, constituents, ARRAY_SIZE(constituents),
+				   SERVICE_VM1);
 	/* Fail to relinquish from any VM. */
-	spci_check_cannot_relinquish_memory(mb, constituents,
-					    ARRAY_SIZE(constituents));
+	check_cannot_relinquish_memory(mb, constituents,
+				       ARRAY_SIZE(constituents));
 
 	/* Attempt to share again with different permissions. */
 	constituents[0].page_count = 1;
@@ -1645,9 +1555,9 @@
 }
 
 /**
- * SPCI: Memory can be cleared while being shared.
+ * Memory can be cleared while being shared.
  */
-TEST(memory_sharing, spci_share_clear)
+TEST(memory_sharing, share_clear)
 {
 	struct mailbox_buffers mb = set_up_mailbox();
 	uint8_t *ptr = page;
diff --git a/test/vmapi/primary_with_secondaries/services/abort.c b/test/vmapi/primary_with_secondaries/services/abort.c
index 3d11bd2..a4c445e 100644
--- a/test/vmapi/primary_with_secondaries/services/abort.c
+++ b/test/vmapi/primary_with_secondaries/services/abort.c
@@ -32,11 +32,21 @@
 
 TEST_SERVICE(straddling_data_abort)
 {
+	void *send_buf = SERVICE_SEND_BUFFER();
 	/* Give some memory to the primary VM so that it's unmapped. */
-	ASSERT_EQ(hf_share_memory(HF_PRIMARY_VM_ID,
-				  (hf_ipaddr_t)(&pages[PAGE_SIZE]), PAGE_SIZE,
-				  HF_MEMORY_GIVE),
-		  0);
+	struct spci_memory_region_constituent constituents[] = {
+		{.address = (uint64_t)(&pages[PAGE_SIZE]), .page_count = 1},
+	};
+	uint32_t msg_size = spci_memory_donate_init(
+		send_buf, HF_PRIMARY_VM_ID, constituents,
+		ARRAY_SIZE(constituents), 0, SPCI_MEMORY_RW_X,
+		SPCI_MEMORY_NORMAL_MEM, SPCI_MEMORY_CACHE_WRITE_BACK,
+		SPCI_MEMORY_OUTER_SHAREABLE);
+	EXPECT_EQ(spci_msg_send(hf_vm_get_id(), HF_PRIMARY_VM_ID, msg_size,
+				SPCI_MSG_SEND_LEGACY_MEMORY)
+			  .func,
+		  SPCI_SUCCESS_32);
+
 	*(volatile uint64_t *)(&pages[PAGE_SIZE - 6]);
 }
 
@@ -49,6 +59,8 @@
 
 TEST_SERVICE(straddling_instruction_abort)
 {
+	void *send_buf = SERVICE_SEND_BUFFER();
+
 	/*
 	 * Get a function pointer which, when branched to, will attempt to
 	 * execute a 4-byte instruction straddling two pages.
@@ -56,10 +68,18 @@
 	int (*f)(void) = (int (*)(void))(&pages[PAGE_SIZE - 2]);
 
 	/* Give second page to the primary VM so that it's unmapped. */
-	ASSERT_EQ(hf_share_memory(HF_PRIMARY_VM_ID,
-				  (hf_ipaddr_t)(&pages[PAGE_SIZE]), PAGE_SIZE,
-				  HF_MEMORY_GIVE),
-		  0);
+	struct spci_memory_region_constituent constituents[] = {
+		{.address = (uint64_t)(&pages[PAGE_SIZE]), .page_count = 1},
+	};
+	uint32_t msg_size = spci_memory_donate_init(
+		send_buf, HF_PRIMARY_VM_ID, constituents,
+		ARRAY_SIZE(constituents), 0, SPCI_MEMORY_RW_X,
+		SPCI_MEMORY_NORMAL_MEM, SPCI_MEMORY_CACHE_WRITE_BACK,
+		SPCI_MEMORY_OUTER_SHAREABLE);
+	EXPECT_EQ(spci_msg_send(hf_vm_get_id(), HF_PRIMARY_VM_ID, msg_size,
+				SPCI_MSG_SEND_LEGACY_MEMORY)
+			  .func,
+		  SPCI_SUCCESS_32);
 
 	/* Branch to instruction whose 2 bytes are now in an unmapped page. */
 	f();
diff --git a/test/vmapi/primary_with_secondaries/services/memory.c b/test/vmapi/primary_with_secondaries/services/memory.c
index dc076c2..b573a58 100644
--- a/test/vmapi/primary_with_secondaries/services/memory.c
+++ b/test/vmapi/primary_with_secondaries/services/memory.c
@@ -32,13 +32,19 @@
 		struct spci_value ret = spci_msg_wait();
 		uint8_t *ptr;
 		size_t i;
+		void *recv_buf = SERVICE_RECV_BUFFER();
+		struct spci_memory_region *memory_region =
+			spci_get_memory_region(recv_buf);
+		struct spci_memory_region_constituent *constituents =
+			spci_memory_region_get_constituents(memory_region);
 
 		EXPECT_EQ(ret.func, SPCI_MSG_SEND_32);
+		EXPECT_EQ(spci_msg_send_attributes(ret),
+			  SPCI_MSG_SEND_LEGACY_MEMORY);
+
+		ptr = (uint8_t *)constituents[0].address;
 
 		/* Check the memory was cleared. */
-		void *recv_buf = SERVICE_RECV_BUFFER();
-		ptr = *(uint8_t **)recv_buf;
-
 		for (int i = 0; i < PAGE_SIZE; ++i) {
 			ASSERT_EQ(ptr[i], 0);
 		}
@@ -78,19 +84,18 @@
 			spci_memory_region_get_constituents(memory_region);
 
 		ptr = (uint8_t *)constituents[0].address;
-		/* Relevant information read, mailbox can be cleared. */
-		spci_rx_release();
 
 		/* Check that one has access to the shared region. */
 		for (int i = 0; i < PAGE_SIZE; ++i) {
 			ptr[i]++;
 		}
 
-		spci_rx_release();
 		/* Give the memory back and notify the sender. */
 		msg_size = spci_memory_relinquish_init(
 			send_buf, HF_PRIMARY_VM_ID, constituents,
 			memory_region->constituent_count, 0);
+		/* Relevant information read, RX mailbox can be cleared. */
+		spci_rx_release();
 		spci_msg_send(spci_msg_send_receiver(ret), HF_PRIMARY_VM_ID,
 			      msg_size, SPCI_MSG_SEND_LEGACY_MEMORY);
 
@@ -102,60 +107,23 @@
 	}
 }
 
-TEST_SERVICE(memory_return)
-{
-	/* Loop, giving memory back to the sender. */
-	for (;;) {
-		struct spci_value ret = spci_msg_wait();
-		uint8_t *ptr;
-
-		EXPECT_EQ(ret.func, SPCI_MSG_SEND_32);
-
-		/* Check the memory was cleared. */
-		void *recv_buf = SERVICE_RECV_BUFFER();
-		ptr = *(uint8_t **)recv_buf;
-
-		for (int i = 0; i < PAGE_SIZE; ++i) {
-			ASSERT_EQ(ptr[i], 0);
-		}
-
-		/* Give the memory back and notify the sender. */
-		ASSERT_EQ(hf_share_memory(spci_msg_send_sender(ret),
-					  (hf_ipaddr_t)ptr, PAGE_SIZE,
-					  HF_MEMORY_GIVE),
-			  0);
-		spci_rx_release();
-		spci_msg_send(hf_vm_get_id(), spci_msg_send_sender(ret),
-			      sizeof(ptr), 0);
-
-		/*
-		 * Try and access the memory which will cause a fault unless the
-		 * memory has been shared back again.
-		 */
-		ptr[0] = 123;
-	}
-}
-
 TEST_SERVICE(give_memory_and_fault)
 {
-	uint8_t *ptr = page;
+	void *send_buf = SERVICE_SEND_BUFFER();
 
 	/* Give memory to the primary. */
-	ASSERT_EQ(hf_share_memory(HF_PRIMARY_VM_ID, (hf_ipaddr_t)&page,
-				  PAGE_SIZE, HF_MEMORY_GIVE),
-		  0);
-
-	/*
-	 * TODO: the address of the memory will be part of the proper API. That
-	 *       API is still to be agreed on so the address is passed
-	 *       explicitly to test the mechanism.
-	 */
-	memcpy_s(SERVICE_SEND_BUFFER(), SPCI_MSG_PAYLOAD_MAX, &ptr,
-		 sizeof(ptr));
-	EXPECT_EQ(
-		spci_msg_send(hf_vm_get_id(), HF_PRIMARY_VM_ID, sizeof(ptr), 0)
-			.func,
-		SPCI_SUCCESS_32);
+	struct spci_memory_region_constituent constituents[] = {
+		{.address = (uint64_t)&page, .page_count = 1},
+	};
+	uint32_t msg_size = spci_memory_init(
+		send_buf, SPCI_MEMORY_DONATE, HF_PRIMARY_VM_ID, constituents,
+		ARRAY_SIZE(constituents), 0, SPCI_MEMORY_REGION_FLAG_CLEAR,
+		SPCI_MEMORY_RW_X, SPCI_MEMORY_NORMAL_MEM,
+		SPCI_MEMORY_CACHE_WRITE_BACK, SPCI_MEMORY_OUTER_SHAREABLE);
+	EXPECT_EQ(spci_msg_send(hf_vm_get_id(), HF_PRIMARY_VM_ID, msg_size,
+				SPCI_MSG_SEND_LEGACY_MEMORY)
+			  .func,
+		  SPCI_SUCCESS_32);
 
 	/* Try using the memory that isn't valid unless it's been returned. */
 	page[16] = 123;
@@ -163,24 +131,21 @@
 
 TEST_SERVICE(lend_memory_and_fault)
 {
-	uint8_t *ptr = page;
+	void *send_buf = SERVICE_SEND_BUFFER();
 
 	/* Lend memory to the primary. */
-	ASSERT_EQ(hf_share_memory(HF_PRIMARY_VM_ID, (hf_ipaddr_t)&page,
-				  PAGE_SIZE, HF_MEMORY_LEND),
-		  0);
-
-	/*
-	 * TODO: the address of the memory will be part of the proper API. That
-	 *       API is still to be agreed on so the address is passed
-	 *       explicitly to test the mechanism.
-	 */
-	memcpy_s(SERVICE_SEND_BUFFER(), SPCI_MSG_PAYLOAD_MAX, &ptr,
-		 sizeof(ptr));
-	EXPECT_EQ(
-		spci_msg_send(hf_vm_get_id(), HF_PRIMARY_VM_ID, sizeof(ptr), 0)
-			.func,
-		SPCI_SUCCESS_32);
+	struct spci_memory_region_constituent constituents[] = {
+		{.address = (uint64_t)&page, .page_count = 1},
+	};
+	uint32_t msg_size = spci_memory_init(
+		send_buf, SPCI_MEMORY_LEND, HF_PRIMARY_VM_ID, constituents,
+		ARRAY_SIZE(constituents), 0, SPCI_MEMORY_REGION_FLAG_CLEAR,
+		SPCI_MEMORY_RW_X, SPCI_MEMORY_NORMAL_MEM,
+		SPCI_MEMORY_CACHE_WRITE_BACK, SPCI_MEMORY_OUTER_SHAREABLE);
+	EXPECT_EQ(spci_msg_send(hf_vm_get_id(), HF_PRIMARY_VM_ID, msg_size,
+				SPCI_MSG_SEND_LEGACY_MEMORY)
+			  .func,
+		  SPCI_SUCCESS_32);
 
 	/* Try using the memory that isn't valid unless it's been returned. */
 	page[633] = 180;
@@ -195,16 +160,16 @@
 		uint32_t msg_size;
 		void *recv_buf = SERVICE_RECV_BUFFER();
 		void *send_buf = SERVICE_SEND_BUFFER();
-		struct spci_memory_region *memory_region =
-			spci_get_memory_region(recv_buf);
-		struct spci_memory_region_constituent *constituents =
-			spci_memory_region_get_constituents(memory_region);
+		struct spci_memory_region *memory_region;
+		struct spci_memory_region_constituent *constituents;
 
 		EXPECT_EQ(ret.func, SPCI_MSG_SEND_32);
 		EXPECT_EQ(spci_msg_send_attributes(ret),
 			  SPCI_MSG_SEND_LEGACY_MEMORY);
-		spci_rx_release();
 
+		memory_region = spci_get_memory_region(recv_buf);
+		constituents =
+			spci_memory_region_get_constituents(memory_region);
 		ptr = (uint8_t *)constituents[0].address;
 
 		/* Check that one has access to the shared region. */
@@ -218,8 +183,12 @@
 			memory_region->constituent_count, 0, SPCI_MEMORY_RW_X,
 			SPCI_MEMORY_NORMAL_MEM, SPCI_MEMORY_CACHE_WRITE_BACK,
 			SPCI_MEMORY_OUTER_SHAREABLE);
-		spci_msg_send(spci_msg_send_receiver(ret), HF_PRIMARY_VM_ID,
-			      msg_size, SPCI_MSG_SEND_LEGACY_MEMORY);
+		spci_rx_release();
+		EXPECT_EQ(spci_msg_send(spci_msg_send_receiver(ret),
+					spci_msg_send_sender(ret), msg_size,
+					SPCI_MSG_SEND_LEGACY_MEMORY)
+				  .func,
+			  SPCI_SUCCESS_32);
 
 		/*
 		 * Try and access the memory which will cause a fault unless the
@@ -234,17 +203,17 @@
 	struct spci_value ret = spci_msg_wait();
 	uint8_t *ptr;
 	void *recv_buf = SERVICE_RECV_BUFFER();
-	struct spci_memory_region *memory_region =
-		spci_get_memory_region(recv_buf);
-	struct spci_memory_region_constituent *constituents =
-		spci_memory_region_get_constituents(memory_region);
+	struct spci_memory_region *memory_region;
+	struct spci_memory_region_constituent *constituents;
 
 	EXPECT_EQ(ret.func, SPCI_MSG_SEND_32);
 	EXPECT_EQ(spci_msg_send_attributes(ret), SPCI_MSG_SEND_LEGACY_MEMORY);
-	spci_rx_release();
-
+	memory_region = spci_get_memory_region(recv_buf);
+	constituents = spci_memory_region_get_constituents(memory_region);
 	ptr = (uint8_t *)constituents[0].address;
 
+	spci_rx_release();
+
 	/* Check that one cannot access out of bounds after donated region. */
 	ptr[PAGE_SIZE]++;
 }
@@ -254,23 +223,23 @@
 	struct spci_value ret = spci_msg_wait();
 	uint8_t *ptr;
 	void *recv_buf = SERVICE_RECV_BUFFER();
-	struct spci_memory_region *memory_region =
-		spci_get_memory_region(recv_buf);
-	struct spci_memory_region_constituent *constituents =
-		spci_memory_region_get_constituents(memory_region);
+	struct spci_memory_region *memory_region;
+	struct spci_memory_region_constituent *constituents;
 
 	EXPECT_EQ(ret.func, SPCI_MSG_SEND_32);
 	EXPECT_EQ(spci_msg_send_attributes(ret), SPCI_MSG_SEND_LEGACY_MEMORY);
-	spci_rx_release();
-
+	memory_region = spci_get_memory_region(recv_buf);
+	constituents = spci_memory_region_get_constituents(memory_region);
 	ptr = (uint8_t *)constituents[0].address;
 
+	spci_rx_release();
+
 	/* Check that one cannot access out of bounds before donated region. */
 	ptr[-1]++;
 }
 
 /**
- * SPCI: Attempt to donate memory and then modify.
+ * Attempt to donate memory and then modify it.
  */
 TEST_SERVICE(spci_donate_secondary_and_fault)
 {
@@ -308,7 +277,7 @@
 }
 
 /**
- * SPCI: Attempt to donate memory twice from VM.
+ * Attempt to donate memory twice from VM.
  */
 TEST_SERVICE(spci_donate_twice)
 {
@@ -354,8 +323,8 @@
 }
 
 /**
- * SPCI: Continually receive memory, check if we have access
- * and ensure it is not changed by a third party.
+ * Continually receive memory, check if we have access and ensure it is not
+ * changed by a third party.
  */
 TEST_SERVICE(spci_memory_receive)
 {
@@ -384,7 +353,7 @@
 }
 
 /**
- * SPCI: Receive memory and attempt to donate from primary VM.
+ * Receive memory and attempt to donate from primary VM.
  */
 TEST_SERVICE(spci_donate_invalid_source)
 {
@@ -443,21 +412,23 @@
 		EXPECT_EQ(spci_msg_send_attributes(ret),
 			  SPCI_MSG_SEND_LEGACY_MEMORY);
 		ptr = (uint8_t *)constituents[0].address;
-		/* Relevant information read, mailbox can be cleared. */
-		spci_rx_release();
 
 		/* Check that one has access to the shared region. */
 		for (int i = 0; i < PAGE_SIZE; ++i) {
 			ptr[i]++;
 		}
 
-		spci_rx_release();
 		/* Give the memory back and notify the sender. */
 		msg_size = spci_memory_relinquish_init(
 			send_buf, HF_PRIMARY_VM_ID, constituents,
 			memory_region->constituent_count, 0);
-		spci_msg_send(spci_msg_send_receiver(ret), HF_PRIMARY_VM_ID,
-			      msg_size, SPCI_MSG_SEND_LEGACY_MEMORY);
+		/* Relevant information read, mailbox can be cleared. */
+		spci_rx_release();
+		EXPECT_EQ(spci_msg_send(spci_msg_send_receiver(ret),
+					spci_msg_send_sender(ret), msg_size,
+					SPCI_MSG_SEND_LEGACY_MEMORY)
+				  .func,
+			  SPCI_SUCCESS_32);
 
 		/*
 		 * Try and access the memory which will cause a fault unless the
@@ -468,7 +439,7 @@
 }
 
 /**
- * SPCI: Ensure that we can't relinquish donated memory.
+ * Ensure that we can't relinquish donated memory.
  */
 TEST_SERVICE(spci_memory_donate_relinquish)
 {
@@ -512,7 +483,7 @@
 }
 
 /**
- * SPCI: Receive memory and attempt to donate from primary VM.
+ * Receive memory and attempt to donate from primary VM.
  */
 TEST_SERVICE(spci_lend_invalid_source)
 {
@@ -572,7 +543,7 @@
 }
 
 /**
- * SPCI: Attempt to execute an instruction from the lent memory.
+ * Attempt to execute an instruction from the lent memory.
  */
 TEST_SERVICE(spci_memory_lend_relinquish_X)
 {
@@ -615,7 +586,7 @@
 }
 
 /**
- * SPCI: Attempt to read and write to a shared page.
+ * Attempt to read and write to a shared page.
  */
 TEST_SERVICE(spci_memory_lend_relinquish_RW)
 {
@@ -663,7 +634,7 @@
 }
 
 /**
- * SPCI: Attempt to modify below the lower bound for the lent memory.
+ * Attempt to modify below the lower bound for the lent memory.
  */
 TEST_SERVICE(spci_lend_check_lower_bound)
 {
@@ -687,7 +658,7 @@
 }
 
 /**
- * SPCI: Attempt to modify above the upper bound for the lent memory.
+ * Attempt to modify above the upper bound for the lent memory.
  */
 TEST_SERVICE(spci_lend_check_upper_bound)
 {