Support fragmented memory sharing messages.
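
Add handlers for the FFA_MEM_FRAG_TX and FFA_MEM_FRAG_RX ABIs so that
memory send, retrieve and reclaim transactions no longer have to fit
in a single mailbox-sized message. Share states now record the
constituents of each fragment (up to MAX_FRAGMENTS) and whether
sending is complete, handles allocated by the TEE are stored so they
can be looked up later, and reclaiming memory shared with the TEE
fetches the descriptor into a dedicated buffer sized for MAX_FRAGMENTS
fragments. Retrieve responses which do not fit in the receiver's
mailbox are likewise returned one fragment at a time via
FFA_MEM_FRAG_RX.

The sketch below is not part of this change; it only illustrates how a
VM might drive a fragmented share with the new vmapi wrappers. It
assumes the existing ffa_mem_share(total_length, fragment_length)
wrapper, a registered TX buffer `tx_buffer` (a void * of
HF_MAILBOX_SIZE bytes), placeholder IDs `own_id` and `receiver_id`, an
input array `constituents` of `constituent_count` entries, and that
ffa_memory_region_init()/ffa_memory_fragment_init() return the number
of constituents which did not fit in the fragment just written.

	uint32_t total_length;
	uint32_t fragment_length;
	uint32_t remaining;
	uint32_t sent;
	ffa_memory_handle_t handle;
	struct ffa_value ret;

	/* First fragment: header plus as many constituents as fit. */
	remaining = ffa_memory_region_init(
		tx_buffer, HF_MAILBOX_SIZE, own_id, receiver_id,
		constituents, constituent_count, 0, 0, FFA_DATA_ACCESS_RW,
		FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED, FFA_MEMORY_NORMAL_MEM,
		FFA_MEMORY_CACHE_WRITE_BACK, FFA_MEMORY_OUTER_SHAREABLE,
		&fragment_length, &total_length);
	sent = constituent_count - remaining;

	ret = ffa_mem_share(total_length, fragment_length);
	while (ret.func == FFA_MEM_FRAG_RX_32) {
		/* arg3 of FFA_MEM_FRAG_RX is the offset received so far. */
		handle = ffa_frag_handle(ret);
		remaining = ffa_memory_fragment_init(
			tx_buffer, HF_MAILBOX_SIZE, constituents + sent,
			constituent_count - sent, &fragment_length);
		sent = constituent_count - remaining;
		ret = ffa_mem_frag_tx(handle, fragment_length);
	}
	/* The last FFA_MEM_FRAG_TX returns FFA_SUCCESS with the handle. */

A retriever follows the same pattern in the other direction: if the
fragment length (arg2) in FFA_MEM_RETRIEVE_RESP is smaller than the
total length (arg1), it releases its RX buffer and calls
ffa_mem_frag_rx(handle, offset) repeatedly until the received offsets
add up to the total.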

Bug: 132420445
Change-Id: I638f7fece9a8f83976c0e9ff2fd3ad66dac3ad25
diff --git a/inc/hf/api.h b/inc/hf/api.h
index bfeed7c..be825fd 100644
--- a/inc/hf/api.h
+++ b/inc/hf/api.h
@@ -71,3 +71,11 @@
 struct ffa_value api_ffa_mem_reclaim(ffa_memory_handle_t handle,
 				     ffa_memory_region_flags_t flags,
 				     struct vcpu *current);
+struct ffa_value api_ffa_mem_frag_rx(ffa_memory_handle_t handle,
+				     uint32_t fragment_offset,
+				     ffa_vm_id_t sender_vm_id,
+				     struct vcpu *current);
+struct ffa_value api_ffa_mem_frag_tx(ffa_memory_handle_t handle,
+				     uint32_t fragment_length,
+				     ffa_vm_id_t sender_vm_id,
+				     struct vcpu *current);
diff --git a/inc/hf/ffa_memory.h b/inc/hf/ffa_memory.h
index 9715918..3637980 100644
--- a/inc/hf/ffa_memory.h
+++ b/inc/hf/ffa_memory.h
@@ -30,17 +30,34 @@
 	struct vm_locked from_locked, struct vm_locked to_locked,
 	struct ffa_memory_region *memory_region, uint32_t memory_share_length,
 	uint32_t fragment_length, uint32_t share_func, struct mpool *page_pool);
+struct ffa_value ffa_memory_send_continue(struct vm_locked from_locked,
+					  void *fragment,
+					  uint32_t fragment_length,
+					  ffa_memory_handle_t handle,
+					  struct mpool *page_pool);
+struct ffa_value ffa_memory_tee_send_continue(struct vm_locked from_locked,
+					      struct vm_locked to_locked,
+					      void *fragment,
+					      uint32_t fragment_length,
+					      ffa_memory_handle_t handle,
+					      struct mpool *page_pool);
 struct ffa_value ffa_memory_retrieve(struct vm_locked to_locked,
 				     struct ffa_memory_region *retrieve_request,
 				     uint32_t retrieve_request_length,
 				     struct mpool *page_pool);
+struct ffa_value ffa_memory_retrieve_continue(struct vm_locked to_locked,
+					      ffa_memory_handle_t handle,
+					      uint32_t fragment_offset,
+					      struct mpool *page_pool);
 struct ffa_value ffa_memory_relinquish(
 	struct vm_locked from_locked,
 	struct ffa_mem_relinquish *relinquish_request, struct mpool *page_pool);
 struct ffa_value ffa_memory_reclaim(struct vm_locked to_locked,
-				    ffa_memory_handle_t handle, bool clear,
+				    ffa_memory_handle_t handle,
+				    ffa_memory_region_flags_t flags,
 				    struct mpool *page_pool);
 struct ffa_value ffa_memory_tee_reclaim(struct vm_locked to_locked,
+					struct vm_locked from_locked,
 					ffa_memory_handle_t handle,
-					struct ffa_memory_region *memory_region,
-					bool clear, struct mpool *page_pool);
+					ffa_memory_region_flags_t flags,
+					struct mpool *page_pool);
diff --git a/inc/vmapi/hf/call.h b/inc/vmapi/hf/call.h
index 1416f77..204a248 100644
--- a/inc/vmapi/hf/call.h
+++ b/inc/vmapi/hf/call.h
@@ -175,6 +175,26 @@
 					   .arg3 = flags});
 }
 
+static inline struct ffa_value ffa_mem_frag_rx(ffa_memory_handle_t handle,
+					       uint32_t fragment_offset)
+{
+	/* Note that sender MBZ at virtual instance. */
+	return ffa_call((struct ffa_value){.func = FFA_MEM_FRAG_RX_32,
+					   .arg1 = (uint32_t)handle,
+					   .arg2 = (uint32_t)(handle >> 32),
+					   .arg3 = fragment_offset});
+}
+
+static inline struct ffa_value ffa_mem_frag_tx(ffa_memory_handle_t handle,
+					       uint32_t fragment_length)
+{
+	/* Note that sender MBZ at virtual instance. */
+	return ffa_call((struct ffa_value){.func = FFA_MEM_FRAG_TX_32,
+					   .arg1 = (uint32_t)handle,
+					   .arg2 = (uint32_t)(handle >> 32),
+					   .arg3 = fragment_length});
+}
+
 /**
  * Called by secondary VMs to receive a message. This will block until a message
  * is received.
diff --git a/inc/vmapi/hf/ffa.h b/inc/vmapi/hf/ffa.h
index f7d7356..e6a3015 100644
--- a/inc/vmapi/hf/ffa.h
+++ b/inc/vmapi/hf/ffa.h
@@ -56,6 +56,8 @@
 #define FFA_MEM_RETRIEVE_RESP_32     0x84000075
 #define FFA_MEM_RELINQUISH_32        0x84000076
 #define FFA_MEM_RECLAIM_32           0x84000077
+#define FFA_MEM_FRAG_RX_32           0x8400007A
+#define FFA_MEM_FRAG_TX_32           0x8400007B
 
 /* FF-A error codes. */
 #define FFA_NOT_SUPPORTED      INT32_C(-1)
@@ -194,6 +196,7 @@
 	((ffa_memory_handle_t)(UINT64_C(1) << 63))
 #define FFA_MEMORY_HANDLE_ALLOCATOR_HYPERVISOR \
 	((ffa_memory_handle_t)(UINT64_C(1) << 63))
+#define FFA_MEMORY_HANDLE_INVALID (~UINT64_C(0))
 
 /** The ID of a VM. These are assigned sequentially starting with an offset. */
 typedef uint16_t ffa_vm_id_t;
@@ -261,6 +264,11 @@
 	return ffa_assemble_handle(args.arg2, args.arg3);
 }
 
+static inline ffa_memory_handle_t ffa_frag_handle(struct ffa_value args)
+{
+	return ffa_assemble_handle(args.arg1, args.arg2);
+}
+
 static inline struct ffa_value ffa_mem_success(ffa_memory_handle_t handle)
 {
 	return (struct ffa_value){.func = FFA_SUCCESS_32,
@@ -284,6 +292,11 @@
 	return ((uint32_t)vm_id << 16) | vcpu_index;
 }
 
+static inline ffa_vm_id_t ffa_frag_sender(struct ffa_value args)
+{
+	return (args.arg4 >> 16) & 0xffff;
+}
+
 /**
  * A set of contiguous pages which is part of a memory region. This corresponds
  * to table 40 of the FF-A 1.0 EAC specification, "Constituent memory region
@@ -469,14 +482,15 @@
 }
 
 uint32_t ffa_memory_region_init(
-	struct ffa_memory_region *memory_region, ffa_vm_id_t sender,
-	ffa_vm_id_t receiver,
+	struct ffa_memory_region *memory_region, size_t memory_region_max_size,
+	ffa_vm_id_t sender, ffa_vm_id_t receiver,
 	const struct ffa_memory_region_constituent constituents[],
 	uint32_t constituent_count, uint32_t tag,
 	ffa_memory_region_flags_t flags, enum ffa_data_access data_access,
 	enum ffa_instruction_access instruction_access,
 	enum ffa_memory_type type, enum ffa_memory_cacheability cacheability,
-	enum ffa_memory_shareability shareability);
+	enum ffa_memory_shareability shareability, uint32_t *fragment_length,
+	uint32_t *total_length);
 uint32_t ffa_memory_retrieve_request_init(
 	struct ffa_memory_region *memory_region, ffa_memory_handle_t handle,
 	ffa_vm_id_t sender, ffa_vm_id_t receiver, uint32_t tag,
@@ -487,10 +501,17 @@
 uint32_t ffa_memory_lender_retrieve_request_init(
 	struct ffa_memory_region *memory_region, ffa_memory_handle_t handle,
 	ffa_vm_id_t sender);
-uint32_t ffa_retrieved_memory_region_init(
+bool ffa_retrieved_memory_region_init(
 	struct ffa_memory_region *response, size_t response_max_size,
 	ffa_vm_id_t sender, ffa_memory_attributes_t attributes,
 	ffa_memory_region_flags_t flags, ffa_memory_handle_t handle,
 	ffa_vm_id_t receiver, ffa_memory_access_permissions_t permissions,
+	uint32_t page_count, uint32_t total_constituent_count,
 	const struct ffa_memory_region_constituent constituents[],
-	uint32_t constituent_count);
+	uint32_t fragment_constituent_count, uint32_t *total_length,
+	uint32_t *fragment_length);
+uint32_t ffa_memory_fragment_init(
+	struct ffa_memory_region_constituent *fragment,
+	size_t fragment_max_size,
+	const struct ffa_memory_region_constituent constituents[],
+	uint32_t constituent_count, uint32_t *fragment_length);
diff --git a/src/api.c b/src/api.c
index 41ccdac..d95c5c1 100644
--- a/src/api.c
+++ b/src/api.c
@@ -1473,8 +1473,20 @@
 		return ffa_error(FFA_INVALID_PARAMETERS);
 	}
 
-	if (fragment_length != length) {
-		dlog_verbose("Fragmentation not yet supported.\n");
+	if (fragment_length > length) {
+		dlog_verbose(
+			"Fragment length %d greater than total length %d.\n",
+			fragment_length, length);
+		return ffa_error(FFA_INVALID_PARAMETERS);
+	}
+	if (fragment_length < sizeof(struct ffa_memory_region) +
+				      sizeof(struct ffa_memory_access)) {
+		dlog_verbose(
+			"Initial fragment length %d smaller than header size "
+			"%d.\n",
+			fragment_length,
+			sizeof(struct ffa_memory_region) +
+				sizeof(struct ffa_memory_access));
 		return ffa_error(FFA_INVALID_PARAMETERS);
 	}
 
@@ -1706,74 +1718,6 @@
 	return ret;
 }
 
-static struct ffa_value ffa_mem_reclaim_tee(struct vm_locked to_locked,
-					    struct vm_locked from_locked,
-					    ffa_memory_handle_t handle,
-					    ffa_memory_region_flags_t flags,
-					    struct cpu *cpu)
-{
-	struct ffa_value tee_ret;
-	uint32_t length;
-	uint32_t fragment_length;
-	struct ffa_memory_region *memory_region =
-		(struct ffa_memory_region *)cpu_get_buffer(cpu);
-	uint32_t message_buffer_size = cpu_get_buffer_size(cpu);
-	uint32_t request_length = ffa_memory_lender_retrieve_request_init(
-		from_locked.vm->mailbox.recv, handle, to_locked.vm->id);
-
-	CHECK(request_length <= HF_MAILBOX_SIZE);
-
-	/* Retrieve memory region information from the TEE. */
-	tee_ret = arch_tee_call(
-		(struct ffa_value){.func = FFA_MEM_RETRIEVE_REQ_32,
-				   .arg1 = request_length,
-				   .arg2 = request_length});
-	if (tee_ret.func == FFA_ERROR_32) {
-		dlog_verbose("Got error %d from EL3.\n", tee_ret.arg2);
-		return tee_ret;
-	}
-	if (tee_ret.func != FFA_MEM_RETRIEVE_RESP_32) {
-		dlog_verbose(
-			"Got %#x from EL3, expected FFA_MEM_RETRIEVE_RESP.\n",
-			tee_ret.func);
-		return ffa_error(FFA_INVALID_PARAMETERS);
-	}
-
-	length = tee_ret.arg1;
-	fragment_length = tee_ret.arg2;
-
-	if (fragment_length > HF_MAILBOX_SIZE ||
-	    fragment_length > message_buffer_size) {
-		dlog_verbose("Invalid fragment length %d (max %d).\n", length,
-			     HF_MAILBOX_SIZE);
-		return ffa_error(FFA_INVALID_PARAMETERS);
-	}
-
-	/* TODO: Support fragmentation. */
-	if (fragment_length != length) {
-		dlog_verbose(
-			"Message fragmentation not yet supported (fragment "
-			"length %d but length %d).\n",
-			fragment_length, length);
-		return ffa_error(FFA_INVALID_PARAMETERS);
-	}
-
-	/*
-	 * Copy the memory region descriptor to an internal buffer, so that the
-	 * sender can't change it underneath us.
-	 */
-	memcpy_s(memory_region, message_buffer_size,
-		 from_locked.vm->mailbox.send, fragment_length);
-
-	/*
-	 * Validate that transition is allowed (e.g. that caller is owner),
-	 * forward the reclaim request to the TEE, and update page tables.
-	 */
-	return ffa_memory_tee_reclaim(to_locked, handle, memory_region,
-				      flags & FFA_MEM_RECLAIM_CLEAR,
-				      &api_page_pool);
-}
-
 struct ffa_value api_ffa_mem_reclaim(ffa_memory_handle_t handle,
 				     ffa_memory_region_flags_t flags,
 				     struct vcpu *current)
@@ -1785,8 +1729,7 @@
 	    FFA_MEMORY_HANDLE_ALLOCATOR_HYPERVISOR) {
 		struct vm_locked to_locked = vm_lock(to);
 
-		ret = ffa_memory_reclaim(to_locked, handle,
-					 flags & FFA_MEM_RECLAIM_CLEAR,
+		ret = ffa_memory_reclaim(to_locked, handle, flags,
 					 &api_page_pool);
 
 		vm_unlock(&to_locked);
@@ -1794,9 +1737,144 @@
 		struct vm *from = vm_find(HF_TEE_VM_ID);
 		struct two_vm_locked vm_to_from_lock = vm_lock_both(to, from);
 
-		ret = ffa_mem_reclaim_tee(vm_to_from_lock.vm1,
-					  vm_to_from_lock.vm2, handle, flags,
-					  current->cpu);
+		ret = ffa_memory_tee_reclaim(vm_to_from_lock.vm1,
+					     vm_to_from_lock.vm2, handle, flags,
+					     &api_page_pool);
+
+		vm_unlock(&vm_to_from_lock.vm1);
+		vm_unlock(&vm_to_from_lock.vm2);
+	}
+
+	return ret;
+}
+
+struct ffa_value api_ffa_mem_frag_rx(ffa_memory_handle_t handle,
+				     uint32_t fragment_offset,
+				     ffa_vm_id_t sender_vm_id,
+				     struct vcpu *current)
+{
+	struct vm *to = current->vm;
+	struct vm_locked to_locked;
+	struct ffa_value ret;
+
+	/* Sender ID MBZ at virtual instance. */
+	if (sender_vm_id != 0) {
+		return ffa_error(FFA_INVALID_PARAMETERS);
+	}
+
+	to_locked = vm_lock(to);
+
+	if (msg_receiver_busy(to_locked, NULL, false)) {
+		/*
+		 * Can't retrieve memory information if the mailbox is not
+		 * available.
+		 */
+		dlog_verbose("RX buffer not ready.\n");
+		ret = ffa_error(FFA_BUSY);
+		goto out;
+	}
+
+	ret = ffa_memory_retrieve_continue(to_locked, handle, fragment_offset,
+					   &api_page_pool);
+
+out:
+	vm_unlock(&to_locked);
+	return ret;
+}
+
+struct ffa_value api_ffa_mem_frag_tx(ffa_memory_handle_t handle,
+				     uint32_t fragment_length,
+				     ffa_vm_id_t sender_vm_id,
+				     struct vcpu *current)
+{
+	struct vm *from = current->vm;
+	const void *from_msg;
+	void *fragment_copy;
+	struct ffa_value ret;
+
+	/* Sender ID MBZ at virtual instance. */
+	if (sender_vm_id != 0) {
+		return ffa_error(FFA_INVALID_PARAMETERS);
+	}
+
+	/*
+	 * Check that the sender has configured its send buffer. If the TX
+	 * mailbox at from_msg is configured (i.e. from_msg != NULL) then it can
+	 * be safely accessed after releasing the lock since the TX mailbox
+	 * address can only be configured once.
+	 */
+	sl_lock(&from->lock);
+	from_msg = from->mailbox.send;
+	sl_unlock(&from->lock);
+
+	if (from_msg == NULL) {
+		return ffa_error(FFA_INVALID_PARAMETERS);
+	}
+
+	/*
+	 * Copy the fragment to a fresh page from the memory pool. This prevents
+	 * the sender from changing it underneath us, and also lets us keep it
+	 * around in the share state table if needed.
+	 */
+	if (fragment_length > HF_MAILBOX_SIZE ||
+	    fragment_length > MM_PPOOL_ENTRY_SIZE) {
+		dlog_verbose(
+			"Fragment length %d larger than mailbox size %d.\n",
+			fragment_length, HF_MAILBOX_SIZE);
+		return ffa_error(FFA_INVALID_PARAMETERS);
+	}
+	if (fragment_length < sizeof(struct ffa_memory_region_constituent) ||
+	    fragment_length % sizeof(struct ffa_memory_region_constituent) !=
+		    0) {
+		dlog_verbose("Invalid fragment length %d.\n", fragment_length);
+		return ffa_error(FFA_INVALID_PARAMETERS);
+	}
+	fragment_copy = mpool_alloc(&api_page_pool);
+	if (fragment_copy == NULL) {
+		dlog_verbose("Failed to allocate fragment copy.\n");
+		return ffa_error(FFA_NO_MEMORY);
+	}
+	memcpy_s(fragment_copy, MM_PPOOL_ENTRY_SIZE, from_msg, fragment_length);
+
+	/*
+	 * Hafnium doesn't support fragmentation of memory retrieve requests
+	 * (because it doesn't support caller-specified mappings, so a request
+	 * will never be larger than a single page), so this must be part of a
+	 * memory send (i.e. donate, lend or share) request.
+	 *
+	 * We can tell from the handle whether the memory transaction is for the
+	 * TEE or not.
+	 */
+	if ((handle & FFA_MEMORY_HANDLE_ALLOCATOR_MASK) ==
+	    FFA_MEMORY_HANDLE_ALLOCATOR_HYPERVISOR) {
+		struct vm_locked from_locked = vm_lock(from);
+
+		ret = ffa_memory_send_continue(from_locked, fragment_copy,
+					       fragment_length, handle,
+					       &api_page_pool);
+		/*
+		 * `ffa_memory_send_continue` takes ownership of the
+		 * fragment_copy, so we don't need to free it here.
+		 */
+		vm_unlock(&from_locked);
+	} else {
+		struct vm *to = vm_find(HF_TEE_VM_ID);
+		struct two_vm_locked vm_to_from_lock = vm_lock_both(to, from);
+
+		/*
+		 * The TEE RX buffer state is checked in
+		 * `ffa_memory_tee_send_continue` rather than here, as we need
+		 * to return `FFA_MEM_FRAG_RX` with the current offset rather
+		 * than FFA_ERROR FFA_BUSY in case it is busy.
+		 */
+
+		ret = ffa_memory_tee_send_continue(
+			vm_to_from_lock.vm2, vm_to_from_lock.vm1, fragment_copy,
+			fragment_length, handle, &api_page_pool);
+		/*
+		 * `ffa_memory_tee_send_continue` takes ownership of the
+		 * fragment_copy, so we don't need to free it here.
+		 */
 
 		vm_unlock(&vm_to_from_lock.vm1);
 		vm_unlock(&vm_to_from_lock.vm2);
diff --git a/src/arch/aarch64/hypervisor/handler.c b/src/arch/aarch64/hypervisor/handler.c
index c67ede0..9a48895 100644
--- a/src/arch/aarch64/hypervisor/handler.c
+++ b/src/arch/aarch64/hypervisor/handler.c
@@ -382,6 +382,16 @@
 			ffa_assemble_handle(args->arg1, args->arg2), args->arg3,
 			current());
 		return true;
+	case FFA_MEM_FRAG_RX_32:
+		*args = api_ffa_mem_frag_rx(ffa_frag_handle(*args), args->arg3,
+					    (args->arg4 >> 16) & 0xffff,
+					    ffa_frag_sender(*args),
+		return true;
+	case FFA_MEM_FRAG_TX_32:
+		*args = api_ffa_mem_frag_tx(ffa_frag_handle(*args), args->arg3,
+					    ffa_frag_sender(*args),
+					    current());
+		return true;
 	}
 
 	return false;
diff --git a/src/ffa_memory.c b/src/ffa_memory.c
index 47f45f5..849a7cb 100644
--- a/src/ffa_memory.c
+++ b/src/ffa_memory.c
@@ -36,6 +36,12 @@
  */
 #define MAX_MEM_SHARES 100
 
+/**
+ * The maximum number of fragments into which a memory sharing message may be
+ * broken.
+ */
+#define MAX_FRAGMENTS 20
+
 static_assert(sizeof(struct ffa_memory_region_constituent) % 16 == 0,
 	      "struct ffa_memory_region_constituent must be a multiple of 16 "
 	      "bytes long.");
@@ -53,12 +59,25 @@
 	      "bytes long.");
 
 struct ffa_memory_share_state {
+	ffa_memory_handle_t handle;
+
 	/**
 	 * The memory region being shared, or NULL if this share state is
 	 * unallocated.
 	 */
 	struct ffa_memory_region *memory_region;
 
+	struct ffa_memory_region_constituent *fragments[MAX_FRAGMENTS];
+
+	/** The number of constituents in each fragment. */
+	uint32_t fragment_constituent_counts[MAX_FRAGMENTS];
+
+	/**
+	 * The number of valid elements in the `fragments` and
+	 * `fragment_constituent_counts` arrays.
+	 */
+	uint32_t fragment_count;
+
 	/**
 	 * The FF-A function used for sharing the memory. Must be one of
 	 * FFA_MEM_DONATE_32, FFA_MEM_LEND_32 or FFA_MEM_SHARE_32 if the
@@ -67,12 +86,18 @@
 	uint32_t share_func;
 
 	/**
-	 * Whether each recipient has retrieved the memory region yet. The order
-	 * of this array matches the order of the attribute descriptors in the
-	 * memory region descriptor. Any entries beyond the attribute_count will
-	 * always be false.
+	 * True if all the fragments of this sharing request have been sent and
+	 * Hafnium has updated the sender page table accordingly.
 	 */
-	bool retrieved[MAX_MEM_SHARE_RECIPIENTS];
+	bool sending_complete;
+
+	/**
+	 * How many fragments of the memory region each recipient has retrieved
+	 * so far. The order of this array matches the order of the endpoint
+	 * memory access descriptors in the memory region descriptor. Any
+	 * entries beyond the receiver_count will always be 0.
+	 */
+	uint32_t retrieved_fragment_count[MAX_MEM_SHARE_RECIPIENTS];
 };
 
 /**
@@ -90,36 +115,69 @@
 static struct ffa_memory_share_state share_states[MAX_MEM_SHARES];
 
 /**
- * Initialises the next available `struct ffa_memory_share_state` and sets
- * `handle` to its handle. Returns true on succes or false if none are
- * available.
+ * Buffer for retrieving memory region information from the TEE for when a
+ * region is reclaimed by a VM. Access to this buffer must be guarded by the VM
+ * lock of the TEE VM.
  */
-static bool allocate_share_state(uint32_t share_func,
-				 struct ffa_memory_region *memory_region,
-				 ffa_memory_handle_t *handle)
+alignas(PAGE_SIZE) static uint8_t
+	tee_retrieve_buffer[HF_MAILBOX_SIZE * MAX_FRAGMENTS];
+
+/**
+ * Initialises the next available `struct ffa_memory_share_state` and sets
+ * `share_state_ret` to a pointer to it. If `handle` is
+ * `FFA_MEMORY_HANDLE_INVALID` then allocates an appropriate handle, otherwise
+ * uses the provided handle which is assumed to be globally unique.
+ *
+ * Returns true on success or false if none are available.
+ */
+static bool allocate_share_state(
+	struct share_states_locked share_states, uint32_t share_func,
+	struct ffa_memory_region *memory_region, uint32_t fragment_length,
+	ffa_memory_handle_t handle,
+	struct ffa_memory_share_state **share_state_ret)
 {
 	uint64_t i;
 
+	CHECK(share_states.share_states != NULL);
 	CHECK(memory_region != NULL);
 
-	sl_lock(&share_states_lock_instance);
 	for (i = 0; i < MAX_MEM_SHARES; ++i) {
-		if (share_states[i].share_func == 0) {
+		if (share_states.share_states[i].share_func == 0) {
 			uint32_t j;
 			struct ffa_memory_share_state *allocated_state =
-				&share_states[i];
+				&share_states.share_states[i];
+			struct ffa_composite_memory_region *composite =
+				ffa_memory_region_get_composite(memory_region,
+								0);
+
+			if (handle == FFA_MEMORY_HANDLE_INVALID) {
+				allocated_state->handle =
+					i |
+					FFA_MEMORY_HANDLE_ALLOCATOR_HYPERVISOR;
+			} else {
+				allocated_state->handle = handle;
+			}
 			allocated_state->share_func = share_func;
 			allocated_state->memory_region = memory_region;
+			allocated_state->fragment_count = 1;
+			allocated_state->fragments[0] = composite->constituents;
+			allocated_state->fragment_constituent_counts[0] =
+				(fragment_length -
+				 ffa_composite_constituent_offset(memory_region,
+								  0)) /
+				sizeof(struct ffa_memory_region_constituent);
+			allocated_state->sending_complete = false;
 			for (j = 0; j < MAX_MEM_SHARE_RECIPIENTS; ++j) {
-				allocated_state->retrieved[j] = false;
+				allocated_state->retrieved_fragment_count[j] =
+					0;
 			}
-			*handle = i | FFA_MEMORY_HANDLE_ALLOCATOR_HYPERVISOR;
-			sl_unlock(&share_states_lock_instance);
+			if (share_state_ret != NULL) {
+				*share_state_ret = allocated_state;
+			}
 			return true;
 		}
 	}
 
-	sl_unlock(&share_states_lock_instance);
 	return false;
 }
 
@@ -140,29 +198,47 @@
 }
 
 /**
- * If the given handle is a valid handle for an allocated share state then takes
- * the lock, initialises `share_state_locked` to point to the share state and
- * returns true. Otherwise returns false and doesn't take the lock.
+ * If the given handle is a valid handle for an allocated share state then
+ * initialises `share_state_ret` to point to the share state and returns true.
+ * Otherwise returns false.
  */
 static bool get_share_state(struct share_states_locked share_states,
 			    ffa_memory_handle_t handle,
 			    struct ffa_memory_share_state **share_state_ret)
 {
 	struct ffa_memory_share_state *share_state;
-	uint32_t index = handle & ~FFA_MEMORY_HANDLE_ALLOCATOR_MASK;
+	uint32_t index;
 
-	if (index >= MAX_MEM_SHARES) {
-		return false;
+	CHECK(share_states.share_states != NULL);
+	CHECK(share_state_ret != NULL);
+
+	/*
+	 * First look for a share_state allocated by us, in which case the
+	 * handle is based on the index.
+	 */
+	if ((handle & FFA_MEMORY_HANDLE_ALLOCATOR_MASK) ==
+	    FFA_MEMORY_HANDLE_ALLOCATOR_HYPERVISOR) {
+		index = handle & ~FFA_MEMORY_HANDLE_ALLOCATOR_MASK;
+		if (index < MAX_MEM_SHARES) {
+			share_state = &share_states.share_states[index];
+			if (share_state->share_func != 0) {
+				*share_state_ret = share_state;
+				return true;
+			}
+		}
 	}
 
-	share_state = &share_states.share_states[index];
-
-	if (share_state->share_func == 0) {
-		return false;
+	/* Fall back to a linear scan. */
+	for (index = 0; index < MAX_MEM_SHARES; ++index) {
+		share_state = &share_states.share_states[index];
+		if (share_state->handle == handle &&
+		    share_state->share_func != 0) {
+			*share_state_ret = share_state;
+			return true;
+		}
 	}
 
-	*share_state_ret = share_state;
-	return true;
+	return false;
 }
 
 /** Marks a share state as unallocated. */
@@ -170,31 +246,86 @@
 			     struct ffa_memory_share_state *share_state,
 			     struct mpool *page_pool)
 {
+	uint32_t i;
+
 	CHECK(share_states.share_states != NULL);
 	share_state->share_func = 0;
+	share_state->sending_complete = false;
 	mpool_free(page_pool, share_state->memory_region);
+	/*
+	 * First fragment is part of the same page as the `memory_region`, so it
+	 * doesn't need to be freed separately.
+	 */
+	share_state->fragments[0] = NULL;
+	share_state->fragment_constituent_counts[0] = 0;
+	for (i = 1; i < share_state->fragment_count; ++i) {
+		mpool_free(page_pool, share_state->fragments[i]);
+		share_state->fragments[i] = NULL;
+		share_state->fragment_constituent_counts[i] = 0;
+	}
+	share_state->fragment_count = 0;
 	share_state->memory_region = NULL;
 }
 
-/**
- * Marks the share state with the given handle as unallocated, or returns false
- * if the handle was invalid.
- */
-static bool share_state_free_handle(ffa_memory_handle_t handle,
-				    struct mpool *page_pool)
+/** Checks whether the given share state has been fully sent. */
+static bool share_state_sending_complete(
+	struct share_states_locked share_states,
+	struct ffa_memory_share_state *share_state)
 {
-	struct share_states_locked share_states = share_states_lock();
-	struct ffa_memory_share_state *share_state;
+	struct ffa_composite_memory_region *composite;
+	uint32_t expected_constituent_count;
+	uint32_t fragment_constituent_count_total = 0;
+	uint32_t i;
 
-	if (!get_share_state(share_states, handle, &share_state)) {
-		share_states_unlock(&share_states);
-		return false;
+	/* Lock must be held. */
+	CHECK(share_states.share_states != NULL);
+
+	/*
+	 * Share state must already be valid, or it's not possible to get hold
+	 * of it.
+	 */
+	CHECK(share_state->memory_region != NULL &&
+	      share_state->share_func != 0);
+
+	composite =
+		ffa_memory_region_get_composite(share_state->memory_region, 0);
+	expected_constituent_count = composite->constituent_count;
+	for (i = 0; i < share_state->fragment_count; ++i) {
+		fragment_constituent_count_total +=
+			share_state->fragment_constituent_counts[i];
+	}
+	dlog_verbose(
+		"Checking completion: constituent count %d/%d from %d "
+		"fragments.\n",
+		fragment_constituent_count_total, expected_constituent_count,
+		share_state->fragment_count);
+
+	return fragment_constituent_count_total == expected_constituent_count;
+}
+
+/**
+ * Calculates the offset of the next fragment expected for the given share
+ * state.
+ */
+static uint32_t share_state_next_fragment_offset(
+	struct share_states_locked share_states,
+	struct ffa_memory_share_state *share_state)
+{
+	uint32_t next_fragment_offset;
+	uint32_t i;
+
+	/* Lock must be held. */
+	CHECK(share_states.share_states != NULL);
+
+	next_fragment_offset =
+		ffa_composite_constituent_offset(share_state->memory_region, 0);
+	for (i = 0; i < share_state->fragment_count; ++i) {
+		next_fragment_offset +=
+			share_state->fragment_constituent_counts[i] *
+			sizeof(struct ffa_memory_region_constituent);
 	}
 
-	share_state_free(share_states, share_state, page_pool);
-	share_states_unlock(&share_states);
-
-	return true;
+	return next_fragment_offset;
 }
 
 static void dump_memory_region(struct ffa_memory_region *memory_region)
@@ -236,7 +367,7 @@
 	sl_lock(&share_states_lock_instance);
 	for (i = 0; i < MAX_MEM_SHARES; ++i) {
 		if (share_states[i].share_func != 0) {
-			dlog("%d: ", i);
+			dlog("%#x: ", share_states[i].handle);
 			switch (share_states[i].share_func) {
 			case FFA_MEM_SHARE_32:
 				dlog("SHARE");
@@ -253,11 +384,14 @@
 			}
 			dlog(" (");
 			dump_memory_region(share_states[i].memory_region);
-			if (share_states[i].retrieved[0]) {
-				dlog("): retrieved\n");
+			if (share_states[i].sending_complete) {
+				dlog("): fully sent");
 			} else {
-				dlog("): not retrieved\n");
+				dlog("): partially sent");
 			}
+			dlog(" with %d fragments, %d retrieved\n",
+			     share_states[i].fragment_count,
+			     share_states[i].retrieved_fragment_count[0]);
 			break;
 		}
 	}
@@ -303,12 +437,13 @@
  */
 static struct ffa_value constituents_get_mode(
 	struct vm_locked vm, uint32_t *orig_mode,
-	struct ffa_memory_region_constituent *constituents,
-	uint32_t constituent_count)
+	struct ffa_memory_region_constituent **fragments,
+	const uint32_t *fragment_constituent_counts, uint32_t fragment_count)
 {
 	uint32_t i;
+	uint32_t j;
 
-	if (constituent_count == 0) {
+	if (fragment_count == 0 || fragment_constituent_counts[0] == 0) {
 		/*
 		 * Fail if there are no constituents. Otherwise we would get an
 		 * uninitialised *orig_mode.
@@ -316,34 +451,43 @@
 		return ffa_error(FFA_INVALID_PARAMETERS);
 	}
 
-	for (i = 0; i < constituent_count; ++i) {
-		ipaddr_t begin = ipa_init(constituents[i].address);
-		size_t size = constituents[i].page_count * PAGE_SIZE;
-		ipaddr_t end = ipa_add(begin, size);
-		uint32_t current_mode;
+	for (i = 0; i < fragment_count; ++i) {
+		for (j = 0; j < fragment_constituent_counts[i]; ++j) {
+			ipaddr_t begin = ipa_init(fragments[i][j].address);
+			size_t size = fragments[i][j].page_count * PAGE_SIZE;
+			ipaddr_t end = ipa_add(begin, size);
+			uint32_t current_mode;
 
-		/* Fail if addresses are not page-aligned. */
-		if (!is_aligned(ipa_addr(begin), PAGE_SIZE) ||
-		    !is_aligned(ipa_addr(end), PAGE_SIZE)) {
-			return ffa_error(FFA_INVALID_PARAMETERS);
-		}
+			/* Fail if addresses are not page-aligned. */
+			if (!is_aligned(ipa_addr(begin), PAGE_SIZE) ||
+			    !is_aligned(ipa_addr(end), PAGE_SIZE)) {
+				return ffa_error(FFA_INVALID_PARAMETERS);
+			}
 
-		/*
-		 * Ensure that this constituent memory range is all mapped with
-		 * the same mode.
-		 */
-		if (!mm_vm_get_mode(&vm.vm->ptable, begin, end,
-				    &current_mode)) {
-			return ffa_error(FFA_DENIED);
-		}
+			/*
+			 * Ensure that this constituent memory range is all
+			 * mapped with the same mode.
+			 */
+			if (!mm_vm_get_mode(&vm.vm->ptable, begin, end,
+					    &current_mode)) {
+				return ffa_error(FFA_DENIED);
+			}
 
-		/*
-		 * Ensure that all constituents are mapped with the same mode.
-		 */
-		if (i == 0) {
-			*orig_mode = current_mode;
-		} else if (current_mode != *orig_mode) {
-			return ffa_error(FFA_DENIED);
+			/*
+			 * Ensure that all constituents are mapped with the same
+			 * mode.
+			 */
+			if (i == 0) {
+				*orig_mode = current_mode;
+			} else if (current_mode != *orig_mode) {
+				dlog_verbose(
+					"Expected mode %#x but was %#x for %d "
+					"pages at %#x.\n",
+					*orig_mode, current_mode,
+					fragments[i][j].page_count,
+					ipa_addr(begin));
+				return ffa_error(FFA_DENIED);
+			}
 		}
 	}
 
@@ -367,8 +511,9 @@
 static struct ffa_value ffa_send_check_transition(
 	struct vm_locked from, uint32_t share_func,
 	ffa_memory_access_permissions_t permissions, uint32_t *orig_from_mode,
-	struct ffa_memory_region_constituent *constituents,
-	uint32_t constituent_count, uint32_t *from_mode)
+	struct ffa_memory_region_constituent **fragments,
+	uint32_t *fragment_constituent_counts, uint32_t fragment_count,
+	uint32_t *from_mode)
 {
 	const uint32_t state_mask =
 		MM_MODE_INVALID | MM_MODE_UNOWNED | MM_MODE_SHARED;
@@ -376,9 +521,11 @@
 		ffa_memory_permissions_to_mode(permissions);
 	struct ffa_value ret;
 
-	ret = constituents_get_mode(from, orig_from_mode, constituents,
-				    constituent_count);
+	ret = constituents_get_mode(from, orig_from_mode, fragments,
+				    fragment_constituent_counts,
+				    fragment_count);
 	if (ret.func != FFA_SUCCESS_32) {
+		dlog_verbose("Inconsistent modes.\n");
 		return ret;
 	}
 
@@ -429,16 +576,18 @@
 
 static struct ffa_value ffa_relinquish_check_transition(
 	struct vm_locked from, uint32_t *orig_from_mode,
-	struct ffa_memory_region_constituent *constituents,
-	uint32_t constituent_count, uint32_t *from_mode)
+	struct ffa_memory_region_constituent **fragments,
+	uint32_t *fragment_constituent_counts, uint32_t fragment_count,
+	uint32_t *from_mode)
 {
 	const uint32_t state_mask =
 		MM_MODE_INVALID | MM_MODE_UNOWNED | MM_MODE_SHARED;
 	uint32_t orig_from_state;
 	struct ffa_value ret;
 
-	ret = constituents_get_mode(from, orig_from_mode, constituents,
-				    constituent_count);
+	ret = constituents_get_mode(from, orig_from_mode, fragments,
+				    fragment_constituent_counts,
+				    fragment_count);
 	if (ret.func != FFA_SUCCESS_32) {
 		return ret;
 	}
@@ -458,8 +607,7 @@
 	if ((orig_from_state & ~MM_MODE_SHARED) != MM_MODE_UNOWNED) {
 		dlog_verbose(
 			"Tried to relinquish memory in state %#x (masked %#x "
-			"but "
-			"should be %#x).\n",
+			"but should be %#x).\n",
 			*orig_from_mode, orig_from_state, MM_MODE_UNOWNED);
 		return ffa_error(FFA_DENIED);
 	}
@@ -486,16 +634,18 @@
  */
 static struct ffa_value ffa_retrieve_check_transition(
 	struct vm_locked to, uint32_t share_func,
-	struct ffa_memory_region_constituent *constituents,
-	uint32_t constituent_count, uint32_t memory_to_attributes,
-	uint32_t *to_mode)
+	struct ffa_memory_region_constituent **fragments,
+	uint32_t *fragment_constituent_counts, uint32_t fragment_count,
+	uint32_t memory_to_attributes, uint32_t *to_mode)
 {
 	uint32_t orig_to_mode;
 	struct ffa_value ret;
 
-	ret = constituents_get_mode(to, &orig_to_mode, constituents,
-				    constituent_count);
+	ret = constituents_get_mode(to, &orig_to_mode, fragments,
+				    fragment_constituent_counts,
+				    fragment_count);
 	if (ret.func != FFA_SUCCESS_32) {
+		dlog_verbose("Inconsistent modes.\n");
 		return ret;
 	}
 
@@ -540,6 +690,7 @@
 		break;
 
 	default:
+		dlog_error("Invalid share_func %#x.\n", share_func);
 		return ffa_error(FFA_INVALID_PARAMETERS);
 	}
 
@@ -566,22 +717,28 @@
  */
 static bool ffa_region_group_identity_map(
 	struct vm_locked vm_locked,
-	struct ffa_memory_region_constituent *constituents,
-	uint32_t constituent_count, int mode, struct mpool *ppool, bool commit)
+	struct ffa_memory_region_constituent **fragments,
+	const uint32_t *fragment_constituent_counts, uint32_t fragment_count,
+	int mode, struct mpool *ppool, bool commit)
 {
-	/* Iterate over the memory region constituents. */
-	for (uint32_t index = 0; index < constituent_count; index++) {
-		size_t size = constituents[index].page_count * PAGE_SIZE;
-		paddr_t pa_begin =
-			pa_from_ipa(ipa_init(constituents[index].address));
-		paddr_t pa_end = pa_add(pa_begin, size);
+	uint32_t i;
+	uint32_t j;
 
-		if (commit) {
-			vm_identity_commit(vm_locked, pa_begin, pa_end, mode,
-					   ppool, NULL);
-		} else if (!vm_identity_prepare(vm_locked, pa_begin, pa_end,
-						mode, ppool)) {
-			return false;
+	/* Iterate over the memory region constituents within each fragment. */
+	for (i = 0; i < fragment_count; ++i) {
+		for (j = 0; j < fragment_constituent_counts[i]; ++j) {
+			size_t size = fragments[i][j].page_count * PAGE_SIZE;
+			paddr_t pa_begin =
+				pa_from_ipa(ipa_init(fragments[i][j].address));
+			paddr_t pa_end = pa_add(pa_begin, size);
+
+			if (commit) {
+				vm_identity_commit(vm_locked, pa_begin, pa_end,
+						   mode, ppool, NULL);
+			} else if (!vm_identity_prepare(vm_locked, pa_begin,
+							pa_end, mode, ppool)) {
+				return false;
+			}
 		}
 	}
 
@@ -634,10 +791,12 @@
  * flushed from the cache so the memory has been cleared across the system.
  */
 static bool ffa_clear_memory_constituents(
-	struct ffa_memory_region_constituent *constituents,
-	uint32_t constituent_count, struct mpool *page_pool)
+	struct ffa_memory_region_constituent **fragments,
+	const uint32_t *fragment_constituent_counts, uint32_t fragment_count,
+	struct mpool *page_pool)
 {
 	struct mpool local_page_pool;
+	uint32_t i;
 	struct mm_stage1_locked stage1_locked;
 	bool ret = false;
 
@@ -648,18 +807,23 @@
 	 */
 	mpool_init_with_fallback(&local_page_pool, page_pool);
 
-	/* Iterate over the memory region constituents. */
-	for (uint32_t i = 0; i < constituent_count; ++i) {
-		size_t size = constituents[i].page_count * PAGE_SIZE;
-		paddr_t begin = pa_from_ipa(ipa_init(constituents[i].address));
-		paddr_t end = pa_add(begin, size);
+	/* Iterate over the memory region constituents within each fragment. */
+	for (i = 0; i < fragment_count; ++i) {
+		uint32_t j;
 
-		if (!clear_memory(begin, end, &local_page_pool)) {
-			/*
-			 * api_clear_memory will defrag on failure, so no need
-			 * to do it here.
-			 */
-			goto out;
+		for (j = 0; j < fragment_constituent_counts[i]; ++j) {
+			size_t size = fragments[i][j].page_count * PAGE_SIZE;
+			paddr_t begin =
+				pa_from_ipa(ipa_init(fragments[i][j].address));
+			paddr_t end = pa_add(begin, size);
+
+			if (!clear_memory(begin, end, &local_page_pool)) {
+				/*
+				 * api_clear_memory will defrag on failure, so
+				 * no need to do it here.
+				 */
+				goto out;
+			}
 		}
 	}
 
@@ -695,12 +859,13 @@
  */
 static struct ffa_value ffa_send_check_update(
 	struct vm_locked from_locked,
-	struct ffa_memory_region_constituent *constituents,
-	uint32_t constituent_count, uint32_t share_func,
-	ffa_memory_access_permissions_t permissions, struct mpool *page_pool,
-	bool clear)
+	struct ffa_memory_region_constituent **fragments,
+	uint32_t *fragment_constituent_counts, uint32_t fragment_count,
+	uint32_t share_func, ffa_memory_access_permissions_t permissions,
+	struct mpool *page_pool, bool clear)
 {
 	struct vm *from = from_locked.vm;
+	uint32_t i;
 	uint32_t orig_from_mode;
 	uint32_t from_mode;
 	struct mpool local_page_pool;
@@ -710,8 +875,11 @@
 	 * Make sure constituents are properly aligned to a 64-bit boundary. If
 	 * not we would get alignment faults trying to read (64-bit) values.
 	 */
-	if (!is_aligned(constituents, 8)) {
-		return ffa_error(FFA_INVALID_PARAMETERS);
+	for (i = 0; i < fragment_count; ++i) {
+		if (!is_aligned(fragments[i], 8)) {
+			dlog_verbose("Constituents not aligned.\n");
+			return ffa_error(FFA_INVALID_PARAMETERS);
+		}
 	}
 
 	/*
@@ -720,9 +888,11 @@
 	 * state.
 	 */
 	ret = ffa_send_check_transition(from_locked, share_func, permissions,
-					&orig_from_mode, constituents,
-					constituent_count, &from_mode);
+					&orig_from_mode, fragments,
+					fragment_constituent_counts,
+					fragment_count, &from_mode);
 	if (ret.func != FFA_SUCCESS_32) {
+		dlog_verbose("Invalid transition for send.\n");
 		return ret;
 	}
 
@@ -738,9 +908,9 @@
 	 * without committing, to make sure the entire operation will succeed
 	 * without exhausting the page pool.
 	 */
-	if (!ffa_region_group_identity_map(from_locked, constituents,
-					   constituent_count, from_mode,
-					   page_pool, false)) {
+	if (!ffa_region_group_identity_map(
+		    from_locked, fragments, fragment_constituent_counts,
+		    fragment_count, from_mode, page_pool, false)) {
 		/* TODO: partial defrag of failed range. */
 		ret = ffa_error(FFA_NO_MEMORY);
 		goto out;
@@ -752,13 +922,14 @@
 	 * case that a whole block is being unmapped that was previously
 	 * partially mapped.
 	 */
-	CHECK(ffa_region_group_identity_map(from_locked, constituents,
-					    constituent_count, from_mode,
-					    &local_page_pool, true));
+	CHECK(ffa_region_group_identity_map(
+		from_locked, fragments, fragment_constituent_counts,
+		fragment_count, from_mode, &local_page_pool, true));
 
 	/* Clear the memory so no VM or device can see the previous contents. */
 	if (clear && !ffa_clear_memory_constituents(
-			     constituents, constituent_count, page_pool)) {
+			     fragments, fragment_constituent_counts,
+			     fragment_count, page_pool)) {
 		/*
 		 * On failure, roll back by returning memory to the sender. This
 		 * may allocate pages which were previously freed into
@@ -766,8 +937,9 @@
 		 * more pages than that so can never fail.
 		 */
 		CHECK(ffa_region_group_identity_map(
-			from_locked, constituents, constituent_count,
-			orig_from_mode, &local_page_pool, true));
+			from_locked, fragments, fragment_constituent_counts,
+			fragment_count, orig_from_mode, &local_page_pool,
+			true));
 
 		ret = ffa_error(FFA_NO_MEMORY);
 		goto out;
@@ -802,22 +974,25 @@
  */
 static struct ffa_value ffa_retrieve_check_update(
 	struct vm_locked to_locked,
-	struct ffa_memory_region_constituent *constituents,
-	uint32_t constituent_count, uint32_t memory_to_attributes,
-	uint32_t share_func, bool clear, struct mpool *page_pool)
+	struct ffa_memory_region_constituent **fragments,
+	uint32_t *fragment_constituent_counts, uint32_t fragment_count,
+	uint32_t memory_to_attributes, uint32_t share_func, bool clear,
+	struct mpool *page_pool)
 {
 	struct vm *to = to_locked.vm;
+	uint32_t i;
 	uint32_t to_mode;
 	struct mpool local_page_pool;
 	struct ffa_value ret;
 
 	/*
-	 * Make sure constituents are properly aligned to a 32-bit boundary. If
-	 * not we would get alignment faults trying to read (32-bit) values.
+	 * Make sure constituents are properly aligned to a 64-bit boundary. If
+	 * not we would get alignment faults trying to read (64-bit) values.
 	 */
-	if (!is_aligned(constituents, 4)) {
-		dlog_verbose("Constituents not aligned.\n");
-		return ffa_error(FFA_INVALID_PARAMETERS);
+	for (i = 0; i < fragment_count; ++i) {
+		if (!is_aligned(fragments[i], 8)) {
+			return ffa_error(FFA_INVALID_PARAMETERS);
+		}
 	}
 
 	/*
@@ -825,11 +1000,11 @@
 	 * that all constituents of the memory region being retrieved are at the
 	 * same state.
 	 */
-	ret = ffa_retrieve_check_transition(to_locked, share_func, constituents,
-					    constituent_count,
-					    memory_to_attributes, &to_mode);
+	ret = ffa_retrieve_check_transition(
+		to_locked, share_func, fragments, fragment_constituent_counts,
+		fragment_count, memory_to_attributes, &to_mode);
 	if (ret.func != FFA_SUCCESS_32) {
-		dlog_verbose("Invalid transition.\n");
+		dlog_verbose("Invalid transition for retrieve.\n");
 		return ret;
 	}
 
@@ -845,9 +1020,9 @@
 	 * the recipient page tables without committing, to make sure the entire
 	 * operation will succeed without exhausting the page pool.
 	 */
-	if (!ffa_region_group_identity_map(to_locked, constituents,
-					   constituent_count, to_mode,
-					   page_pool, false)) {
+	if (!ffa_region_group_identity_map(
+		    to_locked, fragments, fragment_constituent_counts,
+		    fragment_count, to_mode, page_pool, false)) {
 		/* TODO: partial defrag of failed range. */
 		dlog_verbose(
 			"Insufficient memory to update recipient page "
@@ -858,7 +1033,8 @@
 
 	/* Clear the memory so no VM or device can see the previous contents. */
 	if (clear && !ffa_clear_memory_constituents(
-			     constituents, constituent_count, page_pool)) {
+			     fragments, fragment_constituent_counts,
+			     fragment_count, page_pool)) {
 		ret = ffa_error(FFA_NO_MEMORY);
 		goto out;
 	}
@@ -868,9 +1044,9 @@
 	 * won't allocate because the transaction was already prepared above, so
 	 * it doesn't need to use the `local_page_pool`.
 	 */
-	CHECK(ffa_region_group_identity_map(to_locked, constituents,
-					    constituent_count, to_mode,
-					    page_pool, true));
+	CHECK(ffa_region_group_identity_map(
+		to_locked, fragments, fragment_constituent_counts,
+		fragment_count, to_mode, page_pool, true));
 
 	ret = (struct ffa_value){.func = FFA_SUCCESS_32};
 
@@ -915,10 +1091,10 @@
 	ffa_memory_region_flags_t tee_flags;
 
 	/*
-	 * Make sure constituents are properly aligned to a 32-bit boundary. If
-	 * not we would get alignment faults trying to read (32-bit) values.
+	 * Make sure constituents are properly aligned to a 64-bit boundary. If
+	 * not we would get alignment faults trying to read (64-bit) values.
 	 */
-	if (!is_aligned(constituents, 4)) {
+	if (!is_aligned(constituents, 8)) {
 		dlog_verbose("Constituents not aligned.\n");
 		return ffa_error(FFA_INVALID_PARAMETERS);
 	}
@@ -929,8 +1105,8 @@
 	 * same state.
 	 */
 	ret = ffa_retrieve_check_transition(to_locked, FFA_MEM_RECLAIM_32,
-					    constituents, constituent_count,
-					    memory_to_attributes, &to_mode);
+					    &constituents, &constituent_count,
+					    1, memory_to_attributes, &to_mode);
 	if (ret.func != FFA_SUCCESS_32) {
 		dlog_verbose("Invalid transition.\n");
 		return ret;
@@ -948,8 +1124,8 @@
 	 * the recipient page tables without committing, to make sure the entire
 	 * operation will succeed without exhausting the page pool.
 	 */
-	if (!ffa_region_group_identity_map(to_locked, constituents,
-					   constituent_count, to_mode,
+	if (!ffa_region_group_identity_map(to_locked, &constituents,
+					   &constituent_count, 1, to_mode,
 					   page_pool, false)) {
 		/* TODO: partial defrag of failed range. */
 		dlog_verbose(
@@ -973,8 +1149,8 @@
 
 	if (ret.func != FFA_SUCCESS_32) {
 		dlog_verbose(
-			"Got %#x (%d) from TEE in response to "
-			"FFA_MEM_RECLAIM_32, expected FFA_SUCCESS_32.\n",
+			"Got %#x (%d) from TEE in response to FFA_MEM_RECLAIM, "
+			"expected FFA_SUCCESS.\n",
 			ret.func, ret.arg2);
 		goto out;
 	}
@@ -985,8 +1161,8 @@
 	 * transaction was already prepared above, so it doesn't need to use the
 	 * `local_page_pool`.
 	 */
-	CHECK(ffa_region_group_identity_map(to_locked, constituents,
-					    constituent_count, to_mode,
+	CHECK(ffa_region_group_identity_map(to_locked, &constituents,
+					    &constituent_count, 1, to_mode,
 					    page_pool, true));
 
 	ret = (struct ffa_value){.func = FFA_SUCCESS_32};
@@ -1005,19 +1181,20 @@
 
 static struct ffa_value ffa_relinquish_check_update(
 	struct vm_locked from_locked,
-	struct ffa_memory_region_constituent *constituents,
-	uint32_t constituent_count, struct mpool *page_pool, bool clear)
+	struct ffa_memory_region_constituent **fragments,
+	uint32_t *fragment_constituent_counts, uint32_t fragment_count,
+	struct mpool *page_pool, bool clear)
 {
 	uint32_t orig_from_mode;
 	uint32_t from_mode;
 	struct mpool local_page_pool;
 	struct ffa_value ret;
 
-	ret = ffa_relinquish_check_transition(from_locked, &orig_from_mode,
-					      constituents, constituent_count,
-					      &from_mode);
+	ret = ffa_relinquish_check_transition(
+		from_locked, &orig_from_mode, fragments,
+		fragment_constituent_counts, fragment_count, &from_mode);
 	if (ret.func != FFA_SUCCESS_32) {
-		dlog_verbose("Invalid transition.\n");
+		dlog_verbose("Invalid transition for relinquish.\n");
 		return ret;
 	}
 
@@ -1033,9 +1210,9 @@
 	 * without committing, to make sure the entire operation will succeed
 	 * without exhausting the page pool.
 	 */
-	if (!ffa_region_group_identity_map(from_locked, constituents,
-					   constituent_count, from_mode,
-					   page_pool, false)) {
+	if (!ffa_region_group_identity_map(
+		    from_locked, fragments, fragment_constituent_counts,
+		    fragment_count, from_mode, page_pool, false)) {
 		/* TODO: partial defrag of failed range. */
 		ret = ffa_error(FFA_NO_MEMORY);
 		goto out;
@@ -1047,13 +1224,14 @@
 	 * case that a whole block is being unmapped that was previously
 	 * partially mapped.
 	 */
-	CHECK(ffa_region_group_identity_map(from_locked, constituents,
-					    constituent_count, from_mode,
-					    &local_page_pool, true));
+	CHECK(ffa_region_group_identity_map(
+		from_locked, fragments, fragment_constituent_counts,
+		fragment_count, from_mode, &local_page_pool, true));
 
 	/* Clear the memory so no VM or device can see the previous contents. */
 	if (clear && !ffa_clear_memory_constituents(
-			     constituents, constituent_count, page_pool)) {
+			     fragments, fragment_constituent_counts,
+			     fragment_count, page_pool)) {
 		/*
 		 * On failure, roll back by returning memory to the sender. This
 		 * may allocate pages which were previously freed into
@@ -1061,8 +1239,9 @@
 		 * more pages than that so can never fail.
 		 */
 		CHECK(ffa_region_group_identity_map(
-			from_locked, constituents, constituent_count,
-			orig_from_mode, &local_page_pool, true));
+			from_locked, fragments, fragment_constituent_counts,
+			fragment_count, orig_from_mode, &local_page_pool,
+			true));
 
 		ret = ffa_error(FFA_NO_MEMORY);
 		goto out;
@@ -1083,6 +1262,45 @@
 }
 
 /**
+ * Completes a memory sending operation by checking that it is valid, updating
+ * the sender page table, and then either marking the share state as having
+ * completed sending (on success) or freeing it (on failure).
+ *
+ * Returns FFA_SUCCESS with the handle encoded, or the relevant FFA_ERROR.
+ */
+static struct ffa_value ffa_memory_send_complete(
+	struct vm_locked from_locked, struct share_states_locked share_states,
+	struct ffa_memory_share_state *share_state, struct mpool *page_pool)
+{
+	struct ffa_memory_region *memory_region = share_state->memory_region;
+	struct ffa_value ret;
+
+	/* Lock must be held. */
+	CHECK(share_states.share_states != NULL);
+
+	/* Check that state is valid in sender page table and update. */
+	ret = ffa_send_check_update(
+		from_locked, share_state->fragments,
+		share_state->fragment_constituent_counts,
+		share_state->fragment_count, share_state->share_func,
+		memory_region->receivers[0].receiver_permissions.permissions,
+		page_pool, memory_region->flags & FFA_MEMORY_REGION_FLAG_CLEAR);
+	if (ret.func != FFA_SUCCESS_32) {
+		/*
+		 * Free share state, it failed to send so it can't be retrieved.
+		 */
+		dlog_verbose("Complete failed, freeing share state.\n");
+		share_state_free(share_states, share_state, page_pool);
+		return ret;
+	}
+
+	share_state->sending_complete = true;
+	dlog_verbose("Marked sending complete.\n");
+
+	return ffa_mem_success(share_state->handle);
+}
+
+/**
  * Check that the given `memory_region` represents a valid memory send request
  * of the given `share_func` type, return the clear flag and permissions via the
  * respective output parameters, and update the permissions if necessary.
@@ -1149,6 +1367,13 @@
 				     .composite_memory_region_offset);
 		return ffa_error(FFA_INVALID_PARAMETERS);
 	}
+	if (fragment_length < memory_share_length &&
+	    fragment_length < HF_MAILBOX_SIZE) {
+		dlog_warning(
+			"Initial fragment length %d smaller than mailbox "
+			"size.\n",
+			fragment_length);
+	}
 
 	/*
 	 * Clear is not allowed for memory sharing, as the sender still has
@@ -1250,6 +1475,96 @@
 }
 
 /**
+ * Gets the share state for continuing an operation to donate, lend or share
+ * memory, and checks that it is a valid request.
+ *
+ * Returns FFA_SUCCESS if the request was valid, or the relevant FFA_ERROR if
+ * not.
+ */
+static struct ffa_value ffa_memory_send_continue_validate(
+	struct share_states_locked share_states, ffa_memory_handle_t handle,
+	struct ffa_memory_share_state **share_state_ret, ffa_vm_id_t from_vm_id,
+	struct mpool *page_pool)
+{
+	struct ffa_memory_share_state *share_state;
+	struct ffa_memory_region *memory_region;
+
+	CHECK(share_state_ret != NULL);
+
+	/*
+	 * Look up the share state by handle and make sure that the VM ID
+	 * matches.
+	 */
+	if (!get_share_state(share_states, handle, &share_state)) {
+		dlog_verbose(
+			"Invalid handle %#x for memory send continuation.\n",
+			handle);
+		return ffa_error(FFA_INVALID_PARAMETERS);
+	}
+	memory_region = share_state->memory_region;
+
+	if (memory_region->sender != from_vm_id) {
+		dlog_verbose("Invalid sender %d.\n", memory_region->sender);
+		return ffa_error(FFA_INVALID_PARAMETERS);
+	}
+
+	if (share_state->sending_complete) {
+		dlog_verbose(
+			"Sending of memory handle %#x is already complete.\n",
+			handle);
+		return ffa_error(FFA_INVALID_PARAMETERS);
+	}
+
+	if (share_state->fragment_count == MAX_FRAGMENTS) {
+		/*
+		 * Log a warning as this is a sign that MAX_FRAGMENTS should
+		 * probably be increased.
+		 */
+		dlog_warning(
+			"Too many fragments for memory share with handle %#x; "
+			"only %d supported.\n",
+			handle, MAX_FRAGMENTS);
+		/* Free share state, as it's not possible to complete it. */
+		share_state_free(share_states, share_state, page_pool);
+		return ffa_error(FFA_NO_MEMORY);
+	}
+
+	*share_state_ret = share_state;
+
+	return (struct ffa_value){.func = FFA_SUCCESS_32};
+}
+
+/**
+ * Forwards a memory send continuation message on to the TEE.
+ */
+static struct ffa_value memory_send_continue_tee_forward(
+	struct vm_locked tee_locked, ffa_vm_id_t sender_vm_id, void *fragment,
+	uint32_t fragment_length, ffa_memory_handle_t handle)
+{
+	struct ffa_value ret;
+
+	memcpy_s(tee_locked.vm->mailbox.recv, FFA_MSG_PAYLOAD_MAX, fragment,
+		 fragment_length);
+	tee_locked.vm->mailbox.recv_size = fragment_length;
+	tee_locked.vm->mailbox.recv_sender = sender_vm_id;
+	tee_locked.vm->mailbox.recv_func = FFA_MEM_FRAG_TX_32;
+	tee_locked.vm->mailbox.state = MAILBOX_STATE_RECEIVED;
+	ret = arch_tee_call(
+		(struct ffa_value){.func = FFA_MEM_FRAG_TX_32,
+				   .arg1 = (uint32_t)handle,
+				   .arg2 = (uint32_t)(handle >> 32),
+				   .arg3 = fragment_length,
+				   .arg4 = (uint64_t)sender_vm_id << 16});
+	/*
+	 * After the call to the TEE completes it must have finished reading its
+	 * RX buffer, so it is ready for another message.
+	 */
+	tee_locked.vm->mailbox.state = MAILBOX_STATE_EMPTY;
+
+	return ret;
+}
+
+/**
  * Validates a call to donate, lend or share memory to a non-TEE VM and then
  * updates the stage-2 page tables. Specifically, check if the message length
  * and number of memory region constituents match, and if the transition is
@@ -1271,8 +1586,8 @@
 {
 	ffa_memory_access_permissions_t permissions;
 	struct ffa_value ret;
-	ffa_memory_handle_t handle;
-	struct ffa_composite_memory_region *composite;
+	struct share_states_locked share_states;
+	struct ffa_memory_share_state *share_state;
 
 	/*
 	 * If there is an error validating the `memory_region` then we need to
@@ -1302,33 +1617,38 @@
 		break;
 	}
 
+	share_states = share_states_lock();
 	/*
 	 * Allocate a share state before updating the page table. Otherwise if
 	 * updating the page table succeeded but allocating the share state
 	 * failed then it would leave the memory in a state where nobody could
 	 * get it back.
 	 */
-	if (!allocate_share_state(share_func, memory_region, &handle)) {
+	if (!allocate_share_state(share_states, share_func, memory_region,
+				  fragment_length, FFA_MEMORY_HANDLE_INVALID,
+				  &share_state)) {
 		dlog_verbose("Failed to allocate share state.\n");
 		mpool_free(page_pool, memory_region);
-		return ffa_error(FFA_NO_MEMORY);
+		ret = ffa_error(FFA_NO_MEMORY);
+		goto out;
 	}
 
+	if (fragment_length == memory_share_length) {
+		/* No more fragments to come, everything fit in one message. */
+		ret = ffa_memory_send_complete(from_locked, share_states,
+					       share_state, page_pool);
+	} else {
+		ret = (struct ffa_value){
+			.func = FFA_MEM_FRAG_RX_32,
+			.arg1 = (uint32_t)share_state->handle,
+			.arg2 = (uint32_t)(share_state->handle >> 32),
+			.arg3 = fragment_length};
+	}
+
+out:
+	share_states_unlock(&share_states);
 	dump_share_states();
-
-	/* Check that state is valid in sender page table and update. */
-	composite = ffa_memory_region_get_composite(memory_region, 0);
-	ret = ffa_send_check_update(
-		from_locked, composite->constituents,
-		composite->constituent_count, share_func, permissions,
-		page_pool, memory_region->flags & FFA_MEMORY_REGION_FLAG_CLEAR);
-	if (ret.func != FFA_SUCCESS_32) {
-		/* Free share state. */
-		CHECK(share_state_free_handle(handle, page_pool));
-		return ret;
-	}
-
-	return ffa_mem_success(handle);
+	return ret;
 }
 
 /**
@@ -1352,7 +1672,6 @@
 {
 	ffa_memory_access_permissions_t permissions;
 	struct ffa_value ret;
-	struct ffa_composite_memory_region *composite;
 
 	/*
 	 * If there is an error validating the `memory_region` then we need to
@@ -1366,26 +1685,336 @@
 		goto out;
 	}
 
-	/* Check that state is valid in sender page table and update. */
-	composite = ffa_memory_region_get_composite(memory_region, 0);
-	ret = ffa_send_check_update(
-		from_locked, composite->constituents,
-		composite->constituent_count, share_func, permissions,
-		page_pool, memory_region->flags & FFA_MEMORY_REGION_FLAG_CLEAR);
-	if (ret.func != FFA_SUCCESS_32) {
-		goto out;
+	if (fragment_length == memory_share_length) {
+		/* No more fragments to come, everything fit in one message. */
+		struct ffa_composite_memory_region *composite =
+			ffa_memory_region_get_composite(memory_region, 0);
+		struct ffa_memory_region_constituent *constituents =
+			composite->constituents;
+
+		ret = ffa_send_check_update(
+			from_locked, &constituents,
+			&composite->constituent_count, 1, share_func,
+			permissions, page_pool,
+			memory_region->flags & FFA_MEMORY_REGION_FLAG_CLEAR);
+		if (ret.func != FFA_SUCCESS_32) {
+			goto out;
+		}
+
+		/* Forward memory send message on to TEE. */
+		ret = memory_send_tee_forward(
+			to_locked, from_locked.vm->id, share_func,
+			memory_region, memory_share_length, fragment_length);
+	} else {
+		struct share_states_locked share_states = share_states_lock();
+		ffa_memory_handle_t handle;
+
+		/*
+		 * We need to wait for the rest of the fragments before we can
+		 * check whether the transaction is valid and unmap the memory.
+		 * Call the TEE so it can do its initial validation and assign a
+		 * handle, and allocate a share state to keep what we have so
+		 * far.
+		 */
+		ret = memory_send_tee_forward(
+			to_locked, from_locked.vm->id, share_func,
+			memory_region, memory_share_length, fragment_length);
+		if (ret.func == FFA_ERROR_32) {
+			goto out_unlock;
+		} else if (ret.func != FFA_MEM_FRAG_RX_32) {
+			dlog_warning(
+				"Got %#x from TEE in response to %#x for "
+				"fragment with %d/%d, expected "
+				"FFA_MEM_FRAG_RX.\n",
+				ret.func, share_func, fragment_length,
+				memory_share_length);
+			ret = ffa_error(FFA_INVALID_PARAMETERS);
+			goto out_unlock;
+		}
+		handle = ffa_frag_handle(ret);
+		if (ret.arg3 != fragment_length) {
+			dlog_warning(
+				"Got unexpected fragment offset %d for "
+				"FFA_MEM_FRAG_RX from TEE (expected %d).\n",
+				ret.arg3, fragment_length);
+			ret = ffa_error(FFA_INVALID_PARAMETERS);
+			goto out_unlock;
+		}
+		if (ffa_frag_sender(ret) != from_locked.vm->id) {
+			dlog_warning(
+				"Got unexpected sender ID %d for "
+				"FFA_MEM_FRAG_RX from TEE (expected %d).\n",
+				ffa_frag_sender(ret), from_locked.vm->id);
+			ret = ffa_error(FFA_INVALID_PARAMETERS);
+			goto out_unlock;
+		}
+
+		if (!allocate_share_state(share_states, share_func,
+					  memory_region, fragment_length,
+					  handle, NULL)) {
+			dlog_verbose("Failed to allocate share state.\n");
+			ret = ffa_error(FFA_NO_MEMORY);
+			goto out_unlock;
+		}
+		/*
+		 * Don't free the memory region fragment, as it has been stored
+		 * in the share state.
+		 */
+		memory_region = NULL;
+	out_unlock:
+		share_states_unlock(&share_states);
 	}
 
-	/* Forward memory send message on to TEE. */
-	ret = memory_send_tee_forward(to_locked, from_locked.vm->id, share_func,
-				      memory_region, memory_share_length,
-				      fragment_length);
+out:
+	if (memory_region != NULL) {
+		mpool_free(page_pool, memory_region);
+	}
+	dump_share_states();
+	return ret;
+}
+
+/**
+ * Continues an operation to donate, lend or share memory to a non-TEE VM. If
+ * this is the last fragment then checks that the transition is valid for the
+ * type of memory sending operation and updates the stage-2 page tables of the
+ * sender.
+ *
+ * Assumes that the caller has already found and locked the sender VM and copied
+ * the fragment from the sender's TX buffer to a freshly allocated page from
+ * Hafnium's internal pool.
+ *
+ * This function takes ownership of the `fragment` passed in; it must not be
+ * freed by the caller.
+ */
+struct ffa_value ffa_memory_send_continue(struct vm_locked from_locked,
+					  void *fragment,
+					  uint32_t fragment_length,
+					  ffa_memory_handle_t handle,
+					  struct mpool *page_pool)
+{
+	struct share_states_locked share_states = share_states_lock();
+	struct ffa_memory_share_state *share_state;
+	struct ffa_value ret;
+	struct ffa_memory_region *memory_region;
+
+	ret = ffa_memory_send_continue_validate(share_states, handle,
+						&share_state,
+						from_locked.vm->id, page_pool);
+	if (ret.func != FFA_SUCCESS_32) {
+		goto out_free_fragment;
+	}
+	memory_region = share_state->memory_region;
+
+	if (memory_region->receivers[0].receiver_permissions.receiver ==
+	    HF_TEE_VM_ID) {
+		dlog_error(
+			"Got hypervisor-allocated handle for memory send to "
+			"TEE. This should never happen, and indicates a bug in "
+			"EL3 code.\n");
+		ret = ffa_error(FFA_INVALID_PARAMETERS);
+		goto out_free_fragment;
+	}
+
+	/* Add this fragment. */
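+	/* Continuation fragments carry constituent entries only, no header. */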
+	share_state->fragments[share_state->fragment_count] = fragment;
+	share_state->fragment_constituent_counts[share_state->fragment_count] =
+		fragment_length / sizeof(struct ffa_memory_region_constituent);
+	share_state->fragment_count++;
+
+	/* Check whether the memory send operation is now ready to complete. */
+	if (share_state_sending_complete(share_states, share_state)) {
+		ret = ffa_memory_send_complete(from_locked, share_states,
+					       share_state, page_pool);
+	} else {
+		ret = (struct ffa_value){
+			.func = FFA_MEM_FRAG_RX_32,
+			.arg1 = (uint32_t)handle,
+			.arg2 = (uint32_t)(handle >> 32),
+			.arg3 = share_state_next_fragment_offset(share_states,
+								 share_state)};
+	}
+	goto out;
+
+out_free_fragment:
+	mpool_free(page_pool, fragment);
 
 out:
-	mpool_free(page_pool, memory_region);
+	share_states_unlock(&share_states);
 	return ret;
 }
 
+/**
+ * Continues an operation to donate, lend or share memory to the TEE VM. If this
+ * is the last fragment then checks that the transition is valid for the type of
+ * memory sending operation and updates the stage-2 page tables of the sender.
+ *
+ * Assumes that the caller has already found and locked the sender VM and copied
+ * the fragment from the sender's TX buffer to a freshly allocated page from
+ * Hafnium's internal pool.
+ *
+ * This function takes ownership of the `fragment` passed in and will free it
+ * when necessary; it must not be freed by the caller.
+ */
+struct ffa_value ffa_memory_tee_send_continue(struct vm_locked from_locked,
+					      struct vm_locked to_locked,
+					      void *fragment,
+					      uint32_t fragment_length,
+					      ffa_memory_handle_t handle,
+					      struct mpool *page_pool)
+{
+	struct share_states_locked share_states = share_states_lock();
+	struct ffa_memory_share_state *share_state;
+	struct ffa_value ret;
+	struct ffa_memory_region *memory_region;
+
+	ret = ffa_memory_send_continue_validate(share_states, handle,
+						&share_state,
+						from_locked.vm->id, page_pool);
+	if (ret.func != FFA_SUCCESS_32) {
+		goto out_free_fragment;
+	}
+	memory_region = share_state->memory_region;
+
+	if (memory_region->receivers[0].receiver_permissions.receiver !=
+	    HF_TEE_VM_ID) {
+		dlog_error(
+			"Got SPM-allocated handle for memory send to non-TEE "
+			"VM. This should never happen, and indicates a bug.\n");
+		ret = ffa_error(FFA_INVALID_PARAMETERS);
+		goto out_free_fragment;
+	}
+
+	if (to_locked.vm->mailbox.state != MAILBOX_STATE_EMPTY ||
+	    to_locked.vm->mailbox.recv == NULL) {
+		/*
+		 * If the TEE RX buffer is not available, tell the sender to
+		 * retry by returning the current offset again.
+		 */
+		ret = (struct ffa_value){
+			.func = FFA_MEM_FRAG_RX_32,
+			.arg1 = (uint32_t)handle,
+			.arg2 = (uint32_t)(handle >> 32),
+			.arg3 = share_state_next_fragment_offset(share_states,
+								 share_state),
+		};
+		goto out_free_fragment;
+	}
+
+	/* Add this fragment. */
+	share_state->fragments[share_state->fragment_count] = fragment;
+	share_state->fragment_constituent_counts[share_state->fragment_count] =
+		fragment_length / sizeof(struct ffa_memory_region_constituent);
+	share_state->fragment_count++;
+
+	/* Check whether the memory send operation is now ready to complete. */
+	if (share_state_sending_complete(share_states, share_state)) {
+		ret = ffa_memory_send_complete(from_locked, share_states,
+					       share_state, page_pool);
+
+		if (ret.func == FFA_SUCCESS_32) {
+			/*
+			 * Forward final fragment on to the TEE so that
+			 * it can complete the memory sending operation.
+			 */
+			ret = memory_send_continue_tee_forward(
+				to_locked, from_locked.vm->id, fragment,
+				fragment_length, handle);
+
+			if (ret.func != FFA_SUCCESS_32) {
+				/*
+				 * The error will be passed on to the caller,
+				 * but log it here too.
+				 */
+				dlog_verbose(
+					"TEE didn't successfully complete "
+					"memory send operation; returned %#x "
+					"(%d).\n",
+					ret.func, ret.arg2);
+			}
+			/* Free share state. */
+			share_state_free(share_states, share_state, page_pool);
+		} else {
+			/* Abort sending to TEE. */
+			struct ffa_value tee_ret =
+				arch_tee_call((struct ffa_value){
+					.func = FFA_MEM_RECLAIM_32,
+					.arg1 = (uint32_t)handle,
+					.arg2 = (uint32_t)(handle >> 32)});
+
+			if (tee_ret.func != FFA_SUCCESS_32) {
+				/*
+				 * Nothing we can do if TEE doesn't abort
+				 * properly, just log it.
+				 */
+				dlog_verbose(
+					"TEE didn't successfully abort failed "
+					"memory send operation; returned %#x "
+					"(%d).\n",
+					tee_ret.func, tee_ret.arg2);
+			}
+			/*
+			 * We don't need to free the share state in this case
+			 * because ffa_memory_send_complete does that already.
+			 */
+		}
+	} else {
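+		/*
+		 * Not the last fragment yet: forward it to the TEE and relay
+		 * its request for the next fragment back to the sender.
+		 */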
+		uint32_t next_fragment_offset =
+			share_state_next_fragment_offset(share_states,
+							 share_state);
+
+		ret = memory_send_continue_tee_forward(
+			to_locked, from_locked.vm->id, fragment,
+			fragment_length, handle);
+
+		if (ret.func != FFA_MEM_FRAG_RX_32 ||
+		    ffa_frag_handle(ret) != handle ||
+		    ret.arg3 != next_fragment_offset ||
+		    ffa_frag_sender(ret) != from_locked.vm->id) {
+			dlog_verbose(
+				"Got unexpected result from forwarding "
+				"FFA_MEM_FRAG_TX to TEE: %#x (handle %#x, "
+				"offset %d, sender %d); expected "
+				"FFA_MEM_FRAG_RX (handle %#x, offset %d, "
+				"sender %d).\n",
+				ret.func, ffa_frag_handle(ret), ret.arg3,
+				ffa_frag_sender(ret), handle,
+				next_fragment_offset, from_locked.vm->id);
+			/* Free share state. */
+			share_state_free(share_states, share_state, page_pool);
+			ret = ffa_error(FFA_INVALID_PARAMETERS);
+			goto out;
+		}
+
+		ret = (struct ffa_value){.func = FFA_MEM_FRAG_RX_32,
+					 .arg1 = (uint32_t)handle,
+					 .arg2 = (uint32_t)(handle >> 32),
+					 .arg3 = next_fragment_offset};
+	}
+	goto out;
+
+out_free_fragment:
+	mpool_free(page_pool, fragment);
+
+out:
+	share_states_unlock(&share_states);
+	return ret;
+}
+
+/** Clean up after the receiver has finished retrieving a memory region. */
+static void ffa_memory_retrieve_complete(
+	struct share_states_locked share_states,
+	struct ffa_memory_share_state *share_state, struct mpool *page_pool)
+{
+	if (share_state->share_func == FFA_MEM_DONATE_32) {
+		/*
+		 * Memory that has been donated can't be relinquished,
+		 * so no need to keep the share state around.
+		 */
+		share_state_free(share_states, share_state, page_pool);
+		dlog_verbose("Freed share state for donate.\n");
+	}
+}
+
 struct ffa_value ffa_memory_retrieve(struct vm_locked to_locked,
 				     struct ffa_memory_region *retrieve_request,
 				     uint32_t retrieve_request_length,
@@ -1408,11 +2037,12 @@
 	enum ffa_instruction_access requested_instruction_access;
 	ffa_memory_access_permissions_t permissions;
 	uint32_t memory_to_attributes;
-	struct ffa_composite_memory_region *composite;
 	struct share_states_locked share_states;
 	struct ffa_memory_share_state *share_state;
 	struct ffa_value ret;
-	uint32_t response_length;
+	struct ffa_composite_memory_region *composite;
+	uint32_t total_length;
+	uint32_t fragment_length;
 
 	dump_share_states();
 
@@ -1507,7 +2137,16 @@
 		goto out;
 	}
 
-	if (share_state->retrieved[0]) {
+	if (!share_state->sending_complete) {
+		dlog_verbose(
+			"Memory with handle %#x not fully sent, can't "
+			"retrieve.\n",
+			handle);
+		ret = ffa_error(FFA_INVALID_PARAMETERS);
+		goto out;
+	}
+
+	if (share_state->retrieved_fragment_count[0] != 0) {
 		dlog_verbose("Memory with handle %#x already retrieved.\n",
 			     handle);
 		ret = ffa_error(FFA_DENIED);
@@ -1599,10 +2238,10 @@
 	}
 	memory_to_attributes = ffa_memory_permissions_to_mode(permissions);
 
-	composite = ffa_memory_region_get_composite(memory_region, 0);
 	ret = ffa_retrieve_check_update(
-		to_locked, composite->constituents,
-		composite->constituent_count, memory_to_attributes,
+		to_locked, share_state->fragments,
+		share_state->fragment_constituent_counts,
+		share_state->fragment_count, memory_to_attributes,
 		share_state->share_func, false, page_pool);
 	if (ret.func != FFA_SUCCESS_32) {
 		goto out;
@@ -1613,30 +2252,149 @@
 	 * must be done before the share_state is (possibly) freed.
 	 */
 	/* TODO: combine attributes from sender and request. */
-	response_length = ffa_retrieved_memory_region_init(
+	composite = ffa_memory_region_get_composite(memory_region, 0);
+	/*
+	 * Constituents which we received in the first fragment should always
+	 * fit in the first fragment we are sending, because the header is the
+	 * same size in both cases and we have a fixed message buffer size. So
+	 * `ffa_retrieved_memory_region_init` should never fail.
+	 */
+	CHECK(ffa_retrieved_memory_region_init(
 		to_locked.vm->mailbox.recv, HF_MAILBOX_SIZE,
 		memory_region->sender, memory_region->attributes,
 		memory_region->flags, handle, to_locked.vm->id, permissions,
-		composite->constituents, composite->constituent_count);
-	to_locked.vm->mailbox.recv_size = response_length;
+		composite->page_count, composite->constituent_count,
+		share_state->fragments[0],
+		share_state->fragment_constituent_counts[0], &total_length,
+		&fragment_length));
+	to_locked.vm->mailbox.recv_size = fragment_length;
 	to_locked.vm->mailbox.recv_sender = HF_HYPERVISOR_VM_ID;
 	to_locked.vm->mailbox.recv_func = FFA_MEM_RETRIEVE_RESP_32;
 	to_locked.vm->mailbox.state = MAILBOX_STATE_READ;
 
-	if (share_state->share_func == FFA_MEM_DONATE_32) {
-		/*
-		 * Memory that has been donated can't be relinquished, so no
-		 * need to keep the share state around.
-		 */
-		share_state_free(share_states, share_state, page_pool);
-		dlog_verbose("Freed share state for donate.\n");
-	} else {
-		share_state->retrieved[0] = true;
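+	/* The receiver has now retrieved the first fragment. */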
+	share_state->retrieved_fragment_count[0] = 1;
+	if (share_state->retrieved_fragment_count[0] ==
+	    share_state->fragment_count) {
+		ffa_memory_retrieve_complete(share_states, share_state,
+					     page_pool);
 	}
 
 	ret = (struct ffa_value){.func = FFA_MEM_RETRIEVE_RESP_32,
-				 .arg1 = response_length,
-				 .arg2 = response_length};
+				 .arg1 = total_length,
+				 .arg2 = fragment_length};
+
+out:
+	share_states_unlock(&share_states);
+	dump_share_states();
+	return ret;
+}
+
+struct ffa_value ffa_memory_retrieve_continue(struct vm_locked to_locked,
+					      ffa_memory_handle_t handle,
+					      uint32_t fragment_offset,
+					      struct mpool *page_pool)
+{
+	struct ffa_memory_region *memory_region;
+	struct share_states_locked share_states;
+	struct ffa_memory_share_state *share_state;
+	struct ffa_value ret;
+	uint32_t fragment_index;
+	uint32_t retrieved_constituents_count;
+	uint32_t i;
+	uint32_t expected_fragment_offset;
+	uint32_t remaining_constituent_count;
+	uint32_t fragment_length;
+
+	dump_share_states();
+
+	share_states = share_states_lock();
+	if (!get_share_state(share_states, handle, &share_state)) {
+		dlog_verbose("Invalid handle %#x for FFA_MEM_FRAG_RX.\n",
+			     handle);
+		ret = ffa_error(FFA_INVALID_PARAMETERS);
+		goto out;
+	}
+
+	memory_region = share_state->memory_region;
+	CHECK(memory_region != NULL);
+
+	if (memory_region->receivers[0].receiver_permissions.receiver !=
+	    to_locked.vm->id) {
+		dlog_verbose(
+			"Caller of FFA_MEM_FRAG_RX (%d) is not receiver (%d) "
+			"of handle %#x.\n",
+			to_locked.vm->id,
+			memory_region->receivers[0]
+				.receiver_permissions.receiver,
+			handle);
+		ret = ffa_error(FFA_INVALID_PARAMETERS);
+		goto out;
+	}
+
+	if (!share_state->sending_complete) {
+		dlog_verbose(
+			"Memory with handle %#x not fully sent, can't "
+			"retrieve.\n",
+			handle);
+		ret = ffa_error(FFA_INVALID_PARAMETERS);
+		goto out;
+	}
+
+	if (share_state->retrieved_fragment_count[0] == 0 ||
+	    share_state->retrieved_fragment_count[0] >=
+		    share_state->fragment_count) {
+		dlog_verbose(
+			"Retrieval of memory with handle %#x not yet started "
+			"or already completed (%d/%d fragments retrieved).\n",
+			handle, share_state->retrieved_fragment_count[0],
+			share_state->fragment_count);
+		ret = ffa_error(FFA_INVALID_PARAMETERS);
+		goto out;
+	}
+
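+	/* The next fragment to deliver is the first one not yet retrieved. */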
+	fragment_index = share_state->retrieved_fragment_count[0];
+
+	/*
+	 * Check that the given fragment offset is correct by counting how many
+	 * constituents were in the fragments previously sent.
+	 */
+	retrieved_constituents_count = 0;
+	for (i = 0; i < fragment_index; ++i) {
+		retrieved_constituents_count +=
+			share_state->fragment_constituent_counts[i];
+	}
+	expected_fragment_offset =
+		ffa_composite_constituent_offset(memory_region, 0) +
+		retrieved_constituents_count *
+			sizeof(struct ffa_memory_region_constituent);
+	if (fragment_offset != expected_fragment_offset) {
+		dlog_verbose("Fragment offset was %d but expected %d.\n",
+			     fragment_offset, expected_fragment_offset);
+		ret = ffa_error(FFA_INVALID_PARAMETERS);
+		goto out;
+	}
+
+	remaining_constituent_count = ffa_memory_fragment_init(
+		to_locked.vm->mailbox.recv, HF_MAILBOX_SIZE,
+		share_state->fragments[fragment_index],
+		share_state->fragment_constituent_counts[fragment_index],
+		&fragment_length);
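+	/*
+	 * Each stored fragment arrived in a single mailbox message, so it
+	 * must also fit when sent back out.
+	 */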
+	CHECK(remaining_constituent_count == 0);
+	to_locked.vm->mailbox.recv_size = fragment_length;
+	to_locked.vm->mailbox.recv_sender = HF_HYPERVISOR_VM_ID;
+	to_locked.vm->mailbox.recv_func = FFA_MEM_FRAG_TX_32;
+	to_locked.vm->mailbox.state = MAILBOX_STATE_READ;
+	share_state->retrieved_fragment_count[0]++;
+	if (share_state->retrieved_fragment_count[0] ==
+	    share_state->fragment_count) {
+		ffa_memory_retrieve_complete(share_states, share_state,
+					     page_pool);
+	}
+
+	ret = (struct ffa_value){.func = FFA_MEM_FRAG_TX_32,
+				 .arg1 = (uint32_t)handle,
+				 .arg2 = (uint32_t)(handle >> 32),
+				 .arg3 = fragment_length};
 
 out:
 	share_states_unlock(&share_states);
@@ -1653,7 +2411,6 @@
 	struct ffa_memory_share_state *share_state;
 	struct ffa_memory_region *memory_region;
 	bool clear;
-	struct ffa_composite_memory_region *composite;
 	struct ffa_value ret;
 
 	if (relinquish_request->endpoint_count != 1) {
@@ -1682,6 +2439,15 @@
 		goto out;
 	}
 
+	if (!share_state->sending_complete) {
+		dlog_verbose(
+			"Memory with handle %#x not fully sent, can't "
+			"relinquish.\n",
+			handle);
+		ret = ffa_error(FFA_INVALID_PARAMETERS);
+		goto out;
+	}
+
 	memory_region = share_state->memory_region;
 	CHECK(memory_region != NULL);
 
@@ -1697,9 +2463,10 @@
 		goto out;
 	}
 
-	if (!share_state->retrieved[0]) {
+	if (share_state->retrieved_fragment_count[0] !=
+	    share_state->fragment_count) {
 		dlog_verbose(
-			"Memory with handle %#x not yet retrieved, can't "
+			"Memory with handle %#x not yet fully retrieved, can't "
 			"relinquish.\n",
 			handle);
 		ret = ffa_error(FFA_INVALID_PARAMETERS);
@@ -1718,17 +2485,17 @@
 		goto out;
 	}
 
-	composite = ffa_memory_region_get_composite(memory_region, 0);
-	ret = ffa_relinquish_check_update(from_locked, composite->constituents,
-					  composite->constituent_count,
-					  page_pool, clear);
+	ret = ffa_relinquish_check_update(
+		from_locked, share_state->fragments,
+		share_state->fragment_constituent_counts,
+		share_state->fragment_count, page_pool, clear);
 
 	if (ret.func == FFA_SUCCESS_32) {
 		/*
 		 * Mark memory handle as not retrieved, so it can be reclaimed
 		 * (or retrieved again).
 		 */
-		share_state->retrieved[0] = false;
+		share_state->retrieved_fragment_count[0] = 0;
 	}
 
 out:
@@ -1743,13 +2510,13 @@
  * associated with the handle.
  */
 struct ffa_value ffa_memory_reclaim(struct vm_locked to_locked,
-				    ffa_memory_handle_t handle, bool clear,
+				    ffa_memory_handle_t handle,
+				    ffa_memory_region_flags_t flags,
 				    struct mpool *page_pool)
 {
 	struct share_states_locked share_states;
 	struct ffa_memory_share_state *share_state;
 	struct ffa_memory_region *memory_region;
-	struct ffa_composite_memory_region *composite;
 	uint32_t memory_to_attributes = MM_MODE_R | MM_MODE_W | MM_MODE_X;
 	struct ffa_value ret;
 
@@ -1775,7 +2542,16 @@
 		goto out;
 	}
 
-	if (share_state->retrieved[0]) {
+	if (!share_state->sending_complete) {
+		dlog_verbose(
+			"Memory with handle %#x not fully sent, can't "
+			"reclaim.\n",
+			handle);
+		ret = ffa_error(FFA_INVALID_PARAMETERS);
+		goto out;
+	}
+
+	if (share_state->retrieved_fragment_count[0] != 0) {
 		dlog_verbose(
 			"Tried to reclaim memory handle %#x that has not been "
 			"relinquished.\n",
@@ -1784,11 +2560,11 @@
 		goto out;
 	}
 
-	composite = ffa_memory_region_get_composite(memory_region, 0);
-	ret = ffa_retrieve_check_update(to_locked, composite->constituents,
-					composite->constituent_count,
-					memory_to_attributes,
-					FFA_MEM_RECLAIM_32, clear, page_pool);
+	ret = ffa_retrieve_check_update(
+		to_locked, share_state->fragments,
+		share_state->fragment_constituent_counts,
+		share_state->fragment_count, memory_to_attributes,
+		FFA_MEM_RECLAIM_32, flags & FFA_MEM_RECLAIM_CLEAR, page_pool);
 
 	if (ret.func == FFA_SUCCESS_32) {
 		share_state_free(share_states, share_state, page_pool);
@@ -1801,16 +2577,113 @@
 }
 
 /**
- * Validates that the reclaim transition is allowed for the given memory region
- * and updates the page table of the reclaiming VM.
+ * Validates that the reclaim transition is allowed for the memory region with
+ * the given handle, which was previously shared with the TEE; tells the TEE
+ * to mark it as reclaimed; and updates the page table of the reclaiming VM.
+ *
+ * To do this, information about the memory region is first fetched from the
+ * TEE.
  */
 struct ffa_value ffa_memory_tee_reclaim(struct vm_locked to_locked,
+					struct vm_locked from_locked,
 					ffa_memory_handle_t handle,
-					struct ffa_memory_region *memory_region,
-					bool clear, struct mpool *page_pool)
+					ffa_memory_region_flags_t flags,
+					struct mpool *page_pool)
 {
-	uint32_t memory_to_attributes = MM_MODE_R | MM_MODE_W | MM_MODE_X;
+	uint32_t request_length = ffa_memory_lender_retrieve_request_init(
+		from_locked.vm->mailbox.recv, handle, to_locked.vm->id);
+	struct ffa_value tee_ret;
+	uint32_t length;
+	uint32_t fragment_length;
+	uint32_t fragment_offset;
+	struct ffa_memory_region *memory_region;
 	struct ffa_composite_memory_region *composite;
+	uint32_t memory_to_attributes = MM_MODE_R | MM_MODE_W | MM_MODE_X;
+
+	CHECK(request_length <= HF_MAILBOX_SIZE);
+	CHECK(from_locked.vm->id == HF_TEE_VM_ID);
+
+	/* Retrieve memory region information from the TEE. */
+	tee_ret = arch_tee_call(
+		(struct ffa_value){.func = FFA_MEM_RETRIEVE_REQ_32,
+				   .arg1 = request_length,
+				   .arg2 = request_length});
+	if (tee_ret.func == FFA_ERROR_32) {
+		dlog_verbose("Got error %d from EL3.\n", tee_ret.arg2);
+		return tee_ret;
+	}
+	if (tee_ret.func != FFA_MEM_RETRIEVE_RESP_32) {
+		dlog_verbose(
+			"Got %#x from EL3, expected FFA_MEM_RETRIEVE_RESP.\n",
+			tee_ret.func);
+		return ffa_error(FFA_INVALID_PARAMETERS);
+	}
+
+	length = tee_ret.arg1;
+	fragment_length = tee_ret.arg2;
+
+	if (fragment_length > HF_MAILBOX_SIZE || fragment_length > length ||
+	    length > sizeof(tee_retrieve_buffer)) {
+		dlog_verbose("Invalid fragment length %d/%d (max %d/%d).\n",
+			     fragment_length, length, HF_MAILBOX_SIZE,
+			     sizeof(tee_retrieve_buffer));
+		return ffa_error(FFA_INVALID_PARAMETERS);
+	}
+
+	/*
+	 * Copy the first fragment of the memory region descriptor to an
+	 * internal buffer.
+	 */
+	memcpy_s(tee_retrieve_buffer, sizeof(tee_retrieve_buffer),
+		 from_locked.vm->mailbox.send, fragment_length);
+
+	/* Fetch the remaining fragments into the same buffer. */
+	fragment_offset = fragment_length;
+	while (fragment_offset < length) {
+		tee_ret = arch_tee_call(
+			(struct ffa_value){.func = FFA_MEM_FRAG_RX_32,
+					   .arg1 = (uint32_t)handle,
+					   .arg2 = (uint32_t)(handle >> 32),
+					   .arg3 = fragment_offset});
+		if (tee_ret.func != FFA_MEM_FRAG_TX_32) {
+			dlog_verbose(
+				"Got %#x (%d) from TEE in response to "
+				"FFA_MEM_FRAG_RX, expected FFA_MEM_FRAG_TX.\n",
+				tee_ret.func, tee_ret.arg2);
+			return tee_ret;
+		}
+		if (ffa_frag_handle(tee_ret) != handle) {
+			dlog_verbose(
+				"Got FFA_MEM_FRAG_TX for unexpected handle %#x "
+				"in response to FFA_MEM_FRAG_RX for handle "
+				"%#x.\n",
+				ffa_frag_handle(tee_ret), handle);
+			return ffa_error(FFA_INVALID_PARAMETERS);
+		}
+		if (ffa_frag_sender(tee_ret) != 0) {
+			dlog_verbose(
+				"Got FFA_MEM_FRAG_TX with unexpected sender %d "
+				"(expected 0).\n",
+				ffa_frag_sender(tee_ret));
+			return ffa_error(FFA_INVALID_PARAMETERS);
+		}
+		fragment_length = tee_ret.arg3;
+		if (fragment_length > HF_MAILBOX_SIZE ||
+		    fragment_offset + fragment_length > length) {
+			dlog_verbose(
+				"Invalid fragment length %d at offset %d (max "
+				"%d).\n",
+				fragment_length, fragment_offset,
+				HF_MAILBOX_SIZE);
+			return ffa_error(FFA_INVALID_PARAMETERS);
+		}
+		memcpy_s(tee_retrieve_buffer + fragment_offset,
+			 sizeof(tee_retrieve_buffer) - fragment_offset,
+			 from_locked.vm->mailbox.send, fragment_length);
+
+		fragment_offset += fragment_length;
+	}
+
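+	/* The full memory region descriptor is now assembled in the buffer. */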
+	memory_region = (struct ffa_memory_region *)tee_retrieve_buffer;
 
 	if (memory_region->receiver_count != 1) {
 		/* Only one receiver supported by Hafnium for now. */
@@ -1818,7 +2691,7 @@
 			"Multiple recipients not supported (got %d, expected "
 			"1).\n",
 			memory_region->receiver_count);
-		return ffa_error(FFA_NOT_SUPPORTED);
+		return ffa_error(FFA_INVALID_PARAMETERS);
 	}
 
 	if (memory_region->handle != handle) {
@@ -1841,11 +2714,12 @@
 	composite = ffa_memory_region_get_composite(memory_region, 0);
 
 	/*
-	 * Forward the request to the TEE and then map the memory back into the
-	 * caller's stage-2 page table.
+	 * Validate that the reclaim transition is allowed for the given memory
+	 * region, forward the request to the TEE and then map the memory back
+	 * into the caller's stage-2 page table.
 	 */
 	return ffa_tee_reclaim_check_update(
 		to_locked, handle, composite->constituents,
-		composite->constituent_count, memory_to_attributes, clear,
-		page_pool);
+		composite->constituent_count, memory_to_attributes,
+		flags & FFA_MEM_RECLAIM_CLEAR, page_pool);
 }
diff --git a/test/inc/test/vmapi/ffa.h b/test/inc/test/vmapi/ffa.h
index c01008e..8a43349 100644
--- a/test/inc/test/vmapi/ffa.h
+++ b/test/inc/test/vmapi/ffa.h
@@ -40,9 +40,20 @@
 	enum ffa_data_access retrieve_data_access,
 	enum ffa_instruction_access send_instruction_access,
 	enum ffa_instruction_access retrieve_instruction_access);
-ffa_vm_id_t retrieve_memory_from_message(void *recv_buf, void *send_buf,
-					 struct ffa_value msg_ret,
-					 ffa_memory_handle_t *handle);
+ffa_memory_handle_t send_memory_and_retrieve_request_force_fragmented(
+	uint32_t share_func, void *tx_buffer, ffa_vm_id_t sender,
+	ffa_vm_id_t recipient,
+	struct ffa_memory_region_constituent constituents[],
+	uint32_t constituent_count, ffa_memory_region_flags_t flags,
+	enum ffa_data_access send_data_access,
+	enum ffa_data_access retrieve_data_access,
+	enum ffa_instruction_access send_instruction_access,
+	enum ffa_instruction_access retrieve_instruction_access);
+ffa_vm_id_t retrieve_memory_from_message(
+	void *recv_buf, void *send_buf, struct ffa_value msg_ret,
+	ffa_memory_handle_t *handle,
+	struct ffa_memory_region *memory_region_ret,
+	size_t memory_region_max_size);
 ffa_vm_id_t retrieve_memory_from_message_expect_fail(void *recv_buf,
 						     void *send_buf,
 						     struct ffa_value msg_ret,
diff --git a/test/vmapi/common/ffa.c b/test/vmapi/common/ffa.c
index a313267..b1c9ccb 100644
--- a/test/vmapi/common/ffa.c
+++ b/test/vmapi/common/ffa.c
@@ -56,35 +56,75 @@
 	enum ffa_instruction_access send_instruction_access,
 	enum ffa_instruction_access retrieve_instruction_access)
 {
+	uint32_t total_length;
+	uint32_t fragment_length;
 	uint32_t msg_size;
 	struct ffa_value ret;
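+	/* Sentinel until the first FFA_MEM_FRAG_RX reports the real handle. */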
+	const ffa_memory_handle_t INVALID_FRAGMENT_HANDLE = 0xffffffffffffffff;
+	ffa_memory_handle_t fragment_handle = INVALID_FRAGMENT_HANDLE;
 	ffa_memory_handle_t handle;
+	uint32_t remaining_constituent_count;
+	uint32_t sent_length;
 
-	/* Send the memory. */
-	msg_size = ffa_memory_region_init(
-		tx_buffer, sender, recipient, constituents, constituent_count,
-		0, flags, send_data_access, send_instruction_access,
-		FFA_MEMORY_NORMAL_MEM, FFA_MEMORY_CACHE_WRITE_BACK,
-		FFA_MEMORY_OUTER_SHAREABLE);
+	/* Send the first fragment of the memory. */
+	remaining_constituent_count = ffa_memory_region_init(
+		tx_buffer, HF_MAILBOX_SIZE, sender, recipient, constituents,
+		constituent_count, 0, flags, send_data_access,
+		send_instruction_access, FFA_MEMORY_NORMAL_MEM,
+		FFA_MEMORY_CACHE_WRITE_BACK, FFA_MEMORY_OUTER_SHAREABLE,
+		&total_length, &fragment_length);
+	if (remaining_constituent_count == 0) {
+		EXPECT_EQ(total_length, fragment_length);
+	}
 	switch (share_func) {
 	case FFA_MEM_DONATE_32:
-		ret = ffa_mem_donate(msg_size, msg_size);
+		ret = ffa_mem_donate(total_length, fragment_length);
 		break;
 	case FFA_MEM_LEND_32:
-		ret = ffa_mem_lend(msg_size, msg_size);
+		ret = ffa_mem_lend(total_length, fragment_length);
 		break;
 	case FFA_MEM_SHARE_32:
-		ret = ffa_mem_share(msg_size, msg_size);
+		ret = ffa_mem_share(total_length, fragment_length);
 		break;
 	default:
 		FAIL("Invalid share_func %#x.\n", share_func);
 		/* Never reached, but needed to keep clang-analyser happy. */
 		return 0;
 	}
+	sent_length = fragment_length;
+
+	/* Send the remaining fragments. */
+	while (remaining_constituent_count != 0) {
+		dlog_verbose("%d constituents left to send.\n",
+			     remaining_constituent_count);
+		EXPECT_EQ(ret.func, FFA_MEM_FRAG_RX_32);
+		if (fragment_handle == INVALID_FRAGMENT_HANDLE) {
+			fragment_handle = ffa_frag_handle(ret);
+		} else {
+			EXPECT_EQ(ffa_frag_handle(ret), fragment_handle);
+		}
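+		/* The requested offset must match the bytes sent so far. */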
+		EXPECT_EQ(ret.arg3, sent_length);
+		/* Sender MBZ at virtual instance. */
+		EXPECT_EQ(ffa_frag_sender(ret), 0);
+
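+		/* Pack the next fragment from the first unsent constituent. */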
+		remaining_constituent_count = ffa_memory_fragment_init(
+			tx_buffer, HF_MAILBOX_SIZE,
+			constituents + constituent_count -
+				remaining_constituent_count,
+			remaining_constituent_count, &fragment_length);
+
+		ret = ffa_mem_frag_tx(fragment_handle, fragment_length);
+		sent_length += fragment_length;
+	}
+
+	EXPECT_EQ(sent_length, total_length);
 	EXPECT_EQ(ret.func, FFA_SUCCESS_32);
 	handle = ffa_mem_success_handle(ret);
 	EXPECT_EQ(handle & FFA_MEMORY_HANDLE_ALLOCATOR_MASK,
 		  FFA_MEMORY_HANDLE_ALLOCATOR_HYPERVISOR);
+	if (fragment_handle != INVALID_FRAGMENT_HANDLE) {
+		EXPECT_EQ(handle, fragment_handle);
+	}
 
 	/*
 	 * Send the appropriate retrieve request to the VM so that it can use it
@@ -95,6 +135,87 @@
 		retrieve_data_access, retrieve_instruction_access,
 		FFA_MEMORY_NORMAL_MEM, FFA_MEMORY_CACHE_WRITE_BACK,
 		FFA_MEMORY_OUTER_SHAREABLE);
+	EXPECT_LE(msg_size, HF_MAILBOX_SIZE);
+	EXPECT_EQ(ffa_msg_send(sender, recipient, msg_size, 0).func,
+		  FFA_SUCCESS_32);
+
+	return handle;
+}
+
+/*
+ * Helper function to send memory to a VM and then send a message with the
+ * retrieve request it needs to retrieve it, forcing the memory send to be
+ * split into at least two fragments even though it would fit in one.
+ */
+ffa_memory_handle_t send_memory_and_retrieve_request_force_fragmented(
+	uint32_t share_func, void *tx_buffer, ffa_vm_id_t sender,
+	ffa_vm_id_t recipient,
+	struct ffa_memory_region_constituent constituents[],
+	uint32_t constituent_count, ffa_memory_region_flags_t flags,
+	enum ffa_data_access send_data_access,
+	enum ffa_data_access retrieve_data_access,
+	enum ffa_instruction_access send_instruction_access,
+	enum ffa_instruction_access retrieve_instruction_access)
+{
+	uint32_t total_length;
+	uint32_t fragment_length;
+	uint32_t msg_size;
+	uint32_t remaining_constituent_count;
+	struct ffa_value ret;
+	ffa_memory_handle_t handle;
+
+	/* Send everything except the last constituent in the first fragment. */
+	remaining_constituent_count = ffa_memory_region_init(
+		tx_buffer, HF_MAILBOX_SIZE, sender, recipient, constituents,
+		constituent_count, 0, flags, send_data_access,
+		send_instruction_access, FFA_MEMORY_NORMAL_MEM,
+		FFA_MEMORY_CACHE_WRITE_BACK, FFA_MEMORY_OUTER_SHAREABLE,
+		&total_length, &fragment_length);
+	EXPECT_EQ(remaining_constituent_count, 0);
+	EXPECT_EQ(total_length, fragment_length);
+	/* Don't include the last constituent in the first fragment. */
+	fragment_length -= sizeof(struct ffa_memory_region_constituent);
+	switch (share_func) {
+	case FFA_MEM_DONATE_32:
+		ret = ffa_mem_donate(total_length, fragment_length);
+		break;
+	case FFA_MEM_LEND_32:
+		ret = ffa_mem_lend(total_length, fragment_length);
+		break;
+	case FFA_MEM_SHARE_32:
+		ret = ffa_mem_share(total_length, fragment_length);
+		break;
+	default:
+		FAIL("Invalid share_func %#x.\n", share_func);
+		/* Never reached, but needed to keep clang-analyser happy. */
+		return 0;
+	}
+	EXPECT_EQ(ret.func, FFA_MEM_FRAG_RX_32);
+	EXPECT_EQ(ret.arg3, fragment_length);
+	/* Sender MBZ at virtual instance. */
+	EXPECT_EQ(ffa_frag_sender(ret), 0);
+
+	handle = ffa_frag_handle(ret);
+
+	/* Send the last constituent in a separate fragment. */
+	remaining_constituent_count = ffa_memory_fragment_init(
+		tx_buffer, HF_MAILBOX_SIZE,
+		&constituents[constituent_count - 1], 1, &fragment_length);
+	EXPECT_EQ(remaining_constituent_count, 0);
+	ret = ffa_mem_frag_tx(handle, fragment_length);
+	EXPECT_EQ(ret.func, FFA_SUCCESS_32);
+	EXPECT_EQ(ffa_mem_success_handle(ret), handle);
+
+	/*
+	 * Send the appropriate retrieve request to the VM so that it can use it
+	 * to retrieve the memory.
+	 */
+	msg_size = ffa_memory_retrieve_request_init(
+		tx_buffer, handle, sender, recipient, 0, 0,
+		retrieve_data_access, retrieve_instruction_access,
+		FFA_MEMORY_NORMAL_MEM, FFA_MEMORY_CACHE_WRITE_BACK,
+		FFA_MEMORY_OUTER_SHAREABLE);
+	EXPECT_LE(msg_size, HF_MAILBOX_SIZE);
 	EXPECT_EQ(ffa_msg_send(sender, recipient, msg_size, 0).func,
 		  FFA_SUCCESS_32);
 
@@ -103,36 +224,88 @@
 
 /*
  * Use the retrieve request from the receive buffer to retrieve a memory region
- * which has been sent to us. Returns the sender, and the handle via a return
+ * which has been sent to us. Copies all the fragments into the provided
+ * buffer, if one is given, and checks that the total length of all fragments
+ * is no more than `memory_region_max_size`. Returns the sender, and the
+ * handle via a return
  * parameter.
  */
-ffa_vm_id_t retrieve_memory_from_message(void *recv_buf, void *send_buf,
-					 struct ffa_value msg_ret,
-					 ffa_memory_handle_t *handle)
+ffa_vm_id_t retrieve_memory_from_message(
+	void *recv_buf, void *send_buf, struct ffa_value msg_ret,
+	ffa_memory_handle_t *handle,
+	struct ffa_memory_region *memory_region_ret,
+	size_t memory_region_max_size)
 {
 	uint32_t msg_size;
 	struct ffa_value ret;
 	struct ffa_memory_region *memory_region;
 	ffa_vm_id_t sender;
+	struct ffa_memory_region *retrieve_request;
+	ffa_memory_handle_t handle_;
+	uint32_t fragment_length;
+	uint32_t total_length;
+	uint32_t fragment_offset;
 
 	EXPECT_EQ(msg_ret.func, FFA_MSG_SEND_32);
 	msg_size = ffa_msg_send_size(msg_ret);
 	sender = ffa_msg_send_sender(msg_ret);
 
+	retrieve_request = (struct ffa_memory_region *)recv_buf;
+	handle_ = retrieve_request->handle;
 	if (handle != NULL) {
-		struct ffa_memory_region *retrieve_request =
-			(struct ffa_memory_region *)recv_buf;
-		*handle = retrieve_request->handle;
+		*handle = handle_;
 	}
 	memcpy_s(send_buf, HF_MAILBOX_SIZE, recv_buf, msg_size);
 	ffa_rx_release();
 	ret = ffa_mem_retrieve_req(msg_size, msg_size);
 	EXPECT_EQ(ret.func, FFA_MEM_RETRIEVE_RESP_32);
+	total_length = ret.arg1;
+	fragment_length = ret.arg2;
+	EXPECT_GE(fragment_length,
+		  sizeof(struct ffa_memory_region) +
+			  sizeof(struct ffa_memory_access) +
+			  sizeof(struct ffa_composite_memory_region));
+	EXPECT_LE(fragment_length, HF_MAILBOX_SIZE);
+	EXPECT_LE(fragment_length, total_length);
 	memory_region = (struct ffa_memory_region *)recv_buf;
 	EXPECT_EQ(memory_region->receiver_count, 1);
 	EXPECT_EQ(memory_region->receivers[0].receiver_permissions.receiver,
 		  hf_vm_get_id());
 
+	/* Copy into the return buffer. */
+	if (memory_region_ret != NULL) {
+		memcpy_s(memory_region_ret, memory_region_max_size,
+			 memory_region, fragment_length);
+	}
+
+	/*
+	 * Release the RX buffer now that we have read everything we need from
+	 * it.
+	 */
+	memory_region = NULL;
+	EXPECT_EQ(ffa_rx_release().func, FFA_SUCCESS_32);
+
+	/* Retrieve the remaining fragments. */
+	fragment_offset = fragment_length;
+	while (fragment_offset < total_length) {
+		ret = ffa_mem_frag_rx(handle_, fragment_offset);
+		EXPECT_EQ(ret.func, FFA_MEM_FRAG_TX_32);
+		EXPECT_EQ(ffa_frag_handle(ret), handle_);
+		/* Sender MBZ at virtual instance. */
+		EXPECT_EQ(ffa_frag_sender(ret), 0);
+		fragment_length = ret.arg3;
+		EXPECT_GT(fragment_length, 0);
+		ASSERT_LE(fragment_offset + fragment_length,
+			  memory_region_max_size);
+		if (memory_region_ret != NULL) {
+			memcpy_s((uint8_t *)memory_region_ret + fragment_offset,
+				 memory_region_max_size - fragment_offset,
+				 recv_buf, fragment_length);
+		}
+		fragment_offset += fragment_length;
+		EXPECT_EQ(ffa_rx_release().func, FFA_SUCCESS_32);
+	}
+	EXPECT_EQ(fragment_offset, total_length);
+
 	return sender;
 }
 
diff --git a/test/vmapi/primary_with_secondaries/memory_sharing.c b/test/vmapi/primary_with_secondaries/memory_sharing.c
index 1b3c1d9..303021c 100644
--- a/test/vmapi/primary_with_secondaries/memory_sharing.c
+++ b/test/vmapi/primary_with_secondaries/memory_sharing.c
@@ -26,7 +26,16 @@
 #include "test/vmapi/exception_handler.h"
 #include "test/vmapi/ffa.h"
 
-alignas(PAGE_SIZE) static uint8_t pages[4 * PAGE_SIZE];
+/*
+ * A number of pages that is large enough that it must take two fragments to
+ * share.
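+ * With one constituent per page, the constituent entries alone fill a whole
+ * page, so adding the descriptor header forces a second fragment.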
+ */
+#define FRAGMENTED_SHARE_PAGE_COUNT \
+	(PAGE_SIZE / sizeof(struct ffa_memory_region_constituent))
+
+alignas(PAGE_SIZE) static uint8_t
+	pages[FRAGMENTED_SHARE_PAGE_COUNT * PAGE_SIZE];
+static uint8_t retrieve_buffer[HF_MAILBOX_SIZE];
 
 /**
  * Helper function to test sending memory in the different configurations.
@@ -72,9 +81,11 @@
 					for (m = 0;
 					     m < ARRAY_SIZE(cacheability);
 					     ++m) {
-						uint32_t msg_size =
+						uint32_t msg_size;
+						EXPECT_EQ(
 							ffa_memory_region_init(
 								mb.send,
+								HF_MAILBOX_SIZE,
 								HF_PRIMARY_VM_ID,
 								vms[i],
 								constituents,
@@ -85,8 +96,10 @@
 									[k],
 								FFA_MEMORY_NORMAL_MEM,
 								cacheability[m],
-								shareability
-									[l]);
+								shareability[l],
+								NULL,
+								&msg_size),
+							0);
 						struct ffa_value ret =
 							send_function(msg_size,
 								      msg_size);
@@ -101,9 +114,11 @@
 					}
 					for (m = 0; m < ARRAY_SIZE(device);
 					     ++m) {
-						uint32_t msg_size =
+						uint32_t msg_size;
+						EXPECT_EQ(
 							ffa_memory_region_init(
 								mb.send,
+								HF_MAILBOX_SIZE,
 								HF_PRIMARY_VM_ID,
 								vms[i],
 								constituents,
@@ -114,8 +129,10 @@
 									[k],
 								FFA_MEMORY_DEVICE_MEM,
 								device[m],
-								shareability
-									[l]);
+								shareability[l],
+								NULL,
+								&msg_size),
+							0);
 						struct ffa_value ret =
 							send_function(msg_size,
 								      msg_size);
@@ -180,12 +197,15 @@
 		if (vms[i] == avoid_vm) {
 			continue;
 		}
-		msg_size = ffa_memory_region_init(
-			mb.send, HF_PRIMARY_VM_ID, vms[i], constituents,
-			constituent_count, 0, 0, FFA_DATA_ACCESS_NOT_SPECIFIED,
-			FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED,
-			FFA_MEMORY_NORMAL_MEM, FFA_MEMORY_CACHE_WRITE_BACK,
-			FFA_MEMORY_OUTER_SHAREABLE);
+		EXPECT_EQ(ffa_memory_region_init(
+				  mb.send, HF_MAILBOX_SIZE, HF_PRIMARY_VM_ID,
+				  vms[i], constituents, constituent_count, 0, 0,
+				  FFA_DATA_ACCESS_NOT_SPECIFIED,
+				  FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED,
+				  FFA_MEMORY_NORMAL_MEM,
+				  FFA_MEMORY_CACHE_WRITE_BACK,
+				  FFA_MEMORY_OUTER_SHAREABLE, NULL, &msg_size),
+			  0);
 		ret = ffa_mem_donate(msg_size, msg_size);
 		EXPECT_EQ(ret.func, FFA_ERROR_32);
 		EXPECT_TRUE(ret.arg2 == FFA_DENIED ||
@@ -363,6 +383,98 @@
 }
 
 /**
+ * Check that memory can be lent and retrieved with multiple fragments.
+ */
+TEST(memory_sharing, lend_fragmented_relinquish)
+{
+	struct ffa_value run_res;
+	struct mailbox_buffers mb = set_up_mailbox();
+	uint8_t *ptr = pages;
+	uint32_t i;
+	ffa_memory_handle_t handle;
+	struct ffa_memory_region_constituent
+		constituents[FRAGMENTED_SHARE_PAGE_COUNT];
+
+	SERVICE_SELECT(SERVICE_VM1, "ffa_memory_lend_relinquish", mb.send);
+
+	/* Initialise the memory before giving it. */
+	memset_s(ptr, sizeof(pages), 'b',
+		 PAGE_SIZE * FRAGMENTED_SHARE_PAGE_COUNT);
+
+	for (i = 0; i < ARRAY_SIZE(constituents); ++i) {
+		constituents[i].address = (uint64_t)pages + i * PAGE_SIZE;
+		constituents[i].page_count = 1;
+		constituents[i].reserved = 0;
+	}
+
+	handle = send_memory_and_retrieve_request(
+		FFA_MEM_LEND_32, mb.send, HF_PRIMARY_VM_ID, SERVICE_VM1,
+		constituents, ARRAY_SIZE(constituents), 0, FFA_DATA_ACCESS_RW,
+		FFA_DATA_ACCESS_RW, FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED,
+		FFA_INSTRUCTION_ACCESS_X);
+
+	run_res = ffa_run(SERVICE_VM1, 0);
+
+	/* Let the memory be returned. */
+	EXPECT_EQ(run_res.func, FFA_MSG_SEND_32);
+	EXPECT_EQ(ffa_rx_release().func, FFA_SUCCESS_32);
+	EXPECT_EQ(ffa_mem_reclaim(handle, 0).func, FFA_SUCCESS_32);
+
+	/* Ensure that the secondary VM accessed the region. */
+	for (int i = 0; i < PAGE_SIZE * FRAGMENTED_SHARE_PAGE_COUNT; ++i) {
+		ASSERT_EQ(ptr[i], 'c');
+	}
+
+	run_res = ffa_run(SERVICE_VM1, 0);
+	EXPECT_EQ(exception_handler_receive_exception_count(&run_res, mb.recv),
+		  1);
+}
+
+/**
+ * Check that memory can be lent with multiple fragments even though it could
+ * fit in one.
+ */
+TEST(memory_sharing, lend_force_fragmented_relinquish)
+{
+	struct ffa_value run_res;
+	struct mailbox_buffers mb = set_up_mailbox();
+	uint8_t *ptr = pages;
+	ffa_memory_handle_t handle;
+
+	SERVICE_SELECT(SERVICE_VM1, "ffa_memory_lend_relinquish", mb.send);
+
+	/* Initialise the memory before giving it. */
+	memset_s(ptr, sizeof(pages), 'b', PAGE_SIZE);
+
+	struct ffa_memory_region_constituent constituents[] = {
+		{.address = (uint64_t)pages, .page_count = 1},
+		{.address = (uint64_t)pages + PAGE_SIZE, .page_count = 2},
+	};
+
+	handle = send_memory_and_retrieve_request_force_fragmented(
+		FFA_MEM_LEND_32, mb.send, HF_PRIMARY_VM_ID, SERVICE_VM1,
+		constituents, ARRAY_SIZE(constituents), 0, FFA_DATA_ACCESS_RW,
+		FFA_DATA_ACCESS_RW, FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED,
+		FFA_INSTRUCTION_ACCESS_X);
+
+	run_res = ffa_run(SERVICE_VM1, 0);
+
+	/* Let the memory be returned. */
+	EXPECT_EQ(run_res.func, FFA_MSG_SEND_32);
+	EXPECT_EQ(ffa_rx_release().func, FFA_SUCCESS_32);
+	EXPECT_EQ(ffa_mem_reclaim(handle, 0).func, FFA_SUCCESS_32);
+
+	/* Ensure that the secondary VM accessed the region. */
+	for (int i = 0; i < PAGE_SIZE; ++i) {
+		ASSERT_EQ(ptr[i], 'c');
+	}
+
+	run_res = ffa_run(SERVICE_VM1, 0);
+	EXPECT_EQ(exception_handler_receive_exception_count(&run_res, mb.recv),
+		  1);
+}
+
+/**
  * Check that memory that is donated can't be relinquished.
  */
 TEST(memory_sharing, donate_relinquish)
@@ -419,13 +531,13 @@
 
 	/* Let the memory be returned, and retrieve it. */
 	run_res = ffa_run(SERVICE_VM1, 0);
-	EXPECT_EQ(retrieve_memory_from_message(mb.recv, mb.send, run_res, NULL),
+	EXPECT_EQ(retrieve_memory_from_message(mb.recv, mb.send, run_res, NULL,
+					       NULL, HF_MAILBOX_SIZE),
 		  SERVICE_VM1);
 
 	for (int i = 0; i < PAGE_SIZE; ++i) {
 		ASSERT_EQ(ptr[i], 'c');
 	}
-	EXPECT_EQ(ffa_rx_release().func, FFA_SUCCESS_32);
 
 	run_res = ffa_run(SERVICE_VM1, 0);
 	EXPECT_EQ(exception_handler_receive_exception_count(&run_res, mb.recv),
@@ -566,11 +678,12 @@
 
 	/* Have the memory be given. */
 	run_res = ffa_run(SERVICE_VM1, 0);
-	EXPECT_EQ(retrieve_memory_from_message(mb.recv, mb.send, run_res, NULL),
+	memory_region = (struct ffa_memory_region *)retrieve_buffer;
+	EXPECT_EQ(retrieve_memory_from_message(mb.recv, mb.send, run_res, NULL,
+					       memory_region, HF_MAILBOX_SIZE),
 		  SERVICE_VM1);
 
 	/* Check the memory was cleared. */
-	memory_region = (struct ffa_memory_region *)mb.recv;
 	ASSERT_EQ(memory_region->receiver_count, 1);
 	ASSERT_NE(memory_region->receivers[0].composite_memory_region_offset,
 		  0);
@@ -579,7 +692,6 @@
 	for (int i = 0; i < PAGE_SIZE; ++i) {
 		ASSERT_EQ(ptr[i], 0);
 	}
-	EXPECT_EQ(ffa_rx_release().func, FFA_SUCCESS_32);
 
 	run_res = ffa_run(SERVICE_VM1, 0);
 	EXPECT_EQ(exception_handler_receive_exception_count(&run_res, mb.recv),
@@ -601,11 +713,12 @@
 
 	/* Have the memory be lent. */
 	run_res = ffa_run(SERVICE_VM1, 0);
-	EXPECT_EQ(retrieve_memory_from_message(mb.recv, mb.send, run_res, NULL),
+	memory_region = (struct ffa_memory_region *)retrieve_buffer;
+	EXPECT_EQ(retrieve_memory_from_message(mb.recv, mb.send, run_res, NULL,
+					       memory_region, HF_MAILBOX_SIZE),
 		  SERVICE_VM1);
 
 	/* Check the memory was cleared. */
-	memory_region = (struct ffa_memory_region *)mb.recv;
 	ASSERT_EQ(memory_region->receiver_count, 1);
 	ASSERT_NE(memory_region->receivers[0].composite_memory_region_offset,
 		  0);
@@ -614,7 +727,6 @@
 	for (int i = 0; i < PAGE_SIZE; ++i) {
 		ASSERT_EQ(ptr[i], 0);
 	}
-	EXPECT_EQ(ffa_rx_release().func, FFA_SUCCESS_32);
 
 	run_res = ffa_run(SERVICE_VM1, 0);
 	EXPECT_EQ(exception_handler_receive_exception_count(&run_res, mb.recv),
@@ -774,9 +886,9 @@
 	run_res = ffa_run(SERVICE_VM1, 0);
 
 	/* Let the memory be returned. */
-	EXPECT_EQ(retrieve_memory_from_message(mb.recv, mb.send, run_res, NULL),
+	EXPECT_EQ(retrieve_memory_from_message(mb.recv, mb.send, run_res, NULL,
+					       NULL, HF_MAILBOX_SIZE),
 		  SERVICE_VM1);
-	EXPECT_EQ(ffa_rx_release().func, FFA_SUCCESS_32);
 
 	/* Share the memory with another VM. */
 	send_memory_and_retrieve_request(
@@ -883,9 +995,9 @@
 
 	/* Let the memory be sent from VM1 to PRIMARY (returned). */
 	run_res = ffa_run(SERVICE_VM1, 0);
-	EXPECT_EQ(retrieve_memory_from_message(mb.recv, mb.send, run_res, NULL),
+	EXPECT_EQ(retrieve_memory_from_message(mb.recv, mb.send, run_res, NULL,
+					       NULL, HF_MAILBOX_SIZE),
 		  SERVICE_VM1);
-	EXPECT_EQ(ffa_rx_release().func, FFA_SUCCESS_32);
 
 	/* Check we have access again. */
 	ptr[0] = 'f';
@@ -910,11 +1022,15 @@
 		{.address = (uint64_t)pages, .page_count = 1},
 	};
 
-	msg_size = ffa_memory_region_init(
-		mb.send, HF_PRIMARY_VM_ID, HF_PRIMARY_VM_ID, constituents,
-		ARRAY_SIZE(constituents), 0, 0, FFA_DATA_ACCESS_NOT_SPECIFIED,
-		FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED, FFA_MEMORY_NORMAL_MEM,
-		FFA_MEMORY_CACHE_WRITE_BACK, FFA_MEMORY_OUTER_SHAREABLE);
+	EXPECT_EQ(ffa_memory_region_init(
+			  mb.send, HF_MAILBOX_SIZE, HF_PRIMARY_VM_ID,
+			  HF_PRIMARY_VM_ID, constituents,
+			  ARRAY_SIZE(constituents), 0, 0,
+			  FFA_DATA_ACCESS_NOT_SPECIFIED,
+			  FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED,
+			  FFA_MEMORY_NORMAL_MEM, FFA_MEMORY_CACHE_WRITE_BACK,
+			  FFA_MEMORY_OUTER_SHAREABLE, NULL, &msg_size),
+		  0);
 
 	EXPECT_FFA_ERROR(ffa_mem_donate(msg_size, msg_size),
 			 FFA_INVALID_PARAMETERS);
@@ -935,11 +1051,14 @@
 		{.address = (uint64_t)pages, .page_count = 1},
 	};
 
-	msg_size = ffa_memory_region_init(
-		mb.send, HF_PRIMARY_VM_ID, HF_PRIMARY_VM_ID, constituents,
-		ARRAY_SIZE(constituents), 0, 0, FFA_DATA_ACCESS_RW,
-		FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED, FFA_MEMORY_NORMAL_MEM,
-		FFA_MEMORY_CACHE_WRITE_BACK, FFA_MEMORY_OUTER_SHAREABLE);
+	EXPECT_EQ(ffa_memory_region_init(
+			  mb.send, HF_MAILBOX_SIZE, HF_PRIMARY_VM_ID,
+			  HF_PRIMARY_VM_ID, constituents,
+			  ARRAY_SIZE(constituents), 0, 0, FFA_DATA_ACCESS_RW,
+			  FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED,
+			  FFA_MEMORY_NORMAL_MEM, FFA_MEMORY_CACHE_WRITE_BACK,
+			  FFA_MEMORY_OUTER_SHAREABLE, NULL, &msg_size),
+		  0);
 	EXPECT_FFA_ERROR(ffa_mem_lend(msg_size, msg_size),
 			 FFA_INVALID_PARAMETERS);
 }
@@ -959,11 +1078,14 @@
 		{.address = (uint64_t)pages, .page_count = 1},
 	};
 
-	msg_size = ffa_memory_region_init(
-		mb.send, HF_PRIMARY_VM_ID, HF_PRIMARY_VM_ID, constituents,
-		ARRAY_SIZE(constituents), 0, 0, FFA_DATA_ACCESS_RW,
-		FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED, FFA_MEMORY_NORMAL_MEM,
-		FFA_MEMORY_CACHE_WRITE_BACK, FFA_MEMORY_OUTER_SHAREABLE);
+	EXPECT_EQ(ffa_memory_region_init(
+			  mb.send, HF_MAILBOX_SIZE, HF_PRIMARY_VM_ID,
+			  HF_PRIMARY_VM_ID, constituents,
+			  ARRAY_SIZE(constituents), 0, 0, FFA_DATA_ACCESS_RW,
+			  FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED,
+			  FFA_MEMORY_NORMAL_MEM, FFA_MEMORY_CACHE_WRITE_BACK,
+			  FFA_MEMORY_OUTER_SHAREABLE, NULL, &msg_size),
+		  0);
 	EXPECT_FFA_ERROR(ffa_mem_share(msg_size, msg_size),
 			 FFA_INVALID_PARAMETERS);
 }
@@ -988,27 +1110,37 @@
 	};
 
 	/* Try invalid configurations. */
-	msg_size = ffa_memory_region_init(
-		mb.send, SERVICE_VM1, HF_PRIMARY_VM_ID, constituents,
-		ARRAY_SIZE(constituents), 0, 0, FFA_DATA_ACCESS_NOT_SPECIFIED,
-		FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED, FFA_MEMORY_NORMAL_MEM,
-		FFA_MEMORY_CACHE_WRITE_BACK, FFA_MEMORY_OUTER_SHAREABLE);
+	EXPECT_EQ(
+		ffa_memory_region_init(
+			mb.send, HF_MAILBOX_SIZE, SERVICE_VM1, HF_PRIMARY_VM_ID,
+			constituents, ARRAY_SIZE(constituents), 0, 0,
+			FFA_DATA_ACCESS_NOT_SPECIFIED,
+			FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED,
+			FFA_MEMORY_NORMAL_MEM, FFA_MEMORY_CACHE_WRITE_BACK,
+			FFA_MEMORY_OUTER_SHAREABLE, NULL, &msg_size),
+		0);
 	EXPECT_FFA_ERROR(ffa_mem_donate(msg_size, msg_size),
 			 FFA_INVALID_PARAMETERS);
 
-	msg_size = ffa_memory_region_init(
-		mb.send, SERVICE_VM1, SERVICE_VM1, constituents,
-		ARRAY_SIZE(constituents), 0, 0, FFA_DATA_ACCESS_NOT_SPECIFIED,
-		FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED, FFA_MEMORY_NORMAL_MEM,
-		FFA_MEMORY_CACHE_WRITE_BACK, FFA_MEMORY_OUTER_SHAREABLE);
+	EXPECT_EQ(ffa_memory_region_init(
+			  mb.send, HF_MAILBOX_SIZE, SERVICE_VM1, SERVICE_VM1,
+			  constituents, ARRAY_SIZE(constituents), 0, 0,
+			  FFA_DATA_ACCESS_NOT_SPECIFIED,
+			  FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED,
+			  FFA_MEMORY_NORMAL_MEM, FFA_MEMORY_CACHE_WRITE_BACK,
+			  FFA_MEMORY_OUTER_SHAREABLE, NULL, &msg_size),
+		  0);
 	EXPECT_FFA_ERROR(ffa_mem_donate(msg_size, msg_size),
 			 FFA_INVALID_PARAMETERS);
 
-	msg_size = ffa_memory_region_init(
-		mb.send, SERVICE_VM2, SERVICE_VM1, constituents,
-		ARRAY_SIZE(constituents), 0, 0, FFA_DATA_ACCESS_NOT_SPECIFIED,
-		FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED, FFA_MEMORY_NORMAL_MEM,
-		FFA_MEMORY_CACHE_WRITE_BACK, FFA_MEMORY_OUTER_SHAREABLE);
+	EXPECT_EQ(ffa_memory_region_init(
+			  mb.send, HF_MAILBOX_SIZE, SERVICE_VM2, SERVICE_VM1,
+			  constituents, ARRAY_SIZE(constituents), 0, 0,
+			  FFA_DATA_ACCESS_NOT_SPECIFIED,
+			  FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED,
+			  FFA_MEMORY_NORMAL_MEM, FFA_MEMORY_CACHE_WRITE_BACK,
+			  FFA_MEMORY_OUTER_SHAREABLE, NULL, &msg_size),
+		  0);
 	EXPECT_FFA_ERROR(ffa_mem_donate(msg_size, msg_size),
 			 FFA_INVALID_PARAMETERS);
 
@@ -1021,9 +1153,9 @@
 
 	/* Receive and return memory from VM1. */
 	run_res = ffa_run(SERVICE_VM1, 0);
-	EXPECT_EQ(retrieve_memory_from_message(mb.recv, mb.send, run_res, NULL),
+	EXPECT_EQ(retrieve_memory_from_message(mb.recv, mb.send, run_res, NULL,
+					       NULL, HF_MAILBOX_SIZE),
 		  SERVICE_VM1);
-	EXPECT_EQ(ffa_rx_release().func, FFA_SUCCESS_32);
 
 	/* Use VM1 to fail to donate memory from the primary to VM2. */
 	run_res = ffa_run(SERVICE_VM1, 0);
@@ -1052,24 +1184,33 @@
 				{.address = (uint64_t)pages + PAGE_SIZE + j,
 				 .page_count = 1},
 			};
-			uint32_t msg_size = ffa_memory_region_init(
-				mb.send, HF_PRIMARY_VM_ID, SERVICE_VM1,
-				constituents, ARRAY_SIZE(constituents), 0, 0,
-				FFA_DATA_ACCESS_NOT_SPECIFIED,
-				FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED,
-				FFA_MEMORY_NORMAL_MEM,
-				FFA_MEMORY_CACHE_WRITE_BACK,
-				FFA_MEMORY_OUTER_SHAREABLE);
+			uint32_t msg_size;
+			EXPECT_EQ(
+				ffa_memory_region_init(
+					mb.send, HF_MAILBOX_SIZE,
+					HF_PRIMARY_VM_ID, SERVICE_VM1,
+					constituents, ARRAY_SIZE(constituents),
+					0, 0, FFA_DATA_ACCESS_NOT_SPECIFIED,
+					FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED,
+					FFA_MEMORY_NORMAL_MEM,
+					FFA_MEMORY_CACHE_WRITE_BACK,
+					FFA_MEMORY_OUTER_SHAREABLE, NULL,
+					&msg_size),
+				0);
 			EXPECT_FFA_ERROR(ffa_mem_donate(msg_size, msg_size),
 					 FFA_INVALID_PARAMETERS);
-			msg_size = ffa_memory_region_init(
-				mb.send, HF_PRIMARY_VM_ID, SERVICE_VM1,
-				constituents, ARRAY_SIZE(constituents), 0, 0,
-				FFA_DATA_ACCESS_RW,
-				FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED,
-				FFA_MEMORY_NORMAL_MEM,
-				FFA_MEMORY_CACHE_WRITE_BACK,
-				FFA_MEMORY_OUTER_SHAREABLE);
+			EXPECT_EQ(
+				ffa_memory_region_init(
+					mb.send, HF_MAILBOX_SIZE,
+					HF_PRIMARY_VM_ID, SERVICE_VM1,
+					constituents, ARRAY_SIZE(constituents),
+					0, 0, FFA_DATA_ACCESS_RW,
+					FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED,
+					FFA_MEMORY_NORMAL_MEM,
+					FFA_MEMORY_CACHE_WRITE_BACK,
+					FFA_MEMORY_OUTER_SHAREABLE, NULL,
+					&msg_size),
+				0);
 			EXPECT_FFA_ERROR(ffa_mem_lend(msg_size, msg_size),
 					 FFA_INVALID_PARAMETERS);
 		}
@@ -1096,11 +1237,14 @@
 	};
 
 	/* Check cannot swap VM IDs. */
-	msg_size = ffa_memory_region_init(
-		mb.send, SERVICE_VM1, HF_PRIMARY_VM_ID, constituents,
-		ARRAY_SIZE(constituents), 0, 0, FFA_DATA_ACCESS_RW,
-		FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED, FFA_MEMORY_NORMAL_MEM,
-		FFA_MEMORY_CACHE_WRITE_BACK, FFA_MEMORY_OUTER_SHAREABLE);
+	EXPECT_EQ(ffa_memory_region_init(
+			  mb.send, HF_MAILBOX_SIZE, SERVICE_VM1,
+			  HF_PRIMARY_VM_ID, constituents,
+			  ARRAY_SIZE(constituents), 0, 0, FFA_DATA_ACCESS_RW,
+			  FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED,
+			  FFA_MEMORY_NORMAL_MEM, FFA_MEMORY_CACHE_WRITE_BACK,
+			  FFA_MEMORY_OUTER_SHAREABLE, NULL, &msg_size),
+		  0);
 	EXPECT_FFA_ERROR(ffa_mem_lend(msg_size, msg_size),
 			 FFA_INVALID_PARAMETERS);
 
@@ -1471,23 +1615,29 @@
 	constituents[0].page_count = 1;
 	for (int i = 1; i < PAGE_SIZE * 2; i++) {
 		constituents[0].address = (uint64_t)pages + PAGE_SIZE;
-		msg_size = ffa_memory_region_init(
-			mb.send, HF_PRIMARY_VM_ID, SERVICE_VM2, constituents,
-			ARRAY_SIZE(constituents), 0, 0,
-			FFA_DATA_ACCESS_NOT_SPECIFIED,
-			FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED,
-			FFA_MEMORY_NORMAL_MEM, FFA_MEMORY_CACHE_WRITE_BACK,
-			FFA_MEMORY_OUTER_SHAREABLE);
+		EXPECT_EQ(ffa_memory_region_init(
+				  mb.send, HF_MAILBOX_SIZE, HF_PRIMARY_VM_ID,
+				  SERVICE_VM2, constituents,
+				  ARRAY_SIZE(constituents), 0, 0,
+				  FFA_DATA_ACCESS_NOT_SPECIFIED,
+				  FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED,
+				  FFA_MEMORY_NORMAL_MEM,
+				  FFA_MEMORY_CACHE_WRITE_BACK,
+				  FFA_MEMORY_OUTER_SHAREABLE, NULL, &msg_size),
+			  0);
 		EXPECT_FFA_ERROR(ffa_mem_donate(msg_size, msg_size),
 				 FFA_DENIED);
 	}
 
 	/* Ensure we can't donate to the only borrower. */
-	msg_size = ffa_memory_region_init(
-		mb.send, HF_PRIMARY_VM_ID, SERVICE_VM1, constituents,
-		ARRAY_SIZE(constituents), 0, 0, FFA_DATA_ACCESS_NOT_SPECIFIED,
-		FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED, FFA_MEMORY_NORMAL_MEM,
-		FFA_MEMORY_CACHE_WRITE_BACK, FFA_MEMORY_OUTER_SHAREABLE);
+	EXPECT_EQ(ffa_memory_region_init(
+			  mb.send, HF_MAILBOX_SIZE, HF_PRIMARY_VM_ID,
+			  SERVICE_VM1, constituents, ARRAY_SIZE(constituents),
+			  0, 0, FFA_DATA_ACCESS_NOT_SPECIFIED,
+			  FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED,
+			  FFA_MEMORY_NORMAL_MEM, FFA_MEMORY_CACHE_WRITE_BACK,
+			  FFA_MEMORY_OUTER_SHAREABLE, NULL, &msg_size),
+		  0);
 	EXPECT_FFA_ERROR(ffa_mem_donate(msg_size, msg_size), FFA_DENIED);
 }
 
@@ -1530,23 +1680,29 @@
 	constituents[0].page_count = 1;
 	for (int i = 1; i < PAGE_SIZE * 2; i++) {
 		constituents[0].address = (uint64_t)pages + PAGE_SIZE;
-		msg_size = ffa_memory_region_init(
-			mb.send, HF_PRIMARY_VM_ID, SERVICE_VM2, constituents,
-			ARRAY_SIZE(constituents), 0, 0,
-			FFA_DATA_ACCESS_NOT_SPECIFIED,
-			FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED,
-			FFA_MEMORY_NORMAL_MEM, FFA_MEMORY_CACHE_WRITE_BACK,
-			FFA_MEMORY_OUTER_SHAREABLE);
+		EXPECT_EQ(ffa_memory_region_init(
+				  mb.send, HF_MAILBOX_SIZE, HF_PRIMARY_VM_ID,
+				  SERVICE_VM2, constituents,
+				  ARRAY_SIZE(constituents), 0, 0,
+				  FFA_DATA_ACCESS_NOT_SPECIFIED,
+				  FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED,
+				  FFA_MEMORY_NORMAL_MEM,
+				  FFA_MEMORY_CACHE_WRITE_BACK,
+				  FFA_MEMORY_OUTER_SHAREABLE, NULL, &msg_size),
+			  0);
 		EXPECT_FFA_ERROR(ffa_mem_donate(msg_size, msg_size),
 				 FFA_DENIED);
 	}
 
 	/* Ensure we can't donate to the only borrower. */
-	msg_size = ffa_memory_region_init(
-		mb.send, HF_PRIMARY_VM_ID, SERVICE_VM1, constituents,
-		ARRAY_SIZE(constituents), 0, 0, FFA_DATA_ACCESS_NOT_SPECIFIED,
-		FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED, FFA_MEMORY_NORMAL_MEM,
-		FFA_MEMORY_CACHE_WRITE_BACK, FFA_MEMORY_OUTER_SHAREABLE);
+	EXPECT_EQ(ffa_memory_region_init(
+			  mb.send, HF_MAILBOX_SIZE, HF_PRIMARY_VM_ID,
+			  SERVICE_VM1, constituents, ARRAY_SIZE(constituents),
+			  0, 0, FFA_DATA_ACCESS_NOT_SPECIFIED,
+			  FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED,
+			  FFA_MEMORY_NORMAL_MEM, FFA_MEMORY_CACHE_WRITE_BACK,
+			  FFA_MEMORY_OUTER_SHAREABLE, NULL, &msg_size),
+		  0);
 	EXPECT_FFA_ERROR(ffa_mem_donate(msg_size, msg_size), FFA_DENIED);
 }
 
@@ -1609,12 +1765,16 @@
 	constituents[0].page_count = 1;
 	for (int i = 0; i < 2; i++) {
 		constituents[0].address = (uint64_t)pages + i * PAGE_SIZE;
-		msg_size = ffa_memory_region_init(
-			mb.send, HF_PRIMARY_VM_ID, SERVICE_VM2, constituents,
-			ARRAY_SIZE(constituents), 0, 0, FFA_DATA_ACCESS_RO,
-			FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED,
-			FFA_MEMORY_NORMAL_MEM, FFA_MEMORY_CACHE_WRITE_BACK,
-			FFA_MEMORY_OUTER_SHAREABLE);
+		EXPECT_EQ(ffa_memory_region_init(
+				  mb.send, HF_MAILBOX_SIZE, HF_PRIMARY_VM_ID,
+				  SERVICE_VM2, constituents,
+				  ARRAY_SIZE(constituents), 0, 0,
+				  FFA_DATA_ACCESS_RO,
+				  FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED,
+				  FFA_MEMORY_NORMAL_MEM,
+				  FFA_MEMORY_CACHE_WRITE_BACK,
+				  FFA_MEMORY_OUTER_SHAREABLE, NULL, &msg_size),
+			  0);
 		EXPECT_FFA_ERROR(ffa_mem_lend(msg_size, msg_size), FFA_DENIED);
 	}
 }
@@ -1668,12 +1828,16 @@
 	constituents[0].page_count = 1;
 	for (int i = 0; i < 2; i++) {
 		constituents[0].address = (uint64_t)pages + i * PAGE_SIZE;
-		msg_size = ffa_memory_region_init(
-			mb.send, HF_PRIMARY_VM_ID, SERVICE_VM2, constituents,
-			ARRAY_SIZE(constituents), 0, 0, FFA_DATA_ACCESS_RO,
-			FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED,
-			FFA_MEMORY_NORMAL_MEM, FFA_MEMORY_CACHE_WRITE_BACK,
-			FFA_MEMORY_OUTER_SHAREABLE);
+		EXPECT_EQ(ffa_memory_region_init(
+				  mb.send, HF_MAILBOX_SIZE, HF_PRIMARY_VM_ID,
+				  SERVICE_VM2, constituents,
+				  ARRAY_SIZE(constituents), 0, 0,
+				  FFA_DATA_ACCESS_RO,
+				  FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED,
+				  FFA_MEMORY_NORMAL_MEM,
+				  FFA_MEMORY_CACHE_WRITE_BACK,
+				  FFA_MEMORY_OUTER_SHAREABLE, NULL, &msg_size),
+			  0);
 		EXPECT_FFA_ERROR(ffa_mem_share(msg_size, msg_size), FFA_DENIED);
 	}
 }
@@ -1732,12 +1896,14 @@
 		{.address = (uint64_t)pages, .page_count = 2},
 	};
 
-	msg_size = ffa_memory_region_init(
-		mb.send, HF_PRIMARY_VM_ID, SERVICE_VM1, constituents,
-		ARRAY_SIZE(constituents), 0, FFA_MEMORY_REGION_FLAG_CLEAR,
-		FFA_DATA_ACCESS_RO, FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED,
-		FFA_MEMORY_NORMAL_MEM, FFA_MEMORY_CACHE_WRITE_BACK,
-		FFA_MEMORY_OUTER_SHAREABLE);
+	EXPECT_EQ(ffa_memory_region_init(
+			  mb.send, HF_MAILBOX_SIZE, HF_PRIMARY_VM_ID,
+			  SERVICE_VM1, constituents, ARRAY_SIZE(constituents),
+			  0, FFA_MEMORY_REGION_FLAG_CLEAR, FFA_DATA_ACCESS_RO,
+			  FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED,
+			  FFA_MEMORY_NORMAL_MEM, FFA_MEMORY_CACHE_WRITE_BACK,
+			  FFA_MEMORY_OUTER_SHAREABLE, NULL, &msg_size),
+		  0);
 	EXPECT_FFA_ERROR(ffa_mem_share(msg_size, msg_size),
 			 FFA_INVALID_PARAMETERS);
 
diff --git a/test/vmapi/primary_with_secondaries/services/memory.c b/test/vmapi/primary_with_secondaries/services/memory.c
index 2c598d0..02c686c 100644
--- a/test/vmapi/primary_with_secondaries/services/memory.c
+++ b/test/vmapi/primary_with_secondaries/services/memory.c
@@ -28,6 +28,7 @@
 #include "test/vmapi/ffa.h"
 
 alignas(PAGE_SIZE) static uint8_t page[PAGE_SIZE];
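+/*
+ * Buffer into which memory region descriptors are copied out of the RX
+ * buffer; a fragmented descriptor may be larger than a single mailbox.
+ */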
+static uint8_t retrieve_buffer[PAGE_SIZE * 2];
 
 TEST_SERVICE(memory_increment)
 {
@@ -38,10 +39,11 @@
 		void *send_buf = SERVICE_SEND_BUFFER();
 
 		struct ffa_value ret = ffa_msg_wait();
-		ffa_vm_id_t sender = retrieve_memory_from_message(
-			recv_buf, send_buf, ret, NULL);
 		struct ffa_memory_region *memory_region =
-			(struct ffa_memory_region *)recv_buf;
+			(struct ffa_memory_region *)retrieve_buffer;
+		ffa_vm_id_t sender = retrieve_memory_from_message(
+			recv_buf, send_buf, ret, NULL, memory_region,
+			HF_MAILBOX_SIZE);
 		struct ffa_composite_memory_region *composite =
 			ffa_memory_region_get_composite(memory_region, 0);
 		uint8_t *ptr = (uint8_t *)composite->constituents[0].address;
@@ -60,7 +62,6 @@
 		}
 
 		/* Signal completion and reset. */
-		EXPECT_EQ(ffa_rx_release().func, FFA_SUCCESS_32);
 		ffa_msg_send(hf_vm_get_id(), sender, sizeof(ptr), 0);
 	}
 }
@@ -121,10 +122,10 @@
 
 	exception_setup(NULL, exception_handler_yield_data_abort);
 
-	ffa_vm_id_t sender =
-		retrieve_memory_from_message(recv_buf, send_buf, ret, NULL);
 	struct ffa_memory_region *memory_region =
-		(struct ffa_memory_region *)recv_buf;
+		(struct ffa_memory_region *)retrieve_buffer;
+	ffa_vm_id_t sender = retrieve_memory_from_message(
+		recv_buf, send_buf, ret, NULL, memory_region, HF_MAILBOX_SIZE);
 	struct ffa_composite_memory_region *composite =
 		ffa_memory_region_get_composite(memory_region, 0);
 
@@ -141,7 +142,6 @@
 		composite->constituents, composite->constituent_count, 0,
 		FFA_DATA_ACCESS_NOT_SPECIFIED, FFA_DATA_ACCESS_RW,
 		FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED, FFA_INSTRUCTION_ACCESS_X);
-	EXPECT_EQ(ffa_rx_release().func, FFA_SUCCESS_32);
 
 	/*
 	 * Try and access the memory which will cause a fault unless the memory
@@ -168,14 +168,14 @@
 
 	exception_setup(NULL, exception_handler_yield_data_abort);
 
-	retrieve_memory_from_message(recv_buf, send_buf, ret, NULL);
-	memory_region = (struct ffa_memory_region *)recv_buf;
+	memory_region = (struct ffa_memory_region *)retrieve_buffer;
+	retrieve_memory_from_message(recv_buf, send_buf, ret, NULL,
+				     memory_region, HF_MAILBOX_SIZE);
 	composite = ffa_memory_region_get_composite(memory_region, 0);
 
 	/* Choose which constituent we want to test. */
 	index = *(uint8_t *)composite->constituents[0].address;
 	ptr = (uint8_t *)composite->constituents[index].address;
-	EXPECT_EQ(ffa_rx_release().func, FFA_SUCCESS_32);
 
 	/*
 	 * Check that we can't access out of bounds after the region sent to us.
@@ -202,14 +202,14 @@
 
 	exception_setup(NULL, exception_handler_yield_data_abort);
 
-	retrieve_memory_from_message(recv_buf, send_buf, ret, NULL);
-	memory_region = (struct ffa_memory_region *)recv_buf;
+	memory_region = (struct ffa_memory_region *)retrieve_buffer;
+	retrieve_memory_from_message(recv_buf, send_buf, ret, NULL,
+				     memory_region, HF_MAILBOX_SIZE);
 	composite = ffa_memory_region_get_composite(memory_region, 0);
 
 	/* Choose which constituent we want to test. */
 	index = *(uint8_t *)composite->constituents[0].address;
 	ptr = (uint8_t *)composite->constituents[index].address;
-	EXPECT_EQ(ffa_rx_release().func, FFA_SUCCESS_32);
 
 	/*
 	 * Check that we can't access out of bounds before the region sent to
@@ -230,10 +230,10 @@
 	void *send_buf = SERVICE_SEND_BUFFER();
 
 	struct ffa_value ret = ffa_msg_wait();
-	ffa_vm_id_t sender =
-		retrieve_memory_from_message(recv_buf, send_buf, ret, NULL);
 	struct ffa_memory_region *memory_region =
-		(struct ffa_memory_region *)recv_buf;
+		(struct ffa_memory_region *)retrieve_buffer;
+	ffa_vm_id_t sender = retrieve_memory_from_message(
+		recv_buf, send_buf, ret, NULL, memory_region, HF_MAILBOX_SIZE);
 	struct ffa_composite_memory_region *composite =
 		ffa_memory_region_get_composite(memory_region, 0);
 
@@ -248,7 +248,6 @@
 		composite->constituents, composite->constituent_count, 0,
 		FFA_DATA_ACCESS_NOT_SPECIFIED, FFA_DATA_ACCESS_RW,
 		FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED, FFA_INSTRUCTION_ACCESS_X);
-	EXPECT_EQ(ffa_rx_release().func, FFA_SUCCESS_32);
 
 	/* Ensure that we are unable to modify memory any more. */
 	ptr[0] = 'c';
@@ -266,17 +265,15 @@
 	void *send_buf = SERVICE_SEND_BUFFER();
 
 	struct ffa_value ret = ffa_msg_wait();
-	ffa_vm_id_t sender =
-		retrieve_memory_from_message(recv_buf, send_buf, ret, NULL);
 	struct ffa_memory_region *memory_region =
-		(struct ffa_memory_region *)recv_buf;
+		(struct ffa_memory_region *)retrieve_buffer;
+	ffa_vm_id_t sender = retrieve_memory_from_message(
+		recv_buf, send_buf, ret, NULL, memory_region, HF_MAILBOX_SIZE);
 	struct ffa_composite_memory_region *composite =
 		ffa_memory_region_get_composite(memory_region, 0);
 	struct ffa_memory_region_constituent constituent =
 		composite->constituents[0];
 
-	EXPECT_EQ(ffa_rx_release().func, FFA_SUCCESS_32);
-
 	/* Yield to allow attempt to re donate from primary. */
 	ffa_yield();
 
@@ -288,11 +285,14 @@
 		FFA_INSTRUCTION_ACCESS_X);
 
 	/* Attempt to donate the memory to another VM. */
-	msg_size = ffa_memory_region_init(
-		send_buf, hf_vm_get_id(), SERVICE_VM2, &constituent, 1, 0, 0,
-		FFA_DATA_ACCESS_NOT_SPECIFIED,
-		FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED, FFA_MEMORY_NORMAL_MEM,
-		FFA_MEMORY_CACHE_WRITE_BACK, FFA_MEMORY_OUTER_SHAREABLE);
+	EXPECT_EQ(
+		ffa_memory_region_init(
+			send_buf, HF_MAILBOX_SIZE, hf_vm_get_id(), SERVICE_VM2,
+			&constituent, 1, 0, 0, FFA_DATA_ACCESS_NOT_SPECIFIED,
+			FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED,
+			FFA_MEMORY_NORMAL_MEM, FFA_MEMORY_CACHE_WRITE_BACK,
+			FFA_MEMORY_OUTER_SHAREABLE, NULL, &msg_size),
+		0);
 	EXPECT_FFA_ERROR(ffa_mem_donate(msg_size, msg_size), FFA_DENIED);
 
 	ffa_yield();
@@ -309,16 +309,16 @@
 
 	for (;;) {
 		struct ffa_value ret = ffa_msg_wait();
-		struct ffa_memory_region *memory_region;
+		struct ffa_memory_region *memory_region =
+			(struct ffa_memory_region *)retrieve_buffer;
 		struct ffa_composite_memory_region *composite;
 		uint8_t *ptr;
 
-		retrieve_memory_from_message(recv_buf, send_buf, ret, NULL);
-		memory_region = (struct ffa_memory_region *)recv_buf;
+		retrieve_memory_from_message(recv_buf, send_buf, ret, NULL,
+					     memory_region, HF_MAILBOX_SIZE);
 		composite = ffa_memory_region_get_composite(memory_region, 0);
 		ptr = (uint8_t *)composite->constituents[0].address;
 
-		EXPECT_EQ(ffa_rx_release().func, FFA_SUCCESS_32);
 		ptr[0] = 'd';
 		ffa_yield();
 
@@ -338,10 +338,10 @@
 	void *send_buf = SERVICE_SEND_BUFFER();
 
 	struct ffa_value ret = ffa_msg_wait();
-	ffa_vm_id_t sender =
-		retrieve_memory_from_message(recv_buf, send_buf, ret, NULL);
 	struct ffa_memory_region *memory_region =
-		(struct ffa_memory_region *)recv_buf;
+		(struct ffa_memory_region *)retrieve_buffer;
+	ffa_vm_id_t sender = retrieve_memory_from_message(
+		recv_buf, send_buf, ret, NULL, memory_region, HF_MAILBOX_SIZE);
 	struct ffa_composite_memory_region *composite =
 		ffa_memory_region_get_composite(memory_region, 0);
 
@@ -353,13 +353,15 @@
 		FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED, FFA_INSTRUCTION_ACCESS_X);
 
 	/* Fail to donate the memory from the primary to VM2. */
-	msg_size = ffa_memory_region_init(
-		send_buf, HF_PRIMARY_VM_ID, SERVICE_VM2,
-		composite->constituents, composite->constituent_count, 0, 0,
-		FFA_DATA_ACCESS_NOT_SPECIFIED,
-		FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED, FFA_MEMORY_NORMAL_MEM,
-		FFA_MEMORY_CACHE_WRITE_BACK, FFA_MEMORY_OUTER_SHAREABLE);
-	EXPECT_EQ(ffa_rx_release().func, FFA_SUCCESS_32);
+	EXPECT_EQ(ffa_memory_region_init(
+			  send_buf, HF_MAILBOX_SIZE, HF_PRIMARY_VM_ID,
+			  SERVICE_VM2, composite->constituents,
+			  composite->constituent_count, 0, 0,
+			  FFA_DATA_ACCESS_NOT_SPECIFIED,
+			  FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED,
+			  FFA_MEMORY_NORMAL_MEM, FFA_MEMORY_CACHE_WRITE_BACK,
+			  FFA_MEMORY_OUTER_SHAREABLE, NULL, &msg_size),
+		  0);
 	EXPECT_FFA_ERROR(ffa_mem_donate(msg_size, msg_size),
 			 FFA_INVALID_PARAMETERS);
 	ffa_yield();
@@ -371,10 +373,6 @@
 
 	/* Loop, giving memory back to the sender. */
 	for (;;) {
-		uint8_t *ptr;
-		uint8_t *ptr2;
-		uint32_t count;
-		uint32_t count2;
 		size_t i;
 		ffa_memory_handle_t handle;
 
@@ -382,31 +380,32 @@
 		void *send_buf = SERVICE_SEND_BUFFER();
 
 		struct ffa_value ret = ffa_msg_wait();
-		ffa_vm_id_t sender = retrieve_memory_from_message(
-			recv_buf, send_buf, ret, &handle);
 		struct ffa_memory_region *memory_region =
-			(struct ffa_memory_region *)recv_buf;
+			(struct ffa_memory_region *)retrieve_buffer;
+		ffa_vm_id_t sender = retrieve_memory_from_message(
+			recv_buf, send_buf, ret, &handle, memory_region,
+			sizeof(retrieve_buffer));
 		struct ffa_composite_memory_region *composite =
 			ffa_memory_region_get_composite(memory_region, 0);
-		struct ffa_memory_region_constituent *constituents =
-			composite->constituents;
+		struct ffa_memory_region_constituent *constituents;
+		uint8_t *first_ptr;
 
 		/* ASSERT_TRUE isn't enough for clang-analyze. */
 		CHECK(composite != NULL);
+		constituents = composite->constituents;
+		first_ptr = (uint8_t *)constituents[0].address;
 
-		ptr = (uint8_t *)constituents[0].address;
-		count = constituents[0].page_count;
-		ptr2 = (uint8_t *)constituents[1].address;
-		count2 = constituents[1].page_count;
-		/* Relevant information read, mailbox can be cleared. */
-		EXPECT_EQ(ffa_rx_release().func, FFA_SUCCESS_32);
+		/*
+		 * Check that we can read and write every page that was shared.
+		 */
+		for (i = 0; i < composite->constituent_count; ++i) {
+			uint8_t *ptr = (uint8_t *)constituents[i].address;
+			uint32_t count = constituents[i].page_count;
+			size_t j;
 
-		/* Check that one has access to the shared region. */
-		for (i = 0; i < PAGE_SIZE * count; ++i) {
-			ptr[i]++;
-		}
-		for (i = 0; i < PAGE_SIZE * count2; ++i) {
-			ptr2[i]++;
+			for (j = 0; j < PAGE_SIZE * count; ++j) {
+				ptr[j]++;
+			}
 		}
 
 		/* Give the memory back and notify the sender. */
@@ -416,10 +415,10 @@
 			  FFA_SUCCESS_32);
 
 		/*
-		 * Try and access the memory which will cause a fault unless the
+		 * Try to access the memory, which will cause a fault unless the
 		 * memory has been shared back again.
 		 */
-		ptr[0] = 123;
+		first_ptr[0] = 123;
 	}
 }
 
@@ -439,8 +438,9 @@
 		void *send_buf = SERVICE_SEND_BUFFER();
 		struct ffa_value ret = ffa_msg_wait();
 
-		retrieve_memory_from_message(recv_buf, send_buf, ret, &handle);
-		memory_region = (struct ffa_memory_region *)recv_buf;
+		memory_region = (struct ffa_memory_region *)retrieve_buffer;
+		retrieve_memory_from_message(recv_buf, send_buf, ret, &handle,
+					     memory_region, HF_MAILBOX_SIZE);
 		composite = ffa_memory_region_get_composite(memory_region, 0);
 
 		ptr = (uint8_t *)composite->constituents[0].address;
@@ -455,7 +455,6 @@
 		 * it was donated not lent.
 		 */
 		ffa_mem_relinquish_init(send_buf, handle, 0, hf_vm_get_id());
-		EXPECT_EQ(ffa_rx_release().func, FFA_SUCCESS_32);
 		EXPECT_FFA_ERROR(ffa_mem_relinquish(), FFA_INVALID_PARAMETERS);
 
 		/* Ensure we still have access to the memory. */
@@ -482,13 +481,8 @@
 
 		struct ffa_value ret = ffa_msg_wait();
 		ffa_vm_id_t sender = retrieve_memory_from_message(
-			recv_buf, send_buf, ret, &handle);
-
-		/*
-		 * Mailbox can be cleared, we don't actually care what the
-		 * memory region is.
-		 */
-		EXPECT_EQ(ffa_rx_release().func, FFA_SUCCESS_32);
+			recv_buf, send_buf, ret, &handle, NULL,
+			HF_MAILBOX_SIZE);
 
 		/* Trying to relinquish the memory and clear it should fail. */
 		ffa_mem_relinquish_init(send_buf, handle,
@@ -515,10 +509,11 @@
 	void *recv_buf = SERVICE_RECV_BUFFER();
 	void *send_buf = SERVICE_SEND_BUFFER();
 	struct ffa_value ret = ffa_msg_wait();
-	ffa_vm_id_t sender =
-		retrieve_memory_from_message(recv_buf, send_buf, ret, &handle);
 	struct ffa_memory_region *memory_region =
-		(struct ffa_memory_region *)recv_buf;
+		(struct ffa_memory_region *)retrieve_buffer;
+	ffa_vm_id_t sender =
+		retrieve_memory_from_message(recv_buf, send_buf, ret, &handle,
+					     memory_region, HF_MAILBOX_SIZE);
 	struct ffa_composite_memory_region *composite =
 		ffa_memory_region_get_composite(memory_region, 0);
 
@@ -529,23 +524,26 @@
 		  FFA_SUCCESS_32);
 
 	/* Ensure we cannot lend from the primary to another secondary. */
-	msg_size = ffa_memory_region_init(
-		send_buf, HF_PRIMARY_VM_ID, SERVICE_VM2,
-		composite->constituents, composite->constituent_count, 0, 0,
-		FFA_DATA_ACCESS_RW, FFA_INSTRUCTION_ACCESS_X,
-		FFA_MEMORY_NORMAL_MEM, FFA_MEMORY_CACHE_WRITE_BACK,
-		FFA_MEMORY_OUTER_SHAREABLE);
+	EXPECT_EQ(ffa_memory_region_init(
+			  send_buf, HF_MAILBOX_SIZE, HF_PRIMARY_VM_ID,
+			  SERVICE_VM2, composite->constituents,
+			  composite->constituent_count, 0, 0,
+			  FFA_DATA_ACCESS_RW, FFA_INSTRUCTION_ACCESS_X,
+			  FFA_MEMORY_NORMAL_MEM, FFA_MEMORY_CACHE_WRITE_BACK,
+			  FFA_MEMORY_OUTER_SHAREABLE, NULL, &msg_size),
+		  0);
 	EXPECT_FFA_ERROR(ffa_mem_lend(msg_size, msg_size),
 			 FFA_INVALID_PARAMETERS);
 
 	/* Ensure we cannot share from the primary to another secondary. */
-	msg_size = ffa_memory_region_init(
-		send_buf, HF_PRIMARY_VM_ID, SERVICE_VM2,
-		composite->constituents, composite->constituent_count, 0, 0,
-		FFA_DATA_ACCESS_RW, FFA_INSTRUCTION_ACCESS_X,
-		FFA_MEMORY_NORMAL_MEM, FFA_MEMORY_CACHE_WRITE_BACK,
-		FFA_MEMORY_OUTER_SHAREABLE);
-	EXPECT_EQ(ffa_rx_release().func, FFA_SUCCESS_32);
+	EXPECT_EQ(ffa_memory_region_init(
+			  send_buf, HF_MAILBOX_SIZE, HF_PRIMARY_VM_ID,
+			  SERVICE_VM2, composite->constituents,
+			  composite->constituent_count, 0, 0,
+			  FFA_DATA_ACCESS_RW, FFA_INSTRUCTION_ACCESS_X,
+			  FFA_MEMORY_NORMAL_MEM, FFA_MEMORY_CACHE_WRITE_BACK,
+			  FFA_MEMORY_OUTER_SHAREABLE, NULL, &msg_size),
+		  0);
 	EXPECT_FFA_ERROR(ffa_mem_share(msg_size, msg_size),
 			 FFA_INVALID_PARAMETERS);
 
@@ -564,10 +562,11 @@
 		void *recv_buf = SERVICE_RECV_BUFFER();
 		void *send_buf = SERVICE_SEND_BUFFER();
 		struct ffa_value ret = ffa_msg_wait();
-		ffa_vm_id_t sender = retrieve_memory_from_message(
-			recv_buf, send_buf, ret, &handle);
 		struct ffa_memory_region *memory_region =
-			(struct ffa_memory_region *)recv_buf;
+			(struct ffa_memory_region *)retrieve_buffer;
+		ffa_vm_id_t sender = retrieve_memory_from_message(
+			recv_buf, send_buf, ret, &handle, memory_region,
+			HF_MAILBOX_SIZE);
 		struct ffa_composite_memory_region *composite =
 			ffa_memory_region_get_composite(memory_region, 0);
 		struct ffa_memory_region_constituent *constituents;
@@ -579,8 +578,6 @@
 		constituents = composite->constituents;
 		ptr = (uint64_t *)constituents[0].address;
 
-		EXPECT_EQ(ffa_rx_release().func, FFA_SUCCESS_32);
-
 		/*
 		 * Verify that the instruction in memory is the encoded RET
 		 * instruction.
@@ -630,17 +627,16 @@
 		void *recv_buf = SERVICE_RECV_BUFFER();
 		void *send_buf = SERVICE_SEND_BUFFER();
 		struct ffa_value ret = ffa_msg_wait();
-		ffa_vm_id_t sender = retrieve_memory_from_message(
-			recv_buf, send_buf, ret, &handle);
 		struct ffa_memory_region *memory_region =
-			(struct ffa_memory_region *)recv_buf;
+			(struct ffa_memory_region *)retrieve_buffer;
+		ffa_vm_id_t sender = retrieve_memory_from_message(
+			recv_buf, send_buf, ret, &handle, memory_region,
+			HF_MAILBOX_SIZE);
 		struct ffa_composite_memory_region *composite =
 			ffa_memory_region_get_composite(memory_region, 0);
 		struct ffa_memory_region_constituent constituent_copy =
 			composite->constituents[0];
 
-		EXPECT_EQ(ffa_rx_release().func, FFA_SUCCESS_32);
-
 		ptr = (uint8_t *)constituent_copy.address;
 
 		/* Check that we have read access. */
@@ -677,13 +673,12 @@
 	struct ffa_composite_memory_region *composite;
 	struct ffa_memory_region_constituent constituent_copy;
 
-	retrieve_memory_from_message(recv_buf, send_buf, ret, NULL);
-	memory_region = (struct ffa_memory_region *)recv_buf;
+	memory_region = (struct ffa_memory_region *)retrieve_buffer;
+	retrieve_memory_from_message(recv_buf, send_buf, ret, NULL,
+				     memory_region, HF_MAILBOX_SIZE);
 	composite = ffa_memory_region_get_composite(memory_region, 0);
 	constituent_copy = composite->constituents[0];
 
-	EXPECT_EQ(ffa_rx_release().func, FFA_SUCCESS_32);
-
 	ptr = (uint8_t *)constituent_copy.address;
 
 	/* Check that we have read access. */
@@ -700,20 +695,24 @@
 		constituent_copy.address = (uint64_t)ptr + i;
 
 		/* Fail to lend or share the memory from the primary. */
-		msg_size = ffa_memory_region_init(
-			send_buf, HF_PRIMARY_VM_ID, SERVICE_VM2,
-			&constituent_copy, 1, 0, 0, FFA_DATA_ACCESS_RW,
-			FFA_INSTRUCTION_ACCESS_X, FFA_MEMORY_NORMAL_MEM,
-			FFA_MEMORY_CACHE_WRITE_BACK,
-			FFA_MEMORY_OUTER_SHAREABLE);
+		EXPECT_EQ(ffa_memory_region_init(
+				  send_buf, HF_MAILBOX_SIZE, HF_PRIMARY_VM_ID,
+				  SERVICE_VM2, &constituent_copy, 1, 0, 0,
+				  FFA_DATA_ACCESS_RW, FFA_INSTRUCTION_ACCESS_X,
+				  FFA_MEMORY_NORMAL_MEM,
+				  FFA_MEMORY_CACHE_WRITE_BACK,
+				  FFA_MEMORY_OUTER_SHAREABLE, NULL, &msg_size),
+			  0);
 		EXPECT_FFA_ERROR(ffa_mem_lend(msg_size, msg_size),
 				 FFA_INVALID_PARAMETERS);
-		msg_size = ffa_memory_region_init(
-			send_buf, HF_PRIMARY_VM_ID, SERVICE_VM2,
-			&constituent_copy, 1, 0, 0, FFA_DATA_ACCESS_RW,
-			FFA_INSTRUCTION_ACCESS_X, FFA_MEMORY_NORMAL_MEM,
-			FFA_MEMORY_CACHE_WRITE_BACK,
-			FFA_MEMORY_OUTER_SHAREABLE);
+		EXPECT_EQ(ffa_memory_region_init(
+				  send_buf, HF_MAILBOX_SIZE, HF_PRIMARY_VM_ID,
+				  SERVICE_VM2, &constituent_copy, 1, 0, 0,
+				  FFA_DATA_ACCESS_RW, FFA_INSTRUCTION_ACCESS_X,
+				  FFA_MEMORY_NORMAL_MEM,
+				  FFA_MEMORY_CACHE_WRITE_BACK,
+				  FFA_MEMORY_OUTER_SHAREABLE, NULL, &msg_size),
+			  0);
 		EXPECT_FFA_ERROR(ffa_mem_share(msg_size, msg_size),
 				 FFA_INVALID_PARAMETERS);
 	}
diff --git a/test/vmapi/primary_with_secondaries/services/unmapped.c b/test/vmapi/primary_with_secondaries/services/unmapped.c
index ec85310..35a2444 100644
--- a/test/vmapi/primary_with_secondaries/services/unmapped.c
+++ b/test/vmapi/primary_with_secondaries/services/unmapped.c
@@ -43,11 +43,16 @@
 	struct ffa_memory_region_constituent constituents[] = {
 		{.address = (uint64_t)(&pages[PAGE_SIZE]), .page_count = 1},
 	};
-	uint32_t msg_size = ffa_memory_region_init(
-		send_buf, hf_vm_get_id(), HF_PRIMARY_VM_ID, constituents,
-		ARRAY_SIZE(constituents), 0, 0, FFA_DATA_ACCESS_NOT_SPECIFIED,
-		FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED, FFA_MEMORY_NORMAL_MEM,
-		FFA_MEMORY_CACHE_WRITE_BACK, FFA_MEMORY_OUTER_SHAREABLE);
+	uint32_t msg_size;
+	EXPECT_EQ(ffa_memory_region_init(
+			  send_buf, HF_MAILBOX_SIZE, hf_vm_get_id(),
+			  HF_PRIMARY_VM_ID, constituents,
+			  ARRAY_SIZE(constituents), 0, 0,
+			  FFA_DATA_ACCESS_NOT_SPECIFIED,
+			  FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED,
+			  FFA_MEMORY_NORMAL_MEM, FFA_MEMORY_CACHE_WRITE_BACK,
+			  FFA_MEMORY_OUTER_SHAREABLE, NULL, &msg_size),
+		  0);
 	exception_setup(NULL, exception_handler_yield_data_abort);
 
 	EXPECT_EQ(ffa_mem_donate(msg_size, msg_size).func, FFA_SUCCESS_32);
diff --git a/vmlib/ffa.c b/vmlib/ffa.c
index c32b2b6..2040171 100644
--- a/vmlib/ffa.c
+++ b/vmlib/ffa.c
@@ -29,24 +29,15 @@
 #endif
 
 /**
- * Initialises the given `ffa_memory_region` and copies the constituent
- * information to it. Returns the length in bytes occupied by the data copied to
- * `memory_region` (attributes, constituents and memory region header size).
+ * Initialises the header of the given `ffa_memory_region`, not including the
+ * composite memory region offset.
  */
-static uint32_t ffa_memory_region_init_internal(
+static void ffa_memory_region_init_header(
 	struct ffa_memory_region *memory_region, ffa_vm_id_t sender,
 	ffa_memory_attributes_t attributes, ffa_memory_region_flags_t flags,
 	ffa_memory_handle_t handle, uint32_t tag, ffa_vm_id_t receiver,
-	ffa_memory_access_permissions_t permissions,
-	const struct ffa_memory_region_constituent constituents[],
-	uint32_t constituent_count)
+	ffa_memory_access_permissions_t permissions)
 {
-	struct ffa_composite_memory_region *composite_memory_region;
-	uint32_t index;
-	uint32_t constituents_length =
-		constituent_count *
-		sizeof(struct ffa_memory_region_constituent);
-
 	memory_region->sender = sender;
 	memory_region->attributes = attributes;
 	memory_region->reserved_0 = 0;
@@ -59,6 +50,48 @@
 	memory_region->receivers[0].receiver_permissions.permissions =
 		permissions;
 	memory_region->receivers[0].receiver_permissions.flags = 0;
+	memory_region->receivers[0].reserved_0 = 0;
+}
+
+/**
+ * Initialises the given `ffa_memory_region` and copies as many as possible of
+ * the given constituents to it.
+ *
+ * Returns the number of constituents which could not fit in the first
+ * fragment, and (via return parameters) the total size of the memory sharing
+ * message including all constituents, and the size in bytes of the data
+ * actually copied to `memory_region` as the first fragment (memory region
+ * header, attributes and constituents).
+ */
+uint32_t ffa_memory_region_init(
+	struct ffa_memory_region *memory_region, size_t memory_region_max_size,
+	ffa_vm_id_t sender, ffa_vm_id_t receiver,
+	const struct ffa_memory_region_constituent constituents[],
+	uint32_t constituent_count, uint32_t tag,
+	ffa_memory_region_flags_t flags, enum ffa_data_access data_access,
+	enum ffa_instruction_access instruction_access,
+	enum ffa_memory_type type, enum ffa_memory_cacheability cacheability,
+	enum ffa_memory_shareability shareability, uint32_t *total_length,
+	uint32_t *fragment_length)
+{
+	ffa_memory_access_permissions_t permissions = 0;
+	ffa_memory_attributes_t attributes = 0;
+	struct ffa_composite_memory_region *composite_memory_region;
+	uint32_t fragment_max_constituents;
+	uint32_t count_to_copy;
+	uint32_t i;
+	uint32_t constituents_offset;
+
+	/* Set memory region's permissions. */
+	ffa_set_data_access_attr(&permissions, data_access);
+	ffa_set_instruction_access_attr(&permissions, instruction_access);
+
+	/* Set memory region's page attributes. */
+	ffa_set_memory_type_attr(&attributes, type);
+	ffa_set_memory_cacheability_attr(&attributes, cacheability);
+	ffa_set_memory_shareability_attr(&attributes, shareability);
+
+	ffa_memory_region_init_header(memory_region, sender, attributes, flags,
+				      0, tag, receiver, permissions);
 	/*
 	 * Note that `sizeof(struct_ffa_memory_region)` and `sizeof(struct
 	 * ffa_memory_access)` must both be multiples of 16 (as verified by the
@@ -70,63 +103,56 @@
 		sizeof(struct ffa_memory_region) +
 		memory_region->receiver_count *
 			sizeof(struct ffa_memory_access);
-	memory_region->receivers[0].reserved_0 = 0;
 
 	composite_memory_region =
 		ffa_memory_region_get_composite(memory_region, 0);
-
 	composite_memory_region->page_count = 0;
 	composite_memory_region->constituent_count = constituent_count;
 	composite_memory_region->reserved_0 = 0;
 
-	for (index = 0; index < constituent_count; index++) {
-		composite_memory_region->constituents[index] =
-			constituents[index];
-		composite_memory_region->page_count +=
-			constituents[index].page_count;
+	constituents_offset =
+		memory_region->receivers[0].composite_memory_region_offset +
+		sizeof(struct ffa_composite_memory_region);
+	fragment_max_constituents =
+		(memory_region_max_size - constituents_offset) /
+		sizeof(struct ffa_memory_region_constituent);
+
+	count_to_copy = constituent_count;
+	if (count_to_copy > fragment_max_constituents) {
+		count_to_copy = fragment_max_constituents;
 	}
 
-	/*
-	 * TODO: Add assert ensuring that the specified message
-	 * length is not greater than FFA_MSG_PAYLOAD_MAX.
-	 */
+	for (i = 0; i < constituent_count; ++i) {
+		if (i < count_to_copy) {
+			composite_memory_region->constituents[i] =
+				constituents[i];
+		}
+		composite_memory_region->page_count +=
+			constituents[i].page_count;
+	}
 
-	return memory_region->receivers[0].composite_memory_region_offset +
-	       sizeof(struct ffa_composite_memory_region) + constituents_length;
+	if (total_length != NULL) {
+		*total_length =
+			constituents_offset +
+			composite_memory_region->constituent_count *
+				sizeof(struct ffa_memory_region_constituent);
+	}
+	if (fragment_length != NULL) {
+		*fragment_length =
+			constituents_offset +
+			count_to_copy *
+				sizeof(struct ffa_memory_region_constituent);
+	}
+
+	return composite_memory_region->constituent_count - count_to_copy;
 }
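
For orientation only (this sketch is not part of the patch): one way a sender might drive a fragmented share using the return value and output parameters above, together with `ffa_memory_fragment_init` defined later in this file and the `ffa_mem_frag_tx` wrapper added elsewhere in this change. The helper name `share_fragmented`, the chosen attributes, and the assumption that `ffa_mem_share` takes the total and fragment lengths (as the single-fragment test calls suggest) are illustrative, not definitive.

static struct ffa_value share_fragmented(
	void *send_buf, ffa_vm_id_t receiver,
	const struct ffa_memory_region_constituent *constituents,
	uint32_t constituent_count)
{
	uint32_t total_length;
	uint32_t fragment_length;
	/* Copy as many constituents as fit into the first fragment. */
	uint32_t remaining = ffa_memory_region_init(
		send_buf, HF_MAILBOX_SIZE, hf_vm_get_id(), receiver,
		constituents, constituent_count, 0, 0, FFA_DATA_ACCESS_RW,
		FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED, FFA_MEMORY_NORMAL_MEM,
		FFA_MEMORY_CACHE_WRITE_BACK, FFA_MEMORY_OUTER_SHAREABLE,
		&total_length, &fragment_length);
	struct ffa_value ret = ffa_mem_share(total_length, fragment_length);

	while (remaining > 0 && ret.func == FFA_MEM_FRAG_RX_32) {
		/* The handle is packed into w1/w2 of the reply. */
		ffa_memory_handle_t handle =
			(ffa_memory_handle_t)ret.arg1 |
			((ffa_memory_handle_t)ret.arg2 << 32);

		/* Fill the TX buffer with the next batch of constituents. */
		remaining = ffa_memory_fragment_init(
			send_buf, HF_MAILBOX_SIZE,
			&constituents[constituent_count - remaining],
			remaining, &fragment_length);
		ret = ffa_mem_frag_tx(handle, fragment_length);
	}

	/* On success the final reply is expected to be FFA_SUCCESS_32. */
	return ret;
}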
 
 /**
- * Initialises the given `ffa_memory_region` and copies the constituent
- * information to it. Returns the length in bytes occupied by the data copied to
- * `memory_region` (attributes, constituents and memory region header size).
+ * Initialises the given `ffa_memory_region` to be used for an
+ * `FFA_MEM_RETRIEVE_REQ` by the receiver of a memory transaction.
+ *
+ * Returns the size of the message written.
  */
-uint32_t ffa_memory_region_init(
-	struct ffa_memory_region *memory_region, ffa_vm_id_t sender,
-	ffa_vm_id_t receiver,
-	const struct ffa_memory_region_constituent constituents[],
-	uint32_t constituent_count, uint32_t tag,
-	ffa_memory_region_flags_t flags, enum ffa_data_access data_access,
-	enum ffa_instruction_access instruction_access,
-	enum ffa_memory_type type, enum ffa_memory_cacheability cacheability,
-	enum ffa_memory_shareability shareability)
-{
-	ffa_memory_access_permissions_t permissions = 0;
-	ffa_memory_attributes_t attributes = 0;
-
-	/* Set memory region's permissions. */
-	ffa_set_data_access_attr(&permissions, data_access);
-	ffa_set_instruction_access_attr(&permissions, instruction_access);
-
-	/* Set memory region's page attributes. */
-	ffa_set_memory_type_attr(&attributes, type);
-	ffa_set_memory_cacheability_attr(&attributes, cacheability);
-	ffa_set_memory_shareability_attr(&attributes, shareability);
-
-	return ffa_memory_region_init_internal(
-		memory_region, sender, attributes, flags, 0, tag, receiver,
-		permissions, constituents, constituent_count);
-}
-
 uint32_t ffa_memory_retrieve_request_init(
 	struct ffa_memory_region *memory_region, ffa_memory_handle_t handle,
 	ffa_vm_id_t sender, ffa_vm_id_t receiver, uint32_t tag,
@@ -147,18 +173,8 @@
 	ffa_set_memory_cacheability_attr(&attributes, cacheability);
 	ffa_set_memory_shareability_attr(&attributes, shareability);
 
-	memory_region->sender = sender;
-	memory_region->attributes = attributes;
-	memory_region->reserved_0 = 0;
-	memory_region->flags = flags;
-	memory_region->reserved_1 = 0;
-	memory_region->handle = handle;
-	memory_region->tag = tag;
-	memory_region->receiver_count = 1;
-	memory_region->receivers[0].receiver_permissions.receiver = receiver;
-	memory_region->receivers[0].receiver_permissions.permissions =
-		permissions;
-	memory_region->receivers[0].receiver_permissions.flags = 0;
+	ffa_memory_region_init_header(memory_region, sender, attributes, flags,
+				      handle, tag, receiver, permissions);
 	/*
 	 * Offset 0 in this case means that the hypervisor should allocate the
 	 * address ranges. This is the only configuration supported by Hafnium,
@@ -171,6 +187,12 @@
 	       memory_region->receiver_count * sizeof(struct ffa_memory_access);
 }
 
+/**
+ * Initialises the given `ffa_memory_region` to be used for an
+ * `FFA_MEM_RETRIEVE_REQ` from the hypervisor to the TEE.
+ *
+ * Returns the size of the message written.
+ */
 uint32_t ffa_memory_lender_retrieve_request_init(
 	struct ffa_memory_region *memory_region, ffa_memory_handle_t handle,
 	ffa_vm_id_t sender)
@@ -187,16 +209,100 @@
 	return sizeof(struct ffa_memory_region);
 }
 
-uint32_t ffa_retrieved_memory_region_init(
+/**
+ * Initialises the given `ffa_memory_region` to be used for an
+ * `FFA_MEM_RETRIEVE_RESP`, including the given constituents for the first
+ * fragment.
+ *
+ * Returns true on success, or false if the given constituents won't all fit in
+ * the first fragment.
+ */
+bool ffa_retrieved_memory_region_init(
 	struct ffa_memory_region *response, size_t response_max_size,
 	ffa_vm_id_t sender, ffa_memory_attributes_t attributes,
 	ffa_memory_region_flags_t flags, ffa_memory_handle_t handle,
 	ffa_vm_id_t receiver, ffa_memory_access_permissions_t permissions,
+	uint32_t page_count, uint32_t total_constituent_count,
 	const struct ffa_memory_region_constituent constituents[],
-	uint32_t constituent_count)
+	uint32_t fragment_constituent_count, uint32_t *total_length,
+	uint32_t *fragment_length)
 {
-	/* TODO: Check against response_max_size first. */
-	return ffa_memory_region_init_internal(
-		response, sender, attributes, flags, handle, 0, receiver,
-		permissions, constituents, constituent_count);
+	struct ffa_composite_memory_region *composite_memory_region;
+	uint32_t i;
+	uint32_t constituents_offset;
+
+	ffa_memory_region_init_header(response, sender, attributes, flags,
+				      handle, 0, receiver, permissions);
+	/*
+	 * Note that `sizeof(struct ffa_memory_region)` and `sizeof(struct
+	 * ffa_memory_access)` must both be multiples of 16 (as verified by the
+	 * asserts in `ffa_memory.c`), so it is guaranteed that the offset we
+	 * calculate here is aligned to a 64-bit boundary and so 64-bit values
+	 * can be copied without alignment faults.
+	 */
+	response->receivers[0].composite_memory_region_offset =
+		sizeof(struct ffa_memory_region) +
+		response->receiver_count * sizeof(struct ffa_memory_access);
+
+	composite_memory_region = ffa_memory_region_get_composite(response, 0);
+	composite_memory_region->page_count = page_count;
+	composite_memory_region->constituent_count = total_constituent_count;
+	composite_memory_region->reserved_0 = 0;
+
+	constituents_offset =
+		response->receivers[0].composite_memory_region_offset +
+		sizeof(struct ffa_composite_memory_region);
+	if (constituents_offset +
+		    fragment_constituent_count *
+			    sizeof(struct ffa_memory_region_constituent) >
+	    response_max_size) {
+		return false;
+	}
+
+	for (i = 0; i < fragment_constituent_count; ++i) {
+		composite_memory_region->constituents[i] = constituents[i];
+	}
+
+	if (total_length != NULL) {
+		*total_length =
+			constituents_offset +
+			composite_memory_region->constituent_count *
+				sizeof(struct ffa_memory_region_constituent);
+	}
+	if (fragment_length != NULL) {
+		*fragment_length =
+			constituents_offset +
+			fragment_constituent_count *
+				sizeof(struct ffa_memory_region_constituent);
+	}
+
+	return true;
+}
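
A minimal illustrative fragment (again not part of the patch) of how the hypervisor's retrieve path might use this when answering an `FFA_MEM_RETRIEVE_REQ`; every lower-case identifier other than the function and macro names is a placeholder for state that path would already hold.

	uint32_t total_length;
	uint32_t fragment_length;

	/* Write the header plus the first `fragment_count` constituents. */
	CHECK(ffa_retrieved_memory_region_init(
		response_buf, HF_MAILBOX_SIZE, sender_vm_id, attributes, 0,
		handle, receiver_vm_id, permissions, page_count,
		constituent_count, constituents, fragment_count,
		&total_length, &fragment_length));

	/*
	 * The FFA_MEM_RETRIEVE_RESP then carries total_length and
	 * fragment_length; when they differ, the receiver fetches the
	 * remaining constituents with FFA_MEM_FRAG_RX.
	 */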
+
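+/**
+ * Initialises the given fragment buffer with as many of the given
+ * constituents as will fit in `fragment_max_size` bytes.
+ *
+ * Returns the number of constituents which could not fit, and (via return
+ * parameter) the length in bytes of the fragment which was written.
+ */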
+uint32_t ffa_memory_fragment_init(
+	struct ffa_memory_region_constituent *fragment,
+	size_t fragment_max_size,
+	const struct ffa_memory_region_constituent constituents[],
+	uint32_t constituent_count, uint32_t *fragment_length)
+{
+	uint32_t fragment_max_constituents =
+		fragment_max_size /
+		sizeof(struct ffa_memory_region_constituent);
+	uint32_t count_to_copy = constituent_count;
+	uint32_t i;
+
+	if (count_to_copy > fragment_max_constituents) {
+		count_to_copy = fragment_max_constituents;
+	}
+
+	for (i = 0; i < count_to_copy; ++i) {
+		fragment[i] = constituents[i];
+	}
+
+	if (fragment_length != NULL) {
+		*fragment_length = count_to_copy *
+				   sizeof(struct ffa_memory_region_constituent);
+	}
+
+	return constituent_count - count_to_copy;
 }
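
As a closing note on the service-side changes earlier in this patch: the test services now copy retrieved descriptors into `retrieve_buffer` and no longer release the RX buffer themselves, because the updated `retrieve_memory_from_message` helper (defined in a test header outside this diff) is expected to reassemble possibly fragmented retrieve responses and release the RX buffer after each fragment. A hypothetical sketch of such a reassembly loop, assuming the register assignments of the FF-A specification (FFA_MEM_RETRIEVE_RESP reports the total and fragment lengths in w1/w2, and each subsequent FFA_MEM_FRAG_TX reply reports its fragment length in w3):

static void gather_fragments(struct ffa_value retrieve_resp, void *recv_buf,
			     ffa_memory_handle_t handle, uint8_t *buffer,
			     size_t buffer_size)
{
	uint32_t total_length = (uint32_t)retrieve_resp.arg1;
	uint32_t fragment_length = (uint32_t)retrieve_resp.arg2;
	uint32_t offset = 0;

	CHECK(total_length <= buffer_size);

	for (;;) {
		uint32_t i;

		/* Copy this fragment out of the RX buffer, then release it. */
		for (i = 0; i < fragment_length; ++i) {
			buffer[offset + i] = ((uint8_t *)recv_buf)[i];
		}
		offset += fragment_length;
		EXPECT_EQ(ffa_rx_release().func, FFA_SUCCESS_32);

		if (offset >= total_length) {
			break;
		}

		/* Ask the hypervisor for the next fragment. */
		retrieve_resp = ffa_mem_frag_rx(handle, offset);
		EXPECT_EQ(retrieve_resp.func, FFA_MEM_FRAG_TX_32);
		fragment_length = (uint32_t)retrieve_resp.arg3;
	}
}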