SPCI: Donate memory architected message.

The donate mechanism accepts a single memory region, as per the spec, but
restricts the number of constituents of this region to 1.
Multiple constituents will be introduced in a later commit.
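
For example, a VM can construct the donate message in its TX buffer and
send it with spci_msg_send (a minimal sketch mirroring the new
memory_sharing test below; mailbox setup is elided, and page, send_buf
and the VM IDs are placeholders):

	/* One constituent: a single page owned by the sender. */
	struct spci_memory_region_constituent constituents[] = {
		{.address = (uint64_t)page, .page_count = 1},
	};

	/* Single memory region, single constituent, handle 0. */
	spci_memory_donate(send_buf, target_vm_id, source_vm_id,
			   constituents, 1, 0);
	spci_msg_send(0);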

Change-Id: I7af9b80068060aedb953d3e204fa3e03c9ccc438
diff --git a/inc/hf/api.h b/inc/hf/api.h
index 43497bf..af21d00 100644
--- a/inc/hf/api.h
+++ b/inc/hf/api.h
@@ -52,9 +52,14 @@
 			     spci_vcpu_index_t target_vcpu_idx, uint32_t intid,
 			     struct vcpu *current, struct vcpu **next);
 
-int32_t api_spci_msg_send(uint32_t attributes, struct vcpu *current,
-			  struct vcpu **next);
+spci_return_t api_spci_msg_send(uint32_t attributes, struct vcpu *current,
+				struct vcpu **next);
 int32_t api_spci_msg_recv(uint32_t attributes, struct vcpu *current,
 			  struct vcpu **next);
 int32_t api_spci_yield(struct vcpu *current, struct vcpu **next);
 int32_t api_spci_version(void);
+spci_return_t api_spci_share_memory(struct vm_locked to_locked,
+				    struct vm_locked from_locked,
+				    struct spci_memory_region *memory_region,
+				    uint32_t memory_to_attributes,
+				    enum spci_memory_share share);
diff --git a/inc/hf/spci_internal.h b/inc/hf/spci_internal.h
index 441b424..78477a3 100644
--- a/inc/hf/spci_internal.h
+++ b/inc/hf/spci_internal.h
@@ -16,7 +16,31 @@
 
 #pragma once
 
+#include "hf/addr.h"
+#include "hf/vm.h"
+
+#include "vmapi/hf/spci.h"
+
 #define SPCI_VERSION_MAJOR 0x0
 #define SPCI_VERSION_MINOR 0x9
 
 #define SPCI_VERSION_MAJOR_OFFSET 16
+
+struct spci_mem_transitions {
+	int orig_from_mode;
+	int orig_to_mode;
+	int from_mode;
+	int to_mode;
+};
+
+spci_return_t spci_msg_handle_architected_message(
+	struct vm_locked to_locked, struct vm_locked from_locked,
+	const struct spci_architected_message_header
+		*architected_message_replica,
+	struct spci_message *from_msg_replica, struct spci_message *to_msg);
+
+bool spci_msg_check_transition(struct vm *to, struct vm *from,
+			       enum spci_memory_share share,
+			       int *orig_from_mode, ipaddr_t begin,
+			       ipaddr_t end, uint32_t memory_to_attributes,
+			       int *from_mode, int *to_mode);
diff --git a/inc/hf/vm.h b/inc/hf/vm.h
index c86da21..8930c11 100644
--- a/inc/hf/vm.h
+++ b/inc/hf/vm.h
@@ -24,7 +24,8 @@
 #include "hf/list.h"
 #include "hf/mm.h"
 #include "hf/mpool.h"
-#include "hf/spci.h"
+
+#include "vmapi/hf/spci.h"
 
 enum mailbox_state {
 	/** There is no message in the mailbox. */
@@ -97,10 +98,17 @@
 	struct vm *vm;
 };
 
+/** Container for two vm_locked structures. */
+struct two_vm_locked {
+	struct vm_locked vm1;
+	struct vm_locked vm2;
+};
+
 bool vm_init(spci_vcpu_count_t vcpu_count, struct mpool *ppool,
 	     struct vm **new_vm);
 spci_vm_count_t vm_get_count(void);
 struct vm *vm_find(spci_vm_id_t id);
 struct vm_locked vm_lock(struct vm *vm);
+struct two_vm_locked vm_lock_both(struct vm *vm1, struct vm *vm2);
 void vm_unlock(struct vm_locked *locked);
 struct vcpu *vm_get_vcpu(struct vm *vm, spci_vcpu_index_t vcpu_index);
diff --git a/inc/vmapi/hf/spci.h b/inc/vmapi/hf/spci.h
index e55ea97..cad5e35 100644
--- a/inc/vmapi/hf/spci.h
+++ b/inc/vmapi/hf/spci.h
@@ -14,10 +14,10 @@
  * limitations under the License.
  */
 
-#include "hf/types.h"
-
 #pragma once
 
+#include "hf/types.h"
+
 /* clang-format off */
 
 #define SPCI_LOW_32_ID  0x84000060
@@ -46,10 +46,17 @@
 /* TODO: return code currently undefined in SPCI alpha2. */
 #define SPCI_RETRY              INT32_C(-7)
 
+/* Architected memory sharing message IDs. */
+enum spci_memory_share {
+	SPCI_MEMORY_DONATE = 0x2,
+};
+
 /* SPCI function specific constants. */
 #define SPCI_MSG_RECV_BLOCK_MASK  0x1
 #define SPCI_MSG_SEND_NOTIFY_MASK 0x1
 
+#define SPCI_MESSAGE_ARCHITECTED 0x0
+#define SPCI_MESSAGE_IMPDEF      0x1
 #define SPCI_MESSAGE_IMPDEF_MASK 0x1
 
 #define SPCI_MSG_SEND_NOTIFY 0x1
@@ -62,6 +69,7 @@
 
 /** The ID of a VM. These are assigned sequentially. */
 typedef uint16_t spci_vm_id_t;
+typedef uint32_t spci_memory_handle_t;
 
 /**
  * A count of VMs. This has the same range as the VM IDs but we give it a
@@ -78,6 +86,10 @@
  */
 typedef spci_vcpu_index_t spci_vcpu_count_t;
 
+/** Return type of SPCI functions. */
+/* TODO: Reuse the spci_return_t type in all SPCI function declarations. */
+typedef int32_t spci_return_t;
+
 /** SPCI common message header. */
 struct spci_message {
 	/*
@@ -115,15 +127,41 @@
 	uint8_t payload[];
 };
 
+struct spci_architected_message_header {
+	uint16_t type;
+
+	/*
+	 * TODO: Padding is present to ensure that the payload
+	 * field is aligned on a 64-bit boundary. The SPCI
+	 * spec must be updated to reflect this.
+	 */
+	uint16_t reserved[3];
+	uint8_t payload[];
+};
+
+struct spci_memory_region_constituent {
+	uint64_t address;
+	uint32_t page_count;
+
+	uint32_t reserved;
+};
+
+struct spci_memory_region {
+	spci_memory_handle_t handle;
+	uint32_t count;
+
+	struct spci_memory_region_constituent constituents[];
+};
+
+/* TODO: Move all the functions below this line to a support library. */
 /**
- * Set the SPCI common message header fields.
+ * Fill all the fields, except for the flags, in the SPCI message common header.
  */
-static inline void spci_message_init(struct spci_message *message,
-				     uint32_t message_length,
-				     spci_vm_id_t target_vm_id,
-				     spci_vm_id_t source_vm_id)
+static inline void spci_common_header_init(struct spci_message *message,
+					   uint32_t message_length,
+					   spci_vm_id_t target_vm_id,
+					   spci_vm_id_t source_vm_id)
 {
-	message->flags = SPCI_MESSAGE_IMPDEF_MASK;
 	message->length = message_length;
 	message->target_vm_id = target_vm_id;
 	message->source_vm_id = source_vm_id;
@@ -135,3 +173,118 @@
 	message->reserved_1 = 0;
 	message->reserved_2 = 0;
 }
+
+/**
+ * Set the SPCI implementation defined message header fields.
+ */
+static inline void spci_message_init(struct spci_message *message,
+				     uint32_t message_length,
+				     spci_vm_id_t target_vm_id,
+				     spci_vm_id_t source_vm_id)
+{
+	spci_common_header_init(message, message_length, target_vm_id,
+				source_vm_id);
+
+	message->flags = SPCI_MESSAGE_IMPDEF;
+}
+
+/**
+ * Obtain a pointer to the architected header in the spci_message.
+ *
+ * Note: the argument "message" has a const qualifier. This qualifier
+ * is meant to forbid changes to the information enclosed in the
+ * struct spci_message. The spci_architected_message_header, to which
+ * this function returns a pointer, is not part of spci_message.
+ * Its information is meant to be changed and hence the returned pointer
+ * does not have a const qualifier.
+ */
+static inline struct spci_architected_message_header *
+spci_get_architected_message_header(const struct spci_message *message)
+{
+	return (struct spci_architected_message_header *)message->payload;
+}
+
+/**
+ * Helper method to fill in the information about the architected message.
+ */
+static inline void spci_architected_message_init(struct spci_message *message,
+						 uint32_t message_length,
+						 spci_vm_id_t target_vm_id,
+						 spci_vm_id_t source_vm_id,
+						 enum spci_memory_share type)
+{
+	struct spci_architected_message_header *architected_header;
+
+	spci_common_header_init(message, message_length, target_vm_id,
+				source_vm_id);
+	message->flags = SPCI_MESSAGE_ARCHITECTED;
+
+	/* Fill the architected header. */
+	architected_header = spci_get_architected_message_header(message);
+	architected_header->type = type;
+	architected_header->reserved[0] = 0;
+	architected_header->reserved[1] = 0;
+	architected_header->reserved[2] = 0;
+}
+
+/** Obtain a pointer to the start of the memory region in the donate message. */
+static inline struct spci_memory_region *spci_get_donated_memory_region(
+	struct spci_message *message)
+{
+	struct spci_architected_message_header *architected_header =
+		spci_get_architected_message_header(message);
+	return (struct spci_memory_region *)architected_header->payload;
+}
+
+/**
+ * Add a memory region to the current message.
+ * A memory region is composed of one or more constituents.
+ */
+static inline void spci_memory_region_add(
+	struct spci_message *message, spci_memory_handle_t handle,
+	const struct spci_memory_region_constituent constituents[],
+	uint32_t num_constituents)
+{
+	struct spci_memory_region *memory_region =
+		spci_get_donated_memory_region(message);
+
+	uint32_t constituents_length =
+		num_constituents *
+		sizeof(struct spci_memory_region_constituent);
+	uint32_t index;
+
+	memory_region->handle = handle;
+	memory_region->count = num_constituents;
+
+	for (index = 0; index < num_constituents; index++) {
+		memory_region->constituents[index] = constituents[index];
+		memory_region->constituents[index].reserved = 0;
+	}
+
+	/*
+	 * TODO: Add assert ensuring that the specified message
+	 * length is not greater than SPCI_MSG_PAYLOAD_MAX.
+	 */
+	message->length +=
+		sizeof(struct spci_memory_region) + constituents_length;
+}
+
+/** Construct the SPCI donate memory region message. */
+static inline void spci_memory_donate(
+	struct spci_message *message, spci_vm_id_t target_vm_id,
+	spci_vm_id_t source_vm_id,
+	struct spci_memory_region_constituent *region_constituents,
+	uint32_t num_elements, uint32_t handle)
+{
+	int32_t message_length;
+
+	message_length = sizeof(struct spci_architected_message_header);
+
+	/* Fill in the details on the common message header. */
+	spci_architected_message_init(message, message_length, target_vm_id,
+				      source_vm_id, SPCI_MEMORY_DONATE);
+
+	/* Create single memory region. */
+	spci_memory_region_add(message, handle, region_constituents,
+			       num_elements);
+}
diff --git a/src/BUILD.gn b/src/BUILD.gn
index 045d1f3..f5108ec 100644
--- a/src/BUILD.gn
+++ b/src/BUILD.gn
@@ -53,6 +53,7 @@
     "mm.c",
     "mpool.c",
     "panic.c",
+    "spci_architected_message.c",
     "vm.c",
   ]
 
diff --git a/src/api.c b/src/api.c
index bb954a6..c32446f 100644
--- a/src/api.c
+++ b/src/api.c
@@ -832,11 +832,14 @@
  * If the recipient's receive buffer is busy, it can optionally register the
  * caller to be notified when the recipient's receive buffer becomes available.
  */
-int32_t api_spci_msg_send(uint32_t attributes, struct vcpu *current,
-			  struct vcpu **next)
+spci_return_t api_spci_msg_send(uint32_t attributes, struct vcpu *current,
+				struct vcpu **next)
 {
 	struct vm *from = current->vm;
 	struct vm *to;
+
+	struct two_vm_locked vm_from_to_lock;
+
 	struct hf_vcpu_run_return primary_ret = {
 		.code = HF_VCPU_RUN_MESSAGE,
 	};
@@ -891,7 +894,15 @@
 		return SPCI_INVALID_PARAMETERS;
 	}
 
-	sl_lock(&to->lock);
+	/*
+	 * Hf needs to hold the lock on <to> before the mailbox state is
+	 * checked. The lock on <to> must be held until the information is
+	 * copied to the <to> Rx buffer. Since
+	 * spci_msg_handle_architected_message may call api_spci_share_memory,
+	 * which requires the <from> lock, both locks must be acquired here to
+	 * prevent a deadlock scenario.
+	 */
+	vm_from_to_lock = vm_lock_both(to, from);
 
 	if (to->mailbox.state != MAILBOX_STATE_EMPTY ||
 	    to->mailbox.recv == NULL) {
@@ -915,11 +926,68 @@
 		goto out;
 	}
 
-	/* Copy data. */
 	to_msg = to->mailbox.recv;
-	*to_msg = from_msg_replica;
-	memcpy_s(to_msg->payload, SPCI_MSG_PAYLOAD_MAX,
-		 from->mailbox.send->payload, size);
+
+	/* Handle architected messages. */
+	if ((from_msg_replica.flags & SPCI_MESSAGE_IMPDEF_MASK) !=
+	    SPCI_MESSAGE_IMPDEF) {
+		/*
+		 * Buffer holding the internal copy of the shared memory
+		 * regions.
+		 */
+		/* TODO: Buffer is temporarily in the stack. */
+		uint8_t message_buffer
+			[sizeof(struct spci_architected_message_header) +
+			 sizeof(struct spci_memory_region_constituent) +
+			 sizeof(struct spci_memory_region)];
+
+		struct spci_architected_message_header *architected_header =
+			spci_get_architected_message_header(from->mailbox.send);
+
+		const struct spci_architected_message_header
+			*architected_message_replica;
+
+		if (from_msg_replica.length > sizeof(message_buffer)) {
+			ret = SPCI_INVALID_PARAMETERS;
+			goto out;
+		}
+
+		if (from_msg_replica.length <
+		    sizeof(struct spci_architected_message_header)) {
+			ret = SPCI_INVALID_PARAMETERS;
+			goto out;
+		}
+
+		/* Copy the architected message into an internal buffer. */
+		memcpy_s(message_buffer, sizeof(message_buffer),
+			 architected_header, from_msg_replica.length);
+
+		architected_message_replica =
+			(struct spci_architected_message_header *)
+				message_buffer;
+
+		/*
+		 * Note that message_buffer is passed as the third parameter to
+		 * spci_msg_handle_architected_message. The execution flow
+		 * commencing at spci_msg_handle_architected_message will make
+		 * several accesses to fields in message_buffer. The memory area
+		 * message_buffer must be exclusively owned by Hf so that TOCTOU
+		 * issues do not arise.
+		 */
+		ret = spci_msg_handle_architected_message(
+			vm_from_to_lock.vm1, vm_from_to_lock.vm2,
+			architected_message_replica, &from_msg_replica, to_msg);
+
+		if (ret != SPCI_SUCCESS) {
+			goto out;
+		}
+	} else {
+		/* Copy data. */
+		memcpy_s(to_msg->payload, SPCI_MSG_PAYLOAD_MAX,
+			 from->mailbox.send->payload, size);
+		*to_msg = from_msg_replica;
+	}
+
 	primary_ret.message.vm_id = to->id;
 	ret = SPCI_SUCCESS;
 
@@ -940,7 +1008,8 @@
 	}
 
 out:
-	sl_unlock(&to->lock);
+	vm_unlock(&vm_from_to_lock.vm1);
+	vm_unlock(&vm_from_to_lock.vm2);
 
 	return ret;
 }
@@ -1313,6 +1382,108 @@
 	return ret;
 }
 
+/** TODO: Move function to spci_architected_message.c. */
+/**
+ * Shares memory from the calling VM with another. The memory can be shared in
+ * different modes.
+ *
+ * This function requires the calling context to hold the <to> and <from> locks.
+ *
+ * Returns:
+ *  In case of error one of the following values is returned:
+ *   1) SPCI_INVALID_PARAMETERS - The parameters provided by the endpoint
+ *     were erroneous;
+ *   2) SPCI_NO_MEMORY - Hf did not have sufficient memory to complete
+ *     the request.
+ *  Success is indicated by SPCI_SUCCESS.
+ */
+spci_return_t api_spci_share_memory(struct vm_locked to_locked,
+				    struct vm_locked from_locked,
+				    struct spci_memory_region *memory_region,
+				    uint32_t memory_to_attributes,
+				    enum spci_memory_share share)
+{
+	struct vm *to = to_locked.vm;
+	struct vm *from = from_locked.vm;
+	int orig_from_mode;
+	int from_mode;
+	int to_mode;
+	struct mpool local_page_pool;
+	int64_t ret;
+	paddr_t pa_begin;
+	paddr_t pa_end;
+	ipaddr_t begin;
+	ipaddr_t end;
+
+	size_t size;
+
+	/* Disallow reflexive shares as this suggests an error in the VM. */
+	if (to == from) {
+		return SPCI_INVALID_PARAMETERS;
+	}
+
+	/*
+	 * Create a local pool so any freed memory can't be used by another
+	 * thread. This is to ensure the original mapping can be restored if any
+	 * stage of the process fails.
+	 */
+	mpool_init_with_fallback(&local_page_pool, &api_page_pool);
+
+	/* Obtain the single contiguous set of pages from the memory_region. */
+	/* TODO: Add support for multiple constituent regions. */
+	size = memory_region->constituents[0].page_count * PAGE_SIZE;
+	begin = ipa_init(memory_region->constituents[0].address);
+	end = ipa_add(begin, size);
+
+	/*
+	 * Check if the state transition is lawful for both VMs involved
+	 * in the memory exchange, and ensure that all constituents of a
+	 * memory region being shared are in the same state.
+	 */
+	if (!spci_msg_check_transition(to, from, share, &orig_from_mode, begin,
+				       end, memory_to_attributes, &from_mode,
+				       &to_mode)) {
+		return SPCI_INVALID_PARAMETERS;
+	}
+
+	pa_begin = pa_from_ipa(begin);
+	pa_end = pa_from_ipa(end);
+
+	/*
+	 * First update the mapping for the sender so that there is no overlap
+	 * with the recipient.
+	 */
+	if (!mm_vm_identity_map(&from->ptable, pa_begin, pa_end, from_mode,
+				NULL, &local_page_pool)) {
+		ret = SPCI_NO_MEMORY;
+		goto out;
+	}
+
+	/* Complete the transfer by mapping the memory into the recipient. */
+	if (!mm_vm_identity_map(&to->ptable, pa_begin, pa_end, to_mode, NULL,
+				&local_page_pool)) {
+		/* TODO: partial defrag of failed range. */
+		/* Recover any memory consumed in failed mapping. */
+		mm_vm_defrag(&from->ptable, &local_page_pool);
+
+		ret = SPCI_NO_MEMORY;
+
+		CHECK(mm_vm_identity_map(&from->ptable, pa_begin, pa_end,
+					 orig_from_mode, NULL,
+					 &local_page_pool));
+
+		goto out;
+	}
+
+	ret = SPCI_SUCCESS;
+
+out:
+
+	mpool_fini(&local_page_pool);
+
+	return ret;
+}
+
 /**
  * Shares memory from the calling VM with another. The memory can be shared in
  * different modes.
diff --git a/src/spci_architected_message.c b/src/spci_architected_message.c
new file mode 100644
index 0000000..0905531
--- /dev/null
+++ b/src/spci_architected_message.c
@@ -0,0 +1,243 @@
+/*
+ * Copyright 2019 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "hf/api.h"
+#include "hf/dlog.h"
+#include "hf/spci_internal.h"
+#include "hf/std.h"
+
+/**
+ * Check if the message length and the number of memory region constituents
+ * match; if they do, call the memory sharing routine.
+ */
+static spci_return_t spci_validate_call_share_memory(
+	struct vm_locked to_locked, struct vm_locked from_locked,
+	struct spci_memory_region *memory_region, uint32_t memory_share_size,
+	uint32_t memory_to_attributes, enum spci_memory_share share)
+{
+	uint32_t max_count = memory_region->count;
+
+	/*
+	 * Ensure the number of constituents is consistent with the
+	 * message size.
+	 */
+	if (memory_share_size !=
+	    sizeof(struct spci_memory_region) +
+		    (sizeof(struct spci_memory_region_constituent) *
+		     max_count)) {
+		return SPCI_INVALID_PARAMETERS;
+	}
+
+	return api_spci_share_memory(to_locked, from_locked, memory_region,
+				     memory_to_attributes, share);
+}
+
+/**
+ * Performs initial architected message information parsing. Calls the
+ * corresponding api functions implementing the functionality requested
+ * in the architected message.
+ */
+spci_return_t spci_msg_handle_architected_message(
+	struct vm_locked to_locked, struct vm_locked from_locked,
+	const struct spci_architected_message_header
+		*architected_message_replica,
+	struct spci_message *from_msg_replica, struct spci_message *to_msg)
+{
+	int64_t ret;
+	struct spci_memory_region *memory_region;
+	uint32_t to_mode;
+	uint32_t message_type;
+	uint32_t memory_share_size;
+
+	message_type = architected_message_replica->type;
+
+	switch (message_type) {
+	case SPCI_MEMORY_DONATE:
+		memory_region = (struct spci_memory_region *)
+					architected_message_replica->payload;
+
+		memory_share_size =
+			from_msg_replica->length -
+			sizeof(struct spci_architected_message_header);
+
+		/* TODO: Add memory attributes. */
+		to_mode = MM_MODE_R | MM_MODE_W | MM_MODE_X;
+
+		ret = spci_validate_call_share_memory(
+			to_locked, from_locked, memory_region,
+			memory_share_size, to_mode, message_type);
+		break;
+
+	default:
+		dlog("Invalid memory sharing message.\n");
+		return SPCI_INVALID_PARAMETERS;
+	}
+
+	/* Copy data to the destination Rx. */
+	/*
+	 * TODO: Translate the <from> IPA addresses to <to> IPA addresses.
+	 * Currently we assume identity mapping of the stage 2 translation.
+	 * Removing this assumption relies on a mechanism to handle scenarios
+	 * where the memory region fits in the source Tx buffer but cannot fit
+	 * in the destination Rx buffer. This mechanism will be defined at the
+	 * spec level.
+	 */
+	if (ret == SPCI_SUCCESS) {
+		memcpy_s(to_msg->payload, SPCI_MSG_PAYLOAD_MAX,
+			 architected_message_replica, from_msg_replica->length);
+	}
+	*to_msg = *from_msg_replica;
+
+	return ret;
+}
+
+/**
+ * Obtain the next mode to apply to the two VMs.
+ *
+ * Returns:
+ *  false indicates that a state transition was not found.
+ *  Success is indicated by true.
+ */
+static bool spci_msg_get_next_state(
+	const struct spci_mem_transitions *transitions,
+	uint32_t transition_count, uint32_t memory_to_attributes,
+	int orig_from_mode, int orig_to_mode, int *from_mode, int *to_mode)
+{
+	const uint32_t state_mask =
+		MM_MODE_INVALID | MM_MODE_UNOWNED | MM_MODE_SHARED;
+	const uint32_t orig_from_state = orig_from_mode & state_mask;
+
+	for (uint32_t index = 0; index < transition_count; index++) {
+		uint32_t table_orig_from_mode =
+			transitions[index].orig_from_mode;
+		uint32_t table_orig_to_mode = transitions[index].orig_to_mode;
+
+		if (((orig_from_state) == table_orig_from_mode) &&
+		    ((orig_to_mode & state_mask) == table_orig_to_mode)) {
+			*to_mode = transitions[index].to_mode |
+				   memory_to_attributes;
+			/*
+			 * TODO: Change access permission assignment to cater
+			 * for the lend case.
+			 */
+			*from_mode = transitions[index].from_mode;
+
+			return true;
+		}
+	}
+	return false;
+}
+
+/**
+ * Verify that all pages have the same mode, that the starting mode
+ * constitutes a valid state, and obtain the next mode to apply
+ * to the two VMs.
+ *
+ * Returns:
+ *  false indicates that:
+ *   1) a state transition was not found;
+ *   2) the pages being shared do not have the same mode within the <to>
+ *     or <from> VMs;
+ *   3) the beginning and end IPAs are not page-aligned;
+ *   4) the requested share type was not handled.
+ *  Success is indicated by true.
+ *
+ */
+bool spci_msg_check_transition(struct vm *to, struct vm *from,
+			       enum spci_memory_share share,
+			       int *orig_from_mode, ipaddr_t begin,
+			       ipaddr_t end, uint32_t memory_to_attributes,
+			       int *from_mode, int *to_mode)
+{
+	int orig_to_mode;
+	const struct spci_mem_transitions *mem_transition_table;
+	uint32_t transition_table_size;
+
+	/*
+	 * TODO: Transition table does not currently consider the multiple
+	 * shared case.
+	 */
+	static const struct spci_mem_transitions donate_transitions[] = {
+		{
+			/* 1) {O-EA, !O-NA} -> {!O-NA, O-EA} */
+			.orig_from_mode = 0,
+			.orig_to_mode = MM_MODE_INVALID | MM_MODE_UNOWNED,
+			.from_mode = MM_MODE_INVALID | MM_MODE_UNOWNED,
+			.to_mode = 0,
+		},
+		{
+			/* 2) {O-NA, !O-EA} -> {!O-NA, O-EA} */
+			.orig_from_mode = MM_MODE_INVALID,
+			.orig_to_mode = MM_MODE_UNOWNED,
+			.from_mode = MM_MODE_INVALID | MM_MODE_UNOWNED,
+			.to_mode = 0,
+		},
+		{
+			/* 3) {O-SA, !O-SA} -> {!O-NA, O-EA} */
+			.orig_from_mode = MM_MODE_SHARED,
+			.orig_to_mode = MM_MODE_UNOWNED | MM_MODE_SHARED,
+			.from_mode = MM_MODE_INVALID | MM_MODE_UNOWNED,
+			.to_mode = 0,
+		},
+		{
+			/*
+			 * Duplicate of 1) in order to cater for an alternative
+			 * representation of !O-NA:
+			 * (INVALID | UNOWNED | SHARED) and (INVALID | UNOWNED)
+			 * are both alternate representations of !O-NA.
+			 */
+			/* 4) {O-EA, !O-NA} -> {!O-NA, O-EA} */
+			.orig_from_mode = 0,
+			.orig_to_mode = MM_MODE_INVALID | MM_MODE_UNOWNED |
+					MM_MODE_SHARED,
+			.from_mode = MM_MODE_INVALID | MM_MODE_UNOWNED |
+				     MM_MODE_SHARED,
+			.to_mode = 0,
+		},
+	};
+	static const uint32_t size_donate_transitions =
+		ARRAY_SIZE(donate_transitions);
+
+	/* Fail if addresses are not page-aligned. */
+	if (!is_aligned(ipa_addr(begin), PAGE_SIZE) ||
+	    !is_aligned(ipa_addr(end), PAGE_SIZE)) {
+		return false;
+	}
+
+	/*
+	 * Ensure that the memory range is mapped with the same
+	 * mode.
+	 */
+	if (!mm_vm_get_mode(&from->ptable, begin, end, orig_from_mode) ||
+	    !mm_vm_get_mode(&to->ptable, begin, end, &orig_to_mode)) {
+		return false;
+	}
+
+	switch (share) {
+	case SPCI_MEMORY_DONATE:
+		mem_transition_table = donate_transitions;
+		transition_table_size = size_donate_transitions;
+		break;
+
+	default:
+		return false;
+	}
+
+	return spci_msg_get_next_state(mem_transition_table,
+				       transition_table_size,
+				       memory_to_attributes, *orig_from_mode,
+				       orig_to_mode, from_mode, to_mode);
+}
diff --git a/src/vm.c b/src/vm.c
index f1b79e6..fd477cd 100644
--- a/src/vm.c
+++ b/src/vm.c
@@ -102,6 +102,21 @@
 }
 
 /**
+ * Locks two VMs, ensuring that the locking order is determined by the locks'
+ * addresses.
+ */
+struct two_vm_locked vm_lock_both(struct vm *vm1, struct vm *vm2)
+{
+	struct two_vm_locked dual_lock;
+
+	sl_lock_both(&vm1->lock, &vm2->lock);
+	dual_lock.vm1.vm = vm1;
+	dual_lock.vm2.vm = vm2;
+
+	return dual_lock;
+}
+
+/**
  * Unlocks a VM previously locked with vm_lock, and updates `locked` to reflect
  * the fact that the VM is no longer locked.
  */
diff --git a/test/hftest/inc/hftest_impl.h b/test/hftest/inc/hftest_impl.h
index 64eeac4..f3eb407 100644
--- a/test/hftest/inc/hftest_impl.h
+++ b/test/hftest/inc/hftest_impl.h
@@ -22,6 +22,8 @@
 #include "hf/spci.h"
 #include "hf/std.h"
 
+#include "vmapi/hf/spci.h"
+
 #define HFTEST_MAX_TESTS 50
 
 /*
diff --git a/test/vmapi/primary_with_secondaries/memory_sharing.c b/test/vmapi/primary_with_secondaries/memory_sharing.c
index 70dbc08..ed402b8 100644
--- a/test/vmapi/primary_with_secondaries/memory_sharing.c
+++ b/test/vmapi/primary_with_secondaries/memory_sharing.c
@@ -163,6 +163,45 @@
 }
 
 /**
+ * Memory given away can be given back, employing SPCI donate architected
+ * messages.
+ */
+TEST(memory_sharing, spci_give_and_get_back)
+{
+	struct hf_vcpu_run_return run_res;
+	struct mailbox_buffers mb = set_up_mailbox();
+	uint8_t *ptr = page;
+
+	SERVICE_SELECT(SERVICE_VM0, "memory_return_spci", mb.send);
+
+	/* Initialise the memory before giving it. */
+	memset_s(ptr, sizeof(page), 'b', PAGE_SIZE);
+
+	/* Only a single-constituent memory region can be donated. */
+	struct spci_memory_region_constituent constituents[] = {
+		{.address = (uint64_t)page, .page_count = 1},
+	};
+
+	spci_memory_donate(mb.send, SERVICE_VM0, HF_PRIMARY_VM_ID, constituents,
+			   1, 0);
+
+	EXPECT_EQ(spci_msg_send(0), SPCI_SUCCESS);
+	run_res = hf_vcpu_run(SERVICE_VM0, 0);
+
+	/* Let the memory be returned. */
+	EXPECT_EQ(run_res.code, HF_VCPU_RUN_MESSAGE);
+
+	/* Ensure that the secondary VM accessed the region. */
+	for (int i = 0; i < PAGE_SIZE; ++i) {
+		ASSERT_EQ(ptr[i], 'c');
+	}
+
+	/* Observe the service faulting when accessing the memory. */
+	run_res = hf_vcpu_run(SERVICE_VM0, 0);
+	EXPECT_EQ(run_res.code, HF_VCPU_RUN_ABORTED);
+}
+
+/**
  * Memory given away can be given back.
  */
 TEST(memory_sharing, give_and_get_back)
diff --git a/test/vmapi/primary_with_secondaries/services/memory.c b/test/vmapi/primary_with_secondaries/services/memory.c
index f17932c..8b65600 100644
--- a/test/vmapi/primary_with_secondaries/services/memory.c
+++ b/test/vmapi/primary_with_secondaries/services/memory.c
@@ -55,6 +55,41 @@
 	}
 }
 
+TEST_SERVICE(memory_return_spci)
+{
+	/* Loop, giving memory back to the sender. */
+	for (;;) {
+		spci_msg_recv(SPCI_MSG_RECV_BLOCK);
+		uint8_t *ptr;
+
+		struct spci_message *recv_buf = SERVICE_RECV_BUFFER();
+		struct spci_message *send_buf = SERVICE_SEND_BUFFER();
+		struct spci_memory_region *memory_region =
+			spci_get_donated_memory_region(recv_buf);
+
+		ptr = (uint8_t *)memory_region->constituents[0].address;
+		/* Relevant information read, mailbox can be cleared. */
+		hf_mailbox_clear();
+
+		/* Check that the service has access to the shared region. */
+		for (int i = 0; i < PAGE_SIZE; ++i) {
+			ptr[i]++;
+		}
+
+		/* Give the memory back and notify the sender. */
+		spci_memory_donate(
+			send_buf, HF_PRIMARY_VM_ID, recv_buf->target_vm_id,
+			memory_region->constituents, memory_region->count, 0);
+		spci_msg_send(0);
+
+		/*
+		 * Try to access the memory, which will cause a fault unless
+		 * the memory has been shared back again.
+		 */
+		ptr[0] = 123;
+	}
+}
+
 TEST_SERVICE(memory_return)
 {
 	/* Loop, giving memory back to the sender. */