SPCI: Memory lend with shared access.

Change-Id: I1c6d583b504eea62ba1eee375f53c41d6f846afe
diff --git a/inc/hf/spci_internal.h b/inc/hf/spci_internal.h
index 78477a3..3877283 100644
--- a/inc/hf/spci_internal.h
+++ b/inc/hf/spci_internal.h
@@ -33,6 +33,36 @@
 	int to_mode;
 };
 
+/* Map SPCI lend access attributes to arch-independent memory modes. TODO: Add device attributes: GRE, cacheability, shareability. */
+static inline uint32_t spci_memory_attrs_to_mode(uint32_t memory_attributes)
+{
+	uint32_t mode = 0;
+	uint32_t attr_value;
+
+	attr_value = spci_get_lend_access_attr(memory_attributes);
+	switch (attr_value) {
+	case SPCI_LEND_RO_NX:
+		mode |= MM_MODE_R;
+		break;
+
+	case SPCI_LEND_RO_X:
+		mode |= MM_MODE_R | MM_MODE_X;
+		break;
+
+	case SPCI_LEND_RW_NX:
+		mode |= MM_MODE_R | MM_MODE_W;
+		break;
+
+	case SPCI_LEND_RW_X:
+		mode |= MM_MODE_R | MM_MODE_W | MM_MODE_X;
+		break;
+
+	default: /* Unreachable: the access attribute is a 2-bit field. */
+		break;
+	}
+	return mode;
+}
+
 spci_return_t spci_msg_handle_architected_message(
 	struct vm_locked to_locked, struct vm_locked from_locked,
 	const struct spci_architected_message_header
diff --git a/inc/vmapi/hf/spci.h b/inc/vmapi/hf/spci.h
index a6d9c68..98d4155 100644
--- a/inc/vmapi/hf/spci.h
+++ b/inc/vmapi/hf/spci.h
@@ -48,6 +48,7 @@
 
 /* Architected memory sharing message IDs. */
 enum spci_memory_share {
+	SPCI_MEMORY_LEND = 0x0,
 	SPCI_MEMORY_RELINQUISH = 0x1,
 	SPCI_MEMORY_DONATE = 0x2,
 };
@@ -66,6 +67,90 @@
 /* The maximum length possible for a single message. */
 #define SPCI_MSG_PAYLOAD_MAX (HF_MAILBOX_SIZE - sizeof(struct spci_message))
 
+#define spci_get_lend_descriptor(message)\
+	((struct spci_memory_lend *)(((uint8_t *) message)\
+	+ sizeof(struct spci_message)\
+	+ sizeof(struct spci_architected_message_header)))
+
+enum spci_lend_access {
+	SPCI_LEND_RO_NX, /* Read-only, execute-never. */
+	SPCI_LEND_RO_X, /* Read-only, executable. */
+	SPCI_LEND_RW_NX, /* Read-write, execute-never. */
+	SPCI_LEND_RW_X, /* Read-write, executable. */
+};
+
+enum spci_lend_type {
+	SPCI_LEND_NORMAL_MEM, /* Normal memory. */
+	SPCI_LEND_DEV_NGNRNE, /* Device-nGnRnE. */
+	SPCI_LEND_DEV_NGNRE, /* Device-nGnRE. */
+	SPCI_LEND_DEV_NGRE, /* Device-nGRE. */
+	SPCI_LEND_DEV_GRE, /* Device-GRE. */
+};
+
+enum spci_lend_cacheability {
+	SPCI_LEND_CACHE_NON_CACHEABLE,
+	SPCI_LEND_CACHE_WRITE_THROUGH,
+	SPCI_LEND_CACHE_WRITE_BACK,
+};
+
+enum spci_lend_shareability {
+	SPCI_LEND_SHARE_NON_SHAREABLE,
+	SPCI_LEND_RESERVED, /* Encoding 0b01 is reserved. */
+	SPCI_LEND_OUTER_SHAREABLE,
+	SPCI_LEND_INNER_SHAREABLE,
+};
+
+#define SPCI_LEND_ACCESS_OFFSET (0x7U) /* Access permissions: bits [8:7]. */
+#define SPCI_LEND_ACCESS_MASK ((0x3U) << SPCI_LEND_ACCESS_OFFSET)
+
+#define SPCI_LEND_TYPE_OFFSET (0x4U) /* Memory type: bits [6:4]. */
+#define SPCI_LEND_TYPE_MASK ((0x7U) << SPCI_LEND_TYPE_OFFSET)
+
+#define SPCI_LEND_CACHEABILITY_OFFSET (0x2U) /* Cacheability: bits [3:2]. */
+#define SPCI_LEND_CACHEABILITY_MASK ((0x3U) <<\
+	SPCI_LEND_CACHEABILITY_OFFSET)
+
+#define SPCI_LEND_SHAREABILITY_OFFSET (0x0U) /* Shareability: bits [1:0]. */
+#define SPCI_LEND_SHAREABILITY_MASK ((0x3U) <<\
+	SPCI_LEND_SHAREABILITY_OFFSET)
+
+#define LEND_ATTR_FUNCTION_SET(name, offset, mask) \
+static inline void spci_set_lend_##name##_attr(uint16_t *lend_attr,\
+	const enum spci_lend_##name perm)\
+{\
+	*lend_attr = (*lend_attr & ~(mask)) | ((perm  << offset) & mask);\
+}
+
+#define LEND_ATTR_FUNCTION_GET(name, offset, mask) \
+static inline enum spci_lend_##name spci_get_lend_##name##_attr(\
+	uint16_t lend_attr)\
+{\
+	return (enum spci_lend_##name)((lend_attr & mask) >> offset);\
+}
+
+LEND_ATTR_FUNCTION_SET(access, SPCI_LEND_ACCESS_OFFSET, SPCI_LEND_ACCESS_MASK)
+LEND_ATTR_FUNCTION_GET(access, SPCI_LEND_ACCESS_OFFSET, SPCI_LEND_ACCESS_MASK)
+
+LEND_ATTR_FUNCTION_SET(type, SPCI_LEND_TYPE_OFFSET, SPCI_LEND_TYPE_MASK)
+LEND_ATTR_FUNCTION_GET(type, SPCI_LEND_TYPE_OFFSET, SPCI_LEND_TYPE_MASK)
+
+LEND_ATTR_FUNCTION_SET(cacheability, SPCI_LEND_CACHEABILITY_OFFSET,
+	SPCI_LEND_CACHEABILITY_MASK)
+
+LEND_ATTR_FUNCTION_GET(cacheability, SPCI_LEND_CACHEABILITY_OFFSET,
+	SPCI_LEND_CACHEABILITY_MASK)
+
+LEND_ATTR_FUNCTION_SET(shareability, SPCI_LEND_SHAREABILITY_OFFSET,
+	SPCI_LEND_SHAREABILITY_MASK)
+
+LEND_ATTR_FUNCTION_GET(shareability, SPCI_LEND_SHAREABILITY_OFFSET,
+	SPCI_LEND_SHAREABILITY_MASK)
+
+enum spci_lend_flags {
+	SPCI_LEND_KEEP_MAPPED = 0x0, /* Lender keeps the pages mapped. */
+	SPCI_LEND_UNMAP = 0x1
+};
+
 /* clang-format on */
 
 /** The ID of a VM. These are assigned sequentially starting with an offset. */
@@ -154,6 +239,15 @@
 	struct spci_memory_region_constituent constituents[];
 };
 
+struct spci_memory_lend {
+	uint16_t flags; /* Values from enum spci_lend_flags. */
+	uint16_t borrower_attributes; /* Packed access/type/cacheability/shareability bits. */
+
+	uint32_t reserved; /* NOTE(review): presumably must be zero — confirm against spec. */
+
+	uint8_t payload[]; /* Holds a struct spci_memory_region. */
+};
+
 /* TODO: Move all the functions below this line to a support library. */
 /**
  * Fill all the fields, except for the flags, in the SPCI message common header.
@@ -238,17 +332,16 @@
 }
 
 /**
- * Add a memory region to the current message.
- * A memory region is composed of one or more constituents.
+ * Helper function that copies the memory constituents and the handle
+ * information onto the address pointed to by memory_region.
+ * The function returns the length in bytes occupied by the data copied to
+ * memory_region (constituents and memory region header size).
  */
-static inline void spci_memory_region_add(
-	struct spci_message *message, spci_memory_handle_t handle,
+static inline uint32_t spci_memory_region_add(
+	struct spci_memory_region *memory_region, spci_memory_handle_t handle,
 	const struct spci_memory_region_constituent constituents[],
 	uint32_t num_constituents)
 {
-	struct spci_memory_region *memory_region =
-		spci_get_donated_memory_region(message);
-
 	uint32_t constituents_length =
 		num_constituents *
 		sizeof(struct spci_memory_region_constituent);
@@ -266,8 +359,8 @@
 	 * TODO: Add assert ensuring that the specified message
 	 * length is not greater than SPCI_MSG_PAYLOAD_MAX.
 	 */
-	message->length +=
-		sizeof(struct spci_memory_region) + constituents_length;
+
+	return sizeof(struct spci_memory_region) + constituents_length;
 }
 
 /** Construct the SPCI donate memory region message. */
@@ -278,6 +371,8 @@
 	uint32_t num_elements, uint32_t handle)
 {
 	int32_t message_length;
+	struct spci_memory_region *memory_region =
+		spci_get_donated_memory_region(message);
 
 	message_length = sizeof(struct spci_architected_message_header);
 
@@ -286,8 +381,8 @@
 				      source_vm_id, SPCI_MEMORY_DONATE);
 
 	/* Create single memory region. */
-	spci_memory_region_add(message, handle, region_constituents,
-			       num_elements);
+	message->length += spci_memory_region_add(
+		memory_region, handle, region_constituents, num_elements);
 }
 
 /**
@@ -301,6 +396,8 @@
 	uint64_t num_elements, uint32_t handle)
 {
 	int32_t message_length;
+	struct spci_memory_region *memory_region =
+		spci_get_donated_memory_region(message);
 
 	message_length = sizeof(struct spci_architected_message_header);
 
@@ -309,6 +406,52 @@
 				      source_vm_id, SPCI_MEMORY_RELINQUISH);
 
 	/* Create single memory region. */
-	spci_memory_region_add(message, handle, region_constituents,
-			       num_elements);
+	message->length += spci_memory_region_add(
+		memory_region, handle, region_constituents, num_elements);
+}
+
+/**
+ * Construct the SPCI memory region lend (shared access) message.
+ */
+static inline void spci_memory_lend(
+	struct spci_message *message, spci_vm_id_t target_vm_id,
+	spci_vm_id_t source_vm_id,
+	struct spci_memory_region_constituent *region_constituents,
+	uint64_t num_elements, uint32_t handle, enum spci_lend_access access,
+	enum spci_lend_type type, enum spci_lend_cacheability cacheability,
+	enum spci_lend_shareability shareability)
+{
+	int32_t message_length;
+	struct spci_memory_region *memory_region;
+
+	const struct spci_memory_lend lend_init = {0};
+
+	struct spci_memory_lend *lend_descriptor =
+		spci_get_lend_descriptor(message);
+	memory_region = (struct spci_memory_region *)lend_descriptor->payload;
+
+	/* Initialise all struct elements to zero. */
+	*lend_descriptor = lend_init;
+
+	message_length = sizeof(struct spci_architected_message_header) +
+			 sizeof(struct spci_memory_lend);
+
+	/* Fill in the details on the common message header. */
+	spci_architected_message_init(message, message_length, target_vm_id,
+				      source_vm_id, SPCI_MEMORY_LEND);
+
+	lend_descriptor->flags = SPCI_LEND_KEEP_MAPPED; /* Lender keeps access. */
+
+	/* Set memory region's page attributes. */
+	spci_set_lend_access_attr(&lend_descriptor->borrower_attributes,
+				  access);
+	spci_set_lend_type_attr(&lend_descriptor->borrower_attributes, type);
+	spci_set_lend_cacheability_attr(&lend_descriptor->borrower_attributes,
+					cacheability);
+	spci_set_lend_shareability_attr(&lend_descriptor->borrower_attributes,
+					shareability);
+
+	/* Create single memory region. */
+	message->length += spci_memory_region_add(
+		memory_region, handle, region_constituents, num_elements);
+}
diff --git a/src/spci_architected_message.c b/src/spci_architected_message.c
index 0b47fb3..5628a76 100644
--- a/src/spci_architected_message.c
+++ b/src/spci_architected_message.c
@@ -98,6 +98,32 @@
 
 		break;
 
+	case SPCI_MEMORY_LEND: {
+		/* TODO: Add support for lend exclusive. */
+		struct spci_memory_lend *lend_descriptor;
+		uint32_t borrower_attributes;
+
+		lend_descriptor = (struct spci_memory_lend *)
+					  architected_message_replica->payload;
+
+		borrower_attributes = lend_descriptor->borrower_attributes;
+
+		memory_region =
+			(struct spci_memory_region *)lend_descriptor->payload;
+		memory_share_size =
+			from_msg_replica->length -
+			sizeof(struct spci_architected_message_header) -
+			sizeof(struct spci_memory_lend);
+
+		to_mode = spci_memory_attrs_to_mode(borrower_attributes);
+
+		ret = spci_validate_call_share_memory(
+			to_locked, from_locked, memory_region,
+			memory_share_size, to_mode, message_type);
+
+		break;
+	}
+
 	default:
 		dlog("Invalid memory sharing message.\n");
 		return SPCI_INVALID_PARAMETERS;
@@ -146,11 +172,9 @@
 		    ((orig_to_mode & state_mask) == table_orig_to_mode)) {
 			*to_mode = transitions[index].to_mode |
 				   memory_to_attributes;
-			/*
-			 * TODO: Change access permission assignment to cater
-			 * for the lend case.
-			 */
-			*from_mode = transitions[index].from_mode;
+
+			*from_mode = transitions[index].from_mode |
+				     (~state_mask & orig_from_mode);
 
 			return true;
 		}
@@ -226,6 +250,9 @@
 		},
 	};
 
+	static const uint32_t size_donate_transitions =
+		ARRAY_SIZE(donate_transitions);
+
 	static const struct spci_mem_transitions relinquish_transitions[] = {
 		{
 			/* 1) {!O-EA, O-NA} -> {!O-NA, O-EA} */
@@ -246,11 +273,40 @@
 	};
 
 	static const uint32_t size_relinquish_transitions =
-		sizeof(relinquish_transitions) /
-		sizeof(struct spci_mem_transitions);
+		ARRAY_SIZE(relinquish_transitions);
 
-	static const uint32_t size_donate_transitions =
-		ARRAY_SIZE(donate_transitions);
+	/*
+	 * This data structure holds the allowed state transitions for the "lend
+	 * with shared access" state machine. In this state machine the owner
+	 * keeps the lent pages mapped on its stage2 table and keeps access as
+	 * well.
+	 */
+	static const struct spci_mem_transitions shared_lend_transitions[] = {
+		{
+			/* 1) {O-EA, !O-NA} -> {O-SA, !O-SA} */
+			.orig_from_mode = 0,
+			.orig_to_mode = MM_MODE_INVALID | MM_MODE_UNOWNED |
+					MM_MODE_SHARED,
+			.from_mode = MM_MODE_SHARED,
+			.to_mode = MM_MODE_UNOWNED | MM_MODE_SHARED,
+		},
+		{
+			/*
+			 * Duplicate of 1) in order to cater for an alternative
+			 * representation of !O-NA:
+			 * (INVALID | UNOWNED | SHARED) and (INVALID | UNOWNED)
+			 * are both alternate representations of !O-NA.
+			 */
+			/* 2) {O-EA, !O-NA} -> {O-SA, !O-SA} */
+			.orig_from_mode = 0,
+			.orig_to_mode = MM_MODE_INVALID | MM_MODE_UNOWNED,
+			.from_mode = MM_MODE_SHARED,
+			.to_mode = MM_MODE_UNOWNED | MM_MODE_SHARED,
+		},
+	};
+
+	static const uint32_t size_shared_lend_transitions =
+		ARRAY_SIZE(shared_lend_transitions);
 
 	/* Fail if addresses are not page-aligned. */
 	if (!is_aligned(ipa_addr(begin), PAGE_SIZE) ||
@@ -278,6 +334,11 @@
 		transition_table_size = size_relinquish_transitions;
 		break;
 
+	case SPCI_MEMORY_LEND:
+		mem_transition_table = shared_lend_transitions;
+		transition_table_size = size_shared_lend_transitions;
+		break;
+
 	default:
 		return false;
 	}
diff --git a/test/vmapi/primary_with_secondaries/memory_sharing.c b/test/vmapi/primary_with_secondaries/memory_sharing.c
index 4e80d02..30f4fc3 100644
--- a/test/vmapi/primary_with_secondaries/memory_sharing.c
+++ b/test/vmapi/primary_with_secondaries/memory_sharing.c
@@ -226,6 +226,44 @@
 }
 
 /**
+ * SPCI memory lent with shared access can be used by the borrower and then
+ */
+TEST(memory_sharing, spci_lend_relinquish)
+{
+	struct hf_vcpu_run_return run_res;
+	struct mailbox_buffers mb = set_up_mailbox();
+	uint8_t *ptr = page;
+
+	SERVICE_SELECT(SERVICE_VM0, "memory_lend_relinquish_spci", mb.send);
+
+	/* Initialise the memory before lending it. */
+	memset_s(ptr, sizeof(page), 'b', PAGE_SIZE);
+
+	struct spci_memory_region_constituent constituents[] = {
+		{.address = (uint64_t)page, .page_count = 1},
+	};
+
+	spci_memory_lend(mb.send, SERVICE_VM0, HF_PRIMARY_VM_ID, constituents,
+			 1, 0, SPCI_LEND_RW_X, SPCI_LEND_NORMAL_MEM,
+			 SPCI_LEND_CACHE_WRITE_BACK, SPCI_LEND_OUTER_SHAREABLE);
+
+	EXPECT_EQ(spci_msg_send(0), SPCI_SUCCESS);
+	run_res = hf_vcpu_run(SERVICE_VM0, 0);
+
+	/* Let the memory be returned. */
+	EXPECT_EQ(run_res.code, HF_VCPU_RUN_MESSAGE);
+
+	/* Ensure that the secondary VM accessed the region ('b' + 1). */
+	for (int i = 0; i < PAGE_SIZE; ++i) {
+		ASSERT_EQ(ptr[i], 'c');
+	}
+
+	/* Observe the service faulting when accessing relinquished memory. */
+	run_res = hf_vcpu_run(SERVICE_VM0, 0);
+	EXPECT_EQ(run_res.code, HF_VCPU_RUN_ABORTED);
+}
+
+/**
  * Memory given away can be given back.
  */
 TEST(memory_sharing, give_and_get_back)
diff --git a/test/vmapi/primary_with_secondaries/services/memory.c b/test/vmapi/primary_with_secondaries/services/memory.c
index d749186..c640797 100644
--- a/test/vmapi/primary_with_secondaries/services/memory.c
+++ b/test/vmapi/primary_with_secondaries/services/memory.c
@@ -56,6 +56,44 @@
 	}
 }
 
+TEST_SERVICE(memory_lend_relinquish_spci)
+{
+	/* Loop: receive a lend message, use the memory, give it back. */
+	for (;;) {
+		spci_msg_recv(SPCI_MSG_RECV_BLOCK);
+		uint8_t *ptr;
+
+		struct spci_message *recv_buf = SERVICE_RECV_BUFFER();
+		struct spci_message *send_buf = SERVICE_SEND_BUFFER();
+		struct spci_memory_region *memory_region =
+			(struct spci_memory_region *)(spci_get_lend_descriptor(
+							      recv_buf)
+							      ->payload);
+
+		ptr = (uint8_t *)memory_region->constituents[0].address;
+		/* Relevant information read, mailbox can be cleared. */
+		hf_mailbox_clear();
+
+		/* Check that one has access to the shared region. */
+		for (int i = 0; i < PAGE_SIZE; ++i) {
+			ptr[i]++;
+		}
+
+
+		/* Give the memory back and notify the sender. */
+		spci_memory_relinquish(
+			send_buf, HF_PRIMARY_VM_ID, recv_buf->target_vm_id,
+			memory_region->constituents, memory_region->count, 0);
+		spci_msg_send(0);
+
+		/*
+		 * Try and access the memory which will cause a fault unless the
+		 * memory has been shared back again.
+		 */
+		ptr[0] = 123;
+	}
+}
+
 TEST_SERVICE(memory_return)
 {
 	/* Loop, giving memory back to the sender. */