SPCI Donate: Add initial tests to exercise functionality

Add tests to verify the core functionality of `spci_memory_donate`:
bounds of the donated region, donation between secondary VMs, repeated
donation, donation to self or from an invalid source, and unaligned
addresses.

Change-Id: I6f5f74ee96d849cad45b3c06fbe0869227dd77e2
diff --git a/test/vmapi/primary_with_secondaries/memory_sharing.c b/test/vmapi/primary_with_secondaries/memory_sharing.c
index ed402b8..4e80d02 100644
--- a/test/vmapi/primary_with_secondaries/memory_sharing.c
+++ b/test/vmapi/primary_with_secondaries/memory_sharing.c
@@ -49,6 +49,30 @@
 }
 
 /**
+ * Tries sharing memory in the available modes with different VMs and asserts
+ * that every attempt fails.
+ */
+static void spci_check_cannot_share_memory(
+	struct mailbox_buffers mb,
+	struct spci_memory_region_constituent constituents[], int num_elements)
+{
+	uint32_t vms[] = {HF_PRIMARY_VM_ID, SERVICE_VM0, SERVICE_VM1};
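+	/*
+	 * The sharing primitives to exercise; only spci_memory_donate for now,
+	 * though the table allows further modes to be added later.
+	 */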
+	void (*modes[])(struct spci_message *, spci_vm_id_t, spci_vm_id_t,
+			struct spci_memory_region_constituent *, uint32_t,
+			uint32_t) = {spci_memory_donate};
+	int i;
+	int j;
+
+	for (j = 0; j < ARRAY_SIZE(modes); ++j) {
+		for (i = 0; i < ARRAY_SIZE(vms); ++i) {
+			modes[j](mb.send, vms[i], HF_PRIMARY_VM_ID,
+				 constituents, num_elements, 0);
+			EXPECT_EQ(spci_msg_send(0), SPCI_INVALID_PARAMETERS);
+		}
+	}
+}
+
+/**
  * After memory has been shared concurrently, it can't be shared again.
  */
 TEST(memory_sharing, cannot_share_concurrent_memory_twice)
@@ -172,7 +196,7 @@
 	struct mailbox_buffers mb = set_up_mailbox();
 	uint8_t *ptr = page;
 
-	SERVICE_SELECT(SERVICE_VM0, "memory_return_spci", mb.send);
+	SERVICE_SELECT(SERVICE_VM0, "spci_memory_return", mb.send);
 
 	/* Initialise the memory before giving it. */
 	memset_s(ptr, sizeof(page), 'b', PAGE_SIZE);
@@ -407,3 +431,269 @@
 	run_res = hf_vcpu_run(SERVICE_VM0, 0);
 	EXPECT_EQ(run_res.code, HF_VCPU_RUN_ABORTED);
 }
+
+/**
+ * SPCI: Verify that memory past the upper bound of the donated region cannot
+ * be accessed.
+ */
+TEST(memory_sharing, spci_donate_check_upper_bounds)
+{
+	struct hf_vcpu_run_return run_res;
+	struct mailbox_buffers mb = set_up_mailbox();
+	uint8_t *ptr = page;
+
+	SERVICE_SELECT(SERVICE_VM0, "spci_donate_check_upper_bound", mb.send);
+
+	/* Initialise the memory before giving it. */
+	memset_s(ptr, sizeof(page), 'b', 1 * PAGE_SIZE);
+
+	struct spci_memory_region_constituent constituents[] = {
+		{.address = (uint64_t)page, .page_count = 1},
+	};
+
+	spci_memory_donate(mb.send, SERVICE_VM0, HF_PRIMARY_VM_ID, constituents,
+			   1, 0);
+	EXPECT_EQ(spci_msg_send(0), SPCI_SUCCESS);
+
+	/* Observe the service faulting when accessing the memory. */
+	run_res = hf_vcpu_run(SERVICE_VM0, 0);
+	EXPECT_EQ(run_res.code, HF_VCPU_RUN_ABORTED);
+}
+
+/**
+ * SPCI: Verify that memory before the lower bound of the donated region
+ * cannot be accessed.
+ */
+TEST(memory_sharing, spci_donate_check_lower_bounds)
+{
+	struct hf_vcpu_run_return run_res;
+	struct mailbox_buffers mb = set_up_mailbox();
+	uint8_t *ptr = page;
+
+	SERVICE_SELECT(SERVICE_VM0, "spci_donate_check_lower_bound", mb.send);
+
+	/* Initialise the memory before giving it. */
+	memset_s(ptr, sizeof(page), 'b', 1 * PAGE_SIZE);
+
+	struct spci_memory_region_constituent constituents[] = {
+		{.address = (uint64_t)page, .page_count = 1},
+	};
+
+	spci_memory_donate(mb.send, SERVICE_VM0, HF_PRIMARY_VM_ID, constituents,
+			   1, 0);
+	EXPECT_EQ(spci_msg_send(0), SPCI_SUCCESS);
+
+	/* Observe the service faulting when accessing the memory. */
+	run_res = hf_vcpu_run(SERVICE_VM0, 0);
+	EXPECT_EQ(run_res.code, HF_VCPU_RUN_ABORTED);
+}
+
+/**
+ * SPCI: After memory has been returned, it is free to be shared with another
+ * VM.
+ */
+TEST(memory_sharing, spci_donate_elsewhere_after_return)
+{
+	struct hf_vcpu_run_return run_res;
+	struct mailbox_buffers mb = set_up_mailbox();
+	uint8_t *ptr = page;
+
+	SERVICE_SELECT(SERVICE_VM0, "spci_memory_return", mb.send);
+	SERVICE_SELECT(SERVICE_VM1, "spci_memory_return", mb.send);
+
+	/* Initialise the memory before giving it. */
+	memset_s(ptr, sizeof(page), 'b', 1 * PAGE_SIZE);
+
+	struct spci_memory_region_constituent constituents[] = {
+		{.address = (uint64_t)page, .page_count = 1},
+	};
+
+	spci_memory_donate(mb.send, SERVICE_VM0, HF_PRIMARY_VM_ID, constituents,
+			   1, 0);
+
+	EXPECT_EQ(spci_msg_send(0), SPCI_SUCCESS);
+	run_res = hf_vcpu_run(SERVICE_VM0, 0);
+
+	/* Let the memory be returned. */
+	EXPECT_EQ(run_res.code, HF_VCPU_RUN_MESSAGE);
+
+	/* Share the memory with another VM. */
+	spci_memory_donate(mb.send, SERVICE_VM1, HF_PRIMARY_VM_ID, constituents,
+			   1, 0);
+	EXPECT_EQ(spci_msg_send(0), SPCI_SUCCESS);
+
+	/* Observe the original service faulting when accessing the memory. */
+	run_res = hf_vcpu_run(SERVICE_VM0, 0);
+	EXPECT_EQ(run_res.code, HF_VCPU_RUN_ABORTED);
+}
+
+/**
+ * SPCI: Check that memory can be donated between secondary VMs and that,
+ * once donated, it can no longer be accessed by the donating VM.
+ */
+TEST(memory_sharing, spci_donate_vms)
+{
+	struct hf_vcpu_run_return run_res;
+	struct mailbox_buffers mb = set_up_mailbox();
+	uint8_t *ptr = page;
+
+	SERVICE_SELECT(SERVICE_VM0, "spci_donate_secondary_and_fault", mb.send);
+	SERVICE_SELECT(SERVICE_VM1, "spci_memory_receive", mb.send);
+
+	/* Initialise the memory before giving it. */
+	memset_s(ptr, sizeof(page), 'b', 1 * PAGE_SIZE);
+
+	struct spci_memory_region_constituent constituents[] = {
+		{.address = (uint64_t)page, .page_count = 1},
+	};
+
+	/* Set up VM1 to wait for message. */
+	run_res = hf_vcpu_run(SERVICE_VM1, 0);
+	EXPECT_EQ(run_res.code, HF_VCPU_RUN_WAIT_FOR_MESSAGE);
+
+	/* Donate memory. */
+	spci_memory_donate(mb.send, SERVICE_VM0, HF_PRIMARY_VM_ID, constituents,
+			   1, 0);
+	EXPECT_EQ(spci_msg_send(0), SPCI_SUCCESS);
+
+	/* Let the memory be sent from VM0 to VM1. */
+	run_res = hf_vcpu_run(SERVICE_VM0, 0);
+	EXPECT_EQ(run_res.code, HF_VCPU_RUN_MESSAGE);
+
+	/* Receive memory in VM1. */
+	run_res = hf_vcpu_run(SERVICE_VM1, 0);
+	EXPECT_EQ(run_res.code, HF_VCPU_RUN_YIELD);
+
+	/* Try to access memory in VM0 and fail. */
+	run_res = hf_vcpu_run(SERVICE_VM0, 0);
+	EXPECT_EQ(run_res.code, HF_VCPU_RUN_ABORTED);
+
+	/* Ensure that memory in VM1 remains the same. */
+	run_res = hf_vcpu_run(SERVICE_VM1, 0);
+	EXPECT_EQ(run_res.code, HF_VCPU_RUN_YIELD);
+}
+
+/**
+ * SPCI: Check that memory cannot be donated to multiple parties.
+ */
+TEST(memory_sharing, spci_donate_twice)
+{
+	struct hf_vcpu_run_return run_res;
+	struct mailbox_buffers mb = set_up_mailbox();
+	uint8_t *ptr = page;
+
+	SERVICE_SELECT(SERVICE_VM0, "spci_donate_twice", mb.send);
+	SERVICE_SELECT(SERVICE_VM1, "spci_memory_receive", mb.send);
+
+	/* Initialise the memory before giving it. */
+	memset_s(ptr, sizeof(page), 'b', 1 * PAGE_SIZE);
+
+	struct spci_memory_region_constituent constituents[] = {
+		{.address = (uint64_t)page, .page_count = 1},
+	};
+
+	/* Donate memory to VM0. */
+	spci_memory_donate(mb.send, SERVICE_VM0, HF_PRIMARY_VM_ID, constituents,
+			   1, 0);
+	EXPECT_EQ(spci_msg_send(0), SPCI_SUCCESS);
+
+	/* Let the memory be received. */
+	run_res = hf_vcpu_run(SERVICE_VM0, 0);
+	EXPECT_EQ(run_res.code, HF_VCPU_RUN_YIELD);
+
+	/* Fail to share the memory again with the primary, VM0 or VM1. */
+	spci_check_cannot_share_memory(mb, constituents, 1);
+
+	/* Let the memory be sent from VM0 to PRIMARY (returned). */
+	run_res = hf_vcpu_run(SERVICE_VM0, 0);
+	EXPECT_EQ(run_res.code, HF_VCPU_RUN_MESSAGE);
+
+	/* Check we have access again. */
+	ptr[0] = 'f';
+
+	/* Let VM0 try, and fail, to donate the memory to VM1. */
+	run_res = hf_vcpu_run(SERVICE_VM0, 0);
+	EXPECT_EQ(run_res.code, HF_VCPU_RUN_YIELD);
+}
+
+/**
+ * SPCI: Check that a VM cannot donate memory to itself.
+ */
+TEST(memory_sharing, spci_donate_to_self)
+{
+	struct mailbox_buffers mb = set_up_mailbox();
+	uint8_t *ptr = page;
+
+	/* Initialise the memory before giving it. */
+	memset_s(ptr, sizeof(page), 'b', PAGE_SIZE);
+	struct spci_memory_region_constituent constituents[] = {
+		{.address = (uint64_t)page, .page_count = 1},
+	};
+
+	spci_memory_donate(mb.send, HF_PRIMARY_VM_ID, HF_PRIMARY_VM_ID,
+			   constituents, 1, 0);
+
+	EXPECT_EQ(spci_msg_send(0), SPCI_INVALID_PARAMETERS);
+}
+
+/**
+ * SPCI: Check that memory cannot be donated on behalf of another VM.
+ */
+TEST(memory_sharing, spci_donate_invalid_source)
+{
+	struct hf_vcpu_run_return run_res;
+	struct mailbox_buffers mb = set_up_mailbox();
+	uint8_t *ptr = page;
+
+	SERVICE_SELECT(SERVICE_VM0, "spci_donate_invalid_source", mb.send);
+	SERVICE_SELECT(SERVICE_VM1, "spci_memory_receive", mb.send);
+
+	/* Initialise the memory before giving it. */
+	memset_s(ptr, sizeof(page), 'b', PAGE_SIZE);
+	struct spci_memory_region_constituent constituents[] = {
+		{.address = (uint64_t)page, .page_count = 1},
+	};
+
+	/* Try invalid configurations where the caller is not the source. */
+	spci_memory_donate(mb.send, HF_PRIMARY_VM_ID, SERVICE_VM0, constituents,
+			   1, 0);
+	EXPECT_EQ(spci_msg_send(0), SPCI_INVALID_PARAMETERS);
+
+	spci_memory_donate(mb.send, SERVICE_VM0, SERVICE_VM0, constituents, 1,
+			   0);
+	EXPECT_EQ(spci_msg_send(0), SPCI_INVALID_PARAMETERS);
+
+	spci_memory_donate(mb.send, SERVICE_VM0, SERVICE_VM1, constituents, 1,
+			   0);
+	EXPECT_EQ(spci_msg_send(0), SPCI_INVALID_PARAMETERS);
+
+	/* Successfully donate to VM0. */
+	spci_memory_donate(mb.send, SERVICE_VM0, HF_PRIMARY_VM_ID, constituents,
+			   1, 0);
+	EXPECT_EQ(spci_msg_send(0), SPCI_SUCCESS);
+
+	/* Receive and return memory from VM0. */
+	run_res = hf_vcpu_run(SERVICE_VM0, 0);
+	EXPECT_EQ(run_res.code, HF_VCPU_RUN_MESSAGE);
+
+	/* Have VM0 fail to donate the memory from the primary to VM1. */
+	run_res = hf_vcpu_run(SERVICE_VM0, 0);
+	EXPECT_EQ(run_res.code, HF_VCPU_RUN_YIELD);
+}
+
+/**
+ * SPCI: Check that unaligned addresses cannot be donated.
+ */
+TEST(memory_sharing, spci_give_and_get_back_unaligned)
+{
+	struct mailbox_buffers mb = set_up_mailbox();
+
+	SERVICE_SELECT(SERVICE_VM0, "spci_memory_return", mb.send);
+
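+	/* Try the donation from every unaligned offset within the page. */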
+	for (int i = 1; i < PAGE_SIZE; i++) {
+		struct spci_memory_region_constituent constituents[] = {
+			{.address = (uint64_t)page + i, .page_count = 1},
+		};
+		spci_memory_donate(mb.send, SERVICE_VM0, HF_PRIMARY_VM_ID,
+				   constituents, 1, 0);
+		EXPECT_EQ(spci_msg_send(0), SPCI_INVALID_PARAMETERS);
+	}
+}
diff --git a/test/vmapi/primary_with_secondaries/services/BUILD.gn b/test/vmapi/primary_with_secondaries/services/BUILD.gn
index 073494d..7dc1964 100644
--- a/test/vmapi/primary_with_secondaries/services/BUILD.gn
+++ b/test/vmapi/primary_with_secondaries/services/BUILD.gn
@@ -69,7 +69,10 @@
 # Services related to memory sharing.
 source_set("memory") {
   testonly = true
-  public_configs = [ "//test/hftest:hftest_config" ]
+  public_configs = [
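+    # For primary_with_secondary.h.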
+    "..:config",
+    "//test/hftest:hftest_config",
+  ]
 
   sources = [
     "memory.c",
@@ -203,6 +206,7 @@
   testonly = true
 
   deps = [
+    ":memory",
     ":relay",
     "//test/hftest:hftest_secondary_vm",
   ]
diff --git a/test/vmapi/primary_with_secondaries/services/memory.c b/test/vmapi/primary_with_secondaries/services/memory.c
index 8b65600..d749186 100644
--- a/test/vmapi/primary_with_secondaries/services/memory.c
+++ b/test/vmapi/primary_with_secondaries/services/memory.c
@@ -20,6 +20,7 @@
 #include "vmapi/hf/call.h"
 
 #include "hftest.h"
+#include "primary_with_secondary.h"
 
 alignas(PAGE_SIZE) static uint8_t page[PAGE_SIZE];
 
@@ -27,7 +28,7 @@
 {
 	/* Loop, writing message to the shared memory. */
 	for (;;) {
-		spci_msg_recv(SPCI_MSG_RECV_BLOCK);
+		EXPECT_EQ(spci_msg_recv(SPCI_MSG_RECV_BLOCK), 0);
 		uint8_t *ptr;
 		size_t i;
 
@@ -55,46 +56,11 @@
 	}
 }
 
-TEST_SERVICE(memory_return_spci)
-{
-	/* Loop, giving memory back to the sender. */
-	for (;;) {
-		spci_msg_recv(SPCI_MSG_RECV_BLOCK);
-		uint8_t *ptr;
-
-		struct spci_message *recv_buf = SERVICE_RECV_BUFFER();
-		struct spci_message *send_buf = SERVICE_SEND_BUFFER();
-		struct spci_memory_region *memory_region =
-			spci_get_donated_memory_region(recv_buf);
-
-		ptr = (uint8_t *)memory_region->constituents[0].address;
-		/* Relevant information read, mailbox can be cleared. */
-		hf_mailbox_clear();
-
-		/* Check that one has access to the shared region. */
-		for (int i = 0; i < PAGE_SIZE; ++i) {
-			ptr[i]++;
-		}
-
-		/* Give the memory back and notify the sender. */
-		spci_memory_donate(
-			send_buf, HF_PRIMARY_VM_ID, recv_buf->target_vm_id,
-			memory_region->constituents, memory_region->count, 0);
-		spci_msg_send(0);
-
-		/*
-		 * Try and access the memory which will cause a fault unless the
-		 * memory has been shared back again.
-		 */
-		ptr[0] = 123;
-	}
-}
-
 TEST_SERVICE(memory_return)
 {
 	/* Loop, giving memory back to the sender. */
 	for (;;) {
-		spci_msg_recv(SPCI_MSG_RECV_BLOCK);
+		EXPECT_EQ(spci_msg_recv(SPCI_MSG_RECV_BLOCK), 0);
 		uint8_t *ptr;
 
 		/* Check the memory was cleared. */
@@ -170,3 +136,180 @@
 	/* Try using the memory that isn't valid unless it's been returned.  */
 	page[633] = 180;
 }
+
+TEST_SERVICE(spci_memory_return)
+{
+	/* Loop, giving memory back to the sender. */
+	for (;;) {
+		EXPECT_EQ(spci_msg_recv(SPCI_MSG_RECV_BLOCK), 0);
+		uint8_t *ptr;
+
+		struct spci_message *recv_buf = SERVICE_RECV_BUFFER();
+		struct spci_message *send_buf = SERVICE_SEND_BUFFER();
+		struct spci_memory_region *memory_region =
+			spci_get_donated_memory_region(recv_buf);
+		hf_mailbox_clear();
+
+		ptr = (uint8_t *)memory_region->constituents[0].address;
+
+		/* Check that one has access to the shared region. */
+		for (int i = 0; i < PAGE_SIZE; ++i) {
+			ptr[i]++;
+		}
+
+		/* Give the memory back and notify the sender. */
+		spci_memory_donate(
+			send_buf, HF_PRIMARY_VM_ID, recv_buf->target_vm_id,
+			memory_region->constituents, memory_region->count, 0);
+		spci_msg_send(0);
+
+		/*
+		 * Try and access the memory which will cause a fault unless the
+		 * memory has been shared back again.
+		 */
+		ptr[0] = 123;
+	}
+}
+
+TEST_SERVICE(spci_donate_check_upper_bound)
+{
+	EXPECT_EQ(spci_msg_recv(SPCI_MSG_RECV_BLOCK), 0);
+	uint8_t *ptr;
+
+	struct spci_message *recv_buf = SERVICE_RECV_BUFFER();
+	struct spci_memory_region *memory_region =
+		spci_get_donated_memory_region(recv_buf);
+	hf_mailbox_clear();
+
+	ptr = (uint8_t *)memory_region->constituents[0].address;
+
+	/* Check that the byte past the donated region is inaccessible. */
+	ptr[PAGE_SIZE]++;
+}
+
+TEST_SERVICE(spci_donate_check_lower_bound)
+{
+	EXPECT_EQ(spci_msg_recv(SPCI_MSG_RECV_BLOCK), 0);
+	uint8_t *ptr;
+
+	struct spci_message *recv_buf = SERVICE_RECV_BUFFER();
+	struct spci_memory_region *memory_region =
+		spci_get_donated_memory_region(recv_buf);
+	hf_mailbox_clear();
+
+	ptr = (uint8_t *)memory_region->constituents[0].address;
+
+	/* Check that the byte before the donated region is inaccessible. */
+	ptr[-1]++;
+}
+
+/**
+ * SPCI: Attempt to donate memory and then modify it.
+ */
+TEST_SERVICE(spci_donate_secondary_and_fault)
+{
+	EXPECT_EQ(spci_msg_recv(SPCI_MSG_RECV_BLOCK), 0);
+	uint8_t *ptr;
+
+	struct spci_message *recv_buf = SERVICE_RECV_BUFFER();
+	struct spci_message *send_buf = SERVICE_SEND_BUFFER();
+	struct spci_memory_region *memory_region =
+		spci_get_donated_memory_region(recv_buf);
+	hf_mailbox_clear();
+
+	ptr = (uint8_t *)memory_region->constituents[0].address;
+
+	/* Donate memory to next VM. */
+	spci_memory_donate(send_buf, SERVICE_VM1, recv_buf->target_vm_id,
+			   memory_region->constituents, memory_region->count,
+			   0);
+	EXPECT_EQ(spci_msg_send(0), SPCI_SUCCESS);
+
+	/*
+	 * Ensure that we can no longer modify the memory; this access is
+	 * expected to fault and abort the VM.
+	 */
+	ptr[0] = 'c';
+	EXPECT_EQ(ptr[0], 'c');
+	spci_yield();
+}
+
+/**
+ * SPCI: Attempt to donate memory twice from a VM.
+ */
+TEST_SERVICE(spci_donate_twice)
+{
+	EXPECT_EQ(spci_msg_recv(SPCI_MSG_RECV_BLOCK), 0);
+
+	struct spci_message *recv_buf = SERVICE_RECV_BUFFER();
+	struct spci_message *send_buf = SERVICE_SEND_BUFFER();
+	struct spci_memory_region *memory_region =
+		spci_get_donated_memory_region(recv_buf);
+	hf_mailbox_clear();
+
+	/* Yield so that the primary can attempt to donate the memory again. */
+	spci_yield();
+
+	/* Give the memory back and notify the sender. */
+	spci_memory_donate(send_buf, HF_PRIMARY_VM_ID, recv_buf->target_vm_id,
+			   memory_region->constituents, memory_region->count,
+			   0);
+	EXPECT_EQ(spci_msg_send(0), SPCI_SUCCESS);
+
+	/* Attempt to donate the memory to another VM. */
+	spci_memory_donate(send_buf, SERVICE_VM1, recv_buf->target_vm_id,
+			   memory_region->constituents, memory_region->count,
+			   0);
+	EXPECT_EQ(spci_msg_send(0), SPCI_INVALID_PARAMETERS);
+
+	spci_yield();
+}
+
+/**
+ * SPCI: Continually receive memory, check that we have access to it, and
+ * ensure it is not changed by a third party.
+ */
+TEST_SERVICE(spci_memory_receive)
+{
+	for (;;) {
+		EXPECT_EQ(spci_msg_recv(SPCI_MSG_RECV_BLOCK), 0);
+		uint8_t *ptr;
+
+		struct spci_message *recv_buf = SERVICE_RECV_BUFFER();
+		struct spci_memory_region *memory_region =
+			spci_get_donated_memory_region(recv_buf);
+		hf_mailbox_clear();
+
+		ptr = (uint8_t *)memory_region->constituents[0].address;
+		ptr[0] = 'd';
+		spci_yield();
+
+		/* Ensure memory has not changed. */
+		EXPECT_EQ(ptr[0], 'd');
+		spci_yield();
+	}
+}
+
+/**
+ * SPCI: Receive memory, return it, and then attempt to donate it on behalf
+ * of the primary VM.
+ */
+TEST_SERVICE(spci_donate_invalid_source)
+{
+	EXPECT_EQ(spci_msg_recv(SPCI_MSG_RECV_BLOCK), 0);
+
+	struct spci_message *recv_buf = SERVICE_RECV_BUFFER();
+	struct spci_message *send_buf = SERVICE_SEND_BUFFER();
+	struct spci_memory_region *memory_region =
+		spci_get_donated_memory_region(recv_buf);
+	hf_mailbox_clear();
+
+	/* Give the memory back and notify the sender. */
+	spci_memory_donate(send_buf, HF_PRIMARY_VM_ID, recv_buf->target_vm_id,
+			   memory_region->constituents, memory_region->count,
+			   0);
+	EXPECT_EQ(spci_msg_send(0), SPCI_SUCCESS);
+
+	/* Fail to donate the memory from the primary to VM1. */
+	spci_memory_donate(send_buf, SERVICE_VM1, HF_PRIMARY_VM_ID,
+			   memory_region->constituents, memory_region->count,
+			   0);
+	EXPECT_EQ(spci_msg_send(0), SPCI_INVALID_PARAMETERS);
+	spci_yield();
+}