Distinguish device and normal memory.

The large address space mapped to the primary VM is marked as device
memory. Normal memory is then specifically marked as such, based on the
memory ranges passed in via the boot parameters.

Sharing is restricted to normal memory, as sharing device memory is
dangerous without full device reassignment.
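
For reference, the stage-2 MemAttr[3:0] field sits at PTE bits [5:2],
so the memory type lives in bits [5:4] and device memory is the
all-zero encoding. A minimal standalone sketch of the encode/decode
round trip, reusing the encodings added below in src/arch/aarch64/mm.c
(the main() harness and the printed strings are illustrative only, not
Hafnium code):

    #include <stdint.h>
    #include <stdio.h>

    /* Same encodings as the defines added in src/arch/aarch64/mm.c. */
    #define STAGE2_DEVICE_MEMORY         UINT64_C(0)
    #define STAGE2_WRITEBACK             UINT64_C(3)
    #define STAGE2_MEMATTR_DEVICE_GRE    UINT64_C(3)
    #define STAGE2_MEMATTR(outer, inner) ((((outer) << 2) | (inner)) << 2)
    #define STAGE2_MEMATTR_TYPE_MASK     UINT64_C(3 << 4)

    static const char *stage2_attrs_type(uint64_t attrs)
    {
        /*
         * Device memory has MemAttr[3:2] == 0b00, so the masked value
         * compares equal to STAGE2_DEVICE_MEMORY (0).
         */
        return (attrs & STAGE2_MEMATTR_TYPE_MASK) == STAGE2_DEVICE_MEMORY
                   ? "device"
                   : "normal";
    }

    int main(void)
    {
        uint64_t dev = STAGE2_MEMATTR(STAGE2_DEVICE_MEMORY,
                                      STAGE2_MEMATTR_DEVICE_GRE);
        uint64_t mem = STAGE2_MEMATTR(STAGE2_WRITEBACK, STAGE2_WRITEBACK);

        printf("0x%llx -> %s\n", (unsigned long long)dev,
               stage2_attrs_type(dev));
        printf("0x%llx -> %s\n", (unsigned long long)mem,
               stage2_attrs_type(mem));
        return 0;
    }

This prints "0xc -> device" and "0x3c -> normal", matching the new
MM_MODE_D check in src/arch/aarch64/mm.c.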

Change-Id: I117d621342bae89a35cde33e3a6e59e56add9a5c
diff --git a/src/api.c b/src/api.c
index b803cf5..0db632a 100644
--- a/src/api.c
+++ b/src/api.c
@@ -561,8 +561,8 @@
  */
 static bool api_mode_valid_owned_and_exclusive(int mode)
 {
-	return (mode & (MM_MODE_INVALID | MM_MODE_UNOWNED | MM_MODE_SHARED)) ==
-	       0;
+	return (mode & (MM_MODE_D | MM_MODE_INVALID | MM_MODE_UNOWNED |
+			MM_MODE_SHARED)) == 0;
 }
 
 /**
@@ -1578,6 +1578,11 @@
 		goto fail;
 	}
 
+	/* Ensure the address range is normal memory and not a device. */
+	if (orig_from_mode & MM_MODE_D) {
+		goto fail;
+	}
+
 	/*
 	 * Ensure the memory range is valid for the sender. If it isn't, the
 	 * sender has either shared it with another VM already or has no claim
diff --git a/src/arch/aarch64/mm.c b/src/arch/aarch64/mm.c
index 7f41a10..65fcef4 100644
--- a/src/arch/aarch64/mm.c
+++ b/src/arch/aarch64/mm.c
@@ -59,7 +59,6 @@
 #define STAGE2_AF         (UINT64_C(1) << 10)
 #define STAGE2_SH(x)      ((x) << 8)
 #define STAGE2_S2AP(x)    ((x) << 6)
-#define STAGE2_MEMATTR(x) ((x) << 2)
 
 #define STAGE2_EXECUTE_ALL  UINT64_C(0)
 #define STAGE2_EXECUTE_EL0  UINT64_C(1)
@@ -79,11 +78,20 @@
 #define STAGE2_SW_EXCLUSIVE (UINT64_C(1) << 56)
 
 /* The following are stage-2 memory attributes for normal memory. */
-#define STAGE2_NONCACHEABLE UINT64_C(1)
-#define STAGE2_WRITETHROUGH UINT64_C(2)
-#define STAGE2_WRITEBACK    UINT64_C(3)
+#define STAGE2_DEVICE_MEMORY UINT64_C(0)
+#define STAGE2_NONCACHEABLE  UINT64_C(1)
+#define STAGE2_WRITETHROUGH  UINT64_C(2)
+#define STAGE2_WRITEBACK     UINT64_C(3)
 
-#define STAGE2_MEMATTR_NORMAL(outer, inner) ((((outer) << 2) | (inner)) << 2)
+/* The following are stage-2 memory attributes for device memory. */
+#define STAGE2_MEMATTR_DEVICE_nGnRnE UINT64_C(0)
+#define STAGE2_MEMATTR_DEVICE_nGnRE  UINT64_C(1)
+#define STAGE2_MEMATTR_DEVICE_nGRE   UINT64_C(2)
+#define STAGE2_MEMATTR_DEVICE_GRE    UINT64_C(3)
+
+/* The following construct and destruct stage-2 memory attributes. */
+#define STAGE2_MEMATTR(outer, inner) ((((outer) << 2) | (inner)) << 2)
+#define STAGE2_MEMATTR_TYPE_MASK UINT64_C(3 << 4)
 
 #define STAGE2_ACCESS_READ  UINT64_C(1)
 #define STAGE2_ACCESS_WRITE UINT64_C(2)
@@ -433,9 +441,8 @@
 	uint64_t access = 0;
 
 	/*
-	 * Non-shareable is the "neutral" share mode, i.e., the
-	 * shareability attribute of stage 1 will determine the actual
-	 * attribute.
+	 * Non-shareable is the "neutral" share mode, i.e., the shareability
+	 * attribute of stage 1 will determine the actual attribute.
 	 */
 	attrs |= STAGE2_AF | STAGE2_SH(NON_SHAREABLE);
 
@@ -458,11 +465,15 @@
 	}
 
 	/*
-	 * Define the memory attribute bits, using the "neutral" values
-	 * which give the stage-1 attributes full control of the
-	 * attributes.
+	 * Define the memory attribute bits, using the "neutral" values which
+	 * give the stage-1 attributes full control of the attributes.
 	 */
-	attrs |= STAGE2_MEMATTR_NORMAL(STAGE2_WRITEBACK, STAGE2_WRITEBACK);
+	if (mode & MM_MODE_D) {
+		attrs |= STAGE2_MEMATTR(STAGE2_DEVICE_MEMORY,
+					STAGE2_MEMATTR_DEVICE_GRE);
+	} else {
+		attrs |= STAGE2_MEMATTR(STAGE2_WRITEBACK, STAGE2_WRITEBACK);
+	}
 
 	/* Define the ownership bit. */
 	if (!(mode & MM_MODE_UNOWNED)) {
@@ -499,6 +510,10 @@
 		mode |= MM_MODE_X;
 	}
 
+	if ((attrs & STAGE2_MEMATTR_TYPE_MASK) == STAGE2_DEVICE_MEMORY) {
+		mode |= MM_MODE_D;
+	}
+
 	if (!(attrs & STAGE2_SW_OWNED)) {
 		mode |= MM_MODE_UNOWNED;
 	}
diff --git a/src/arch/fake/mm.c b/src/arch/fake/mm.c
index 4778beb..68a2383 100644
--- a/src/arch/fake/mm.c
+++ b/src/arch/fake/mm.c
@@ -154,9 +154,6 @@
 
 uint64_t arch_mm_mode_to_stage2_attrs(int mode)
 {
-	/* Stage-2 ignores the device mode. */
-	mode &= ~MM_MODE_D;
-
 	return ((uint64_t)mode << PTE_ATTR_MODE_SHIFT) & PTE_ATTR_MODE_MASK;
 }
 
diff --git a/src/load.c b/src/load.c
index 01640f8..8df4fe2 100644
--- a/src/load.c
+++ b/src/load.c
@@ -96,12 +96,13 @@
  */
 static bool load_primary(struct mm_stage1_locked stage1_locked,
 			 const struct manifest *manifest,
-			 const struct memiter *cpio, uintreg_t kernel_arg,
-			 struct mpool *ppool)
+			 const struct memiter *cpio,
+			 const struct boot_params *params, struct mpool *ppool)
 {
 	paddr_t primary_begin = layout_primary_begin();
 	struct vm *vm;
 	struct vcpu_locked vcpu_locked;
+	size_t i;
 
 	/*
 	 * TODO: This bound is currently meaningless but will be addressed when
@@ -125,23 +126,38 @@
 		return false;
 	}
 
-	/* Map the 1TB of memory. */
-	/* TODO: We should do a whitelist rather than a blacklist. */
+	/*
+	 * Map 1TB of address space as device memory so that, most likely,
+	 * all devices are available to the primary VM.
+	 *
+	 * TODO: We should do a whitelist rather than a blacklist.
+	 */
 	if (!mm_vm_identity_map(&vm->ptable, pa_init(0),
 				pa_init(UINT64_C(1024) * 1024 * 1024 * 1024),
-				MM_MODE_R | MM_MODE_W | MM_MODE_X, NULL,
+				MM_MODE_R | MM_MODE_W | MM_MODE_D, NULL,
 				ppool)) {
-		dlog("Unable to initialise memory for primary vm\n");
+		dlog("Unable to initialise address space for primary vm\n");
 		return false;
 	}
 
+	/* Map normal memory as such to permit caching, execution, etc. */
+	for (i = 0; i < params->mem_ranges_count; ++i) {
+		if (!mm_vm_identity_map(
+			    &vm->ptable, params->mem_ranges[i].begin,
+			    params->mem_ranges[i].end,
+			    MM_MODE_R | MM_MODE_W | MM_MODE_X, NULL, ppool)) {
+			dlog("Unable to initialise memory for primary vm\n");
+			return false;
+		}
+	}
+
 	if (!mm_vm_unmap_hypervisor(&vm->ptable, ppool)) {
 		dlog("Unable to unmap hypervisor from primary vm\n");
 		return false;
 	}
 
 	vcpu_locked = vcpu_lock(vm_get_vcpu(vm, 0));
-	vcpu_on(vcpu_locked, ipa_from_pa(primary_begin), kernel_arg);
+	vcpu_on(vcpu_locked, ipa_from_pa(primary_begin), params->kernel_arg);
 	vcpu_unlock(&vcpu_locked);
 
 	return true;
@@ -276,8 +292,7 @@
 	struct mem_range mem_ranges_available[MAX_MEM_RANGES];
 	size_t i;
 
-	if (!load_primary(stage1_locked, manifest, cpio, params->kernel_arg,
-			  ppool)) {
+	if (!load_primary(stage1_locked, manifest, cpio, params, ppool)) {
 		dlog("Unable to load primary VM.\n");
 		return false;
 	}
diff --git a/src/spci_architected_message.c b/src/spci_architected_message.c
index 5628a76..28d5fc7 100644
--- a/src/spci_architected_message.c
+++ b/src/spci_architected_message.c
@@ -314,15 +314,17 @@
 		return false;
 	}
 
-	/*
-	 * Ensure that the memory range is mapped with the same
-	 * mode.
-	 */
+	/* Ensure that the memory range is mapped with the same mode. */
 	if (!mm_vm_get_mode(&from->ptable, begin, end, orig_from_mode) ||
 	    !mm_vm_get_mode(&to->ptable, begin, end, &orig_to_mode)) {
 		return false;
 	}
 
+	/* Ensure the address range is normal memory and not a device. */
+	if (*orig_from_mode & MM_MODE_D) {
+		return false;
+	}
+
 	switch (share) {
 	case SPCI_MEMORY_DONATE:
 		mem_transition_table = donate_transitions;
diff --git a/test/vmapi/primary_with_secondaries/memory_sharing.c b/test/vmapi/primary_with_secondaries/memory_sharing.c
index b094a7a..5de2649 100644
--- a/test/vmapi/primary_with_secondaries/memory_sharing.c
+++ b/test/vmapi/primary_with_secondaries/memory_sharing.c
@@ -148,6 +148,14 @@
 }
 
 /**
+ * Device address space cannot be shared, only normal memory.
+ */
+TEST(memory_sharing, cannot_share_device_memory)
+{
+	check_cannot_share_memory((void *)PAGE_SIZE, PAGE_SIZE);
+}
+
+/**
  * After memory has been shared concurrently, it can't be shared again.
  */
 TEST(memory_sharing, cannot_share_concurrent_memory_twice)
@@ -262,6 +270,23 @@
 }
 
 /**
+ * Device address space cannot be shared, only normal memory.
+ */
+TEST(memory_sharing, spci_cannot_share_device_memory)
+{
+	struct mailbox_buffers mb = set_up_mailbox();
+	struct spci_memory_region_constituent constituents[] = {
+		{.address = PAGE_SIZE, .page_count = 1},
+	};
+
+	SERVICE_SELECT(SERVICE_VM0, "spci_memory_return", mb.send);
+	SERVICE_SELECT(SERVICE_VM1, "spci_memory_return", mb.send);
+
+	spci_check_cannot_lend_memory(mb, constituents);
+	spci_check_cannot_donate_memory(mb, constituents, 1, -1);
+}
+
+/**
  * SPCI Memory given away can be given back.
  * Employing SPCI donate architected messages.
  */
diff --git a/test/vmapi/primary_with_secondaries/no_services.c b/test/vmapi/primary_with_secondaries/no_services.c
index edaa90a..40da63d 100644
--- a/test/vmapi/primary_with_secondaries/no_services.c
+++ b/test/vmapi/primary_with_secondaries/no_services.c
@@ -110,6 +110,14 @@
 }
 
 /**
+ * The configured send/receive addresses can't be device memory.
+ */
+TEST(hf_vm_configure, fails_with_device_memory)
+{
+	EXPECT_EQ(hf_vm_configure(PAGE_SIZE, PAGE_SIZE * 2), -1);
+}
+
+/**
  * The configured send/receive addresses can't be unaligned.
  */
 TEST(hf_vm_configure, fails_with_unaligned_pointer)
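
Note on the new tests' addresses: they assume the page at address
PAGE_SIZE lies outside the memory ranges reported in the boot
parameters, so it keeps the blanket device mapping set up for the
primary VM. A hedged sketch of that assumption as a plain C predicate
(struct mem_range here is a simplified stand-in for Hafnium's
boot-params range type, and the helper itself is hypothetical):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Simplified stand-in for Hafnium's boot-params memory range. */
    struct mem_range {
        uintptr_t begin; /* Inclusive. */
        uintptr_t end;   /* Exclusive. */
    };

    /*
     * Hypothetical predicate: an address is normal memory iff it falls
     * inside one of the ranges that load_primary() remaps as normal;
     * every other address keeps the 1TB device mapping.
     */
    static bool addr_is_normal_memory(uintptr_t addr,
                                      const struct mem_range *ranges,
                                      size_t count)
    {
        size_t i;

        for (i = 0; i < count; ++i) {
            if (addr >= ranges[i].begin && addr < ranges[i].end) {
                return true;
            }
        }
        return false;
    }

On boards whose RAM starts well above the first page,
addr_is_normal_memory(PAGE_SIZE, ...) is false, which is why the tests
expect sharing and hf_vm_configure() to fail for that address.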