Read device memory regions from the FDT.

If the FDT provides device memory ranges, map only those specific
ranges as device memory rather than mapping a large chunk of address
space. If no device memory ranges are provided, fall back to mapping a
large chunk and carving out normal memory, so that QEMU tests continue
to work. For the time being, these ranges are still only mapped into
the primary VM.
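
For illustration only (the addresses, sizes and two-cell reg layout
below are made up, and the exact matching of the node depends on how
fdt_find_memory_ranges() selects nodes), a platform DTS could advertise
a peripheral's MMIO region as device memory along these lines:

    device-memory {
        device_type = "device-memory";
        reg = <0x0 0x1c090000 0x0 0x10000>;
    };

With such a node present, only that range is mapped into the primary VM
as device memory instead of falling back to the 1TB mapping.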

Change-Id: Ia3acf3bb206bf0399ebc3be33d4696cd0450539d
diff --git a/inc/hf/boot_params.h b/inc/hf/boot_params.h
index a55ee73..35b6624 100644
--- a/inc/hf/boot_params.h
+++ b/inc/hf/boot_params.h
@@ -25,6 +25,7 @@
 #include "hf/mpool.h"
 
 #define MAX_MEM_RANGES 20
+#define MAX_DEVICE_MEM_RANGES 10
 
 struct mem_range {
 	paddr_t begin;
@@ -36,6 +37,8 @@
 	size_t cpu_count;
 	struct mem_range mem_ranges[MAX_MEM_RANGES];
 	size_t mem_ranges_count;
+	struct mem_range device_mem_ranges[MAX_DEVICE_MEM_RANGES];
+	size_t device_mem_ranges_count;
 	paddr_t initrd_begin;
 	paddr_t initrd_end;
 	uintreg_t kernel_arg;
diff --git a/src/boot_flow/common.c b/src/boot_flow/common.c
index 597c421..dace9af 100644
--- a/src/boot_flow/common.c
+++ b/src/boot_flow/common.c
@@ -26,6 +26,7 @@
 			  const struct fdt_node *fdt_root)
 {
 	struct string memory = STRING_INIT("memory");
+	struct string device_memory = STRING_INIT("device-memory");
 
 	p->mem_ranges_count = 0;
 	p->kernel_arg = plat_boot_flow_get_kernel_arg();
@@ -34,7 +35,10 @@
 					       &p->initrd_end) &&
 	       fdt_find_cpus(fdt_root, p->cpu_ids, &p->cpu_count) &&
 	       fdt_find_memory_ranges(fdt_root, &memory, p->mem_ranges,
-				      &p->mem_ranges_count, MAX_MEM_RANGES);
+				      &p->mem_ranges_count, MAX_MEM_RANGES) &&
+	       fdt_find_memory_ranges(
+		       fdt_root, &device_memory, p->device_mem_ranges,
+		       &p->device_mem_ranges_count, MAX_DEVICE_MEM_RANGES);
 }
 
 /**
diff --git a/src/load.c b/src/load.c
index eee65e3..2751d7f 100644
--- a/src/load.c
+++ b/src/load.c
@@ -155,19 +155,25 @@
 		goto out;
 	}
 
-	/*
-	 * Map 1TB of address space as device memory to, most likely, make all
-	 * devices available to the primary VM.
-	 *
-	 * TODO: We should do a whitelist rather than a blacklist.
-	 */
-	if (!vm_identity_map(vm_locked, pa_init(0),
-			     pa_init(UINT64_C(1024) * 1024 * 1024 * 1024),
-			     MM_MODE_R | MM_MODE_W | MM_MODE_D, ppool, NULL)) {
-		dlog_error(
-			"Unable to initialise address space for primary vm\n");
-		ret = false;
-		goto out;
+	if (params->device_mem_ranges_count == 0) {
+		/*
+		 * Map 1TB of address space as device memory to, most likely,
+		 * make all devices available to the primary VM.
+		 *
+		 * TODO: remove this once all targets provide valid ranges.
+		 */
+		dlog_warning("Device memory not provided, defaulting to 1 TB.\n");
+
+		if (!vm_identity_map(
+			    vm_locked, pa_init(0),
+			    pa_init(UINT64_C(1024) * 1024 * 1024 * 1024),
+			    MM_MODE_R | MM_MODE_W | MM_MODE_D, ppool, NULL)) {
+			dlog_error(
+				"Unable to initialise address space for "
+				"primary vm\n");
+			ret = false;
+			goto out;
+		}
 	}
 
 	/* Map normal memory as such to permit caching, execution, etc. */
@@ -183,6 +189,19 @@
 		}
 	}
 
+	/* Map device memory as such to prevent execution, speculation, etc. */
+	for (i = 0; i < params->device_mem_ranges_count; ++i) {
+		if (!vm_identity_map(
+			    vm_locked, params->device_mem_ranges[i].begin,
+			    params->device_mem_ranges[i].end,
+			    MM_MODE_R | MM_MODE_W | MM_MODE_D, ppool, NULL)) {
+			dlog_error("Unable to initialise device memory for "
+				   "primary vm\n");
+			ret = false;
+			goto out;
+		}
+	}
+
 	if (!vm_unmap_hypervisor(vm_locked, ppool)) {
 		dlog_error("Unable to unmap hypervisor from primary vm\n");
 		ret = false;