Only allow identity memory mapping

The code assumes that virtual addresses are mapped to the same physical
addresses, but the mm functions allow callers to break this assumption.
Offering only identity mapping functions avoids this problem by enforcing
the assumption in the API itself.

Non-identity mapping can still be added later if there proves to be a
need for it.
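
As an illustrative sketch of the API change (the variable names here are
hypothetical), a call site changes as follows:

    /*
     * Before: the caller supplied the physical address separately, so it
     * could diverge from the virtual range being mapped.
     */
    mm_map(va_begin, va_end, pa_begin, MM_MODE_R);

    /*
     * After: the physical address is derived from the virtual range inside
     * mm_identity_map, so the identity assumption cannot be violated.
     */
    mm_identity_map(va_begin, va_end, MM_MODE_R);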

Change-Id: Ie3db41e1582c72d880275acdb48cabefe05b3ba0
diff --git a/inc/mm.h b/inc/mm.h
index a6f637e..31f5c2f 100644
--- a/inc/mm.h
+++ b/inc/mm.h
@@ -41,9 +41,9 @@
 
 bool mm_ptable_init(struct mm_ptable *t, uint32_t id, int mode);
 void mm_ptable_dump(struct mm_ptable *t, int mode);
-bool mm_ptable_map(struct mm_ptable *t, vaddr_t begin, vaddr_t end,
-		   paddr_t paddr, int mode);
-bool mm_ptable_map_page(struct mm_ptable *t, vaddr_t va, paddr_t pa, int mode);
+bool mm_ptable_identity_map(struct mm_ptable *t, vaddr_t begin, vaddr_t end,
+			    int mode);
+bool mm_ptable_identity_map_page(struct mm_ptable *t, vaddr_t va, int mode);
 bool mm_ptable_unmap(struct mm_ptable *t, vaddr_t begin, vaddr_t end, int mode);
 bool mm_ptable_is_mapped(struct mm_ptable *t, vaddr_t addr, int mode);
 void mm_ptable_defrag(struct mm_ptable *t, int mode);
@@ -51,7 +51,7 @@
 
 bool mm_init(void);
 bool mm_cpu_init(void);
-bool mm_map(vaddr_t begin, vaddr_t end, paddr_t paddr, int mode);
+bool mm_identity_map(vaddr_t begin, vaddr_t end, int mode);
 bool mm_unmap(vaddr_t begin, vaddr_t end, int mode);
 void mm_defrag(void);
 
diff --git a/src/api.c b/src/api.c
index 93b14c2..3c6edb2 100644
--- a/src/api.c
+++ b/src/api.c
@@ -141,8 +141,8 @@
 	}
 
 	/* Map the send page as read-only in the hypervisor address space. */
-	if (!mm_map((vaddr_t)send, (vaddr_t)send + PAGE_SIZE, send,
-		    MM_MODE_R)) {
+	if (!mm_identity_map((vaddr_t)send, (vaddr_t)send + PAGE_SIZE,
+			     MM_MODE_R)) {
 		ret = -1;
 		goto exit;
 	}
@@ -151,8 +151,8 @@
 	 * Map the receive page as writable in the hypervisor address space. On
 	 * failure, unmap the send page before returning.
 	 */
-	if (!mm_map((vaddr_t)recv, (vaddr_t)recv + PAGE_SIZE, recv,
-		    MM_MODE_W)) {
+	if (!mm_identity_map((vaddr_t)recv, (vaddr_t)recv + PAGE_SIZE,
+			     MM_MODE_W)) {
 		mm_unmap((vaddr_t)send, (vaddr_t)send + PAGE_SIZE, 0);
 		ret = -1;
 		goto exit;
diff --git a/src/fdt_handler.c b/src/fdt_handler.c
index fa2f9b6..47637ca 100644
--- a/src/fdt_handler.c
+++ b/src/fdt_handler.c
@@ -167,8 +167,8 @@
 	bool ret = false;
 
 	/* Map the fdt header in. */
-	if (!mm_map((vaddr_t)fdt, (vaddr_t)fdt + fdt_header_size(),
-		    (paddr_t)fdt, MM_MODE_R)) {
+	if (!mm_identity_map((vaddr_t)fdt, (vaddr_t)fdt + fdt_header_size(),
+			     MM_MODE_R)) {
 		dlog("Unable to map FDT header.\n");
 		goto err_unmap_fdt_header;
 	}
@@ -179,8 +179,8 @@
 	}
 
 	/* Map the rest of the fdt in. */
-	if (!mm_map((vaddr_t)fdt, (vaddr_t)fdt + fdt_total_size(fdt),
-		    (paddr_t)fdt, MM_MODE_R)) {
+	if (!mm_identity_map((vaddr_t)fdt, (vaddr_t)fdt + fdt_total_size(fdt),
+			     MM_MODE_R)) {
 		dlog("Unable to map full FDT.\n");
 		goto err_unmap_fdt_header;
 	}
@@ -216,8 +216,8 @@
 	bool ret = false;
 
 	/* Map the fdt header in. */
-	if (!mm_map((vaddr_t)fdt, (vaddr_t)fdt + fdt_header_size(),
-		    (paddr_t)fdt, MM_MODE_R)) {
+	if (!mm_identity_map((vaddr_t)fdt, (vaddr_t)fdt + fdt_header_size(),
+			     MM_MODE_R)) {
 		dlog("Unable to map FDT header.\n");
 		return false;
 	}
@@ -228,9 +228,9 @@
 	}
 
 	/* Map the fdt (+ a page) in r/w mode in preparation for updating it. */
-	if (!mm_map((vaddr_t)fdt,
-		    (vaddr_t)fdt + fdt_total_size(fdt) + PAGE_SIZE,
-		    (paddr_t)fdt, MM_MODE_R | MM_MODE_W)) {
+	if (!mm_identity_map((vaddr_t)fdt,
+			     (vaddr_t)fdt + fdt_total_size(fdt) + PAGE_SIZE,
+			     MM_MODE_R | MM_MODE_W)) {
 		dlog("Unable to map FDT in r/w mode.\n");
 		goto err_unmap_fdt_header;
 	}
diff --git a/src/load.c b/src/load.c
index 7ab012e..b4df5ad 100644
--- a/src/load.c
+++ b/src/load.c
@@ -15,7 +15,7 @@
  */
 static bool copy_to_unmaped(paddr_t to, const void *from, size_t size)
 {
-	if (!mm_map((vaddr_t)to, (vaddr_t)to + size, to, MM_MODE_W)) {
+	if (!mm_identity_map((vaddr_t)to, (vaddr_t)to + size, MM_MODE_W)) {
 		return false;
 	}
 
@@ -118,10 +118,10 @@
 
 		/* Map the 1TB of memory. */
 		/* TODO: We should do a whitelist rather than a blacklist. */
-		if (!mm_ptable_map(&primary_vm.ptable, 0,
-				   1024ull * 1024 * 1024 * 1024, 0,
-				   MM_MODE_R | MM_MODE_W | MM_MODE_X |
-					   MM_MODE_NOINVALIDATE)) {
+		if (!mm_ptable_identity_map(&primary_vm.ptable, 0,
+					    1024ull * 1024 * 1024 * 1024,
+					    MM_MODE_R | MM_MODE_W | MM_MODE_X |
+						    MM_MODE_NOINVALIDATE)) {
 			dlog("Unable to initialise memory for primary vm\n");
 			return false;
 		}
@@ -200,16 +200,16 @@
 
 		/* TODO: Remove this. */
 		/* Grant VM access to uart. */
-		mm_ptable_map_page(&secondary_vm[count].ptable, PL011_BASE,
-				   PL011_BASE,
-				   MM_MODE_R | MM_MODE_W | MM_MODE_D |
-					   MM_MODE_NOINVALIDATE);
+		mm_ptable_identity_map_page(&secondary_vm[count].ptable,
+					    PL011_BASE,
+					    MM_MODE_R | MM_MODE_W | MM_MODE_D |
+						    MM_MODE_NOINVALIDATE);
 
 		/* Grant the VM access to the memory. */
-		if (!mm_ptable_map(&secondary_vm[count].ptable, *mem_end,
-				   *mem_end + mem, *mem_end,
-				   MM_MODE_R | MM_MODE_W | MM_MODE_X |
-					   MM_MODE_NOINVALIDATE)) {
+		if (!mm_ptable_identity_map(&secondary_vm[count].ptable,
+					    *mem_end, *mem_end + mem,
+					    MM_MODE_R | MM_MODE_W | MM_MODE_X |
+						    MM_MODE_NOINVALIDATE)) {
 			dlog("Unable to initialise memory for vm %u\n", count);
 			continue;
 		}
diff --git a/src/main.c b/src/main.c
index 7938fec..72815d7 100644
--- a/src/main.c
+++ b/src/main.c
@@ -68,8 +68,8 @@
 	     params.initrd_end - 1);
 
 	/* Map initrd in, and initialise cpio parser. */
-	if (!mm_map(params.initrd_begin, params.initrd_end, params.initrd_begin,
-		    MM_MODE_R)) {
+	if (!mm_identity_map(params.initrd_begin, params.initrd_end,
+			     MM_MODE_R)) {
 		panic("unable to map initrd in");
 	}
 
diff --git a/src/mm.c b/src/mm.c
index f2062c8..01e28a3 100644
--- a/src/mm.c
+++ b/src/mm.c
@@ -195,19 +195,19 @@
 
 /**
  * Updates the given table such that the given virtual address range is mapped
- * to the given physical address range in the architecture-agnostic mode
+ * to the corresponding physical address range in the architecture-agnostic mode
  * provided.
  */
-bool mm_ptable_map(struct mm_ptable *t, vaddr_t begin, vaddr_t end,
-		   paddr_t paddr, int mode)
+bool mm_ptable_identity_map(struct mm_ptable *t, vaddr_t begin, vaddr_t end,
+			    int mode)
 {
 	uint64_t attrs = arch_mm_mode_to_attrs(mode);
 	int flags = (mode & MM_MODE_NOSYNC) ? 0 : MAP_FLAG_SYNC;
 	int level = arch_mm_max_level(mode);
+	paddr_t paddr = arch_mm_clear_pa(begin);
 
 	begin = arch_mm_clear_va(begin);
 	end = arch_mm_clear_va(end + PAGE_SIZE - 1);
-	paddr = arch_mm_clear_pa(paddr);
 
 	/*
 	 * Do it in two steps to prevent leaving the table in a halfway updated
@@ -241,7 +241,7 @@
 	begin = arch_mm_clear_va(begin);
 	end = arch_mm_clear_va(end + PAGE_SIZE - 1);
 
-	/* Also do updates in two steps, similarly to mm_ptable_map. */
+	/* Also do updates in two steps, similarly to mm_ptable_identity_map. */
 	if (!mm_map_level(begin, end, begin, 0, t->table, level, flags)) {
 		return false;
 	}
@@ -258,18 +258,19 @@
 }
 
 /**
- * Updates the given table such that a single virtual address page is mapped
- * to a single physical address page in the provided architecture-agnostic mode.
+ * Updates the given table such that a single virtual address page is mapped to
+ * the corresponding physical address page in the provided architecture-agnostic
+ * mode.
  */
-bool mm_ptable_map_page(struct mm_ptable *t, vaddr_t va, paddr_t pa, int mode)
+bool mm_ptable_identity_map_page(struct mm_ptable *t, vaddr_t va, int mode)
 {
 	size_t i;
 	uint64_t attrs = arch_mm_mode_to_attrs(mode);
 	pte_t *table = t->table;
 	bool sync = !(mode & MM_MODE_NOSYNC);
+	paddr_t pa = arch_mm_clear_pa(va);
 
 	va = arch_mm_clear_va(va);
-	pa = arch_mm_clear_pa(pa);
 
 	for (i = arch_mm_max_level(mode); i > 0; i--) {
 		table = mm_populate_table_pte(table + mm_index(va, i), i, sync);
@@ -414,12 +415,13 @@
 
 /**
  * Updates the hypervisor page table such that the given virtual address range
- * is mapped to the given physical address range in the architecture-agnostic
- * mode provided.
+ * is mapped to the corresponding physical address range in the
+ * architecture-agnostic mode provided.
  */
-bool mm_map(vaddr_t begin, vaddr_t end, paddr_t paddr, int mode)
+bool mm_identity_map(vaddr_t begin, vaddr_t end, int mode)
 {
-	return mm_ptable_map(&ptable, begin, end, paddr, mode | MM_MODE_STAGE1);
+	return mm_ptable_identity_map(&ptable, begin, end,
+				      mode | MM_MODE_STAGE1);
 }
 
 /**
@@ -447,19 +449,19 @@
 
 	/* Map page for uart. */
 	/* TODO: We may not want to map this. */
-	mm_ptable_map_page(&ptable, PL011_BASE, PL011_BASE,
-			   MM_MODE_R | MM_MODE_W | MM_MODE_D | MM_MODE_NOSYNC |
-				   MM_MODE_STAGE1);
+	mm_ptable_identity_map_page(&ptable, PL011_BASE,
+				    MM_MODE_R | MM_MODE_W | MM_MODE_D |
+					    MM_MODE_NOSYNC | MM_MODE_STAGE1);
 
 	/* Map each section. */
-	mm_map((vaddr_t)text_begin, (vaddr_t)text_end, (paddr_t)text_begin,
-	       MM_MODE_X | MM_MODE_NOSYNC);
+	mm_identity_map((vaddr_t)text_begin, (vaddr_t)text_end,
+			MM_MODE_X | MM_MODE_NOSYNC);
 
-	mm_map((vaddr_t)rodata_begin, (vaddr_t)rodata_end,
-	       (paddr_t)rodata_begin, MM_MODE_R | MM_MODE_NOSYNC);
+	mm_identity_map((vaddr_t)rodata_begin, (vaddr_t)rodata_end,
+			MM_MODE_R | MM_MODE_NOSYNC);
 
-	mm_map((vaddr_t)data_begin, (vaddr_t)data_end, (paddr_t)data_begin,
-	       MM_MODE_R | MM_MODE_W | MM_MODE_NOSYNC);
+	mm_identity_map((vaddr_t)data_begin, (vaddr_t)data_end,
+			MM_MODE_R | MM_MODE_W | MM_MODE_NOSYNC);
 
 	return arch_mm_init((paddr_t)ptable.table, true);
 }