Better use of address types in mm.h.

The functions in mm.h are called to map physical pages into and out of
an address space, so it makes more sense for their APIs to take those
physical addresses directly and avoid mistakes or confusion in the
conversions.

Once a physical address has been mapped, it is available as a virtual
address or pointer in the hypervisor and as an intermediate physical
address in a VM. The mapping functions now return the corresponding
type so that callers don't have to convert addresses themselves, and
the vast majority of address type conversions now happen within mm.
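
As an illustrative sketch of the new calling convention (not part of
this change; the addresses and the vm pointer are made up for the
example):

	paddr_t begin = pa_init(PL011_BASE);   /* example: the UART page */
	paddr_t end = pa_add(begin, PAGE_SIZE);

	/* Hypervisor stage 1 mapping now returns a usable pointer. */
	void *send = mm_identity_map(begin, end, MM_MODE_R);
	if (!send) {
		/* handle failure */
	}

	/* VM stage 2 mapping now reports the intermediate physical address. */
	ipaddr_t ipa;
	if (!mm_vm_identity_map(&vm->ptable, begin, end,
				MM_MODE_R | MM_MODE_W, &ipa)) {
		/* handle failure */
	}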

Internally, mm still shares the same management code between the
stage 1 and stage 2 tables, so that code is made generic over the type
of input address being used.
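
Roughly, the shared code now operates on a plain integer address type
(a sketch pieced together from the new src/mm.c):

	/* The type of addresses stored in the page table. */
	typedef uintvaddr_t ptable_addr_t;

	/* Both the stage 1 and stage 2 entry points reduce their paddr_t
	 * arguments to this type before walking the table. */
	ptable_addr_t begin = pa_addr(pa_begin);
	ptable_addr_t end = mm_round_up_to_page(pa_addr(pa_end));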

Change-Id: I9201b98b7329ead304903b8b8968c4378eb5a4db
diff --git a/Makefile b/Makefile
index 15751a2..d8b80af 100644
--- a/Makefile
+++ b/Makefile
@@ -39,6 +39,7 @@
 # see .clang-format
 .PHONY: format
 format:
+	@echo "Formatting..."
 	@find src/ -name *.c -o -name *.h | xargs clang-format -style file -i
 	@find inc/ -name *.c -o -name *.h | xargs clang-format -style file -i
 	@find test/ -name *.c -o -name *.h | xargs clang-format -style file -i
diff --git a/inc/addr.h b/inc/addr.h
index 964660b..15d32b5 100644
--- a/inc/addr.h
+++ b/inc/addr.h
@@ -70,11 +70,11 @@
 }
 
 /**
- * Advances a virtual address.
+ * Advances a physical address.
  */
-static inline vaddr_t va_add(vaddr_t va, size_t n)
+static inline paddr_t pa_add(paddr_t pa, size_t n)
 {
-	return va_init(va_addr(va) + n);
+	return pa_init(pa_addr(pa) + n);
 }
 
 /**
diff --git a/inc/mm.h b/inc/mm.h
index 74158a7..6c539df 100644
--- a/inc/mm.h
+++ b/inc/mm.h
@@ -42,39 +42,21 @@
 
 bool mm_ptable_init(struct mm_ptable *t, uint32_t id, int mode);
 void mm_ptable_dump(struct mm_ptable *t, int mode);
-bool mm_ptable_identity_map(struct mm_ptable *t, vaddr_t begin, vaddr_t end,
-			    int mode);
-bool mm_ptable_identity_map_page(struct mm_ptable *t, vaddr_t va, int mode);
-bool mm_ptable_unmap(struct mm_ptable *t, vaddr_t begin, vaddr_t end, int mode);
-bool mm_ptable_is_mapped(struct mm_ptable *t, vaddr_t addr, int mode);
 void mm_ptable_defrag(struct mm_ptable *t, int mode);
 bool mm_ptable_unmap_hypervisor(struct mm_ptable *t, int mode);
 
+bool mm_vm_identity_map(struct mm_ptable *t, paddr_t begin, paddr_t end,
+			int mode, ipaddr_t *ipa);
+bool mm_vm_identity_map_page(struct mm_ptable *t, paddr_t begin, int mode,
+			     ipaddr_t *ipa);
+bool mm_vm_unmap(struct mm_ptable *t, paddr_t begin, paddr_t end, int mode);
+bool mm_vm_is_mapped(struct mm_ptable *t, ipaddr_t ipa, int mode);
+bool mm_vm_translate(struct mm_ptable *t, ipaddr_t ipa, paddr_t *pa);
+
 bool mm_init(void);
 bool mm_cpu_init(void);
-bool mm_identity_map(vaddr_t begin, vaddr_t end, int mode);
-bool mm_unmap(vaddr_t begin, vaddr_t end, int mode);
+void *mm_identity_map(paddr_t begin, paddr_t end, int mode);
+bool mm_unmap(paddr_t begin, paddr_t end, int mode);
 void mm_defrag(void);
 
-/**
- * Converts an intermediate physical address to a physical address. Addresses
- * are currently identity mapped so this is a simple type convertion. Returns
- * true if the address was mapped in the table and the address was converted.
- */
-static inline bool mm_ptable_translate_ipa(struct mm_ptable *t, ipaddr_t ipa,
-					   paddr_t *pa)
-{
-	/* TODO: the ptable functions map physical to virtual addresses but they
-	 * should really be mapping to intermediate physical addresses.
-	 * It might be better to have different interfaces to the mm functions?
-	 * This might also mean ipaddr_t should be used when building the VM
-	 * tables too?
-	 * */
-	if (mm_ptable_is_mapped(t, va_init(ipa_addr(ipa)), 0)) {
-		*pa = pa_init(ipa_addr(ipa));
-		return true;
-	}
-	return false;
-}
-
 #endif /* _MM_H */
diff --git a/src/api.c b/src/api.c
index 2b7b5d9..c823ec0 100644
--- a/src/api.c
+++ b/src/api.c
@@ -110,12 +110,10 @@
 int32_t api_vm_configure(ipaddr_t send, ipaddr_t recv)
 {
 	struct vm *vm = cpu()->current->vm;
-	paddr_t pa_send;
-	paddr_t pa_recv;
-	vaddr_t send_begin;
-	vaddr_t send_end;
-	vaddr_t recv_begin;
-	vaddr_t recv_end;
+	paddr_t pa_send_begin;
+	paddr_t pa_send_end;
+	paddr_t pa_recv_begin;
+	paddr_t pa_recv_end;
 	int32_t ret;
 
 	/* Fail if addresses are not page-aligned. */
@@ -142,19 +140,18 @@
 	 * provided the address was acessible from the VM which ensures that the
 	 * caller isn't trying to use another VM's memory.
 	 */
-	if (!mm_ptable_translate_ipa(&vm->ptable, send, &pa_send) ||
-	    !mm_ptable_translate_ipa(&vm->ptable, recv, &pa_recv)) {
+	if (!mm_vm_translate(&vm->ptable, send, &pa_send_begin) ||
+	    !mm_vm_translate(&vm->ptable, recv, &pa_recv_begin)) {
 		ret = -1;
 		goto exit;
 	}
 
-	send_begin = va_from_pa(pa_send);
-	send_end = va_add(send_begin, PAGE_SIZE);
-	recv_begin = va_from_pa(pa_recv);
-	recv_end = va_add(recv_begin, PAGE_SIZE);
+	pa_send_end = pa_add(pa_send_begin, PAGE_SIZE);
+	pa_recv_end = pa_add(pa_recv_begin, PAGE_SIZE);
 
 	/* Map the send page as read-only in the hypervisor address space. */
-	if (!mm_identity_map(send_begin, send_end, MM_MODE_R)) {
+	vm->rpc.send = mm_identity_map(pa_send_begin, pa_send_end, MM_MODE_R);
+	if (!vm->rpc.send) {
 		ret = -1;
 		goto exit;
 	}
@@ -163,16 +160,14 @@
 	 * Map the receive page as writable in the hypervisor address space. On
 	 * failure, unmap the send page before returning.
 	 */
-	if (!mm_identity_map(recv_begin, recv_end, MM_MODE_W)) {
-		mm_unmap(send_begin, send_end, 0);
+	vm->rpc.recv = mm_identity_map(pa_recv_begin, pa_recv_end, MM_MODE_W);
+	if (!vm->rpc.recv) {
+		vm->rpc.send = NULL;
+		mm_unmap(pa_send_begin, pa_send_end, 0);
 		ret = -1;
 		goto exit;
 	}
 
-	/* Save pointers to the pages. */
-	vm->rpc.send = ptr_from_va(send_begin);
-	vm->rpc.recv = ptr_from_va(recv_begin);
-
 	/* TODO: Notify any waiters. */
 
 	ret = 0;
diff --git a/src/arch/aarch64/inc/arch_addr.h b/src/arch/aarch64/inc/arch_addr.h
index 92bc2fa..2fdf7e1 100644
--- a/src/arch/aarch64/inc/arch_addr.h
+++ b/src/arch/aarch64/inc/arch_addr.h
@@ -3,7 +3,6 @@
 
 #include <stdint.h>
 
-#define PAGE_LEVEL_BITS 9
 #define PAGE_BITS 12
 
 /* Integer type large enough to hold a physical address. */
@@ -12,7 +11,4 @@
 /* Integer type large enough to hold a virtual address. */
 typedef uintptr_t uintvaddr_t;
 
-/* A page table entry. */
-typedef uint64_t pte_t;
-
 #endif /* _ARCH_ADDR_H */
diff --git a/src/arch/aarch64/inc/arch_mm.h b/src/arch/aarch64/inc/arch_mm.h
index 2bd80f2..e904dab 100644
--- a/src/arch/aarch64/inc/arch_mm.h
+++ b/src/arch/aarch64/inc/arch_mm.h
@@ -6,6 +6,11 @@
 
 #include "addr.h"
 
+/* A page table entry. */
+typedef uint64_t pte_t;
+
+#define PAGE_LEVEL_BITS 9
+
 /**
  * Converts a physical address to a table PTE.
  *
@@ -86,15 +91,6 @@
 	((v) & ~((1ull << PAGE_BITS) - 1) & ((1ull << 48) - 1))
 
 /**
- * Clears the given virtual address, i.e., sets the ignored bits (from a page
- * table perspective) to zero.
- */
-static inline vaddr_t arch_mm_clear_va(vaddr_t va)
-{
-	return va_init(CLEAR_PTE_ATTRS(va_addr(va)));
-}
-
-/**
  * Clears the given physical address, i.e., sets the ignored bits (from a page
  * table perspective) to zero.
  */
@@ -112,11 +108,12 @@
 }
 
 /**
- * Extracts a page table pointer from the given page table entry.
+ * Extracts the physical address of the page table referred to by the given page
+ * table entry.
  */
-static inline pte_t *arch_mm_pte_to_table(pte_t pte)
+static inline paddr_t arch_mm_pte_to_table(pte_t pte)
 {
-	return (pte_t *)CLEAR_PTE_ATTRS(pte);
+	return pa_init(CLEAR_PTE_ATTRS(pte));
 }
 
 #undef CLEAR_PTE_ATTRS
@@ -144,14 +141,15 @@
 }
 
 /**
- * Invalidates stage-2 TLB entries referring to the given virtual address range.
+ * Invalidates stage-2 TLB entries referring to the given intermediate physical
+ * address range.
  */
-static inline void arch_mm_invalidate_stage2_range(vaddr_t va_begin,
-						   vaddr_t va_end)
+static inline void arch_mm_invalidate_stage2_range(ipaddr_t va_begin,
+						   ipaddr_t va_end)
 {
-	uintvaddr_t begin = va_addr(va_begin);
-	uintvaddr_t end = va_addr(va_end);
-	uintvaddr_t it;
+	uintpaddr_t begin = ipa_addr(va_begin);
+	uintpaddr_t end = ipa_addr(va_end);
+	uintpaddr_t it;
 
 	/* TODO: This only applies to the current VMID. */
 
diff --git a/src/fdt_handler.c b/src/fdt_handler.c
index 99ea4d4..06320e4 100644
--- a/src/fdt_handler.c
+++ b/src/fdt_handler.c
@@ -174,24 +174,22 @@
 	bool ret = false;
 
 	/* Map the fdt header in. */
-	if (!mm_identity_map(va_from_pa(fdt_addr),
-			     va_add(va_from_pa(fdt_addr), fdt_header_size()),
-			     MM_MODE_R)) {
+	fdt = mm_identity_map(fdt_addr, pa_add(fdt_addr, fdt_header_size()),
+			      MM_MODE_R);
+	if (!fdt) {
 		dlog("Unable to map FDT header.\n");
 		goto err_unmap_fdt_header;
 	}
 
-	fdt = ptr_from_va(va_from_pa(fdt_addr));
-
 	if (!fdt_root_node(&n, fdt)) {
 		dlog("FDT failed validation.\n");
 		goto err_unmap_fdt_header;
 	}
 
 	/* Map the rest of the fdt in. */
-	if (!mm_identity_map(va_from_pa(fdt_addr),
-			     va_add(va_from_pa(fdt_addr), fdt_total_size(fdt)),
-			     MM_MODE_R)) {
+	fdt = mm_identity_map(fdt_addr, pa_add(fdt_addr, fdt_total_size(fdt)),
+			      MM_MODE_R);
+	if (!fdt) {
 		dlog("Unable to map full FDT.\n");
 		goto err_unmap_fdt_header;
 	}
@@ -213,13 +211,11 @@
 	ret = true;
 
 out_unmap_fdt:
-	mm_unmap(va_from_pa(fdt_addr),
-		 va_add(va_from_pa(fdt_addr), fdt_total_size(fdt)), 0);
+	mm_unmap(fdt_addr, pa_add(fdt_addr, fdt_total_size(fdt)), 0);
 	return ret;
 
 err_unmap_fdt_header:
-	mm_unmap(va_from_pa(fdt_addr),
-		 va_add(va_from_pa(fdt_addr), fdt_header_size()), 0);
+	mm_unmap(fdt_addr, pa_add(fdt_addr, fdt_header_size()), 0);
 	return false;
 }
 
@@ -230,25 +226,23 @@
 	bool ret = false;
 
 	/* Map the fdt header in. */
-	if (!mm_identity_map(va_from_pa(fdt_addr),
-			     va_add(va_from_pa(fdt_addr), fdt_header_size()),
-			     MM_MODE_R)) {
+	fdt = mm_identity_map(fdt_addr, pa_add(fdt_addr, fdt_header_size()),
+			      MM_MODE_R);
+	if (!fdt) {
 		dlog("Unable to map FDT header.\n");
 		return false;
 	}
 
-	fdt = ptr_from_va(va_from_pa(fdt_addr));
-
 	if (!fdt_root_node(&n, fdt)) {
 		dlog("FDT failed validation.\n");
 		goto err_unmap_fdt_header;
 	}
 
 	/* Map the fdt (+ a page) in r/w mode in preparation for updating it. */
-	if (!mm_identity_map(va_from_pa(fdt_addr),
-			     va_add(va_from_pa(fdt_addr),
-				    fdt_total_size(fdt) + PAGE_SIZE),
-			     MM_MODE_R | MM_MODE_W)) {
+	fdt = mm_identity_map(fdt_addr,
+			      pa_add(fdt_addr, fdt_total_size(fdt) + PAGE_SIZE),
+			      MM_MODE_R | MM_MODE_W);
+	if (!fdt) {
 		dlog("Unable to map FDT in r/w mode.\n");
 		goto err_unmap_fdt_header;
 	}
@@ -291,17 +285,14 @@
 
 out_unmap_fdt:
 	/* Unmap FDT. */
-	if (!mm_unmap(va_from_pa(fdt_addr),
-		      va_add(va_from_pa(fdt_addr),
-			     fdt_total_size(fdt) + PAGE_SIZE),
-		      0)) {
+	if (!mm_unmap(fdt_addr,
+		      pa_add(fdt_addr, fdt_total_size(fdt) + PAGE_SIZE), 0)) {
 		dlog("Unable to unmap writable FDT.\n");
 		return false;
 	}
 	return ret;
 
 err_unmap_fdt_header:
-	mm_unmap(va_from_pa(fdt_addr),
-		 va_add(va_from_pa(fdt_addr), fdt_header_size()), 0);
+	mm_unmap(fdt_addr, pa_add(fdt_addr, fdt_header_size()), 0);
 	return false;
 }
diff --git a/src/load.c b/src/load.c
index 17dfc57..44c282c 100644
--- a/src/load.c
+++ b/src/load.c
@@ -15,16 +15,17 @@
  */
 static bool copy_to_unmaped(paddr_t to, const void *from, size_t size)
 {
-	vaddr_t begin = va_from_pa(to);
-	vaddr_t end = va_add(begin, size);
+	paddr_t to_end = pa_add(to, size);
+	void *ptr;
 
-	if (!mm_identity_map(begin, end, MM_MODE_W)) {
+	ptr = mm_identity_map(to, to_end, MM_MODE_W);
+	if (!ptr) {
 		return false;
 	}
 
-	memcpy(ptr_from_va(begin), from, size);
+	memcpy(ptr, from, size);
 
-	mm_unmap(begin, end, 0);
+	mm_unmap(to, to_end, 0);
 
 	return true;
 }
@@ -122,11 +123,11 @@
 
 		/* Map the 1TB of memory. */
 		/* TODO: We should do a whitelist rather than a blacklist. */
-		if (!mm_ptable_identity_map(
-			    &primary_vm.ptable, va_init(0),
-			    va_init(1024ull * 1024 * 1024 * 1024),
-			    MM_MODE_R | MM_MODE_W | MM_MODE_X |
-				    MM_MODE_NOINVALIDATE)) {
+		if (!mm_vm_identity_map(&primary_vm.ptable, pa_init(0),
+					pa_init(1024ull * 1024 * 1024 * 1024),
+					MM_MODE_R | MM_MODE_W | MM_MODE_X |
+						MM_MODE_NOINVALIDATE,
+					NULL)) {
 			dlog("Unable to initialise memory for primary vm\n");
 			return false;
 		}
@@ -170,7 +171,9 @@
 	     memiter_parse_str(&it, &str) && count < MAX_VMS;
 	     count++) {
 		struct memiter kernel;
-		ipaddr_t secondary_mem_begin;
+		paddr_t secondary_mem_begin;
+		paddr_t secondary_mem_end;
+		ipaddr_t secondary_entry;
 
 		if (!memiter_find_file(cpio, &str, &kernel)) {
 			dlog("Unable to load kernel for vm %u\n", count);
@@ -192,8 +195,10 @@
 			continue;
 		}
 
+		secondary_mem_end = *mem_end;
 		*mem_end = pa_init(pa_addr(*mem_end) - mem);
-		secondary_mem_begin = ipa_from_pa(*mem_end);
+		secondary_mem_begin = *mem_end;
+
 		if (!copy_to_unmaped(*mem_end, kernel.next,
 				     kernel.limit - kernel.next)) {
 			dlog("Unable to copy kernel for vm %u\n", count);
@@ -207,25 +212,25 @@
 
 		/* TODO: Remove this. */
 		/* Grant VM access to uart. */
-		mm_ptable_identity_map_page(&secondary_vm[count].ptable,
-					    va_init(PL011_BASE),
-					    MM_MODE_R | MM_MODE_W | MM_MODE_D |
-						    MM_MODE_NOINVALIDATE);
+		mm_vm_identity_map_page(&secondary_vm[count].ptable,
+					pa_init(PL011_BASE),
+					MM_MODE_R | MM_MODE_W | MM_MODE_D |
+						MM_MODE_NOINVALIDATE,
+					NULL);
 
 		/* Grant the VM access to the memory. */
-		if (!mm_ptable_identity_map(&secondary_vm[count].ptable,
-					    va_from_pa(*mem_end),
-					    va_add(va_from_pa(*mem_end), mem),
-					    MM_MODE_R | MM_MODE_W | MM_MODE_X |
-						    MM_MODE_NOINVALIDATE)) {
+		if (!mm_vm_identity_map(&secondary_vm[count].ptable,
+					secondary_mem_begin, secondary_mem_end,
+					MM_MODE_R | MM_MODE_W | MM_MODE_X |
+						MM_MODE_NOINVALIDATE,
+					&secondary_entry)) {
 			dlog("Unable to initialise memory for vm %u\n", count);
 			continue;
 		}
 
 		/* Deny the primary VM access to this memory. */
-		if (!mm_ptable_unmap(&primary_vm.ptable, va_from_pa(*mem_end),
-				     va_add(va_from_pa(*mem_end), mem),
-				     MM_MODE_NOINVALIDATE)) {
+		if (!mm_vm_unmap(&primary_vm.ptable, secondary_mem_begin,
+				 secondary_mem_end, MM_MODE_NOINVALIDATE)) {
 			dlog("Unable to unmap secondary VM from primary VM\n");
 			return false;
 		}
@@ -233,7 +238,7 @@
 		dlog("Loaded VM%u with %u vcpus, entry at 0x%x\n", count, cpu,
 		     pa_addr(*mem_end));
 
-		vm_start_vcpu(secondary_vm + count, 0, secondary_mem_begin, 0);
+		vm_start_vcpu(secondary_vm + count, 0, secondary_entry, 0);
 	}
 
 	secondary_vm_count = count;
diff --git a/src/main.c b/src/main.c
index d41b1b3..0b21bbf 100644
--- a/src/main.c
+++ b/src/main.c
@@ -48,6 +48,7 @@
 	paddr_t new_mem_end;
 	struct memiter primary_initrd;
 	struct memiter cpio;
+	void *initrd;
 
 	dlog("Initialising hafnium\n");
 
@@ -68,12 +69,13 @@
 	     pa_addr(params.initrd_end) - 1);
 
 	/* Map initrd in, and initialise cpio parser. */
-	if (!mm_identity_map(va_from_pa(params.initrd_begin),
-			     va_from_pa(params.initrd_end), MM_MODE_R)) {
+	initrd = mm_identity_map(params.initrd_begin, params.initrd_end,
+				 MM_MODE_R);
+	if (!initrd) {
 		panic("unable to map initrd in");
 	}
 
-	memiter_init(&cpio, ptr_from_va(va_from_pa(params.initrd_begin)),
+	memiter_init(&cpio, initrd,
 		     pa_addr(params.initrd_end) - pa_addr(params.initrd_begin));
 
 	/* Load all VMs. */
diff --git a/src/mm.c b/src/mm.c
index d240c55..b4ef264 100644
--- a/src/mm.c
+++ b/src/mm.c
@@ -1,11 +1,24 @@
 #include "mm.h"
 
+#include <assert.h>
 #include <stdatomic.h>
 #include <stdint.h>
 
 #include "alloc.h"
 #include "dlog.h"
 
+/* The type of addresses stored in the page table. */
+typedef uintvaddr_t ptable_addr_t;
+
+/* For stage 2, the input is an intermediate physical address rather than a
+ * virtual address, so: */
+static_assert(
+	sizeof(ptable_addr_t) == sizeof(uintpaddr_t),
+	"Currently, the same code manages the stage 1 and stage 2 page tables "
+	"which only works if the virtual and intermediate physical addresses "
+	"are the same size. It looks like that assumption might not be holding "
+	"so we need to check that everything is going to be ok.");
+
 /* Keep macro alignment */
 /* clang-format off */
 
@@ -24,6 +37,22 @@
 static struct mm_ptable ptable;
 
 /**
+ * Rounds an address down to a page boundary.
+ */
+static ptable_addr_t mm_round_down_to_page(ptable_addr_t addr)
+{
+	return addr & ~((ptable_addr_t)(PAGE_SIZE - 1));
+}
+
+/**
+ * Rounds an address up to a page boundary.
+ */
+static ptable_addr_t mm_round_up_to_page(ptable_addr_t addr)
+{
+	return mm_round_down_to_page(addr + PAGE_SIZE - 1);
+}
+
+/**
  * Calculates the size of the address space represented by a page table entry at
  * the given level.
  */
@@ -33,22 +62,22 @@
 }
 
 /**
- * For a given virtual address, calculates the maximum (plus one) address that
- * can be represented by the same table at the given level.
+ * For a given address, calculates the maximum (plus one) address that can be
+ * represented by the same table at the given level.
  */
-static inline vaddr_t mm_level_end(vaddr_t va, int level)
+static inline ptable_addr_t mm_level_end(ptable_addr_t addr, int level)
 {
 	size_t offset = PAGE_BITS + (level + 1) * PAGE_LEVEL_BITS;
-	return va_init(((va_addr(va) >> offset) + 1) << offset);
+	return ((addr >> offset) + 1) << offset;
 }
 
 /**
- * For a given virtual address, calculates the index at which its entry is
- * stored in a table at the given level.
+ * For a given address, calculates the index at which its entry is stored in a
+ * table at the given level.
  */
-static inline size_t mm_index(vaddr_t va, int level)
+static inline size_t mm_index(ptable_addr_t addr, int level)
 {
-	uintvaddr_t v = va_addr(va) >> (PAGE_BITS + level * PAGE_LEVEL_BITS);
+	ptable_addr_t v = addr >> (PAGE_BITS + level * PAGE_LEVEL_BITS);
 	return v & ((1ull << PAGE_LEVEL_BITS) - 1);
 }
 
@@ -68,7 +97,7 @@
 
 	/* Just return pointer to table if it's already populated. */
 	if (arch_mm_pte_is_table(v)) {
-		return arch_mm_pte_to_table(v);
+		return ptr_from_va(va_from_pa(arch_mm_pte_to_table(v)));
 	}
 
 	/* Allocate a new table. */
@@ -124,21 +153,18 @@
 }
 
 /**
- * Updates the page table at the given level to map the given virtual address
- * range to a physical range using the provided (architecture-specific)
- * attributes.
+ * Updates the page table at the given level to map the given address range to a
+ * physical range using the provided (architecture-specific) attributes.
  *
  * This function calls itself recursively if it needs to update additional
  * levels, but the recursion is bound by the maximum number of levels in a page
  * table.
  */
-static bool mm_map_level(vaddr_t va_begin, vaddr_t va_end, paddr_t pa,
+static bool mm_map_level(ptable_addr_t begin, ptable_addr_t end, paddr_t pa,
 			 uint64_t attrs, pte_t *table, int level, int flags)
 {
-	pte_t *pte = table + mm_index(va_begin, level);
-	uintvaddr_t level_end = va_addr(mm_level_end(va_begin, level));
-	uintvaddr_t begin = va_addr(va_begin);
-	uintvaddr_t end = va_addr(va_end);
+	pte_t *pte = table + mm_index(begin, level);
+	ptable_addr_t level_end = mm_level_end(begin, level);
 	size_t entry_size = mm_entry_size(level);
 	bool commit = flags & MAP_FLAG_COMMIT;
 	bool sync = flags & MAP_FLAG_SYNC;
@@ -170,8 +196,8 @@
 				return false;
 			}
 
-			if (!mm_map_level(va_begin, va_end, pa, attrs, nt,
-					  level - 1, flags)) {
+			if (!mm_map_level(begin, end, pa, attrs, nt, level - 1,
+					  flags)) {
 				return false;
 			}
 		}
@@ -185,44 +211,47 @@
 }
 
 /**
- * Invalidates the TLB for the given virtual address range.
+ * Invalidates the TLB for the given address range.
  */
-static void mm_invalidate_tlb(vaddr_t begin, vaddr_t end, bool stage1)
+static void mm_invalidate_tlb(ptable_addr_t begin, ptable_addr_t end,
+			      bool stage1)
 {
 	if (stage1) {
-		arch_mm_invalidate_stage1_range(begin, end);
+		arch_mm_invalidate_stage1_range(va_init(begin), va_init(end));
 	} else {
-		arch_mm_invalidate_stage2_range(begin, end);
+		arch_mm_invalidate_stage2_range(ipa_init(begin), ipa_init(end));
 	}
 }
 
 /**
- * Updates the given table such that the given virtual address range is mapped
- * to the corresponding physical address range in the architecture-agnostic mode
- * provided.
+ * Updates the given table such that the given physical address range is mapped
+ * into the address space with the corresponding address range in the
+ * architecture-agnostic mode provided.
  */
-bool mm_ptable_identity_map(struct mm_ptable *t, vaddr_t begin, vaddr_t end,
-			    int mode)
+static bool mm_ptable_identity_map(struct mm_ptable *t, paddr_t pa_begin,
+				   paddr_t pa_end, int mode)
 {
 	uint64_t attrs = arch_mm_mode_to_attrs(mode);
 	int flags = (mode & MM_MODE_NOSYNC) ? 0 : MAP_FLAG_SYNC;
 	int level = arch_mm_max_level(mode);
 	pte_t *table = ptr_from_va(va_from_pa(t->table));
-	paddr_t paddr = arch_mm_clear_pa(pa_from_va(begin));
+	ptable_addr_t begin;
+	ptable_addr_t end;
 
-	begin = arch_mm_clear_va(begin);
-	end = arch_mm_clear_va(va_add(end, PAGE_SIZE - 1));
+	pa_begin = arch_mm_clear_pa(pa_begin);
+	begin = pa_addr(pa_begin);
+	end = mm_round_up_to_page(pa_addr(pa_end));
 
 	/*
 	 * Do it in two steps to prevent leaving the table in a halfway updated
 	 * state. In such a two-step implementation, the table may be left with
 	 * extra internal tables, but no different mapping on failure.
 	 */
-	if (!mm_map_level(begin, end, paddr, attrs, table, level, flags)) {
+	if (!mm_map_level(begin, end, pa_begin, attrs, table, level, flags)) {
 		return false;
 	}
 
-	mm_map_level(begin, end, paddr, attrs, table, level,
+	mm_map_level(begin, end, pa_begin, attrs, table, level,
 		     flags | MAP_FLAG_COMMIT);
 
 	/* Invalidate the tlb. */
@@ -234,25 +263,28 @@
 }
 
 /**
- * Updates the given table such that the given virtual address range is not
- * mapped to any physical address.
+ * Updates the given table such that the given physical address range is not
+ * mapped into the address space.
  */
-bool mm_ptable_unmap(struct mm_ptable *t, vaddr_t begin, vaddr_t end, int mode)
+static bool mm_ptable_unmap(struct mm_ptable *t, paddr_t pa_begin,
+			    paddr_t pa_end, int mode)
 {
 	int flags = (mode & MM_MODE_NOSYNC) ? 0 : MAP_FLAG_SYNC;
 	int level = arch_mm_max_level(mode);
 	pte_t *table = ptr_from_va(va_from_pa(t->table));
+	ptable_addr_t begin;
+	ptable_addr_t end;
 
-	begin = arch_mm_clear_va(begin);
-	end = arch_mm_clear_va(va_add(end, PAGE_SIZE - 1));
+	pa_begin = arch_mm_clear_pa(pa_begin);
+	begin = pa_addr(pa_begin);
+	end = mm_round_up_to_page(pa_addr(pa_end));
 
 	/* Also do updates in two steps, similarly to mm_ptable_identity_map. */
-	if (!mm_map_level(begin, end, pa_from_va(begin), 0, table, level,
-			  flags)) {
+	if (!mm_map_level(begin, end, pa_begin, 0, table, level, flags)) {
 		return false;
 	}
 
-	mm_map_level(begin, end, pa_from_va(begin), 0, table, level,
+	mm_map_level(begin, end, pa_begin, 0, table, level,
 		     flags | MAP_FLAG_COMMIT);
 
 	/* Invalidate the tlb. */
@@ -264,28 +296,31 @@
 }
 
 /**
- * Updates the given table such that a single virtual address page is mapped to
- * the corresponding physical address page in the provided architecture-agnostic
- * mode.
+ * Updates the given table such that a single physical address page is mapped
+ * into the address space with the corresponding address page in the provided
+ * architecture-agnostic mode.
  */
-bool mm_ptable_identity_map_page(struct mm_ptable *t, vaddr_t va, int mode)
+static bool mm_ptable_identity_map_page(struct mm_ptable *t, paddr_t pa,
+					int mode)
 {
 	size_t i;
 	uint64_t attrs = arch_mm_mode_to_attrs(mode);
 	pte_t *table = ptr_from_va(va_from_pa(t->table));
 	bool sync = !(mode & MM_MODE_NOSYNC);
-	paddr_t pa = arch_mm_clear_pa(pa_from_va(va));
+	ptable_addr_t addr;
 
-	va = arch_mm_clear_va(va);
+	pa = arch_mm_clear_pa(pa);
+	addr = pa_addr(pa);
 
 	for (i = arch_mm_max_level(mode); i > 0; i--) {
-		table = mm_populate_table_pte(table + mm_index(va, i), i, sync);
+		table = mm_populate_table_pte(table + mm_index(addr, i), i,
+					      sync);
 		if (!table) {
 			return false;
 		}
 	}
 
-	i = mm_index(va, 0);
+	i = mm_index(addr, 0);
 	table[i] = arch_mm_pa_to_page_pte(pa, attrs);
 	return true;
 }
@@ -308,8 +343,10 @@
 		}
 
 		if (arch_mm_pte_is_table(table[i])) {
-			mm_dump_table_recursive(arch_mm_pte_to_table(table[i]),
-						level - 1, max_level);
+			mm_dump_table_recursive(
+				ptr_from_va(va_from_pa(
+					arch_mm_pte_to_table(table[i]))),
+				level - 1, max_level);
 		}
 	}
 }
@@ -341,25 +378,26 @@
 bool mm_ptable_unmap_hypervisor(struct mm_ptable *t, int mode)
 {
 	/* TODO: If we add pages dynamically, they must be included here too. */
-	return mm_ptable_unmap(t, va_init((uintvaddr_t)text_begin),
-			       va_init((uintvaddr_t)text_end), mode) &&
-	       mm_ptable_unmap(t, va_init((uintvaddr_t)rodata_begin),
-			       va_init((uintvaddr_t)rodata_end), mode) &&
-	       mm_ptable_unmap(t, va_init((uintvaddr_t)data_begin),
-			       va_init((uintvaddr_t)data_end), mode);
+	return mm_ptable_unmap(t, pa_init((uintpaddr_t)text_begin),
+			       pa_init((uintpaddr_t)text_end), mode) &&
+	       mm_ptable_unmap(t, pa_init((uintpaddr_t)rodata_begin),
+			       pa_init((uintpaddr_t)rodata_end), mode) &&
+	       mm_ptable_unmap(t, pa_init((uintpaddr_t)data_begin),
+			       pa_init((uintpaddr_t)data_end), mode);
 }
 
 /**
- * Determines if the given virtual address is mapped in the given page table
- * by recursively traversing all levels of the page table.
+ * Determines if the given address is mapped in the given page table by
+ * recursively traversing all levels of the page table.
  */
-static bool mm_is_mapped_recursive(const pte_t *table, vaddr_t addr, int level)
+static bool mm_is_mapped_recursive(const pte_t *table, ptable_addr_t addr,
+				   int level)
 {
 	pte_t pte;
-	uintvaddr_t va_level_end = va_addr(mm_level_end(addr, level));
+	ptable_addr_t va_level_end = mm_level_end(addr, level);
 
 	/* It isn't mapped if it doesn't fit in the table. */
-	if (va_addr(addr) >= va_level_end) {
+	if (addr >= va_level_end) {
 		return false;
 	}
 
@@ -374,22 +412,24 @@
 	}
 
 	if (arch_mm_pte_is_table(pte)) {
-		return mm_is_mapped_recursive(arch_mm_pte_to_table(pte), addr,
-					      level - 1);
+		return mm_is_mapped_recursive(
+			ptr_from_va(va_from_pa(arch_mm_pte_to_table(pte))),
+			addr, level - 1);
 	}
 
 	return false;
 }
 
 /**
- * Determines if the given virtual address is mapped in the given page table.
+ * Determines if the given address is mapped in the given page table.
  */
-bool mm_ptable_is_mapped(struct mm_ptable *t, vaddr_t addr, int mode)
+static bool mm_ptable_is_mapped(struct mm_ptable *t, ptable_addr_t addr,
+				int mode)
 {
 	pte_t *table = ptr_from_va(va_from_pa(t->table));
 	int level = arch_mm_max_level(mode);
 
-	addr = arch_mm_clear_va(addr);
+	addr = mm_round_down_to_page(addr);
 
 	return mm_is_mapped_recursive(table, addr, level);
 }
@@ -425,21 +465,95 @@
 }
 
 /**
- * Updates the hypervisor page table such that the given virtual address range
- * is mapped to the corresponding physical address range in the
+ * Updates a VM's page table such that the given physical address range is
+ * mapped in the address space at the corresponding address range in the
  * architecture-agnostic mode provided.
  */
-bool mm_identity_map(vaddr_t begin, vaddr_t end, int mode)
+bool mm_vm_identity_map(struct mm_ptable *t, paddr_t begin, paddr_t end,
+			int mode, ipaddr_t *ipa)
 {
-	return mm_ptable_identity_map(&ptable, begin, end,
-				      mode | MM_MODE_STAGE1);
+	bool success =
+		mm_ptable_identity_map(t, begin, end, mode & ~MM_MODE_STAGE1);
+
+	if (success && ipa != NULL) {
+		*ipa = ipa_from_pa(begin);
+	}
+
+	return success;
 }
 
 /**
- * Updates the hypervisor table such that the given virtual address range is not
- * mapped to any physical address.
+ * Updates a VM's page table such that the given physical address page is
+ * mapped in the address space at the corresponding address page in the
+ * architecture-agnostic mode provided.
  */
-bool mm_unmap(vaddr_t begin, vaddr_t end, int mode)
+bool mm_vm_identity_map_page(struct mm_ptable *t, paddr_t begin, int mode,
+			     ipaddr_t *ipa)
+{
+	bool success =
+		mm_ptable_identity_map_page(t, begin, mode & ~MM_MODE_STAGE1);
+
+	if (success && ipa != NULL) {
+		*ipa = ipa_from_pa(begin);
+	}
+
+	return success;
+}
+
+/**
+ * Updates the VM's table such that the given physical address range is not
+ * mapped in the address space.
+ */
+bool mm_vm_unmap(struct mm_ptable *t, paddr_t begin, paddr_t end, int mode)
+{
+	return mm_ptable_unmap(t, begin, end, mode & ~MM_MODE_STAGE1);
+}
+
+/**
+ * Checks whether the given intermediate physical address is mapped in the given
+ * page table of a VM.
+ */
+bool mm_vm_is_mapped(struct mm_ptable *t, ipaddr_t ipa, int mode)
+{
+	return mm_ptable_is_mapped(t, ipa_addr(ipa), mode & ~MM_MODE_STAGE1);
+}
+
+/**
+ * Translates an intermediate physical address to a physical address. Addresses
+ * are currently identity mapped so this is a simple type conversion. Returns
+ * true if the address was mapped in the table and the address was converted.
+ */
+bool mm_vm_translate(struct mm_ptable *t, ipaddr_t ipa, paddr_t *pa)
+{
+	bool mapped = mm_vm_is_mapped(t, ipa, 0);
+
+	if (mapped) {
+		*pa = pa_init(ipa_addr(ipa));
+	}
+
+	return mapped;
+}
+
+/**
+ * Updates the hypervisor page table such that the given physical address range
+ * is mapped into the address space at the corresponding address range in the
+ * architecture-agnostic mode provided.
+ */
+void *mm_identity_map(paddr_t begin, paddr_t end, int mode)
+{
+	if (mm_ptable_identity_map(&ptable, begin, end,
+				   mode | MM_MODE_STAGE1)) {
+		return ptr_from_va(va_from_pa(begin));
+	}
+
+	return NULL;
+}
+
+/**
+ * Updates the hypervisor table such that the given physical address range is
+ * not mapped in the address space.
+ */
+bool mm_unmap(paddr_t begin, paddr_t end, int mode)
 {
 	return mm_ptable_unmap(&ptable, begin, end, mode | MM_MODE_STAGE1);
 }
@@ -460,21 +574,21 @@
 
 	/* Map page for uart. */
 	/* TODO: We may not want to map this. */
-	mm_ptable_identity_map_page(&ptable, va_init(PL011_BASE),
+	mm_ptable_identity_map_page(&ptable, pa_init(PL011_BASE),
 				    MM_MODE_R | MM_MODE_W | MM_MODE_D |
 					    MM_MODE_NOSYNC | MM_MODE_STAGE1);
 
 	/* Map each section. */
-	mm_identity_map(va_init((uintvaddr_t)text_begin),
-			va_init((uintvaddr_t)text_end),
+	mm_identity_map(pa_init((uintpaddr_t)text_begin),
+			pa_init((uintpaddr_t)text_end),
 			MM_MODE_X | MM_MODE_NOSYNC);
 
-	mm_identity_map(va_init((uintvaddr_t)rodata_begin),
-			va_init((uintvaddr_t)rodata_end),
+	mm_identity_map(pa_init((uintpaddr_t)rodata_begin),
+			pa_init((uintpaddr_t)rodata_end),
 			MM_MODE_R | MM_MODE_NOSYNC);
 
-	mm_identity_map(va_init((uintvaddr_t)data_begin),
-			va_init((uintvaddr_t)data_end),
+	mm_identity_map(pa_init((uintpaddr_t)data_begin),
+			pa_init((uintpaddr_t)data_end),
 			MM_MODE_R | MM_MODE_W | MM_MODE_NOSYNC);
 
 	return arch_mm_init(ptable.table, true);