Opaque address types

This ensures that conversions and arithmetic on physical, virtual, and
intermediate physical addresses are explicit rather than accidental, as
they would be if the types were primitive integers.
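
For example, mixing address spaces or doing raw arithmetic on an address
no longer compiles; everything has to go through the helpers introduced
here (a minimal sketch, where f is a hypothetical function and not part
of this change):

    void f(vaddr_t va, paddr_t pa)
    {
        pa = va;                    /* error: incompatible types */
        va = va + PAGE_SIZE;        /* error: invalid operands */
        va = va_add(va, PAGE_SIZE); /* explicit arithmetic */
        pa = mm_pa_from_va(va);     /* explicit conversion */
    }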

Change-Id: I94dd8e82e065757ae448d98be0cb89eaa1f6542d
diff --git a/build/arch/aarch64.gni b/build/arch/aarch64.gni
index 5b8e3d2..54775e3 100644
--- a/build/arch/aarch64.gni
+++ b/build/arch/aarch64.gni
@@ -6,5 +6,6 @@
   arch_aarch64_pl011_base_address = ""
 }
 
-assert(!arch_aarch64_use_pl011 || arch_aarch64_pl011_base_address != "",
-       "Must provide the PL011 base address as \"arch_aarch64_pl011_base_address\".")
+assert(
+    !arch_aarch64_use_pl011 || arch_aarch64_pl011_base_address != "",
+    "Must provide the PL011 base address as \"arch_aarch64_pl011_base_address\".")
diff --git a/inc/api.h b/inc/api.h
index 851c2da..1db9ca8 100644
--- a/inc/api.h
+++ b/inc/api.h
@@ -16,7 +16,7 @@
 int32_t api_vcpu_get_count(uint32_t vm_idx);
 int32_t api_vcpu_run(uint32_t vm_idx, uint32_t vcpu_idx, struct vcpu **next);
 struct vcpu *api_wait_for_interrupt(void);
-int32_t api_vm_configure(paddr_t send, paddr_t recv);
+int32_t api_vm_configure(ipaddr_t send, ipaddr_t recv);
 
 int32_t api_rpc_request(uint32_t vm_idx, size_t size);
 int32_t api_rpc_read_request(bool block, struct vcpu **next);
diff --git a/inc/fdt_handler.h b/inc/fdt_handler.h
index 9ac6853..e85b8f5 100644
--- a/inc/fdt_handler.h
+++ b/inc/fdt_handler.h
@@ -3,8 +3,9 @@
 
 #include "boot_params.h"
 #include "fdt.h"
+#include "mm.h"
 
-bool fdt_get_boot_params(struct fdt_header *fdt, struct boot_params *p);
-bool fdt_patch(struct fdt_header *fdt, struct boot_params_update *p);
+bool fdt_get_boot_params(paddr_t fdt_addr, struct boot_params *p);
+bool fdt_patch(paddr_t fdt_addr, struct boot_params_update *p);
 
 #endif /* _FDT_HANDLER_H */
diff --git a/inc/load.h b/inc/load.h
index 7a41b9a..6d19ae6 100644
--- a/inc/load.h
+++ b/inc/load.h
@@ -6,10 +6,11 @@
 
 #include "cpio.h"
 #include "memiter.h"
+#include "mm.h"
 
 bool load_primary(const struct memiter *cpio, size_t kernel_arg,
 		  struct memiter *initrd);
-bool load_secondary(const struct memiter *cpio, uint64_t mem_begin,
-		    uint64_t *mem_end);
+bool load_secondary(const struct memiter *cpio, paddr_t mem_begin,
+		    paddr_t *mem_end);
 
 #endif /* _LOAD_H */
diff --git a/inc/mm.h b/inc/mm.h
index 31f5c2f..1e00310 100644
--- a/inc/mm.h
+++ b/inc/mm.h
@@ -7,10 +7,15 @@
 #include "arch_mm.h"
 
 struct mm_ptable {
-	pte_t *table;
+	paddr_t table;
 	uint32_t id;
 };
 
+/* An opaque type for an intermediate physical address from a VM. */
+typedef struct {
+	uintpaddr_t ipa;
+} ipaddr_t;
+
 #define PAGE_SIZE (1 << PAGE_BITS)
 
 /* The following are arch-independent page mapping modes. */
@@ -55,4 +60,68 @@
 bool mm_unmap(vaddr_t begin, vaddr_t end, int mode);
 void mm_defrag(void);
 
+/**
+ * Initializes an intermediate physical address.
+ */
+static inline ipaddr_t ipa_init(uintpaddr_t v)
+{
+	return (ipaddr_t){.ipa = v};
+}
+
+/**
+ * Extracts the absolute intermediate physical address.
+ */
+static inline uintpaddr_t ipa_addr(ipaddr_t ipa)
+{
+	return ipa.ipa;
+}
+
+/**
+ * Converts a physical address to a virtual address. Addresses are currently
+ * identity mapped so this is a simple type conversion.
+ */
+static inline vaddr_t mm_va_from_pa(paddr_t pa)
+{
+	return va_init(pa_addr(pa));
+}
+
+/**
+ * Converts a virtual address to a physical address. Addresses are currently
+ * identity mapped so this is a simple type conversion.
+ */
+static inline paddr_t mm_pa_from_va(vaddr_t va)
+{
+	return pa_init(va_addr(va));
+}
+
+/**
+ * Converts an intermediate physical address to a physical address. Addresses
+ * are currently identity mapped so this is a simple type conversion. Returns
+ * true if the address was mapped in the table and the conversion succeeded.
+ */
+static inline bool mm_pa_from_ipa(struct mm_ptable *t, ipaddr_t ipa,
+				  paddr_t *pa)
+{
+	/*
+	 * TODO: The ptable functions map physical to virtual addresses, but
+	 * they should really map to intermediate physical addresses. It might
+	 * be better to have different interfaces to the mm functions? This
+	 * might also mean ipaddr_t should be used when building VM tables too?
+	 */
+	if (mm_ptable_is_mapped(t, va_init(ipa_addr(ipa)), 0)) {
+		*pa = pa_init(ipa_addr(ipa));
+		return true;
+	}
+	return false;
+}
+
+/**
+ * Converts a virtual address to a pointer. Only use when the virtual address
+ * is mapped for the calling context.
+ */
+static inline void *mm_ptr_from_va(vaddr_t va)
+{
+	return (void *)va_addr(va);
+}
+
 #endif /* _MM_H */
diff --git a/src/api.c b/src/api.c
index 3c6edb2..e860e11 100644
--- a/src/api.c
+++ b/src/api.c
@@ -107,20 +107,27 @@
  * Configures the VM to send/receive data through the specified pages. The pages
  * must not be shared.
  */
-int32_t api_vm_configure(paddr_t send, paddr_t recv)
+int32_t api_vm_configure(ipaddr_t send, ipaddr_t recv)
 {
 	struct vm *vm = cpu()->current->vm;
+	paddr_t pa_send;
+	paddr_t pa_recv;
+	vaddr_t send_begin;
+	vaddr_t send_end;
+	vaddr_t recv_begin;
+	vaddr_t recv_end;
 	int32_t ret;
 
 	/* Fail if addresses are not page-aligned. */
-	if ((recv & (PAGE_SIZE - 1)) || (send & (PAGE_SIZE - 1))) {
+	if ((ipa_addr(send) & (PAGE_SIZE - 1)) ||
+	    (ipa_addr(recv) & (PAGE_SIZE - 1))) {
 		return -1;
 	}
 
 	sl_lock(&vm->lock);
 
 	/* We only allow these to be setup once. */
-	if (vm->rpc.recv || vm->rpc.send) {
+	if (vm->rpc.send || vm->rpc.recv) {
 		ret = -1;
 		goto exit;
 	}
@@ -131,18 +138,23 @@
 	 */
 
 	/*
-	 * Check that both pages are acessible from the VM, i.e., ensure that
-	 * the caller isn't trying to use another VM's memory.
+	 * Convert the intermediate physical addresses to physical addresses,
+	 * provided the addresses are accessible from the VM; this ensures that
+	 * the caller isn't trying to use another VM's memory.
 	 */
-	if (!mm_ptable_is_mapped(&vm->ptable, recv, 0) ||
-	    !mm_ptable_is_mapped(&vm->ptable, send, 0)) {
+	if (!mm_pa_from_ipa(&vm->ptable, send, &pa_send) ||
+	    !mm_pa_from_ipa(&vm->ptable, recv, &pa_recv)) {
 		ret = -1;
 		goto exit;
 	}
 
+	send_begin = mm_va_from_pa(pa_send);
+	send_end = va_add(send_begin, PAGE_SIZE);
+	recv_begin = mm_va_from_pa(pa_recv);
+	recv_end = va_add(recv_begin, PAGE_SIZE);
+
 	/* Map the send page as read-only in the hypervisor address space. */
-	if (!mm_identity_map((vaddr_t)send, (vaddr_t)send + PAGE_SIZE,
-			     MM_MODE_R)) {
+	if (!mm_identity_map(send_begin, send_end, MM_MODE_R)) {
 		ret = -1;
 		goto exit;
 	}
@@ -151,16 +163,15 @@
 	 * Map the receive page as writable in the hypervisor address space. On
 	 * failure, unmap the send page before returning.
 	 */
-	if (!mm_identity_map((vaddr_t)recv, (vaddr_t)recv + PAGE_SIZE,
-			     MM_MODE_W)) {
-		mm_unmap((vaddr_t)send, (vaddr_t)send + PAGE_SIZE, 0);
+	if (!mm_identity_map(recv_begin, recv_end, MM_MODE_W)) {
+		mm_unmap(send_begin, send_end, 0);
 		ret = -1;
 		goto exit;
 	}
 
 	/* Save pointers to the pages. */
-	vm->rpc.send = (const void *)(vaddr_t)send;
-	vm->rpc.recv = (void *)(vaddr_t)recv;
+	vm->rpc.send = mm_ptr_from_va(send_begin);
+	vm->rpc.recv = mm_ptr_from_va(recv_begin);
 
 	/* TODO: Notify any waiters. */
 
diff --git a/src/arch/aarch64/entry.S b/src/arch/aarch64/entry.S
index 7f8a4e1..f1ca71a 100644
--- a/src/arch/aarch64/entry.S
+++ b/src/arch/aarch64/entry.S
@@ -62,8 +62,8 @@
 	b 3b
 
 4:	/* Save the FDT to a global variable. */
-	adrp x30, fdt
-	add x30, x30, :lo12:fdt
+	adrp x30, fdt_addr
+	add x30, x30, :lo12:fdt_addr
 	str x0, [x30]
 
 	/* Get pointer to first cpu. */
diff --git a/src/arch/aarch64/handler.c b/src/arch/aarch64/handler.c
index 2e5e75e..f5bdd21 100644
--- a/src/arch/aarch64/handler.c
+++ b/src/arch/aarch64/handler.c
@@ -182,7 +182,7 @@
 		break;
 
 	case HF_VM_CONFIGURE:
-		ret.user_ret = api_vm_configure(arg1, arg2);
+		ret.user_ret = api_vm_configure(ipa_init(arg1), ipa_init(arg2));
 		break;
 
 	case HF_RPC_REQUEST:
diff --git a/src/arch/aarch64/inc/arch_cpu.h b/src/arch/aarch64/inc/arch_cpu.h
index 6aa7bc4..933de8e 100644
--- a/src/arch/aarch64/inc/arch_cpu.h
+++ b/src/arch/aarch64/inc/arch_cpu.h
@@ -65,20 +65,20 @@
 
 	/* TODO: Determine if we need to set TSW. */
 	hcr = (1u << 31) | /* RW bit. */
-			  (1u << 21) | /* TACR, trap access to ACTRL_EL1. */
-			  (1u << 19) | /* TSC, trap SMC instructions. */
-			  (1u << 20) | /* TIDCP, trap impl-defined funct. */
-			  (1u << 2) |  /* PTW, Protected Table Walk. */
-			  (1u << 0);   /* VM: enable stage-2 translation. */
+	      (1u << 21) | /* TACR, trap access to ACTRL_EL1. */
+	      (1u << 19) | /* TSC, trap SMC instructions. */
+	      (1u << 20) | /* TIDCP, trap impl-defined funct. */
+	      (1u << 2) |  /* PTW, Protected Table Walk. */
+	      (1u << 0);   /* VM: enable stage-2 translation. */
 
 	cptr = 0;
 	cnthctl = 0;
 
 	if (!is_primary) {
 		hcr |= (7u << 3) |  /* AMO, IMO, FMO bits. */
-				(1u << 9) |  /* FB bit. */
-				(1u << 10) | /* BSU bits set to inner-sh. */
-				(3u << 13);  /* TWI, TWE bits. */
+		       (1u << 9) |  /* FB bit. */
+		       (1u << 10) | /* BSU bits set to inner-sh. */
+		       (3u << 13);  /* TWI, TWE bits. */
 
 		cptr |= (1u << 10); /* TFP, trap fp access. */
 
@@ -86,9 +86,9 @@
 			   (1u << 1);  /* EL1PCEN, trap phys timer access. */
 	}
 
-	__asm__ volatile("msr hcr_el2, %0" ::  "r"(hcr));
-	__asm__ volatile("msr cptr_el2, %0" ::  "r"(cptr));
-	__asm__ volatile("msr cnthctl_el2, %0" ::  "r"(cnthctl));
+	__asm__ volatile("msr hcr_el2, %0" ::"r"(hcr));
+	__asm__ volatile("msr cptr_el2, %0" ::"r"(cptr));
+	__asm__ volatile("msr cnthctl_el2, %0" ::"r"(cnthctl));
 }
 
 static inline void arch_regs_init(struct arch_regs *r, size_t pc, size_t arg,
diff --git a/src/arch/aarch64/inc/arch_mm.h b/src/arch/aarch64/inc/arch_mm.h
index 8662f78..ba44387 100644
--- a/src/arch/aarch64/inc/arch_mm.h
+++ b/src/arch/aarch64/inc/arch_mm.h
@@ -5,14 +5,64 @@
 #include <stddef.h>
 #include <stdint.h>
 
-/* A physical address. */
-typedef size_t paddr_t;
+/* Integer type large enough to hold a physical address. */
+typedef uintptr_t uintpaddr_t;
 
-/* A virtual address. */
-typedef size_t vaddr_t;
+/* Integer type large enough to hold a virtual address. */
+typedef uintptr_t uintvaddr_t;
 
 /* A page table entry. */
-typedef size_t pte_t;
+typedef uint64_t pte_t;
+
+/* An opaque type for a physical address. */
+typedef struct {
+	uintpaddr_t pa;
+} paddr_t;
+
+/* An opaque type for a virtual address. */
+typedef struct {
+	uintvaddr_t va;
+} vaddr_t;
+
+/**
+ * Initializes a physical address.
+ */
+static inline paddr_t pa_init(uintpaddr_t p)
+{
+	return (paddr_t){.pa = p};
+}
+
+/**
+ * Extracts the absolute physical address.
+ */
+static inline uintpaddr_t pa_addr(paddr_t pa)
+{
+	return pa.pa;
+}
+
+/**
+ * Initializes a virtual address.
+ */
+static inline vaddr_t va_init(uintvaddr_t v)
+{
+	return (vaddr_t){.va = v};
+}
+
+/**
+ * Extracts the absolute virtual address.
+ */
+static inline uintvaddr_t va_addr(vaddr_t va)
+{
+	return va.va;
+}
+
+/**
+ * Advances a virtual address.
+ */
+static inline vaddr_t va_add(vaddr_t va, size_t n)
+{
+	return va_init(va_addr(va) + n);
+}
 
 #define PAGE_LEVEL_BITS 9
 #define PAGE_BITS 12
@@ -25,7 +75,7 @@
  */
 static inline pte_t arch_mm_pa_to_table_pte(paddr_t pa)
 {
-	return pa | 0x3;
+	return pa_addr(pa) | 0x3;
 }
 
 /**
@@ -33,7 +83,7 @@
  */
 static inline pte_t arch_mm_pa_to_block_pte(paddr_t pa, uint64_t attrs)
 {
-	return pa | attrs;
+	return pa_addr(pa) | attrs;
 }
 
 /**
@@ -41,7 +91,7 @@
  */
 static inline pte_t arch_mm_pa_to_page_pte(paddr_t pa, uint64_t attrs)
 {
-	return pa | attrs | ((attrs & 1) << 1);
+	return pa_addr(pa) | attrs | ((attrs & 1) << 1);
 }
 
 /**
@@ -93,22 +143,25 @@
 	return (pte & 3) == 1;
 }
 
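+/* Keep bits [PAGE_BITS, 48), clearing PTE attribute and ignored bits. */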
+#define CLEAR_PTE_ATTRS(v) \
+	((v) & ~((1ull << PAGE_BITS) - 1) & ((1ull << 48) - 1))
+
 /**
  * Clears the given virtual address, i.e., sets the ignored bits (from a page
  * table perspective) to zero.
  */
-static inline vaddr_t arch_mm_clear_va(vaddr_t addr)
+static inline vaddr_t arch_mm_clear_va(vaddr_t va)
 {
-	return addr & ~((1ull << PAGE_BITS) - 1) & ((1ull << 48) - 1);
+	return va_init(CLEAR_PTE_ATTRS(va_addr(va)));
 }
 
 /**
  * Clears the given physical address, i.e., sets the ignored bits (from a page
  * table perspective) to zero.
  */
-static inline paddr_t arch_mm_clear_pa(paddr_t addr)
+static inline paddr_t arch_mm_clear_pa(paddr_t pa)
 {
-	return addr & ~((1ull << PAGE_BITS) - 1) & ((1ull << 48) - 1);
+	return pa_init(CLEAR_PTE_ATTRS(pa_addr(pa)));
 }
 
 /**
@@ -116,7 +169,7 @@
  */
 static inline paddr_t arch_mm_pte_to_paddr(pte_t pte)
 {
-	return arch_mm_clear_pa(pte);
+	return pa_init(CLEAR_PTE_ATTRS(pte));
 }
 
 /**
@@ -124,15 +177,20 @@
  */
 static inline pte_t *arch_mm_pte_to_table(pte_t pte)
 {
-	return (pte_t *)arch_mm_pte_to_paddr(pte);
+	return (pte_t *)CLEAR_PTE_ATTRS(pte);
 }
 
+#undef CLEAR_PTE_ATTRS
+
 /**
  * Invalidates stage-1 TLB entries referring to the given virtual address range.
  */
-static inline void arch_mm_invalidate_stage1_range(vaddr_t begin, vaddr_t end)
+static inline void arch_mm_invalidate_stage1_range(vaddr_t va_begin,
+						   vaddr_t va_end)
 {
-	vaddr_t it;
+	uintvaddr_t begin = va_addr(va_begin);
+	uintvaddr_t end = va_addr(va_end);
+	uintvaddr_t it;
 
 	begin >>= 12;
 	end >>= 12;
@@ -149,9 +207,12 @@
 /**
  * Invalidates stage-2 TLB entries referring to the given virtual address range.
  */
-static inline void arch_mm_invalidate_stage2_range(vaddr_t begin, vaddr_t end)
+static inline void arch_mm_invalidate_stage2_range(vaddr_t va_begin,
+						   vaddr_t va_end)
 {
-	vaddr_t it;
+	uintvaddr_t begin = va_addr(va_begin);
+	uintvaddr_t end = va_addr(va_end);
+	uintvaddr_t it;
 
 	/* TODO: This only applies to the current VMID. */
 
@@ -172,7 +233,9 @@
 
 static inline void arch_mm_set_vm(uint64_t vmid, paddr_t table)
 {
-	__asm__ volatile("msr vttbr_el2, %0" : : "r"(table | (vmid << 48)));
+	__asm__ volatile("msr vttbr_el2, %0"
+			 :
+			 : "r"(pa_addr(table) | (vmid << 48)));
 }
 
 uint64_t arch_mm_mode_to_attrs(int mode);
diff --git a/src/arch/aarch64/mm.c b/src/arch/aarch64/mm.c
index 53506bc..7729491 100644
--- a/src/arch/aarch64/mm.c
+++ b/src/arch/aarch64/mm.c
@@ -203,7 +203,7 @@
 	write_msr(mair_el2, (0 << (8 * STAGE1_DEVICEINDX)) |
 				    (0xff << (8 * STAGE1_NORMALINDX)));
 
-	write_msr(ttbr0_el2, table);
+	write_msr(ttbr0_el2, pa_addr(table));
 
 	/*
 	 * Configure tcr_el2.
diff --git a/src/arch/aarch64/params.c b/src/arch/aarch64/params.c
index d044d69..00d181a 100644
--- a/src/arch/aarch64/params.c
+++ b/src/arch/aarch64/params.c
@@ -1,7 +1,8 @@
 #include "boot_params.h"
 #include "fdt_handler.h"
 
-struct fdt_header *fdt;
+/* This is set by entry.S. */
+uintpaddr_t fdt_addr;
 
 /*
  * The following are declared weak so that they can overwritten by platform code
@@ -10,11 +11,11 @@
 #pragma weak plat_get_boot_params
 bool plat_get_boot_params(struct boot_params *p)
 {
-	return fdt_get_boot_params(fdt, p);
+	return fdt_get_boot_params(pa_init(fdt_addr), p);
 }
 
 #pragma weak plat_update_boot_params
 bool plat_update_boot_params(struct boot_params_update *p)
 {
-	return fdt_patch(fdt, p);
+	return fdt_patch(pa_init(fdt_addr), p);
 }
diff --git a/src/fdt_handler.c b/src/fdt_handler.c
index 47637ca..8121790 100644
--- a/src/fdt_handler.c
+++ b/src/fdt_handler.c
@@ -84,21 +84,27 @@
  */
 static bool find_initrd(struct fdt_node *n, struct boot_params *p)
 {
+	uint64_t begin;
+	uint64_t end;
+
 	if (!fdt_find_child(n, "chosen")) {
 		dlog("Unable to find 'chosen'\n");
 		return false;
 	}
 
-	if (!fdt_read_number(n, "linux,initrd-start", &p->initrd_begin)) {
+	if (!fdt_read_number(n, "linux,initrd-start", &begin)) {
 		dlog("Unable to read linux,initrd-start\n");
 		return false;
 	}
 
-	if (!fdt_read_number(n, "linux,initrd-end", &p->initrd_end)) {
+	if (!fdt_read_number(n, "linux,initrd-end", &end)) {
 		dlog("Unable to read linux,initrd-end\n");
 		return false;
 	}
 
+	p->initrd_begin = pa_init(begin);
+	p->initrd_end = pa_init(end);
+
 	return true;
 }
 
@@ -143,14 +149,14 @@
 
 		/* Traverse all memory ranges within this node. */
 		while (size >= entry_size) {
-			uint64_t addr = convert_number(data, address_size);
-			uint64_t len =
+			uintpaddr_t addr = convert_number(data, address_size);
+			size_t len =
 				convert_number(data + address_size, size_size);
 
-			if (len > p->mem_end - p->mem_begin) {
+			if (len > pa_addr(p->mem_end) - pa_addr(p->mem_begin)) {
 				/* Remember the largest range we've found. */
-				p->mem_begin = addr;
-				p->mem_end = addr + len;
+				p->mem_begin = pa_init(addr);
+				p->mem_end = pa_init(addr + len);
 			}
 
 			size -= entry_size;
@@ -161,25 +167,30 @@
 	/* TODO: Check for "reserved-memory" nodes. */
 }
 
-bool fdt_get_boot_params(struct fdt_header *fdt, struct boot_params *p)
+bool fdt_get_boot_params(paddr_t fdt_addr, struct boot_params *p)
 {
+	struct fdt_header *fdt;
 	struct fdt_node n;
 	bool ret = false;
 
 	/* Map the fdt header in. */
-	if (!mm_identity_map((vaddr_t)fdt, (vaddr_t)fdt + fdt_header_size(),
+	if (!mm_identity_map(mm_va_from_pa(fdt_addr),
+			     va_init(pa_addr(fdt_addr) + fdt_header_size()),
 			     MM_MODE_R)) {
 		dlog("Unable to map FDT header.\n");
 		goto err_unmap_fdt_header;
 	}
 
+	fdt = mm_ptr_from_va(mm_va_from_pa(fdt_addr));
+
 	if (!fdt_root_node(&n, fdt)) {
 		dlog("FDT failed validation.\n");
 		goto err_unmap_fdt_header;
 	}
 
 	/* Map the rest of the fdt in. */
-	if (!mm_identity_map((vaddr_t)fdt, (vaddr_t)fdt + fdt_total_size(fdt),
+	if (!mm_identity_map(mm_va_from_pa(fdt_addr),
+			     va_init(pa_addr(fdt_addr) + fdt_total_size(fdt)),
 			     MM_MODE_R)) {
 		dlog("Unable to map full FDT.\n");
 		goto err_unmap_fdt_header;
@@ -190,8 +201,8 @@
 		goto out_unmap_fdt;
 	}
 
-	p->mem_begin = 0;
-	p->mem_end = 0;
+	p->mem_begin = pa_init(0);
+	p->mem_end = pa_init(0);
 	find_memory_range(&n, p);
 
 	if (!find_initrd(&n, p)) {
@@ -202,34 +213,41 @@
 	ret = true;
 
 out_unmap_fdt:
-	mm_unmap((vaddr_t)fdt, (vaddr_t)fdt + fdt_total_size(fdt), 0);
+	mm_unmap(mm_va_from_pa(fdt_addr),
+		 va_init(pa_addr(fdt_addr) + fdt_total_size(fdt)), 0);
 	return ret;
 
 err_unmap_fdt_header:
-	mm_unmap((vaddr_t)fdt, (vaddr_t)fdt + fdt_header_size(), 0);
+	mm_unmap(mm_va_from_pa(fdt_addr),
+		 va_init(pa_addr(fdt_addr) + fdt_header_size()), 0);
 	return false;
 }
 
-bool fdt_patch(struct fdt_header *fdt, struct boot_params_update *p)
+bool fdt_patch(paddr_t fdt_addr, struct boot_params_update *p)
 {
+	struct fdt_header *fdt;
 	struct fdt_node n;
 	bool ret = false;
 
 	/* Map the fdt header in. */
-	if (!mm_identity_map((vaddr_t)fdt, (vaddr_t)fdt + fdt_header_size(),
+	if (!mm_identity_map(mm_va_from_pa(fdt_addr),
+			     va_init(pa_addr(fdt_addr) + fdt_header_size()),
 			     MM_MODE_R)) {
 		dlog("Unable to map FDT header.\n");
 		return false;
 	}
 
+	fdt = mm_ptr_from_va(mm_va_from_pa(fdt_addr));
+
 	if (!fdt_root_node(&n, fdt)) {
 		dlog("FDT failed validation.\n");
 		goto err_unmap_fdt_header;
 	}
 
 	/* Map the fdt (+ a page) in r/w mode in preparation for updating it. */
-	if (!mm_identity_map((vaddr_t)fdt,
-			     (vaddr_t)fdt + fdt_total_size(fdt) + PAGE_SIZE,
+	if (!mm_identity_map(mm_va_from_pa(fdt_addr),
+			     va_init(pa_addr(fdt_addr) + fdt_total_size(fdt) +
+				     PAGE_SIZE),
 			     MM_MODE_R | MM_MODE_W)) {
 		dlog("Unable to map FDT in r/w mode.\n");
 		goto err_unmap_fdt_header;
@@ -246,12 +264,13 @@
 	}
 
 	/* Patch FDT to point to new ramdisk. */
-	if (!fdt_write_number(&n, "linux,initrd-start", p->initrd_begin)) {
+	if (!fdt_write_number(&n, "linux,initrd-start",
+			      pa_addr(p->initrd_begin))) {
 		dlog("Unable to write linux,initrd-start\n");
 		goto out_unmap_fdt;
 	}
 
-	if (!fdt_write_number(&n, "linux,initrd-end", p->initrd_end)) {
+	if (!fdt_write_number(&n, "linux,initrd-end", pa_addr(p->initrd_end))) {
 		dlog("Unable to write linux,initrd-end\n");
 		goto out_unmap_fdt;
 	}
@@ -264,21 +283,25 @@
 	}
 
 	/* Patch fdt to reserve memory for secondary VMs. */
-	fdt_add_mem_reservation(fdt, p->reserved_begin,
-				p->reserved_end - p->reserved_begin);
+	fdt_add_mem_reservation(
+		fdt, pa_addr(p->reserved_begin),
+		pa_addr(p->reserved_end) - pa_addr(p->reserved_begin));
 
 	ret = true;
 
 out_unmap_fdt:
 	/* Unmap FDT. */
-	if (!mm_unmap((vaddr_t)fdt,
-		      (vaddr_t)fdt + fdt_total_size(fdt) + PAGE_SIZE, 0)) {
+	if (!mm_unmap(mm_va_from_pa(fdt_addr),
+		      va_init(pa_addr(fdt_addr) + fdt_total_size(fdt) +
+			      PAGE_SIZE),
+		      0)) {
 		dlog("Unable to unmap writable FDT.\n");
 		return false;
 	}
 	return ret;
 
 err_unmap_fdt_header:
-	mm_unmap((vaddr_t)fdt, (vaddr_t)fdt + fdt_header_size(), 0);
+	mm_unmap(mm_va_from_pa(fdt_addr),
+		 va_init(pa_addr(fdt_addr) + fdt_header_size()), 0);
 	return false;
 }
diff --git a/src/load.c b/src/load.c
index b4df5ad..d36ac21 100644
--- a/src/load.c
+++ b/src/load.c
@@ -15,13 +15,16 @@
  */
 static bool copy_to_unmaped(paddr_t to, const void *from, size_t size)
 {
-	if (!mm_identity_map((vaddr_t)to, (vaddr_t)to + size, MM_MODE_W)) {
+	vaddr_t begin = mm_va_from_pa(to);
+	vaddr_t end = va_add(begin, size);
+
+	if (!mm_identity_map(begin, end, MM_MODE_W)) {
 		return false;
 	}
 
-	memcpy((void *)to, from, size);
+	memcpy(mm_ptr_from_va(begin), from, size);
 
-	mm_unmap(to, to + size, 0);
+	mm_unmap(begin, end, 0);
 
 	return true;
 }
@@ -34,8 +37,8 @@
 	/* TODO: This is a hack. We must read the alignment from the binary. */
 	extern char bin_end[];
 	size_t tmp = (size_t)&bin_end[0];
-	paddr_t dest = (tmp + 0x80000 - 1) & ~(0x80000 - 1);
-	dlog("bin_end is at %p, copying to %p\n", &bin_end[0], dest);
+	paddr_t dest = pa_init((tmp + 0x80000 - 1) & ~(0x80000 - 1));
+	dlog("bin_end is at %p, copying to %p\n", &bin_end[0], pa_addr(dest));
 	return copy_to_unmaped(dest, from, size);
 }
 
@@ -88,6 +91,7 @@
 /**
  * Loads the primary VM.
  */
+/* TODO: kernel_arg is a size_t??? */
 bool load_primary(const struct memiter *cpio, size_t kernel_arg,
 		  struct memiter *initrd)
 {
@@ -118,10 +122,11 @@
 
 		/* Map the 1TB of memory. */
 		/* TODO: We should do a whitelist rather than a blacklist. */
-		if (!mm_ptable_identity_map(&primary_vm.ptable, 0,
-					    1024ull * 1024 * 1024 * 1024,
-					    MM_MODE_R | MM_MODE_W | MM_MODE_X |
-						    MM_MODE_NOINVALIDATE)) {
+		if (!mm_ptable_identity_map(
+			    &primary_vm.ptable, va_init(0),
+			    va_init(1024ull * 1024 * 1024 * 1024),
+			    MM_MODE_R | MM_MODE_W | MM_MODE_X |
+				    MM_MODE_NOINVALIDATE)) {
 			dlog("Unable to initialise memory for primary vm\n");
 			return false;
 		}
@@ -143,8 +148,8 @@
  * reflect the fact that some of the memory isn't available to the primary VM
  * anymore.
  */
-bool load_secondary(const struct memiter *cpio, uint64_t mem_begin,
-		    uint64_t *mem_end)
+bool load_secondary(const struct memiter *cpio, paddr_t mem_begin,
+		    paddr_t *mem_end)
 {
 	struct memiter it;
 	struct memiter str;
@@ -158,7 +163,7 @@
 	}
 
 	/* Round the last address down to the page size. */
-	*mem_end &= ~(PAGE_SIZE - 1);
+	*mem_end = pa_init(pa_addr(*mem_end) & ~(PAGE_SIZE - 1));
 
 	for (count = 0;
 	     memiter_parse_uint(&it, &mem) && memiter_parse_uint(&it, &cpu) &&
@@ -173,7 +178,7 @@
 
 		/* Round up to page size. */
 		mem = (mem + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
-		if (mem > *mem_end - mem_begin) {
+		if (mem > pa_addr(*mem_end) - pa_addr(mem_begin)) {
 			dlog("Not enough memory for vm %u (%u bytes)\n", count,
 			     mem);
 			continue;
@@ -186,7 +191,7 @@
 			continue;
 		}
 
-		*mem_end -= mem;
+		*mem_end = pa_init(pa_addr(*mem_end) - mem);
 		if (!copy_to_unmaped(*mem_end, kernel.next,
 				     kernel.limit - kernel.next)) {
 			dlog("Unable to copy kernel for vm %u\n", count);
@@ -201,30 +206,37 @@
 		/* TODO: Remove this. */
 		/* Grant VM access to uart. */
 		mm_ptable_identity_map_page(&secondary_vm[count].ptable,
-					    PL011_BASE,
+					    va_init(PL011_BASE),
 					    MM_MODE_R | MM_MODE_W | MM_MODE_D |
 						    MM_MODE_NOINVALIDATE);
 
 		/* Grant the VM access to the memory. */
-		if (!mm_ptable_identity_map(&secondary_vm[count].ptable,
-					    *mem_end, *mem_end + mem,
-					    MM_MODE_R | MM_MODE_W | MM_MODE_X |
-						    MM_MODE_NOINVALIDATE)) {
+		if (!mm_ptable_identity_map(
+			    &secondary_vm[count].ptable,
+			    mm_va_from_pa(*mem_end),
+			    va_add(mm_va_from_pa(*mem_end), mem),
+			    MM_MODE_R | MM_MODE_W | MM_MODE_X |
+				    MM_MODE_NOINVALIDATE)) {
 			dlog("Unable to initialise memory for vm %u\n", count);
 			continue;
 		}
 
 		/* Deny the primary VM access to this memory. */
-		if (!mm_ptable_unmap(&primary_vm.ptable, *mem_end,
-				     *mem_end + mem, MM_MODE_NOINVALIDATE)) {
+		if (!mm_ptable_unmap(&primary_vm.ptable,
+				     mm_va_from_pa(*mem_end),
+				     va_add(mm_va_from_pa(*mem_end), mem),
+				     MM_MODE_NOINVALIDATE)) {
 			dlog("Unable to unmap secondary VM from primary VM\n");
 			return false;
 		}
 
 		dlog("Loaded VM%u with %u vcpus, entry at 0x%x\n", count, cpu,
-		     *mem_end);
+		     pa_addr(*mem_end));
 
-		vm_start_vcpu(secondary_vm + count, 0, *mem_end, 0, false);
+		/* TODO: entry is a size_t, which seems wrong; what should it
+		 * be? */
+		vm_start_vcpu(secondary_vm + count, 0, pa_addr(*mem_end), 0,
+			      false);
 	}
 
 	secondary_vm_count = count;
diff --git a/src/main.c b/src/main.c
index 72815d7..5f81a89 100644
--- a/src/main.c
+++ b/src/main.c
@@ -45,7 +45,7 @@
 {
 	struct boot_params params;
 	struct boot_params_update update;
-	uint64_t new_mem_end;
+	paddr_t new_mem_end;
 	struct memiter primary_initrd;
 	struct memiter cpio;
 
@@ -62,19 +62,19 @@
 		panic("unable to retrieve boot params");
 	}
 
-	dlog("Memory range:  0x%x - 0x%x\n", params.mem_begin,
-	     params.mem_end - 1);
-	dlog("Ramdisk range: 0x%x - 0x%x\n", params.initrd_begin,
-	     params.initrd_end - 1);
+	dlog("Memory range:  0x%x - 0x%x\n", pa_addr(params.mem_begin),
+	     pa_addr(params.mem_end) - 1);
+	dlog("Ramdisk range: 0x%x - 0x%x\n", pa_addr(params.initrd_begin),
+	     pa_addr(params.initrd_end) - 1);
 
 	/* Map initrd in, and initialise cpio parser. */
-	if (!mm_identity_map(params.initrd_begin, params.initrd_end,
-			     MM_MODE_R)) {
+	if (!mm_identity_map(mm_va_from_pa(params.initrd_begin),
+			     mm_va_from_pa(params.initrd_end), MM_MODE_R)) {
 		panic("unable to map initrd in");
 	}
 
-	memiter_init(&cpio, (void *)params.initrd_begin,
-		     params.initrd_end - params.initrd_begin);
+	memiter_init(&cpio, mm_ptr_from_va(mm_va_from_pa(params.initrd_begin)),
+		     pa_addr(params.initrd_end) - pa_addr(params.initrd_begin));
 
 	/* Load all VMs. */
 	new_mem_end = params.mem_end;
@@ -87,10 +87,13 @@
 	}
 
 	/* Prepare to run by updating bootparams as seens by primary VM. */
-	update.initrd_begin = (paddr_t)primary_initrd.next;
-	update.initrd_end = (paddr_t)primary_initrd.limit;
+	update.initrd_begin =
+		mm_pa_from_va(va_init((uintvaddr_t)primary_initrd.next));
+	update.initrd_end =
+		mm_pa_from_va(va_init((uintvaddr_t)primary_initrd.limit));
 	update.reserved_begin = new_mem_end;
-	update.reserved_end = params.mem_end - new_mem_end;
+	update.reserved_end =
+		pa_init(pa_addr(params.mem_end) - pa_addr(new_mem_end));
 	if (!plat_update_boot_params(&update)) {
 		panic("plat_update_boot_params failed");
 	}
diff --git a/src/mm.c b/src/mm.c
index 01e28a3..9ecc313 100644
--- a/src/mm.c
+++ b/src/mm.c
@@ -14,12 +14,13 @@
 
 /* clang-format on */
 
-extern char text_begin[];
-extern char text_end[];
-extern char rodata_begin[];
-extern char rodata_end[];
-extern char data_begin[];
-extern char data_end[];
+extern uint8_t text_begin[];
+extern uint8_t text_end[];
+extern uint8_t rodata_begin[];
+extern uint8_t rodata_end[];
+extern uint8_t data_begin[];
+extern uint8_t data_end[];
+
 static struct mm_ptable ptable;
 
 /**
@@ -38,7 +39,7 @@
 static inline vaddr_t mm_level_end(vaddr_t va, int level)
 {
 	size_t offset = PAGE_BITS + (level + 1) * PAGE_LEVEL_BITS;
-	return ((va >> offset) + 1) << offset;
+	return va_init(((va_addr(va) >> offset) + 1) << offset);
 }
 
 /**
@@ -47,7 +48,7 @@
  */
 static inline size_t mm_index(vaddr_t va, int level)
 {
-	vaddr_t v = va >> (PAGE_BITS + level * PAGE_LEVEL_BITS);
+	uintvaddr_t v = va_addr(va) >> (PAGE_BITS + level * PAGE_LEVEL_BITS);
 	return v & ((1ull << PAGE_LEVEL_BITS) - 1);
 }
 
@@ -102,7 +103,7 @@
 	 * update it.
 	 */
 	atomic_thread_fence(memory_order_release);
-	*pte = arch_mm_pa_to_table_pte((paddr_t)ntable);
+	*pte = arch_mm_pa_to_table_pte(pa_init((uintpaddr_t)ntable));
 
 	return ntable;
 }
@@ -131,29 +132,31 @@
  * levels, but the recursion is bound by the maximum number of levels in a page
  * table.
  */
-static bool mm_map_level(vaddr_t va, vaddr_t va_end, paddr_t pa, uint64_t attrs,
-			 pte_t *table, int level, int flags)
+static bool mm_map_level(vaddr_t va_begin, vaddr_t va_end, paddr_t pa,
+			 uint64_t attrs, pte_t *table, int level, int flags)
 {
-	pte_t *pte = table + mm_index(va, level);
-	vaddr_t va_level_end = mm_level_end(va, level);
+	pte_t *pte = table + mm_index(va_begin, level);
+	uintvaddr_t level_end = va_addr(mm_level_end(va_begin, level));
+	uintvaddr_t begin = va_addr(va_begin);
+	uintvaddr_t end = va_addr(va_end);
 	size_t entry_size = mm_entry_size(level);
 	bool commit = flags & MAP_FLAG_COMMIT;
 	bool sync = flags & MAP_FLAG_SYNC;
 
-	/* Cap va_end so that we don't go over the current level max. */
-	if (va_end > va_level_end) {
-		va_end = va_level_end;
+	/* Cap end so that we don't go over the current level max. */
+	if (end > level_end) {
+		end = level_end;
 	}
 
 	/* Fill each entry in the table. */
-	while (va < va_end) {
+	while (begin < end) {
 		if (level == 0) {
 			if (commit) {
 				*pte = arch_mm_pa_to_page_pte(pa, attrs);
 			}
-		} else if ((va_end - va) >= entry_size &&
+		} else if ((end - begin) >= entry_size &&
 			   arch_mm_is_block_allowed(level) &&
-			   (va & (entry_size - 1)) == 0) {
+			   (begin & (entry_size - 1)) == 0) {
 			if (commit) {
 				pte_t v = *pte;
 				*pte = arch_mm_pa_to_block_pte(pa, attrs);
@@ -167,14 +170,14 @@
 				return false;
 			}
 
-			if (!mm_map_level(va, va_end, pa, attrs, nt, level - 1,
-					  flags)) {
+			if (!mm_map_level(va_begin, va_end, pa, attrs, nt,
+					  level - 1, flags)) {
 				return false;
 			}
 		}
 
-		va = (va + entry_size) & ~(entry_size - 1);
-		pa = (pa + entry_size) & ~(entry_size - 1);
+		begin = (begin + entry_size) & ~(entry_size - 1);
+		pa = pa_init((pa_addr(pa) + entry_size) & ~(entry_size - 1));
 		pte++;
 	}
 
@@ -204,21 +207,22 @@
 	uint64_t attrs = arch_mm_mode_to_attrs(mode);
 	int flags = (mode & MM_MODE_NOSYNC) ? 0 : MAP_FLAG_SYNC;
 	int level = arch_mm_max_level(mode);
-	paddr_t paddr = arch_mm_clear_pa(begin);
+	pte_t *table = mm_ptr_from_va(mm_va_from_pa(t->table));
+	paddr_t paddr = arch_mm_clear_pa(mm_pa_from_va(begin));
 
 	begin = arch_mm_clear_va(begin);
-	end = arch_mm_clear_va(end + PAGE_SIZE - 1);
+	end = arch_mm_clear_va(va_add(end, PAGE_SIZE - 1));
 
 	/*
 	 * Do it in two steps to prevent leaving the table in a halfway updated
 	 * state. In such a two-step implementation, the table may be left with
 	 * extra internal tables, but no different mapping on failure.
 	 */
-	if (!mm_map_level(begin, end, paddr, attrs, t->table, level, flags)) {
+	if (!mm_map_level(begin, end, paddr, attrs, table, level, flags)) {
 		return false;
 	}
 
-	mm_map_level(begin, end, paddr, attrs, t->table, level,
+	mm_map_level(begin, end, paddr, attrs, table, level,
 		     flags | MAP_FLAG_COMMIT);
 
 	/* Invalidate the tlb. */
@@ -237,16 +241,18 @@
 {
 	int flags = (mode & MM_MODE_NOSYNC) ? 0 : MAP_FLAG_SYNC;
 	int level = arch_mm_max_level(mode);
+	pte_t *table = mm_ptr_from_va(mm_va_from_pa(t->table));
 
 	begin = arch_mm_clear_va(begin);
-	end = arch_mm_clear_va(end + PAGE_SIZE - 1);
+	end = arch_mm_clear_va(va_add(end, PAGE_SIZE - 1));
 
 	/* Also do updates in two steps, similarly to mm_ptable_identity_map. */
-	if (!mm_map_level(begin, end, begin, 0, t->table, level, flags)) {
+	if (!mm_map_level(begin, end, mm_pa_from_va(begin), 0, table, level,
+			  flags)) {
 		return false;
 	}
 
-	mm_map_level(begin, end, begin, 0, t->table, level,
+	mm_map_level(begin, end, mm_pa_from_va(begin), 0, table, level,
 		     flags | MAP_FLAG_COMMIT);
 
 	/* Invalidate the tlb. */
@@ -266,9 +272,9 @@
 {
 	size_t i;
 	uint64_t attrs = arch_mm_mode_to_attrs(mode);
-	pte_t *table = t->table;
+	pte_t *table = mm_ptr_from_va(mm_va_from_pa(t->table));
 	bool sync = !(mode & MM_MODE_NOSYNC);
-	paddr_t pa = arch_mm_clear_pa(va);
+	paddr_t pa = arch_mm_clear_pa(mm_pa_from_va(va));
 
 	va = arch_mm_clear_va(va);
 
@@ -313,8 +319,9 @@
  */
 void mm_ptable_dump(struct mm_ptable *t, int mode)
 {
+	pte_t *table = mm_ptr_from_va(mm_va_from_pa(t->table));
 	int max_level = arch_mm_max_level(mode);
-	mm_dump_table_recursive(t->table, max_level, max_level);
+	mm_dump_table_recursive(table, max_level, max_level);
 }
 
 /**
@@ -334,11 +341,12 @@
 bool mm_ptable_unmap_hypervisor(struct mm_ptable *t, int mode)
 {
 	/* TODO: If we add pages dynamically, they must be included here too. */
-	return mm_ptable_unmap(t, (vaddr_t)text_begin, (vaddr_t)text_end,
-			       mode) &&
-	       mm_ptable_unmap(t, (vaddr_t)rodata_begin, (vaddr_t)rodata_end,
-			       mode) &&
-	       mm_ptable_unmap(t, (vaddr_t)data_begin, (vaddr_t)data_end, mode);
+	return mm_ptable_unmap(t, va_init((uintvaddr_t)text_begin),
+			       va_init((uintvaddr_t)text_end), mode) &&
+	       mm_ptable_unmap(t, va_init((uintvaddr_t)rodata_begin),
+			       va_init((uintvaddr_t)rodata_end), mode) &&
+	       mm_ptable_unmap(t, va_init((uintvaddr_t)data_begin),
+			       va_init((uintvaddr_t)data_end), mode);
 }
 
 /**
@@ -348,10 +356,10 @@
 static bool mm_is_mapped_recursive(const pte_t *table, vaddr_t addr, int level)
 {
 	pte_t pte;
-	vaddr_t va_level_end = mm_level_end(addr, level);
+	uintvaddr_t va_level_end = va_addr(mm_level_end(addr, level));
 
 	/* It isn't mapped if it doesn't fit in the table. */
-	if (addr >= va_level_end) {
+	if (va_addr(addr) >= va_level_end) {
 		return false;
 	}
 
@@ -378,11 +386,12 @@
  */
 bool mm_ptable_is_mapped(struct mm_ptable *t, vaddr_t addr, int mode)
 {
+	pte_t *table = mm_ptr_from_va(mm_va_from_pa(t->table));
 	int level = arch_mm_max_level(mode);
 
 	addr = arch_mm_clear_va(addr);
 
-	return mm_is_mapped_recursive(t->table, addr, level);
+	return mm_is_mapped_recursive(table, addr, level);
 }
 
 /**
@@ -407,7 +416,9 @@
 		table[i] = arch_mm_absent_pte();
 	}
 
-	t->table = table;
+	/* TODO: halloc could return a virtual or physical address if mm is
+	 * not enabled? */
+	t->table = pa_init((uintpaddr_t)table);
 	t->id = id;
 
 	return true;
@@ -449,26 +460,29 @@
 
 	/* Map page for uart. */
 	/* TODO: We may not want to map this. */
-	mm_ptable_identity_map_page(&ptable, PL011_BASE,
+	mm_ptable_identity_map_page(&ptable, va_init(PL011_BASE),
 				    MM_MODE_R | MM_MODE_W | MM_MODE_D |
 					    MM_MODE_NOSYNC | MM_MODE_STAGE1);
 
 	/* Map each section. */
-	mm_identity_map((vaddr_t)text_begin, (vaddr_t)text_end,
+	mm_identity_map(va_init((uintvaddr_t)text_begin),
+			va_init((uintvaddr_t)text_end),
 			MM_MODE_X | MM_MODE_NOSYNC);
 
-	mm_identity_map((vaddr_t)rodata_begin, (vaddr_t)rodata_end,
+	mm_identity_map(va_init((uintvaddr_t)rodata_begin),
+			va_init((uintvaddr_t)rodata_end),
 			MM_MODE_R | MM_MODE_NOSYNC);
 
-	mm_identity_map((vaddr_t)data_begin, (vaddr_t)data_end,
+	mm_identity_map(va_init((uintvaddr_t)data_begin),
+			va_init((uintvaddr_t)data_end),
 			MM_MODE_R | MM_MODE_W | MM_MODE_NOSYNC);
 
-	return arch_mm_init((paddr_t)ptable.table, true);
+	return arch_mm_init(ptable.table, true);
 }
 
 bool mm_cpu_init(void)
 {
-	return arch_mm_init((paddr_t)ptable.table, false);
+	return arch_mm_init(ptable.table, false);
 }
 
 /**
diff --git a/src/vm.c b/src/vm.c
index 3c4fd4d..66be66e 100644
--- a/src/vm.c
+++ b/src/vm.c
@@ -35,5 +35,5 @@
 void vm_set_current(struct vm *vm)
 {
 	arch_cpu_update(vm == &primary_vm);
-	arch_mm_set_vm(vm->ptable.id, (paddr_t)vm->ptable.table);
+	arch_mm_set_vm(vm->ptable.id, vm->ptable.table);
 }