Support concatenated page tables.

To reduce the depth of the page table tree, some architectures
concatenate top level page tables rather than introducing a new level in
the tree.

The tests now cover more of the memory management code, exercise the
concatenated tables logic, and document some of the quirks in the APIs.
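
As an illustration of the concatenation logic (a minimal sketch of the
calculation done in arch_mm_init() below; root_table_count is a hypothetical
helper, and the constants 12 and 9 stand in for PAGE_BITS and PAGE_LEVEL_BITS
with a 4KB granule):

    #include <stdint.h>

    static uint8_t root_table_count(int pa_bits)
    {
            /* Address bits left over above the last full table level. */
            int extend_bits = (pa_bits - 12) % 9;

            /* At most 16 tables (4 bits) can be concatenated at the root. */
            if (extend_bits > 4) {
                    extend_bits = 0;
            }

            return 1 << extend_bits;
    }

    /* e.g. 40 PA bits: (40 - 12) % 9 == 1, so 2 concatenated root tables. */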

Bug: 117549422
Change-Id: I99991aaf3bfb753dd6176cb9df7a5337ed9c184d
diff --git a/inc/hf/mm.h b/inc/hf/mm.h
index d5752f4..912b5d3 100644
--- a/inc/hf/mm.h
+++ b/inc/hf/mm.h
@@ -37,7 +37,8 @@
 	      "A page table must be page aligned.");
 
 struct mm_ptable {
-	paddr_t table;
+	/** Address of the root of the page table. */
+	paddr_t root;
 };
 
 /* The following are arch-independent page mapping modes. */
@@ -67,13 +68,14 @@
 #define MM_MODE_NOINVALIDATE 0x40
 
 bool mm_ptable_init(struct mm_ptable *t, int mode);
+void mm_ptable_fini(struct mm_ptable *t, int mode);
 void mm_ptable_dump(struct mm_ptable *t, int mode);
 void mm_ptable_defrag(struct mm_ptable *t, int mode);
-bool mm_ptable_unmap_hypervisor(struct mm_ptable *t, int mode);
 
 bool mm_vm_identity_map(struct mm_ptable *t, paddr_t begin, paddr_t end,
 			int mode, ipaddr_t *ipa);
 bool mm_vm_unmap(struct mm_ptable *t, paddr_t begin, paddr_t end, int mode);
+bool mm_vm_unmap_hypervisor(struct mm_ptable *t, int mode);
 bool mm_vm_is_mapped(struct mm_ptable *t, ipaddr_t ipa, int mode);
 bool mm_vm_translate(struct mm_ptable *t, ipaddr_t ipa, paddr_t *pa);
 
diff --git a/src/BUILD.gn b/src/BUILD.gn
index 008315a..d3d1a34 100644
--- a/src/BUILD.gn
+++ b/src/BUILD.gn
@@ -21,7 +21,6 @@
   ]
   deps = [
     ":src_not_testable_yet",
-    "//src/arch/${arch}",
   ]
 }
 
@@ -56,6 +55,7 @@
     ":common",
     ":fdt",
     ":memiter",
+    "//src/arch/${arch}",
   ]
 
   if (is_debug) {
diff --git a/src/arch/aarch64/inc/hf/arch/mm.h b/src/arch/aarch64/inc/hf/arch/mm.h
index b2552e3..6872f64 100644
--- a/src/arch/aarch64/inc/hf/arch/mm.h
+++ b/src/arch/aarch64/inc/hf/arch/mm.h
@@ -207,6 +207,7 @@
 uint64_t arch_mm_mode_to_attrs(int mode);
 bool arch_mm_init(paddr_t table, bool first);
 uint8_t arch_mm_max_level(int mode);
+uint8_t arch_mm_root_table_count(int mode);
 
 /**
  * Given the attrs from a table at some level and the attrs from all the blocks
diff --git a/src/arch/aarch64/mm.c b/src/arch/aarch64/mm.c
index 946f069..366fc7b 100644
--- a/src/arch/aarch64/mm.c
+++ b/src/arch/aarch64/mm.c
@@ -86,7 +86,8 @@
 
 /* clang-format on */
 
-static uint8_t mm_max_s2_level = 2;
+static uint8_t mm_s2_max_level;
+static uint8_t mm_s2_root_table_count;
 
 void arch_mm_write_back_dcache(void *base, size_t size)
 {
@@ -183,7 +184,26 @@
 		return 2;
 	}
 
-	return mm_max_s2_level;
+	return mm_s2_max_level;
+}
+
+/**
+ * Determines the number of concatenated tables at the root of the page table
+ * for the given mode.
+ *
+ * Tables are concatenated at the root to avoid introducing another level in
+ * the page table, keeping the tree shallow and wide. Each level is an extra
+ * memory access when walking the table, so keeping the tree shallow reduces
+ * the number of memory accesses and improves performance.
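+ *
+ * For example, with a 4KB granule a 40 bit IPA space can be covered by two
+ * concatenated level 2 tables (a 3 level walk) rather than a single level 3
+ * table (a 4 level walk).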
+ */
+uint8_t arch_mm_root_table_count(int mode)
+{
+	if (mode & MM_MODE_STAGE1) {
+		/* Stage 1 doesn't concatenate tables. */
+		return 1;
+	}
+
+	return mm_s2_root_table_count;
 }
 
 bool arch_mm_init(paddr_t table, bool first)
@@ -192,6 +212,7 @@
 	uint64_t features = read_msr(id_aa64mmfr0_el1);
 	uint64_t v;
 	int pa_bits = pa_bits_table[features & 0xf];
+	int extend_bits;
 	int sl0;
 
 	/* Check that 4KB granules are supported. */
@@ -214,20 +235,41 @@
 	}
 
 	/*
-	 * Determine sl0 based on the number of bits. The maximum value is given
-	 * in D4-7 of the ARM arm.
+	 * Determine sl0, the starting level of the page table, based on the
+	 * number of physical address bits. The value is chosen to give the
+	 * shallowest tree by making use of concatenated translation tables.
+	 *
+	 *  - 0 => start at level 1
+	 *  - 1 => start at level 2
+	 *  - 2 => start at level 3
 	 */
 	if (pa_bits >= 44) {
-		mm_max_s2_level = 3;
 		sl0 = 2;
-	} else {
-		mm_max_s2_level = 2;
+		mm_s2_max_level = 3;
+	} else if (pa_bits >= 35) {
 		sl0 = 1;
+		mm_s2_max_level = 2;
+	} else {
+		sl0 = 0;
+		mm_s2_max_level = 1;
 	}
 
+	/*
+	 * Since the shallowest possible tree is used, the maximum number of
+	 * concatenated tables must be used. This means that if no more than 4
+	 * bits would be needed from the next level, those bits are instead used
+	 * to index into the concatenated root tables.
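+	 *
+	 * For example, 42 PA bits with a 4KB granule leave (42 - 12) % 9 = 3
+	 * spare bits, so 1 << 3 = 8 level 2 tables are concatenated at the
+	 * root.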
+	 */
+	extend_bits = ((pa_bits - PAGE_BITS) % PAGE_LEVEL_BITS);
+	if (extend_bits > 4) {
+		extend_bits = 0;
+	}
+	mm_s2_root_table_count = 1 << extend_bits;
+
 	if (first) {
-		dlog_nosync("Number of page table levels: %d\n",
-			    mm_max_s2_level + 1);
+		dlog("Stage 2 has %d page table levels with %d pages at the "
+		     "root.\n",
+		     mm_s2_max_level + 1, mm_s2_root_table_count);
 	}
 
 	v = (1u << 31) |	       /* RES1. */
diff --git a/src/arch/fake/BUILD.gn b/src/arch/fake/BUILD.gn
index 4089bc5..59a02e4 100644
--- a/src/arch/fake/BUILD.gn
+++ b/src/arch/fake/BUILD.gn
@@ -12,6 +12,12 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+source_set("fake") {
+  sources = [
+    "mm.c",
+  ]
+}
+
 # Fake implementation of putchar logs to the console.
 source_set("putchar") {
   sources = [
diff --git a/src/arch/fake/inc/hf/arch/mm.h b/src/arch/fake/inc/hf/arch/mm.h
index d0daa55..9e8fe08 100644
--- a/src/arch/fake/inc/hf/arch/mm.h
+++ b/src/arch/fake/inc/hf/arch/mm.h
@@ -17,188 +17,35 @@
 #pragma once
 
 #include <stdbool.h>
-#include <stddef.h>
 
 #include "hf/addr.h"
 
-/*
- * Our fake architecture has page tables rather similar to aarch64, but not
- * quite.
- * - The highest level table is always 2, lowest level is 0.
- * - Blocks are allowed at all levels.
- * There are four types of entries:
- * - Absent: 0
- * - Page, at level 0: <page-aligned address> | <attrs> | 0x3
- * - Block, at level 2 or 1: <block-aligned address> | <attrs> | 0x1
- * - Subtable, at level 2 or 1: <subtable address> | 0x3
- * <attrs> are always 0 for now.
- */
-
 /** A page table entry. */
 typedef uint64_t pte_t;
 
 #define PAGE_LEVEL_BITS 9
 
-/**
- * Returns the encoding of a page table entry that isn't present.
+/*
+ * TODO: move the arch_mm_* declarations to a shared header. That header can
+ * also check that the specific implementation defines everything it needs to.
  */
-static inline pte_t arch_mm_absent_pte(int level)
-{
-	return 0;
-}
-
-/**
- * Converts a physical address to a table PTE.
- *
- * The spec says that 'Table descriptors for stage 2 translations do not
- * include any attribute field', so we don't take any attributes as arguments.
- */
-static inline pte_t arch_mm_table_pte(int level, paddr_t pa)
-{
-	/* This is the same for all levels on aarch64. */
-	(void)level;
-	return pa_addr(pa) | 0x3;
-}
-
-/**
- * Converts a physical address to a block PTE.
- *
- * The level must allow block entries.
- */
-static inline pte_t arch_mm_block_pte(int level, paddr_t pa, uint64_t attrs)
-{
-	pte_t pte = pa_addr(pa) | attrs | 0x1;
-	if (level == 0) {
-		pte |= 0x2;
-	}
-	return pte;
-}
-
-/**
- * Specifies whether block mappings are acceptable at the given level.
- *
- * Level 0 must allow block entries.
- */
-static inline bool arch_mm_is_block_allowed(int level)
-{
-	return level <= 2;
-}
-
-/**
- * Determines if the given pte is present, i.e., if it points to another table,
- * to a page, or a block of pages.
- */
-static inline bool arch_mm_pte_is_present(pte_t pte, int level)
-{
-	return (pte & 0x1) != 0;
-}
-
-/**
- * Determines if the given pte references another table.
- */
-static inline bool arch_mm_pte_is_table(pte_t pte, int level)
-{
-	return level != 0 && (pte & 0x3) == 0x3;
-}
-
-/**
- * Determines if the given pte references a block of pages.
- */
-static inline bool arch_mm_pte_is_block(pte_t pte, int level)
-{
-	return arch_mm_is_block_allowed(level) &&
-	       (pte & 0x3) == (level == 0 ? 0x3 : 0x1);
-}
-
-static inline uint64_t hf_arch_fake_mm_clear_pte_attrs(pte_t pte)
-{
-	return pte & ~0x3;
-}
-
-/**
- * Clears the given physical address, i.e., sets the ignored bits (from a page
- * table perspective) to zero.
- */
-static inline paddr_t arch_mm_clear_pa(paddr_t pa)
-{
-	/* This is assumed to round down to the page boundary. */
-	return pa_init(hf_arch_fake_mm_clear_pte_attrs(pa_addr(pa)) &
-		       ~((1 << PAGE_BITS) - 1));
-}
-
-/**
- * Extracts the physical address of the block referred to by the given page
- * table entry.
- */
-static inline paddr_t arch_mm_block_from_pte(pte_t pte)
-{
-	return pa_init(hf_arch_fake_mm_clear_pte_attrs(pte));
-}
-
-/**
- * Extracts the physical address of the page table referred to by the given page
- * table entry.
- */
-static inline paddr_t arch_mm_table_from_pte(pte_t pte)
-{
-	return pa_init(hf_arch_fake_mm_clear_pte_attrs(pte));
-}
-
-/**
- * Extracts the architecture specific attributes applies to the given page table
- * entry.
- */
-static inline uint64_t arch_mm_pte_attrs(pte_t pte)
-{
-	return 0;
-}
-
-/**
- * Given the attrs from a table at some level and the attrs from all the blocks
- * in that table, return equivalent attrs to use for a block which will replace
- * the entire table.
- */
-static inline uint64_t arch_mm_combine_table_entry_attrs(uint64_t table_attrs,
-							 uint64_t block_attrs)
-{
-	return table_attrs | block_attrs;
-}
-
-/**
- * Invalidates stage-1 TLB entries referring to the given virtual address range.
- */
-static inline void arch_mm_invalidate_stage1_range(vaddr_t va_begin,
-						   vaddr_t va_end)
-{
-}
-
-/**
- * Invalidates stage-2 TLB entries referring to the given intermediate physical
- * address range.
- */
-static inline void arch_mm_invalidate_stage2_range(ipaddr_t va_begin,
-						   ipaddr_t va_end)
-{
-}
-
-/**
- * Determines the maximum level supported by the given mode.
- */
-static inline uint8_t arch_mm_max_level(int mode)
-{
-	(void)mode;
-	return 2;
-}
-
-static inline uint64_t arch_mm_mode_to_attrs(int mode)
-{
-	(void)mode;
-	return 0;
-}
-
-static inline bool arch_mm_init(paddr_t table, bool first)
-{
-	(void)table;
-	(void)first;
-	return true;
-}
+pte_t arch_mm_absent_pte(int level);
+pte_t arch_mm_table_pte(int level, paddr_t pa);
+pte_t arch_mm_block_pte(int level, paddr_t pa, uint64_t attrs);
+bool arch_mm_is_block_allowed(int level);
+bool arch_mm_pte_is_present(pte_t pte, int level);
+bool arch_mm_pte_is_table(pte_t pte, int level);
+bool arch_mm_pte_is_block(pte_t pte, int level);
+paddr_t arch_mm_clear_pa(paddr_t pa);
+paddr_t arch_mm_block_from_pte(pte_t pte);
+paddr_t arch_mm_table_from_pte(pte_t pte);
+uint64_t arch_mm_pte_attrs(pte_t pte);
+uint64_t arch_mm_combine_table_entry_attrs(uint64_t table_attrs,
+					   uint64_t block_attrs);
+void arch_mm_invalidate_stage1_range(vaddr_t va_begin, vaddr_t va_end);
+void arch_mm_invalidate_stage2_range(ipaddr_t va_begin, ipaddr_t va_end);
+uint8_t arch_mm_max_level(int mode);
+uint8_t arch_mm_root_table_count(int mode);
+uint64_t arch_mm_mode_to_attrs(int mode);
+bool arch_mm_init(paddr_t table, bool first);
diff --git a/src/arch/fake/mm.c b/src/arch/fake/mm.c
new file mode 100644
index 0000000..81e106f
--- /dev/null
+++ b/src/arch/fake/mm.c
@@ -0,0 +1,157 @@
+/*
+ * Copyright 2018 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "hf/arch/mm.h"
+
+#include "hf/mm.h"
+
+/*
+ * Our fake architecture has page tables based on those of aarch64:
+ *
+ *  - The highest level table is always 2, lowest level is 0.
+ *  - Blocks are allowed at all levels.
+ *
+ * There are four kinds of entry:
+ *
+ *  1. Absent: 0
+ *  2. Page, at level 0: <page-aligned address> | <attrs> | 0x3
+ *  3. Block, at level 2 or 1: <block-aligned address> | <attrs> | 0x1
+ *  4. Subtable, at level 2 or 1: <subtable address> | 0x3
+ *
+ * <attrs> are always 0 for now.
+ */
+
+pte_t arch_mm_absent_pte(int level)
+{
+	(void)level;
+	return 0;
+}
+
+pte_t arch_mm_table_pte(int level, paddr_t pa)
+{
+	/* This is the same for all levels. */
+	(void)level;
+	return pa_addr(pa) | 0x3;
+}
+
+pte_t arch_mm_block_pte(int level, paddr_t pa, uint64_t attrs)
+{
+	/* Single pages are encoded differently to larger blocks. */
+	pte_t pte = pa_addr(pa) | attrs | 0x1;
+	if (level == 0) {
+		pte |= 0x2;
+	}
+	return pte;
+}
+
+bool arch_mm_is_block_allowed(int level)
+{
+	/* All levels can have blocks. */
+	(void)level;
+	return true;
+}
+
+bool arch_mm_pte_is_present(pte_t pte, int level)
+{
+	(void)level;
+	return (pte & 0x1) != 0;
+}
+
+bool arch_mm_pte_is_table(pte_t pte, int level)
+{
+	/* Level 0 only contains pages so cannot be a table. */
+	return level != 0 && (pte & 0x3) == 0x3;
+}
+
+bool arch_mm_pte_is_block(pte_t pte, int level)
+{
+	/* Single pages are encoded differently to larger blocks. */
+	return arch_mm_is_block_allowed(level) &&
+	       (pte & 0x3) == (level == 0 ? 0x3 : 0x1);
+}
+
+static uint64_t hf_arch_fake_mm_clear_pte_attrs(pte_t pte)
+{
+	return pte & ~0x3;
+}
+
+paddr_t arch_mm_clear_pa(paddr_t pa)
+{
+	/* This is assumed to round down to the page boundary. */
+	return pa_init(hf_arch_fake_mm_clear_pte_attrs(pa_addr(pa)) &
+		       ~((1 << PAGE_BITS) - 1));
+}
+
+paddr_t arch_mm_block_from_pte(pte_t pte)
+{
+	return pa_init(hf_arch_fake_mm_clear_pte_attrs(pte));
+}
+
+paddr_t arch_mm_table_from_pte(pte_t pte)
+{
+	return pa_init(hf_arch_fake_mm_clear_pte_attrs(pte));
+}
+
+uint64_t arch_mm_pte_attrs(pte_t pte)
+{
+	/* Attributes are not modelled. */
+	(void)pte;
+	return 0;
+}
+
+uint64_t arch_mm_combine_table_entry_attrs(uint64_t table_attrs,
+					   uint64_t block_attrs)
+{
+	return table_attrs | block_attrs;
+}
+
+void arch_mm_invalidate_stage1_range(vaddr_t va_begin, vaddr_t va_end)
+{
+	/* There's no modelling of the stage-1 TLB. */
+}
+
+void arch_mm_invalidate_stage2_range(ipaddr_t va_begin, ipaddr_t va_end)
+{
+	/* There's no modelling of the stage-2 TLB. */
+}
+
+uint8_t arch_mm_max_level(int mode)
+{
+	/* All modes have 3 levels in the page table. */
+	(void)mode;
+	return 2;
+}
+
+uint8_t arch_mm_root_table_count(int mode)
+{
+	/* Stage 1 has no concatenated tables but stage 2 has 4 of them. */
+	return (mode & MM_MODE_STAGE1) ? 1 : 4;
+}
+
+uint64_t arch_mm_mode_to_attrs(int mode)
+{
+	/* Attributes are not modelled. */
+	(void)mode;
+	return 0;
+}
+
+bool arch_mm_init(paddr_t table, bool first)
+{
+	/* No initialization required. */
+	(void)table;
+	(void)first;
+	return true;
+}
diff --git a/src/cpu.c b/src/cpu.c
index 1ab94aa..0f2bbba 100644
--- a/src/cpu.c
+++ b/src/cpu.c
@@ -96,7 +96,7 @@
 	if (!prev) {
 		struct vm *vm = vm_get(HF_PRIMARY_VM_ID);
 		struct vcpu *vcpu = &vm->vcpus[cpu_index(c)];
-		arch_regs_init(&vcpu->regs, true, vm->id, vm->ptable.table,
+		arch_regs_init(&vcpu->regs, true, vm->id, vm->ptable.root,
 			       entry, arg);
 		vcpu_on(vcpu);
 	}
diff --git a/src/load.c b/src/load.c
index 0c642e3..1a894d7 100644
--- a/src/load.c
+++ b/src/load.c
@@ -152,8 +152,8 @@
 			return false;
 		}
 
-		if (!mm_ptable_unmap_hypervisor(&vm->ptable,
-						MM_MODE_NOINVALIDATE)) {
+		if (!mm_vm_unmap_hypervisor(&vm->ptable,
+					    MM_MODE_NOINVALIDATE)) {
 			dlog("Unable to unmap hypervisor from primary vm\n");
 			return false;
 		}
diff --git a/src/mm.c b/src/mm.c
index 1660863..161fbeb 100644
--- a/src/mm.c
+++ b/src/mm.c
@@ -114,15 +114,14 @@
 /**
  * Allocate a new page table.
  */
-static struct mm_page_table *mm_alloc_page_table(bool nosync)
+static struct mm_page_table *mm_alloc_page_tables(size_t count, bool nosync)
 {
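+	/*
+	 * The tables are allocated as a single contiguous block of `count`
+	 * page tables, with the alignment equal to the total size so the
+	 * block is naturally aligned.
+	 */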
+	size_t size_and_align = count * sizeof(struct mm_page_table);
 	if (nosync) {
-		return halloc_aligned_nosync(sizeof(struct mm_page_table),
-					     alignof(struct mm_page_table));
+		return halloc_aligned_nosync(size_and_align, size_and_align);
 	}
 
-	return halloc_aligned(sizeof(struct mm_page_table),
-			      alignof(struct mm_page_table));
+	return halloc_aligned(size_and_align, size_and_align);
 }
 
 /**
@@ -214,7 +213,7 @@
 	}
 
 	/* Allocate a new table. */
-	ntable = mm_alloc_page_table(flags & MAP_FLAG_NOSYNC);
+	ntable = mm_alloc_page_tables(1, flags & MAP_FLAG_NOSYNC);
 	if (ntable == NULL) {
 		dlog("Failed to allocate memory for page table\n");
 		return NULL;
@@ -251,7 +250,7 @@
 /**
  * Returns whether all entries in this table are absent.
  */
-static bool mm_ptable_is_empty(struct mm_page_table *table, uint8_t level)
+static bool mm_page_table_is_empty(struct mm_page_table *table, uint8_t level)
 {
 	uint64_t i;
 
@@ -341,7 +340,7 @@
 			 * an absent value.
 			 */
 			if (commit && unmap &&
-			    mm_ptable_is_empty(nt, level - 1)) {
+			    mm_page_table_is_empty(nt, level - 1)) {
 				pte_t v = *pte;
 				*pte = arch_mm_absent_pte(level);
 				mm_free_page_pte(v, level);
@@ -357,6 +356,31 @@
 }
 
 /**
+ * Updates the page table from the root to map the given address range to a
+ * physical range using the provided (architecture-specific) attributes, or,
+ * if MAP_FLAG_UNMAP is set, unmaps the given range instead.
+ */
+static bool mm_map_root(struct mm_ptable *t, ptable_addr_t begin,
+			ptable_addr_t end, uint64_t attrs, uint8_t root_level,
+			int flags)
+{
+	size_t root_table_size = mm_entry_size(root_level);
+	struct mm_page_table *table =
+		&mm_page_table_from_pa(t->root)[mm_index(begin, root_level)];
+
+	while (begin < end) {
+		if (!mm_map_level(begin, end, pa_init(begin), attrs, table,
+				  root_level - 1, flags)) {
+			return false;
+		}
+		begin = (begin + root_table_size) & ~(root_table_size - 1);
+		table++;
+	}
+
+	return true;
+}
+
+/**
  * Updates the given table such that the given physical address range is mapped
  * or not mapped into the address space with the architecture-agnostic mode
  * provided.
@@ -369,27 +393,37 @@
 		    (mode & MM_MODE_NOINVALIDATE ? MAP_FLAG_NOBBM : 0) |
 		    (mode & MM_MODE_STAGE1 ? MAP_FLAG_STAGE1 : 0) |
 		    (unmap ? MAP_FLAG_UNMAP : 0);
-	uint8_t level = arch_mm_max_level(mode);
-	struct mm_page_table *table = mm_page_table_from_pa(t->table);
-	ptable_addr_t begin;
-	ptable_addr_t end;
+	uint8_t root_level = arch_mm_max_level(mode) + 1;
+	ptable_addr_t ptable_end =
+		arch_mm_root_table_count(mode) * mm_entry_size(root_level);
+	ptable_addr_t end = mm_round_up_to_page(pa_addr(pa_end));
+	ptable_addr_t begin = pa_addr(arch_mm_clear_pa(pa_begin));
 
-	pa_begin = arch_mm_clear_pa(pa_begin);
-	begin = pa_addr(pa_begin);
-	end = mm_round_up_to_page(pa_addr(pa_end));
+	/*
+	 * TODO: replace with assertions that the max level will be greater than
+	 * 0 and less than 255 so wrapping will not be a problem and will not
+	 * lead to subsequent overflows.
+	 */
+	if (root_level == 0 || root_level == 1) {
+		return false;
+	}
+
+	/* Cap end to stay within the bounds of the page table. */
+	if (end > ptable_end) {
+		end = ptable_end;
+	}
 
 	/*
 	 * Do it in two steps to prevent leaving the table in a halfway updated
 	 * state. In such a two-step implementation, the table may be left with
 	 * extra internal tables, but no different mapping on failure.
 	 */
-	if (!mm_map_level(begin, end, pa_begin, attrs, table, level, flags)) {
+	if (!mm_map_root(t, begin, end, attrs, root_level, flags) ||
+	    !mm_map_root(t, begin, end, attrs, root_level,
+			 flags | MAP_FLAG_COMMIT)) {
 		return false;
 	}
 
-	mm_map_level(begin, end, pa_begin, attrs, table, level,
-		     flags | MAP_FLAG_COMMIT);
-
 	/* Invalidate the tlb. */
 	if (!(mode & MM_MODE_NOINVALIDATE)) {
 		mm_invalidate_tlb(begin, end, (mode & MM_MODE_STAGE1) != 0);
@@ -448,9 +482,13 @@
  */
 void mm_ptable_dump(struct mm_ptable *t, int mode)
 {
-	struct mm_page_table *table = mm_page_table_from_pa(t->table);
+	struct mm_page_table *tables = mm_page_table_from_pa(t->root);
 	int max_level = arch_mm_max_level(mode);
-	mm_dump_table_recursive(table, max_level, max_level);
+	uint8_t root_table_count = arch_mm_root_table_count(mode);
+	uint8_t i;
+	for (i = 0; i < root_table_count; ++i) {
+		mm_dump_table_recursive(&tables[i], max_level, max_level);
+	}
 }
 
 /**
@@ -571,34 +609,25 @@
  */
 void mm_ptable_defrag(struct mm_ptable *t, int mode)
 {
-	struct mm_page_table *table = mm_page_table_from_pa(t->table);
+	struct mm_page_table *tables = mm_page_table_from_pa(t->root);
 	uint8_t level = arch_mm_max_level(mode);
-	uint64_t i;
+	uint8_t root_table_count = arch_mm_root_table_count(mode);
+	uint8_t i;
+	uint64_t j;
 
 	/*
 	 * Loop through each entry in the table. If it points to another table,
 	 * check if that table can be replaced by a block or an absent entry.
 	 */
-	for (i = 0; i < MM_PTE_PER_PAGE; ++i) {
-		table->entries[i] =
-			mm_ptable_defrag_entry(table->entries[i], level);
+	for (i = 0; i < root_table_count; ++i) {
+		for (j = 0; j < MM_PTE_PER_PAGE; ++j) {
+			tables[i].entries[j] = mm_ptable_defrag_entry(
+				tables[i].entries[j], level);
+		}
 	}
 }
 
 /**
- * Unmaps the hypervisor pages from the given page table.
- */
-bool mm_ptable_unmap_hypervisor(struct mm_ptable *t, int mode)
-{
-	/* TODO: If we add pages dynamically, they must be included here too. */
-	return mm_ptable_unmap(t, layout_text_begin(), layout_text_end(),
-			       mode) &&
-	       mm_ptable_unmap(t, layout_rodata_begin(), layout_rodata_end(),
-			       mode) &&
-	       mm_ptable_unmap(t, layout_data_begin(), layout_data_end(), mode);
-}
-
-/**
  * Determines if the given address is mapped in the given page table by
  * recursively traversing all levels of the page table.
  */
@@ -635,12 +664,18 @@
 static bool mm_ptable_is_mapped(struct mm_ptable *t, ptable_addr_t addr,
 				int mode)
 {
-	struct mm_page_table *table = mm_page_table_from_pa(t->table);
+	struct mm_page_table *tables = mm_page_table_from_pa(t->root);
 	uint8_t level = arch_mm_max_level(mode);
+	size_t index;
 
 	addr = mm_round_down_to_page(addr);
+	index = mm_index(addr, level + 1);
 
-	return mm_is_mapped_recursive(table, addr, level);
+	if (index >= arch_mm_root_table_count(mode)) {
+		return false;
+	}
+
+	return mm_is_mapped_recursive(&tables[index], addr, level);
 }
 
 /**
@@ -648,26 +683,51 @@
  */
 bool mm_ptable_init(struct mm_ptable *t, int mode)
 {
-	size_t i;
-	struct mm_page_table *table;
+	uint8_t i;
+	size_t j;
+	struct mm_page_table *tables;
+	uint8_t root_table_count = arch_mm_root_table_count(mode);
 
-	table = mm_alloc_page_table(mode & MM_MODE_NOSYNC);
-	if (table == NULL) {
+	tables = mm_alloc_page_tables(root_table_count, mode & MM_MODE_NOSYNC);
+	if (tables == NULL) {
 		return false;
 	}
 
-	for (i = 0; i < MM_PTE_PER_PAGE; i++) {
-		table->entries[i] = arch_mm_absent_pte(arch_mm_max_level(mode));
+	for (i = 0; i < root_table_count; i++) {
+		for (j = 0; j < MM_PTE_PER_PAGE; j++) {
+			tables[i].entries[j] =
+				arch_mm_absent_pte(arch_mm_max_level(mode));
+		}
 	}
 
 	/* TODO: halloc could return a virtual or physical address if mm not
 	 * enabled? */
-	t->table = pa_init((uintpaddr_t)table);
+	t->root = pa_init((uintpaddr_t)tables);
 
 	return true;
 }
 
 /**
+ * Frees all memory associated with the given page table.
+ */
+void mm_ptable_fini(struct mm_ptable *t, int mode)
+{
+	struct mm_page_table *tables = mm_page_table_from_pa(t->root);
+	uint8_t level = arch_mm_max_level(mode);
+	uint8_t root_table_count = arch_mm_root_table_count(mode);
+	uint8_t i;
+	uint64_t j;
+
+	for (i = 0; i < root_table_count; ++i) {
+		for (j = 0; j < MM_PTE_PER_PAGE; ++j) {
+			mm_free_page_pte(tables[i].entries[j], level);
+		}
+	}
+
+	hfree(tables);
+}
+
+/**
  * Updates a VM's page table such that the given physical address range is
  * mapped in the address space at the corresponding address range in the
  * architecture-agnostic mode provided.
@@ -695,6 +755,18 @@
 }
 
 /**
+ * Unmaps the hypervisor pages from the given page table.
+ */
+bool mm_vm_unmap_hypervisor(struct mm_ptable *t, int mode)
+{
+	/* TODO: If we add pages dynamically, they must be included here too. */
+	return mm_vm_unmap(t, layout_text_begin(), layout_text_end(), mode) &&
+	       mm_vm_unmap(t, layout_rodata_begin(), layout_rodata_end(),
+			   mode) &&
+	       mm_vm_unmap(t, layout_data_begin(), layout_data_end(), mode);
+}
+
+/**
  * Checks whether the given intermediate physical addess is mapped in the given
  * page table of a VM.
  */
@@ -777,12 +849,12 @@
 	mm_identity_map(layout_data_begin(), layout_data_end(),
 			MM_MODE_R | MM_MODE_W | MM_MODE_NOSYNC);
 
-	return arch_mm_init(ptable.table, true);
+	return arch_mm_init(ptable.root, true);
 }
 
 bool mm_cpu_init(void)
 {
-	return arch_mm_init(ptable.table, false);
+	return arch_mm_init(ptable.root, false);
 }
 
 /**
diff --git a/src/mm_test.cc b/src/mm_test.cc
index 7990d27..cd1e663 100644
--- a/src/mm_test.cc
+++ b/src/mm_test.cc
@@ -22,17 +22,28 @@
 #include "hf/alloc.h"
 }
 
+#include <limits>
 #include <memory>
+#include <span>
+#include <vector>
 
 #include <gmock/gmock.h>
 
 namespace
 {
-using ::testing::Eq;
+using namespace ::std::placeholders;
 
-constexpr size_t TEST_HEAP_SIZE = PAGE_SIZE * 10;
+using ::testing::AllOf;
+using ::testing::Contains;
+using ::testing::Each;
+using ::testing::Eq;
+using ::testing::SizeIs;
+using ::testing::Truly;
+
+constexpr size_t TEST_HEAP_SIZE = PAGE_SIZE * 16;
 const int TOP_LEVEL = arch_mm_max_level(0);
 const pte_t ABSENT_ENTRY = arch_mm_absent_pte(TOP_LEVEL);
+const paddr_t VM_MEM_END = pa_init(0x200'0000'0000);
 
 /**
  * Calculates the size of the address space represented by a page table entry at
@@ -44,267 +55,723 @@
 }
 
 /**
- * Get the page table from the physical address.
+ * Get an STL representation of the page table.
  */
-struct mm_page_table *page_table_from_pa(paddr_t pa)
+std::span<pte_t, MM_PTE_PER_PAGE> get_table(paddr_t pa)
 {
-	return reinterpret_cast<struct mm_page_table *>(
+	auto table = reinterpret_cast<struct mm_page_table *>(
 		ptr_from_va(va_from_pa(pa)));
+	return std::span<pte_t>(table->entries, std::end(table->entries));
 }
 
 /**
- * Allocate a page table.
+ * Get an STL representation of the ptable.
  */
-struct mm_page_table *alloc_page_table()
+std::vector<std::span<pte_t, MM_PTE_PER_PAGE>> get_ptable(
+	const struct mm_ptable &ptable, int mode)
 {
-	return reinterpret_cast<struct mm_page_table *>(halloc_aligned(
-		sizeof(struct mm_page_table), alignof(struct mm_page_table)));
-}
-
-/**
- * Fill a ptable with absent entries.
- */
-void init_absent(struct mm_page_table *table)
-{
-	for (uint64_t i = 0; i < MM_PTE_PER_PAGE; ++i) {
-		table->entries[i] = ABSENT_ENTRY;
+	std::vector<std::span<pte_t, MM_PTE_PER_PAGE>> all;
+	const uint8_t root_table_count = arch_mm_root_table_count(mode);
+	for (uint8_t i = 0; i < root_table_count; ++i) {
+		all.push_back(get_table(
+			pa_add(ptable.root, i * sizeof(struct mm_page_table))));
 	}
+	return all;
 }
 
-/**
- * Fill a ptable with block entries.
- */
-void init_blocks(struct mm_page_table *table, int level, paddr_t start_address,
-		 uint64_t attrs)
+class mm : public ::testing::Test
 {
-	for (uint64_t i = 0; i < MM_PTE_PER_PAGE; ++i) {
-		table->entries[i] = arch_mm_block_pte(
-			level, pa_add(start_address, i * mm_entry_size(level)),
-			attrs);
+	void SetUp() override
+	{
+		/*
+		 * TODO: replace with direct use of stdlib allocator so
+		 * sanitizers are more effective.
+		 */
+		test_heap = std::make_unique<uint8_t[]>(TEST_HEAP_SIZE);
+		halloc_init((size_t)test_heap.get(), TEST_HEAP_SIZE);
 	}
-}
+
+	std::unique_ptr<uint8_t[]> test_heap;
+};
 
 /**
- * Defragging an entirely empty table should have no effect.
+ * A new table is initially empty.
  */
-TEST(mm, ptable_defrag_empty)
+TEST_F(mm, ptable_init_empty)
 {
-	auto test_heap = std::make_unique<uint8_t[]>(TEST_HEAP_SIZE);
-	halloc_init((size_t)test_heap.get(), TEST_HEAP_SIZE);
-
-	struct mm_page_table *table = alloc_page_table();
-	init_absent(table);
+	constexpr int mode = MM_MODE_STAGE1;
 	struct mm_ptable ptable;
-	ptable.table = pa_init((uintpaddr_t)table);
-
-	mm_ptable_defrag(&ptable, 0);
-
-	for (uint64_t i = 0; i < MM_PTE_PER_PAGE; ++i) {
-		EXPECT_THAT(table->entries[i], Eq(ABSENT_ENTRY)) << "i=" << i;
-	}
+	ASSERT_TRUE(mm_ptable_init(&ptable, mode));
+	EXPECT_THAT(get_ptable(ptable, mode),
+		    AllOf(SizeIs(1), Each(Each(ABSENT_ENTRY))));
+	mm_ptable_fini(&ptable, mode);
 }
 
 /**
- * Defragging a table with some empty subtables (even nested) should result in
+ * Each new concatenated table is initially empty.
+ */
+TEST_F(mm, ptable_init_concatenated_empty)
+{
+	constexpr int mode = 0;
+	struct mm_ptable ptable;
+	ASSERT_TRUE(mm_ptable_init(&ptable, mode));
+	EXPECT_THAT(get_ptable(ptable, mode),
+		    AllOf(SizeIs(4), Each(Each(ABSENT_ENTRY))));
+	mm_ptable_fini(&ptable, mode);
+}
+
+/**
+ * Only the first page is mapped with all others left absent.
+ */
+TEST_F(mm, map_first_page)
+{
+	constexpr int mode = 0;
+	const paddr_t page_begin = pa_init(0);
+	const paddr_t page_end = pa_add(page_begin, PAGE_SIZE);
+	struct mm_ptable ptable;
+	ASSERT_TRUE(mm_ptable_init(&ptable, mode));
+	ASSERT_TRUE(mm_vm_identity_map(&ptable, page_begin, page_end, mode,
+				       nullptr));
+
+	auto tables = get_ptable(ptable, mode);
+	EXPECT_THAT(tables, SizeIs(4));
+	ASSERT_THAT(TOP_LEVEL, Eq(2));
+
+	/* Check that the first page is mapped and nothing else. */
+	EXPECT_THAT(std::span(tables).last(3), Each(Each(ABSENT_ENTRY)));
+
+	auto table_l2 = tables.front();
+	EXPECT_THAT(table_l2.subspan(1), Each(ABSENT_ENTRY));
+	ASSERT_TRUE(arch_mm_pte_is_table(table_l2[0], TOP_LEVEL));
+
+	auto table_l1 = get_table(arch_mm_table_from_pte(table_l2[0]));
+	EXPECT_THAT(table_l1.subspan(1), Each(ABSENT_ENTRY));
+	ASSERT_TRUE(arch_mm_pte_is_table(table_l1[0], TOP_LEVEL - 1));
+
+	auto table_l0 = get_table(arch_mm_table_from_pte(table_l1[0]));
+	EXPECT_THAT(table_l0.subspan(1), Each(ABSENT_ENTRY));
+	ASSERT_TRUE(arch_mm_pte_is_block(table_l0[0], TOP_LEVEL - 2));
+	EXPECT_THAT(pa_addr(arch_mm_block_from_pte(table_l0[0])),
+		    Eq(pa_addr(page_begin)));
+
+	mm_ptable_fini(&ptable, mode);
+}
+
+/**
+ * The start address is rounded down and the end address is rounded up to page
+ * boundaries.
+ */
+TEST_F(mm, map_round_to_page)
+{
+	constexpr int mode = 0;
+	const paddr_t map_begin = pa_init(0x200'0000'0000 - PAGE_SIZE + 23);
+	const paddr_t map_end = pa_add(map_begin, 268);
+	ipaddr_t ipa = ipa_init(-1);
+	struct mm_ptable ptable;
+	ASSERT_TRUE(mm_ptable_init(&ptable, mode));
+	ASSERT_TRUE(
+		mm_vm_identity_map(&ptable, map_begin, map_end, mode, &ipa));
+	EXPECT_THAT(ipa_addr(ipa), Eq(pa_addr(map_begin)));
+
+	auto tables = get_ptable(ptable, mode);
+	EXPECT_THAT(tables, SizeIs(4));
+	ASSERT_THAT(TOP_LEVEL, Eq(2));
+
+	/* Check that the last page is mapped, and nothing else. */
+	EXPECT_THAT(std::span(tables).first(3), Each(Each(ABSENT_ENTRY)));
+
+	auto table_l2 = tables.back();
+	EXPECT_THAT(table_l2.first(table_l2.size() - 1), Each(ABSENT_ENTRY));
+	ASSERT_TRUE(arch_mm_pte_is_table(table_l2.last(1)[0], TOP_LEVEL));
+
+	auto table_l1 = get_table(arch_mm_table_from_pte(table_l2.last(1)[0]));
+	EXPECT_THAT(table_l1.first(table_l1.size() - 1), Each(ABSENT_ENTRY));
+	ASSERT_TRUE(arch_mm_pte_is_table(table_l1.last(1)[0], TOP_LEVEL - 1));
+
+	auto table_l0 = get_table(arch_mm_table_from_pte(table_l1.last(1)[0]));
+	EXPECT_THAT(table_l0.first(table_l0.size() - 1), Each(ABSENT_ENTRY));
+	ASSERT_TRUE(arch_mm_pte_is_block(table_l0.last(1)[0], TOP_LEVEL - 2));
+	EXPECT_THAT(pa_addr(arch_mm_block_from_pte(table_l0.last(1)[0])),
+		    Eq(0x200'0000'0000 - PAGE_SIZE));
+
+	mm_ptable_fini(&ptable, mode);
+}
+
+/**
+ * Map a two page range over the boundary of two tables.
+ */
+TEST_F(mm, map_across_tables)
+{
+	constexpr int mode = 0;
+	const paddr_t map_begin = pa_init(0x80'0000'0000 - PAGE_SIZE);
+	const paddr_t map_end = pa_add(map_begin, 2 * PAGE_SIZE);
+	struct mm_ptable ptable;
+	ASSERT_TRUE(mm_ptable_init(&ptable, mode));
+	ASSERT_TRUE(
+		mm_vm_identity_map(&ptable, map_begin, map_end, mode, nullptr));
+
+	auto tables = get_ptable(ptable, mode);
+	EXPECT_THAT(tables, SizeIs(4));
+	EXPECT_THAT(std::span(tables).last(2), Each(Each(ABSENT_ENTRY)));
+	ASSERT_THAT(TOP_LEVEL, Eq(2));
+
+	/* Check that only the last page of the first table is mapped. */
+	auto table0_l2 = tables.front();
+	EXPECT_THAT(table0_l2.first(table0_l2.size() - 1), Each(ABSENT_ENTRY));
+	ASSERT_TRUE(arch_mm_pte_is_table(table0_l2.last(1)[0], TOP_LEVEL));
+
+	auto table0_l1 =
+		get_table(arch_mm_table_from_pte(table0_l2.last(1)[0]));
+	EXPECT_THAT(table0_l1.first(table0_l1.size() - 1), Each(ABSENT_ENTRY));
+	ASSERT_TRUE(arch_mm_pte_is_table(table0_l1.last(1)[0], TOP_LEVEL - 1));
+
+	auto table0_l0 =
+		get_table(arch_mm_table_from_pte(table0_l1.last(1)[0]));
+	EXPECT_THAT(table0_l0.first(table0_l0.size() - 1), Each(ABSENT_ENTRY));
+	ASSERT_TRUE(arch_mm_pte_is_block(table0_l0.last(1)[0], TOP_LEVEL - 2));
+	EXPECT_THAT(pa_addr(arch_mm_block_from_pte(table0_l0.last(1)[0])),
+		    Eq(pa_addr(map_begin)));
+
+	/* Check that only the first page of the second table is mapped. */
+	auto table1_l2 = tables[1];
+	EXPECT_THAT(table1_l2.subspan(1), Each(ABSENT_ENTRY));
+	ASSERT_TRUE(arch_mm_pte_is_table(table1_l2[0], TOP_LEVEL));
+
+	auto table1_l1 = get_table(arch_mm_table_from_pte(table1_l2[0]));
+	EXPECT_THAT(table1_l1.subspan(1), Each(ABSENT_ENTRY));
+	ASSERT_TRUE(arch_mm_pte_is_table(table1_l1[0], TOP_LEVEL - 1));
+
+	auto table1_l0 = get_table(arch_mm_table_from_pte(table1_l1[0]));
+	EXPECT_THAT(table1_l0.subspan(1), Each(ABSENT_ENTRY));
+	ASSERT_TRUE(arch_mm_pte_is_block(table1_l0[0], TOP_LEVEL - 2));
+	EXPECT_THAT(pa_addr(arch_mm_block_from_pte(table1_l0[0])),
+		    Eq(pa_addr(pa_add(map_begin, PAGE_SIZE))));
+
+	mm_ptable_fini(&ptable, mode);
+}
+
+/**
+ * Mapping all of memory creates blocks at the highest level.
+ */
+TEST_F(mm, map_all_at_top_level)
+{
+	constexpr int mode = 0;
+	struct mm_ptable ptable;
+	ASSERT_TRUE(mm_ptable_init(&ptable, mode));
+	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
+				       nullptr));
+	auto tables = get_ptable(ptable, mode);
+	EXPECT_THAT(
+		tables,
+		AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
+							   _1, TOP_LEVEL))))));
+	for (uint64_t i = 0; i < tables.size(); ++i) {
+		for (uint64_t j = 0; j < MM_PTE_PER_PAGE; ++j) {
+			EXPECT_THAT(
+				pa_addr(arch_mm_block_from_pte(tables[i][j])),
+				Eq((i * mm_entry_size(TOP_LEVEL + 1)) +
+				   (j * mm_entry_size(TOP_LEVEL))))
+				<< "i=" << i << " j=" << j;
+		}
+	}
+	mm_ptable_fini(&ptable, mode);
+}
+
+/**
+ * Mapping all of memory and then mapping a page again doesn't introduce a
+ * special mapping for that particular page.
+ */
+TEST_F(mm, map_already_mapped)
+{
+	constexpr int mode = 0;
+	ipaddr_t ipa = ipa_init(-1);
+	struct mm_ptable ptable;
+	ASSERT_TRUE(mm_ptable_init(&ptable, mode));
+	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
+				       nullptr));
+	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), pa_init(PAGE_SIZE),
+				       mode, &ipa));
+	EXPECT_THAT(ipa_addr(ipa), Eq(0));
+	EXPECT_THAT(
+		get_ptable(ptable, mode),
+		AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
+							   _1, TOP_LEVEL))))));
+	mm_ptable_fini(&ptable, mode);
+}
+
+/**
+ * Mapping a reverse range, i.e. the end comes before the start, is treated as
+ * an empty range so no mappings are made.
+ */
+TEST_F(mm, map_reverse_range)
+{
+	constexpr int mode = 0;
+	ipaddr_t ipa = ipa_init(-1);
+	struct mm_ptable ptable;
+	ASSERT_TRUE(mm_ptable_init(&ptable, mode));
+	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0x1234'5678),
+				       pa_init(0x5000), mode, &ipa));
+	EXPECT_THAT(ipa_addr(ipa), Eq(0x1234'5678));
+	EXPECT_THAT(get_ptable(ptable, mode),
+		    AllOf(SizeIs(4), Each(Each(ABSENT_ENTRY))));
+	mm_ptable_fini(&ptable, mode);
+}
+
+/**
+ * Mapping a reverse range in the same page will map the page because the start
+ * of the range is rounded down and the end is rounded up.
+ *
+ * This serves as a form of documentation of behaviour rather than a
+ * requirement. Check whether any code relies on this before changing it.
+ */
+TEST_F(mm, map_reverse_range_quirk)
+{
+	constexpr int mode = 0;
+	ipaddr_t ipa = ipa_init(-1);
+	struct mm_ptable ptable;
+	ASSERT_TRUE(mm_ptable_init(&ptable, mode));
+	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(20), pa_init(10), mode,
+				       &ipa));
+	EXPECT_THAT(ipa_addr(ipa), Eq(20));
+	EXPECT_TRUE(mm_vm_is_mapped(&ptable, ipa, mode));
+	mm_ptable_fini(&ptable, mode);
+}
+
+/**
+ * Mapping a range up to the maximum address causes the range end to wrap to
+ * zero as it is rounded up to a page boundary, meaning no memory is mapped.
+ *
+ * This serves as a form of documentation of behaviour rather than a
+ * requirement. Check whether any code relies on this before changing it.
+ */
+TEST_F(mm, map_last_address_quirk)
+{
+	constexpr int mode = 0;
+	ipaddr_t ipa = ipa_init(-1);
+	struct mm_ptable ptable;
+	ASSERT_TRUE(mm_ptable_init(&ptable, mode));
+	ASSERT_TRUE(mm_vm_identity_map(
+		&ptable, pa_init(0),
+		pa_init(std::numeric_limits<uintpaddr_t>::max()), mode, &ipa));
+	EXPECT_THAT(ipa_addr(ipa), Eq(0));
+	EXPECT_THAT(get_ptable(ptable, mode),
+		    AllOf(SizeIs(4), Each(Each(ABSENT_ENTRY))));
+	mm_ptable_fini(&ptable, mode);
+}
+
+/**
+ * Mapping a range that goes beyond the available memory clamps to the available
+ * range.
+ */
+TEST_F(mm, map_clamp_to_range)
+{
+	constexpr int mode = 0;
+	struct mm_ptable ptable;
+	ASSERT_TRUE(mm_ptable_init(&ptable, mode));
+	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0),
+				       pa_init(0xf32'0000'0000'0000), mode,
+				       nullptr));
+	EXPECT_THAT(
+		get_ptable(ptable, mode),
+		AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
+							   _1, TOP_LEVEL))))));
+	mm_ptable_fini(&ptable, mode);
+}
+
+/**
+ * Mapping a range outside of the available memory is ignored and doesn't alter
+ * the page tables.
+ */
+TEST_F(mm, map_ignore_out_of_range)
+{
+	constexpr int mode = 0;
+	ipaddr_t ipa = ipa_init(-1);
+	struct mm_ptable ptable;
+	ASSERT_TRUE(mm_ptable_init(&ptable, mode));
+	ASSERT_TRUE(mm_vm_identity_map(
+		&ptable, VM_MEM_END, pa_init(0xf0'0000'0000'0000), mode, &ipa));
+	EXPECT_THAT(ipa_addr(ipa), Eq(pa_addr(VM_MEM_END)));
+	EXPECT_THAT(get_ptable(ptable, mode),
+		    AllOf(SizeIs(4), Each(Each(ABSENT_ENTRY))));
+	mm_ptable_fini(&ptable, 0);
+}
+
+/**
+ * Map a single page and then map all of memory, which replaces the single
+ * page mapping with a higher level block mapping.
+ */
+TEST_F(mm, map_block_replaces_table)
+{
+	constexpr int mode = 0;
+	const paddr_t page_begin = pa_init(34567 * PAGE_SIZE);
+	const paddr_t page_end = pa_add(page_begin, PAGE_SIZE);
+	struct mm_ptable ptable;
+	ASSERT_TRUE(mm_ptable_init(&ptable, mode));
+	ASSERT_TRUE(mm_vm_identity_map(&ptable, page_begin, page_end, mode,
+				       nullptr));
+	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
+				       nullptr));
+	EXPECT_THAT(
+		get_ptable(ptable, mode),
+		AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
+							   _1, TOP_LEVEL))))));
+	mm_ptable_fini(&ptable, mode);
+}
+
+/**
+ * Mapping all memory at the top level, then unmapping a page and remapping it
+ * at a lower level, does not result in all memory being mapped at the top
+ * level again.
+ */
+TEST_F(mm, map_does_not_defrag)
+{
+	constexpr int mode = 0;
+	const paddr_t page_begin = pa_init(12000 * PAGE_SIZE);
+	const paddr_t page_end = pa_add(page_begin, PAGE_SIZE);
+	struct mm_ptable ptable;
+	ASSERT_TRUE(mm_ptable_init(&ptable, mode));
+	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
+				       nullptr));
+	ASSERT_TRUE(mm_vm_unmap(&ptable, page_begin, page_end, mode));
+	ASSERT_TRUE(mm_vm_identity_map(&ptable, page_begin, page_end, mode,
+				       nullptr));
+	EXPECT_THAT(get_ptable(ptable, mode),
+		    AllOf(SizeIs(4),
+			  Each(Each(Truly(std::bind(arch_mm_pte_is_present, _1,
+						    TOP_LEVEL)))),
+			  Contains(Contains(Truly(std::bind(
+				  arch_mm_pte_is_block, _1, TOP_LEVEL)))),
+			  Contains(Contains(Truly(std::bind(
+				  arch_mm_pte_is_table, _1, TOP_LEVEL))))));
+	mm_ptable_fini(&ptable, mode);
+}
+
+/**
+ * If nothing is mapped, unmapping the hypervisor has no effect.
+ */
+TEST_F(mm, vm_unmap_hypervisor_not_mapped)
+{
+	constexpr int mode = 0;
+	struct mm_ptable ptable;
+	ASSERT_TRUE(mm_ptable_init(&ptable, mode));
+	EXPECT_TRUE(mm_vm_unmap_hypervisor(&ptable, mode));
+	EXPECT_THAT(get_ptable(ptable, mode),
+		    AllOf(SizeIs(4), Each(Each(ABSENT_ENTRY))));
+	mm_ptable_fini(&ptable, mode);
+}
+
+/**
+ * If the range is not mapped, unmapping it has no effect.
+ */
+TEST_F(mm, unmap_not_mapped)
+{
+	constexpr int mode = 0;
+	struct mm_ptable ptable;
+	ASSERT_TRUE(mm_ptable_init(&ptable, mode));
+	EXPECT_TRUE(
+		mm_vm_unmap(&ptable, pa_init(12345), pa_init(987652), mode));
+	EXPECT_THAT(get_ptable(ptable, mode),
+		    AllOf(SizeIs(4), Each(Each(ABSENT_ENTRY))));
+	mm_ptable_fini(&ptable, mode);
+}
+
+/**
+ * Unmapping everything should result in an empty page table with no subtables.
+ */
+TEST_F(mm, unmap_all)
+{
+	constexpr int mode = 0;
+	const paddr_t l0_begin = pa_init(uintpaddr_t(524421) * PAGE_SIZE);
+	const paddr_t l0_end = pa_add(l0_begin, 17 * PAGE_SIZE);
+	const paddr_t l1_begin = pa_init(3 * mm_entry_size(1));
+	const paddr_t l1_end = pa_add(l1_begin, 5 * mm_entry_size(1));
+	struct mm_ptable ptable;
+	ASSERT_TRUE(mm_ptable_init(&ptable, mode));
+	ASSERT_TRUE(
+		mm_vm_identity_map(&ptable, l0_begin, l0_end, mode, nullptr));
+	ASSERT_TRUE(
+		mm_vm_identity_map(&ptable, l1_begin, l1_end, mode, nullptr));
+	EXPECT_TRUE(mm_vm_unmap(&ptable, pa_init(0), VM_MEM_END, mode));
+	EXPECT_THAT(get_ptable(ptable, mode),
+		    AllOf(SizeIs(4), Each(Each(ABSENT_ENTRY))));
+	mm_ptable_fini(&ptable, mode);
+}
+
+/**
+ * The range to unmap is rounded out to the containing pages.
+ */
+TEST_F(mm, unmap_round_to_page)
+{
+	constexpr int mode = 0;
+	const paddr_t map_begin = pa_init(0x160'0000'0000 + PAGE_SIZE);
+	const paddr_t map_end = pa_add(map_begin, PAGE_SIZE);
+	struct mm_ptable ptable;
+	ASSERT_TRUE(mm_ptable_init(&ptable, mode));
+	ASSERT_TRUE(
+		mm_vm_identity_map(&ptable, map_begin, map_end, mode, nullptr));
+	ASSERT_TRUE(mm_vm_unmap(&ptable, pa_add(map_begin, 93),
+				pa_add(map_begin, 99), mode));
+	EXPECT_THAT(get_ptable(ptable, mode),
+		    AllOf(SizeIs(4), Each(Each(ABSENT_ENTRY))));
+	mm_ptable_fini(&ptable, mode);
+}
+
+/**
+ * Unmap a range of page mappings that spans multiple concatenated tables.
+ */
+TEST_F(mm, unmap_across_tables)
+{
+	constexpr int mode = 0;
+	const paddr_t map_begin = pa_init(0x180'0000'0000 - PAGE_SIZE);
+	const paddr_t map_end = pa_add(map_begin, 2 * PAGE_SIZE);
+	struct mm_ptable ptable;
+	ASSERT_TRUE(mm_ptable_init(&ptable, mode));
+	ASSERT_TRUE(
+		mm_vm_identity_map(&ptable, map_begin, map_end, mode, nullptr));
+	ASSERT_TRUE(mm_vm_unmap(&ptable, map_begin, map_end, mode));
+	EXPECT_THAT(get_ptable(ptable, mode),
+		    AllOf(SizeIs(4), Each(Each(ABSENT_ENTRY))));
+	mm_ptable_fini(&ptable, mode);
+}
+
+/**
+ * Unmapping outside the range of memory has no effect.
+ */
+TEST_F(mm, unmap_out_of_range)
+{
+	constexpr int mode = 0;
+	struct mm_ptable ptable;
+	ASSERT_TRUE(mm_ptable_init(&ptable, mode));
+	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
+				       nullptr));
+	ASSERT_TRUE(mm_vm_unmap(&ptable, VM_MEM_END, pa_init(0x4000'0000'0000),
+				mode));
+	EXPECT_THAT(
+		get_ptable(ptable, mode),
+		AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
+							   _1, TOP_LEVEL))))));
+	mm_ptable_fini(&ptable, mode);
+}
+
+/**
+ * Unmapping a reverse range, i.e. the end comes before the start, is treated as
+ * an empty range so no change is made.
+ */
+TEST_F(mm, unmap_reverse_range)
+{
+	constexpr int mode = 0;
+	struct mm_ptable ptable;
+	ASSERT_TRUE(mm_ptable_init(&ptable, mode));
+	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
+				       nullptr));
+	ASSERT_TRUE(mm_vm_unmap(&ptable, pa_init(0x80'a000'0000), pa_init(27),
+				mode));
+	EXPECT_THAT(
+		get_ptable(ptable, mode),
+		AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
+							   _1, TOP_LEVEL))))));
+	mm_ptable_fini(&ptable, mode);
+}
+
+/**
+ * Unmapping a reverse range in the same page will unmap the page because the
+ * start of the range is rounded down and the end is rounded up.
+ *
+ * This serves as a form of documentation of behaviour rather than a
+ * requirement. Check whether any code relies on this before changing it.
+ */
+TEST_F(mm, unmap_reverse_range_quirk)
+{
+	constexpr int mode = 0;
+	const paddr_t page_begin = pa_init(0x180'0000'0000);
+	const paddr_t page_end = pa_add(page_begin, PAGE_SIZE);
+	struct mm_ptable ptable;
+	ASSERT_TRUE(mm_ptable_init(&ptable, mode));
+	ASSERT_TRUE(mm_vm_identity_map(&ptable, page_begin, page_end, mode,
+				       nullptr));
+	ASSERT_TRUE(mm_vm_unmap(&ptable, pa_add(page_begin, 100),
+				pa_add(page_begin, 50), mode));
+	EXPECT_THAT(get_ptable(ptable, mode),
+		    AllOf(SizeIs(4), Each(Each(ABSENT_ENTRY))));
+	mm_ptable_fini(&ptable, mode);
+}
+
+/**
+ * Unmapping a range up to the maximum address causes the range end to wrap to
+ * zero as it is rounded up to a page boundary, meaning no change is made.
+ *
+ * This serves as a form of documentation of behaviour rather than a
+ * requirement. Check whether any code relies on this before changing it.
+ */
+TEST_F(mm, unmap_last_address_quirk)
+{
+	constexpr int mode = 0;
+	struct mm_ptable ptable;
+	ASSERT_TRUE(mm_ptable_init(&ptable, mode));
+	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
+				       nullptr));
+	ASSERT_TRUE(mm_vm_unmap(
+		&ptable, pa_init(0),
+		pa_init(std::numeric_limits<uintpaddr_t>::max()), mode));
+	EXPECT_THAT(
+		get_ptable(ptable, mode),
+		AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
+							   _1, TOP_LEVEL))))));
+	mm_ptable_fini(&ptable, mode);
+}
+
+/**
+ * Mapping then unmapping a page does not defrag the table.
+ */
+TEST_F(mm, unmap_does_not_defrag)
+{
+	constexpr int mode = 0;
+	const paddr_t l0_begin = pa_init(5555 * PAGE_SIZE);
+	const paddr_t l0_end = pa_add(l0_begin, 13 * PAGE_SIZE);
+	const paddr_t l1_begin = pa_init(666 * mm_entry_size(1));
+	const paddr_t l1_end = pa_add(l1_begin, 5 * mm_entry_size(1));
+	struct mm_ptable ptable;
+	ASSERT_TRUE(mm_ptable_init(&ptable, mode));
+	ASSERT_TRUE(
+		mm_vm_identity_map(&ptable, l0_begin, l0_end, mode, nullptr));
+	ASSERT_TRUE(
+		mm_vm_identity_map(&ptable, l1_begin, l1_end, mode, nullptr));
+	ASSERT_TRUE(mm_vm_unmap(&ptable, l0_begin, l0_end, mode));
+	ASSERT_TRUE(mm_vm_unmap(&ptable, l1_begin, l1_end, mode));
+	EXPECT_THAT(get_ptable(ptable, mode),
+		    AllOf(SizeIs(4), Each(Each(ABSENT_ENTRY))));
+	mm_ptable_fini(&ptable, mode);
+}
+
+/**
+ * Nothing is mapped in an empty table.
+ */
+TEST_F(mm, is_mapped_empty)
+{
+	constexpr int mode = 0;
+	struct mm_ptable ptable;
+	ASSERT_TRUE(mm_ptable_init(&ptable, mode));
+	EXPECT_FALSE(mm_vm_is_mapped(&ptable, ipa_init(0), mode));
+	EXPECT_FALSE(mm_vm_is_mapped(&ptable, ipa_init(0x8123'2344), mode));
+	EXPECT_FALSE(mm_vm_is_mapped(&ptable, ipa_init(0x1e0'0000'0073), mode));
+	mm_ptable_fini(&ptable, mode);
+}
+
+/**
+ * Everything is mapped in a full table.
+ */
+TEST_F(mm, is_mapped_all)
+{
+	constexpr int mode = 0;
+	struct mm_ptable ptable;
+	ASSERT_TRUE(mm_ptable_init(&ptable, mode));
+	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
+				       nullptr));
+	EXPECT_TRUE(mm_vm_is_mapped(&ptable, ipa_init(0), mode));
+	EXPECT_TRUE(mm_vm_is_mapped(&ptable, ipa_init(0xf247'a7b3), mode));
+	EXPECT_TRUE(mm_vm_is_mapped(&ptable, ipa_init(0x1ff'7bfa'983b), mode));
+	mm_ptable_fini(&ptable, mode);
+}
+
+/**
+ * A page is mapped for the range [begin, end).
+ */
+TEST_F(mm, is_mapped_page)
+{
+	constexpr int mode = 0;
+	const paddr_t page_begin = pa_init(0x100'0000'0000);
+	const paddr_t page_end = pa_add(page_begin, PAGE_SIZE);
+	struct mm_ptable ptable;
+	ASSERT_TRUE(mm_ptable_init(&ptable, mode));
+	ASSERT_TRUE(mm_vm_identity_map(&ptable, page_begin, page_end, mode,
+				       nullptr));
+	EXPECT_TRUE(mm_vm_is_mapped(&ptable, ipa_from_pa(page_begin), mode));
+	EXPECT_TRUE(mm_vm_is_mapped(
+		&ptable, ipa_from_pa(pa_add(page_begin, 127)), mode));
+	EXPECT_FALSE(mm_vm_is_mapped(&ptable, ipa_from_pa(page_end), mode));
+	mm_ptable_fini(&ptable, mode);
+}
+
+/**
+ * Everything out of range is not mapped.
+ */
+TEST_F(mm, is_mapped_out_of_range)
+{
+	constexpr int mode = 0;
+	struct mm_ptable ptable;
+	ASSERT_TRUE(mm_ptable_init(&ptable, mode));
+	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
+				       nullptr));
+	EXPECT_FALSE(mm_vm_is_mapped(&ptable, ipa_from_pa(VM_MEM_END), mode));
+	EXPECT_FALSE(
+		mm_vm_is_mapped(&ptable, ipa_init(0x1000'adb7'8123), mode));
+	EXPECT_FALSE(mm_vm_is_mapped(
+		&ptable, ipa_init(std::numeric_limits<uintpaddr_t>::max()),
+		mode));
+	mm_ptable_fini(&ptable, mode);
+}
+
+/**
+ * Defragging an entirely empty table has no effect.
+ */
+TEST_F(mm, defrag_empty)
+{
+	constexpr int mode = 0;
+	struct mm_ptable ptable;
+	ASSERT_TRUE(mm_ptable_init(&ptable, mode));
+	mm_ptable_defrag(&ptable, mode);
+	EXPECT_THAT(get_ptable(ptable, mode),
+		    AllOf(SizeIs(4), Each(Each(ABSENT_ENTRY))));
+	mm_ptable_fini(&ptable, mode);
+}
+
+/**
+ * Defragging a table with some empty subtables (even nested) results in
  * an empty table.
  */
-TEST(mm, ptable_defrag_empty_subtables)
+TEST_F(mm, defrag_empty_subtables)
 {
-	auto test_heap = std::make_unique<uint8_t[]>(TEST_HEAP_SIZE);
-	halloc_init((size_t)test_heap.get(), TEST_HEAP_SIZE);
-
-	struct mm_page_table *subtable_a = alloc_page_table();
-	struct mm_page_table *subtable_aa = alloc_page_table();
-	struct mm_page_table *subtable_b = alloc_page_table();
-	struct mm_page_table *table = alloc_page_table();
-	init_absent(subtable_a);
-	init_absent(subtable_aa);
-	init_absent(subtable_b);
-	init_absent(table);
-
-	subtable_a->entries[3] = arch_mm_table_pte(
-		TOP_LEVEL - 1, pa_init((uintpaddr_t)subtable_aa));
-	table->entries[0] =
-		arch_mm_table_pte(TOP_LEVEL, pa_init((uintpaddr_t)subtable_a));
-	table->entries[5] =
-		arch_mm_table_pte(TOP_LEVEL, pa_init((uintpaddr_t)subtable_b));
-
+	constexpr int mode = 0;
+	const paddr_t l0_begin = pa_init(120000 * PAGE_SIZE);
+	const paddr_t l0_end = pa_add(l0_begin, PAGE_SIZE);
+	const paddr_t l1_begin = pa_init(3 * mm_entry_size(1));
+	const paddr_t l1_end = pa_add(l1_begin, 5 * mm_entry_size(1));
 	struct mm_ptable ptable;
-	ptable.table = pa_init((uintpaddr_t)table);
-
+	ASSERT_TRUE(mm_ptable_init(&ptable, mode));
+	ASSERT_TRUE(
+		mm_vm_identity_map(&ptable, l0_begin, l0_end, mode, nullptr));
+	ASSERT_TRUE(
+		mm_vm_identity_map(&ptable, l0_begin, l0_end, mode, nullptr));
+	ASSERT_TRUE(mm_vm_unmap(&ptable, l0_begin, l0_end, mode));
+	ASSERT_TRUE(mm_vm_unmap(&ptable, l1_begin, l1_end, mode));
 	mm_ptable_defrag(&ptable, 0);
-
-	for (uint64_t i = 0; i < MM_PTE_PER_PAGE; ++i) {
-		EXPECT_THAT(table->entries[i], Eq(ABSENT_ENTRY)) << "i=" << i;
-	}
+	EXPECT_THAT(get_ptable(ptable, mode),
+		    AllOf(SizeIs(4), Each(Each(ABSENT_ENTRY))));
+	mm_ptable_fini(&ptable, mode);
 }
 
 /**
  * Any subtable with all blocks with the same attributes should be replaced
  * with a single block.
  */
-TEST(mm, ptable_defrag_block_subtables)
+TEST_F(mm, defrag_block_subtables)
 {
-	auto test_heap = std::make_unique<uint8_t[]>(TEST_HEAP_SIZE);
-	halloc_init((size_t)test_heap.get(), TEST_HEAP_SIZE);
-
-	struct mm_page_table *subtable_a = alloc_page_table();
-	struct mm_page_table *subtable_aa = alloc_page_table();
-	struct mm_page_table *subtable_b = alloc_page_table();
-	struct mm_page_table *table = alloc_page_table();
-	init_blocks(subtable_a, TOP_LEVEL - 1, pa_init(0), 0);
-	init_blocks(subtable_aa, TOP_LEVEL - 2,
-		    pa_init(3 * mm_entry_size(TOP_LEVEL - 1)), 0);
-	init_blocks(subtable_b, TOP_LEVEL - 1,
-		    pa_init(5 * mm_entry_size(TOP_LEVEL)), 0);
-	init_blocks(table, TOP_LEVEL, pa_init(0), 0);
-
-	subtable_a->entries[3] = arch_mm_table_pte(
-		TOP_LEVEL - 1, pa_init((uintpaddr_t)subtable_aa));
-	table->entries[0] =
-		arch_mm_table_pte(TOP_LEVEL, pa_init((uintpaddr_t)subtable_a));
-	table->entries[5] =
-		arch_mm_table_pte(TOP_LEVEL, pa_init((uintpaddr_t)subtable_b));
-
+	constexpr int mode = 0;
+	const paddr_t begin = pa_init(39456 * mm_entry_size(1));
+	const paddr_t middle = pa_add(begin, 67 * PAGE_SIZE);
+	const paddr_t end = pa_add(begin, 4 * mm_entry_size(1));
 	struct mm_ptable ptable;
-	ptable.table = pa_init((uintpaddr_t)table);
-
+	ASSERT_TRUE(mm_ptable_init(&ptable, mode));
+	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
+				       nullptr));
+	ASSERT_TRUE(mm_vm_unmap(&ptable, begin, end, mode));
+	ASSERT_TRUE(mm_vm_identity_map(&ptable, begin, middle, mode, nullptr));
+	ASSERT_TRUE(mm_vm_identity_map(&ptable, middle, end, mode, nullptr));
 	mm_ptable_defrag(&ptable, 0);
-
-	for (uint64_t i = 0; i < MM_PTE_PER_PAGE; ++i) {
-		EXPECT_TRUE(
-			arch_mm_pte_is_present(table->entries[i], TOP_LEVEL))
-			<< "i=" << i;
-		EXPECT_TRUE(arch_mm_pte_is_block(table->entries[i], TOP_LEVEL))
-			<< "i=" << i;
-		EXPECT_THAT(pa_addr(arch_mm_block_from_pte(table->entries[i])),
-			    Eq(i * mm_entry_size(TOP_LEVEL)))
-			<< "i=" << i;
-	}
-}
-
-/** If nothing is mapped, unmapping the hypervisor should have no effect. */
-TEST(mm, ptable_unmap_hypervisor_not_mapped)
-{
-	auto test_heap = std::make_unique<uint8_t[]>(TEST_HEAP_SIZE);
-	halloc_init((size_t)test_heap.get(), TEST_HEAP_SIZE);
-
-	struct mm_page_table *table = alloc_page_table();
-	init_absent(table);
-
-	struct mm_ptable ptable;
-	ptable.table = pa_init((uintpaddr_t)table);
-
-	EXPECT_TRUE(mm_ptable_unmap_hypervisor(&ptable, 0));
-
-	for (uint64_t i = 0; i < MM_PTE_PER_PAGE; ++i) {
-		EXPECT_THAT(table->entries[i], Eq(ABSENT_ENTRY)) << "i=" << i;
-	}
-}
-
-/**
- * Unmapping everything should result in an empty page table with no subtables.
- */
-TEST(mm, vm_unmap)
-{
-	auto test_heap = std::make_unique<uint8_t[]>(TEST_HEAP_SIZE);
-	halloc_init((size_t)test_heap.get(), TEST_HEAP_SIZE);
-
-	struct mm_page_table *subtable_a = alloc_page_table();
-	struct mm_page_table *subtable_aa = alloc_page_table();
-	struct mm_page_table *table = alloc_page_table();
-	init_absent(subtable_a);
-	init_absent(subtable_aa);
-	init_absent(table);
-
-	subtable_aa->entries[0] =
-		arch_mm_block_pte(TOP_LEVEL - 2, pa_init(0), 0);
-	subtable_a->entries[0] = arch_mm_table_pte(
-		TOP_LEVEL - 1, pa_init((uintpaddr_t)subtable_aa));
-	table->entries[0] =
-		arch_mm_table_pte(TOP_LEVEL, pa_init((uintpaddr_t)subtable_a));
-
-	struct mm_ptable ptable;
-	ptable.table = pa_init((uintpaddr_t)table);
-
-	EXPECT_TRUE(mm_vm_unmap(&ptable, pa_init(0), pa_init(1), 0));
-
-	for (uint64_t i = 0; i < MM_PTE_PER_PAGE; ++i) {
-		EXPECT_THAT(table->entries[i], Eq(ABSENT_ENTRY)) << "i=" << i;
-	}
-}
-
-/**
- * Mapping a range should result in just the corresponding pages being mapped.
- */
-TEST(mm, vm_identity_map)
-{
-	auto test_heap = std::make_unique<uint8_t[]>(TEST_HEAP_SIZE);
-	halloc_init((size_t)test_heap.get(), TEST_HEAP_SIZE);
-
-	/* Start with an empty page table. */
-	struct mm_page_table *table = alloc_page_table();
-	init_absent(table);
-	struct mm_ptable ptable;
-	ptable.table = pa_init((uintpaddr_t)table);
-
-	/* Try mapping the first page. */
-	ipaddr_t ipa = ipa_init(-1);
-	EXPECT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), pa_init(PAGE_SIZE),
-				       0, &ipa));
-	EXPECT_THAT(ipa_addr(ipa), Eq(0));
-
-	/* Check that the first page is mapped, and nothing else. */
-	for (uint64_t i = 1; i < MM_PTE_PER_PAGE; ++i) {
-		EXPECT_THAT(table->entries[i], Eq(ABSENT_ENTRY)) << "i=" << i;
-	}
-	ASSERT_TRUE(arch_mm_pte_is_table(table->entries[0], TOP_LEVEL));
-	struct mm_page_table *subtable_a =
-		page_table_from_pa(arch_mm_table_from_pte(table->entries[0]));
-	for (uint64_t i = 1; i < MM_PTE_PER_PAGE; ++i) {
-		EXPECT_THAT(subtable_a->entries[i], Eq(ABSENT_ENTRY))
-			<< "i=" << i;
-	}
-	ASSERT_TRUE(
-		arch_mm_pte_is_table(subtable_a->entries[0], TOP_LEVEL - 1));
-	struct mm_page_table *subtable_aa = page_table_from_pa(
-		arch_mm_table_from_pte(subtable_a->entries[0]));
-	for (uint64_t i = 1; i < MM_PTE_PER_PAGE; ++i) {
-		EXPECT_THAT(subtable_aa->entries[i], Eq(ABSENT_ENTRY))
-			<< "i=" << i;
-	}
-	EXPECT_TRUE(
-		arch_mm_pte_is_block(subtable_aa->entries[0], TOP_LEVEL - 2));
-	EXPECT_THAT(pa_addr(arch_mm_block_from_pte(subtable_aa->entries[0])),
-		    Eq(0));
-}
-
-/** Mapping a range that is already mapped should be a no-op. */
-TEST(mm, vm_identity_map_already_mapped)
-{
-	auto test_heap = std::make_unique<uint8_t[]>(TEST_HEAP_SIZE);
-	halloc_init((size_t)test_heap.get(), TEST_HEAP_SIZE);
-
-	/* Start with a full page table mapping everything. */
-	struct mm_page_table *table = alloc_page_table();
-	init_blocks(table, TOP_LEVEL, pa_init(0), 0);
-	struct mm_ptable ptable;
-	ptable.table = pa_init((uintpaddr_t)table);
-
-	/* Try mapping the first page. */
-	ipaddr_t ipa = ipa_init(-1);
-	EXPECT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), pa_init(PAGE_SIZE),
-				       0, &ipa));
-	EXPECT_THAT(ipa_addr(ipa), Eq(0));
-
-	/*
-	 * The table should still be full of blocks, with no subtables or
-	 * anything else.
-	 */
-	for (uint64_t i = 0; i < MM_PTE_PER_PAGE; ++i) {
-		EXPECT_TRUE(arch_mm_pte_is_block(table->entries[i], TOP_LEVEL))
-			<< "i=" << i;
-	}
+	EXPECT_THAT(
+		get_ptable(ptable, mode),
+		AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
+							   _1, TOP_LEVEL))))));
+	mm_ptable_fini(&ptable, mode);
 }
 
 } /* namespace */
diff --git a/src/vm.c b/src/vm.c
index 7387f20..824f1b7 100644
--- a/src/vm.c
+++ b/src/vm.c
@@ -74,7 +74,7 @@
 	struct vcpu *vcpu = &vm->vcpus[index];
 	if (index < vm->vcpu_count) {
 		arch_regs_init(&vcpu->regs, vm->id == HF_PRIMARY_VM_ID, vm->id,
-			       vm->ptable.table, entry, arg);
+			       vm->ptable.root, entry, arg);
 		vcpu_on(vcpu);
 	}
 }