Remove MM_MODE_STAGE1.

This doesn't belong in the modes and wasn't being used directly outside
of mm.c, where the remaining uses are migrated to an internal flag. The
memory management functions are now split in two: those for the VM page
tables, i.e. stage-2, and those for the hypervisor's own page tables,
i.e. stage-1.
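
As an illustrative sketch (not part of the change itself), call sites
migrate along these lines:

    /* Before: the stage was selected by a mode bit. */
    mm_ptable_init(&vm->ptable, 0, ppool);          /* stage-2 */
    mm_ptable_init(&ptable, MM_MODE_STAGE1, ppool); /* stage-1 */

    /* After: the stage is implied by the function or an mm.c-internal flag. */
    mm_vm_init(&vm->ptable, ppool);                 /* stage-2, VM tables */
    mm_ptable_init(&ptable, MM_FLAG_STAGE1, ppool); /* stage-1, inside mm.c */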

Change-Id: Ib84736a905981c151a9279dac5089956d7d9ed85
diff --git a/inc/hf/arch/mm.h b/inc/hf/arch/mm.h
index d6c8211..4a9f535 100644
--- a/inc/hf/arch/mm.h
+++ b/inc/hf/arch/mm.h
@@ -118,20 +118,39 @@
 void arch_mm_write_back_dcache(void *base, size_t size);
 
 /**
- * Gets the maximum level allowed in the page table for the given mode.
+ * Gets the maximum level allowed in the page table for stage-1.
  */
-uint8_t arch_mm_max_level(int mode);
+uint8_t arch_mm_stage1_max_level(void);
 
 /**
- * Gets the number of concatenated page tables used at the root for the given
- * mode.
+ * Gets the maximum level allowed in the page table for stage-2.
  */
-uint8_t arch_mm_root_table_count(int mode);
+uint8_t arch_mm_stage2_max_level(void);
 
 /**
- * Converts the mode into attributes for a block PTE.
+ * Gets the number of concatenated page tables used at the root for stage-1.
+ *
+ * Tables are concatenated at the root to avoid introducing another level in
+ * the page table, keeping the table shallow and wide. Each level is an extra
+ * memory access when walking the table, so keeping it shallow reduces memory
+ * accesses and aids performance.
  */
-uint64_t arch_mm_mode_to_attrs(int mode);
+uint8_t arch_mm_stage1_root_table_count(void);
+
+/**
+ * Gets the number of concatenated page tables used at the root for stage-2.
+ */
+uint8_t arch_mm_stage2_root_table_count(void);
+
+/**
+ * Converts the mode into stage-1 attributes for a block PTE.
+ */
+uint64_t arch_mm_mode_to_stage1_attrs(int mode);
+
+/**
+ * Converts the mode into stage-2 attributes for a block PTE.
+ */
+uint64_t arch_mm_mode_to_stage2_attrs(int mode);
 
 /**
  * Converts the stage-2 block attributes back to the corresponding mode.
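
A minimal sketch of how the hypervisor's own stage-1 parameters are now
obtained, assuming the calling pattern in src/mm.c:

    uint64_t attrs = arch_mm_mode_to_stage1_attrs(MM_MODE_R | MM_MODE_W);
    uint8_t root_level = arch_mm_stage1_max_level() + 1;
    uint8_t root_tables = arch_mm_stage1_root_table_count();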
diff --git a/inc/hf/mm.h b/inc/hf/mm.h
index 509d79e..d7a1c4c 100644
--- a/inc/hf/mm.h
+++ b/inc/hf/mm.h
@@ -72,17 +72,10 @@
 #define MM_MODE_SHARED  0x0040
 
 /**
- * This flag indicates that the mapping is intended to be used in a first
- * stage translation table, which might have different encodings for the
- * attribute bits than the second stage table.
- */
-#define MM_MODE_STAGE1 0x0080
-
-/**
  * This flag indicates that no TLB invalidations should be issued for the
  * changes in the page table.
  */
-#define MM_MODE_NOINVALIDATE 0x0100
+#define MM_MODE_NOINVALIDATE 0x0080
 
 /* clang-format on */
 
@@ -99,16 +92,15 @@
 	paddr_t root;
 };
 
-bool mm_ptable_init(struct mm_ptable *t, int mode, struct mpool *ppool);
-void mm_ptable_fini(struct mm_ptable *t, int mode, struct mpool *ppool);
-void mm_ptable_dump(struct mm_ptable *t, int mode);
-void mm_ptable_defrag(struct mm_ptable *t, int mode, struct mpool *ppool);
-
+bool mm_vm_init(struct mm_ptable *t, struct mpool *ppool);
+void mm_vm_fini(struct mm_ptable *t, struct mpool *ppool);
 bool mm_vm_identity_map(struct mm_ptable *t, paddr_t begin, paddr_t end,
 			int mode, ipaddr_t *ipa, struct mpool *ppool);
 bool mm_vm_unmap(struct mm_ptable *t, paddr_t begin, paddr_t end, int mode,
 		 struct mpool *ppool);
 bool mm_vm_unmap_hypervisor(struct mm_ptable *t, int mode, struct mpool *ppool);
+void mm_vm_defrag(struct mm_ptable *t, struct mpool *ppool);
+void mm_vm_dump(struct mm_ptable *t);
 bool mm_vm_get_mode(struct mm_ptable *t, ipaddr_t begin, ipaddr_t end,
 		    int *mode);
 
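
A typical lifecycle of a VM (stage-2) table with the renamed API, as a
sketch mirroring src/vm.c and src/api.c (ppool, begin and end are assumed
to exist):

    struct mm_ptable t;

    if (mm_vm_init(&t, ppool)) {
            mm_vm_identity_map(&t, begin, end, MM_MODE_R | MM_MODE_W,
                               NULL, ppool);
            mm_vm_defrag(&t, ppool);
            mm_vm_fini(&t, ppool);
    }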
diff --git a/src/api.c b/src/api.c
index ae03306..fa26e1d 100644
--- a/src/api.c
+++ b/src/api.c
@@ -342,7 +342,7 @@
 				NULL, &local_page_pool)) {
 		/* TODO: partial defrag of failed range. */
 		/* Recover any memory consumed in failed mapping. */
-		mm_ptable_defrag(&vm->ptable, 0, &local_page_pool);
+		mm_vm_defrag(&vm->ptable, &local_page_pool);
 		goto fail_undo_send;
 	}
 
diff --git a/src/arch/aarch64/mm.c b/src/arch/aarch64/mm.c
index 5c5b281..ed29ab5 100644
--- a/src/arch/aarch64/mm.c
+++ b/src/arch/aarch64/mm.c
@@ -298,81 +298,89 @@
 	__asm__ volatile("dsb sy");
 }
 
-uint64_t arch_mm_mode_to_attrs(int mode)
+uint64_t arch_mm_mode_to_stage1_attrs(int mode)
 {
 	uint64_t attrs = 0;
 
+	attrs |= STAGE1_AF | STAGE1_SH(OUTER_SHAREABLE);
+
+	/* Define the execute bits. */
+	if (!(mode & MM_MODE_X)) {
+		attrs |= STAGE1_XN;
+	}
+
+	/* Define the read/write bits. */
+	if (mode & MM_MODE_W) {
+		attrs |= STAGE1_AP(STAGE1_READWRITE);
+	} else {
+		attrs |= STAGE1_AP(STAGE1_READONLY);
+	}
+
+	/* Define the memory attribute bits. */
+	if (mode & MM_MODE_D) {
+		attrs |= STAGE1_ATTRINDX(STAGE1_DEVICEINDX);
+	} else {
+		attrs |= STAGE1_ATTRINDX(STAGE1_NORMALINDX);
+	}
+
 	/* Define the valid bit. */
 	if (!(mode & MM_MODE_INVALID)) {
 		attrs |= PTE_VALID;
 	}
 
-	if (mode & MM_MODE_STAGE1) {
-		attrs |= STAGE1_AF | STAGE1_SH(OUTER_SHAREABLE);
+	return attrs;
+}
 
-		/* Define the execute bits. */
-		if (!(mode & MM_MODE_X)) {
-			attrs |= STAGE1_XN;
-		}
+uint64_t arch_mm_mode_to_stage2_attrs(int mode)
+{
+	uint64_t attrs = 0;
+	uint64_t access = 0;
 
-		/* Define the read/write bits. */
-		if (mode & MM_MODE_W) {
-			attrs |= STAGE1_AP(STAGE1_READWRITE);
-		} else {
-			attrs |= STAGE1_AP(STAGE1_READONLY);
-		}
+	/*
+	 * Non-shareable is the "neutral" share mode, i.e., the
+	 * shareability attribute of stage 1 will determine the actual
+	 * attribute.
+	 */
+	attrs |= STAGE2_AF | STAGE2_SH(NON_SHAREABLE);
 
-		/* Define the memory attribute bits. */
-		if (mode & MM_MODE_D) {
-			attrs |= STAGE1_ATTRINDX(STAGE1_DEVICEINDX);
-		} else {
-			attrs |= STAGE1_ATTRINDX(STAGE1_NORMALINDX);
-		}
+	/* Define the read/write bits. */
+	if (mode & MM_MODE_R) {
+		access |= STAGE2_ACCESS_READ;
+	}
+
+	if (mode & MM_MODE_W) {
+		access |= STAGE2_ACCESS_WRITE;
+	}
+
+	attrs |= STAGE2_S2AP(access);
+
+	/* Define the execute bits. */
+	if (mode & MM_MODE_X) {
+		attrs |= STAGE2_XN(STAGE2_EXECUTE_ALL);
 	} else {
-		uint64_t access = 0;
+		attrs |= STAGE2_XN(STAGE2_EXECUTE_NONE);
+	}
 
-		/*
-		 * Non-shareable is the "neutral" share mode, i.e., the
-		 * shareability attribute of stage 1 will determine the actual
-		 * attribute.
-		 */
-		attrs |= STAGE2_AF | STAGE2_SH(NON_SHAREABLE);
+	/*
+	 * Define the memory attribute bits, using the "neutral" values
+	 * which give the stage-1 attributes full control of the
+	 * attributes.
+	 */
+	attrs |= STAGE2_MEMATTR_NORMAL(STAGE2_WRITEBACK, STAGE2_WRITEBACK);
 
-		/* Define the read/write bits. */
-		if (mode & MM_MODE_R) {
-			access |= STAGE2_ACCESS_READ;
-		}
+	/* Define the ownership bit. */
+	if (!(mode & MM_MODE_UNOWNED)) {
+		attrs |= STAGE2_SW_OWNED;
+	}
 
-		if (mode & MM_MODE_W) {
-			access |= STAGE2_ACCESS_WRITE;
-		}
+	/* Define the exclusivity bit. */
+	if (!(mode & MM_MODE_SHARED)) {
+		attrs |= STAGE2_SW_EXCLUSIVE;
+	}
 
-		attrs |= STAGE2_S2AP(access);
-
-		/* Define the execute bits. */
-		if (mode & MM_MODE_X) {
-			attrs |= STAGE2_XN(STAGE2_EXECUTE_ALL);
-		} else {
-			attrs |= STAGE2_XN(STAGE2_EXECUTE_NONE);
-		}
-
-		/*
-		 * Define the memory attribute bits, using the "neutral" values
-		 * which give the stage-1 attributes full control of the
-		 * attributes.
-		 */
-		attrs |= STAGE2_MEMATTR_NORMAL(STAGE2_WRITEBACK,
-					       STAGE2_WRITEBACK);
-
-		/* Define the ownership bit. */
-		if (!(mode & MM_MODE_UNOWNED)) {
-			attrs |= STAGE2_SW_OWNED;
-		}
-
-		/* Define the exclusivity bit. */
-		if (!(mode & MM_MODE_SHARED)) {
-			attrs |= STAGE2_SW_EXCLUSIVE;
-		}
+	/* Define the valid bit. */
+	if (!(mode & MM_MODE_INVALID)) {
+		attrs |= PTE_VALID;
 	}
 
 	return attrs;
@@ -410,39 +418,29 @@
 	return mode;
 }
 
-/**
- * Determines the maximum level supported by the given mode.
- */
-uint8_t arch_mm_max_level(int mode)
+uint8_t arch_mm_stage1_max_level(void)
 {
-	if (mode & MM_MODE_STAGE1) {
-		/*
-		 * For stage 1 we hard-code this to 2 for now so that we can
-		 * save one page table level at the expense of limiting the
-		 * physical memory to 512GB.
-		 */
-		return 2;
-	}
+	/*
+	 * For stage 1 we hard-code this to 2 for now so that we can
+	 * save one page table level at the expense of limiting the
+	 * physical memory to 512GB.
+	 */
+	return 2;
+}
 
+uint8_t arch_mm_stage2_max_level(void)
+{
 	return mm_s2_max_level;
 }
 
-/**
- * Determines the number of concatenated tables at the root of the page table
- * for the given mode.
- *
- * Tables are concatenated at the root to avoid introducing another level in the
- * page table meaning the table is shallow and wide. Each level is an extra
- * memory access when walking the table so keeping it shallow reducing the
- * memory accesses to aid performance.
- */
-uint8_t arch_mm_root_table_count(int mode)
+uint8_t arch_mm_stage1_root_table_count(void)
 {
-	if (mode & MM_MODE_STAGE1) {
-		/* Stage 1 doesn't concatenate tables. */
-		return 1;
-	}
+	/* Stage 1 doesn't concatenate tables. */
+	return 1;
+}
 
+uint8_t arch_mm_stage2_root_table_count(void)
+{
 	return mm_s2_root_table_count;
 }
 
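
As a worked sketch of arch_mm_mode_to_stage2_attrs() above, a readable,
writable, non-executable stage-2 mapping resolves to:

    uint64_t attrs = arch_mm_mode_to_stage2_attrs(MM_MODE_R | MM_MODE_W);
    /*
     * attrs == STAGE2_AF | STAGE2_SH(NON_SHAREABLE)
     *        | STAGE2_S2AP(STAGE2_ACCESS_READ | STAGE2_ACCESS_WRITE)
     *        | STAGE2_XN(STAGE2_EXECUTE_NONE)
     *        | STAGE2_MEMATTR_NORMAL(STAGE2_WRITEBACK, STAGE2_WRITEBACK)
     *        | STAGE2_SW_OWNED | STAGE2_SW_EXCLUSIVE | PTE_VALID
     */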
diff --git a/src/arch/fake/mm.c b/src/arch/fake/mm.c
index 561aa4d..ffe52f2 100644
--- a/src/arch/fake/mm.c
+++ b/src/arch/fake/mm.c
@@ -23,10 +23,9 @@
  * to memory. The flags are shifted to avoid equality of modes and attributes.
  */
 #define PTE_ATTR_MODE_SHIFT 48
-#define PTE_ATTR_MODE_MASK                                               \
-	((uint64_t)(MM_MODE_R | MM_MODE_W | MM_MODE_X | MM_MODE_D |      \
-		    MM_MODE_INVALID | MM_MODE_UNOWNED | MM_MODE_SHARED | \
-		    MM_MODE_STAGE1)                                      \
+#define PTE_ATTR_MODE_MASK                                              \
+	((uint64_t)(MM_MODE_R | MM_MODE_W | MM_MODE_X | MM_MODE_D |     \
+		    MM_MODE_INVALID | MM_MODE_UNOWNED | MM_MODE_SHARED) \
 	 << PTE_ATTR_MODE_SHIFT)
 
 /* The bit to distinguish a table from a block is the highest of the page bits.
@@ -122,27 +121,38 @@
 	/* There's no modelling of the stage-2 TLB. */
 }
 
-uint8_t arch_mm_max_level(int mode)
+uint8_t arch_mm_stage1_max_level(void)
 {
-	/* All modes have 3 levels in the page table. */
-	(void)mode;
 	return 2;
 }
 
-uint8_t arch_mm_root_table_count(int mode)
+uint8_t arch_mm_stage2_max_level(void)
 {
-	/* Stage-1 has no concatenated tables but stage 2 has 4 of them. */
-	return (mode & MM_MODE_STAGE1) ? 1 : 4;
+	return 2;
 }
 
-uint64_t arch_mm_mode_to_attrs(int mode)
+uint8_t arch_mm_stage1_root_table_count(void)
+{
+	return 1;
+}
+
+uint8_t arch_mm_stage2_root_table_count(void)
+{
+	/* Stage-2 concatenates 4 page tables at the root. */
+	return 4;
+}
+
+uint64_t arch_mm_mode_to_stage1_attrs(int mode)
 {
 	mode &= ~MM_MODE_NOINVALIDATE;
 
+	return ((uint64_t)mode << PTE_ATTR_MODE_SHIFT) & PTE_ATTR_MODE_MASK;
+}
+
+uint64_t arch_mm_mode_to_stage2_attrs(int mode)
+{
 	/* Stage-2 ignores the device mode. */
-	if (!(mode & MM_MODE_STAGE1)) {
-		mode &= ~MM_MODE_D;
-	}
+	mode &= ~MM_MODE_NOINVALIDATE & ~MM_MODE_D;
 
 	return ((uint64_t)mode << PTE_ATTR_MODE_SHIFT) & PTE_ATTR_MODE_MASK;
 }
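
In the fake architecture the attributes are just the mode shifted into
the PTE's upper bits, so the stage-2 conversion can be checked directly
(sketch):

    uint64_t attrs = arch_mm_mode_to_stage2_attrs(MM_MODE_R | MM_MODE_D);
    /* Stage-2 strips MM_MODE_D, so only MM_MODE_R survives: */
    /* attrs == (uint64_t)MM_MODE_R << PTE_ATTR_MODE_SHIFT */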
diff --git a/src/mm.c b/src/mm.c
index e46902a..28f5428 100644
--- a/src/mm.c
+++ b/src/mm.c
@@ -47,10 +47,10 @@
 /* Keep macro alignment */
 /* clang-format off */
 
-#define MAP_FLAG_COMMIT 0x01
-#define MAP_FLAG_UNMAP  0x02
-#define MAP_FLAG_NOBBM  0x04
-#define MAP_FLAG_STAGE1 0x08
+#define MM_FLAG_COMMIT       0x01
+#define MM_FLAG_UNMAP        0x02
+#define MM_FLAG_NOINVALIDATE 0x04
+#define MM_FLAG_STAGE1       0x08
 
 /* clang-format on */
 
@@ -144,12 +144,29 @@
 }
 
 /**
+ * Returns the maximum level in the page table given the flags.
+ */
+static uint8_t mm_max_level(int flags)
+{
+	return (flags & MM_FLAG_STAGE1) ? arch_mm_stage1_max_level()
+					: arch_mm_stage2_max_level();
+}
+
+/**
+ * Returns the number of root-level tables given the flags.
+ */
+static uint8_t mm_root_table_count(int flags)
+{
+	return (flags & MM_FLAG_STAGE1) ? arch_mm_stage1_root_table_count()
+					: arch_mm_stage2_root_table_count();
+}
+
+/**
  * Invalidates the TLB for the given address range.
  */
-static void mm_invalidate_tlb(ptable_addr_t begin, ptable_addr_t end,
-			      bool stage1)
+static void mm_invalidate_tlb(ptable_addr_t begin, ptable_addr_t end, int flags)
 {
-	if (stage1) {
+	if (flags & MM_FLAG_STAGE1) {
 		arch_mm_invalidate_stage1_range(va_init(begin), va_init(end));
 	} else {
 		arch_mm_invalidate_stage2_range(ipa_init(begin), ipa_init(end));
@@ -180,6 +197,58 @@
 }
 
 /**
+ * Initialises the given page table.
+ */
+static bool mm_ptable_init(struct mm_ptable *t, int flags, struct mpool *ppool)
+{
+	uint8_t i;
+	size_t j;
+	struct mm_page_table *tables;
+	uint8_t root_table_count = mm_root_table_count(flags);
+
+	tables = mm_alloc_page_tables(root_table_count, ppool);
+	if (tables == NULL) {
+		return false;
+	}
+
+	for (i = 0; i < root_table_count; i++) {
+		for (j = 0; j < MM_PTE_PER_PAGE; j++) {
+			tables[i].entries[j] =
+				arch_mm_absent_pte(mm_max_level(flags));
+		}
+	}
+
+	/*
+	 * TODO: halloc could return a virtual or physical address if mm not
+	 * enabled?
+	 */
+	t->root = pa_init((uintpaddr_t)tables);
+
+	return true;
+}
+
+/**
+ * Frees all memory associated with the given page table.
+ */
+static void mm_ptable_fini(struct mm_ptable *t, int flags, struct mpool *ppool)
+{
+	struct mm_page_table *tables = mm_page_table_from_pa(t->root);
+	uint8_t level = mm_max_level(flags);
+	uint8_t root_table_count = mm_root_table_count(flags);
+	uint8_t i;
+	uint64_t j;
+
+	for (i = 0; i < root_table_count; ++i) {
+		for (j = 0; j < MM_PTE_PER_PAGE; ++j) {
+			mm_free_page_pte(tables[i].entries[j], level, ppool);
+		}
+	}
+
+	mpool_add_chunk(ppool, tables,
+			sizeof(struct mm_page_table) * root_table_count);
+}
+
+/**
  * Replaces a page table entry with the given value. If both old and new values
  * are valid, it performs a break-before-make sequence where it first writes an
  * invalid value to the PTE, flushes the TLB, then writes the actual new value.
@@ -195,11 +264,10 @@
 	 * We need to do the break-before-make sequence if both values are
-	 * present, and if it hasn't been inhibited by the NOBBM flag.
+	 * present, and if it hasn't been inhibited by the NOINVALIDATE flag.
 	 */
-	if (!(flags & MAP_FLAG_NOBBM) && arch_mm_pte_is_valid(v, level) &&
+	if (!(flags & MM_FLAG_NOINVALIDATE) && arch_mm_pte_is_valid(v, level) &&
 	    arch_mm_pte_is_valid(new_pte, level)) {
 		*pte = arch_mm_absent_pte(level);
-		mm_invalidate_tlb(begin, begin + mm_entry_size(level),
-				  flags & MAP_FLAG_STAGE1);
+		mm_invalidate_tlb(begin, begin + mm_entry_size(level), flags);
 	}
 
 	/* Assign the new pte. */
@@ -286,7 +354,7 @@
 /**
  * Updates the page table at the given level to map the given address range to a
  * physical range using the provided (architecture-specific) attributes. Or if
- * MAP_FLAG_UNMAP is set, unmap the given range instead.
+ * MM_FLAG_UNMAP is set, unmap the given range instead.
  *
  * This function calls itself recursively if it needs to update additional
  * levels, but the recursion is bound by the maximum number of levels in a page
@@ -299,8 +367,8 @@
 	pte_t *pte = &table->entries[mm_index(begin, level)];
 	ptable_addr_t level_end = mm_level_end(begin, level);
 	size_t entry_size = mm_entry_size(level);
-	bool commit = flags & MAP_FLAG_COMMIT;
-	bool unmap = flags & MAP_FLAG_UNMAP;
+	bool commit = flags & MM_FLAG_COMMIT;
+	bool unmap = flags & MM_FLAG_UNMAP;
 
 	/* Cap end so that we don't go over the current level max. */
 	if (end > level_end) {
@@ -378,7 +446,7 @@
 /**
  * Updates the page table from the root to map the given address range to a
  * physical range using the provided (architecture-specific) attributes. Or if
- * MAP_FLAG_UNMAP is set, unmap the given range instead.
+ * MM_FLAG_UNMAP is set, unmap the given range instead.
  */
 static bool mm_map_root(struct mm_ptable *t, ptable_addr_t begin,
 			ptable_addr_t end, uint64_t attrs, uint8_t root_level,
@@ -406,18 +474,12 @@
  * provided.
  */
 static bool mm_ptable_identity_update(struct mm_ptable *t, paddr_t pa_begin,
-				      paddr_t pa_end, int mode,
+				      paddr_t pa_end, uint64_t attrs, int flags,
 				      struct mpool *ppool)
 {
-	uint64_t attrs = arch_mm_mode_to_attrs(mode);
-	int flags = (mode & MM_MODE_NOINVALIDATE ? MAP_FLAG_NOBBM : 0) |
-		    (mode & MM_MODE_STAGE1 ? MAP_FLAG_STAGE1 : 0) |
-		    (mode & MM_MODE_INVALID && mode & MM_MODE_UNOWNED
-			     ? MAP_FLAG_UNMAP
-			     : 0);
-	uint8_t root_level = arch_mm_max_level(mode) + 1;
+	uint8_t root_level = mm_max_level(flags) + 1;
 	ptable_addr_t ptable_end =
-		arch_mm_root_table_count(mode) * mm_entry_size(root_level);
+		mm_root_table_count(flags) * mm_entry_size(root_level);
 	ptable_addr_t end = mm_round_up_to_page(pa_addr(pa_end));
 	ptable_addr_t begin = pa_addr(arch_mm_clear_pa(pa_begin));
 
@@ -442,42 +504,19 @@
 	 */
 	if (!mm_map_root(t, begin, end, attrs, root_level, flags, ppool) ||
 	    !mm_map_root(t, begin, end, attrs, root_level,
-			 flags | MAP_FLAG_COMMIT, ppool)) {
+			 flags | MM_FLAG_COMMIT, ppool)) {
 		return false;
 	}
 
 	/* Invalidate the tlb. */
-	if (!(mode & MM_MODE_NOINVALIDATE)) {
-		mm_invalidate_tlb(begin, end, (mode & MM_MODE_STAGE1) != 0);
+	if (!(flags & MM_FLAG_NOINVALIDATE)) {
+		mm_invalidate_tlb(begin, end, flags);
 	}
 
 	return true;
 }
 
 /**
- * Updates the given table such that the given physical address range is mapped
- * into the address space with the architecture-agnostic mode provided.
- */
-static bool mm_ptable_identity_map(struct mm_ptable *t, paddr_t pa_begin,
-				   paddr_t pa_end, int mode,
-				   struct mpool *ppool)
-{
-	return mm_ptable_identity_update(t, pa_begin, pa_end, mode, ppool);
-}
-
-/**
- * Updates the given table such that the given physical address range is not
- * mapped into the address space.
- */
-static bool mm_ptable_unmap(struct mm_ptable *t, paddr_t pa_begin,
-			    paddr_t pa_end, int mode, struct mpool *ppool)
-{
-	return mm_ptable_identity_update(
-		t, pa_begin, pa_end, mode | MM_MODE_UNOWNED | MM_MODE_INVALID,
-		ppool);
-}
-
-/**
  * Writes the given table to the debug log, calling itself recursively to
  * write sub-tables.
  */
@@ -506,11 +545,11 @@
 /**
  * Writes the given table to the debug log.
  */
-void mm_ptable_dump(struct mm_ptable *t, int mode)
+static void mm_ptable_dump(struct mm_ptable *t, int flags)
 {
 	struct mm_page_table *tables = mm_page_table_from_pa(t->root);
-	int max_level = arch_mm_max_level(mode);
-	uint8_t root_table_count = arch_mm_root_table_count(mode);
+	uint8_t max_level = mm_max_level(flags);
+	uint8_t root_table_count = mm_root_table_count(flags);
 	uint8_t i;
 
 	for (i = 0; i < root_table_count; ++i) {
@@ -604,11 +643,12 @@
  * Defragments the given page table by converting page table references to
  * blocks whenever possible.
  */
-void mm_ptable_defrag(struct mm_ptable *t, int mode, struct mpool *ppool)
+static void mm_ptable_defrag(struct mm_ptable *t, int flags,
+			     struct mpool *ppool)
 {
 	struct mm_page_table *tables = mm_page_table_from_pa(t->root);
-	uint8_t level = arch_mm_max_level(mode);
-	uint8_t root_table_count = arch_mm_root_table_count(mode);
+	uint8_t level = mm_max_level(flags);
+	uint8_t root_table_count = mm_root_table_count(flags);
 	uint8_t i;
 	uint64_t j;
 
@@ -688,12 +728,12 @@
 static bool mm_vm_get_attrs(struct mm_ptable *t, ptable_addr_t begin,
 			    ptable_addr_t end, uint64_t *attrs)
 {
-	int mode = 0;
-	uint8_t max_level = arch_mm_max_level(mode);
+	int flags = 0;
+	uint8_t max_level = mm_max_level(flags);
 	uint8_t root_level = max_level + 1;
 	size_t root_table_size = mm_entry_size(root_level);
 	ptable_addr_t ptable_end =
-		arch_mm_root_table_count(mode) * mm_entry_size(root_level);
+		mm_root_table_count(flags) * mm_entry_size(root_level);
 	struct mm_page_table *table;
 	bool got_attrs = false;
 
@@ -720,56 +760,14 @@
 	return got_attrs;
 }
 
-/**
- * Initialises the given page table.
- */
-bool mm_ptable_init(struct mm_ptable *t, int mode, struct mpool *ppool)
+bool mm_vm_init(struct mm_ptable *t, struct mpool *ppool)
 {
-	uint8_t i;
-	size_t j;
-	struct mm_page_table *tables;
-	uint8_t root_table_count = arch_mm_root_table_count(mode);
-
-	tables = mm_alloc_page_tables(root_table_count, ppool);
-	if (tables == NULL) {
-		return false;
-	}
-
-	for (i = 0; i < root_table_count; i++) {
-		for (j = 0; j < MM_PTE_PER_PAGE; j++) {
-			tables[i].entries[j] =
-				arch_mm_absent_pte(arch_mm_max_level(mode));
-		}
-	}
-
-	/*
-	 * TODO: halloc could return a virtual or physical address if mm not
-	 * enabled?
-	 */
-	t->root = pa_init((uintpaddr_t)tables);
-
-	return true;
+	return mm_ptable_init(t, 0, ppool);
 }
 
-/**
- * Frees all memory associated with the give page table.
- */
-void mm_ptable_fini(struct mm_ptable *t, int mode, struct mpool *ppool)
+void mm_vm_fini(struct mm_ptable *t, struct mpool *ppool)
 {
-	struct mm_page_table *tables = mm_page_table_from_pa(t->root);
-	uint8_t level = arch_mm_max_level(mode);
-	uint8_t root_table_count = arch_mm_root_table_count(mode);
-	uint8_t i;
-	uint64_t j;
-
-	for (i = 0; i < root_table_count; ++i) {
-		for (j = 0; j < MM_PTE_PER_PAGE; ++j) {
-			mm_free_page_pte(tables[i].entries[j], level, ppool);
-		}
-	}
-
-	mpool_add_chunk(ppool, tables,
-			sizeof(struct mm_page_table) * root_table_count);
+	mm_ptable_fini(t, 0, ppool);
 }
 
 /**
@@ -780,8 +778,10 @@
 bool mm_vm_identity_map(struct mm_ptable *t, paddr_t begin, paddr_t end,
 			int mode, ipaddr_t *ipa, struct mpool *ppool)
 {
-	bool success = mm_ptable_identity_map(t, begin, end,
-					      mode & ~MM_MODE_STAGE1, ppool);
+	int flags = (mode & MM_MODE_NOINVALIDATE ? MM_FLAG_NOINVALIDATE : 0);
+	bool success = mm_ptable_identity_update(
+		t, begin, end, arch_mm_mode_to_stage2_attrs(mode), flags,
+		ppool);
 
 	if (success && ipa != NULL) {
 		*ipa = ipa_from_pa(begin);
@@ -791,13 +791,19 @@
 }
 
 /**
- * Updates the VM's table such that the given physical address range is not
- * mapped in the address space.
+ * Updates the VM's table such that the given physical address range has no
+ * connection to the VM.
  */
 bool mm_vm_unmap(struct mm_ptable *t, paddr_t begin, paddr_t end, int mode,
 		 struct mpool *ppool)
 {
-	return mm_ptable_unmap(t, begin, end, mode & ~MM_MODE_STAGE1, ppool);
+	int flags = (mode & MM_MODE_NOINVALIDATE ? MM_FLAG_NOINVALIDATE : 0) |
+		    MM_FLAG_UNMAP;
+	return mm_ptable_identity_update(
+		t, begin, end,
+		arch_mm_mode_to_stage2_attrs(MM_MODE_UNOWNED | MM_MODE_INVALID |
+					     MM_MODE_SHARED),
+		flags, ppool);
 }
 
 /**
@@ -815,6 +821,22 @@
 }
 
 /**
+ * Writes the given page table of a VM to the debug log.
+ */
+void mm_vm_dump(struct mm_ptable *t)
+{
+	mm_ptable_dump(t, 0);
+}
+
+/**
+ * Defragments the VM page table.
+ */
+void mm_vm_defrag(struct mm_ptable *t, struct mpool *ppool)
+{
+	mm_ptable_defrag(t, 0, ppool);
+}
+
+/**
- * Gets the mode of the give range of intermediate physical addresses if they
+ * Gets the mode of the given range of intermediate physical addresses if they
  * are mapped with the same mode.
  *
@@ -841,8 +863,12 @@
  */
 void *mm_identity_map(paddr_t begin, paddr_t end, int mode, struct mpool *ppool)
 {
-	if (mm_ptable_identity_map(&ptable, begin, end, mode | MM_MODE_STAGE1,
-				   ppool)) {
+	int flags = (mode & MM_MODE_NOINVALIDATE ? MM_FLAG_NOINVALIDATE : 0) |
+		    MM_FLAG_STAGE1;
+
+	if (mm_ptable_identity_update(&ptable, begin, end,
+				      arch_mm_mode_to_stage1_attrs(mode), flags,
+				      ppool)) {
 		return ptr_from_va(va_from_pa(begin));
 	}
 
@@ -855,8 +881,13 @@
  */
 bool mm_unmap(paddr_t begin, paddr_t end, int mode, struct mpool *ppool)
 {
-	return mm_ptable_unmap(&ptable, begin, end, mode | MM_MODE_STAGE1,
-			       ppool);
+	int flags = (mode & MM_MODE_NOINVALIDATE ? MM_FLAG_NOINVALIDATE : 0) |
+		    MM_FLAG_STAGE1 | MM_FLAG_UNMAP;
+	return mm_ptable_identity_update(
+		&ptable, begin, end,
+		arch_mm_mode_to_stage1_attrs(MM_MODE_UNOWNED | MM_MODE_INVALID |
+					     MM_MODE_SHARED),
+		flags, ppool);
 }
 
 /**
@@ -871,17 +902,16 @@
 	dlog("data: 0x%x - 0x%x\n", pa_addr(layout_data_begin()),
 	     pa_addr(layout_data_end()));
 
-	if (!mm_ptable_init(&ptable, MM_MODE_STAGE1, ppool)) {
+	if (!mm_ptable_init(&ptable, MM_FLAG_STAGE1, ppool)) {
 		dlog("Unable to allocate memory for page table.\n");
 		return false;
 	}
 
 	/* Map page for uart. */
 	/* TODO: We may not want to map this. */
-	mm_ptable_identity_map(
-		&ptable, pa_init(PL011_BASE),
-		pa_add(pa_init(PL011_BASE), PAGE_SIZE),
-		MM_MODE_R | MM_MODE_W | MM_MODE_D | MM_MODE_STAGE1, ppool);
+	mm_identity_map(pa_init(PL011_BASE),
+			pa_add(pa_init(PL011_BASE), PAGE_SIZE),
+			MM_MODE_R | MM_MODE_W | MM_MODE_D, ppool);
 
 	/* Map each section. */
 	mm_identity_map(layout_text_begin(), layout_text_end(), MM_MODE_X,
@@ -906,5 +936,5 @@
  */
 void mm_defrag(struct mpool *ppool)
 {
-	mm_ptable_defrag(&ptable, MM_MODE_STAGE1, ppool);
+	mm_ptable_defrag(&ptable, MM_FLAG_STAGE1, ppool);
 }
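
Note the pattern used by the public entry points above: unmapping is
expressed as an identity update to the absent, unowned state, so mapping
and unmapping share one code path. A condensed sketch:

    /* "Unmap" == map to invalid, unowned and shared, i.e. no connection. */
    int flags = (mode & MM_MODE_NOINVALIDATE ? MM_FLAG_NOINVALIDATE : 0) |
                MM_FLAG_UNMAP;
    mm_ptable_identity_update(
            t, begin, end,
            arch_mm_mode_to_stage2_attrs(MM_MODE_UNOWNED | MM_MODE_INVALID |
                                         MM_MODE_SHARED),
            flags, ppool);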
diff --git a/src/mm_test.cc b/src/mm_test.cc
index c159bba..0f855e5 100644
--- a/src/mm_test.cc
+++ b/src/mm_test.cc
@@ -40,7 +40,7 @@
 using ::testing::Truly;
 
 constexpr size_t TEST_HEAP_SIZE = PAGE_SIZE * 16;
-const int TOP_LEVEL = arch_mm_max_level(0);
+const int TOP_LEVEL = arch_mm_stage2_max_level();
 const paddr_t VM_MEM_END = pa_init(0x200'0000'0000);
 
 /**
@@ -76,10 +76,10 @@
  * Get an STL representation of the ptable.
  */
 std::vector<std::span<pte_t, MM_PTE_PER_PAGE>> get_ptable(
-	const struct mm_ptable &ptable, int mode)
+	const struct mm_ptable &ptable)
 {
 	std::vector<std::span<pte_t, MM_PTE_PER_PAGE>> all;
-	const uint8_t root_table_count = arch_mm_root_table_count(mode);
+	const uint8_t root_table_count = arch_mm_stage2_root_table_count();
 	for (uint8_t i = 0; i < root_table_count; ++i) {
 		all.push_back(get_table(
 			pa_add(ptable.root, i * sizeof(struct mm_page_table))));
@@ -111,13 +111,12 @@
  */
 TEST_F(mm, ptable_init_empty)
 {
-	constexpr int mode = MM_MODE_STAGE1;
 	struct mm_ptable ptable;
-	ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));
+	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
 	EXPECT_THAT(
-		get_ptable(ptable, mode),
-		AllOf(SizeIs(1), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
-	mm_ptable_fini(&ptable, mode, &ppool);
+		get_ptable(ptable),
+		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
+	mm_vm_fini(&ptable, &ppool);
 }
 
 /**
@@ -125,13 +124,12 @@
  */
 TEST_F(mm, ptable_init_concatenated_empty)
 {
-	constexpr int mode = 0;
 	struct mm_ptable ptable;
-	ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));
+	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
 	EXPECT_THAT(
-		get_ptable(ptable, mode),
+		get_ptable(ptable),
 		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
-	mm_ptable_fini(&ptable, mode, &ppool);
+	mm_vm_fini(&ptable, &ppool);
 }
 
 /**
@@ -143,11 +141,11 @@
 	const paddr_t page_begin = pa_init(0);
 	const paddr_t page_end = pa_add(page_begin, PAGE_SIZE);
 	struct mm_ptable ptable;
-	ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));
+	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
 	ASSERT_TRUE(mm_vm_identity_map(&ptable, page_begin, page_end, mode,
 				       nullptr, &ppool));
 
-	auto tables = get_ptable(ptable, mode);
+	auto tables = get_ptable(ptable);
 	EXPECT_THAT(tables, SizeIs(4));
 	ASSERT_THAT(TOP_LEVEL, Eq(2));
 
@@ -173,7 +171,7 @@
 	EXPECT_THAT(pa_addr(arch_mm_block_from_pte(table_l0[0], TOP_LEVEL - 2)),
 		    Eq(pa_addr(page_begin)));
 
-	mm_ptable_fini(&ptable, mode, &ppool);
+	mm_vm_fini(&ptable, &ppool);
 }
 
 /**
@@ -187,12 +185,12 @@
 	const paddr_t map_end = pa_add(map_begin, 268);
 	ipaddr_t ipa = ipa_init(-1);
 	struct mm_ptable ptable;
-	ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));
+	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
 	ASSERT_TRUE(mm_vm_identity_map(&ptable, map_begin, map_end, mode, &ipa,
 				       &ppool));
 	EXPECT_THAT(ipa_addr(ipa), Eq(pa_addr(map_begin)));
 
-	auto tables = get_ptable(ptable, mode);
+	auto tables = get_ptable(ptable);
 	EXPECT_THAT(tables, SizeIs(4));
 	ASSERT_THAT(TOP_LEVEL, Eq(2));
 
@@ -220,7 +218,7 @@
 						   TOP_LEVEL - 2)),
 		    Eq(0x200'0000'0000 - PAGE_SIZE));
 
-	mm_ptable_fini(&ptable, mode, &ppool);
+	mm_vm_fini(&ptable, &ppool);
 }
 
 /**
@@ -232,11 +230,11 @@
 	const paddr_t map_begin = pa_init(0x80'0000'0000 - PAGE_SIZE);
 	const paddr_t map_end = pa_add(map_begin, 2 * PAGE_SIZE);
 	struct mm_ptable ptable;
-	ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));
+	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
 	ASSERT_TRUE(mm_vm_identity_map(&ptable, map_begin, map_end, mode,
 				       nullptr, &ppool));
 
-	auto tables = get_ptable(ptable, mode);
+	auto tables = get_ptable(ptable);
 	EXPECT_THAT(tables, SizeIs(4));
 	EXPECT_THAT(std::span(tables).last(2),
 		    Each(Each(arch_mm_absent_pte(TOP_LEVEL))));
@@ -283,7 +281,7 @@
 		pa_addr(arch_mm_block_from_pte(table1_l0[0], TOP_LEVEL - 2)),
 		Eq(pa_addr(pa_add(map_begin, PAGE_SIZE))));
 
-	mm_ptable_fini(&ptable, mode, &ppool);
+	mm_vm_fini(&ptable, &ppool);
 }
 
 /**
@@ -293,10 +291,10 @@
 {
 	constexpr int mode = 0;
 	struct mm_ptable ptable;
-	ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));
+	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
 	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
 				       nullptr, &ppool));
-	auto tables = get_ptable(ptable, mode);
+	auto tables = get_ptable(ptable);
 	EXPECT_THAT(
 		tables,
 		AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
@@ -310,7 +308,7 @@
 				<< "i=" << i << " j=" << j;
 		}
 	}
-	mm_ptable_fini(&ptable, mode, &ppool);
+	mm_vm_fini(&ptable, &ppool);
 }
 
 /**
@@ -322,17 +320,17 @@
 	constexpr int mode = 0;
 	ipaddr_t ipa = ipa_init(-1);
 	struct mm_ptable ptable;
-	ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));
+	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
 	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
 				       nullptr, &ppool));
 	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), pa_init(PAGE_SIZE),
 				       mode, &ipa, &ppool));
 	EXPECT_THAT(ipa_addr(ipa), Eq(0));
 	EXPECT_THAT(
-		get_ptable(ptable, mode),
+		get_ptable(ptable),
 		AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
 							   _1, TOP_LEVEL))))));
-	mm_ptable_fini(&ptable, mode, &ppool);
+	mm_vm_fini(&ptable, &ppool);
 }
 
 /**
@@ -344,14 +342,14 @@
 	constexpr int mode = 0;
 	ipaddr_t ipa = ipa_init(-1);
 	struct mm_ptable ptable;
-	ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));
+	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
 	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0x1234'5678),
 				       pa_init(0x5000), mode, &ipa, &ppool));
 	EXPECT_THAT(ipa_addr(ipa), Eq(0x1234'5678));
 	EXPECT_THAT(
-		get_ptable(ptable, mode),
+		get_ptable(ptable),
 		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
-	mm_ptable_fini(&ptable, mode, &ppool);
+	mm_vm_fini(&ptable, &ppool);
 }
 
 /**
@@ -366,12 +364,12 @@
 	constexpr int mode = 0;
 	ipaddr_t ipa = ipa_init(-1);
 	struct mm_ptable ptable;
-	ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));
+	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
 	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(20), pa_init(10), mode,
 				       &ipa, &ppool));
 	EXPECT_THAT(ipa_addr(ipa), Eq(20));
 	EXPECT_TRUE(mm_vm_is_mapped(&ptable, ipa));
-	mm_ptable_fini(&ptable, mode, &ppool);
+	mm_vm_fini(&ptable, &ppool);
 }
 
 /**
@@ -386,16 +384,16 @@
 	constexpr int mode = 0;
 	ipaddr_t ipa = ipa_init(-1);
 	struct mm_ptable ptable;
-	ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));
+	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
 	ASSERT_TRUE(mm_vm_identity_map(
 		&ptable, pa_init(0),
 		pa_init(std::numeric_limits<uintpaddr_t>::max()), mode, &ipa,
 		&ppool));
 	EXPECT_THAT(ipa_addr(ipa), Eq(0));
 	EXPECT_THAT(
-		get_ptable(ptable, mode),
+		get_ptable(ptable),
 		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
-	mm_ptable_fini(&ptable, mode, &ppool);
+	mm_vm_fini(&ptable, &ppool);
 }
 
 /**
@@ -406,15 +404,15 @@
 {
 	constexpr int mode = 0;
 	struct mm_ptable ptable;
-	ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));
+	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
 	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0),
 				       pa_init(0xf32'0000'0000'0000), mode,
 				       nullptr, &ppool));
 	EXPECT_THAT(
-		get_ptable(ptable, mode),
+		get_ptable(ptable),
 		AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
 							   _1, TOP_LEVEL))))));
-	mm_ptable_fini(&ptable, mode, &ppool);
+	mm_vm_fini(&ptable, &ppool);
 }
 
 /**
@@ -426,15 +424,15 @@
 	constexpr int mode = 0;
 	ipaddr_t ipa = ipa_init(-1);
 	struct mm_ptable ptable;
-	ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));
+	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
 	ASSERT_TRUE(mm_vm_identity_map(&ptable, VM_MEM_END,
 				       pa_init(0xf0'0000'0000'0000), mode, &ipa,
 				       &ppool));
 	EXPECT_THAT(ipa_addr(ipa), Eq(pa_addr(VM_MEM_END)));
 	EXPECT_THAT(
-		get_ptable(ptable, mode),
+		get_ptable(ptable),
 		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
-	mm_ptable_fini(&ptable, 0, &ppool);
+	mm_vm_fini(&ptable, &ppool);
 }
 
 /**
@@ -447,16 +445,16 @@
 	const paddr_t page_begin = pa_init(34567 * PAGE_SIZE);
 	const paddr_t page_end = pa_add(page_begin, PAGE_SIZE);
 	struct mm_ptable ptable;
-	ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));
+	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
 	ASSERT_TRUE(mm_vm_identity_map(&ptable, page_begin, page_end, mode,
 				       nullptr, &ppool));
 	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
 				       nullptr, &ppool));
 	EXPECT_THAT(
-		get_ptable(ptable, mode),
+		get_ptable(ptable),
 		AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
 							   _1, TOP_LEVEL))))));
-	mm_ptable_fini(&ptable, mode, &ppool);
+	mm_vm_fini(&ptable, &ppool);
 }
 
 /**
@@ -469,13 +467,13 @@
 	const paddr_t page_begin = pa_init(12000 * PAGE_SIZE);
 	const paddr_t page_end = pa_add(page_begin, PAGE_SIZE);
 	struct mm_ptable ptable;
-	ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));
+	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
 	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
 				       nullptr, &ppool));
 	ASSERT_TRUE(mm_vm_unmap(&ptable, page_begin, page_end, mode, &ppool));
 	ASSERT_TRUE(mm_vm_identity_map(&ptable, page_begin, page_end, mode,
 				       nullptr, &ppool));
-	EXPECT_THAT(get_ptable(ptable, mode),
+	EXPECT_THAT(get_ptable(ptable),
 		    AllOf(SizeIs(4),
 			  Each(Each(Truly(std::bind(arch_mm_pte_is_present, _1,
 						    TOP_LEVEL)))),
@@ -483,7 +481,7 @@
 				  arch_mm_pte_is_block, _1, TOP_LEVEL)))),
 			  Contains(Contains(Truly(std::bind(
 				  arch_mm_pte_is_table, _1, TOP_LEVEL))))));
-	mm_ptable_fini(&ptable, mode, &ppool);
+	mm_vm_fini(&ptable, &ppool);
 }
 
 /**
@@ -493,12 +491,12 @@
 {
 	constexpr int mode = 0;
 	struct mm_ptable ptable;
-	ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));
+	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
 	EXPECT_TRUE(mm_vm_unmap_hypervisor(&ptable, mode, &ppool));
 	EXPECT_THAT(
-		get_ptable(ptable, mode),
+		get_ptable(ptable),
 		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
-	mm_ptable_fini(&ptable, mode, &ppool);
+	mm_vm_fini(&ptable, &ppool);
 }
 
 /**
@@ -508,13 +506,13 @@
 {
 	constexpr int mode = 0;
 	struct mm_ptable ptable;
-	ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));
+	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
 	EXPECT_TRUE(mm_vm_unmap(&ptable, pa_init(12345), pa_init(987652), mode,
 				&ppool));
 	EXPECT_THAT(
-		get_ptable(ptable, mode),
+		get_ptable(ptable),
 		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
-	mm_ptable_fini(&ptable, mode, &ppool);
+	mm_vm_fini(&ptable, &ppool);
 }
 
 /**
@@ -528,16 +526,16 @@
 	const paddr_t l1_begin = pa_init(3 * mm_entry_size(1));
 	const paddr_t l1_end = pa_add(l1_begin, 5 * mm_entry_size(1));
 	struct mm_ptable ptable;
-	ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));
+	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
 	ASSERT_TRUE(mm_vm_identity_map(&ptable, l0_begin, l0_end, mode, nullptr,
 				       &ppool));
 	ASSERT_TRUE(mm_vm_identity_map(&ptable, l1_begin, l1_end, mode, nullptr,
 				       &ppool));
 	EXPECT_TRUE(mm_vm_unmap(&ptable, pa_init(0), VM_MEM_END, mode, &ppool));
 	EXPECT_THAT(
-		get_ptable(ptable, mode),
+		get_ptable(ptable),
 		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
-	mm_ptable_fini(&ptable, mode, &ppool);
+	mm_vm_fini(&ptable, &ppool);
 }
 
 /**
@@ -549,15 +547,15 @@
 	const paddr_t map_begin = pa_init(0x160'0000'0000 + PAGE_SIZE);
 	const paddr_t map_end = pa_add(map_begin, PAGE_SIZE);
 	struct mm_ptable ptable;
-	ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));
+	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
 	ASSERT_TRUE(mm_vm_identity_map(&ptable, map_begin, map_end, mode,
 				       nullptr, &ppool));
 	ASSERT_TRUE(mm_vm_unmap(&ptable, pa_add(map_begin, 93),
 				pa_add(map_begin, 99), mode, &ppool));
 	EXPECT_THAT(
-		get_ptable(ptable, mode),
+		get_ptable(ptable),
 		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
-	mm_ptable_fini(&ptable, mode, &ppool);
+	mm_vm_fini(&ptable, &ppool);
 }
 
 /**
@@ -569,14 +567,14 @@
 	const paddr_t map_begin = pa_init(0x180'0000'0000 - PAGE_SIZE);
 	const paddr_t map_end = pa_add(map_begin, 2 * PAGE_SIZE);
 	struct mm_ptable ptable;
-	ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));
+	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
 	ASSERT_TRUE(mm_vm_identity_map(&ptable, map_begin, map_end, mode,
 				       nullptr, &ppool));
 	ASSERT_TRUE(mm_vm_unmap(&ptable, map_begin, map_end, mode, &ppool));
 	EXPECT_THAT(
-		get_ptable(ptable, mode),
+		get_ptable(ptable),
 		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
-	mm_ptable_fini(&ptable, mode, &ppool);
+	mm_vm_fini(&ptable, &ppool);
 }
 
 /**
@@ -586,16 +584,16 @@
 {
 	constexpr int mode = 0;
 	struct mm_ptable ptable;
-	ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));
+	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
 	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
 				       nullptr, &ppool));
 	ASSERT_TRUE(mm_vm_unmap(&ptable, VM_MEM_END, pa_init(0x4000'0000'0000),
 				mode, &ppool));
 	EXPECT_THAT(
-		get_ptable(ptable, mode),
+		get_ptable(ptable),
 		AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
 							   _1, TOP_LEVEL))))));
-	mm_ptable_fini(&ptable, mode, &ppool);
+	mm_vm_fini(&ptable, &ppool);
 }
 
 /**
@@ -606,16 +604,16 @@
 {
 	constexpr int mode = 0;
 	struct mm_ptable ptable;
-	ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));
+	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
 	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
 				       nullptr, &ppool));
 	ASSERT_TRUE(mm_vm_unmap(&ptable, pa_init(0x80'a000'0000), pa_init(27),
 				mode, &ppool));
 	EXPECT_THAT(
-		get_ptable(ptable, mode),
+		get_ptable(ptable),
 		AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
 							   _1, TOP_LEVEL))))));
-	mm_ptable_fini(&ptable, mode, &ppool);
+	mm_vm_fini(&ptable, &ppool);
 }
 
 /**
@@ -631,15 +629,15 @@
 	const paddr_t page_begin = pa_init(0x180'0000'0000);
 	const paddr_t page_end = pa_add(page_begin, PAGE_SIZE);
 	struct mm_ptable ptable;
-	ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));
+	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
 	ASSERT_TRUE(mm_vm_identity_map(&ptable, page_begin, page_end, mode,
 				       nullptr, &ppool));
 	ASSERT_TRUE(mm_vm_unmap(&ptable, pa_add(page_begin, 100),
 				pa_add(page_begin, 50), mode, &ppool));
 	EXPECT_THAT(
-		get_ptable(ptable, mode),
+		get_ptable(ptable),
 		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
-	mm_ptable_fini(&ptable, mode, &ppool);
+	mm_vm_fini(&ptable, &ppool);
 }
 
 /**
@@ -653,7 +651,7 @@
 {
 	constexpr int mode = 0;
 	struct mm_ptable ptable;
-	ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));
+	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
 	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
 				       nullptr, &ppool));
 	ASSERT_TRUE(
@@ -661,10 +659,10 @@
 			    pa_init(std::numeric_limits<uintpaddr_t>::max()),
 			    mode, &ppool));
 	EXPECT_THAT(
-		get_ptable(ptable, mode),
+		get_ptable(ptable),
 		AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
 							   _1, TOP_LEVEL))))));
-	mm_ptable_fini(&ptable, mode, &ppool);
+	mm_vm_fini(&ptable, &ppool);
 }
 
 /**
@@ -678,7 +676,7 @@
 	const paddr_t l1_begin = pa_init(666 * mm_entry_size(1));
 	const paddr_t l1_end = pa_add(l1_begin, 5 * mm_entry_size(1));
 	struct mm_ptable ptable;
-	ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));
+	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
 	ASSERT_TRUE(mm_vm_identity_map(&ptable, l0_begin, l0_end, mode, nullptr,
 				       &ppool));
 	ASSERT_TRUE(mm_vm_identity_map(&ptable, l1_begin, l1_end, mode, nullptr,
@@ -686,9 +684,9 @@
 	ASSERT_TRUE(mm_vm_unmap(&ptable, l0_begin, l0_end, mode, &ppool));
 	ASSERT_TRUE(mm_vm_unmap(&ptable, l1_begin, l1_end, mode, &ppool));
 	EXPECT_THAT(
-		get_ptable(ptable, mode),
+		get_ptable(ptable),
 		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
-	mm_ptable_fini(&ptable, MM_MODE_STAGE1, &ppool);
+	mm_vm_fini(&ptable, &ppool);
 }
 
 /**
@@ -696,13 +694,12 @@
  */
 TEST_F(mm, is_mapped_empty)
 {
-	constexpr int mode = 0;
 	struct mm_ptable ptable;
-	ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));
+	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
 	EXPECT_FALSE(mm_vm_is_mapped(&ptable, ipa_init(0)));
 	EXPECT_FALSE(mm_vm_is_mapped(&ptable, ipa_init(0x8123'2344)));
 	EXPECT_FALSE(mm_vm_is_mapped(&ptable, ipa_init(0x1e0'0000'0073)));
-	mm_ptable_fini(&ptable, mode, &ppool);
+	mm_vm_fini(&ptable, &ppool);
 }
 
 /**
@@ -712,13 +709,13 @@
 {
 	constexpr int mode = 0;
 	struct mm_ptable ptable;
-	ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));
+	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
 	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
 				       nullptr, &ppool));
 	EXPECT_TRUE(mm_vm_is_mapped(&ptable, ipa_init(0)));
 	EXPECT_TRUE(mm_vm_is_mapped(&ptable, ipa_init(0xf247'a7b3)));
 	EXPECT_TRUE(mm_vm_is_mapped(&ptable, ipa_init(0x1ff'7bfa'983b)));
-	mm_ptable_fini(&ptable, mode, &ppool);
+	mm_vm_fini(&ptable, &ppool);
 }
 
 /**
@@ -730,14 +727,14 @@
 	const paddr_t page_begin = pa_init(0x100'0000'0000);
 	const paddr_t page_end = pa_add(page_begin, PAGE_SIZE);
 	struct mm_ptable ptable;
-	ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));
+	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
 	ASSERT_TRUE(mm_vm_identity_map(&ptable, page_begin, page_end, mode,
 				       nullptr, &ppool));
 	EXPECT_TRUE(mm_vm_is_mapped(&ptable, ipa_from_pa(page_begin)));
 	EXPECT_TRUE(
 		mm_vm_is_mapped(&ptable, ipa_from_pa(pa_add(page_begin, 127))));
 	EXPECT_FALSE(mm_vm_is_mapped(&ptable, ipa_from_pa(page_end)));
-	mm_ptable_fini(&ptable, mode, &ppool);
+	mm_vm_fini(&ptable, &ppool);
 }
 
 /**
@@ -747,14 +744,14 @@
 {
 	constexpr int mode = 0;
 	struct mm_ptable ptable;
-	ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));
+	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
 	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
 				       nullptr, &ppool));
 	EXPECT_FALSE(mm_vm_is_mapped(&ptable, ipa_from_pa(VM_MEM_END)));
 	EXPECT_FALSE(mm_vm_is_mapped(&ptable, ipa_init(0x1000'adb7'8123)));
 	EXPECT_FALSE(mm_vm_is_mapped(
 		&ptable, ipa_init(std::numeric_limits<uintpaddr_t>::max())));
-	mm_ptable_fini(&ptable, mode, &ppool);
+	mm_vm_fini(&ptable, &ppool);
 }
 
 /**
@@ -763,12 +760,11 @@
  */
 TEST_F(mm, get_mode_empty)
 {
-	constexpr int mode = 0;
 	constexpr int default_mode =
 		MM_MODE_INVALID | MM_MODE_UNOWNED | MM_MODE_SHARED;
 	struct mm_ptable ptable;
 	int read_mode;
-	ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));
+	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
 
 	read_mode = 0;
 	EXPECT_TRUE(
@@ -785,7 +781,7 @@
 				   ipa_init(0x1ff'ffff'ffff), &read_mode));
 	EXPECT_THAT(read_mode, Eq(default_mode));
 
-	mm_ptable_fini(&ptable, mode, &ppool);
+	mm_vm_fini(&ptable, &ppool);
 }
 
 /**
@@ -799,7 +795,7 @@
 	const paddr_t map_end = pa_add(map_begin, 2 * PAGE_SIZE);
 	struct mm_ptable ptable;
 	int read_mode;
-	ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));
+	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
 	ASSERT_TRUE(mm_vm_identity_map(&ptable, map_begin, map_end, mode,
 				       nullptr, &ppool));
 
@@ -817,7 +813,7 @@
 	EXPECT_TRUE(mm_vm_get_mode(&ptable, ipa_from_pa(map_begin),
 				   ipa_from_pa(map_end), &read_mode));
 	EXPECT_THAT(read_mode, Eq(mode));
-	mm_ptable_fini(&ptable, mode, &ppool);
+	mm_vm_fini(&ptable, &ppool);
 }
 
 /**
@@ -828,7 +824,7 @@
 	constexpr int mode = MM_MODE_UNOWNED;
 	struct mm_ptable ptable;
 	int read_mode;
-	ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));
+	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
 	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
 				       nullptr, &ppool));
 	EXPECT_FALSE(mm_vm_get_mode(&ptable, ipa_init(0),
@@ -839,7 +835,7 @@
 				    &read_mode));
 	EXPECT_FALSE(mm_vm_get_mode(&ptable, ipa_init(0x1'1234'1234'1234),
 				    ipa_init(2'0000'0000'0000), &read_mode));
-	mm_ptable_fini(&ptable, mode, &ppool);
+	mm_vm_fini(&ptable, &ppool);
 }
 
 /**
@@ -847,14 +843,13 @@
  */
 TEST_F(mm, defrag_empty)
 {
-	constexpr int mode = 0;
 	struct mm_ptable ptable;
-	ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));
-	mm_ptable_defrag(&ptable, mode, &ppool);
+	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
+	mm_vm_defrag(&ptable, &ppool);
 	EXPECT_THAT(
-		get_ptable(ptable, mode),
+		get_ptable(ptable),
 		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
-	mm_ptable_fini(&ptable, mode, &ppool);
+	mm_vm_fini(&ptable, &ppool);
 }
 
 /**
@@ -869,18 +864,18 @@
 	const paddr_t l1_begin = pa_init(3 * mm_entry_size(1));
 	const paddr_t l1_end = pa_add(l1_begin, 5 * mm_entry_size(1));
 	struct mm_ptable ptable;
-	ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));
+	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
 	ASSERT_TRUE(mm_vm_identity_map(&ptable, l0_begin, l0_end, mode, nullptr,
 				       &ppool));
 	ASSERT_TRUE(mm_vm_identity_map(&ptable, l0_begin, l0_end, mode, nullptr,
 				       &ppool));
 	ASSERT_TRUE(mm_vm_unmap(&ptable, l0_begin, l0_end, mode, &ppool));
 	ASSERT_TRUE(mm_vm_unmap(&ptable, l1_begin, l1_end, mode, &ppool));
-	mm_ptable_defrag(&ptable, 0, &ppool);
+	mm_vm_defrag(&ptable, &ppool);
 	EXPECT_THAT(
-		get_ptable(ptable, mode),
+		get_ptable(ptable),
 		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
-	mm_ptable_fini(&ptable, mode, &ppool);
+	mm_vm_fini(&ptable, &ppool);
 }
 
 /**
@@ -894,7 +889,7 @@
 	const paddr_t middle = pa_add(begin, 67 * PAGE_SIZE);
 	const paddr_t end = pa_add(begin, 4 * mm_entry_size(1));
 	struct mm_ptable ptable;
-	ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));
+	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
 	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
 				       nullptr, &ppool));
 	ASSERT_TRUE(mm_vm_unmap(&ptable, begin, end, mode, &ppool));
@@ -902,12 +897,12 @@
 				       &ppool));
 	ASSERT_TRUE(mm_vm_identity_map(&ptable, middle, end, mode, nullptr,
 				       &ppool));
-	mm_ptable_defrag(&ptable, 0, &ppool);
+	mm_vm_defrag(&ptable, &ppool);
 	EXPECT_THAT(
-		get_ptable(ptable, mode),
+		get_ptable(ptable),
 		AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
 							   _1, TOP_LEVEL))))));
-	mm_ptable_fini(&ptable, mode, &ppool);
+	mm_vm_fini(&ptable, &ppool);
 }
 
 } /* namespace */
diff --git a/src/vm.c b/src/vm.c
index 4a1d9d7..4a14eae 100644
--- a/src/vm.c
+++ b/src/vm.c
@@ -42,7 +42,7 @@
 	vm->vcpu_count = vcpu_count;
 	vm->mailbox.state = mailbox_state_empty;
 
-	if (!mm_ptable_init(&vm->ptable, 0, ppool)) {
+	if (!mm_vm_init(&vm->ptable, ppool)) {
 		return false;
 	}
 
diff --git a/test/arch/mm_test.c b/test/arch/mm_test.c
index 60f927b..8023f43 100644
--- a/test/arch/mm_test.c
+++ b/test/arch/mm_test.c
@@ -52,8 +52,7 @@
  */
 TEST(arch_mm, max_level_stage1)
 {
-	int mode = MM_MODE_STAGE1;
-	uint8_t max_level = arch_mm_max_level(mode);
+	uint8_t max_level = arch_mm_stage1_max_level();
 	EXPECT_GE(max_level, MAX_LEVEL_LOWER_BOUND);
 	EXPECT_LE(max_level, MAX_LEVEL_UPPER_BOUND);
 }
@@ -86,7 +85,8 @@
 	TEST(arch_mm, invalid_block_properties_level##lvl)                    \
 	{                                                                     \
 		uint8_t level = lvl;                                          \
-		uint64_t attrs = arch_mm_mode_to_attrs(MM_MODE_INVALID);      \
+		uint64_t attrs =                                              \
+			arch_mm_mode_to_stage2_attrs(MM_MODE_INVALID);        \
 		pte_t block_pte;                                              \
                                                                               \
 		/* Test doesn't apply if a block is not allowed. */           \
@@ -112,7 +112,7 @@
 	TEST(arch_mm, valid_block_properties_level##lvl)               \
 	{                                                              \
 		uint8_t level = lvl;                                   \
-		uint64_t attrs = arch_mm_mode_to_attrs(0);             \
+		uint64_t attrs = arch_mm_mode_to_stage2_attrs(0);      \
 		pte_t block_pte;                                       \
                                                                        \
 		/* Test doesn't apply if a block is not allowed. */    \
@@ -174,21 +174,21 @@
 		}                                                            \
                                                                              \
 		addr = pa_init(0);                                           \
-		attrs = arch_mm_mode_to_attrs(0);                            \
+		attrs = arch_mm_mode_to_stage2_attrs(0);                     \
 		block_pte = arch_mm_block_pte(level, addr, attrs);           \
 		EXPECT_EQ(arch_mm_pte_attrs(block_pte, level), attrs);       \
 		EXPECT_EQ(pa_addr(arch_mm_block_from_pte(block_pte, level)), \
 			  pa_addr(addr));                                    \
                                                                              \
 		addr = pa_init(PAGE_SIZE * 17);                              \
-		attrs = arch_mm_mode_to_attrs(MM_MODE_INVALID);              \
+		attrs = arch_mm_mode_to_stage2_attrs(MM_MODE_INVALID);       \
 		block_pte = arch_mm_block_pte(level, addr, attrs);           \
 		EXPECT_EQ(arch_mm_pte_attrs(block_pte, level), attrs);       \
 		EXPECT_EQ(pa_addr(arch_mm_block_from_pte(block_pte, level)), \
 			  pa_addr(addr));                                    \
                                                                              \
 		addr = pa_init(PAGE_SIZE * 500);                             \
-		attrs = arch_mm_mode_to_attrs(MM_MODE_R | MM_MODE_W);        \
+		attrs = arch_mm_mode_to_stage2_attrs(MM_MODE_R | MM_MODE_W); \
 		block_pte = arch_mm_block_pte(level, addr, attrs);           \
 		EXPECT_EQ(arch_mm_pte_attrs(block_pte, level), attrs);       \
 		EXPECT_EQ(pa_addr(arch_mm_block_from_pte(block_pte, level)), \