Add an explicit type for page tables.

A page table is an array of PTEs, but a pte_t pointer is overloaded: it
may refer to a single entry or to a whole table, which causes confusion
when reading. The new type makes it explicit that the pointer is to a
full, page-aligned page table.
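
For illustration (a sketch, not part of the patch), call sites move from
indexing a raw pointer, where nothing says whether one entry or a whole
table is meant:

    pte_t *table = ptr_from_pa(pa);  /* one PTE or a whole table? */
    pte_t entry = table[i];

to member access through the new type, whose page size and alignment are
checked by static asserts:

    struct mm_page_table *table = mm_page_table_from_pa(pa);
    pte_t entry = table->entries[i];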

Change-Id: I167ed43098e6ab1df7c8b680604ba076dd9b74db
diff --git a/inc/hf/mm.h b/inc/hf/mm.h
index 4df8899..0f5a4a6 100644
--- a/inc/hf/mm.h
+++ b/inc/hf/mm.h
@@ -16,6 +16,8 @@
 
 #pragma once
 
+#include <assert.h>
+#include <stdalign.h>
 #include <stdbool.h>
 #include <stdint.h>
 
@@ -23,12 +25,21 @@
 
 #include "hf/addr.h"
 
+#define PAGE_SIZE (1 << PAGE_BITS)
+#define MM_PTE_PER_PAGE (PAGE_SIZE / sizeof(pte_t))
+
+struct mm_page_table {
+	alignas(PAGE_SIZE) pte_t entries[MM_PTE_PER_PAGE];
+};
+static_assert(sizeof(struct mm_page_table) == PAGE_SIZE,
+	      "A page table must take exactly one page.");
+static_assert(alignof(struct mm_page_table) == PAGE_SIZE,
+	      "A page table must be page aligned.");
+
 struct mm_ptable {
 	paddr_t table;
 };
 
-#define PAGE_SIZE (1 << PAGE_BITS)
-
 /* The following are arch-independent page mapping modes. */
 #define MM_MODE_R 0x01 /* read */
 #define MM_MODE_W 0x02 /* write */
diff --git a/src/mm.c b/src/mm.c
index 210fa8e..7532825 100644
--- a/src/mm.c
+++ b/src/mm.c
@@ -52,15 +52,13 @@
 
 /* clang-format on */
 
-#define NUM_ENTRIES (PAGE_SIZE / sizeof(pte_t))
-
 static struct mm_ptable ptable;
 
 /**
- * Casts a physical address to a pointer. This assumes that it is mapped (to the
- * same address), so should only be used within the mm code.
+ * Gets the page table from the physical address. This assumes the address
+ * is identity-mapped, so it should only be used within the mm code.
  */
-static inline void *ptr_from_pa(paddr_t pa)
+static struct mm_page_table *mm_page_table_from_pa(paddr_t pa)
 {
 	return ptr_from_va(va_from_pa(pa));
 }
@@ -85,7 +82,7 @@
  * Calculates the size of the address space represented by a page table entry at
  * the given level.
  */
-static inline size_t mm_entry_size(int level)
+static size_t mm_entry_size(int level)
 {
 	return UINT64_C(1) << (PAGE_BITS + level * PAGE_LEVEL_BITS);
 }
@@ -94,7 +91,7 @@
  * For a given address, calculates the maximum (plus one) address that can be
  * represented by the same table at the given level.
  */
-static inline ptable_addr_t mm_level_end(ptable_addr_t addr, int level)
+static ptable_addr_t mm_level_end(ptable_addr_t addr, int level)
 {
 	size_t offset = PAGE_BITS + (level + 1) * PAGE_LEVEL_BITS;
 	return ((addr >> offset) + 1) << offset;
@@ -104,21 +101,37 @@
  * For a given address, calculates the index at which its entry is stored in a
  * table at the given level.
  */
-static inline size_t mm_index(ptable_addr_t addr, int level)
+static size_t mm_index(ptable_addr_t addr, int level)
 {
 	ptable_addr_t v = addr >> (PAGE_BITS + level * PAGE_LEVEL_BITS);
 	return v & ((UINT64_C(1) << PAGE_LEVEL_BITS) - 1);
 }
 
 /**
+ * Allocates a new page table, using the synchronised allocator when
+ * sync_alloc is true and the unsynchronised one otherwise.
+ */
+static struct mm_page_table *mm_alloc_page_table(bool sync_alloc)
+{
+	if (sync_alloc) {
+		return halloc_aligned(sizeof(struct mm_page_table),
+				      alignof(struct mm_page_table));
+	}
+
+	return halloc_aligned_nosync(sizeof(struct mm_page_table),
+				     alignof(struct mm_page_table));
+}
+
+/**
  * Populates the provided page table entry with a reference to another table if
  * needed, that is, if it does not yet point to another table.
  *
  * Returns a pointer to the table the entry now points to.
  */
-static pte_t *mm_populate_table_pte(pte_t *pte, int level, bool sync_alloc)
+static struct mm_page_table *mm_populate_table_pte(pte_t *pte, int level,
+						   bool sync_alloc)
 {
-	pte_t *ntable;
+	struct mm_page_table *ntable;
 	pte_t v = *pte;
 	pte_t new_pte;
 	size_t i;
@@ -127,13 +139,12 @@
 
 	/* Just return pointer to table if it's already populated. */
 	if (arch_mm_pte_is_table(v, level)) {
-		return ptr_from_pa(arch_mm_table_from_pte(v));
+		return mm_page_table_from_pa(arch_mm_table_from_pte(v));
 	}
 
 	/* Allocate a new table. */
-	ntable = (sync_alloc ? halloc_aligned : halloc_aligned_nosync)(
-		PAGE_SIZE, PAGE_SIZE);
-	if (!ntable) {
+	ntable = mm_alloc_page_table(sync_alloc);
+	if (ntable == NULL) {
 		dlog("Failed to allocate memory for page table\n");
 		return NULL;
 	}
@@ -150,8 +161,8 @@
 	}
 
 	/* Initialise entries in the new table. */
-	for (i = 0; i < NUM_ENTRIES; i++) {
-		ntable[i] = new_pte;
+	for (i = 0; i < MM_PTE_PER_PAGE; i++) {
+		ntable->entries[i] = new_pte;
 		new_pte += inc;
 	}
 
@@ -171,17 +182,17 @@
  */
 static void mm_free_page_pte(pte_t pte, int level)
 {
-	pte_t *table;
+	struct mm_page_table *table;
 	uint64_t i;
 
 	if (!arch_mm_pte_is_table(pte, level)) {
 		return;
 	}
 
-	table = ptr_from_pa(arch_mm_table_from_pte(pte));
+	table = mm_page_table_from_pa(arch_mm_table_from_pte(pte));
 	/* Recursively free any subtables. */
-	for (i = 0; i < NUM_ENTRIES; ++i) {
-		mm_free_page_pte(table[i], level - 1);
+	for (i = 0; i < MM_PTE_PER_PAGE; ++i) {
+		mm_free_page_pte(table->entries[i], level - 1);
 	}
 
 	/* Free the table itself. */
@@ -191,12 +202,12 @@
 /**
  * Returns whether all entries in this table are absent.
  */
-static bool mm_ptable_is_empty(pte_t *table, int level)
+static bool mm_ptable_is_empty(struct mm_page_table *table, int level)
 {
 	uint64_t i;
 
-	for (i = 0; i < NUM_ENTRIES; ++i) {
-		if (arch_mm_pte_is_present(table[i], level)) {
+	for (i = 0; i < MM_PTE_PER_PAGE; ++i) {
+		if (arch_mm_pte_is_present(table->entries[i], level)) {
 			return false;
 		}
 	}
@@ -213,9 +224,10 @@
  * table.
  */
 static bool mm_map_level(ptable_addr_t begin, ptable_addr_t end, paddr_t pa,
-			 uint64_t attrs, pte_t *table, int level, int flags)
+			 uint64_t attrs, struct mm_page_table *table, int level,
+			 int flags)
 {
-	pte_t *pte = &table[mm_index(begin, level)];
+	pte_t *pte = &table->entries[mm_index(begin, level)];
 	ptable_addr_t level_end = mm_level_end(begin, level);
 	size_t entry_size = mm_entry_size(level);
 	bool commit = flags & MAP_FLAG_COMMIT;
@@ -259,8 +271,9 @@
 			 * If the entry is already a subtable get it; otherwise
 			 * replace it with an equivalent subtable and get that.
 			 */
-			pte_t *nt = mm_populate_table_pte(pte, level, sync);
-			if (!nt) {
+			struct mm_page_table *nt =
+				mm_populate_table_pte(pte, level, sync);
+			if (nt == NULL) {
 				return false;
 			}
 
@@ -319,7 +332,7 @@
 	uint64_t attrs = arch_mm_mode_to_attrs(mode);
 	int flags = (mode & MM_MODE_NOSYNC) ? 0 : MAP_FLAG_SYNC;
 	int level = arch_mm_max_level(mode);
-	pte_t *table = ptr_from_pa(t->table);
+	struct mm_page_table *table = mm_page_table_from_pa(t->table);
 	ptable_addr_t begin;
 	ptable_addr_t end;
 
@@ -357,7 +370,7 @@
 	int flags =
 		((mode & MM_MODE_NOSYNC) ? 0 : MAP_FLAG_SYNC) | MAP_FLAG_UNMAP;
 	int level = arch_mm_max_level(mode);
-	pte_t *table = ptr_from_pa(t->table);
+	struct mm_page_table *table = mm_page_table_from_pa(t->table);
 	ptable_addr_t begin;
 	ptable_addr_t end;
 
@@ -385,20 +398,22 @@
  * Writes the given table to the debug log, calling itself recursively to
  * write sub-tables.
  */
-static void mm_dump_table_recursive(pte_t *table, int level, int max_level)
+static void mm_dump_table_recursive(struct mm_page_table *table, int level,
+				    int max_level)
 {
 	uint64_t i;
-	for (i = 0; i < NUM_ENTRIES; i++) {
-		if (!arch_mm_pte_is_present(table[i], level)) {
+	for (i = 0; i < MM_PTE_PER_PAGE; i++) {
+		if (!arch_mm_pte_is_present(table->entries[i], level)) {
 			continue;
 		}
 
-		dlog("%*s%x: %x\n", 4 * (max_level - level), "", i, table[i]);
+		dlog("%*s%x: %x\n", 4 * (max_level - level), "", i,
+		     table->entries[i]);
 
-		if (arch_mm_pte_is_table(table[i], level)) {
+		if (arch_mm_pte_is_table(table->entries[i], level)) {
 			mm_dump_table_recursive(
-				ptr_from_va(va_from_pa(
-					arch_mm_table_from_pte(table[i]))),
+				mm_page_table_from_pa(arch_mm_table_from_pte(
+					table->entries[i])),
 				level - 1, max_level);
 		}
 	}
@@ -409,7 +424,7 @@
  */
 void mm_ptable_dump(struct mm_ptable *t, int mode)
 {
-	pte_t *table = ptr_from_pa(t->table);
+	struct mm_page_table *table = mm_page_table_from_pa(t->table);
 	int max_level = arch_mm_max_level(mode);
 	mm_dump_table_recursive(table, max_level, max_level);
 }
@@ -421,13 +436,14 @@
  */
 static pte_t mm_table_pte_to_absent(pte_t entry, int level)
 {
-	pte_t *subtable = ptr_from_pa(arch_mm_table_from_pte(entry));
+	struct mm_page_table *table =
+		mm_page_table_from_pa(arch_mm_table_from_pte(entry));
 	/*
 	 * Free the subtable. This is safe to do directly (rather than
 	 * using mm_free_page_pte) because we know by this point that it
 	 * doesn't have any subtables of its own.
 	 */
-	hfree(subtable);
+	hfree(table);
 	/* Replace subtable with a single absent entry. */
 	return arch_mm_absent_pte(level);
 }
@@ -440,7 +456,7 @@
  */
 static pte_t mm_table_pte_to_block(pte_t entry, int level)
 {
-	pte_t *subtable;
+	struct mm_page_table *table;
 	uint64_t block_attrs;
 	uint64_t table_attrs;
 	uint64_t combined_attrs;
@@ -450,18 +466,18 @@
 		return entry;
 	}
 
-	subtable = ptr_from_pa(arch_mm_table_from_pte(entry));
+	table = mm_page_table_from_pa(arch_mm_table_from_pte(entry));
 	/*
 	 * Replace subtable with a single block, with equivalent
 	 * attributes.
 	 */
-	block_attrs = arch_mm_pte_attrs(subtable[0]);
+	block_attrs = arch_mm_pte_attrs(table->entries[0]);
 	table_attrs = arch_mm_pte_attrs(entry);
 	combined_attrs =
 		arch_mm_combine_table_entry_attrs(table_attrs, block_attrs);
-	block_address = arch_mm_block_from_pte(subtable[0]);
+	block_address = arch_mm_block_from_pte(table->entries[0]);
 	/* Free the subtable. */
-	hfree(subtable);
+	hfree(table);
 	/*
 	 * We can assume that the block is aligned properly
 	 * because all virtual addresses are aligned by
@@ -477,7 +493,7 @@
  */
 static pte_t mm_ptable_defrag_entry(pte_t entry, int level)
 {
-	pte_t *table;
+	struct mm_page_table *table;
 	uint64_t i;
 	uint64_t attrs;
 	bool identical_blocks_so_far = true;
@@ -487,20 +503,21 @@
 		return entry;
 	}
 
-	table = ptr_from_pa(arch_mm_table_from_pte(entry));
+	table = mm_page_table_from_pa(arch_mm_table_from_pte(entry));
 
 	/*
 	 * Check if all entries are blocks with the same flags or are all
 	 * absent.
 	 */
-	attrs = arch_mm_pte_attrs(table[0]);
-	for (i = 0; i < NUM_ENTRIES; ++i) {
+	attrs = arch_mm_pte_attrs(table->entries[0]);
+	for (i = 0; i < MM_PTE_PER_PAGE; ++i) {
 		/*
 		 * First try to defrag the entry, in case it is a subtable.
 		 */
-		table[i] = mm_ptable_defrag_entry(table[i], level - 1);
+		table->entries[i] =
+			mm_ptable_defrag_entry(table->entries[i], level - 1);
 
-		if (arch_mm_pte_is_present(table[i], level - 1)) {
+		if (arch_mm_pte_is_present(table->entries[i], level - 1)) {
 			all_absent_so_far = false;
 		}
 
@@ -508,8 +525,8 @@
 		 * If the entry is a block, check that the flags are the same as
 		 * what we have so far.
 		 */
-		if (!arch_mm_pte_is_block(table[i], level - 1) ||
-		    arch_mm_pte_attrs(table[i]) != attrs) {
+		if (!arch_mm_pte_is_block(table->entries[i], level - 1) ||
+		    arch_mm_pte_attrs(table->entries[i]) != attrs) {
 			identical_blocks_so_far = false;
 		}
 	}
@@ -528,7 +545,7 @@
  */
 void mm_ptable_defrag(struct mm_ptable *t, int mode)
 {
-	pte_t *table = ptr_from_pa(t->table);
+	struct mm_page_table *table = mm_page_table_from_pa(t->table);
 	int level = arch_mm_max_level(mode);
 	uint64_t i;
 
@@ -536,8 +553,9 @@
 	 * Loop through each entry in the table. If it points to another table,
 	 * check if that table can be replaced by a block or an absent entry.
 	 */
-	for (i = 0; i < NUM_ENTRIES; ++i) {
-		table[i] = mm_ptable_defrag_entry(table[i], level);
+	for (i = 0; i < MM_PTE_PER_PAGE; ++i) {
+		table->entries[i] =
+			mm_ptable_defrag_entry(table->entries[i], level);
 	}
 }
 
@@ -558,8 +576,8 @@
  * Determines if the given address is mapped in the given page table by
  * recursively traversing all levels of the page table.
  */
-static bool mm_is_mapped_recursive(const pte_t *table, ptable_addr_t addr,
-				   int level)
+static bool mm_is_mapped_recursive(struct mm_page_table *table,
+				   ptable_addr_t addr, int level)
 {
 	pte_t pte;
 	ptable_addr_t va_level_end = mm_level_end(addr, level);
@@ -569,7 +587,7 @@
 		return false;
 	}
 
-	pte = table[mm_index(addr, level)];
+	pte = table->entries[mm_index(addr, level)];
 
 	if (arch_mm_pte_is_block(pte, level)) {
 		return true;
@@ -577,8 +595,8 @@
 
 	if (arch_mm_pte_is_table(pte, level)) {
 		return mm_is_mapped_recursive(
-			ptr_from_pa(arch_mm_table_from_pte(pte)), addr,
-			level - 1);
+			mm_page_table_from_pa(arch_mm_table_from_pte(pte)),
+			addr, level - 1);
 	}
 
 	/* The entry is not present. */
@@ -591,7 +609,7 @@
 static bool mm_ptable_is_mapped(struct mm_ptable *t, ptable_addr_t addr,
 				int mode)
 {
-	pte_t *table = ptr_from_pa(t->table);
+	struct mm_page_table *table = mm_page_table_from_pa(t->table);
 	int level = arch_mm_max_level(mode);
 
 	addr = mm_round_down_to_page(addr);
@@ -605,20 +623,15 @@
 bool mm_ptable_init(struct mm_ptable *t, int mode)
 {
 	size_t i;
-	pte_t *table;
+	struct mm_page_table *table;
 
-	if (mode & MM_MODE_NOSYNC) {
-		table = halloc_aligned_nosync(PAGE_SIZE, PAGE_SIZE);
-	} else {
-		table = halloc_aligned(PAGE_SIZE, PAGE_SIZE);
-	}
-
-	if (!table) {
+	table = mm_alloc_page_table(!(mode & MM_MODE_NOSYNC));
+	if (table == NULL) {
 		return false;
 	}
 
-	for (i = 0; i < NUM_ENTRIES; i++) {
-		table[i] = arch_mm_absent_pte(arch_mm_max_level(mode));
+	for (i = 0; i < MM_PTE_PER_PAGE; i++) {
+		table->entries[i] = arch_mm_absent_pte(arch_mm_max_level(mode));
 	}
 
 	/* TODO: halloc could return a virtual or physical address if mm not
@@ -689,7 +702,7 @@
 {
 	if (mm_ptable_identity_map(&ptable, begin, end,
 				   mode | MM_MODE_STAGE1)) {
-		return ptr_from_pa(begin);
+		return ptr_from_va(va_from_pa(begin));
 	}
 
 	return NULL;
diff --git a/src/mm_test.cc b/src/mm_test.cc
index d4c1d32..7990d27 100644
--- a/src/mm_test.cc
+++ b/src/mm_test.cc
@@ -31,7 +31,6 @@
 using ::testing::Eq;
 
 constexpr size_t TEST_HEAP_SIZE = PAGE_SIZE * 10;
-constexpr size_t ENTRY_COUNT = PAGE_SIZE / sizeof(pte_t);
 const int TOP_LEVEL = arch_mm_max_level(0);
 const pte_t ABSENT_ENTRY = arch_mm_absent_pte(TOP_LEVEL);
 
@@ -45,22 +44,41 @@
 }
 
 /**
+ * Get the page table from the physical address.
+ */
+struct mm_page_table *page_table_from_pa(paddr_t pa)
+{
+	return reinterpret_cast<struct mm_page_table *>(
+		ptr_from_va(va_from_pa(pa)));
+}
+
+/**
+ * Allocate a page table.
+ */
+struct mm_page_table *alloc_page_table()
+{
+	return reinterpret_cast<struct mm_page_table *>(halloc_aligned(
+		sizeof(struct mm_page_table), alignof(struct mm_page_table)));
+}
+
+/**
  * Fill a ptable with absent entries.
  */
-void init_absent(pte_t *table)
+void init_absent(struct mm_page_table *table)
 {
-	for (uint64_t i = 0; i < ENTRY_COUNT; ++i) {
-		table[i] = ABSENT_ENTRY;
+	for (uint64_t i = 0; i < MM_PTE_PER_PAGE; ++i) {
+		table->entries[i] = ABSENT_ENTRY;
 	}
 }
 
 /**
  * Fill a ptable with block entries.
  */
-void init_blocks(pte_t *table, int level, paddr_t start_address, uint64_t attrs)
+void init_blocks(struct mm_page_table *table, int level, paddr_t start_address,
+		 uint64_t attrs)
 {
-	for (uint64_t i = 0; i < ENTRY_COUNT; ++i) {
-		table[i] = arch_mm_block_pte(
+	for (uint64_t i = 0; i < MM_PTE_PER_PAGE; ++i) {
+		table->entries[i] = arch_mm_block_pte(
 			level, pa_add(start_address, i * mm_entry_size(level)),
 			attrs);
 	}
@@ -74,15 +92,15 @@
 	auto test_heap = std::make_unique<uint8_t[]>(TEST_HEAP_SIZE);
 	halloc_init((size_t)test_heap.get(), TEST_HEAP_SIZE);
 
-	pte_t *table = (pte_t *)halloc_aligned(PAGE_SIZE, PAGE_SIZE);
+	struct mm_page_table *table = alloc_page_table();
 	init_absent(table);
 	struct mm_ptable ptable;
 	ptable.table = pa_init((uintpaddr_t)table);
 
 	mm_ptable_defrag(&ptable, 0);
 
-	for (uint64_t i = 0; i < ENTRY_COUNT; ++i) {
-		EXPECT_THAT(table[i], Eq(ABSENT_ENTRY)) << "i=" << i;
+	for (uint64_t i = 0; i < MM_PTE_PER_PAGE; ++i) {
+		EXPECT_THAT(table->entries[i], Eq(ABSENT_ENTRY)) << "i=" << i;
 	}
 }
 
@@ -95,20 +113,20 @@
 	auto test_heap = std::make_unique<uint8_t[]>(TEST_HEAP_SIZE);
 	halloc_init((size_t)test_heap.get(), TEST_HEAP_SIZE);
 
-	pte_t *subtable_a = (pte_t *)halloc_aligned(PAGE_SIZE, PAGE_SIZE);
-	pte_t *subtable_aa = (pte_t *)halloc_aligned(PAGE_SIZE, PAGE_SIZE);
-	pte_t *subtable_b = (pte_t *)halloc_aligned(PAGE_SIZE, PAGE_SIZE);
-	pte_t *table = (pte_t *)halloc_aligned(PAGE_SIZE, PAGE_SIZE);
+	struct mm_page_table *subtable_a = alloc_page_table();
+	struct mm_page_table *subtable_aa = alloc_page_table();
+	struct mm_page_table *subtable_b = alloc_page_table();
+	struct mm_page_table *table = alloc_page_table();
 	init_absent(subtable_a);
 	init_absent(subtable_aa);
 	init_absent(subtable_b);
 	init_absent(table);
 
-	subtable_a[3] = arch_mm_table_pte(TOP_LEVEL - 1,
-					  pa_init((uintpaddr_t)subtable_aa));
-	table[0] =
+	subtable_a->entries[3] = arch_mm_table_pte(
+		TOP_LEVEL - 1, pa_init((uintpaddr_t)subtable_aa));
+	table->entries[0] =
 		arch_mm_table_pte(TOP_LEVEL, pa_init((uintpaddr_t)subtable_a));
-	table[5] =
+	table->entries[5] =
 		arch_mm_table_pte(TOP_LEVEL, pa_init((uintpaddr_t)subtable_b));
 
 	struct mm_ptable ptable;
@@ -116,8 +134,8 @@
 
 	mm_ptable_defrag(&ptable, 0);
 
-	for (uint64_t i = 0; i < ENTRY_COUNT; ++i) {
-		EXPECT_THAT(table[i], Eq(ABSENT_ENTRY)) << "i=" << i;
+	for (uint64_t i = 0; i < MM_PTE_PER_PAGE; ++i) {
+		EXPECT_THAT(table->entries[i], Eq(ABSENT_ENTRY)) << "i=" << i;
 	}
 }
 
@@ -130,10 +148,10 @@
 	auto test_heap = std::make_unique<uint8_t[]>(TEST_HEAP_SIZE);
 	halloc_init((size_t)test_heap.get(), TEST_HEAP_SIZE);
 
-	pte_t *subtable_a = (pte_t *)halloc_aligned(PAGE_SIZE, PAGE_SIZE);
-	pte_t *subtable_aa = (pte_t *)halloc_aligned(PAGE_SIZE, PAGE_SIZE);
-	pte_t *subtable_b = (pte_t *)halloc_aligned(PAGE_SIZE, PAGE_SIZE);
-	pte_t *table = (pte_t *)halloc_aligned(PAGE_SIZE, PAGE_SIZE);
+	struct mm_page_table *subtable_a = alloc_page_table();
+	struct mm_page_table *subtable_aa = alloc_page_table();
+	struct mm_page_table *subtable_b = alloc_page_table();
+	struct mm_page_table *table = alloc_page_table();
 	init_blocks(subtable_a, TOP_LEVEL - 1, pa_init(0), 0);
 	init_blocks(subtable_aa, TOP_LEVEL - 2,
 		    pa_init(3 * mm_entry_size(TOP_LEVEL - 1)), 0);
@@ -141,11 +159,11 @@
 		    pa_init(5 * mm_entry_size(TOP_LEVEL)), 0);
 	init_blocks(table, TOP_LEVEL, pa_init(0), 0);
 
-	subtable_a[3] = arch_mm_table_pte(TOP_LEVEL - 1,
-					  pa_init((uintpaddr_t)subtable_aa));
-	table[0] =
+	subtable_a->entries[3] = arch_mm_table_pte(
+		TOP_LEVEL - 1, pa_init((uintpaddr_t)subtable_aa));
+	table->entries[0] =
 		arch_mm_table_pte(TOP_LEVEL, pa_init((uintpaddr_t)subtable_a));
-	table[5] =
+	table->entries[5] =
 		arch_mm_table_pte(TOP_LEVEL, pa_init((uintpaddr_t)subtable_b));
 
 	struct mm_ptable ptable;
@@ -153,12 +171,13 @@
 
 	mm_ptable_defrag(&ptable, 0);
 
-	for (uint64_t i = 0; i < ENTRY_COUNT; ++i) {
-		EXPECT_TRUE(arch_mm_pte_is_present(table[i], TOP_LEVEL))
+	for (uint64_t i = 0; i < MM_PTE_PER_PAGE; ++i) {
+		EXPECT_TRUE(
+			arch_mm_pte_is_present(table->entries[i], TOP_LEVEL))
 			<< "i=" << i;
-		EXPECT_TRUE(arch_mm_pte_is_block(table[i], TOP_LEVEL))
+		EXPECT_TRUE(arch_mm_pte_is_block(table->entries[i], TOP_LEVEL))
 			<< "i=" << i;
-		EXPECT_THAT(pa_addr(arch_mm_block_from_pte(table[i])),
+		EXPECT_THAT(pa_addr(arch_mm_block_from_pte(table->entries[i])),
 			    Eq(i * mm_entry_size(TOP_LEVEL)))
 			<< "i=" << i;
 	}
@@ -170,7 +189,7 @@
 	auto test_heap = std::make_unique<uint8_t[]>(TEST_HEAP_SIZE);
 	halloc_init((size_t)test_heap.get(), TEST_HEAP_SIZE);
 
-	pte_t *table = (pte_t *)halloc_aligned(PAGE_SIZE, PAGE_SIZE);
+	struct mm_page_table *table = alloc_page_table();
 	init_absent(table);
 
 	struct mm_ptable ptable;
@@ -178,8 +197,8 @@
 
 	EXPECT_TRUE(mm_ptable_unmap_hypervisor(&ptable, 0));
 
-	for (uint64_t i = 0; i < ENTRY_COUNT; ++i) {
-		EXPECT_THAT(table[i], Eq(ABSENT_ENTRY)) << "i=" << i;
+	for (uint64_t i = 0; i < MM_PTE_PER_PAGE; ++i) {
+		EXPECT_THAT(table->entries[i], Eq(ABSENT_ENTRY)) << "i=" << i;
 	}
 }
 
@@ -191,17 +210,18 @@
 	auto test_heap = std::make_unique<uint8_t[]>(TEST_HEAP_SIZE);
 	halloc_init((size_t)test_heap.get(), TEST_HEAP_SIZE);
 
-	pte_t *table = (pte_t *)halloc_aligned(PAGE_SIZE, PAGE_SIZE);
-	pte_t *subtable_a = (pte_t *)halloc_aligned(PAGE_SIZE, PAGE_SIZE);
-	pte_t *subtable_aa = (pte_t *)halloc_aligned(PAGE_SIZE, PAGE_SIZE);
-	init_absent(table);
+	struct mm_page_table *subtable_a = alloc_page_table();
+	struct mm_page_table *subtable_aa = alloc_page_table();
+	struct mm_page_table *table = alloc_page_table();
 	init_absent(subtable_a);
 	init_absent(subtable_aa);
+	init_absent(table);
 
-	subtable_aa[0] = arch_mm_block_pte(TOP_LEVEL - 2, pa_init(0), 0);
-	subtable_a[0] = arch_mm_table_pte(TOP_LEVEL - 1,
-					  pa_init((uintpaddr_t)subtable_aa));
-	table[0] =
+	subtable_aa->entries[0] =
+		arch_mm_block_pte(TOP_LEVEL - 2, pa_init(0), 0);
+	subtable_a->entries[0] = arch_mm_table_pte(
+		TOP_LEVEL - 1, pa_init((uintpaddr_t)subtable_aa));
+	table->entries[0] =
 		arch_mm_table_pte(TOP_LEVEL, pa_init((uintpaddr_t)subtable_a));
 
 	struct mm_ptable ptable;
@@ -209,8 +229,8 @@
 
 	EXPECT_TRUE(mm_vm_unmap(&ptable, pa_init(0), pa_init(1), 0));
 
-	for (uint64_t i = 0; i < ENTRY_COUNT; ++i) {
-		EXPECT_THAT(table[i], Eq(ABSENT_ENTRY)) << "i=" << i;
+	for (uint64_t i = 0; i < MM_PTE_PER_PAGE; ++i) {
+		EXPECT_THAT(table->entries[i], Eq(ABSENT_ENTRY)) << "i=" << i;
 	}
 }
 
@@ -223,7 +243,7 @@
 	halloc_init((size_t)test_heap.get(), TEST_HEAP_SIZE);
 
 	/* Start with an empty page table. */
-	pte_t *table = (pte_t *)halloc_aligned(PAGE_SIZE, PAGE_SIZE);
+	struct mm_page_table *table = alloc_page_table();
 	init_absent(table);
 	struct mm_ptable ptable;
 	ptable.table = pa_init((uintpaddr_t)table);
@@ -235,23 +255,28 @@
 	EXPECT_THAT(ipa_addr(ipa), Eq(0));
 
 	/* Check that the first page is mapped, and nothing else. */
-	for (uint64_t i = 1; i < ENTRY_COUNT; ++i) {
-		EXPECT_THAT(table[i], Eq(ABSENT_ENTRY)) << "i=" << i;
+	for (uint64_t i = 1; i < MM_PTE_PER_PAGE; ++i) {
+		EXPECT_THAT(table->entries[i], Eq(ABSENT_ENTRY)) << "i=" << i;
 	}
-	ASSERT_TRUE(arch_mm_pte_is_table(table[0], TOP_LEVEL));
-	pte_t *subtable_a = (pte_t *)ptr_from_va(
-		va_from_pa(arch_mm_table_from_pte(table[0])));
-	for (uint64_t i = 1; i < ENTRY_COUNT; ++i) {
-		EXPECT_THAT(subtable_a[i], Eq(ABSENT_ENTRY)) << "i=" << i;
+	ASSERT_TRUE(arch_mm_pte_is_table(table->entries[0], TOP_LEVEL));
+	struct mm_page_table *subtable_a =
+		page_table_from_pa(arch_mm_table_from_pte(table->entries[0]));
+	for (uint64_t i = 1; i < MM_PTE_PER_PAGE; ++i) {
+		EXPECT_THAT(subtable_a->entries[i], Eq(ABSENT_ENTRY))
+			<< "i=" << i;
 	}
-	ASSERT_TRUE(arch_mm_pte_is_table(subtable_a[0], TOP_LEVEL - 1));
-	pte_t *subtable_aa = (pte_t *)ptr_from_va(
-		va_from_pa(arch_mm_table_from_pte(subtable_a[0])));
-	for (uint64_t i = 1; i < ENTRY_COUNT; ++i) {
-		EXPECT_THAT(subtable_aa[i], Eq(ABSENT_ENTRY)) << "i=" << i;
+	ASSERT_TRUE(
+		arch_mm_pte_is_table(subtable_a->entries[0], TOP_LEVEL - 1));
+	struct mm_page_table *subtable_aa = page_table_from_pa(
+		arch_mm_table_from_pte(subtable_a->entries[0]));
+	for (uint64_t i = 1; i < MM_PTE_PER_PAGE; ++i) {
+		EXPECT_THAT(subtable_aa->entries[i], Eq(ABSENT_ENTRY))
+			<< "i=" << i;
 	}
-	EXPECT_TRUE(arch_mm_pte_is_block(subtable_aa[0], TOP_LEVEL - 2));
-	EXPECT_THAT(pa_addr(arch_mm_block_from_pte(subtable_aa[0])), Eq(0));
+	EXPECT_TRUE(
+		arch_mm_pte_is_block(subtable_aa->entries[0], TOP_LEVEL - 2));
+	EXPECT_THAT(pa_addr(arch_mm_block_from_pte(subtable_aa->entries[0])),
+		    Eq(0));
 }
 
 /** Mapping a range that is already mapped should be a no-op. */
@@ -261,7 +286,7 @@
 	halloc_init((size_t)test_heap.get(), TEST_HEAP_SIZE);
 
 	/* Start with a full page table mapping everything. */
-	pte_t *table = (pte_t *)halloc_aligned(PAGE_SIZE, PAGE_SIZE);
+	struct mm_page_table *table = alloc_page_table();
 	init_blocks(table, TOP_LEVEL, pa_init(0), 0);
 	struct mm_ptable ptable;
 	ptable.table = pa_init((uintpaddr_t)table);
@@ -276,8 +301,8 @@
 	 * The table should still be full of blocks, with no subtables or
 	 * anything else.
 	 */
-	for (uint64_t i = 0; i < ENTRY_COUNT; ++i) {
-		EXPECT_TRUE(arch_mm_pte_is_block(table[i], TOP_LEVEL))
+	for (uint64_t i = 0; i < MM_PTE_PER_PAGE; ++i) {
+		EXPECT_TRUE(arch_mm_pte_is_block(table->entries[i], TOP_LEVEL))
 			<< "i=" << i;
 	}
 }