Use explicit page pool in page table manipulation.

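The page table code previously drew memory from the global halloc bump
allocator, using MM_MODE_NOSYNC to avoid taking locks before virtual memory
is enabled. Replace this with an mpool page pool that callers seed and pass
explicitly to every operation that can allocate or free page tables, and
delete halloc entirely. Locks are enabled on the pool once mm_init()
completes, so MM_MODE_NOSYNC is removed and the remaining mode and map flags
are renumbered. Whatever remains of the boot pool after initialisation is
handed to api_init() to serve later mailbox mappings.

A minimal sketch of the initialisation pattern now used in main.c:

	struct mpool ppool;

	mpool_init(&ppool, sizeof(struct mm_page_table));
	mpool_add_chunk(&ppool, ptable_buf, sizeof(ptable_buf));
	if (!mm_init(&ppool)) {
		panic("mm_init failed");
	}
	mpool_enable_locks();
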
Change-Id: Ibc3c21f815dfae54a541581941e553f79caaaace
diff --git a/inc/hf/alloc.h b/inc/hf/alloc.h
deleted file mode 100644
index 02dde59..0000000
--- a/inc/hf/alloc.h
+++ /dev/null
@@ -1,25 +0,0 @@
-/*
- * Copyright 2018 Google LLC
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     https://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#pragma once
-
-#include <stddef.h>
-
-void halloc_init(size_t base, size_t size);
-void *halloc(size_t size);
-void hfree(void *ptr);
-void *halloc_aligned(size_t size, size_t align);
-void *halloc_aligned_nosync(size_t size, size_t align);
diff --git a/inc/hf/api.h b/inc/hf/api.h
index 692fbac..94e3327 100644
--- a/inc/hf/api.h
+++ b/inc/hf/api.h
@@ -17,10 +17,12 @@
 #pragma once
 
 #include "hf/cpu.h"
+#include "hf/mpool.h"
 #include "hf/vm.h"
 
 #include "vmapi/hf/call.h"
 
+void api_init(struct mpool *ppool);
 int64_t api_vm_get_count(void);
 int64_t api_vcpu_get_count(uint32_t vm_id, const struct vcpu *current);
 struct hf_vcpu_run_return api_vcpu_run(uint32_t vm_id, uint32_t vcpu_idx,
diff --git a/inc/hf/boot_params.h b/inc/hf/boot_params.h
index 7b486d5..db57d56 100644
--- a/inc/hf/boot_params.h
+++ b/inc/hf/boot_params.h
@@ -21,6 +21,7 @@
 #include "hf/arch/cpu.h"
 
 #include "hf/mm.h"
+#include "hf/mpool.h"
 
 #define MAX_MEM_RANGES 20
 
@@ -44,5 +45,5 @@
 	paddr_t initrd_end;
 };
 
-bool plat_get_boot_params(struct boot_params *p);
-bool plat_update_boot_params(struct boot_params_update *p);
+bool plat_get_boot_params(struct boot_params *p, struct mpool *ppool);
+bool plat_update_boot_params(struct boot_params_update *p, struct mpool *ppool);
diff --git a/inc/hf/fdt_handler.h b/inc/hf/fdt_handler.h
index 531290b..3f72218 100644
--- a/inc/hf/fdt_handler.h
+++ b/inc/hf/fdt_handler.h
@@ -19,11 +19,14 @@
 #include "hf/boot_params.h"
 #include "hf/fdt.h"
 #include "hf/mm.h"
+#include "hf/mpool.h"
 
-struct fdt_header *fdt_map(paddr_t fdt_addr, struct fdt_node *n);
-bool fdt_unmap(struct fdt_header *fdt);
+struct fdt_header *fdt_map(paddr_t fdt_addr, struct fdt_node *n,
+			   struct mpool *ppool);
+bool fdt_unmap(struct fdt_header *fdt, struct mpool *ppool);
 void fdt_find_memory_ranges(const struct fdt_node *root, struct boot_params *p);
 bool fdt_find_initrd(struct fdt_node *n, paddr_t *begin, paddr_t *end);
 
 /** Apply an update to the FDT. */
-bool fdt_patch(paddr_t fdt_addr, struct boot_params_update *p);
+bool fdt_patch(paddr_t fdt_addr, struct boot_params_update *p,
+	       struct mpool *ppool);
diff --git a/inc/hf/load.h b/inc/hf/load.h
index 794e752..cecd2f1 100644
--- a/inc/hf/load.h
+++ b/inc/hf/load.h
@@ -23,9 +23,10 @@
 #include "hf/cpio.h"
 #include "hf/memiter.h"
 #include "hf/mm.h"
+#include "hf/mpool.h"
 
 bool load_primary(const struct memiter *cpio, uintreg_t kernel_arg,
-		  struct memiter *initrd);
+		  struct memiter *initrd, struct mpool *ppool);
 bool load_secondary(const struct memiter *cpio,
 		    const struct boot_params *params,
-		    struct boot_params_update *update);
+		    struct boot_params_update *update, struct mpool *ppool);
diff --git a/inc/hf/mm.h b/inc/hf/mm.h
index 11a7753..472e0e3 100644
--- a/inc/hf/mm.h
+++ b/inc/hf/mm.h
@@ -24,6 +24,7 @@
 #include "hf/arch/mm.h"
 
 #include "hf/addr.h"
+#include "hf/mpool.h"
 
 /* Keep macro alignment */
 /* clang-format off */
@@ -71,24 +72,17 @@
 #define MM_MODE_SHARED  0x0040
 
 /**
- * This flag indicates that memory allocation must not use locks. This is
- * relevant in systems where interlocked operations are only available after
- * virtual memory is enabled.
- */
-#define MM_MODE_NOSYNC 0x0080
-
-/**
  * This flag indicates that the mapping is intended to be used in a first
  * stage translation table, which might have different encodings for the
  * attribute bits than the second stage table.
  */
-#define MM_MODE_STAGE1 0x0100
+#define MM_MODE_STAGE1 0x0080
 
 /**
  * This flag indicates that no TLB invalidations should be issued for the
  * changes in the page table.
  */
-#define MM_MODE_NOINVALIDATE 0x0200
+#define MM_MODE_NOINVALIDATE 0x0100
 
 /* clang-format on */
 
@@ -105,20 +99,22 @@
 	paddr_t root;
 };
 
-bool mm_ptable_init(struct mm_ptable *t, int mode);
-void mm_ptable_fini(struct mm_ptable *t, int mode);
+bool mm_ptable_init(struct mm_ptable *t, int mode, struct mpool *ppool);
+void mm_ptable_fini(struct mm_ptable *t, int mode, struct mpool *ppool);
 void mm_ptable_dump(struct mm_ptable *t, int mode);
-void mm_ptable_defrag(struct mm_ptable *t, int mode);
+void mm_ptable_defrag(struct mm_ptable *t, int mode, struct mpool *ppool);
 
 bool mm_vm_identity_map(struct mm_ptable *t, paddr_t begin, paddr_t end,
-			int mode, ipaddr_t *ipa);
-bool mm_vm_unmap(struct mm_ptable *t, paddr_t begin, paddr_t end, int mode);
-bool mm_vm_unmap_hypervisor(struct mm_ptable *t, int mode);
+			int mode, ipaddr_t *ipa, struct mpool *ppool);
+bool mm_vm_unmap(struct mm_ptable *t, paddr_t begin, paddr_t end, int mode,
+		 struct mpool *ppool);
+bool mm_vm_unmap_hypervisor(struct mm_ptable *t, int mode, struct mpool *ppool);
 bool mm_vm_is_mapped(struct mm_ptable *t, ipaddr_t ipa, int mode);
 bool mm_vm_translate(struct mm_ptable *t, ipaddr_t ipa, paddr_t *pa);
 
-bool mm_init(void);
+bool mm_init(struct mpool *ppool);
 bool mm_cpu_init(void);
-void *mm_identity_map(paddr_t begin, paddr_t end, int mode);
-bool mm_unmap(paddr_t begin, paddr_t end, int mode);
-void mm_defrag(void);
+void *mm_identity_map(paddr_t begin, paddr_t end, int mode,
+		      struct mpool *ppool);
+bool mm_unmap(paddr_t begin, paddr_t end, int mode, struct mpool *ppool);
+void mm_defrag(struct mpool *ppool);
diff --git a/inc/hf/vm.h b/inc/hf/vm.h
index 69ceac7..6a140b8 100644
--- a/inc/hf/vm.h
+++ b/inc/hf/vm.h
@@ -18,6 +18,7 @@
 
 #include "hf/cpu.h"
 #include "hf/mm.h"
+#include "hf/mpool.h"
 
 enum mailbox_state {
 	/** There is no message in the mailbox. */
@@ -49,7 +50,7 @@
 	struct mailbox mailbox;
 };
 
-bool vm_init(uint32_t vcpu_count, struct vm **new_vm);
+bool vm_init(uint32_t vcpu_count, struct mpool *ppool, struct vm **new_vm);
 uint32_t vm_get_count(void);
 struct vm *vm_get(uint32_t id);
 void vm_start_vcpu(struct vm *vm, size_t index, ipaddr_t entry, uintreg_t arg);
diff --git a/project/reference b/project/reference
index f1aa19e..8e1da4f 160000
--- a/project/reference
+++ b/project/reference
@@ -1 +1 @@
-Subproject commit f1aa19eba6b8160cac7b4c4f2368b2885df2621c
+Subproject commit 8e1da4f139a409fad2086fb9456bfa836fba6c72
diff --git a/src/BUILD.gn b/src/BUILD.gn
index e768245..d3069ae 100644
--- a/src/BUILD.gn
+++ b/src/BUILD.gn
@@ -46,7 +46,6 @@
 # sharing.
 source_set("src_testable") {
   sources = [
-    "alloc.c",
     "api.c",
     "cpu.c",
     "fdt_handler.c",
diff --git a/src/alloc.c b/src/alloc.c
deleted file mode 100644
index c34af0c..0000000
--- a/src/alloc.c
+++ /dev/null
@@ -1,95 +0,0 @@
-/*
- * Copyright 2018 Google LLC
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     https://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "hf/alloc.h"
-
-#include "hf/dlog.h"
-#include "hf/spinlock.h"
-
-static size_t alloc_base;
-static size_t alloc_limit;
-static struct spinlock alloc_lock = SPINLOCK_INIT;
-
-/**
- * Initializes the allocator.
- */
-void halloc_init(size_t base, size_t size)
-{
-	alloc_base = base;
-	alloc_limit = base + size;
-}
-
-/**
- * Allocates the requested amount of memory. Returns NULL when there isn't
- * enough free memory.
- */
-void *halloc(size_t size)
-{
-	return halloc_aligned(size, 2 * sizeof(size_t));
-}
-
-/**
- * Frees the provided memory.
- *
- * Currently unimplemented.
- */
-void hfree(void *ptr)
-{
-	dlog("Attempted to free pointer %p\n", ptr);
-}
-
-/**
- * Allocates the requested amount of memory, with the requested alignment.
- *
- * Alignment must be a power of two. Returns NULL when there isn't enough free
- * memory.
- */
-void *halloc_aligned(size_t size, size_t align)
-{
-	void *ret;
-
-	sl_lock(&alloc_lock);
-	ret = halloc_aligned_nosync(size, align);
-	sl_unlock(&alloc_lock);
-
-	return ret;
-}
-
-/**
- * Allocates the requested amount of memory, with the requested alignment, but
- * no synchronisation with other CPUs. The caller is responsible for serialising
- * all such calls.
- *
- * Alignment must be a power of two. Returns NULL when there isn't enough free
- * memory.
- */
-void *halloc_aligned_nosync(size_t size, size_t align)
-{
-	size_t begin;
-	size_t end;
-
-	begin = (alloc_base + align - 1) & ~(align - 1);
-	end = begin + size;
-
-	/* Check for overflows, and that there is enough free mem. */
-	if (end > begin && begin >= alloc_base && end <= alloc_limit) {
-		alloc_base = end;
-	} else {
-		begin = 0;
-	}
-
-	return (void *)begin;
-}
diff --git a/src/api.c b/src/api.c
index 1cba24f..96474a8 100644
--- a/src/api.c
+++ b/src/api.c
@@ -38,6 +38,17 @@
 	      "Currently, a page is mapped for the send and receive buffers so "
 	      "the maximum request is the size of a page.");
 
+static struct mpool api_ppool;
+
+/**
+ * Initialises the API page pool by taking ownership of the contents of the
+ * given page pool.
+ */
+void api_init(struct mpool *ppool)
+{
+	mpool_init_from(&api_ppool, ppool);
+}
+
 /**
  * Switches the physical CPU back to the corresponding vcpu of the primary VM.
  *
@@ -222,8 +233,8 @@
 	pa_recv_end = pa_add(pa_recv_begin, PAGE_SIZE);
 
 	/* Map the send page as read-only in the hypervisor address space. */
-	vm->mailbox.send =
-		mm_identity_map(pa_send_begin, pa_send_end, MM_MODE_R);
+	vm->mailbox.send = mm_identity_map(pa_send_begin, pa_send_end,
+					   MM_MODE_R, &api_ppool);
 	if (!vm->mailbox.send) {
 		ret = -1;
 		goto exit;
@@ -233,11 +244,11 @@
 	 * Map the receive page as writable in the hypervisor address space. On
 	 * failure, unmap the send page before returning.
 	 */
-	vm->mailbox.recv =
-		mm_identity_map(pa_recv_begin, pa_recv_end, MM_MODE_W);
+	vm->mailbox.recv = mm_identity_map(pa_recv_begin, pa_recv_end,
+					   MM_MODE_W, &api_ppool);
 	if (!vm->mailbox.recv) {
 		vm->mailbox.send = NULL;
-		mm_unmap(pa_send_begin, pa_send_end, 0);
+		mm_unmap(pa_send_begin, pa_send_end, 0, &api_ppool);
 		ret = -1;
 		goto exit;
 	}
diff --git a/src/fdt_handler.c b/src/fdt_handler.c
index cba2a4e..311dec7 100644
--- a/src/fdt_handler.c
+++ b/src/fdt_handler.c
@@ -192,13 +192,14 @@
 	/* TODO: Check for "reserved-memory" nodes. */
 }
 
-struct fdt_header *fdt_map(paddr_t fdt_addr, struct fdt_node *n)
+struct fdt_header *fdt_map(paddr_t fdt_addr, struct fdt_node *n,
+			   struct mpool *ppool)
 {
 	struct fdt_header *fdt;
 
 	/* Map the fdt header in. */
 	fdt = mm_identity_map(fdt_addr, pa_add(fdt_addr, fdt_header_size()),
-			      MM_MODE_R);
+			      MM_MODE_R, ppool);
 	if (!fdt) {
 		dlog("Unable to map FDT header.\n");
 		return NULL;
@@ -211,7 +212,7 @@
 
 	/* Map the rest of the fdt in. */
 	fdt = mm_identity_map(fdt_addr, pa_add(fdt_addr, fdt_total_size(fdt)),
-			      MM_MODE_R);
+			      MM_MODE_R, ppool);
 	if (!fdt) {
 		dlog("Unable to map full FDT.\n");
 		goto fail;
@@ -220,17 +221,19 @@
 	return fdt;
 
 fail:
-	mm_unmap(fdt_addr, pa_add(fdt_addr, fdt_header_size()), 0);
+	mm_unmap(fdt_addr, pa_add(fdt_addr, fdt_header_size()), 0, ppool);
 	return NULL;
 }
 
-bool fdt_unmap(struct fdt_header *fdt)
+bool fdt_unmap(struct fdt_header *fdt, struct mpool *ppool)
 {
 	paddr_t fdt_addr = pa_from_va(va_from_ptr(fdt));
-	return mm_unmap(fdt_addr, pa_add(fdt_addr, fdt_total_size(fdt)), 0);
+	return mm_unmap(fdt_addr, pa_add(fdt_addr, fdt_total_size(fdt)), 0,
+			ppool);
 }
 
-bool fdt_patch(paddr_t fdt_addr, struct boot_params_update *p)
+bool fdt_patch(paddr_t fdt_addr, struct boot_params_update *p,
+	       struct mpool *ppool)
 {
 	struct fdt_header *fdt;
 	struct fdt_node n;
@@ -239,7 +242,7 @@
 
 	/* Map the fdt header in. */
 	fdt = mm_identity_map(fdt_addr, pa_add(fdt_addr, fdt_header_size()),
-			      MM_MODE_R);
+			      MM_MODE_R, ppool);
 	if (!fdt) {
 		dlog("Unable to map FDT header.\n");
 		return false;
@@ -253,7 +256,7 @@
 	/* Map the fdt (+ a page) in r/w mode in preparation for updating it. */
 	fdt = mm_identity_map(fdt_addr,
 			      pa_add(fdt_addr, fdt_total_size(fdt) + PAGE_SIZE),
-			      MM_MODE_R | MM_MODE_W);
+			      MM_MODE_R | MM_MODE_W, ppool);
 	if (!fdt) {
 		dlog("Unable to map FDT in r/w mode.\n");
 		goto err_unmap_fdt_header;
@@ -298,13 +301,14 @@
 out_unmap_fdt:
 	/* Unmap FDT. */
 	if (!mm_unmap(fdt_addr,
-		      pa_add(fdt_addr, fdt_total_size(fdt) + PAGE_SIZE), 0)) {
+		      pa_add(fdt_addr, fdt_total_size(fdt) + PAGE_SIZE), 0,
+		      ppool)) {
 		dlog("Unable to unmap writable FDT.\n");
 		return false;
 	}
 	return ret;
 
 err_unmap_fdt_header:
-	mm_unmap(fdt_addr, pa_add(fdt_addr, fdt_header_size()), 0);
+	mm_unmap(fdt_addr, pa_add(fdt_addr, fdt_header_size()), 0, ppool);
 	return false;
 }
diff --git a/src/fdt_handler_test.cc b/src/fdt_handler_test.cc
index c544454..5477b4e 100644
--- a/src/fdt_handler_test.cc
+++ b/src/fdt_handler_test.cc
@@ -14,17 +14,16 @@
  * limitations under the License.
  */
 
-extern "C" {
-#include "hf/fdt_handler.h"
+#include <gmock/gmock.h>
 
-#include "hf/alloc.h"
+extern "C" {
 #include "hf/boot_params.h"
+#include "hf/fdt_handler.h"
+#include "hf/mpool.h"
 }
 
 #include <memory>
 
-#include <gmock/gmock.h>
-
 namespace
 {
 using ::testing::Eq;
@@ -95,19 +94,22 @@
 
 TEST(fdt, find_memory_ranges)
 {
+	struct mpool ppool;
 	std::unique_ptr<uint8_t[]> test_heap(new uint8_t[TEST_HEAP_SIZE]);
-	halloc_init((size_t)test_heap.get(), TEST_HEAP_SIZE);
-	ASSERT_TRUE(mm_init());
+
+	mpool_init(&ppool, sizeof(struct mm_page_table));
+	mpool_add_chunk(&ppool, test_heap.get(), TEST_HEAP_SIZE);
+	ASSERT_TRUE(mm_init(&ppool));
 
 	struct fdt_header *fdt;
 	struct fdt_node n;
 	struct boot_params params = {};
 
-	fdt = fdt_map(pa_init((uintpaddr_t)&test_dtb), &n);
+	fdt = fdt_map(pa_init((uintpaddr_t)&test_dtb), &n, &ppool);
 	ASSERT_THAT(fdt, NotNull());
 	ASSERT_TRUE(fdt_find_child(&n, ""));
 	fdt_find_memory_ranges(&n, &params);
-	ASSERT_TRUE(fdt_unmap(fdt));
+	ASSERT_TRUE(fdt_unmap(fdt, &ppool));
 
 	EXPECT_THAT(params.mem_ranges_count, Eq(3));
 	EXPECT_THAT(pa_addr(params.mem_ranges[0].begin), Eq(0x00000000));
diff --git a/src/load.c b/src/load.c
index 1a894d7..3334726 100644
--- a/src/load.c
+++ b/src/load.c
@@ -38,12 +38,13 @@
  * disabled. When switching to the partitions, the caching is initially disabled
  * so the data must be available without the cache.
  */
-static bool copy_to_unmapped(paddr_t to, const void *from, size_t size)
+static bool copy_to_unmapped(paddr_t to, const void *from, size_t size,
+			     struct mpool *ppool)
 {
 	paddr_t to_end = pa_add(to, size);
 	void *ptr;
 
-	ptr = mm_identity_map(to, to_end, MM_MODE_W);
+	ptr = mm_identity_map(to, to_end, MM_MODE_W, ppool);
 	if (!ptr) {
 		return false;
 	}
@@ -51,7 +52,7 @@
 	memcpy(ptr, from, size);
 	arch_mm_write_back_dcache(ptr, size);
 
-	mm_unmap(to, to_end, 0);
+	mm_unmap(to, to_end, 0, ppool);
 
 	return true;
 }
@@ -106,7 +107,7 @@
  * Loads the primary VM.
  */
 bool load_primary(const struct memiter *cpio, uintreg_t kernel_arg,
-		  struct memiter *initrd)
+		  struct memiter *initrd, struct mpool *ppool)
 {
 	struct memiter it;
 	paddr_t primary_begin = layout_primary_begin();
@@ -117,7 +118,8 @@
 	}
 
 	dlog("Copying primary to %p\n", pa_addr(primary_begin));
-	if (!copy_to_unmapped(primary_begin, it.next, it.limit - it.next)) {
+	if (!copy_to_unmapped(primary_begin, it.next, it.limit - it.next,
+			      ppool)) {
 		dlog("Unable to relocate kernel for primary vm.\n");
 		return false;
 	}
@@ -130,7 +132,7 @@
 	{
 		struct vm *vm;
 
-		if (!vm_init(MAX_CPUS, &vm)) {
+		if (!vm_init(MAX_CPUS, ppool, &vm)) {
 			dlog("Unable to initialise primary vm\n");
 			return false;
 		}
@@ -147,13 +149,13 @@
 			    pa_init(UINT64_C(1024) * 1024 * 1024 * 1024),
 			    MM_MODE_R | MM_MODE_W | MM_MODE_X |
 				    MM_MODE_NOINVALIDATE,
-			    NULL)) {
+			    NULL, ppool)) {
 			dlog("Unable to initialise memory for primary vm\n");
 			return false;
 		}
 
-		if (!mm_vm_unmap_hypervisor(&vm->ptable,
-					    MM_MODE_NOINVALIDATE)) {
+		if (!mm_vm_unmap_hypervisor(&vm->ptable, MM_MODE_NOINVALIDATE,
+					    ppool)) {
 			dlog("Unable to unmap hypervisor from primary vm\n");
 			return false;
 		}
@@ -244,7 +246,7 @@
  */
 bool load_secondary(const struct memiter *cpio,
 		    const struct boot_params *params,
-		    struct boot_params_update *update)
+		    struct boot_params_update *update, struct mpool *ppool)
 {
 	struct vm *primary;
 	struct memiter it;
@@ -313,12 +315,12 @@
 		}
 
 		if (!copy_to_unmapped(secondary_mem_begin, kernel.next,
-				      kernel.limit - kernel.next)) {
+				      kernel.limit - kernel.next, ppool)) {
 			dlog("Unable to copy kernel\n");
 			continue;
 		}
 
-		if (!vm_init(cpu, &vm)) {
+		if (!vm_init(cpu, ppool, &vm)) {
 			dlog("Unable to initialise VM\n");
 			continue;
 		}
@@ -329,21 +331,22 @@
 				   pa_add(pa_init(PL011_BASE), PAGE_SIZE),
 				   MM_MODE_R | MM_MODE_W | MM_MODE_D |
 					   MM_MODE_NOINVALIDATE,
-				   NULL);
+				   NULL, ppool);
 
 		/* Grant the VM access to the memory. */
 		if (!mm_vm_identity_map(&vm->ptable, secondary_mem_begin,
 					secondary_mem_end,
 					MM_MODE_R | MM_MODE_W | MM_MODE_X |
 						MM_MODE_NOINVALIDATE,
-					&secondary_entry)) {
+					&secondary_entry, ppool)) {
 			dlog("Unable to initialise memory\n");
 			continue;
 		}
 
 		/* Deny the primary VM access to this memory. */
 		if (!mm_vm_unmap(&primary->ptable, secondary_mem_begin,
-				 secondary_mem_end, MM_MODE_NOINVALIDATE)) {
+				 secondary_mem_end, MM_MODE_NOINVALIDATE,
+				 ppool)) {
 			dlog("Unable to unmap secondary VM from primary VM\n");
 			return false;
 		}
diff --git a/src/main.c b/src/main.c
index e800a1a..a5c1f29 100644
--- a/src/main.c
+++ b/src/main.c
@@ -18,7 +18,6 @@
 #include <stddef.h>
 #include <stdnoreturn.h>
 
-#include "hf/alloc.h"
 #include "hf/api.h"
 #include "hf/boot_params.h"
 #include "hf/cpio.h"
@@ -26,12 +25,15 @@
 #include "hf/dlog.h"
 #include "hf/load.h"
 #include "hf/mm.h"
+#include "hf/mpool.h"
 #include "hf/std.h"
 #include "hf/vm.h"
 
 #include "vmapi/hf/call.h"
 
-char ptable_buf[PAGE_SIZE * HEAP_PAGES];
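+/*
+ * The buffer backing the boot page pool. It is aligned and sized in whole
+ * page tables so every entry handed to the pool is usable as a page table.
+ */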
+alignas(sizeof(struct mm_page_table))
+	char ptable_buf[sizeof(struct mm_page_table) * HEAP_PAGES];
 
 /**
  * Blocks the hypervisor.
@@ -67,17 +69,23 @@
 	struct memiter cpio;
 	void *initrd;
 	size_t i;
+	struct mpool ppool;
 
 	dlog_nosync("Initialising hafnium\n");
 
-	cpu_module_init();
-	halloc_init((size_t)ptable_buf, sizeof(ptable_buf));
+	mpool_init(&ppool, sizeof(struct mm_page_table));
+	mpool_add_chunk(&ppool, ptable_buf, sizeof(ptable_buf));
 
-	if (!mm_init()) {
+	cpu_module_init();
+
+	if (!mm_init(&ppool)) {
 		panic("mm_init failed");
 	}
 
-	if (!plat_get_boot_params(&params)) {
+	/* Enable locks now that mm is initialised. */
+	mpool_enable_locks();
+
+	if (!plat_get_boot_params(&params, &ppool)) {
 		panic("unable to retrieve boot params");
 	}
 
@@ -92,7 +100,7 @@
 
 	/* Map initrd in, and initialise cpio parser. */
 	initrd = mm_identity_map(params.initrd_begin, params.initrd_end,
-				 MM_MODE_R);
+				 MM_MODE_R, &ppool);
 	if (!initrd) {
 		panic("unable to map initrd in");
 	}
@@ -101,7 +109,7 @@
 		     pa_addr(params.initrd_end) - pa_addr(params.initrd_begin));
 
 	/* Load all VMs. */
-	if (!load_primary(&cpio, params.kernel_arg, &primary_initrd)) {
+	if (!load_primary(&cpio, params.kernel_arg, &primary_initrd, &ppool)) {
 		panic("unable to load primary VM");
 	}
 
@@ -112,16 +120,19 @@
 	update.initrd_begin = pa_from_va(va_from_ptr(primary_initrd.next));
 	update.initrd_end = pa_from_va(va_from_ptr(primary_initrd.limit));
 	update.reserved_ranges_count = 0;
-	if (!load_secondary(&cpio, &params, &update)) {
+	if (!load_secondary(&cpio, &params, &update, &ppool)) {
 		panic("unable to load secondary VMs");
 	}
 
 	/* Prepare to run by updating bootparams as seen by primary VM. */
-	if (!plat_update_boot_params(&update)) {
+	if (!plat_update_boot_params(&update, &ppool)) {
 		panic("plat_update_boot_params failed");
 	}
 
-	mm_defrag();
+	mm_defrag(&ppool);
+
+	/* Initialise the API page pool. ppool will be empty from now on. */
+	api_init(&ppool);
 
 	dlog("Hafnium initialisation completed\n");
 }
diff --git a/src/mm.c b/src/mm.c
index 8de4732..16a5a93 100644
--- a/src/mm.c
+++ b/src/mm.c
@@ -20,7 +20,6 @@
 #include <stdatomic.h>
 #include <stdint.h>
 
-#include "hf/alloc.h"
 #include "hf/dlog.h"
 #include "hf/layout.h"
 
@@ -48,11 +47,10 @@
 /* Keep macro alignment */
 /* clang-format off */
 
-#define MAP_FLAG_NOSYNC 0x01
-#define MAP_FLAG_COMMIT 0x02
-#define MAP_FLAG_UNMAP  0x04
-#define MAP_FLAG_NOBBM  0x08
-#define MAP_FLAG_STAGE1 0x10
+#define MAP_FLAG_COMMIT 0x01
+#define MAP_FLAG_UNMAP  0x02
+#define MAP_FLAG_NOBBM  0x04
+#define MAP_FLAG_STAGE1 0x08
 
 /* clang-format on */
 
@@ -131,16 +129,16 @@
 }
 
 /**
- * Allocate a new page table.
+ * Allocates the requested number of contiguous page tables.
  */
-static struct mm_page_table *mm_alloc_page_tables(size_t count, bool nosync)
+static struct mm_page_table *mm_alloc_page_tables(size_t count,
+						  struct mpool *ppool)
 {
-	size_t size_and_align = count * sizeof(struct mm_page_table);
-	if (nosync) {
-		return halloc_aligned_nosync(size_and_align, size_and_align);
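+	/* A pool entry is exactly one page table. */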
+	if (count == 1) {
+		return mpool_alloc(ppool);
 	}
 
-	return halloc_aligned(size_and_align, size_and_align);
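+	/* Allocate count tables, aligned to a count-table boundary. */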
+	return mpool_alloc_contiguous(ppool, count, count);
 }
 
 /**
@@ -160,7 +158,7 @@
  * Frees all page-table-related memory associated with the given pte at the
  * given level, including any subtables recursively.
  */
-static void mm_free_page_pte(pte_t pte, uint8_t level)
+static void mm_free_page_pte(pte_t pte, uint8_t level, struct mpool *ppool)
 {
 	struct mm_page_table *table;
 	uint64_t i;
@@ -172,11 +170,11 @@
 	/* Recursively free any subtables. */
 	table = mm_page_table_from_pa(arch_mm_table_from_pte(pte, level));
 	for (i = 0; i < MM_PTE_PER_PAGE; ++i) {
-		mm_free_page_pte(table->entries[i], level - 1);
+		mm_free_page_pte(table->entries[i], level - 1, ppool);
 	}
 
 	/* Free the table itself. */
-	hfree(table);
+	mpool_free(ppool, table);
 }
 
 /**
@@ -187,7 +185,7 @@
  * TLBs, which may result in issues for example in cache coherency.
  */
 static void mm_replace_entry(ptable_addr_t begin, pte_t *pte, pte_t new_pte,
-			     uint8_t level, int flags)
+			     uint8_t level, int flags, struct mpool *ppool)
 {
 	pte_t v = *pte;
 
@@ -206,7 +204,7 @@
 	*pte = new_pte;
 
 	/* Free pages that aren't in use anymore. */
-	mm_free_page_pte(v, level);
+	mm_free_page_pte(v, level, ppool);
 }
 
 /**
@@ -217,7 +215,8 @@
  */
 static struct mm_page_table *mm_populate_table_pte(ptable_addr_t begin,
 						   pte_t *pte, uint8_t level,
-						   int flags)
+						   int flags,
+						   struct mpool *ppool)
 {
 	struct mm_page_table *ntable;
 	pte_t v = *pte;
@@ -232,7 +231,7 @@
 	}
 
 	/* Allocate a new table. */
-	ntable = mm_alloc_page_tables(1, flags & MAP_FLAG_NOSYNC);
+	ntable = mm_alloc_page_tables(1, ppool);
 	if (ntable == NULL) {
 		dlog("Failed to allocate memory for page table\n");
 		return NULL;
@@ -261,7 +260,7 @@
 	/* Replace the pte entry, doing a break-before-make if needed. */
 	mm_replace_entry(begin, pte,
 			 arch_mm_table_pte(level, pa_init((uintpaddr_t)ntable)),
-			 level, flags);
+			 level, flags, ppool);
 
 	return ntable;
 }
@@ -293,7 +292,7 @@
  */
 static bool mm_map_level(ptable_addr_t begin, ptable_addr_t end, paddr_t pa,
 			 uint64_t attrs, struct mm_page_table *table,
-			 uint8_t level, int flags)
+			 uint8_t level, int flags, struct mpool *ppool)
 {
 	pte_t *pte = &table->entries[mm_index(begin, level)];
 	ptable_addr_t level_end = mm_level_end(begin, level);
@@ -330,15 +329,15 @@
 					      : arch_mm_block_pte(level, pa,
 								  attrs);
 				mm_replace_entry(begin, pte, new_pte, level,
-						 flags);
+						 flags, ppool);
 			}
 		} else {
 			/*
 			 * If the entry is already a subtable get it; otherwise
 			 * replace it with an equivalent subtable and get that.
 			 */
-			struct mm_page_table *nt =
-				mm_populate_table_pte(begin, pte, level, flags);
+			struct mm_page_table *nt = mm_populate_table_pte(
+				begin, pte, level, flags, ppool);
 			if (nt == NULL) {
 				return false;
 			}
@@ -348,7 +347,7 @@
 			 * the subtable.
 			 */
 			if (!mm_map_level(begin, end, pa, attrs, nt, level - 1,
-					  flags)) {
+					  flags, ppool)) {
 				return false;
 			}
 
@@ -362,7 +361,7 @@
 			    mm_page_table_is_empty(nt, level - 1)) {
 				pte_t v = *pte;
 				*pte = arch_mm_absent_pte(level);
-				mm_free_page_pte(v, level);
+				mm_free_page_pte(v, level, ppool);
 			}
 		}
 
@@ -381,7 +380,7 @@
  */
 static bool mm_map_root(struct mm_ptable *t, ptable_addr_t begin,
 			ptable_addr_t end, uint64_t attrs, uint8_t root_level,
-			int flags)
+			int flags, struct mpool *ppool)
 {
 	size_t root_table_size = mm_entry_size(root_level);
 	struct mm_page_table *table =
@@ -389,7 +388,7 @@
 
 	while (begin < end) {
 		if (!mm_map_level(begin, end, pa_init(begin), attrs, table,
-				  root_level - 1, flags)) {
+				  root_level - 1, flags, ppool)) {
 			return false;
 		}
 		begin = mm_start_of_next_block(begin, root_table_size);
@@ -405,11 +404,11 @@
  * provided.
  */
 static bool mm_ptable_identity_update(struct mm_ptable *t, paddr_t pa_begin,
-				      paddr_t pa_end, int mode)
+				      paddr_t pa_end, int mode,
+				      struct mpool *ppool)
 {
 	uint64_t attrs = arch_mm_mode_to_attrs(mode);
-	int flags = (mode & MM_MODE_NOSYNC ? MAP_FLAG_NOSYNC : 0) |
-		    (mode & MM_MODE_NOINVALIDATE ? MAP_FLAG_NOBBM : 0) |
+	int flags = (mode & MM_MODE_NOINVALIDATE ? MAP_FLAG_NOBBM : 0) |
 		    (mode & MM_MODE_STAGE1 ? MAP_FLAG_STAGE1 : 0) |
 		    (mode & MM_MODE_INVALID && mode & MM_MODE_UNOWNED
 			     ? MAP_FLAG_UNMAP
@@ -439,9 +438,9 @@
 	 * state. In such a two-step implementation, the table may be left with
 	 * extra internal tables, but no different mapping on failure.
 	 */
-	if (!mm_map_root(t, begin, end, attrs, root_level, flags) ||
+	if (!mm_map_root(t, begin, end, attrs, root_level, flags, ppool) ||
 	    !mm_map_root(t, begin, end, attrs, root_level,
-			 flags | MAP_FLAG_COMMIT)) {
+			 flags | MAP_FLAG_COMMIT, ppool)) {
 		return false;
 	}
 
@@ -458,9 +457,10 @@
  * into the address space with the architecture-agnostic mode provided.
  */
 static bool mm_ptable_identity_map(struct mm_ptable *t, paddr_t pa_begin,
-				   paddr_t pa_end, int mode)
+				   paddr_t pa_end, int mode,
+				   struct mpool *ppool)
 {
-	return mm_ptable_identity_update(t, pa_begin, pa_end, mode);
+	return mm_ptable_identity_update(t, pa_begin, pa_end, mode, ppool);
 }
 
 /**
@@ -468,10 +468,11 @@
  * mapped into the address space.
  */
 static bool mm_ptable_unmap(struct mm_ptable *t, paddr_t pa_begin,
-			    paddr_t pa_end, int mode)
+			    paddr_t pa_end, int mode, struct mpool *ppool)
 {
 	return mm_ptable_identity_update(
-		t, pa_begin, pa_end, mode | MM_MODE_UNOWNED | MM_MODE_INVALID);
+		t, pa_begin, pa_end, mode | MM_MODE_UNOWNED | MM_MODE_INVALID,
+		ppool);
 }
 
 /**
@@ -518,7 +519,8 @@
  * absent entry with which it can be replaced. Note that `entry` will no longer
  * be valid after calling this function as the subtable will have been freed.
  */
-static pte_t mm_table_pte_to_absent(pte_t entry, uint8_t level)
+static pte_t mm_table_pte_to_absent(pte_t entry, uint8_t level,
+				    struct mpool *ppool)
 {
 	struct mm_page_table *table =
 		mm_page_table_from_pa(arch_mm_table_from_pte(entry, level));
@@ -528,7 +530,7 @@
 	 * using mm_free_page_pte) because we know by this point that it
 	 * doesn't have any subtables of its own.
 	 */
-	hfree(table);
+	mpool_free(ppool, table);
 
 	/* Replace subtable with a single absent entry. */
 	return arch_mm_absent_pte(level);
@@ -540,7 +542,8 @@
  * `entry` will no longer be valid after calling this function as the subtable
  * may have been freed.
  */
-static pte_t mm_table_pte_to_block(pte_t entry, uint8_t level)
+static pte_t mm_table_pte_to_block(pte_t entry, uint8_t level,
+				   struct mpool *ppool)
 {
 	struct mm_page_table *table;
 	uint64_t block_attrs;
@@ -553,22 +556,21 @@
 	}
 
 	table = mm_page_table_from_pa(arch_mm_table_from_pte(entry, level));
-	/*
-	 * Replace subtable with a single block, with equivalent
-	 * attributes.
-	 */
+
+	/* Replace subtable with a single block, with equivalent attributes. */
 	block_attrs = arch_mm_pte_attrs(table->entries[0], level - 1);
 	table_attrs = arch_mm_pte_attrs(entry, level);
 	combined_attrs =
 		arch_mm_combine_table_entry_attrs(table_attrs, block_attrs);
 	block_address = arch_mm_block_from_pte(table->entries[0], level - 1);
+
 	/* Free the subtable. */
-	hfree(table);
+	mpool_free(ppool, table);
+
 	/*
-	 * We can assume that the block is aligned properly
-	 * because all virtual addresses are aligned by
-	 * definition, and we have a 1-1 mapping from virtual to
-	 * physical addresses.
+	 * We can assume that the block is aligned properly because all virtual
+	 * addresses are aligned by definition, and we have a 1-1 mapping from
+	 * virtual to physical addresses.
 	 */
 	return arch_mm_block_pte(level, block_address, combined_attrs);
 }
@@ -577,7 +579,8 @@
  * Defragment the given ptable entry by recursively replacing any tables with
  * block or absent entries where possible.
  */
-static pte_t mm_ptable_defrag_entry(pte_t entry, uint8_t level)
+static pte_t mm_ptable_defrag_entry(pte_t entry, uint8_t level,
+				    struct mpool *ppool)
 {
 	struct mm_page_table *table;
 	uint64_t i;
@@ -600,8 +603,8 @@
 		/*
 		 * First try to defrag the entry, in case it is a subtable.
 		 */
-		table->entries[i] =
-			mm_ptable_defrag_entry(table->entries[i], level - 1);
+		table->entries[i] = mm_ptable_defrag_entry(table->entries[i],
+							   level - 1, ppool);
 
 		if (arch_mm_pte_is_present(table->entries[i], level - 1)) {
 			all_absent_so_far = false;
@@ -617,10 +620,10 @@
 		}
 	}
 	if (identical_blocks_so_far) {
-		return mm_table_pte_to_block(entry, level);
+		return mm_table_pte_to_block(entry, level, ppool);
 	}
 	if (all_absent_so_far) {
-		return mm_table_pte_to_absent(entry, level);
+		return mm_table_pte_to_absent(entry, level, ppool);
 	}
 	return entry;
 }
@@ -629,7 +632,7 @@
  * Defragments the given page table by converting page table references to
  * blocks whenever possible.
  */
-void mm_ptable_defrag(struct mm_ptable *t, int mode)
+void mm_ptable_defrag(struct mm_ptable *t, int mode, struct mpool *ppool)
 {
 	struct mm_page_table *tables = mm_page_table_from_pa(t->root);
 	uint8_t level = arch_mm_max_level(mode);
@@ -644,7 +647,7 @@
 	for (i = 0; i < root_table_count; ++i) {
 		for (j = 0; j < MM_PTE_PER_PAGE; ++j) {
 			tables[i].entries[j] = mm_ptable_defrag_entry(
-				tables[i].entries[j], level);
+				tables[i].entries[j], level, ppool);
 		}
 	}
 }
@@ -705,14 +708,14 @@
 /**
  * Initialises the given page table.
  */
-bool mm_ptable_init(struct mm_ptable *t, int mode)
+bool mm_ptable_init(struct mm_ptable *t, int mode, struct mpool *ppool)
 {
 	uint8_t i;
 	size_t j;
 	struct mm_page_table *tables;
 	uint8_t root_table_count = arch_mm_root_table_count(mode);
 
-	tables = mm_alloc_page_tables(root_table_count, mode & MM_MODE_NOSYNC);
+	tables = mm_alloc_page_tables(root_table_count, ppool);
 	if (tables == NULL) {
 		return false;
 	}
@@ -734,7 +737,7 @@
 /**
  * Frees all memory associated with the given page table.
  */
-void mm_ptable_fini(struct mm_ptable *t, int mode)
+void mm_ptable_fini(struct mm_ptable *t, int mode, struct mpool *ppool)
 {
 	struct mm_page_table *tables = mm_page_table_from_pa(t->root);
 	uint8_t level = arch_mm_max_level(mode);
@@ -744,11 +747,12 @@
 
 	for (i = 0; i < root_table_count; ++i) {
 		for (j = 0; j < MM_PTE_PER_PAGE; ++j) {
-			mm_free_page_pte(tables[i].entries[j], level);
+			mm_free_page_pte(tables[i].entries[j], level, ppool);
 		}
 	}
 
-	hfree(tables);
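+	/* The root tables are contiguous; return them as a single chunk. */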
+	mpool_add_chunk(ppool, tables,
+			sizeof(struct mm_page_table) * root_table_count);
 }
 
 /**
@@ -757,10 +761,10 @@
  * architecture-agnostic mode provided.
  */
 bool mm_vm_identity_map(struct mm_ptable *t, paddr_t begin, paddr_t end,
-			int mode, ipaddr_t *ipa)
+			int mode, ipaddr_t *ipa, struct mpool *ppool)
 {
-	bool success =
-		mm_ptable_identity_map(t, begin, end, mode & ~MM_MODE_STAGE1);
+	bool success = mm_ptable_identity_map(t, begin, end,
+					      mode & ~MM_MODE_STAGE1, ppool);
 
 	if (success && ipa != NULL) {
 		*ipa = ipa_from_pa(begin);
@@ -773,21 +777,24 @@
  * Updates the VM's table such that the given physical address range is not
  * mapped in the address space.
  */
-bool mm_vm_unmap(struct mm_ptable *t, paddr_t begin, paddr_t end, int mode)
+bool mm_vm_unmap(struct mm_ptable *t, paddr_t begin, paddr_t end, int mode,
+		 struct mpool *ppool)
 {
-	return mm_ptable_unmap(t, begin, end, mode & ~MM_MODE_STAGE1);
+	return mm_ptable_unmap(t, begin, end, mode & ~MM_MODE_STAGE1, ppool);
 }
 
 /**
  * Unmaps the hypervisor pages from the given page table.
  */
-bool mm_vm_unmap_hypervisor(struct mm_ptable *t, int mode)
+bool mm_vm_unmap_hypervisor(struct mm_ptable *t, int mode, struct mpool *ppool)
 {
 	/* TODO: If we add pages dynamically, they must be included here too. */
-	return mm_vm_unmap(t, layout_text_begin(), layout_text_end(), mode) &&
-	       mm_vm_unmap(t, layout_rodata_begin(), layout_rodata_end(),
-			   mode) &&
-	       mm_vm_unmap(t, layout_data_begin(), layout_data_end(), mode);
+	return mm_vm_unmap(t, layout_text_begin(), layout_text_end(), mode,
+			   ppool) &&
+	       mm_vm_unmap(t, layout_rodata_begin(), layout_rodata_end(), mode,
+			   ppool) &&
+	       mm_vm_unmap(t, layout_data_begin(), layout_data_end(), mode,
+			   ppool);
 }
 
 /**
@@ -820,10 +827,10 @@
  * is mapped into the address space at the corresponding address range in the
  * architecture-agnostic mode provided.
  */
-void *mm_identity_map(paddr_t begin, paddr_t end, int mode)
+void *mm_identity_map(paddr_t begin, paddr_t end, int mode, struct mpool *ppool)
 {
-	if (mm_ptable_identity_map(&ptable, begin, end,
-				   mode | MM_MODE_STAGE1)) {
+	if (mm_ptable_identity_map(&ptable, begin, end, mode | MM_MODE_STAGE1,
+				   ppool)) {
 		return ptr_from_va(va_from_pa(begin));
 	}
 
@@ -834,15 +841,16 @@
  * Updates the hypervisor table such that the given physical address range is
  * not mapped in the address space.
  */
-bool mm_unmap(paddr_t begin, paddr_t end, int mode)
+bool mm_unmap(paddr_t begin, paddr_t end, int mode, struct mpool *ppool)
 {
-	return mm_ptable_unmap(&ptable, begin, end, mode | MM_MODE_STAGE1);
+	return mm_ptable_unmap(&ptable, begin, end, mode | MM_MODE_STAGE1,
+			       ppool);
 }
 
 /**
  * Initialises memory management for the hypervisor itself.
  */
-bool mm_init(void)
+bool mm_init(struct mpool *ppool)
 {
 	dlog_nosync("text: 0x%x - 0x%x\n", pa_addr(layout_text_begin()),
 		    pa_addr(layout_text_end()));
@@ -851,27 +859,27 @@
 	dlog_nosync("data: 0x%x - 0x%x\n", pa_addr(layout_data_begin()),
 		    pa_addr(layout_data_end()));
 
-	if (!mm_ptable_init(&ptable, MM_MODE_NOSYNC | MM_MODE_STAGE1)) {
+	if (!mm_ptable_init(&ptable, MM_MODE_STAGE1, ppool)) {
 		dlog_nosync("Unable to allocate memory for page table.\n");
 		return false;
 	}
 
 	/* Map page for uart. */
 	/* TODO: We may not want to map this. */
-	mm_ptable_identity_map(&ptable, pa_init(PL011_BASE),
-			       pa_add(pa_init(PL011_BASE), PAGE_SIZE),
-			       MM_MODE_R | MM_MODE_W | MM_MODE_D |
-				       MM_MODE_NOSYNC | MM_MODE_STAGE1);
+	mm_ptable_identity_map(
+		&ptable, pa_init(PL011_BASE),
+		pa_add(pa_init(PL011_BASE), PAGE_SIZE),
+		MM_MODE_R | MM_MODE_W | MM_MODE_D | MM_MODE_STAGE1, ppool);
 
 	/* Map each section. */
-	mm_identity_map(layout_text_begin(), layout_text_end(),
-			MM_MODE_X | MM_MODE_NOSYNC);
+	mm_identity_map(layout_text_begin(), layout_text_end(), MM_MODE_X,
+			ppool);
 
-	mm_identity_map(layout_rodata_begin(), layout_rodata_end(),
-			MM_MODE_R | MM_MODE_NOSYNC);
+	mm_identity_map(layout_rodata_begin(), layout_rodata_end(), MM_MODE_R,
+			ppool);
 
 	mm_identity_map(layout_data_begin(), layout_data_end(),
-			MM_MODE_R | MM_MODE_W | MM_MODE_NOSYNC);
+			MM_MODE_R | MM_MODE_W, ppool);
 
 	return arch_mm_init(ptable.root, true);
 }
@@ -884,7 +892,7 @@
 /**
  * Defragments the hypervisor page table.
  */
-void mm_defrag(void)
+void mm_defrag(struct mpool *ppool)
 {
-	mm_ptable_defrag(&ptable, MM_MODE_STAGE1);
+	mm_ptable_defrag(&ptable, MM_MODE_STAGE1, ppool);
 }
diff --git a/src/mm_test.cc b/src/mm_test.cc
index 2a952dc..69164f4 100644
--- a/src/mm_test.cc
+++ b/src/mm_test.cc
@@ -14,12 +14,13 @@
  * limitations under the License.
  */
 
-extern "C" {
-#include "hf/mm.h"
+#include <gmock/gmock.h>
 
+extern "C" {
 #include "hf/arch/mm.h"
 
-#include "hf/alloc.h"
+#include "hf/mm.h"
+#include "hf/mpool.h"
 }
 
 #include <limits>
@@ -27,8 +28,6 @@
 #include <span>
 #include <vector>
 
-#include <gmock/gmock.h>
-
 namespace
 {
 using namespace ::std::placeholders;
@@ -87,10 +86,14 @@
 		 * sanitizers are more effective.
 		 */
 		test_heap = std::make_unique<uint8_t[]>(TEST_HEAP_SIZE);
-		halloc_init((size_t)test_heap.get(), TEST_HEAP_SIZE);
+		mpool_init(&ppool, sizeof(struct mm_page_table));
+		mpool_add_chunk(&ppool, test_heap.get(), TEST_HEAP_SIZE);
 	}
 
 	std::unique_ptr<uint8_t[]> test_heap;
+
+       protected:
+	struct mpool ppool;
 };
 
 /**
@@ -100,11 +103,11 @@
 {
 	constexpr int mode = MM_MODE_STAGE1;
 	struct mm_ptable ptable;
-	ASSERT_TRUE(mm_ptable_init(&ptable, mode));
+	ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));
 	EXPECT_THAT(
 		get_ptable(ptable, mode),
 		AllOf(SizeIs(1), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
-	mm_ptable_fini(&ptable, mode);
+	mm_ptable_fini(&ptable, mode, &ppool);
 }
 
 /**
@@ -114,11 +117,11 @@
 {
 	constexpr int mode = 0;
 	struct mm_ptable ptable;
-	ASSERT_TRUE(mm_ptable_init(&ptable, mode));
+	ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));
 	EXPECT_THAT(
 		get_ptable(ptable, mode),
 		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
-	mm_ptable_fini(&ptable, mode);
+	mm_ptable_fini(&ptable, mode, &ppool);
 }
 
 /**
@@ -130,9 +133,9 @@
 	const paddr_t page_begin = pa_init(0);
 	const paddr_t page_end = pa_add(page_begin, PAGE_SIZE);
 	struct mm_ptable ptable;
-	ASSERT_TRUE(mm_ptable_init(&ptable, mode));
+	ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));
 	ASSERT_TRUE(mm_vm_identity_map(&ptable, page_begin, page_end, mode,
-				       nullptr));
+				       nullptr, &ppool));
 
 	auto tables = get_ptable(ptable, mode);
 	EXPECT_THAT(tables, SizeIs(4));
@@ -160,7 +163,7 @@
 	EXPECT_THAT(pa_addr(arch_mm_block_from_pte(table_l0[0], TOP_LEVEL - 2)),
 		    Eq(pa_addr(page_begin)));
 
-	mm_ptable_fini(&ptable, mode);
+	mm_ptable_fini(&ptable, mode, &ppool);
 }
 
 /**
@@ -174,9 +177,9 @@
 	const paddr_t map_end = pa_add(map_begin, 268);
 	ipaddr_t ipa = ipa_init(-1);
 	struct mm_ptable ptable;
-	ASSERT_TRUE(mm_ptable_init(&ptable, mode));
-	ASSERT_TRUE(
-		mm_vm_identity_map(&ptable, map_begin, map_end, mode, &ipa));
+	ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));
+	ASSERT_TRUE(mm_vm_identity_map(&ptable, map_begin, map_end, mode, &ipa,
+				       &ppool));
 	EXPECT_THAT(ipa_addr(ipa), Eq(pa_addr(map_begin)));
 
 	auto tables = get_ptable(ptable, mode);
@@ -207,7 +210,7 @@
 						   TOP_LEVEL - 2)),
 		    Eq(0x200'0000'0000 - PAGE_SIZE));
 
-	mm_ptable_fini(&ptable, mode);
+	mm_ptable_fini(&ptable, mode, &ppool);
 }
 
 /**
@@ -219,9 +222,9 @@
 	const paddr_t map_begin = pa_init(0x80'0000'0000 - PAGE_SIZE);
 	const paddr_t map_end = pa_add(map_begin, 2 * PAGE_SIZE);
 	struct mm_ptable ptable;
-	ASSERT_TRUE(mm_ptable_init(&ptable, mode));
-	ASSERT_TRUE(
-		mm_vm_identity_map(&ptable, map_begin, map_end, mode, nullptr));
+	ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));
+	ASSERT_TRUE(mm_vm_identity_map(&ptable, map_begin, map_end, mode,
+				       nullptr, &ppool));
 
 	auto tables = get_ptable(ptable, mode);
 	EXPECT_THAT(tables, SizeIs(4));
@@ -270,7 +273,7 @@
 		pa_addr(arch_mm_block_from_pte(table1_l0[0], TOP_LEVEL - 2)),
 		Eq(pa_addr(pa_add(map_begin, PAGE_SIZE))));
 
-	mm_ptable_fini(&ptable, mode);
+	mm_ptable_fini(&ptable, mode, &ppool);
 }
 
 /**
@@ -280,9 +283,9 @@
 {
 	constexpr int mode = 0;
 	struct mm_ptable ptable;
-	ASSERT_TRUE(mm_ptable_init(&ptable, mode));
+	ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));
 	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
-				       nullptr));
+				       nullptr, &ppool));
 	auto tables = get_ptable(ptable, mode);
 	EXPECT_THAT(
 		tables,
@@ -297,7 +300,7 @@
 				<< "i=" << i << " j=" << j;
 		}
 	}
-	mm_ptable_fini(&ptable, mode);
+	mm_ptable_fini(&ptable, mode, &ppool);
 }
 
 /**
@@ -309,17 +312,17 @@
 	constexpr int mode = 0;
 	ipaddr_t ipa = ipa_init(-1);
 	struct mm_ptable ptable;
-	ASSERT_TRUE(mm_ptable_init(&ptable, mode));
+	ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));
 	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
-				       nullptr));
+				       nullptr, &ppool));
 	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), pa_init(PAGE_SIZE),
-				       mode, &ipa));
+				       mode, &ipa, &ppool));
 	EXPECT_THAT(ipa_addr(ipa), Eq(0));
 	EXPECT_THAT(
 		get_ptable(ptable, mode),
 		AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
 							   _1, TOP_LEVEL))))));
-	mm_ptable_fini(&ptable, mode);
+	mm_ptable_fini(&ptable, mode, &ppool);
 }
 
 /**
@@ -331,14 +334,14 @@
 	constexpr int mode = 0;
 	ipaddr_t ipa = ipa_init(-1);
 	struct mm_ptable ptable;
-	ASSERT_TRUE(mm_ptable_init(&ptable, mode));
+	ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));
 	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0x1234'5678),
-				       pa_init(0x5000), mode, &ipa));
+				       pa_init(0x5000), mode, &ipa, &ppool));
 	EXPECT_THAT(ipa_addr(ipa), Eq(0x1234'5678));
 	EXPECT_THAT(
 		get_ptable(ptable, mode),
 		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
-	mm_ptable_fini(&ptable, mode);
+	mm_ptable_fini(&ptable, mode, &ppool);
 }
 
 /**
@@ -353,12 +356,12 @@
 	constexpr int mode = 0;
 	ipaddr_t ipa = ipa_init(-1);
 	struct mm_ptable ptable;
-	ASSERT_TRUE(mm_ptable_init(&ptable, mode));
+	ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));
 	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(20), pa_init(10), mode,
-				       &ipa));
+				       &ipa, &ppool));
 	EXPECT_THAT(ipa_addr(ipa), Eq(20));
 	EXPECT_TRUE(mm_vm_is_mapped(&ptable, ipa, mode));
-	mm_ptable_fini(&ptable, mode);
+	mm_ptable_fini(&ptable, mode, &ppool);
 }
 
 /**
@@ -373,15 +376,16 @@
 	constexpr int mode = 0;
 	ipaddr_t ipa = ipa_init(-1);
 	struct mm_ptable ptable;
-	ASSERT_TRUE(mm_ptable_init(&ptable, mode));
+	ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));
 	ASSERT_TRUE(mm_vm_identity_map(
 		&ptable, pa_init(0),
-		pa_init(std::numeric_limits<uintpaddr_t>::max()), mode, &ipa));
+		pa_init(std::numeric_limits<uintpaddr_t>::max()), mode, &ipa,
+		&ppool));
 	EXPECT_THAT(ipa_addr(ipa), Eq(0));
 	EXPECT_THAT(
 		get_ptable(ptable, mode),
 		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
-	mm_ptable_fini(&ptable, mode);
+	mm_ptable_fini(&ptable, mode, &ppool);
 }
 
 /**
@@ -392,15 +396,15 @@
 {
 	constexpr int mode = 0;
 	struct mm_ptable ptable;
-	ASSERT_TRUE(mm_ptable_init(&ptable, mode));
+	ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));
 	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0),
 				       pa_init(0xf32'0000'0000'0000), mode,
-				       nullptr));
+				       nullptr, &ppool));
 	EXPECT_THAT(
 		get_ptable(ptable, mode),
 		AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
 							   _1, TOP_LEVEL))))));
-	mm_ptable_fini(&ptable, mode);
+	mm_ptable_fini(&ptable, mode, &ppool);
 }
 
 /**
@@ -412,14 +416,15 @@
 	constexpr int mode = 0;
 	ipaddr_t ipa = ipa_init(-1);
 	struct mm_ptable ptable;
-	ASSERT_TRUE(mm_ptable_init(&ptable, mode));
-	ASSERT_TRUE(mm_vm_identity_map(
-		&ptable, VM_MEM_END, pa_init(0xf0'0000'0000'0000), mode, &ipa));
+	ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));
+	ASSERT_TRUE(mm_vm_identity_map(&ptable, VM_MEM_END,
+				       pa_init(0xf0'0000'0000'0000), mode, &ipa,
+				       &ppool));
 	EXPECT_THAT(ipa_addr(ipa), Eq(pa_addr(VM_MEM_END)));
 	EXPECT_THAT(
 		get_ptable(ptable, mode),
 		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
-	mm_ptable_fini(&ptable, 0);
+	mm_ptable_fini(&ptable, 0, &ppool);
 }
 
 /**
@@ -432,16 +437,16 @@
 	const paddr_t page_begin = pa_init(34567 * PAGE_SIZE);
 	const paddr_t page_end = pa_add(page_begin, PAGE_SIZE);
 	struct mm_ptable ptable;
-	ASSERT_TRUE(mm_ptable_init(&ptable, mode));
+	ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));
 	ASSERT_TRUE(mm_vm_identity_map(&ptable, page_begin, page_end, mode,
-				       nullptr));
+				       nullptr, &ppool));
 	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
-				       nullptr));
+				       nullptr, &ppool));
 	EXPECT_THAT(
 		get_ptable(ptable, mode),
 		AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
 							   _1, TOP_LEVEL))))));
-	mm_ptable_fini(&ptable, mode);
+	mm_ptable_fini(&ptable, mode, &ppool);
 }
 
 /**
@@ -454,12 +459,12 @@
 	const paddr_t page_begin = pa_init(12000 * PAGE_SIZE);
 	const paddr_t page_end = pa_add(page_begin, PAGE_SIZE);
 	struct mm_ptable ptable;
-	ASSERT_TRUE(mm_ptable_init(&ptable, mode));
+	ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));
 	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
-				       nullptr));
-	ASSERT_TRUE(mm_vm_unmap(&ptable, page_begin, page_end, mode));
+				       nullptr, &ppool));
+	ASSERT_TRUE(mm_vm_unmap(&ptable, page_begin, page_end, mode, &ppool));
 	ASSERT_TRUE(mm_vm_identity_map(&ptable, page_begin, page_end, mode,
-				       nullptr));
+				       nullptr, &ppool));
 	EXPECT_THAT(get_ptable(ptable, mode),
 		    AllOf(SizeIs(4),
 			  Each(Each(Truly(std::bind(arch_mm_pte_is_present, _1,
@@ -468,7 +473,7 @@
 				  arch_mm_pte_is_block, _1, TOP_LEVEL)))),
 			  Contains(Contains(Truly(std::bind(
 				  arch_mm_pte_is_table, _1, TOP_LEVEL))))));
-	mm_ptable_fini(&ptable, mode);
+	mm_ptable_fini(&ptable, mode, &ppool);
 }
 
 /**
@@ -478,12 +483,12 @@
 {
 	constexpr int mode = 0;
 	struct mm_ptable ptable;
-	ASSERT_TRUE(mm_ptable_init(&ptable, mode));
-	EXPECT_TRUE(mm_vm_unmap_hypervisor(&ptable, mode));
+	ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));
+	EXPECT_TRUE(mm_vm_unmap_hypervisor(&ptable, mode, &ppool));
 	EXPECT_THAT(
 		get_ptable(ptable, mode),
 		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
-	mm_ptable_fini(&ptable, mode);
+	mm_ptable_fini(&ptable, mode, &ppool);
 }
 
 /**
@@ -493,13 +498,13 @@
 {
 	constexpr int mode = 0;
 	struct mm_ptable ptable;
-	ASSERT_TRUE(mm_ptable_init(&ptable, mode));
-	EXPECT_TRUE(
-		mm_vm_unmap(&ptable, pa_init(12345), pa_init(987652), mode));
+	ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));
+	EXPECT_TRUE(mm_vm_unmap(&ptable, pa_init(12345), pa_init(987652), mode,
+				&ppool));
 	EXPECT_THAT(
 		get_ptable(ptable, mode),
 		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
-	mm_ptable_fini(&ptable, mode);
+	mm_ptable_fini(&ptable, mode, &ppool);
 }
 
 /**
@@ -513,16 +518,16 @@
 	const paddr_t l1_begin = pa_init(3 * mm_entry_size(1));
 	const paddr_t l1_end = pa_add(l1_begin, 5 * mm_entry_size(1));
 	struct mm_ptable ptable;
-	ASSERT_TRUE(mm_ptable_init(&ptable, mode));
-	ASSERT_TRUE(
-		mm_vm_identity_map(&ptable, l0_begin, l0_end, mode, nullptr));
-	ASSERT_TRUE(
-		mm_vm_identity_map(&ptable, l1_begin, l1_end, mode, nullptr));
-	EXPECT_TRUE(mm_vm_unmap(&ptable, pa_init(0), VM_MEM_END, mode));
+	ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));
+	ASSERT_TRUE(mm_vm_identity_map(&ptable, l0_begin, l0_end, mode, nullptr,
+				       &ppool));
+	ASSERT_TRUE(mm_vm_identity_map(&ptable, l1_begin, l1_end, mode, nullptr,
+				       &ppool));
+	EXPECT_TRUE(mm_vm_unmap(&ptable, pa_init(0), VM_MEM_END, mode, &ppool));
 	EXPECT_THAT(
 		get_ptable(ptable, mode),
 		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
-	mm_ptable_fini(&ptable, mode);
+	mm_ptable_fini(&ptable, mode, &ppool);
 }
 
 /**
@@ -534,15 +539,15 @@
 	const paddr_t map_begin = pa_init(0x160'0000'0000 + PAGE_SIZE);
 	const paddr_t map_end = pa_add(map_begin, PAGE_SIZE);
 	struct mm_ptable ptable;
-	ASSERT_TRUE(mm_ptable_init(&ptable, mode));
-	ASSERT_TRUE(
-		mm_vm_identity_map(&ptable, map_begin, map_end, mode, nullptr));
+	ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));
+	ASSERT_TRUE(mm_vm_identity_map(&ptable, map_begin, map_end, mode,
+				       nullptr, &ppool));
 	ASSERT_TRUE(mm_vm_unmap(&ptable, pa_add(map_begin, 93),
-				pa_add(map_begin, 99), mode));
+				pa_add(map_begin, 99), mode, &ppool));
 	EXPECT_THAT(
 		get_ptable(ptable, mode),
 		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
-	mm_ptable_fini(&ptable, mode);
+	mm_ptable_fini(&ptable, mode, &ppool);
 }
 
 /**
@@ -554,14 +559,14 @@
 	const paddr_t map_begin = pa_init(0x180'0000'0000 - PAGE_SIZE);
 	const paddr_t map_end = pa_add(map_begin, 2 * PAGE_SIZE);
 	struct mm_ptable ptable;
-	ASSERT_TRUE(mm_ptable_init(&ptable, mode));
-	ASSERT_TRUE(
-		mm_vm_identity_map(&ptable, map_begin, map_end, mode, nullptr));
-	ASSERT_TRUE(mm_vm_unmap(&ptable, map_begin, map_end, mode));
+	ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));
+	ASSERT_TRUE(mm_vm_identity_map(&ptable, map_begin, map_end, mode,
+				       nullptr, &ppool));
+	ASSERT_TRUE(mm_vm_unmap(&ptable, map_begin, map_end, mode, &ppool));
 	EXPECT_THAT(
 		get_ptable(ptable, mode),
 		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
-	mm_ptable_fini(&ptable, mode);
+	mm_ptable_fini(&ptable, mode, &ppool);
 }
 
 /**
@@ -571,16 +576,16 @@
 {
 	constexpr int mode = 0;
 	struct mm_ptable ptable;
-	ASSERT_TRUE(mm_ptable_init(&ptable, mode));
+	ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));
 	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
-				       nullptr));
+				       nullptr, &ppool));
 	ASSERT_TRUE(mm_vm_unmap(&ptable, VM_MEM_END, pa_init(0x4000'0000'0000),
-				mode));
+				mode, &ppool));
 	EXPECT_THAT(
 		get_ptable(ptable, mode),
 		AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
 							   _1, TOP_LEVEL))))));
-	mm_ptable_fini(&ptable, mode);
+	mm_ptable_fini(&ptable, mode, &ppool);
 }
 
 /**
@@ -591,16 +596,16 @@
 {
 	constexpr int mode = 0;
 	struct mm_ptable ptable;
-	ASSERT_TRUE(mm_ptable_init(&ptable, mode));
+	ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));
 	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
-				       nullptr));
+				       nullptr, &ppool));
 	ASSERT_TRUE(mm_vm_unmap(&ptable, pa_init(0x80'a000'0000), pa_init(27),
-				mode));
+				mode, &ppool));
 	EXPECT_THAT(
 		get_ptable(ptable, mode),
 		AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
 							   _1, TOP_LEVEL))))));
-	mm_ptable_fini(&ptable, mode);
+	mm_ptable_fini(&ptable, mode, &ppool);
 }
 
 /**
@@ -616,15 +621,15 @@
 	const paddr_t page_begin = pa_init(0x180'0000'0000);
 	const paddr_t page_end = pa_add(page_begin, PAGE_SIZE);
 	struct mm_ptable ptable;
-	ASSERT_TRUE(mm_ptable_init(&ptable, mode));
+	ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));
 	ASSERT_TRUE(mm_vm_identity_map(&ptable, page_begin, page_end, mode,
-				       nullptr));
+				       nullptr, &ppool));
 	ASSERT_TRUE(mm_vm_unmap(&ptable, pa_add(page_begin, 100),
-				pa_add(page_begin, 50), mode));
+				pa_add(page_begin, 50), mode, &ppool));
 	EXPECT_THAT(
 		get_ptable(ptable, mode),
 		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
-	mm_ptable_fini(&ptable, mode);
+	mm_ptable_fini(&ptable, mode, &ppool);
 }
 
 /**
@@ -638,17 +643,18 @@
 {
 	constexpr int mode = 0;
 	struct mm_ptable ptable;
-	ASSERT_TRUE(mm_ptable_init(&ptable, mode));
+	ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));
 	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
-				       nullptr));
-	ASSERT_TRUE(mm_vm_unmap(
-		&ptable, pa_init(0),
-		pa_init(std::numeric_limits<uintpaddr_t>::max()), mode));
+				       nullptr, &ppool));
+	ASSERT_TRUE(
+		mm_vm_unmap(&ptable, pa_init(0),
+			    pa_init(std::numeric_limits<uintpaddr_t>::max()),
+			    mode, &ppool));
 	EXPECT_THAT(
 		get_ptable(ptable, mode),
 		AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
 							   _1, TOP_LEVEL))))));
-	mm_ptable_fini(&ptable, mode);
+	mm_ptable_fini(&ptable, mode, &ppool);
 }
 
 /**
@@ -662,17 +668,17 @@
 	const paddr_t l1_begin = pa_init(666 * mm_entry_size(1));
 	const paddr_t l1_end = pa_add(l1_begin, 5 * mm_entry_size(1));
 	struct mm_ptable ptable;
-	ASSERT_TRUE(mm_ptable_init(&ptable, mode));
-	ASSERT_TRUE(
-		mm_vm_identity_map(&ptable, l0_begin, l0_end, mode, nullptr));
-	ASSERT_TRUE(
-		mm_vm_identity_map(&ptable, l1_begin, l1_end, mode, nullptr));
-	ASSERT_TRUE(mm_vm_unmap(&ptable, l0_begin, l0_end, mode));
-	ASSERT_TRUE(mm_vm_unmap(&ptable, l1_begin, l1_end, mode));
+	ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));
+	ASSERT_TRUE(mm_vm_identity_map(&ptable, l0_begin, l0_end, mode, nullptr,
+				       &ppool));
+	ASSERT_TRUE(mm_vm_identity_map(&ptable, l1_begin, l1_end, mode, nullptr,
+				       &ppool));
+	ASSERT_TRUE(mm_vm_unmap(&ptable, l0_begin, l0_end, mode, &ppool));
+	ASSERT_TRUE(mm_vm_unmap(&ptable, l1_begin, l1_end, mode, &ppool));
 	EXPECT_THAT(
 		get_ptable(ptable, mode),
 		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
-	mm_ptable_fini(&ptable, MM_MODE_STAGE1);
+	mm_ptable_fini(&ptable, MM_MODE_STAGE1, &ppool);
 }
 
 /**
@@ -682,11 +688,11 @@
 {
 	constexpr int mode = 0;
 	struct mm_ptable ptable;
-	ASSERT_TRUE(mm_ptable_init(&ptable, mode));
+	ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));
 	EXPECT_FALSE(mm_vm_is_mapped(&ptable, ipa_init(0), mode));
 	EXPECT_FALSE(mm_vm_is_mapped(&ptable, ipa_init(0x8123'2344), mode));
 	EXPECT_FALSE(mm_vm_is_mapped(&ptable, ipa_init(0x1e0'0000'0073), mode));
-	mm_ptable_fini(&ptable, mode);
+	mm_ptable_fini(&ptable, mode, &ppool);
 }
 
 /**
@@ -696,13 +702,13 @@
 {
 	constexpr int mode = 0;
 	struct mm_ptable ptable;
-	ASSERT_TRUE(mm_ptable_init(&ptable, mode));
+	ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));
 	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
-				       nullptr));
+				       nullptr, &ppool));
 	EXPECT_TRUE(mm_vm_is_mapped(&ptable, ipa_init(0), mode));
 	EXPECT_TRUE(mm_vm_is_mapped(&ptable, ipa_init(0xf247'a7b3), mode));
 	EXPECT_TRUE(mm_vm_is_mapped(&ptable, ipa_init(0x1ff'7bfa'983b), mode));
-	mm_ptable_fini(&ptable, mode);
+	mm_ptable_fini(&ptable, mode, &ppool);
 }
 
 /**
@@ -714,14 +720,14 @@
 	const paddr_t page_begin = pa_init(0x100'0000'0000);
 	const paddr_t page_end = pa_add(page_begin, PAGE_SIZE);
 	struct mm_ptable ptable;
-	ASSERT_TRUE(mm_ptable_init(&ptable, mode));
+	ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));
 	ASSERT_TRUE(mm_vm_identity_map(&ptable, page_begin, page_end, mode,
-				       nullptr));
+				       nullptr, &ppool));
 	EXPECT_TRUE(mm_vm_is_mapped(&ptable, ipa_from_pa(page_begin), mode));
 	EXPECT_TRUE(mm_vm_is_mapped(
 		&ptable, ipa_from_pa(pa_add(page_begin, 127)), mode));
 	EXPECT_FALSE(mm_vm_is_mapped(&ptable, ipa_from_pa(page_end), mode));
-	mm_ptable_fini(&ptable, mode);
+	mm_ptable_fini(&ptable, mode, &ppool);
 }
 
 /**
@@ -731,16 +737,16 @@
 {
 	constexpr int mode = 0;
 	struct mm_ptable ptable;
-	ASSERT_TRUE(mm_ptable_init(&ptable, mode));
+	ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));
 	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
-				       nullptr));
+				       nullptr, &ppool));
 	EXPECT_FALSE(mm_vm_is_mapped(&ptable, ipa_from_pa(VM_MEM_END), mode));
 	EXPECT_FALSE(
 		mm_vm_is_mapped(&ptable, ipa_init(0x1000'adb7'8123), mode));
 	EXPECT_FALSE(mm_vm_is_mapped(
 		&ptable, ipa_init(std::numeric_limits<uintpaddr_t>::max()),
 		mode));
-	mm_ptable_fini(&ptable, mode);
+	mm_ptable_fini(&ptable, mode, &ppool);
 }
 
 /**
@@ -750,12 +756,12 @@
 {
 	constexpr int mode = 0;
 	struct mm_ptable ptable;
-	ASSERT_TRUE(mm_ptable_init(&ptable, mode));
-	mm_ptable_defrag(&ptable, mode);
+	ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));
+	mm_ptable_defrag(&ptable, mode, &ppool);
 	EXPECT_THAT(
 		get_ptable(ptable, mode),
 		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
-	mm_ptable_fini(&ptable, mode);
+	mm_ptable_fini(&ptable, mode, &ppool);
 }
 
 /**
@@ -770,18 +776,18 @@
 	const paddr_t l1_begin = pa_init(3 * mm_entry_size(1));
 	const paddr_t l1_end = pa_add(l1_begin, 5 * mm_entry_size(1));
 	struct mm_ptable ptable;
-	ASSERT_TRUE(mm_ptable_init(&ptable, mode));
-	ASSERT_TRUE(
-		mm_vm_identity_map(&ptable, l0_begin, l0_end, mode, nullptr));
-	ASSERT_TRUE(
-		mm_vm_identity_map(&ptable, l0_begin, l0_end, mode, nullptr));
-	ASSERT_TRUE(mm_vm_unmap(&ptable, l0_begin, l0_end, mode));
-	ASSERT_TRUE(mm_vm_unmap(&ptable, l1_begin, l1_end, mode));
-	mm_ptable_defrag(&ptable, 0);
+	ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));
+	ASSERT_TRUE(mm_vm_identity_map(&ptable, l0_begin, l0_end, mode, nullptr,
+				       &ppool));
+	ASSERT_TRUE(mm_vm_identity_map(&ptable, l0_begin, l0_end, mode, nullptr,
+				       &ppool));
+	ASSERT_TRUE(mm_vm_unmap(&ptable, l0_begin, l0_end, mode, &ppool));
+	ASSERT_TRUE(mm_vm_unmap(&ptable, l1_begin, l1_end, mode, &ppool));
+	mm_ptable_defrag(&ptable, 0, &ppool);
 	EXPECT_THAT(
 		get_ptable(ptable, mode),
 		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
-	mm_ptable_fini(&ptable, mode);
+	mm_ptable_fini(&ptable, mode, &ppool);
 }
 
 /**
@@ -795,18 +801,20 @@
 	const paddr_t middle = pa_add(begin, 67 * PAGE_SIZE);
 	const paddr_t end = pa_add(begin, 4 * mm_entry_size(1));
 	struct mm_ptable ptable;
-	ASSERT_TRUE(mm_ptable_init(&ptable, mode));
+	ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));
 	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
-				       nullptr));
-	ASSERT_TRUE(mm_vm_unmap(&ptable, begin, end, mode));
-	ASSERT_TRUE(mm_vm_identity_map(&ptable, begin, middle, mode, nullptr));
-	ASSERT_TRUE(mm_vm_identity_map(&ptable, middle, end, mode, nullptr));
-	mm_ptable_defrag(&ptable, 0);
+				       nullptr, &ppool));
+	ASSERT_TRUE(mm_vm_unmap(&ptable, begin, end, mode, &ppool));
+	ASSERT_TRUE(mm_vm_identity_map(&ptable, begin, middle, mode, nullptr,
+				       &ppool));
+	ASSERT_TRUE(mm_vm_identity_map(&ptable, middle, end, mode, nullptr,
+				       &ppool));
+	mm_ptable_defrag(&ptable, 0, &ppool);
 	EXPECT_THAT(
 		get_ptable(ptable, mode),
 		AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
 							   _1, TOP_LEVEL))))));
-	mm_ptable_fini(&ptable, mode);
+	mm_ptable_fini(&ptable, mode, &ppool);
 }
 
 } /* namespace */
diff --git a/src/plat.c b/src/plat.c
index c6c1018..d7d0566 100644
--- a/src/plat.c
+++ b/src/plat.c
@@ -38,8 +38,10 @@
  * by the loader.
  */
 #pragma weak plat_get_initrd_range
-void plat_get_initrd_range(paddr_t *begin, paddr_t *end)
+void plat_get_initrd_range(paddr_t *begin, paddr_t *end, struct mpool *ppool)
 {
+	(void)ppool;
+
 	*begin = layout_initrd_begin();
 	*end = layout_initrd_end();
 }
@@ -61,17 +63,17 @@
  * initrd is provided separately.
  */
 #pragma weak plat_get_boot_params
-bool plat_get_boot_params(struct boot_params *p)
+bool plat_get_boot_params(struct boot_params *p, struct mpool *ppool)
 {
 	struct fdt_header *fdt;
 	struct fdt_node n;
 	bool ret = false;
 
-	plat_get_initrd_range(&p->initrd_begin, &p->initrd_end);
+	plat_get_initrd_range(&p->initrd_begin, &p->initrd_end, ppool);
 	p->kernel_arg = plat_get_kernel_arg();
 
 	/* Get the memory map from the FDT. */
-	fdt = fdt_map(plat_get_fdt_addr(), &n);
+	fdt = fdt_map(plat_get_fdt_addr(), &n, ppool);
 	if (!fdt) {
 		return false;
 	}
@@ -87,7 +89,7 @@
 	ret = true;
 
 out_unmap_fdt:
-	if (!fdt_unmap(fdt)) {
+	if (!fdt_unmap(fdt, ppool)) {
 		dlog("Unable to unmap fdt.");
 		return false;
 	}
@@ -106,7 +108,7 @@
  * another loader can load the data for it.
  */
 #pragma weak plat_update_boot_params
-bool plat_update_boot_params(struct boot_params_update *p)
+bool plat_update_boot_params(struct boot_params_update *p, struct mpool *ppool)
 {
-	return fdt_patch(plat_get_fdt_addr(), p);
+	return fdt_patch(plat_get_fdt_addr(), p, ppool);
 }
diff --git a/src/vm.c b/src/vm.c
index 824f1b7..f1fcbb6 100644
--- a/src/vm.c
+++ b/src/vm.c
@@ -25,7 +25,7 @@
 static struct vm vms[MAX_VMS];
 static uint32_t vm_count;
 
-bool vm_init(uint32_t vcpu_count, struct vm **new_vm)
+bool vm_init(uint32_t vcpu_count, struct mpool *ppool, struct vm **new_vm)
 {
 	uint32_t i;
 	struct vm *vm;
@@ -50,7 +50,7 @@
 	++vm_count;
 	*new_vm = vm;
 
-	return mm_ptable_init(&vm->ptable, 0);
+	return mm_ptable_init(&vm->ptable, 0, ppool);
 }
 
 uint32_t vm_get_count(void)