Put return parameter last and add more documentation.

Passing the page pool before the optional ipa out-parameter makes the
parallel between mm_vm_identity_prepare and mm_vm_identity_commit
clearer: commit now takes the same arguments as prepare, with the
optional ipa appended at the end.
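
For illustration, a minimal sketch of the new calling convention,
assuming t, begin, end, mode and ppool are already set up by the
caller (placeholder names, not taken from this change):

        ipaddr_t ipa;

        if (mm_vm_identity_prepare(t, begin, end, mode, ppool)) {
                mm_vm_identity_commit(t, begin, end, mode, ppool, &ipa);
        }

        /* Single-shot form; pass NULL when the IPA is not needed. */
        if (!mm_vm_identity_map(t, begin, end, mode, ppool, NULL)) {
                /* Mapping failed; no changes were made. */
        }

        /* Defragment after a series of updates, as documented below. */
        mm_vm_defrag(t, ppool);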

Change-Id: Ieee0065ee1dd66648a85b03940a865ca84c06dc7
diff --git a/inc/hf/mm.h b/inc/hf/mm.h
index 69fa78a..1a0bfd6 100644
--- a/inc/hf/mm.h
+++ b/inc/hf/mm.h
@@ -108,11 +108,11 @@
 bool mm_vm_init(struct mm_ptable *t, struct mpool *ppool);
 void mm_vm_fini(struct mm_ptable *t, struct mpool *ppool);
 bool mm_vm_identity_map(struct mm_ptable *t, paddr_t begin, paddr_t end,
-			uint32_t mode, ipaddr_t *ipa, struct mpool *ppool);
+			uint32_t mode, struct mpool *ppool, ipaddr_t *ipa);
 bool mm_vm_identity_prepare(struct mm_ptable *t, paddr_t begin, paddr_t end,
 			    uint32_t mode, struct mpool *ppool);
 void mm_vm_identity_commit(struct mm_ptable *t, paddr_t begin, paddr_t end,
-			   uint32_t mode, ipaddr_t *ipa, struct mpool *ppool);
+			   uint32_t mode, struct mpool *ppool, ipaddr_t *ipa);
 bool mm_vm_unmap(struct mm_ptable *t, paddr_t begin, paddr_t end,
 		 struct mpool *ppool);
 bool mm_vm_unmap_hypervisor(struct mm_ptable *t, struct mpool *ppool);
diff --git a/src/api.c b/src/api.c
index 19cbbd1..163f3dd 100644
--- a/src/api.c
+++ b/src/api.c
@@ -717,14 +717,14 @@
 	if (!mm_vm_identity_map(
 		    &vm_locked.vm->ptable, pa_send_begin, pa_send_end,
 		    MM_MODE_UNOWNED | MM_MODE_SHARED | MM_MODE_R | MM_MODE_W,
-		    NULL, &local_page_pool)) {
+		    &local_page_pool, NULL)) {
 		goto fail;
 	}
 
 	if (!mm_vm_identity_map(&vm_locked.vm->ptable, pa_recv_begin,
 				pa_recv_end,
 				MM_MODE_UNOWNED | MM_MODE_SHARED | MM_MODE_R,
-				NULL, &local_page_pool)) {
+				&local_page_pool, NULL)) {
 		/* TODO: partial defrag of failed range. */
 		/* Recover any memory consumed in failed mapping. */
 		mm_vm_defrag(&vm_locked.vm->ptable, &local_page_pool);
@@ -746,13 +746,13 @@
 	 */
 fail_undo_send_and_recv:
 	CHECK(mm_vm_identity_map(&vm_locked.vm->ptable, pa_recv_begin,
-				 pa_recv_end, orig_recv_mode, NULL,
-				 &local_page_pool));
+				 pa_recv_end, orig_recv_mode, &local_page_pool,
+				 NULL));
 
 fail_undo_send:
 	CHECK(mm_vm_identity_map(&vm_locked.vm->ptable, pa_send_begin,
-				 pa_send_end, orig_send_mode, NULL,
-				 &local_page_pool));
+				 pa_send_end, orig_send_mode, &local_page_pool,
+				 NULL));
 
 fail:
 	ret = false;
@@ -1539,7 +1539,7 @@
 	 * the recipient.
 	 */
 	if (!mm_vm_identity_map(&from->ptable, pa_begin, pa_end, from_mode,
-				NULL, &local_page_pool)) {
+				&local_page_pool, NULL)) {
 		ret = spci_error(SPCI_NO_MEMORY);
 		goto out;
 	}
@@ -1551,15 +1551,15 @@
 
 		/* Return memory to the sender. */
 		CHECK(mm_vm_identity_map(&from->ptable, pa_begin, pa_end,
-					 orig_from_mode, NULL,
-					 &local_page_pool));
+					 orig_from_mode, &local_page_pool,
+					 NULL));
 
 		goto out;
 	}
 
 	/* Complete the transfer by mapping the memory into the recipient. */
-	if (!mm_vm_identity_map(&to->ptable, pa_begin, pa_end, to_mode, NULL,
-				&local_page_pool)) {
+	if (!mm_vm_identity_map(&to->ptable, pa_begin, pa_end, to_mode,
+				&local_page_pool, NULL)) {
 		/* TODO: partial defrag of failed range. */
 		/* Recover any memory consumed in failed mapping. */
 		mm_vm_defrag(&from->ptable, &local_page_pool);
@@ -1567,8 +1567,8 @@
 		ret = spci_error(SPCI_NO_MEMORY);
 
 		CHECK(mm_vm_identity_map(&from->ptable, pa_begin, pa_end,
-					 orig_from_mode, NULL,
-					 &local_page_pool));
+					 orig_from_mode, &local_page_pool,
+					 NULL));
 
 		goto out;
 	}
diff --git a/src/load.c b/src/load.c
index 5c7fa2a..bd4879d 100644
--- a/src/load.c
+++ b/src/load.c
@@ -149,8 +149,8 @@
 	 */
 	if (!mm_vm_identity_map(&vm->ptable, pa_init(0),
 				pa_init(UINT64_C(1024) * 1024 * 1024 * 1024),
-				MM_MODE_R | MM_MODE_W | MM_MODE_D, NULL,
-				ppool)) {
+				MM_MODE_R | MM_MODE_W | MM_MODE_D, ppool,
+				NULL)) {
 		dlog("Unable to initialise address space for primary vm\n");
 		return false;
 	}
@@ -160,7 +160,7 @@
 		if (!mm_vm_identity_map(
 			    &vm->ptable, params->mem_ranges[i].begin,
 			    params->mem_ranges[i].end,
-			    MM_MODE_R | MM_MODE_W | MM_MODE_X, NULL, ppool)) {
+			    MM_MODE_R | MM_MODE_W | MM_MODE_X, ppool, NULL)) {
 			dlog("Unable to initialise memory for primary vm\n");
 			return false;
 		}
@@ -207,8 +207,8 @@
 
 	/* Grant the VM access to the memory. */
 	if (!mm_vm_identity_map(&vm->ptable, mem_begin, mem_end,
-				MM_MODE_R | MM_MODE_W | MM_MODE_X,
-				&secondary_entry, ppool)) {
+				MM_MODE_R | MM_MODE_W | MM_MODE_X, ppool,
+				&secondary_entry)) {
 		dlog("Unable to initialise memory.\n");
 		return false;
 	}
diff --git a/src/mm.c b/src/mm.c
index 30f4ca7..7fd2151 100644
--- a/src/mm.c
+++ b/src/mm.c
@@ -851,6 +851,8 @@
  * See `mm_ptable_identity_prepare`.
  *
  * This must be called before `mm_vm_identity_commit` for the same mapping.
+ *
+ * Returns true on success, or false if the update would fail.
  */
 bool mm_vm_identity_prepare(struct mm_ptable *t, paddr_t begin, paddr_t end,
 			    uint32_t mode, struct mpool *ppool)
@@ -868,7 +870,7 @@
  * `mm_vm_identity_prepare` must be called before this for the same mapping.
  */
 void mm_vm_identity_commit(struct mm_ptable *t, paddr_t begin, paddr_t end,
-			   uint32_t mode, ipaddr_t *ipa, struct mpool *ppool)
+			   uint32_t mode, struct mpool *ppool, ipaddr_t *ipa)
 {
 	int flags = mm_mode_to_flags(mode);
 
@@ -885,9 +887,17 @@
  * Updates a VM's page table such that the given physical address range is
  * mapped in the address space at the corresponding address range in the
  * architecture-agnostic mode provided.
+ *
+ * mm_vm_defrag should always be called after a series of page table updates,
+ * whether they succeed or fail. This is because on failure extra page table
+ * entries may have been allocated and then not used, while on success it may be
+ * possible to compact the page table by merging several entries into a block.
+ *
+ * Returns true on success, or false if the update failed and no changes were
+ * made.
  */
 bool mm_vm_identity_map(struct mm_ptable *t, paddr_t begin, paddr_t end,
-			uint32_t mode, ipaddr_t *ipa, struct mpool *ppool)
+			uint32_t mode, struct mpool *ppool, ipaddr_t *ipa)
 {
 	int flags = mm_mode_to_flags(mode);
 	bool success = mm_ptable_identity_update(
@@ -910,7 +920,7 @@
 {
 	uint32_t mode = MM_MODE_UNMAPPED_MASK;
 
-	return mm_vm_identity_map(t, begin, end, mode, NULL, ppool);
+	return mm_vm_identity_map(t, begin, end, mode, ppool, NULL);
 }
 
 /**
diff --git a/src/mm_test.cc b/src/mm_test.cc
index 5989dfc..839a6e7 100644
--- a/src/mm_test.cc
+++ b/src/mm_test.cc
@@ -144,7 +144,7 @@
 	struct mm_ptable ptable;
 	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
 	ASSERT_TRUE(mm_vm_identity_map(&ptable, page_begin, page_end, mode,
-				       nullptr, &ppool));
+				       &ppool, nullptr));
 
 	auto tables = get_ptable(ptable);
 	EXPECT_THAT(tables, SizeIs(4));
@@ -187,8 +187,8 @@
 	ipaddr_t ipa = ipa_init(-1);
 	struct mm_ptable ptable;
 	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
-	ASSERT_TRUE(mm_vm_identity_map(&ptable, map_begin, map_end, mode, &ipa,
-				       &ppool));
+	ASSERT_TRUE(mm_vm_identity_map(&ptable, map_begin, map_end, mode,
+				       &ppool, &ipa));
 	EXPECT_THAT(ipa_addr(ipa), Eq(pa_addr(map_begin)));
 
 	auto tables = get_ptable(ptable);
@@ -233,7 +233,7 @@
 	struct mm_ptable ptable;
 	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
 	ASSERT_TRUE(mm_vm_identity_map(&ptable, map_begin, map_end, mode,
-				       nullptr, &ppool));
+				       &ppool, nullptr));
 
 	auto tables = get_ptable(ptable);
 	EXPECT_THAT(tables, SizeIs(4));
@@ -294,7 +294,7 @@
 	struct mm_ptable ptable;
 	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
 	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
-				       nullptr, &ppool));
+				       &ppool, nullptr));
 	auto tables = get_ptable(ptable);
 	EXPECT_THAT(
 		tables,
@@ -323,9 +323,9 @@
 	struct mm_ptable ptable;
 	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
 	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
-				       nullptr, &ppool));
+				       &ppool, nullptr));
 	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), pa_init(PAGE_SIZE),
-				       mode, &ipa, &ppool));
+				       mode, &ppool, &ipa));
 	EXPECT_THAT(ipa_addr(ipa), Eq(0));
 	EXPECT_THAT(
 		get_ptable(ptable),
@@ -345,7 +345,7 @@
 	struct mm_ptable ptable;
 	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
 	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0x1234'5678),
-				       pa_init(0x5000), mode, &ipa, &ppool));
+				       pa_init(0x5000), mode, &ppool, &ipa));
 	EXPECT_THAT(ipa_addr(ipa), Eq(0x1234'5678));
 	EXPECT_THAT(
 		get_ptable(ptable),
@@ -367,7 +367,7 @@
 	struct mm_ptable ptable;
 	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
 	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(20), pa_init(10), mode,
-				       &ipa, &ppool));
+				       &ppool, &ipa));
 	EXPECT_THAT(ipa_addr(ipa), Eq(20));
 	EXPECT_TRUE(mm_vm_is_mapped(&ptable, ipa));
 	mm_vm_fini(&ptable, &ppool);
@@ -388,8 +388,8 @@
 	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
 	ASSERT_TRUE(mm_vm_identity_map(
 		&ptable, pa_init(0),
-		pa_init(std::numeric_limits<uintpaddr_t>::max()), mode, &ipa,
-		&ppool));
+		pa_init(std::numeric_limits<uintpaddr_t>::max()), mode, &ppool,
+		&ipa));
 	EXPECT_THAT(ipa_addr(ipa), Eq(0));
 	EXPECT_THAT(
 		get_ptable(ptable),
@@ -408,7 +408,7 @@
 	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
 	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0),
 				       pa_init(0xf32'0000'0000'0000), mode,
-				       nullptr, &ppool));
+				       &ppool, nullptr));
 	EXPECT_THAT(
 		get_ptable(ptable),
 		AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
@@ -427,8 +427,8 @@
 	struct mm_ptable ptable;
 	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
 	ASSERT_TRUE(mm_vm_identity_map(&ptable, VM_MEM_END,
-				       pa_init(0xf0'0000'0000'0000), mode, &ipa,
-				       &ppool));
+				       pa_init(0xf0'0000'0000'0000), mode,
+				       &ppool, &ipa));
 	EXPECT_THAT(ipa_addr(ipa), Eq(pa_addr(VM_MEM_END)));
 	EXPECT_THAT(
 		get_ptable(ptable),
@@ -448,9 +448,9 @@
 	struct mm_ptable ptable;
 	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
 	ASSERT_TRUE(mm_vm_identity_map(&ptable, page_begin, page_end, mode,
-				       nullptr, &ppool));
+				       &ppool, nullptr));
 	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
-				       nullptr, &ppool));
+				       &ppool, nullptr));
 	EXPECT_THAT(
 		get_ptable(ptable),
 		AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
@@ -470,10 +470,10 @@
 	struct mm_ptable ptable;
 	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
 	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
-				       nullptr, &ppool));
+				       &ppool, nullptr));
 	ASSERT_TRUE(mm_vm_unmap(&ptable, page_begin, page_end, &ppool));
 	ASSERT_TRUE(mm_vm_identity_map(&ptable, page_begin, page_end, mode,
-				       nullptr, &ppool));
+				       &ppool, nullptr));
 	EXPECT_THAT(get_ptable(ptable),
 		    AllOf(SizeIs(4),
 			  Each(Each(Truly(std::bind(arch_mm_pte_is_present, _1,
@@ -498,12 +498,12 @@
 	const paddr_t l1_end = pa_add(l1_begin, 5 * mm_entry_size(1));
 	struct mm_ptable ptable;
 	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
-	ASSERT_TRUE(mm_vm_identity_map(&ptable, l0_begin, l0_end, mode, nullptr,
-				       &ppool));
-	ASSERT_TRUE(mm_vm_identity_map(&ptable, l1_begin, l1_end, mode, nullptr,
-				       &ppool));
+	ASSERT_TRUE(mm_vm_identity_map(&ptable, l0_begin, l0_end, mode, &ppool,
+				       nullptr));
+	ASSERT_TRUE(mm_vm_identity_map(&ptable, l1_begin, l1_end, mode, &ppool,
+				       nullptr));
 	EXPECT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END,
-				       MM_MODE_UNMAPPED_MASK, nullptr, &ppool));
+				       MM_MODE_UNMAPPED_MASK, &ppool, nullptr));
 	EXPECT_THAT(
 		get_ptable(ptable),
 		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
@@ -522,8 +522,8 @@
 	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
 	ASSERT_TRUE(mm_vm_identity_prepare(&ptable, page_begin, page_end, mode,
 					   &ppool));
-	mm_vm_identity_commit(&ptable, page_begin, page_end, mode, nullptr,
-			      &ppool);
+	mm_vm_identity_commit(&ptable, page_begin, page_end, mode, &ppool,
+			      nullptr);
 
 	auto tables = get_ptable(ptable);
 	EXPECT_THAT(tables, SizeIs(4));
@@ -570,10 +570,10 @@
 					   mode, &ppool));
 	ASSERT_TRUE(mm_vm_identity_prepare(&ptable, last_begin, last_end, mode,
 					   &ppool));
-	mm_vm_identity_commit(&ptable, first_begin, first_end, mode, nullptr,
-			      &ppool);
-	mm_vm_identity_commit(&ptable, last_begin, last_end, mode, nullptr,
-			      &ppool);
+	mm_vm_identity_commit(&ptable, first_begin, first_end, mode, &ppool,
+			      nullptr);
+	mm_vm_identity_commit(&ptable, last_begin, last_end, mode, &ppool,
+			      nullptr);
 
 	auto tables = get_ptable(ptable);
 	EXPECT_THAT(tables, SizeIs(4));
@@ -642,10 +642,10 @@
 					   &ppool));
 	ASSERT_TRUE(mm_vm_identity_prepare(&ptable, low_begin, map_end, mode,
 					   &ppool));
-	mm_vm_identity_commit(&ptable, high_begin, map_end, mode, nullptr,
-			      &ppool);
-	mm_vm_identity_commit(&ptable, low_begin, map_end, mode, nullptr,
-			      &ppool);
+	mm_vm_identity_commit(&ptable, high_begin, map_end, mode, &ppool,
+			      nullptr);
+	mm_vm_identity_commit(&ptable, low_begin, map_end, mode, &ppool,
+			      nullptr);
 
 	auto tables = get_ptable(ptable);
 	EXPECT_THAT(tables, SizeIs(4));
@@ -738,10 +738,10 @@
 	const paddr_t l1_end = pa_add(l1_begin, 5 * mm_entry_size(1));
 	struct mm_ptable ptable;
 	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
-	ASSERT_TRUE(mm_vm_identity_map(&ptable, l0_begin, l0_end, mode, nullptr,
-				       &ppool));
-	ASSERT_TRUE(mm_vm_identity_map(&ptable, l1_begin, l1_end, mode, nullptr,
-				       &ppool));
+	ASSERT_TRUE(mm_vm_identity_map(&ptable, l0_begin, l0_end, mode, &ppool,
+				       nullptr));
+	ASSERT_TRUE(mm_vm_identity_map(&ptable, l1_begin, l1_end, mode, &ppool,
+				       nullptr));
 	EXPECT_TRUE(mm_vm_unmap(&ptable, pa_init(0), VM_MEM_END, &ppool));
 	EXPECT_THAT(
 		get_ptable(ptable),
@@ -761,7 +761,7 @@
 
 	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
 	ASSERT_TRUE(mm_vm_identity_map(&ptable, map_begin, map_end, mode,
-				       nullptr, &ppool));
+				       &ppool, nullptr));
 	ASSERT_TRUE(mm_vm_unmap(&ptable, pa_add(map_begin, 93),
 				pa_add(map_begin, 99), &ppool));
 
@@ -808,7 +808,7 @@
 
 	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
 	ASSERT_TRUE(mm_vm_identity_map(&ptable, map_begin, map_end, mode,
-				       nullptr, &ppool));
+				       &ppool, nullptr));
 	ASSERT_TRUE(mm_vm_unmap(&ptable, map_begin, map_end, &ppool));
 
 	auto tables = get_ptable(ptable);
@@ -860,7 +860,7 @@
 	struct mm_ptable ptable;
 	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
 	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
-				       nullptr, &ppool));
+				       &ppool, nullptr));
 	ASSERT_TRUE(mm_vm_unmap(&ptable, VM_MEM_END, pa_init(0x4000'0000'0000),
 				&ppool));
 	EXPECT_THAT(
@@ -880,7 +880,7 @@
 	struct mm_ptable ptable;
 	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
 	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
-				       nullptr, &ppool));
+				       &ppool, nullptr));
 	ASSERT_TRUE(mm_vm_unmap(&ptable, pa_init(0x80'a000'0000), pa_init(27),
 				&ppool));
 	EXPECT_THAT(
@@ -905,7 +905,7 @@
 	struct mm_ptable ptable;
 	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
 	ASSERT_TRUE(mm_vm_identity_map(&ptable, page_begin, page_end, mode,
-				       nullptr, &ppool));
+				       &ppool, nullptr));
 	ASSERT_TRUE(mm_vm_unmap(&ptable, pa_add(page_begin, 100),
 				pa_add(page_begin, 50), &ppool));
 
@@ -947,7 +947,7 @@
 	struct mm_ptable ptable;
 	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
 	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
-				       nullptr, &ppool));
+				       &ppool, nullptr));
 	ASSERT_TRUE(mm_vm_unmap(
 		&ptable, pa_init(0),
 		pa_init(std::numeric_limits<uintpaddr_t>::max()), &ppool));
@@ -970,10 +970,10 @@
 	const paddr_t l1_end = pa_add(l1_begin, 5 * mm_entry_size(1));
 	struct mm_ptable ptable;
 	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
-	ASSERT_TRUE(mm_vm_identity_map(&ptable, l0_begin, l0_end, mode, nullptr,
-				       &ppool));
-	ASSERT_TRUE(mm_vm_identity_map(&ptable, l1_begin, l1_end, mode, nullptr,
-				       &ppool));
+	ASSERT_TRUE(mm_vm_identity_map(&ptable, l0_begin, l0_end, mode, &ppool,
+				       nullptr));
+	ASSERT_TRUE(mm_vm_identity_map(&ptable, l1_begin, l1_end, mode, &ppool,
+				       nullptr));
 	ASSERT_TRUE(mm_vm_unmap(&ptable, l0_begin, l0_end, &ppool));
 	ASSERT_TRUE(mm_vm_unmap(&ptable, l1_begin, l1_end, &ppool));
 	EXPECT_THAT(get_ptable(ptable),
@@ -1004,7 +1004,7 @@
 	struct mm_ptable ptable;
 	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
 	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
-				       nullptr, &ppool));
+				       &ppool, nullptr));
 	EXPECT_TRUE(mm_vm_is_mapped(&ptable, ipa_init(0)));
 	EXPECT_TRUE(mm_vm_is_mapped(&ptable, ipa_init(0xf247'a7b3)));
 	EXPECT_TRUE(mm_vm_is_mapped(&ptable, ipa_init(0x1ff'7bfa'983b)));
@@ -1022,7 +1022,7 @@
 	struct mm_ptable ptable;
 	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
 	ASSERT_TRUE(mm_vm_identity_map(&ptable, page_begin, page_end, mode,
-				       nullptr, &ppool));
+				       &ppool, nullptr));
 	EXPECT_TRUE(mm_vm_is_mapped(&ptable, ipa_from_pa(page_begin)));
 	EXPECT_TRUE(
 		mm_vm_is_mapped(&ptable, ipa_from_pa(pa_add(page_begin, 127))));
@@ -1039,7 +1039,7 @@
 	struct mm_ptable ptable;
 	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
 	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
-				       nullptr, &ppool));
+				       &ppool, nullptr));
 	EXPECT_FALSE(mm_vm_is_mapped(&ptable, ipa_from_pa(VM_MEM_END)));
 	EXPECT_FALSE(mm_vm_is_mapped(&ptable, ipa_init(0x1000'adb7'8123)));
 	EXPECT_FALSE(mm_vm_is_mapped(
@@ -1090,7 +1090,7 @@
 	uint32_t read_mode;
 	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
 	ASSERT_TRUE(mm_vm_identity_map(&ptable, map_begin, map_end, mode,
-				       nullptr, &ppool));
+				       &ppool, nullptr));
 
 	read_mode = 0;
 	EXPECT_TRUE(mm_vm_get_mode(&ptable, ipa_from_pa(map_begin),
@@ -1119,7 +1119,7 @@
 	uint32_t read_mode;
 	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
 	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
-				       nullptr, &ppool));
+				       &ppool, nullptr));
 	EXPECT_FALSE(mm_vm_get_mode(&ptable, ipa_init(0),
 				    ipa_from_pa(pa_add(VM_MEM_END, 1)),
 				    &read_mode));
@@ -1158,10 +1158,10 @@
 	const paddr_t l1_end = pa_add(l1_begin, 5 * mm_entry_size(1));
 	struct mm_ptable ptable;
 	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
-	ASSERT_TRUE(mm_vm_identity_map(&ptable, l0_begin, l0_end, mode, nullptr,
-				       &ppool));
-	ASSERT_TRUE(mm_vm_identity_map(&ptable, l1_begin, l1_end, mode, nullptr,
-				       &ppool));
+	ASSERT_TRUE(mm_vm_identity_map(&ptable, l0_begin, l0_end, mode, &ppool,
+				       nullptr));
+	ASSERT_TRUE(mm_vm_identity_map(&ptable, l1_begin, l1_end, mode, &ppool,
+				       nullptr));
 	ASSERT_TRUE(mm_vm_unmap(&ptable, l0_begin, l0_end, &ppool));
 	ASSERT_TRUE(mm_vm_unmap(&ptable, l1_begin, l1_end, &ppool));
 	mm_vm_defrag(&ptable, &ppool);
@@ -1184,12 +1184,12 @@
 	struct mm_ptable ptable;
 	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
 	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
-				       nullptr, &ppool));
+				       &ppool, nullptr));
 	ASSERT_TRUE(mm_vm_unmap(&ptable, begin, end, &ppool));
-	ASSERT_TRUE(mm_vm_identity_map(&ptable, begin, middle, mode, nullptr,
-				       &ppool));
-	ASSERT_TRUE(mm_vm_identity_map(&ptable, middle, end, mode, nullptr,
-				       &ppool));
+	ASSERT_TRUE(mm_vm_identity_map(&ptable, begin, middle, mode, &ppool,
+				       nullptr));
+	ASSERT_TRUE(mm_vm_identity_map(&ptable, middle, end, mode, &ppool,
+				       nullptr));
 	mm_vm_defrag(&ptable, &ppool);
 	EXPECT_THAT(
 		get_ptable(ptable),