Initial IOMMU driver hooks.

This is intentionally stripped back to initialization and a hook into
the driver when a VM's memory map changes. This will serve as the
baseline from which changes can be made to fit the needs of the drivers.
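
For illustration only (no concrete driver is part of this change, and
the behavior described in the comments is hypothetical), a platform
driver would provide the two hooks declared in inc/hf/plat/iommu.h
along these lines:

    #include "hf/plat/iommu.h"

    bool plat_iommu_init(const struct fdt_node *fdt_root,
                         struct mm_stage1_locked stage1_locked,
                         struct mpool *ppool)
    {
            /*
             * A real driver would locate its device via fdt_root and
             * use stage1_locked/ppool to map the device's registers
             * into the hypervisor's address space before programming
             * them.
             */
            (void)fdt_root;
            (void)stage1_locked;
            (void)ppool;

            /* Returning false panics the boot. */
            return true;
    }

    void plat_iommu_identity_map(struct vm_locked vm_locked,
                                 paddr_t begin, paddr_t end,
                                 uint32_t mode)
    {
            /*
             * A real driver would mirror the stage-2 change into the
             * IOMMU translation for this VM. Only MM_MODE_R and
             * MM_MODE_W need to be enforced; a mode with neither bit
             * set revokes the VM's device access to the range.
             */
            (void)vm_locked;
            (void)begin;
            (void)end;
            (void)mode;
    }

The hook is driven by the new vm_* wrappers, which split a mapping
update into a prepare phase that can fail and a commit phase that
cannot, so the IOMMU is only updated once the MMU update is guaranteed
to succeed:

    struct vm_locked vm_locked = vm_lock(vm);

    /* Prepare: allocates page table entries; changes nothing on
     * failure. */
    if (vm_identity_prepare(vm_locked, begin, end, mode, ppool)) {
            /* Commit: cannot fail and calls plat_iommu_identity_map(). */
            vm_identity_commit(vm_locked, begin, end, mode, ppool, NULL);
    }

    vm_unlock(&vm_locked);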

Change-Id: I904279f511e2d6e4b1c062fb49a2892042b79005
diff --git a/Makefile b/Makefile
index b2fa20f..52fa980 100644
--- a/Makefile
+++ b/Makefile
@@ -46,6 +46,7 @@
 	--ignore BRACES,SPDX_LICENSE_TAG,VOLATILE,SPLIT_STRING,AVOID_EXTERNS,USE_SPINLOCK_T,NEW_TYPEDEFS,INITIALISED_STATIC,FILE_PATH_CHANGES,EMBEDDED_FUNCTION_NAME,SINGLE_STATEMENT_DO_WHILE_MACRO,MACRO_WITH_FLOW_CONTROL --quiet
 
 # Specifies the grep pattern for ignoring specific files in checkpatch.
+# C++ headers, *.hh, are automatically excluded.
 # Separate the different items in the list with a grep or (\|).
 # debug_el1.c : uses XMACROS, which checkpatch doesn't understand.
 # perfmon.c : uses XMACROS, which checkpatch doesn't understand.
diff --git a/build/toolchain/BUILD.gn b/build/toolchain/BUILD.gn
index cdceaf9..07a35e2 100644
--- a/build/toolchain/BUILD.gn
+++ b/build/toolchain/BUILD.gn
@@ -41,5 +41,6 @@
     plat_arch = "fake"
     plat_boot_flow = "//src/arch/fake:boot_flow"
     plat_console = "//src/arch/fake:console"
+    plat_iommu = "//src/iommu:absent"
   }
 }
diff --git a/build/toolchain/embedded.gni b/build/toolchain/embedded.gni
index 31f7894..fa4aeae 100644
--- a/build/toolchain/embedded.gni
+++ b/build/toolchain/embedded.gni
@@ -293,6 +293,7 @@
     toolchain_args = {
       plat_boot_flow = invoker.boot_flow
       plat_console = invoker.console
+      plat_iommu = invoker.iommu
       forward_variables_from(invoker.toolchain_args, "*")
     }
   }
@@ -314,6 +315,7 @@
                              "origin_address",
                              "boot_flow",
                              "console",
+                             "iommu",
                              "gic_version",
                              "gicd_base_address",
                              "gicr_base_address",
@@ -342,6 +344,7 @@
     cpu = "${invoker.cpu}+fp"
     boot_flow = "//src/arch/fake:boot_flow"
     console = "//src/arch/aarch64/hftest:console"
+    iommu = "//src/iommu:absent"
 
     # Nonsense values because they are required but shouldn't be used.
     heap_pages = 0
diff --git a/build/toolchain/host.gni b/build/toolchain/host.gni
index 535ec61..feffa11 100644
--- a/build/toolchain/host.gni
+++ b/build/toolchain/host.gni
@@ -152,6 +152,7 @@
         plat_arch = "fake"
         plat_boot_flow = "//src/arch/fake:boot_flow"
         plat_console = "//src/arch/fake:console"
+        plat_iommu = "//src/iommu:absent"
         plat_heap_pages = invoker.heap_pages
         plat_max_cpus = invoker.max_cpus
         plat_max_vms = invoker.max_vms
diff --git a/build/toolchain/platform.gni b/build/toolchain/platform.gni
index b99ca64..7ff4b5a 100644
--- a/build/toolchain/platform.gni
+++ b/build/toolchain/platform.gni
@@ -26,6 +26,9 @@
   # Console driver to be used for the platform, specified as build target.
   plat_console = ""
 
+  # IOMMU driver to be used for the platform, specified as build target.
+  plat_iommu = ""
+
   # The number of pages to allocate for the hypervisor heap.
   plat_heap_pages = 0
 
diff --git a/inc/hf/boot_flow.h b/inc/hf/boot_flow.h
index 3307fb0..39d7060 100644
--- a/inc/hf/boot_flow.h
+++ b/inc/hf/boot_flow.h
@@ -21,9 +21,8 @@
 #include "hf/memiter.h"
 #include "hf/mm.h"
 
-bool boot_flow_init(struct mm_stage1_locked stage1_locked,
-		    struct manifest *manifest, struct boot_params *boot_params,
-		    struct mpool *ppool);
+bool boot_flow_init(const struct fdt_node *fdt_root, struct manifest *manifest,
+		    struct boot_params *boot_params);
 
 bool boot_flow_update(struct mm_stage1_locked stage1_locked,
 		      const struct manifest *manifest,
diff --git a/inc/hf/mm.h b/inc/hf/mm.h
index 1a0bfd6..43d4c79 100644
--- a/inc/hf/mm.h
+++ b/inc/hf/mm.h
@@ -115,7 +115,6 @@
 			   uint32_t mode, struct mpool *ppool, ipaddr_t *ipa);
 bool mm_vm_unmap(struct mm_ptable *t, paddr_t begin, paddr_t end,
 		 struct mpool *ppool);
-bool mm_vm_unmap_hypervisor(struct mm_ptable *t, struct mpool *ppool);
 void mm_vm_defrag(struct mm_ptable *t, struct mpool *ppool);
 void mm_vm_dump(struct mm_ptable *t);
 bool mm_vm_get_mode(struct mm_ptable *t, ipaddr_t begin, ipaddr_t end,
diff --git a/inc/hf/plat/iommu.h b/inc/hf/plat/iommu.h
new file mode 100644
index 0000000..9ef791d
--- /dev/null
+++ b/inc/hf/plat/iommu.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2019 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include "hf/addr.h"
+#include "hf/fdt.h"
+#include "hf/vm.h"
+
+/**
+ * Initializes the platform IOMMU driver. The FDT root node is provided so the
+ * driver can read its configuration, and the locked stage-1 page table can be
+ * used to map IOMMU devices into the hypervisor's address space.
+ */
+bool plat_iommu_init(const struct fdt_node *fdt_root,
+		     struct mm_stage1_locked stage1_locked,
+		     struct mpool *ppool);
+
+/**
+ * Maps the address range with the given mode for the given VM in the IOMMU.
+ *
+ * Assumes the identity map cannot fail. This may not always be true and, if
+ * it isn't, it will require careful thought on how to safely handle error
+ * cases when intermingled with MMU updates, but it gives a starting point for
+ * drivers until those problems are understood.
+ *
+ * The modes are the same as the memory management modes, but only the read
+ * and write modes are required to be enforced by the IOMMU driver.
+ */
+void plat_iommu_identity_map(struct vm_locked vm_locked, paddr_t begin,
+			     paddr_t end, uint32_t mode);
diff --git a/inc/hf/vm.h b/inc/hf/vm.h
index 4762a2e..d6d919a 100644
--- a/inc/hf/vm.h
+++ b/inc/hf/vm.h
@@ -134,3 +134,13 @@
 struct two_vm_locked vm_lock_both(struct vm *vm1, struct vm *vm2);
 void vm_unlock(struct vm_locked *locked);
 struct vcpu *vm_get_vcpu(struct vm *vm, spci_vcpu_index_t vcpu_index);
+
+bool vm_identity_map(struct vm_locked vm_locked, paddr_t begin, paddr_t end,
+		     uint32_t mode, struct mpool *ppool, ipaddr_t *ipa);
+bool vm_identity_prepare(struct vm_locked vm_locked, paddr_t begin, paddr_t end,
+			 uint32_t mode, struct mpool *ppool);
+void vm_identity_commit(struct vm_locked vm_locked, paddr_t begin, paddr_t end,
+			uint32_t mode, struct mpool *ppool, ipaddr_t *ipa);
+bool vm_unmap(struct vm_locked vm_locked, paddr_t begin, paddr_t end,
+	      struct mpool *ppool);
+bool vm_unmap_hypervisor(struct vm_locked vm_locked, struct mpool *ppool);
diff --git a/project/reference b/project/reference
index 3eeaa40..e2a525d 160000
--- a/project/reference
+++ b/project/reference
@@ -1 +1 @@
-Subproject commit 3eeaa4002c7ab8dc0ed02b0dadfa8ba7ab3ad76f
+Subproject commit e2a525d24927401f8b2d190f234af6ea24c85664
diff --git a/src/BUILD.gn b/src/BUILD.gn
index 3859871..5c9e56f 100644
--- a/src/BUILD.gn
+++ b/src/BUILD.gn
@@ -37,6 +37,7 @@
     "//project/${project}/${plat_name}",
     plat_boot_flow,
     plat_console,
+    plat_iommu,
   ]
 }
 
@@ -66,6 +67,7 @@
     "//src/arch/${plat_arch}/hypervisor",
     plat_boot_flow,
     plat_console,
+    plat_iommu,
   ]
 
   if (is_debug) {
@@ -159,6 +161,7 @@
     "mm_test.cc",
     "mpool_test.cc",
     "string_test.cc",
+    "vm_test.cc",
   ]
   sources += [ "layout_fake.c" ]
   cflags_cc = [
diff --git a/src/api.c b/src/api.c
index bbec545..210359b 100644
--- a/src/api.c
+++ b/src/api.c
@@ -710,17 +710,16 @@
 	mpool_init_with_fallback(&local_page_pool, &api_page_pool);
 
 	/* Take memory ownership away from the VM and mark as shared. */
-	if (!mm_vm_identity_map(
-		    &vm_locked.vm->ptable, pa_send_begin, pa_send_end,
+	if (!vm_identity_map(
+		    vm_locked, pa_send_begin, pa_send_end,
 		    MM_MODE_UNOWNED | MM_MODE_SHARED | MM_MODE_R | MM_MODE_W,
 		    &local_page_pool, NULL)) {
 		goto fail;
 	}
 
-	if (!mm_vm_identity_map(&vm_locked.vm->ptable, pa_recv_begin,
-				pa_recv_end,
-				MM_MODE_UNOWNED | MM_MODE_SHARED | MM_MODE_R,
-				&local_page_pool, NULL)) {
+	if (!vm_identity_map(vm_locked, pa_recv_begin, pa_recv_end,
+			     MM_MODE_UNOWNED | MM_MODE_SHARED | MM_MODE_R,
+			     &local_page_pool, NULL)) {
 		/* TODO: partial defrag of failed range. */
 		/* Recover any memory consumed in failed mapping. */
 		mm_vm_defrag(&vm_locked.vm->ptable, &local_page_pool);
@@ -741,14 +740,12 @@
 	 * in the local pool.
 	 */
 fail_undo_send_and_recv:
-	CHECK(mm_vm_identity_map(&vm_locked.vm->ptable, pa_recv_begin,
-				 pa_recv_end, orig_recv_mode, &local_page_pool,
-				 NULL));
+	CHECK(vm_identity_map(vm_locked, pa_recv_begin, pa_recv_end,
+			      orig_recv_mode, &local_page_pool, NULL));
 
 fail_undo_send:
-	CHECK(mm_vm_identity_map(&vm_locked.vm->ptable, pa_send_begin,
-				 pa_send_end, orig_send_mode, &local_page_pool,
-				 NULL));
+	CHECK(vm_identity_map(vm_locked, pa_send_begin, pa_send_end,
+			      orig_send_mode, &local_page_pool, NULL));
 
 fail:
 	ret = false;
diff --git a/src/boot_flow/common.c b/src/boot_flow/common.c
index b295824..834f746 100644
--- a/src/boot_flow/common.c
+++ b/src/boot_flow/common.c
@@ -38,49 +38,24 @@
  * Parses information from FDT needed to initialize Hafnium.
- * FDT is mapped at the beginning and unmapped before exiting the function.
+ * The FDT must already be mapped and its root node provided by the caller.
  */
-bool boot_flow_init(struct mm_stage1_locked stage1_locked,
-		    struct manifest *manifest, struct boot_params *boot_params,
-		    struct mpool *ppool)
+bool boot_flow_init(const struct fdt_node *fdt_root, struct manifest *manifest,
+		    struct boot_params *boot_params)
 {
-	bool ret = false;
-	struct fdt_header *fdt;
-	struct fdt_node fdt_root;
 	enum manifest_return_code manifest_ret;
 
-	/* Get the memory map from the FDT. */
-	fdt = fdt_map(stage1_locked, plat_boot_flow_get_fdt_addr(), &fdt_root,
-		      ppool);
-	if (fdt == NULL) {
-		dlog("Unable to map FDT.\n");
-		return false;
-	}
-
-	if (!fdt_find_child(&fdt_root, "")) {
-		dlog("Unable to find FDT root node.\n");
-		goto out_unmap_fdt;
-	}
-
-	manifest_ret = manifest_init(manifest, &fdt_root);
+	manifest_ret = manifest_init(manifest, fdt_root);
 	if (manifest_ret != MANIFEST_SUCCESS) {
 		dlog("Could not parse manifest: %s.\n",
 		     manifest_strerror(manifest_ret));
-		goto out_unmap_fdt;
+		return false;
 	}
 
-	if (!boot_params_init(boot_params, &fdt_root)) {
+	if (!boot_params_init(boot_params, fdt_root)) {
 		dlog("Could not parse boot params.\n");
-		goto out_unmap_fdt;
+		return false;
 	}
 
-	ret = true;
-
-out_unmap_fdt:
-	if (!fdt_unmap(stage1_locked, fdt, ppool)) {
-		dlog("Unable to unmap FDT.\n");
-		ret = false;
-	}
-
-	return ret;
+	return true;
 }
 
 /**
diff --git a/src/init.c b/src/init.c
index 6d9e8ad..897039f 100644
--- a/src/init.c
+++ b/src/init.c
@@ -25,11 +25,14 @@
 #include "hf/cpio.h"
 #include "hf/cpu.h"
 #include "hf/dlog.h"
+#include "hf/fdt_handler.h"
 #include "hf/load.h"
 #include "hf/mm.h"
 #include "hf/mpool.h"
 #include "hf/panic.h"
+#include "hf/plat/boot_flow.h"
 #include "hf/plat/console.h"
+#include "hf/plat/iommu.h"
 #include "hf/std.h"
 #include "hf/vm.h"
 
@@ -68,6 +71,8 @@
  */
 void one_time_init(void)
 {
+	struct fdt_header *fdt;
+	struct fdt_node fdt_root;
 	struct manifest manifest;
 	struct boot_params params;
 	struct boot_params_update update;
@@ -84,10 +89,28 @@
 
 	mm_stage1_locked = mm_lock_stage1();
 
-	if (!boot_flow_init(mm_stage1_locked, &manifest, &params, &ppool)) {
+	fdt = fdt_map(mm_stage1_locked, plat_boot_flow_get_fdt_addr(),
+		      &fdt_root, &ppool);
+	if (fdt == NULL) {
+		panic("Unable to map FDT.\n");
+	}
+
+	if (!fdt_find_child(&fdt_root, "")) {
+		panic("Unable to find FDT root node.\n");
+	}
+
+	if (!boot_flow_init(&fdt_root, &manifest, &params)) {
 		panic("Could not parse data from FDT.");
 	}
 
+	if (!plat_iommu_init(&fdt_root, mm_stage1_locked, &ppool)) {
+		panic("Could not initialize IOMMUs.");
+	}
+
+	if (!fdt_unmap(mm_stage1_locked, fdt, &ppool)) {
+		panic("Unable to unmap FDT.\n");
+	}
+
 	cpu_module_init(params.cpu_ids, params.cpu_count);
 
 	for (i = 0; i < params.mem_ranges_count; ++i) {
diff --git a/src/iommu/BUILD.gn b/src/iommu/BUILD.gn
new file mode 100644
index 0000000..00004f3
--- /dev/null
+++ b/src/iommu/BUILD.gn
@@ -0,0 +1,20 @@
+# Copyright 2019 The Hafnium Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
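+# No-op IOMMU driver for platforms that do not select a real one.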
+source_set("absent") {
+  sources = [
+    "absent.c",
+  ]
+}
diff --git a/src/iommu/absent.c b/src/iommu/absent.c
new file mode 100644
index 0000000..c89e9bb
--- /dev/null
+++ b/src/iommu/absent.c
@@ -0,0 +1,41 @@
+/*
+ * Copyright 2019 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
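+/*
+ * Stub IOMMU driver for platforms that have no IOMMU, or for which no
+ * driver is needed; every hook succeeds without doing anything.
+ */
+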
+#include "hf/plat/iommu.h"
+
+bool plat_iommu_init(const struct fdt_node *fdt_root,
+		     struct mm_stage1_locked stage1_locked, struct mpool *ppool)
+{
+	(void)fdt_root;
+	(void)stage1_locked;
+	(void)ppool;
+
+	return true;
+}
+
+void plat_iommu_identity_map(struct vm_locked vm_locked, paddr_t begin,
+			     paddr_t end, uint32_t mode)
+{
+	(void)vm_locked;
+	(void)begin;
+	(void)end;
+	(void)mode;
+}
diff --git a/src/load.c b/src/load.c
index e62476f..9f311a9 100644
--- a/src/load.c
+++ b/src/load.c
@@ -117,8 +117,10 @@
 {
 	paddr_t primary_begin = layout_primary_begin();
 	struct vm *vm;
+	struct vm_locked vm_locked;
 	struct vcpu_locked vcpu_locked;
 	size_t i;
+	bool ret;
 
 	/*
 	 * TODO: This bound is currently meaningless but will be addressed when
@@ -142,8 +144,11 @@
 		return false;
 	}
 
+	vm_locked = vm_lock(vm);
+
 	if (!load_common(manifest_vm, vm)) {
-		return false;
+		ret = false;
+		goto out;
 	}
 
 	/*
@@ -152,35 +157,41 @@
 	 *
 	 * TODO: We should do a whitelist rather than a blacklist.
 	 */
-	if (!mm_vm_identity_map(&vm->ptable, pa_init(0),
-				pa_init(UINT64_C(1024) * 1024 * 1024 * 1024),
-				MM_MODE_R | MM_MODE_W | MM_MODE_D, ppool,
-				NULL)) {
+	if (!vm_identity_map(vm_locked, pa_init(0),
+			     pa_init(UINT64_C(1024) * 1024 * 1024 * 1024),
+			     MM_MODE_R | MM_MODE_W | MM_MODE_D, ppool, NULL)) {
 		dlog("Unable to initialise address space for primary vm\n");
-		return false;
+		ret = false;
+		goto out;
 	}
 
 	/* Map normal memory as such to permit caching, execution, etc. */
 	for (i = 0; i < params->mem_ranges_count; ++i) {
-		if (!mm_vm_identity_map(
-			    &vm->ptable, params->mem_ranges[i].begin,
-			    params->mem_ranges[i].end,
-			    MM_MODE_R | MM_MODE_W | MM_MODE_X, ppool, NULL)) {
+		if (!vm_identity_map(vm_locked, params->mem_ranges[i].begin,
+				     params->mem_ranges[i].end,
+				     MM_MODE_R | MM_MODE_W | MM_MODE_X, ppool,
+				     NULL)) {
 			dlog("Unable to initialise memory for primary vm\n");
-			return false;
+			ret = false;
+			goto out;
 		}
 	}
 
-	if (!mm_vm_unmap_hypervisor(&vm->ptable, ppool)) {
+	if (!vm_unmap_hypervisor(vm_locked, ppool)) {
 		dlog("Unable to unmap hypervisor from primary vm\n");
-		return false;
+		ret = false;
+		goto out;
 	}
 
 	vcpu_locked = vcpu_lock(vm_get_vcpu(vm, 0));
 	vcpu_on(vcpu_locked, ipa_from_pa(primary_begin), params->kernel_arg);
 	vcpu_unlock(&vcpu_locked);
+	ret = true;
 
-	return true;
+out:
+	vm_unlock(&vm_locked);
+
+	return ret;
 }
 
 /*
@@ -192,8 +203,10 @@
 			   const struct memiter *cpio, struct mpool *ppool)
 {
 	struct vm *vm;
+	struct vm_locked vm_locked;
 	struct vcpu *vcpu;
 	ipaddr_t secondary_entry;
+	bool ret;
 
 	if (!load_kernel(stage1_locked, mem_begin, mem_end, manifest_vm, cpio,
 			 ppool)) {
@@ -210,12 +223,15 @@
 		return false;
 	}
 
+	vm_locked = vm_lock(vm);
+
 	/* Grant the VM access to the memory. */
-	if (!mm_vm_identity_map(&vm->ptable, mem_begin, mem_end,
-				MM_MODE_R | MM_MODE_W | MM_MODE_X, ppool,
-				&secondary_entry)) {
+	if (!vm_identity_map(vm_locked, mem_begin, mem_end,
+			     MM_MODE_R | MM_MODE_W | MM_MODE_X, ppool,
+			     &secondary_entry)) {
 		dlog("Unable to initialise memory.\n");
-		return false;
+		ret = false;
+		goto out;
 	}
 
 	dlog("Loaded with %u vcpus, entry at %#x.\n",
@@ -224,8 +240,12 @@
 	vcpu = vm_get_vcpu(vm, 0);
 	vcpu_secondary_reset_and_start(vcpu, secondary_entry,
 				       pa_difference(mem_begin, mem_end));
+	ret = true;
 
-	return true;
+out:
+	vm_unlock(&vm_locked);
+
+	return ret;
 }
 
 /**
@@ -314,7 +334,9 @@
 {
 	struct vm *primary;
 	struct mem_range mem_ranges_available[MAX_MEM_RANGES];
+	struct vm_locked primary_vm_locked;
 	size_t i;
+	bool success = true;
 
 	if (!load_primary(stage1_locked, &manifest->vm[HF_PRIMARY_VM_INDEX],
 			  cpio, params, ppool)) {
@@ -331,14 +353,15 @@
 	memcpy_s(mem_ranges_available, sizeof(mem_ranges_available),
 		 params->mem_ranges, sizeof(params->mem_ranges));
 
-	primary = vm_find(HF_PRIMARY_VM_ID);
-
 	/* Round the last addresses down to the page size. */
 	for (i = 0; i < params->mem_ranges_count; ++i) {
 		mem_ranges_available[i].end = pa_init(align_down(
 			pa_addr(mem_ranges_available[i].end), PAGE_SIZE));
 	}
 
+	primary = vm_find(HF_PRIMARY_VM_ID);
+	primary_vm_locked = vm_lock(primary);
+
 	for (i = 0; i < manifest->vm_count; ++i) {
 		const struct manifest_vm *manifest_vm = &manifest->vm[i];
 		spci_vm_id_t vm_id = HF_VM_ID_OFFSET + i;
@@ -370,13 +393,20 @@
 		}
 
 		/* Deny the primary VM access to this memory. */
-		if (!mm_vm_unmap(&primary->ptable, secondary_mem_begin,
-				 secondary_mem_end, ppool)) {
+		if (!vm_unmap(primary_vm_locked, secondary_mem_begin,
+			      secondary_mem_end, ppool)) {
 			dlog("Unable to unmap secondary VM from primary VM.\n");
-			return false;
+			success = false;
+			break;
 		}
 	}
 
+	vm_unlock(&primary_vm_locked);
+
+	if (!success) {
+		return false;
+	}
+
 	/*
 	 * Add newly reserved areas to update params by looking at the
 	 * difference between the available ranges from the original params and
diff --git a/src/mm.c b/src/mm.c
index 7fd2151..83bf9bd 100644
--- a/src/mm.c
+++ b/src/mm.c
@@ -924,18 +924,6 @@
 }
 
 /**
- * Unmaps the hypervisor pages from the given page table.
- */
-bool mm_vm_unmap_hypervisor(struct mm_ptable *t, struct mpool *ppool)
-{
-	/* TODO: If we add pages dynamically, they must be included here too. */
-	return mm_vm_unmap(t, layout_text_begin(), layout_text_end(), ppool) &&
-	       mm_vm_unmap(t, layout_rodata_begin(), layout_rodata_end(),
-			   ppool) &&
-	       mm_vm_unmap(t, layout_data_begin(), layout_data_end(), ppool);
-}
-
-/**
  * Write the given page table of a VM to the debug log.
  */
 void mm_vm_dump(struct mm_ptable *t)
diff --git a/src/mm_test.cc b/src/mm_test.cc
index 839a6e7..887d16c 100644
--- a/src/mm_test.cc
+++ b/src/mm_test.cc
@@ -28,6 +28,8 @@
 #include <span>
 #include <vector>
 
+#include "mm_test.hh"
+
 namespace
 {
 using namespace ::std::placeholders;
@@ -40,6 +42,8 @@
 using ::testing::SizeIs;
 using ::testing::Truly;
 
+using ::mm_test::get_ptable;
+
 constexpr size_t TEST_HEAP_SIZE = PAGE_SIZE * 16;
 const int TOP_LEVEL = arch_mm_stage2_max_level();
 const paddr_t VM_MEM_END = pa_init(0x200'0000'0000);
@@ -73,21 +77,6 @@
 	return std::span<pte_t>(table->entries, std::end(table->entries));
 }
 
-/**
- * Get an STL representation of the ptable.
- */
-std::vector<std::span<pte_t, MM_PTE_PER_PAGE>> get_ptable(
-	const struct mm_ptable &ptable)
-{
-	std::vector<std::span<pte_t, MM_PTE_PER_PAGE>> all;
-	const uint8_t root_table_count = arch_mm_stage2_root_table_count();
-	for (uint8_t i = 0; i < root_table_count; ++i) {
-		all.push_back(get_table(
-			pa_add(ptable.root, i * sizeof(struct mm_page_table))));
-	}
-	return all;
-}
-
 class mm : public ::testing::Test
 {
 	void SetUp() override
@@ -698,20 +687,6 @@
 }
 
 /**
- * If nothing is mapped, unmapping the hypervisor has no effect.
- */
-TEST_F(mm, vm_unmap_hypervisor_not_mapped)
-{
-	struct mm_ptable ptable;
-	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
-	EXPECT_TRUE(mm_vm_unmap_hypervisor(&ptable, &ppool));
-	EXPECT_THAT(
-		get_ptable(ptable),
-		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
-	mm_vm_fini(&ptable, &ppool);
-}
-
-/**
  * If range is not mapped, unmapping has no effect.
  */
 TEST_F(mm, unmap_not_mapped)
@@ -1199,3 +1174,22 @@
 }
 
 } /* namespace */
+
+namespace mm_test
+{
+/**
+ * Get an STL representation of the ptable.
+ */
+std::vector<std::span<pte_t, MM_PTE_PER_PAGE>> get_ptable(
+	const struct mm_ptable &ptable)
+{
+	std::vector<std::span<pte_t, MM_PTE_PER_PAGE>> all;
+	const uint8_t root_table_count = arch_mm_stage2_root_table_count();
+	for (uint8_t i = 0; i < root_table_count; ++i) {
+		all.push_back(get_table(
+			pa_add(ptable.root, i * sizeof(struct mm_page_table))));
+	}
+	return all;
+}
+
+} /* namespace mm_test */
diff --git a/src/mm_test.hh b/src/mm_test.hh
new file mode 100644
index 0000000..2e906aa
--- /dev/null
+++ b/src/mm_test.hh
@@ -0,0 +1,31 @@
+/*
+ * Copyright 2019 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+extern "C" {
+#include "hf/mm.h"
+}
+
+#include <span>
+#include <vector>
+
+namespace mm_test
+{
+std::vector<std::span<pte_t, MM_PTE_PER_PAGE>> get_ptable(
+	const struct mm_ptable &ptable);
+
+} /* namespace mm_test */
diff --git a/src/spci_architected_message.c b/src/spci_architected_message.c
index 7c9d2b7..2c3ff89 100644
--- a/src/spci_architected_message.c
+++ b/src/spci_architected_message.c
@@ -19,6 +19,7 @@
 #include "hf/dlog.h"
 #include "hf/spci_internal.h"
 #include "hf/std.h"
+#include "hf/vm.h"
 
 /**
  * Obtain the next mode to apply to the two VMs.
@@ -316,8 +317,8 @@
  * made to memory mappings.
  */
 static bool spci_region_group_identity_map(
-	struct mm_ptable *t, struct spci_memory_region *memory_region, int mode,
-	struct mpool *ppool, bool commit)
+	struct vm_locked vm_locked, struct spci_memory_region *memory_region,
+	int mode, struct mpool *ppool, bool commit)
 {
 	struct spci_memory_region_constituent *constituents =
 		spci_memory_region_get_constituents(memory_region);
@@ -331,10 +332,10 @@
 		paddr_t pa_end = pa_add(pa_begin, size);
 
 		if (commit) {
-			mm_vm_identity_commit(t, pa_begin, pa_end, mode, ppool,
-					      NULL);
-		} else if (!mm_vm_identity_prepare(t, pa_begin, pa_end, mode,
-						   ppool)) {
+			vm_identity_commit(vm_locked, pa_begin, pa_end, mode,
+					   ppool, NULL);
+		} else if (!vm_identity_prepare(vm_locked, pa_begin, pa_end,
+						mode, ppool)) {
 			return false;
 		}
 	}
@@ -501,9 +502,9 @@
 	 * sure the entire operation will succeed without exhausting the page
 	 * pool.
 	 */
-	if (!spci_region_group_identity_map(&from->ptable, memory_region,
+	if (!spci_region_group_identity_map(from_locked, memory_region,
 					    from_mode, api_page_pool, false) ||
-	    !spci_region_group_identity_map(&to->ptable, memory_region, to_mode,
+	    !spci_region_group_identity_map(to_locked, memory_region, to_mode,
 					    api_page_pool, false)) {
 		/* TODO: partial defrag of failed range. */
 		ret = spci_error(SPCI_NO_MEMORY);
@@ -516,9 +517,8 @@
 	 * already prepared above, but may free pages in the case that a whole
 	 * block is being unmapped that was previously partially mapped.
 	 */
-	CHECK(spci_region_group_identity_map(&from->ptable, memory_region,
-					     from_mode, &local_page_pool,
-					     true));
+	CHECK(spci_region_group_identity_map(
+		from_locked, memory_region, from_mode, &local_page_pool, true));
 
 	/* Clear the memory so no VM or device can see the previous contents. */
 	if ((memory_region->flags & SPCI_MEMORY_REGION_FLAG_CLEAR) &&
@@ -529,9 +529,9 @@
 		 * `local_page_pool` by the call above, but will never allocate
 		 * more pages than that so can never fail.
 		 */
-		CHECK(spci_region_group_identity_map(
-			&from->ptable, memory_region, orig_from_mode,
-			&local_page_pool, true));
+		CHECK(spci_region_group_identity_map(from_locked, memory_region,
+						     orig_from_mode,
+						     &local_page_pool, true));
 
 		ret = spci_error(SPCI_NO_MEMORY);
 		goto out;
@@ -542,8 +542,8 @@
 	 * won't allocate because the transaction was already prepared above, so
 	 * it doesn't need to use the `local_page_pool`.
 	 */
-	CHECK(spci_region_group_identity_map(&to->ptable, memory_region,
-					     to_mode, api_page_pool, true));
+	CHECK(spci_region_group_identity_map(to_locked, memory_region, to_mode,
+					     api_page_pool, true));
 
 	ret = (struct spci_value){.func = SPCI_SUCCESS_32};
 
diff --git a/src/vm.c b/src/vm.c
index 530ff6b..da2460b 100644
--- a/src/vm.c
+++ b/src/vm.c
@@ -19,6 +19,8 @@
 #include "hf/api.h"
 #include "hf/check.h"
 #include "hf/cpu.h"
+#include "hf/layout.h"
+#include "hf/plat/iommu.h"
 #include "hf/spci.h"
 #include "hf/std.h"
 
@@ -145,3 +147,84 @@
 	CHECK(vcpu_index < vm->vcpu_count);
 	return &vm->vcpus[vcpu_index];
 }
+
+/**
+ * Map a range of addresses to the VM in both the MMU and the IOMMU.
+ *
+ * mm_vm_defrag should always be called after a series of page table updates,
+ * whether they succeed or fail. This is because on failure extra page table
+ * entries may have been allocated and then not used, while on success it may be
+ * possible to compact the page table by merging several entries into a block.
+ *
+ * Returns true on success, or false if the update failed and no changes were
+ * made.
+ */
+bool vm_identity_map(struct vm_locked vm_locked, paddr_t begin, paddr_t end,
+		     uint32_t mode, struct mpool *ppool, ipaddr_t *ipa)
+{
+	if (!vm_identity_prepare(vm_locked, begin, end, mode, ppool)) {
+		return false;
+	}
+
+	vm_identity_commit(vm_locked, begin, end, mode, ppool, ipa);
+
+	return true;
+}
+
+/**
+ * Prepares the given VM for the given address mapping such that it will be able
+ * to commit the change without failure.
+ *
+ * In particular, after multiple calls to this function, the corresponding
+ * calls to commit the changes will succeed.
+ *
+ * Returns true on success, or false if the update failed and no changes were
+ * made.
+ */
+bool vm_identity_prepare(struct vm_locked vm_locked, paddr_t begin, paddr_t end,
+			 uint32_t mode, struct mpool *ppool)
+{
+	return mm_vm_identity_prepare(&vm_locked.vm->ptable, begin, end, mode,
+				      ppool);
+}
+
+/**
+ * Commits the given address mapping to the VM assuming the operation cannot
+ * fail. `vm_identity_prepare` must be used correctly before this to ensure
+ * this condition.
+ */
+void vm_identity_commit(struct vm_locked vm_locked, paddr_t begin, paddr_t end,
+			uint32_t mode, struct mpool *ppool, ipaddr_t *ipa)
+{
+	mm_vm_identity_commit(&vm_locked.vm->ptable, begin, end, mode, ppool,
+			      ipa);
+	plat_iommu_identity_map(vm_locked, begin, end, mode);
+}
+
+/**
+ * Unmap a range of addresses from the VM.
+ *
+ * Returns true on success, or false if the update failed and no changes were
+ * made.
+ */
+bool vm_unmap(struct vm_locked vm_locked, paddr_t begin, paddr_t end,
+	      struct mpool *ppool)
+{
+	uint32_t mode = MM_MODE_UNMAPPED_MASK;
+
+	return vm_identity_map(vm_locked, begin, end, mode, ppool, NULL);
+}
+
+/**
+ * Unmaps the hypervisor pages from the given page table.
+ */
+bool vm_unmap_hypervisor(struct vm_locked vm_locked, struct mpool *ppool)
+{
+	/* TODO: If we add pages dynamically, they must be included here too. */
+	return vm_unmap(vm_locked, layout_text_begin(), layout_text_end(),
+			ppool) &&
+	       vm_unmap(vm_locked, layout_rodata_begin(), layout_rodata_end(),
+			ppool) &&
+	       vm_unmap(vm_locked, layout_data_begin(), layout_data_end(),
+			ppool);
+}
diff --git a/src/vm_test.cc b/src/vm_test.cc
new file mode 100644
index 0000000..fb33ec4
--- /dev/null
+++ b/src/vm_test.cc
@@ -0,0 +1,81 @@
+/*
+ * Copyright 2019 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <gmock/gmock.h>
+
+extern "C" {
+#include "hf/mpool.h"
+#include "hf/vm.h"
+}
+
+#include <memory>
+#include <span>
+#include <vector>
+
+#include "mm_test.hh"
+
+namespace
+{
+using namespace ::std::placeholders;
+
+using ::testing::AllOf;
+using ::testing::Each;
+using ::testing::SizeIs;
+
+using struct_vm = struct vm;
+
+constexpr size_t TEST_HEAP_SIZE = PAGE_SIZE * 16;
+const int TOP_LEVEL = arch_mm_stage2_max_level();
+
+class vm : public ::testing::Test
+{
+	void SetUp() override
+	{
+		/*
+		 * TODO: replace with direct use of stdlib allocator so
+		 * sanitizers are more effective.
+		 */
+		test_heap = std::make_unique<uint8_t[]>(TEST_HEAP_SIZE);
+		mpool_init(&ppool, sizeof(struct mm_page_table));
+		mpool_add_chunk(&ppool, test_heap.get(), TEST_HEAP_SIZE);
+	}
+
+	std::unique_ptr<uint8_t[]> test_heap;
+
+       protected:
+	struct mpool ppool;
+};
+
+/**
+ * If nothing is mapped, unmapping the hypervisor has no effect.
+ */
+TEST_F(vm, vm_unmap_hypervisor_not_mapped)
+{
+	struct_vm *vm;
+	struct vm_locked vm_locked;
+
+	ASSERT_TRUE(vm_init(1, &ppool, &vm));
+	vm_locked = vm_lock(vm);
+	ASSERT_TRUE(mm_vm_init(&vm->ptable, &ppool));
+	EXPECT_TRUE(vm_unmap_hypervisor(vm_locked, &ppool));
+	EXPECT_THAT(
+		mm_test::get_ptable(vm->ptable),
+		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
+	mm_vm_fini(&vm->ptable, &ppool);
+	vm_unlock(&vm_locked);
+}
+
+} /* namespace */