Add locking for the hypervisor's page table.

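The hypervisor's stage-1 page table is now guarded by a spinlock. A new
struct mm_stage1_locked represents the table while the lock is held; it
is acquired with mm_lock_stage1() and released with mm_unlock_stage1(),
and mm_identity_map(), mm_unmap() and mm_defrag() take the locked table
as an explicit argument. The boot, load, FDT, console and API paths are
updated to take the lock and pass it through, and the lock ordering in
api.c is extended so that mm_stage1_lock is taken last.

A minimal sketch of the resulting calling pattern (begin, end and ppool
are illustrative):

    struct mm_stage1_locked stage1_locked = mm_lock_stage1();
    void *ptr = mm_identity_map(stage1_locked, begin, end,
                                MM_MODE_W, ppool);
    ...
    mm_unmap(stage1_locked, begin, end, ppool);
    mm_unlock_stage1(&stage1_locked);
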
Bug: 133217279

Change-Id: I04216838006b02b03a8f3f900d4dbd18521d5db2
diff --git a/inc/hf/boot_params.h b/inc/hf/boot_params.h
index 8c9220b..591e353 100644
--- a/inc/hf/boot_params.h
+++ b/inc/hf/boot_params.h
@@ -47,5 +47,7 @@
 	paddr_t initrd_end;
 };
 
-bool plat_get_boot_params(struct boot_params *p, struct mpool *ppool);
-bool plat_update_boot_params(struct boot_params_update *p, struct mpool *ppool);
+bool plat_get_boot_params(struct mm_stage1_locked stage1_locked,
+			  struct boot_params *p, struct mpool *ppool);
+bool plat_update_boot_params(struct mm_stage1_locked stage1_locked,
+			     struct boot_params_update *p, struct mpool *ppool);
diff --git a/inc/hf/fdt_handler.h b/inc/hf/fdt_handler.h
index a454713..bcb8c8b 100644
--- a/inc/hf/fdt_handler.h
+++ b/inc/hf/fdt_handler.h
@@ -21,14 +21,16 @@
 #include "hf/mm.h"
 #include "hf/mpool.h"
 
-struct fdt_header *fdt_map(paddr_t fdt_addr, struct fdt_node *n,
+struct fdt_header *fdt_map(struct mm_stage1_locked stage1_locked,
+			   paddr_t fdt_addr, struct fdt_node *n,
 			   struct mpool *ppool);
-bool fdt_unmap(struct fdt_header *fdt, struct mpool *ppool);
+bool fdt_unmap(struct mm_stage1_locked stage1_locked, struct fdt_header *fdt,
+	       struct mpool *ppool);
 void fdt_find_cpus(const struct fdt_node *root, uint64_t *cpu_ids,
 		   size_t *cpu_count);
 void fdt_find_memory_ranges(const struct fdt_node *root, struct boot_params *p);
 bool fdt_find_initrd(struct fdt_node *n, paddr_t *begin, paddr_t *end);
 
 /** Apply an update to the FDT. */
-bool fdt_patch(paddr_t fdt_addr, struct boot_params_update *p,
-	       struct mpool *ppool);
+bool fdt_patch(struct mm_stage1_locked stage1_locked, paddr_t fdt_addr,
+	       struct boot_params_update *p, struct mpool *ppool);
diff --git a/inc/hf/load.h b/inc/hf/load.h
index f0b13b7..2d9394a 100644
--- a/inc/hf/load.h
+++ b/inc/hf/load.h
@@ -25,8 +25,10 @@
 #include "hf/mm.h"
 #include "hf/mpool.h"
 
-bool load_primary(const struct memiter *cpio, uintreg_t kernel_arg,
+bool load_primary(struct mm_stage1_locked stage1_locked,
+		  const struct memiter *cpio, uintreg_t kernel_arg,
 		  struct memiter *initrd, struct mpool *ppool);
-bool load_secondary(const struct memiter *cpio,
+bool load_secondary(struct mm_stage1_locked stage1_locked,
+		    const struct memiter *cpio,
 		    const struct boot_params *params,
 		    struct boot_params_update *update, struct mpool *ppool);
diff --git a/inc/hf/mm.h b/inc/hf/mm.h
index ed595a7..b34fb2c 100644
--- a/inc/hf/mm.h
+++ b/inc/hf/mm.h
@@ -86,6 +86,11 @@
 	paddr_t root;
 };
 
+/** Represents the currently locked stage-1 page table of the hypervisor. */
+struct mm_stage1_locked {
+	struct mm_ptable *ptable;
+};
+
 void mm_vm_enable_invalidation(void);
 
 bool mm_vm_init(struct mm_ptable *t, struct mpool *ppool);
@@ -100,9 +105,13 @@
 bool mm_vm_get_mode(struct mm_ptable *t, ipaddr_t begin, ipaddr_t end,
 		    int *mode);
 
+struct mm_stage1_locked mm_lock_stage1(void);
+void mm_unlock_stage1(struct mm_stage1_locked *lock);
+void *mm_identity_map(struct mm_stage1_locked stage1_locked, paddr_t begin,
+		      paddr_t end, int mode, struct mpool *ppool);
+bool mm_unmap(struct mm_stage1_locked stage1_locked, paddr_t begin, paddr_t end,
+	      struct mpool *ppool);
+void mm_defrag(struct mm_stage1_locked stage1_locked, struct mpool *ppool);
+
 bool mm_init(struct mpool *ppool);
 bool mm_cpu_init(void);
-void *mm_identity_map(paddr_t begin, paddr_t end, int mode,
-		      struct mpool *ppool);
-bool mm_unmap(paddr_t begin, paddr_t end, struct mpool *ppool);
-void mm_defrag(struct mpool *ppool);
diff --git a/inc/hf/plat/console.h b/inc/hf/plat/console.h
index 604ac26..dbce103 100644
--- a/inc/hf/plat/console.h
+++ b/inc/hf/plat/console.h
@@ -16,6 +16,7 @@
 
 #pragma once
 
+#include "hf/mm.h"
 #include "hf/mpool.h"
 #include "hf/vm.h"
 
@@ -23,7 +24,8 @@
 void plat_console_init(void);
 
 /** Initialises any memory mappings that the console driver needs. */
-void plat_console_mm_init(struct mpool *ppool);
+void plat_console_mm_init(struct mm_stage1_locked stage1_locked,
+			  struct mpool *ppool);
 
 /** Initialises any per-VM memory mappings that the console driver needs. */
 void plat_console_vm_mm_init(struct vm *vm, struct mpool *ppool);
diff --git a/project/reference b/project/reference
index 808531e..105d519 160000
--- a/project/reference
+++ b/project/reference
@@ -1 +1 @@
-Subproject commit 808531e100759dc273f284e0a0f0bd2ff7c51a53
+Subproject commit 105d51940c5068f7f07ccecd96885b4572589396
diff --git a/src/api.c b/src/api.c
index 91b582d..9bd8f4e 100644
--- a/src/api.c
+++ b/src/api.c
@@ -35,10 +35,10 @@
  * acquisition of locks held concurrently by the same physical CPU. Our current
  * ordering requirements are as follows:
  *
- * vm::lock -> vcpu::lock
+ * vm::lock -> vcpu::lock -> mm_stage1_lock
  *
- * Locks of the same kind require the lock of lowest address to be locked first,
- * see `sl_lock_both()`.
+ * Locks of the same kind require the lock of lowest address to be locked
+ * first, see `sl_lock_both()`.
  */
 
 static_assert(HF_MAILBOX_SIZE == PAGE_SIZE,
@@ -354,13 +354,13 @@
 	/*
 	 * Wait until the registers become available. All locks must be
 	 * released between iterations of this loop to avoid potential deadlocks
-	 * if, on any path, a lock needs to be taken after taking the decision
-	 * to switch context but before the registers have been saved.
+	 * if, on any path, a lock needs to be taken after taking the
+	 * decision to switch context but before the registers have been saved.
 	 *
-	 * The VM lock is not needed in the common case so it must only be taken
-	 * when it is going to be needed. This ensures there are no inter-vCPU
-	 * dependencies in the common run case meaning the sensitive context
-	 * switch performance is consistent.
+	 * The VM lock is not needed in the common case so it must only be
+	 * taken when it is going to be needed. This ensures there are no
+	 * inter-vCPU dependencies in the common run case meaning the sensitive
+	 * context switch performance is consistent.
 	 */
 	for (;;) {
 		sl_lock(&vcpu->lock);
@@ -612,6 +612,7 @@
 {
 	struct vm *vm = current->vm;
 	struct vm_locked locked;
+	struct mm_stage1_locked mm_stage1_locked;
 	paddr_t pa_send_begin;
 	paddr_t pa_send_end;
 	paddr_t pa_recv_begin;
@@ -639,7 +640,16 @@
 		return -1;
 	}
 
+	/*
+	 * The hypervisor's memory map must be locked for the duration of this
+	 * operation to ensure there will be sufficient memory to recover from
+	 * any failures.
+	 *
+	 * TODO: the scope of the lock can be reduced but will require
+	 *       restructuring to keep a single unlock point.
+	 */
 	locked = vm_lock(vm);
+	mm_stage1_locked = mm_lock_stage1();
 
 	/* We only allow these to be setup once. */
 	if (vm->mailbox.send || vm->mailbox.recv) {
@@ -690,12 +700,13 @@
 	}
 
 	/* Map the send page as read-only in the hypervisor address space. */
-	vm->mailbox.send = mm_identity_map(pa_send_begin, pa_send_end,
-					   MM_MODE_R, &local_page_pool);
+	vm->mailbox.send =
+		mm_identity_map(mm_stage1_locked, pa_send_begin, pa_send_end,
+				MM_MODE_R, &local_page_pool);
 	if (!vm->mailbox.send) {
 		/* TODO: partial defrag of failed range. */
 		/* Recover any memory consumed in failed mapping. */
-		mm_defrag(&local_page_pool);
+		mm_defrag(mm_stage1_locked, &local_page_pool);
 		goto fail_undo_send_and_recv;
 	}
 
@@ -703,12 +714,13 @@
 	 * Map the receive page as writable in the hypervisor address space. On
 	 * failure, unmap the send page before returning.
 	 */
-	vm->mailbox.recv = mm_identity_map(pa_recv_begin, pa_recv_end,
-					   MM_MODE_W, &local_page_pool);
+	vm->mailbox.recv =
+		mm_identity_map(mm_stage1_locked, pa_recv_begin, pa_recv_end,
+				MM_MODE_W, &local_page_pool);
 	if (!vm->mailbox.recv) {
 		/* TODO: partial defrag of failed range. */
 		/* Recover any memory consumed in failed mapping. */
-		mm_defrag(&local_page_pool);
+		mm_defrag(mm_stage1_locked, &local_page_pool);
 		goto fail_undo_all;
 	}
 
@@ -722,7 +734,8 @@
 	 */
 fail_undo_all:
 	vm->mailbox.send = NULL;
-	mm_unmap(pa_send_begin, pa_send_end, &local_page_pool);
+	mm_unmap(mm_stage1_locked, pa_send_begin, pa_send_end,
+		 &local_page_pool);
 
 fail_undo_send_and_recv:
 	mm_vm_identity_map(&vm->ptable, pa_recv_begin, pa_recv_end,
@@ -739,6 +752,7 @@
 	ret = -1;
 
 exit:
+	mm_unlock_stage1(&mm_stage1_locked);
 	vm_unlock(&locked);
 
 	return ret;
@@ -1203,21 +1217,33 @@
 	 *       the changes to stage-1 tables and will allow only local
 	 *       invalidation.
 	 */
-	void *ptr = mm_identity_map(begin, end, MM_MODE_W, ppool);
+	bool ret;
+	struct mm_stage1_locked stage1_locked = mm_lock_stage1();
+	void *ptr =
+		mm_identity_map(stage1_locked, begin, end, MM_MODE_W, ppool);
 	size_t size = pa_difference(begin, end);
 
 	if (!ptr) {
 		/* TODO: partial defrag of failed range. */
 		/* Recover any memory consumed in failed mapping. */
-		mm_defrag(ppool);
-		return false;
+		mm_defrag(stage1_locked, ppool);
+		goto fail;
 	}
 
 	memset_s(ptr, size, 0, size);
 	arch_mm_write_back_dcache(ptr, size);
-	mm_unmap(begin, end, ppool);
+	mm_unmap(stage1_locked, begin, end, ppool);
 
-	return true;
+	ret = true;
+	goto out;
+
+fail:
+	ret = false;
+
+out:
+	mm_unlock_stage1(&stage1_locked);
+
+	return ret;
 }
 
 /**
diff --git a/src/arch/aarch64/pl011/pl011.c b/src/arch/aarch64/pl011/pl011.c
index fdf5a7d..bf7186c 100644
--- a/src/arch/aarch64/pl011/pl011.c
+++ b/src/arch/aarch64/pl011/pl011.c
@@ -37,10 +37,11 @@
 	/* No hardware initialisation required. */
 }
 
-void plat_console_mm_init(struct mpool *ppool)
+void plat_console_mm_init(struct mm_stage1_locked stage1_locked,
+			  struct mpool *ppool)
 {
 	/* Map page for UART. */
-	mm_identity_map(pa_init(PL011_BASE),
+	mm_identity_map(stage1_locked, pa_init(PL011_BASE),
 			pa_add(pa_init(PL011_BASE), PAGE_SIZE),
 			MM_MODE_R | MM_MODE_W | MM_MODE_D, ppool);
 }
diff --git a/src/arch/fake/console.c b/src/arch/fake/console.c
index d906352..036cced 100644
--- a/src/arch/fake/console.c
+++ b/src/arch/fake/console.c
@@ -25,7 +25,8 @@
 {
 }
 
-void plat_console_mm_init(struct mpool *ppool)
+void plat_console_mm_init(struct mm_stage1_locked stage1_locked,
+			  struct mpool *ppool)
 {
 }
 
diff --git a/src/fdt_handler.c b/src/fdt_handler.c
index fa789b2..a87226e 100644
--- a/src/fdt_handler.c
+++ b/src/fdt_handler.c
@@ -245,14 +245,16 @@
 	/* TODO: Check for "reserved-memory" nodes. */
 }
 
-struct fdt_header *fdt_map(paddr_t fdt_addr, struct fdt_node *n,
+struct fdt_header *fdt_map(struct mm_stage1_locked stage1_locked,
+			   paddr_t fdt_addr, struct fdt_node *n,
 			   struct mpool *ppool)
 {
 	struct fdt_header *fdt;
 
 	/* Map the fdt header in. */
-	fdt = mm_identity_map(fdt_addr, pa_add(fdt_addr, fdt_header_size()),
-			      MM_MODE_R, ppool);
+	fdt = mm_identity_map(stage1_locked, fdt_addr,
+			      pa_add(fdt_addr, fdt_header_size()), MM_MODE_R,
+			      ppool);
 	if (!fdt) {
 		dlog("Unable to map FDT header.\n");
 		return NULL;
@@ -264,8 +266,9 @@
 	}
 
 	/* Map the rest of the fdt in. */
-	fdt = mm_identity_map(fdt_addr, pa_add(fdt_addr, fdt_total_size(fdt)),
-			      MM_MODE_R, ppool);
+	fdt = mm_identity_map(stage1_locked, fdt_addr,
+			      pa_add(fdt_addr, fdt_total_size(fdt)), MM_MODE_R,
+			      ppool);
 	if (!fdt) {
 		dlog("Unable to map full FDT.\n");
 		goto fail;
@@ -274,19 +277,22 @@
 	return fdt;
 
 fail:
-	mm_unmap(fdt_addr, pa_add(fdt_addr, fdt_header_size()), ppool);
+	mm_unmap(stage1_locked, fdt_addr, pa_add(fdt_addr, fdt_header_size()),
+		 ppool);
 	return NULL;
 }
 
-bool fdt_unmap(struct fdt_header *fdt, struct mpool *ppool)
+bool fdt_unmap(struct mm_stage1_locked stage1_locked, struct fdt_header *fdt,
+	       struct mpool *ppool)
 {
 	paddr_t fdt_addr = pa_from_va(va_from_ptr(fdt));
 
-	return mm_unmap(fdt_addr, pa_add(fdt_addr, fdt_total_size(fdt)), ppool);
+	return mm_unmap(stage1_locked, fdt_addr,
+			pa_add(fdt_addr, fdt_total_size(fdt)), ppool);
 }
 
-bool fdt_patch(paddr_t fdt_addr, struct boot_params_update *p,
-	       struct mpool *ppool)
+bool fdt_patch(struct mm_stage1_locked stage1_locked, paddr_t fdt_addr,
+	       struct boot_params_update *p, struct mpool *ppool)
 {
 	struct fdt_header *fdt;
 	struct fdt_node n;
@@ -294,8 +300,9 @@
 	size_t i;
 
 	/* Map the fdt header in. */
-	fdt = mm_identity_map(fdt_addr, pa_add(fdt_addr, fdt_header_size()),
-			      MM_MODE_R, ppool);
+	fdt = mm_identity_map(stage1_locked, fdt_addr,
+			      pa_add(fdt_addr, fdt_header_size()), MM_MODE_R,
+			      ppool);
 	if (!fdt) {
 		dlog("Unable to map FDT header.\n");
 		return false;
@@ -307,7 +314,7 @@
 	}
 
 	/* Map the fdt (+ a page) in r/w mode in preparation for updating it. */
-	fdt = mm_identity_map(fdt_addr,
+	fdt = mm_identity_map(stage1_locked, fdt_addr,
 			      pa_add(fdt_addr, fdt_total_size(fdt) + PAGE_SIZE),
 			      MM_MODE_R | MM_MODE_W, ppool);
 	if (!fdt) {
@@ -363,7 +370,7 @@
 
 out_unmap_fdt:
 	/* Unmap FDT. */
-	if (!mm_unmap(fdt_addr,
+	if (!mm_unmap(stage1_locked, fdt_addr,
 		      pa_add(fdt_addr, fdt_total_size(fdt) + PAGE_SIZE),
 		      ppool)) {
 		dlog("Unable to unmap writable FDT.\n");
@@ -372,6 +379,7 @@
 	return ret;
 
 err_unmap_fdt_header:
-	mm_unmap(fdt_addr, pa_add(fdt_addr, fdt_header_size()), ppool);
+	mm_unmap(stage1_locked, fdt_addr, pa_add(fdt_addr, fdt_header_size()),
+		 ppool);
 	return false;
 }
diff --git a/src/fdt_handler_test.cc b/src/fdt_handler_test.cc
index e259ee9..eed42c0 100644
--- a/src/fdt_handler_test.cc
+++ b/src/fdt_handler_test.cc
@@ -105,11 +105,14 @@
 	struct fdt_node n;
 	struct boot_params params = {};
 
-	fdt = fdt_map(pa_init((uintpaddr_t)&test_dtb), &n, &ppool);
+	struct mm_stage1_locked mm_stage1_locked = mm_lock_stage1();
+	fdt = fdt_map(mm_stage1_locked, pa_init((uintpaddr_t)&test_dtb), &n,
+		      &ppool);
 	ASSERT_THAT(fdt, NotNull());
 	ASSERT_TRUE(fdt_find_child(&n, ""));
 	fdt_find_memory_ranges(&n, &params);
-	ASSERT_TRUE(fdt_unmap(fdt, &ppool));
+	ASSERT_TRUE(fdt_unmap(mm_stage1_locked, fdt, &ppool));
+	mm_unlock_stage1(&mm_stage1_locked);
 
 	EXPECT_THAT(params.mem_ranges_count, Eq(3));
 	EXPECT_THAT(pa_addr(params.mem_ranges[0].begin), Eq(0x00000000));
diff --git a/src/load.c b/src/load.c
index 639291a..7556533 100644
--- a/src/load.c
+++ b/src/load.c
@@ -39,13 +39,13 @@
  * disabled. When switching to the partitions, the caching is initially disabled
  * so the data must be available without the cache.
  */
-static bool copy_to_unmapped(paddr_t to, const void *from, size_t size,
-			     struct mpool *ppool)
+static bool copy_to_unmapped(struct mm_stage1_locked stage1_locked, paddr_t to,
+			     const void *from, size_t size, struct mpool *ppool)
 {
 	paddr_t to_end = pa_add(to, size);
 	void *ptr;
 
-	ptr = mm_identity_map(to, to_end, MM_MODE_W, ppool);
+	ptr = mm_identity_map(stage1_locked, to, to_end, MM_MODE_W, ppool);
 	if (!ptr) {
 		return false;
 	}
@@ -53,7 +53,7 @@
 	memcpy_s(ptr, size, from, size);
 	arch_mm_write_back_dcache(ptr, size);
 
-	mm_unmap(to, to_end, ppool);
+	mm_unmap(stage1_locked, to, to_end, ppool);
 
 	return true;
 }
@@ -107,7 +107,8 @@
 /**
  * Loads the primary VM.
  */
-bool load_primary(const struct memiter *cpio, uintreg_t kernel_arg,
+bool load_primary(struct mm_stage1_locked stage1_locked,
+		  const struct memiter *cpio, uintreg_t kernel_arg,
 		  struct memiter *initrd, struct mpool *ppool)
 {
 	struct memiter it;
@@ -119,8 +120,8 @@
 	}
 
 	dlog("Copying primary to %p\n", pa_addr(primary_begin));
-	if (!copy_to_unmapped(primary_begin, it.next, it.limit - it.next,
-			      ppool)) {
+	if (!copy_to_unmapped(stage1_locked, primary_begin, it.next,
+			      it.limit - it.next, ppool)) {
 		dlog("Unable to relocate kernel for primary vm.\n");
 		return false;
 	}
@@ -247,7 +248,8 @@
  * Loads all secondary VMs into the memory ranges from the given params.
  * Memory reserved for the VMs is added to the `reserved_ranges` of `update`.
  */
-bool load_secondary(const struct memiter *cpio,
+bool load_secondary(struct mm_stage1_locked stage1_locked,
+		    const struct memiter *cpio,
 		    const struct boot_params *params,
 		    struct boot_params_update *update, struct mpool *ppool)
 {
@@ -317,8 +319,9 @@
 			continue;
 		}
 
-		if (!copy_to_unmapped(secondary_mem_begin, kernel.next,
-				      kernel.limit - kernel.next, ppool)) {
+		if (!copy_to_unmapped(stage1_locked, secondary_mem_begin,
+				      kernel.next, kernel.limit - kernel.next,
+				      ppool)) {
 			dlog("Unable to copy kernel\n");
 			continue;
 		}
diff --git a/src/main.c b/src/main.c
index 79cadc9..9a66975 100644
--- a/src/main.c
+++ b/src/main.c
@@ -50,6 +50,7 @@
 	void *initrd;
 	size_t i;
 	struct mpool ppool;
+	struct mm_stage1_locked mm_stage1_locked;
 
 	/* Make sure the console is initialised before calling dlog. */
 	plat_console_init();
@@ -69,7 +70,9 @@
 	dlog_enable_lock();
 	mpool_enable_locks();
 
-	if (!plat_get_boot_params(&params, &ppool)) {
+	mm_stage1_locked = mm_lock_stage1();
+
+	if (!plat_get_boot_params(mm_stage1_locked, &params, &ppool)) {
 		panic("unable to retrieve boot params");
 	}
 
@@ -85,8 +88,8 @@
 	     pa_addr(params.initrd_end) - 1);
 
 	/* Map initrd in, and initialise cpio parser. */
-	initrd = mm_identity_map(params.initrd_begin, params.initrd_end,
-				 MM_MODE_R, &ppool);
+	initrd = mm_identity_map(mm_stage1_locked, params.initrd_begin,
+				 params.initrd_end, MM_MODE_R, &ppool);
 	if (!initrd) {
 		panic("unable to map initrd in");
 	}
@@ -95,7 +98,8 @@
 		     pa_difference(params.initrd_begin, params.initrd_end));
 
 	/* Load all VMs. */
-	if (!load_primary(&cpio, params.kernel_arg, &primary_initrd, &ppool)) {
+	if (!load_primary(mm_stage1_locked, &cpio, params.kernel_arg,
+			  &primary_initrd, &ppool)) {
 		panic("unable to load primary VM");
 	}
 
@@ -106,16 +110,18 @@
 	update.initrd_begin = pa_from_va(va_from_ptr(primary_initrd.next));
 	update.initrd_end = pa_from_va(va_from_ptr(primary_initrd.limit));
 	update.reserved_ranges_count = 0;
-	if (!load_secondary(&cpio, &params, &update, &ppool)) {
+	if (!load_secondary(mm_stage1_locked, &cpio, &params, &update,
+			    &ppool)) {
 		panic("unable to load secondary VMs");
 	}
 
 	/* Prepare to run by updating bootparams as seen by primary VM. */
-	if (!plat_update_boot_params(&update, &ppool)) {
+	if (!plat_update_boot_params(mm_stage1_locked, &update, &ppool)) {
 		panic("plat_update_boot_params failed");
 	}
 
-	mm_defrag(&ppool);
+	mm_defrag(mm_stage1_locked, &ppool);
+	mm_unlock_stage1(&mm_stage1_locked);
 
 	/* Initialise the API page pool. ppool will be empty from now on. */
 	api_init(&ppool);
diff --git a/src/mm.c b/src/mm.c
index 50f4aa1..be87e69 100644
--- a/src/mm.c
+++ b/src/mm.c
@@ -55,6 +55,7 @@
 /* clang-format on */
 
 static struct mm_ptable ptable;
+static struct spinlock ptable_lock;
 
 static bool mm_stage2_invalidate = false;
 
@@ -861,14 +862,33 @@
 	return ret;
 }
 
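+/**
+ * Creates the locked representation of the hypervisor's page table without
+ * acquiring the lock.
+ */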
+static struct mm_stage1_locked mm_stage1_lock_unsafe(void)
+{
+	return (struct mm_stage1_locked){.ptable = &ptable};
+}
+
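+/**
+ * Acquires the lock on the hypervisor's page table and returns the locked
+ * table so that it can be updated.
+ */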
+struct mm_stage1_locked mm_lock_stage1(void)
+{
+	sl_lock(&ptable_lock);
+	return mm_stage1_lock_unsafe();
+}
+
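+/**
+ * Releases the lock on the hypervisor's page table and invalidates the given
+ * locked table.
+ */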
+void mm_unlock_stage1(struct mm_stage1_locked *lock)
+{
+	assert(lock->ptable == &ptable);
+	sl_unlock(&ptable_lock);
+	lock->ptable = NULL;
+}
+
 /**
  * Updates the hypervisor page table such that the given physical address range
  * is mapped into the address space at the corresponding address range in the
  * architecture-agnostic mode provided.
  */
-void *mm_identity_map(paddr_t begin, paddr_t end, int mode, struct mpool *ppool)
+void *mm_identity_map(struct mm_stage1_locked stage1_locked, paddr_t begin,
+		      paddr_t end, int mode, struct mpool *ppool)
 {
-	if (mm_ptable_identity_update(&ptable, begin, end,
+	if (mm_ptable_identity_update(stage1_locked.ptable, begin, end,
 				      arch_mm_mode_to_stage1_attrs(mode),
 				      MM_FLAG_STAGE1, ppool)) {
 		return ptr_from_va(va_from_pa(begin));
@@ -881,20 +901,32 @@
  * Updates the hypervisor table such that the given physical address range is
  * not mapped in the address space.
  */
-bool mm_unmap(paddr_t begin, paddr_t end, struct mpool *ppool)
+bool mm_unmap(struct mm_stage1_locked stage1_locked, paddr_t begin, paddr_t end,
+	      struct mpool *ppool)
 {
 	return mm_ptable_identity_update(
-		&ptable, begin, end,
+		stage1_locked.ptable, begin, end,
 		arch_mm_mode_to_stage1_attrs(MM_MODE_UNOWNED | MM_MODE_INVALID |
 					     MM_MODE_SHARED),
 		MM_FLAG_STAGE1 | MM_FLAG_UNMAP, ppool);
 }
 
 /**
+ * Defragments the hypervisor page table.
+ */
+void mm_defrag(struct mm_stage1_locked stage1_locked, struct mpool *ppool)
+{
+	mm_ptable_defrag(stage1_locked.ptable, MM_FLAG_STAGE1, ppool);
+}
+
+/**
  * Initialises memory management for the hypervisor itself.
  */
 bool mm_init(struct mpool *ppool)
 {
+	/* Locking is not enabled yet, so fake it. */
+	struct mm_stage1_locked stage1_locked = mm_stage1_lock_unsafe();
+
 	dlog("text: 0x%x - 0x%x\n", pa_addr(layout_text_begin()),
 	     pa_addr(layout_text_end()));
 	dlog("rodata: 0x%x - 0x%x\n", pa_addr(layout_rodata_begin()),
@@ -908,16 +940,16 @@
 	}
 
 	/* Let console driver map pages for itself. */
-	plat_console_mm_init(ppool);
+	plat_console_mm_init(stage1_locked, ppool);
 
 	/* Map each section. */
-	mm_identity_map(layout_text_begin(), layout_text_end(), MM_MODE_X,
-			ppool);
+	mm_identity_map(stage1_locked, layout_text_begin(), layout_text_end(),
+			MM_MODE_X, ppool);
 
-	mm_identity_map(layout_rodata_begin(), layout_rodata_end(), MM_MODE_R,
-			ppool);
+	mm_identity_map(stage1_locked, layout_rodata_begin(),
+			layout_rodata_end(), MM_MODE_R, ppool);
 
-	mm_identity_map(layout_data_begin(), layout_data_end(),
+	mm_identity_map(stage1_locked, layout_data_begin(), layout_data_end(),
 			MM_MODE_R | MM_MODE_W, ppool);
 
 	return arch_mm_init(ptable.root, true);
@@ -927,11 +959,3 @@
 {
 	return arch_mm_init(ptable.root, false);
 }
-
-/**
- * Defragments the hypervisor page table.
- */
-void mm_defrag(struct mpool *ppool)
-{
-	mm_ptable_defrag(&ptable, MM_FLAG_STAGE1, ppool);
-}
diff --git a/src/plat.c b/src/plat.c
index 6a0ba54..d6c5a4a 100644
--- a/src/plat.c
+++ b/src/plat.c
@@ -38,8 +38,10 @@
  * by the loader.
  */
 #pragma weak plat_get_initrd_range
-void plat_get_initrd_range(paddr_t *begin, paddr_t *end, struct mpool *ppool)
+void plat_get_initrd_range(struct mm_stage1_locked stage1_locked,
+			   paddr_t *begin, paddr_t *end, struct mpool *ppool)
 {
+	(void)stage1_locked;
 	(void)ppool;
 
 	*begin = layout_initrd_begin();
@@ -63,17 +65,19 @@
  * initrd is provided separately.
  */
 #pragma weak plat_get_boot_params
-bool plat_get_boot_params(struct boot_params *p, struct mpool *ppool)
+bool plat_get_boot_params(struct mm_stage1_locked stage1_locked,
+			  struct boot_params *p, struct mpool *ppool)
 {
 	struct fdt_header *fdt;
 	struct fdt_node n;
 	bool ret = false;
 
-	plat_get_initrd_range(&p->initrd_begin, &p->initrd_end, ppool);
+	plat_get_initrd_range(stage1_locked, &p->initrd_begin, &p->initrd_end,
+			      ppool);
 	p->kernel_arg = plat_get_kernel_arg();
 
 	/* Get the memory map from the FDT. */
-	fdt = fdt_map(plat_get_fdt_addr(), &n, ppool);
+	fdt = fdt_map(stage1_locked, plat_get_fdt_addr(), &n, ppool);
 	if (!fdt) {
 		return false;
 	}
@@ -91,7 +95,7 @@
 	ret = true;
 
 out_unmap_fdt:
-	if (!fdt_unmap(fdt, ppool)) {
+	if (!fdt_unmap(stage1_locked, fdt, ppool)) {
 		dlog("Unable to unmap fdt.");
 		return false;
 	}
@@ -110,7 +114,8 @@
  * another loader can load the data for it.
  */
 #pragma weak plat_update_boot_params
-bool plat_update_boot_params(struct boot_params_update *p, struct mpool *ppool)
+bool plat_update_boot_params(struct mm_stage1_locked stage1_locked,
+			     struct boot_params_update *p, struct mpool *ppool)
 {
-	return fdt_patch(plat_get_fdt_addr(), p, ppool);
+	return fdt_patch(stage1_locked, plat_get_fdt_addr(), p, ppool);
 }