Enable caching early in assembly.

By doing this early, more code is able to make assumptions about using
the cache so there are fewer areas where manual cache management is
required.

Moving to assembly means fewer assumptions are made about the state of
the cache before it is enabled. Doing this in C means there are concerns
about the state of the stack if the cache is ever not clean and invalid.

Bug: 141103913
Bug: 139269163
Change-Id: I8ff01c2c2a0c035f147d2d1a8372dbd8bec7ce73
diff --git a/inc/hf/arch/mm.h b/inc/hf/arch/mm.h
index d470d08..a7a155a 100644
--- a/inc/hf/arch/mm.h
+++ b/inc/hf/arch/mm.h
@@ -161,9 +161,4 @@
 /**
  * Initializes the arch specific memory management.
  */
-bool arch_mm_init(void);
-
-/**
- * Enables the current CPU with arch specific memory management state.
- */
-void arch_mm_enable(paddr_t table);
+bool arch_mm_init(paddr_t table);
diff --git a/inc/hf/mm.h b/inc/hf/mm.h
index 9d7a433..44b51b8 100644
--- a/inc/hf/mm.h
+++ b/inc/hf/mm.h
@@ -123,4 +123,3 @@
 void mm_defrag(struct mm_stage1_locked stage1_locked, struct mpool *ppool);
 
 bool mm_init(struct mpool *ppool);
-void mm_cpu_init(void);
diff --git a/src/BUILD.gn b/src/BUILD.gn
index e87ac1b..0f17b0a 100644
--- a/src/BUILD.gn
+++ b/src/BUILD.gn
@@ -29,6 +29,7 @@
   sources = [
     "boot_params.c",
     "cpio.c",
+    "init.c",
     "load.c",
     "main.c",
   ]
diff --git a/src/arch/aarch64/hypervisor/BUILD.gn b/src/arch/aarch64/hypervisor/BUILD.gn
index 36c6baa..f1d10e3 100644
--- a/src/arch/aarch64/hypervisor/BUILD.gn
+++ b/src/arch/aarch64/hypervisor/BUILD.gn
@@ -16,7 +16,6 @@
 source_set("hypervisor") {
   public_configs = [ "//src/arch/aarch64:config" ]
   sources = [
-    "cpu_entry.S",
     "exceptions.S",
     "hypervisor_entry.S",
     "plat_entry.S",
diff --git a/src/arch/aarch64/hypervisor/cpu_entry.S b/src/arch/aarch64/hypervisor/cpu_entry.S
deleted file mode 100644
index 301a1a1..0000000
--- a/src/arch/aarch64/hypervisor/cpu_entry.S
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * Copyright 2018 The Hafnium Authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     https://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "offsets.h"
-
-.section .text.cpu_entry, "ax"
-.global cpu_entry
-cpu_entry:
-	/* Disable interrupts. */
-	msr DAIFSet, #0xf
-
-	/* Use SPx (instead of SP0). */
-	msr spsel, #1
-
-	/* Prepare the stack. */
-	ldr x30, [x0, #CPU_STACK_BOTTOM]
-	mov sp, x30
-
-	/* Configure exception handlers. */
-	adrp x30, vector_table_el2
-	add x30, x30, :lo12:vector_table_el2
-	msr vbar_el2, x30
-
-	/* Call into C code, x0 holds the cpu pointer. */
-	bl cpu_main
-
-	/* Run the vcpu returned by cpu_main. */
-	bl vcpu_restore_all_and_run
-
-	/* Loop forever waiting for interrupts. */
-0:	wfi
-	b 0b
diff --git a/src/arch/aarch64/hypervisor/hypervisor_entry.S b/src/arch/aarch64/hypervisor/hypervisor_entry.S
index 3ab25c2..ce76aa5 100644
--- a/src/arch/aarch64/hypervisor/hypervisor_entry.S
+++ b/src/arch/aarch64/hypervisor/hypervisor_entry.S
@@ -16,6 +16,11 @@
 
 #include "offsets.h"
 
+/**
+ * Called only on first boot after the image has been relocated and BSS zeroed.
+ *
+ * It is required that caches be clean and invalid.
+ */
 .section .init.image_entry, "ax"
 .global image_entry
 image_entry:
@@ -23,14 +28,117 @@
 	bl plat_boot_flow_hook
 
 	/* Get pointer to first cpu. */
-	adrp x0, cpus
-	add x0, x0, :lo12:cpus
+	adrp x28, cpus
+	add x28, x28, :lo12:cpus
 
 	/* Set the ID of this cpu from the affinity bits of mpidr. */
 	mrs x30, mpidr_el1
 	ubfx x29, x30, 0, 24
 	ubfx x30, x30, 32, 8
 	orr x30, x29, x30
-	str x30, [x0, CPU_ID]
+	str x30, [x28, CPU_ID]
 
-	b cpu_entry
+	mov x0, x28
+	bl prepare_for_c
+
+	/*
+	 * Call into C to initialize the memory management configuration with
+	 * MMU and caches disabled. Result will be stored in `arch_mm_config`.
+	 */
+	bl one_time_init_mm
+
+	/* Enable MMU and caches before running the rest of initialization. */
+	bl mm_enable
+	bl one_time_init
+
+	/* Begin steady state operation. */
+	mov x0, x28
+	b cpu_init
+
+/**
+ * Entry point for all cases other than the first boot e.g. secondary CPUs and
+ * resuming from suspend.
+ *
+ * It is required that caches be coherent but not necessarily clean or invalid.
+ *
+ * x0 points to the current CPU.
+ */
+.section .text.entry, "ax"
+.global cpu_entry
+cpu_entry:
+	bl mm_enable
+	bl prepare_for_c
+
+	/* Intentional fallthrough. */
+
+cpu_init:
+	/* Call into C code, x0 holds the cpu pointer. */
+	bl cpu_main
+
+	/* Run the vcpu returned by cpu_main. */
+	bl vcpu_restore_all_and_run
+
+	/* Loop forever waiting for interrupts. */
+0:	wfi
+	b 0b
+
+/**
+ * Set up CPU environment for executing C code. This is called on first boot
+ * with caches disabled but subsequent calls will have caches enabled.
+ *
+ * x0 points to the current CPU on entry and exit.
+ */
+prepare_for_c:
+	/* Use SPx (instead of SP0). */
+	msr spsel, #1
+
+	/* Prepare the stack. */
+	ldr x1, [x0, #CPU_STACK_BOTTOM]
+	mov sp, x1
+
+	/* Configure exception handlers. */
+	adr x2, vector_table_el2
+	msr vbar_el2, x2
+	ret
+
+/**
+ * Applies the memory management configuration to the CPU, preserving x0 along
+ * the way.
+ */
+mm_enable:
+	/*
+	 * Invalidate any potentially stale local TLB entries for the
+	 * hypervisor's stage-1 and the VM's stage-2 before they start being
+	 * used. The VM's stage-1 is invalidated as a side effect but it wasn't
+	 * using it yet anyway.
+	 */
+	tlbi alle2
+	tlbi vmalls12e1
+
+	/*
+	 * Load and apply the memory management configuration. Order depends on
+	 * `struct arch_mm_config`.
+	 */
+	adrp x6, arch_mm_config
+	add x6, x6, :lo12:arch_mm_config
+
+	ldp x1, x2, [x6]
+	ldp x3, x4, [x6, #16]
+	ldr x5, [x6, #32]
+
+	msr ttbr0_el2, x1
+	msr vtcr_el2, x2
+	msr mair_el2, x3
+	msr tcr_el2, x4
+
+	/* Ensure everything before this point has completed. */
+	dsb sy
+	isb
+
+	/*
+	 * Configure sctlr_el2 to enable MMU and cache and don't proceed until
+	 * this has completed.
+	 */
+	msr sctlr_el2, x5
+	isb
+	ret
diff --git a/src/arch/aarch64/mm.c b/src/arch/aarch64/mm.c
index 595f9c0..7f41a10 100644
--- a/src/arch/aarch64/mm.c
+++ b/src/arch/aarch64/mm.c
@@ -115,14 +115,24 @@
 /** Mask for the attribute bits of the pte. */
 #define PTE_ATTR_MASK (~(PTE_ADDR_MASK | (UINT64_C(1) << 1)))
 
+/**
+ * Configuration information for memory management. Order is important as this
+ * is read from assembly.
+ *
+ * It must only be written to from `arch_mm_init()` to avoid cache and
+ * synchronization problems.
+ */
+struct arch_mm_config {
+	uintreg_t ttbr0_el2;
+	uintreg_t vtcr_el2;
+	uintreg_t mair_el2;
+	uintreg_t tcr_el2;
+	uintreg_t sctlr_el2;
+} arch_mm_config;
+
 static uint8_t mm_s2_max_level;
 static uint8_t mm_s2_root_table_count;
 
-static uintreg_t mm_vtcr_el2;
-static uintreg_t mm_mair_el2;
-static uintreg_t mm_tcr_el2;
-static uintreg_t mm_sctlr_el2;
-
 /**
  * Returns the encoding of a page table entry that isn't present.
  */
@@ -530,7 +540,40 @@
 	return mm_s2_root_table_count;
 }
 
-bool arch_mm_init(void)
+/**
+ * Given the attrs from a table at some level and the attrs from all the blocks
+ * in that table, returns equivalent attrs to use for a block which will replace
+ * the entire table.
+ */
+uint64_t arch_mm_combine_table_entry_attrs(uint64_t table_attrs,
+					   uint64_t block_attrs)
+{
+	/*
+	 * Only stage 1 table descriptors have attributes, but the bits are res0
+	 * for stage 2 table descriptors so this code is safe for both.
+	 */
+	if (table_attrs & TABLE_NSTABLE) {
+		block_attrs |= STAGE1_NS;
+	}
+	if (table_attrs & TABLE_APTABLE1) {
+		block_attrs |= STAGE1_AP2;
+	}
+	if (table_attrs & TABLE_APTABLE0) {
+		block_attrs &= ~STAGE1_AP1;
+	}
+	if (table_attrs & TABLE_XNTABLE) {
+		block_attrs |= STAGE1_XN;
+	}
+	if (table_attrs & TABLE_PXNTABLE) {
+		block_attrs |= STAGE1_PXN;
+	}
+	return block_attrs;
+}
+
+/**
+ * This is called early in initialization without MMU or caches enabled.
+ */
+bool arch_mm_init(paddr_t table)
 {
 	static const int pa_bits_table[16] = {32, 36, 40, 42, 44, 48};
 	uint64_t features = read_msr(id_aa64mmfr0_el1);
@@ -588,94 +631,55 @@
 	dlog("Stage 2 has %d page table levels with %d pages at the root.\n",
 	     mm_s2_max_level + 1, mm_s2_root_table_count);
 
-	mm_vtcr_el2 = (1u << 31) |		 /* RES1. */
-		      ((features & 0xf) << 16) | /* PS, matching features. */
-		      (0 << 14) |		 /* TG0: 4 KB granule. */
-		      (3 << 12) |		 /* SH0: inner shareable. */
-		      (1 << 10) |	     /* ORGN0: normal, cacheable ... */
-		      (1 << 8) |	      /* IRGN0: normal, cacheable ... */
-		      (sl0 << 6) |	    /* SL0. */
-		      ((64 - pa_bits) << 0) | /* T0SZ: dependent on PS. */
-		      0;
+	arch_mm_config = (struct arch_mm_config){
+		.ttbr0_el2 = pa_addr(table),
 
-	/*
-	 * 0    -> Device-nGnRnE memory
-	 * 0xff -> Normal memory, Inner/Outer Write-Back Non-transient,
-	 *         Write-Alloc, Read-Alloc.
-	 */
-	mm_mair_el2 = (0 << (8 * STAGE1_DEVICEINDX)) |
-		      (0xff << (8 * STAGE1_NORMALINDX));
+		.vtcr_el2 =
+			(1u << 31) |		   /* RES1. */
+			((features & 0xf) << 16) | /* PS, matching features. */
+			(0 << 14) |		   /* TG0: 4 KB granule. */
+			(3 << 12) |		   /* SH0: inner shareable. */
+			(1 << 10) |  /* ORGN0: normal, cacheable ... */
+			(1 << 8) |   /* IRGN0: normal, cacheable ... */
+			(sl0 << 6) | /* SL0. */
+			((64 - pa_bits) << 0) | /* T0SZ: dependent on PS. */
+			0,
 
-	/*
-	 * Configure tcr_el2.
-	 */
-	mm_tcr_el2 = (1 << 20) |		/* TBI, top byte ignored. */
-		     ((features & 0xf) << 16) | /* PS. */
-		     (0 << 14) |		/* TG0, granule size, 4KB. */
-		     (3 << 12) |		/* SH0, inner shareable. */
-		     (1 << 10) | /* ORGN0, normal mem, WB RA WA Cacheable. */
-		     (1 << 8) |  /* IRGN0, normal mem, WB RA WA Cacheable. */
-		     (25 << 0) | /* T0SZ, input address is 2^39 bytes. */
-		     0;
+		/*
+		 * 0    -> Device-nGnRnE memory
+		 * 0xff -> Normal memory, Inner/Outer Write-Back Non-transient,
+		 *         Write-Alloc, Read-Alloc.
+		 */
+		.mair_el2 = (0 << (8 * STAGE1_DEVICEINDX)) |
+			    (0xff << (8 * STAGE1_NORMALINDX)),
 
-	mm_sctlr_el2 = (1 << 0) |  /* M, enable stage 1 EL2 MMU. */
-		       (1 << 1) |  /* A, enable alignment check faults. */
-		       (1 << 2) |  /* C, data cache enable. */
-		       (1 << 3) |  /* SA, enable stack alignment check. */
-		       (3 << 4) |  /* RES1 bits. */
-		       (1 << 11) | /* RES1 bit. */
-		       (1 << 12) | /* I, instruction cache enable. */
-		       (1 << 16) | /* RES1 bit. */
-		       (1 << 18) | /* RES1 bit. */
-		       (1 << 19) | /* WXN bit, writable execute never. */
-		       (3 << 22) | /* RES1 bits. */
-		       (3 << 28) | /* RES1 bits. */
-		       0;
+		/*
+		 * Configure tcr_el2.
+		 */
+		.tcr_el2 =
+			(1 << 20) |		   /* TBI, top byte ignored. */
+			((features & 0xf) << 16) | /* PS. */
+			(0 << 14) |		   /* TG0, granule size, 4KB. */
+			(3 << 12) |		   /* SH0, inner shareable. */
+			(1 << 10) | /* ORGN0, normal mem, WB RA WA Cacheable. */
+			(1 << 8) |  /* IRGN0, normal mem, WB RA WA Cacheable. */
+			(25 << 0) | /* T0SZ, input address is 2^39 bytes. */
+			0,
+
+		.sctlr_el2 = (1 << 0) |  /* M, enable stage 1 EL2 MMU. */
+			     (1 << 1) |  /* A, enable alignment check faults. */
+			     (1 << 2) |  /* C, data cache enable. */
+			     (1 << 3) |  /* SA, enable stack alignment check. */
+			     (3 << 4) |  /* RES1 bits. */
+			     (1 << 11) | /* RES1 bit. */
+			     (1 << 12) | /* I, instruction cache enable. */
+			     (1 << 16) | /* RES1 bit. */
+			     (1 << 18) | /* RES1 bit. */
+			     (1 << 19) | /* WXN bit, writable execute never. */
+			     (3 << 22) | /* RES1 bits. */
+			     (3 << 28) | /* RES1 bits. */
+			     0,
+	};
 
 	return true;
 }
-
-void arch_mm_enable(paddr_t table)
-{
-	/* Configure translation management registers. */
-	write_msr(ttbr0_el2, pa_addr(table));
-	write_msr(vtcr_el2, mm_vtcr_el2);
-	write_msr(mair_el2, mm_mair_el2);
-	write_msr(tcr_el2, mm_tcr_el2);
-
-	/* Configure sctlr_el2 to enable MMU and cache. */
-	dsb(sy);
-	isb();
-	write_msr(sctlr_el2, mm_sctlr_el2);
-	isb();
-}
-
-/**
- * Given the attrs from a table at some level and the attrs from all the blocks
- * in that table, returns equivalent attrs to use for a block which will replace
- * the entire table.
- */
-uint64_t arch_mm_combine_table_entry_attrs(uint64_t table_attrs,
-					   uint64_t block_attrs)
-{
-	/*
-	 * Only stage 1 table descriptors have attributes, but the bits are res0
-	 * for stage 2 table descriptors so this code is safe for both.
-	 */
-	if (table_attrs & TABLE_NSTABLE) {
-		block_attrs |= STAGE1_NS;
-	}
-	if (table_attrs & TABLE_APTABLE1) {
-		block_attrs |= STAGE1_AP2;
-	}
-	if (table_attrs & TABLE_APTABLE0) {
-		block_attrs &= ~STAGE1_AP1;
-	}
-	if (table_attrs & TABLE_XNTABLE) {
-		block_attrs |= STAGE1_XN;
-	}
-	if (table_attrs & TABLE_PXNTABLE) {
-		block_attrs |= STAGE1_PXN;
-	}
-	return block_attrs;
-}
diff --git a/src/arch/fake/mm.c b/src/arch/fake/mm.c
index c788518..4778beb 100644
--- a/src/arch/fake/mm.c
+++ b/src/arch/fake/mm.c
@@ -165,14 +165,9 @@
 	return attrs >> PTE_ATTR_MODE_SHIFT;
 }
 
-bool arch_mm_init(void)
+bool arch_mm_init(paddr_t table)
 {
 	/* No initialization required. */
-	return true;
-}
-
-void arch_mm_enable(paddr_t table)
-{
-	/* There's no modelling of the MMU. */
 	(void)table;
+	return true;
 }
diff --git a/src/fdt_handler_test.cc b/src/fdt_handler_test.cc
index eed42c0..acd355e 100644
--- a/src/fdt_handler_test.cc
+++ b/src/fdt_handler_test.cc
@@ -99,7 +99,7 @@
 
 	mpool_init(&ppool, sizeof(struct mm_page_table));
 	mpool_add_chunk(&ppool, test_heap.get(), TEST_HEAP_SIZE);
-	ASSERT_TRUE(mm_init(&ppool));
+	mm_init(&ppool);
 
 	struct fdt_header *fdt;
 	struct fdt_node n;
diff --git a/src/init.c b/src/init.c
new file mode 100644
index 0000000..8a13610
--- /dev/null
+++ b/src/init.c
@@ -0,0 +1,146 @@
+/*
+ * Copyright 2018 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "hf/arch/init.h"
+
+#include <stdalign.h>
+#include <stddef.h>
+
+#include "hf/api.h"
+#include "hf/boot_flow.h"
+#include "hf/boot_params.h"
+#include "hf/cpio.h"
+#include "hf/cpu.h"
+#include "hf/dlog.h"
+#include "hf/load.h"
+#include "hf/mm.h"
+#include "hf/mpool.h"
+#include "hf/panic.h"
+#include "hf/plat/console.h"
+#include "hf/std.h"
+#include "hf/vm.h"
+
+#include "vmapi/hf/call.h"
+
+alignas(alignof(
+	struct mm_page_table)) char ptable_buf[sizeof(struct mm_page_table) *
+					       HEAP_PAGES];
+
+static struct mpool ppool;
+
+/**
+ * Performs one-time initialisation of memory management for the hypervisor.
+ *
+ * This is the only C code entry point called with MMU and caching disabled.
+ * The resulting configuration is stored in `arch_mm_config` and is used to set
+ * up the MMU and caches for all subsequent code.
+ */
+void one_time_init_mm(void)
+{
+	/* Make sure the console is initialised before calling dlog. */
+	plat_console_init();
+
+	dlog("Initialising hafnium\n");
+
+	mpool_init(&ppool, sizeof(struct mm_page_table));
+	mpool_add_chunk(&ppool, ptable_buf, sizeof(ptable_buf));
+
+	if (!mm_init(&ppool)) {
+		panic("mm_init failed");
+	}
+}
+
+/**
+ * Performs one-time initialisation of the hypervisor.
+ */
+void one_time_init(void)
+{
+	struct manifest manifest;
+	struct boot_params params;
+	struct boot_params_update update;
+	struct memiter primary_initrd;
+	struct memiter cpio;
+	void *initrd;
+	size_t i;
+	struct mm_stage1_locked mm_stage1_locked;
+
+	arch_one_time_init();
+
+	/* Enable locks now that mm is initialised. */
+	dlog_enable_lock();
+	mpool_enable_locks();
+
+	mm_stage1_locked = mm_lock_stage1();
+
+	if (!boot_flow_init(mm_stage1_locked, &manifest, &params, &ppool)) {
+		panic("Could not parse data from FDT.");
+	}
+
+	cpu_module_init(params.cpu_ids, params.cpu_count);
+
+	for (i = 0; i < params.mem_ranges_count; ++i) {
+		dlog("Memory range:  %#x - %#x\n",
+		     pa_addr(params.mem_ranges[i].begin),
+		     pa_addr(params.mem_ranges[i].end) - 1);
+	}
+
+	dlog("Ramdisk range: %#x - %#x\n", pa_addr(params.initrd_begin),
+	     pa_addr(params.initrd_end) - 1);
+
+	/* Map initrd in, and initialise cpio parser. */
+	initrd = mm_identity_map(mm_stage1_locked, params.initrd_begin,
+				 params.initrd_end, MM_MODE_R, &ppool);
+	if (!initrd) {
+		panic("unable to map initrd in");
+	}
+
+	memiter_init(&cpio, initrd,
+		     pa_difference(params.initrd_begin, params.initrd_end));
+
+	/* Load all VMs. */
+	if (!load_primary(mm_stage1_locked, &cpio, params.kernel_arg,
+			  &primary_initrd, &ppool)) {
+		panic("unable to load primary VM");
+	}
+
+	/*
+	 * load_secondary will add regions assigned to the secondary VMs from
+	 * mem_ranges to reserved_ranges.
+	 */
+	update.initrd_begin = pa_from_va(va_from_ptr(primary_initrd.next));
+	update.initrd_end = pa_from_va(va_from_ptr(primary_initrd.limit));
+	update.reserved_ranges_count = 0;
+	if (!load_secondary(mm_stage1_locked, &manifest, &cpio, &params,
+			    &update, &ppool)) {
+		panic("unable to load secondary VMs");
+	}
+
+	/* Prepare to run by updating bootparams as seen by primary VM. */
+	if (!boot_params_patch_fdt(mm_stage1_locked, &update, &ppool)) {
+		panic("plat_update_boot_params failed");
+	}
+
+	mm_defrag(mm_stage1_locked, &ppool);
+	mm_unlock_stage1(&mm_stage1_locked);
+
+	/* Initialise the API page pool. ppool will be empty from now on. */
+	api_init(&ppool);
+
+	/* Enable TLB invalidation for VM page table updates. */
+	mm_vm_enable_invalidation();
+
+	dlog("Hafnium initialisation completed\n");
+}
diff --git a/src/main.c b/src/main.c
index 9b81be5..7b8df60 100644
--- a/src/main.c
+++ b/src/main.c
@@ -14,128 +14,9 @@
  * limitations under the License.
  */
 
-#include <stdalign.h>
-#include <stddef.h>
-
-#include "hf/arch/init.h"
-
-#include "hf/api.h"
-#include "hf/boot_flow.h"
-#include "hf/boot_params.h"
-#include "hf/cpio.h"
 #include "hf/cpu.h"
-#include "hf/dlog.h"
-#include "hf/load.h"
-#include "hf/mm.h"
-#include "hf/mpool.h"
-#include "hf/panic.h"
-#include "hf/plat/console.h"
-#include "hf/std.h"
 #include "hf/vm.h"
 
-#include "vmapi/hf/call.h"
-
-alignas(alignof(
-	struct mm_page_table)) char ptable_buf[sizeof(struct mm_page_table) *
-					       HEAP_PAGES];
-
-/**
- * Performs one-time initialisation of the hypervisor.
- */
-static void one_time_init(void)
-{
-	struct manifest manifest;
-	struct boot_params params;
-	struct boot_params_update update;
-	struct memiter primary_initrd;
-	struct memiter cpio;
-	void *initrd;
-	size_t i;
-	struct mpool ppool;
-	struct mm_stage1_locked mm_stage1_locked;
-
-	/* Make sure the console is initialised before calling dlog. */
-	plat_console_init();
-
-	dlog("Initialising hafnium\n");
-
-	arch_one_time_init();
-
-	mpool_init(&ppool, sizeof(struct mm_page_table));
-	mpool_add_chunk(&ppool, ptable_buf, sizeof(ptable_buf));
-
-	if (!mm_init(&ppool)) {
-		panic("mm_init failed");
-	}
-
-	mm_cpu_init();
-
-	/* Enable locks now that mm is initialised. */
-	dlog_enable_lock();
-	mpool_enable_locks();
-
-	mm_stage1_locked = mm_lock_stage1();
-
-	if (!boot_flow_init(mm_stage1_locked, &manifest, &params, &ppool)) {
-		panic("Could not parse data from FDT.");
-	}
-
-	cpu_module_init(params.cpu_ids, params.cpu_count);
-
-	for (i = 0; i < params.mem_ranges_count; ++i) {
-		dlog("Memory range:  %#x - %#x\n",
-		     pa_addr(params.mem_ranges[i].begin),
-		     pa_addr(params.mem_ranges[i].end) - 1);
-	}
-
-	dlog("Ramdisk range: %#x - %#x\n", pa_addr(params.initrd_begin),
-	     pa_addr(params.initrd_end) - 1);
-
-	/* Map initrd in, and initialise cpio parser. */
-	initrd = mm_identity_map(mm_stage1_locked, params.initrd_begin,
-				 params.initrd_end, MM_MODE_R, &ppool);
-	if (!initrd) {
-		panic("unable to map initrd in");
-	}
-
-	memiter_init(&cpio, initrd,
-		     pa_difference(params.initrd_begin, params.initrd_end));
-
-	/* Load all VMs. */
-	if (!load_primary(mm_stage1_locked, &cpio, params.kernel_arg,
-			  &primary_initrd, &ppool)) {
-		panic("unable to load primary VM");
-	}
-
-	/*
-	 * load_secondary will add regions assigned to the secondary VMs from
-	 * mem_ranges to reserved_ranges.
-	 */
-	update.initrd_begin = pa_from_va(va_from_ptr(primary_initrd.next));
-	update.initrd_end = pa_from_va(va_from_ptr(primary_initrd.limit));
-	update.reserved_ranges_count = 0;
-	if (!load_secondary(mm_stage1_locked, &manifest, &cpio, &params,
-			    &update, &ppool)) {
-		panic("unable to load secondary VMs");
-	}
-
-	/* Prepare to run by updating bootparams as seen by primary VM. */
-	if (!boot_params_patch_fdt(mm_stage1_locked, &update, &ppool)) {
-		panic("plat_update_boot_params failed");
-	}
-
-	mm_defrag(mm_stage1_locked, &ppool);
-	mm_unlock_stage1(&mm_stage1_locked);
-
-	/* Initialise the API page pool. ppool will be empty from now on. */
-	api_init(&ppool);
-
-	/* Enable TLB invalidation for VM page table updates. */
-	mm_vm_enable_invalidation();
-
-	dlog("Hafnium initialisation completed\n");
-}
-
 /**
  * The entry point of CPUs when they are turned on. It is supposed to initialise
  * all state and return the first vCPU to run.
@@ -145,19 +26,6 @@
 	struct vcpu *vcpu;
 	struct vm *vm;
 
-	/*
-	 * Do global one-time initialisation just once. We avoid using atomics
-	 * by only touching the variable from cpu 0.
-	 */
-	static volatile bool inited = false;
-
-	if (cpu_index(c) == 0 && !inited) {
-		inited = true;
-		one_time_init();
-	} else {
-		mm_cpu_init();
-	}
-
 	vcpu = vm_get_vcpu(vm_find(HF_PRIMARY_VM_ID), cpu_index(c));
 	vm = vcpu->vm;
 	vcpu->cpu = c;
diff --git a/src/mm.c b/src/mm.c
index 886779f..b3d06e2 100644
--- a/src/mm.c
+++ b/src/mm.c
@@ -950,10 +950,5 @@
 	mm_identity_map(stage1_locked, layout_data_begin(), layout_data_end(),
 			MM_MODE_R | MM_MODE_W, ppool);
 
-	return arch_mm_init();
-}
-
-void mm_cpu_init(void)
-{
-	arch_mm_enable(ptable.root);
+	return arch_mm_init(ptable.root);
 }