Separate mm initialization from enablement.

The configuration is static, so it does not matter where it is originally
calculated. Previously there were multiple paths to enablement, one of
which was implicit in initialization; this change splits initialization
and enablement apart.

Bug: 139269163
Change-Id: Ic82c351b6e84b7d46e79f5886b39d07e53e6fde6
diff --git a/inc/hf/arch/mm.h b/inc/hf/arch/mm.h
index d43f4b9..7156e4d 100644
--- a/inc/hf/arch/mm.h
+++ b/inc/hf/arch/mm.h
@@ -158,6 +158,11 @@
 int arch_mm_stage2_attrs_to_mode(uint64_t attrs);
 
 /**
- * Initializes the arch specific memory management state.
+ * Initializes the arch specific memory management.
  */
-bool arch_mm_init(paddr_t table, bool first);
+bool arch_mm_init(void);
+
+/**
+ * Applies the arch specific memory management state to the current CPU.
+ */
+void arch_mm_enable(paddr_t table);
diff --git a/inc/hf/mm.h b/inc/hf/mm.h
index 8b2fb8f..9d7a433 100644
--- a/inc/hf/mm.h
+++ b/inc/hf/mm.h
@@ -123,4 +123,4 @@
 void mm_defrag(struct mm_stage1_locked stage1_locked, struct mpool *ppool);
 
 bool mm_init(struct mpool *ppool);
-bool mm_cpu_init(void);
+void mm_cpu_init(void);
diff --git a/src/arch/aarch64/mm.c b/src/arch/aarch64/mm.c
index 471c323..b9f7270 100644
--- a/src/arch/aarch64/mm.c
+++ b/src/arch/aarch64/mm.c
@@ -116,6 +116,11 @@
 static uint8_t mm_s2_max_level;
 static uint8_t mm_s2_root_table_count;
 
+static uintreg_t mm_vtcr_el2;
+static uintreg_t mm_mair_el2;
+static uintreg_t mm_tcr_el2;
+static uintreg_t mm_sctlr_el2;
+
 /**
  * Returns the encoding of a page table entry that isn't present.
  */
@@ -518,11 +523,10 @@
 	return mm_s2_root_table_count;
 }
 
-bool arch_mm_init(paddr_t table, bool first)
+bool arch_mm_init(void)
 {
 	static const int pa_bits_table[16] = {32, 36, 40, 42, 44, 48};
 	uint64_t features = read_msr(id_aa64mmfr0_el1);
-	uint64_t v;
 	int pa_bits = pa_bits_table[features & 0xf];
 	int extend_bits;
 	int sl0;
@@ -540,9 +544,7 @@
 		return false;
 	}
 
-	if (first) {
-		dlog("Supported bits in physical address: %d\n", pa_bits);
-	}
+	dlog("Supported bits in physical address: %d\n", pa_bits);
 
 	/*
 	 * Determine sl0, starting level of the page table, based on the number
@@ -576,67 +578,71 @@
 	}
 	mm_s2_root_table_count = 1 << extend_bits;
 
-	if (first) {
-		dlog("Stage 2 has %d page table levels with %d pages at the "
-		     "root.\n",
-		     mm_s2_max_level + 1, mm_s2_root_table_count);
-	}
+	dlog("Stage 2 has %d page table levels with %d pages at the root.\n",
+	     mm_s2_max_level + 1, mm_s2_root_table_count);
 
-	v = (1u << 31) |	       /* RES1. */
-	    ((features & 0xf) << 16) | /* PS, matching features. */
-	    (0 << 14) |		       /* TG0: 4 KB granule. */
-	    (3 << 12) |		       /* SH0: inner shareable. */
-	    (1 << 10) |		       /* ORGN0: normal, cacheable ... */
-	    (1 << 8) |		       /* IRGN0: normal, cacheable ... */
-	    (sl0 << 6) |	       /* SL0. */
-	    ((64 - pa_bits) << 0);     /* T0SZ: dependent on PS. */
-	write_msr(vtcr_el2, v);
+	mm_vtcr_el2 = (1u << 31) |		 /* RES1. */
+		      ((features & 0xf) << 16) | /* PS, matching features. */
+		      (0 << 14) |		 /* TG0: 4 KB granule. */
+		      (3 << 12) |		 /* SH0: inner shareable. */
+		      (1 << 10) |	     /* ORGN0: normal, cacheable ... */
+		      (1 << 8) |	      /* IRGN0: normal, cacheable ... */
+		      (sl0 << 6) |	    /* SL0. */
+		      ((64 - pa_bits) << 0) | /* T0SZ: dependent on PS. */
+		      0;
 
 	/*
 	 * 0    -> Device-nGnRnE memory
 	 * 0xff -> Normal memory, Inner/Outer Write-Back Non-transient,
 	 *         Write-Alloc, Read-Alloc.
 	 */
-	write_msr(mair_el2, (0 << (8 * STAGE1_DEVICEINDX)) |
-				    (0xff << (8 * STAGE1_NORMALINDX)));
-
-	write_msr(ttbr0_el2, pa_addr(table));
+	mm_mair_el2 = (0 << (8 * STAGE1_DEVICEINDX)) |
+		      (0xff << (8 * STAGE1_NORMALINDX));
 
 	/*
 	 * Configure tcr_el2.
 	 */
-	v = (1 << 20) |		       /* TBI, top byte ignored. */
-	    ((features & 0xf) << 16) | /* PS. */
-	    (0 << 14) |		       /* TG0, granule size, 4KB. */
-	    (3 << 12) |		       /* SH0, inner shareable. */
-	    (1 << 10) | /* ORGN0, normal mem, WB RA WA Cacheable. */
-	    (1 << 8) |  /* IRGN0, normal mem, WB RA WA Cacheable. */
-	    (25 << 0) | /* T0SZ, input address is 2^39 bytes. */
-	    0;
-	write_msr(tcr_el2, v);
+	mm_tcr_el2 = (1 << 20) |		/* TBI, top byte ignored. */
+		     ((features & 0xf) << 16) | /* PS. */
+		     (0 << 14) |		/* TG0, granule size, 4KB. */
+		     (3 << 12) |		/* SH0, inner shareable. */
+		     (1 << 10) | /* ORGN0, normal mem, WB RA WA Cacheable. */
+		     (1 << 8) |  /* IRGN0, normal mem, WB RA WA Cacheable. */
+		     (25 << 0) | /* T0SZ, input address is 2^39 bytes. */
+		     0;
 
-	v = (1 << 0) |  /* M, enable stage 1 EL2 MMU. */
-	    (1 << 1) |  /* A, enable alignment check faults. */
-	    (1 << 2) |  /* C, data cache enable. */
-	    (1 << 3) |  /* SA, enable stack alignment check. */
-	    (3 << 4) |  /* RES1 bits. */
-	    (1 << 11) | /* RES1 bit. */
-	    (1 << 12) | /* I, instruction cache enable. */
-	    (1 << 16) | /* RES1 bit. */
-	    (1 << 18) | /* RES1 bit. */
-	    (1 << 19) | /* WXN bit, writable execute never. */
-	    (3 << 22) | /* RES1 bits. */
-	    (3 << 28) | /* RES1 bits. */
-	    0;
-
-	dsb(sy);
-	isb();
-	write_msr(sctlr_el2, v);
-	isb();
+	mm_sctlr_el2 = (1 << 0) |  /* M, enable stage 1 EL2 MMU. */
+		       (1 << 1) |  /* A, enable alignment check faults. */
+		       (1 << 2) |  /* C, data cache enable. */
+		       (1 << 3) |  /* SA, enable stack alignment check. */
+		       (3 << 4) |  /* RES1 bits. */
+		       (1 << 11) | /* RES1 bit. */
+		       (1 << 12) | /* I, instruction cache enable. */
+		       (1 << 16) | /* RES1 bit. */
+		       (1 << 18) | /* RES1 bit. */
+		       (1 << 19) | /* WXN bit, writable execute never. */
+		       (3 << 22) | /* RES1 bits. */
+		       (3 << 28) | /* RES1 bits. */
+		       0;
 
 	return true;
 }
 
+void arch_mm_enable(paddr_t table)
+{
+	/* Configure translation management registers. */
+	write_msr(ttbr0_el2, pa_addr(table));
+	write_msr(vtcr_el2, mm_vtcr_el2);
+	write_msr(mair_el2, mm_mair_el2);
+	write_msr(tcr_el2, mm_tcr_el2);
+
+	/* Configure sctlr_el2 to enable MMU and cache. */
+	dsb(sy);
+	isb();
+	write_msr(sctlr_el2, mm_sctlr_el2);
+	isb();
+}
+
 /**
  * Given the attrs from a table at some level and the attrs from all the blocks
  * in that table, returns equivalent attrs to use for a block which will replace
diff --git a/src/arch/fake/mm.c b/src/arch/fake/mm.c
index e409f97..4cda7cf 100644
--- a/src/arch/fake/mm.c
+++ b/src/arch/fake/mm.c
@@ -165,10 +165,14 @@
 	return attrs >> PTE_ATTR_MODE_SHIFT;
 }
 
-bool arch_mm_init(paddr_t table, bool first)
+bool arch_mm_init(void)
 {
 	/* No initialization required. */
-	(void)table;
-	(void)first;
 	return true;
 }
+
+void arch_mm_enable(paddr_t table)
+{
+	/* There's no modelling of the MMU. */
+	(void)table;
+}
diff --git a/src/main.c b/src/main.c
index fc6cc55..3ccf6d6 100644
--- a/src/main.c
+++ b/src/main.c
@@ -66,6 +66,8 @@
 		panic("mm_init failed");
 	}
 
+	mm_cpu_init();
+
 	/* Enable locks now that mm is initialised. */
 	dlog_enable_lock();
 	mpool_enable_locks();
@@ -150,10 +152,8 @@
 	if (cpu_index(c) == 0 && !inited) {
 		inited = true;
 		one_time_init();
-	}
-
-	if (!mm_cpu_init()) {
-		panic("mm_cpu_init failed");
+	} else {
+		mm_cpu_init();
 	}
 
 	vcpu = vm_get_vcpu(vm_find(HF_PRIMARY_VM_ID), cpu_index(c));
diff --git a/src/mm.c b/src/mm.c
index 995e77d..886779f 100644
--- a/src/mm.c
+++ b/src/mm.c
@@ -950,10 +950,10 @@
 	mm_identity_map(stage1_locked, layout_data_begin(), layout_data_end(),
 			MM_MODE_R | MM_MODE_W, ppool);
 
-	return arch_mm_init(ptable.root, true);
+	return arch_mm_init();
 }
 
-bool mm_cpu_init(void)
+void mm_cpu_init(void)
 {
-	return arch_mm_init(ptable.root, false);
+	arch_mm_enable(ptable.root);
 }