Remove MM_MODE_NOINVALIDATE.

This flag exists to avoid paying the cost of TLB invalidation when
consistency is not required. It was used for stage-2 allocation while
VMs are initially being created, i.e. there is a point before which no
stage-2 update invalidates the TLB and after which they all do. Given
this, it can move to the same enable-once approach used by the locks
for dlog and mpool, allowing the mode clutter to be removed from where
it doesn't belong.
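
In outline, the change replaces the per-mapping mode flag with a
one-shot global switch; a minimal sketch, using the names introduced
in src/mm.c below:

    static bool mm_stage2_invalidate = false;

    /* Called once boot-time initialisation is complete. */
    void mm_vm_enable_invalidation(void)
    {
            mm_stage2_invalidate = true;
    }

    /* Stage-1 updates always invalidate; stage-2 updates do so only
     * after invalidation has been enabled. */
    if ((flags & MM_FLAG_STAGE1) || mm_stage2_invalidate) {
            mm_invalidate_tlb(begin, end, flags);
    }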

Change-Id: I9be9e42c1daaf8570d7555bb3f37876aa7c3bd51
diff --git a/inc/hf/mm.h b/inc/hf/mm.h
index d7a1c4c..ea71f9a 100644
--- a/inc/hf/mm.h
+++ b/inc/hf/mm.h
@@ -71,12 +71,6 @@
 #define MM_MODE_UNOWNED 0x0020
 #define MM_MODE_SHARED  0x0040
 
-/**
- * This flag indicates that no TLB invalidations should be issued for the
- * changes in the page table.
- */
-#define MM_MODE_NOINVALIDATE 0x0080
-
 /* clang-format on */
 
 struct mm_page_table {
@@ -92,13 +86,15 @@
 	paddr_t root;
 };
 
+void mm_vm_enable_invalidation(void);
+
 bool mm_vm_init(struct mm_ptable *t, struct mpool *ppool);
 void mm_vm_fini(struct mm_ptable *t, struct mpool *ppool);
 bool mm_vm_identity_map(struct mm_ptable *t, paddr_t begin, paddr_t end,
 			int mode, ipaddr_t *ipa, struct mpool *ppool);
-bool mm_vm_unmap(struct mm_ptable *t, paddr_t begin, paddr_t end, int mode,
+bool mm_vm_unmap(struct mm_ptable *t, paddr_t begin, paddr_t end,
 		 struct mpool *ppool);
-bool mm_vm_unmap_hypervisor(struct mm_ptable *t, int mode, struct mpool *ppool);
+bool mm_vm_unmap_hypervisor(struct mm_ptable *t, struct mpool *ppool);
 void mm_vm_defrag(struct mm_ptable *t, struct mpool *ppool);
 void mm_vm_dump(struct mm_ptable *t);
 bool mm_vm_get_mode(struct mm_ptable *t, ipaddr_t begin, ipaddr_t end,
@@ -108,5 +104,5 @@
 bool mm_cpu_init(void);
 void *mm_identity_map(paddr_t begin, paddr_t end, int mode,
 		      struct mpool *ppool);
-bool mm_unmap(paddr_t begin, paddr_t end, int mode, struct mpool *ppool);
+bool mm_unmap(paddr_t begin, paddr_t end, struct mpool *ppool);
 void mm_defrag(struct mpool *ppool);
diff --git a/src/api.c b/src/api.c
index fa26e1d..e1d36ae 100644
--- a/src/api.c
+++ b/src/api.c
@@ -380,7 +380,7 @@
 	 */
 fail_undo_all:
 	vm->mailbox.send = NULL;
-	mm_unmap(pa_send_begin, pa_send_end, 0, &local_page_pool);
+	mm_unmap(pa_send_begin, pa_send_end, &local_page_pool);
 
 fail_undo_send_and_recv:
 	mm_vm_identity_map(&vm->ptable, pa_recv_begin, pa_recv_end,
diff --git a/src/arch/fake/mm.c b/src/arch/fake/mm.c
index ffe52f2..1e45a1c 100644
--- a/src/arch/fake/mm.c
+++ b/src/arch/fake/mm.c
@@ -144,15 +144,13 @@
 
 uint64_t arch_mm_mode_to_stage1_attrs(int mode)
 {
-	mode &= ~MM_MODE_NOINVALIDATE;
-
 	return ((uint64_t)mode << PTE_ATTR_MODE_SHIFT) & PTE_ATTR_MODE_MASK;
 }
 
 uint64_t arch_mm_mode_to_stage2_attrs(int mode)
 {
 	/* Stage-2 ignores the device mode. */
-	mode &= ~MM_MODE_NOINVALIDATE & ~MM_MODE_D;
+	mode &= ~MM_MODE_D;
 
 	return ((uint64_t)mode << PTE_ATTR_MODE_SHIFT) & PTE_ATTR_MODE_MASK;
 }
diff --git a/src/fdt_handler.c b/src/fdt_handler.c
index d0400a8..4b0adf9 100644
--- a/src/fdt_handler.c
+++ b/src/fdt_handler.c
@@ -222,7 +222,7 @@
 	return fdt;
 
 fail:
-	mm_unmap(fdt_addr, pa_add(fdt_addr, fdt_header_size()), 0, ppool);
+	mm_unmap(fdt_addr, pa_add(fdt_addr, fdt_header_size()), ppool);
 	return NULL;
 }
 
@@ -230,8 +230,7 @@
 {
 	paddr_t fdt_addr = pa_from_va(va_from_ptr(fdt));
 
-	return mm_unmap(fdt_addr, pa_add(fdt_addr, fdt_total_size(fdt)), 0,
-			ppool);
+	return mm_unmap(fdt_addr, pa_add(fdt_addr, fdt_total_size(fdt)), ppool);
 }
 
 bool fdt_patch(paddr_t fdt_addr, struct boot_params_update *p,
@@ -303,7 +302,7 @@
 out_unmap_fdt:
 	/* Unmap FDT. */
 	if (!mm_unmap(fdt_addr,
-		      pa_add(fdt_addr, fdt_total_size(fdt) + PAGE_SIZE), 0,
+		      pa_add(fdt_addr, fdt_total_size(fdt) + PAGE_SIZE),
 		      ppool)) {
 		dlog("Unable to unmap writable FDT.\n");
 		return false;
@@ -311,6 +310,6 @@
 	return ret;
 
 err_unmap_fdt_header:
-	mm_unmap(fdt_addr, pa_add(fdt_addr, fdt_header_size()), 0, ppool);
+	mm_unmap(fdt_addr, pa_add(fdt_addr, fdt_header_size()), ppool);
 	return false;
 }
diff --git a/src/load.c b/src/load.c
index 38159a0..a9c0e5d 100644
--- a/src/load.c
+++ b/src/load.c
@@ -52,7 +52,7 @@
 	memcpy(ptr, from, size);
 	arch_mm_write_back_dcache(ptr, size);
 
-	mm_unmap(to, to_end, 0, ppool);
+	mm_unmap(to, to_end, ppool);
 
 	return true;
 }
@@ -147,15 +147,12 @@
 		if (!mm_vm_identity_map(
 			    &vm->ptable, pa_init(0),
 			    pa_init(UINT64_C(1024) * 1024 * 1024 * 1024),
-			    MM_MODE_R | MM_MODE_W | MM_MODE_X |
-				    MM_MODE_NOINVALIDATE,
-			    NULL, ppool)) {
+			    MM_MODE_R | MM_MODE_W | MM_MODE_X, NULL, ppool)) {
 			dlog("Unable to initialise memory for primary vm\n");
 			return false;
 		}
 
-		if (!mm_vm_unmap_hypervisor(&vm->ptable, MM_MODE_NOINVALIDATE,
-					    ppool)) {
+		if (!mm_vm_unmap_hypervisor(&vm->ptable, ppool)) {
 			dlog("Unable to unmap hypervisor from primary vm\n");
 			return false;
 		}
@@ -331,14 +328,12 @@
 		/* Grant VM access to uart. */
 		mm_vm_identity_map(&vm->ptable, pa_init(PL011_BASE),
 				   pa_add(pa_init(PL011_BASE), PAGE_SIZE),
-				   MM_MODE_R | MM_MODE_W | MM_MODE_NOINVALIDATE,
-				   NULL, ppool);
+				   MM_MODE_R | MM_MODE_W, NULL, ppool);
 
 		/* Grant the VM access to the memory. */
 		if (!mm_vm_identity_map(&vm->ptable, secondary_mem_begin,
 					secondary_mem_end,
-					MM_MODE_R | MM_MODE_W | MM_MODE_X |
-						MM_MODE_NOINVALIDATE,
+					MM_MODE_R | MM_MODE_W | MM_MODE_X,
 					&secondary_entry, ppool)) {
 			dlog("Unable to initialise memory\n");
 			continue;
@@ -346,8 +341,7 @@
 
 		/* Deny the primary VM access to this memory. */
 		if (!mm_vm_unmap(&primary->ptable, secondary_mem_begin,
-				 secondary_mem_end, MM_MODE_NOINVALIDATE,
-				 ppool)) {
+				 secondary_mem_end, ppool)) {
 			dlog("Unable to unmap secondary VM from primary VM\n");
 			return false;
 		}
diff --git a/src/main.c b/src/main.c
index e1555cc..a88e67a 100644
--- a/src/main.c
+++ b/src/main.c
@@ -135,6 +135,9 @@
 	/* Initialise the API page pool. ppool will be empty from now on. */
 	api_init(&ppool);
 
+	/* Enable TLB invalidation for VM page table updates. */
+	mm_vm_enable_invalidation();
+
 	dlog("Hafnium initialisation completed\n");
 }
 
diff --git a/src/mm.c b/src/mm.c
index 28f5428..e728aac 100644
--- a/src/mm.c
+++ b/src/mm.c
@@ -49,13 +49,23 @@
 
 #define MM_FLAG_COMMIT       0x01
 #define MM_FLAG_UNMAP        0x02
-#define MM_FLAG_NOINVALIDATE 0x04
-#define MM_FLAG_STAGE1       0x08
+#define MM_FLAG_STAGE1       0x04
 
 /* clang-format on */
 
 static struct mm_ptable ptable;
 
+static bool mm_stage2_invalidate = false;
+
+/**
+ * After calling this function, modifications to stage-2 page tables will use
+ * break-before-make and invalidate the TLB for the affected range.
+ */
+void mm_vm_enable_invalidation(void)
+{
+	mm_stage2_invalidate = true;
+}
+
 /**
  * Get the page table from the physical address.
  */
@@ -264,7 +274,8 @@
 	 * We need to do the break-before-make sequence if both values are
-	 * present, and if it hasn't been inhibited by the NOBBM flag.
+	 * present and the TLB is being invalidated.
 	 */
-	if (!(flags & MM_FLAG_NOINVALIDATE) && arch_mm_pte_is_valid(v, level) &&
+	if (((flags & MM_FLAG_STAGE1) || mm_stage2_invalidate) &&
+	    arch_mm_pte_is_valid(v, level) &&
 	    arch_mm_pte_is_valid(new_pte, level)) {
 		*pte = arch_mm_absent_pte(level);
 		mm_invalidate_tlb(begin, begin + mm_entry_size(level), flags);
@@ -509,7 +520,7 @@
 	}
 
 	/* Invalidate the tlb. */
-	if (!(flags & MM_FLAG_NOINVALIDATE)) {
+	if ((flags & MM_FLAG_STAGE1) || mm_stage2_invalidate) {
 		mm_invalidate_tlb(begin, end, flags);
 	}
 
@@ -778,7 +789,7 @@
 bool mm_vm_identity_map(struct mm_ptable *t, paddr_t begin, paddr_t end,
 			int mode, ipaddr_t *ipa, struct mpool *ppool)
 {
-	int flags = (mode & MM_MODE_NOINVALIDATE ? MM_FLAG_NOINVALIDATE : 0);
+	int flags = 0;
 	bool success = mm_ptable_identity_update(
 		t, begin, end, arch_mm_mode_to_stage2_attrs(mode), flags,
 		ppool);
@@ -794,30 +805,26 @@
  * Updates the VM's table such that the given physical address range has no
  * connection to the VM.
  */
-bool mm_vm_unmap(struct mm_ptable *t, paddr_t begin, paddr_t end, int mode,
+bool mm_vm_unmap(struct mm_ptable *t, paddr_t begin, paddr_t end,
 		 struct mpool *ppool)
 {
-	int flags = (mode & MM_MODE_NOINVALIDATE ? MM_FLAG_NOINVALIDATE : 0) |
-		    MM_FLAG_UNMAP;
 	return mm_ptable_identity_update(
 		t, begin, end,
 		arch_mm_mode_to_stage2_attrs(MM_MODE_UNOWNED | MM_MODE_INVALID |
 					     MM_MODE_SHARED),
-		flags, ppool);
+		MM_FLAG_UNMAP, ppool);
 }
 
 /**
  * Unmaps the hypervisor pages from the given page table.
  */
-bool mm_vm_unmap_hypervisor(struct mm_ptable *t, int mode, struct mpool *ppool)
+bool mm_vm_unmap_hypervisor(struct mm_ptable *t, struct mpool *ppool)
 {
 	/* TODO: If we add pages dynamically, they must be included here too. */
-	return mm_vm_unmap(t, layout_text_begin(), layout_text_end(), mode,
+	return mm_vm_unmap(t, layout_text_begin(), layout_text_end(), ppool) &&
+	       mm_vm_unmap(t, layout_rodata_begin(), layout_rodata_end(),
 			   ppool) &&
-	       mm_vm_unmap(t, layout_rodata_begin(), layout_rodata_end(), mode,
-			   ppool) &&
-	       mm_vm_unmap(t, layout_data_begin(), layout_data_end(), mode,
-			   ppool);
+	       mm_vm_unmap(t, layout_data_begin(), layout_data_end(), ppool);
 }
 
 /**
@@ -863,12 +870,9 @@
  */
 void *mm_identity_map(paddr_t begin, paddr_t end, int mode, struct mpool *ppool)
 {
-	int flags = (mode & MM_MODE_NOINVALIDATE ? MM_FLAG_NOINVALIDATE : 0) |
-		    MM_FLAG_STAGE1;
-
 	if (mm_ptable_identity_update(&ptable, begin, end,
-				      arch_mm_mode_to_stage1_attrs(mode), flags,
-				      ppool)) {
+				      arch_mm_mode_to_stage1_attrs(mode),
+				      MM_FLAG_STAGE1, ppool)) {
 		return ptr_from_va(va_from_pa(begin));
 	}
 
@@ -879,15 +883,13 @@
  * Updates the hypervisor table such that the given physical address range is
  * not mapped in the address space.
  */
-bool mm_unmap(paddr_t begin, paddr_t end, int mode, struct mpool *ppool)
+bool mm_unmap(paddr_t begin, paddr_t end, struct mpool *ppool)
 {
-	int flags = (mode & MM_MODE_NOINVALIDATE ? MM_FLAG_NOINVALIDATE : 0) |
-		    MM_FLAG_STAGE1 | MM_FLAG_UNMAP;
 	return mm_ptable_identity_update(
 		&ptable, begin, end,
 		arch_mm_mode_to_stage1_attrs(MM_MODE_UNOWNED | MM_MODE_INVALID |
 					     MM_MODE_SHARED),
-		flags, ppool);
+		MM_FLAG_STAGE1 | MM_FLAG_UNMAP, ppool);
 }
 
 /**
diff --git a/src/mm_test.cc b/src/mm_test.cc
index 0f855e5..9350755 100644
--- a/src/mm_test.cc
+++ b/src/mm_test.cc
@@ -470,7 +470,7 @@
 	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
 	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
 				       nullptr, &ppool));
-	ASSERT_TRUE(mm_vm_unmap(&ptable, page_begin, page_end, mode, &ppool));
+	ASSERT_TRUE(mm_vm_unmap(&ptable, page_begin, page_end, &ppool));
 	ASSERT_TRUE(mm_vm_identity_map(&ptable, page_begin, page_end, mode,
 				       nullptr, &ppool));
 	EXPECT_THAT(get_ptable(ptable),
@@ -489,10 +489,9 @@
  */
 TEST_F(mm, vm_unmap_hypervisor_not_mapped)
 {
-	constexpr int mode = 0;
 	struct mm_ptable ptable;
 	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
-	EXPECT_TRUE(mm_vm_unmap_hypervisor(&ptable, mode, &ppool));
+	EXPECT_TRUE(mm_vm_unmap_hypervisor(&ptable, &ppool));
 	EXPECT_THAT(
 		get_ptable(ptable),
 		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
@@ -504,11 +503,10 @@
  */
 TEST_F(mm, unmap_not_mapped)
 {
-	constexpr int mode = 0;
 	struct mm_ptable ptable;
 	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
-	EXPECT_TRUE(mm_vm_unmap(&ptable, pa_init(12345), pa_init(987652), mode,
-				&ppool));
+	EXPECT_TRUE(
+		mm_vm_unmap(&ptable, pa_init(12345), pa_init(987652), &ppool));
 	EXPECT_THAT(
 		get_ptable(ptable),
 		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
@@ -531,7 +529,7 @@
 				       &ppool));
 	ASSERT_TRUE(mm_vm_identity_map(&ptable, l1_begin, l1_end, mode, nullptr,
 				       &ppool));
-	EXPECT_TRUE(mm_vm_unmap(&ptable, pa_init(0), VM_MEM_END, mode, &ppool));
+	EXPECT_TRUE(mm_vm_unmap(&ptable, pa_init(0), VM_MEM_END, &ppool));
 	EXPECT_THAT(
 		get_ptable(ptable),
 		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
@@ -551,7 +549,7 @@
 	ASSERT_TRUE(mm_vm_identity_map(&ptable, map_begin, map_end, mode,
 				       nullptr, &ppool));
 	ASSERT_TRUE(mm_vm_unmap(&ptable, pa_add(map_begin, 93),
-				pa_add(map_begin, 99), mode, &ppool));
+				pa_add(map_begin, 99), &ppool));
 	EXPECT_THAT(
 		get_ptable(ptable),
 		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
@@ -570,7 +568,7 @@
 	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
 	ASSERT_TRUE(mm_vm_identity_map(&ptable, map_begin, map_end, mode,
 				       nullptr, &ppool));
-	ASSERT_TRUE(mm_vm_unmap(&ptable, map_begin, map_end, mode, &ppool));
+	ASSERT_TRUE(mm_vm_unmap(&ptable, map_begin, map_end, &ppool));
 	EXPECT_THAT(
 		get_ptable(ptable),
 		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
@@ -588,7 +586,7 @@
 	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
 				       nullptr, &ppool));
 	ASSERT_TRUE(mm_vm_unmap(&ptable, VM_MEM_END, pa_init(0x4000'0000'0000),
-				mode, &ppool));
+				&ppool));
 	EXPECT_THAT(
 		get_ptable(ptable),
 		AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
@@ -608,7 +606,7 @@
 	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
 				       nullptr, &ppool));
 	ASSERT_TRUE(mm_vm_unmap(&ptable, pa_init(0x80'a000'0000), pa_init(27),
-				mode, &ppool));
+				&ppool));
 	EXPECT_THAT(
 		get_ptable(ptable),
 		AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
@@ -633,7 +631,7 @@
 	ASSERT_TRUE(mm_vm_identity_map(&ptable, page_begin, page_end, mode,
 				       nullptr, &ppool));
 	ASSERT_TRUE(mm_vm_unmap(&ptable, pa_add(page_begin, 100),
-				pa_add(page_begin, 50), mode, &ppool));
+				pa_add(page_begin, 50), &ppool));
 	EXPECT_THAT(
 		get_ptable(ptable),
 		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
@@ -654,10 +652,9 @@
 	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
 	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
 				       nullptr, &ppool));
-	ASSERT_TRUE(
-		mm_vm_unmap(&ptable, pa_init(0),
-			    pa_init(std::numeric_limits<uintpaddr_t>::max()),
-			    mode, &ppool));
+	ASSERT_TRUE(mm_vm_unmap(
+		&ptable, pa_init(0),
+		pa_init(std::numeric_limits<uintpaddr_t>::max()), &ppool));
 	EXPECT_THAT(
 		get_ptable(ptable),
 		AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
@@ -681,8 +678,8 @@
 				       &ppool));
 	ASSERT_TRUE(mm_vm_identity_map(&ptable, l1_begin, l1_end, mode, nullptr,
 				       &ppool));
-	ASSERT_TRUE(mm_vm_unmap(&ptable, l0_begin, l0_end, mode, &ppool));
-	ASSERT_TRUE(mm_vm_unmap(&ptable, l1_begin, l1_end, mode, &ppool));
+	ASSERT_TRUE(mm_vm_unmap(&ptable, l0_begin, l0_end, &ppool));
+	ASSERT_TRUE(mm_vm_unmap(&ptable, l1_begin, l1_end, &ppool));
 	EXPECT_THAT(
 		get_ptable(ptable),
 		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
@@ -869,8 +866,8 @@
 				       &ppool));
 	ASSERT_TRUE(mm_vm_identity_map(&ptable, l0_begin, l0_end, mode, nullptr,
 				       &ppool));
-	ASSERT_TRUE(mm_vm_unmap(&ptable, l0_begin, l0_end, mode, &ppool));
-	ASSERT_TRUE(mm_vm_unmap(&ptable, l1_begin, l1_end, mode, &ppool));
+	ASSERT_TRUE(mm_vm_unmap(&ptable, l0_begin, l0_end, &ppool));
+	ASSERT_TRUE(mm_vm_unmap(&ptable, l1_begin, l1_end, &ppool));
 	mm_vm_defrag(&ptable, &ppool);
 	EXPECT_THAT(
 		get_ptable(ptable),
@@ -892,7 +889,7 @@
 	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
 	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
 				       nullptr, &ppool));
-	ASSERT_TRUE(mm_vm_unmap(&ptable, begin, end, mode, &ppool));
+	ASSERT_TRUE(mm_vm_unmap(&ptable, begin, end, &ppool));
 	ASSERT_TRUE(mm_vm_identity_map(&ptable, begin, middle, mode, nullptr,
 				       &ppool));
 	ASSERT_TRUE(mm_vm_identity_map(&ptable, middle, end, mode, nullptr,