Expose prepare and commit update phases.

Some higher level operations, such as multi-region atomic updates, can
make use of these. Care must be taken to use them correctly, so it is
recommended to continue using the previous single-call API where
possible.
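
For illustration, a multi-region atomic update might be structured as
follows. This is a sketch only (locking and error handling simplified);
the helper is hypothetical and only the mm_vm_identity_prepare and
mm_vm_identity_commit calls come from this change:

  static bool map_two_regions(struct mm_ptable *t, paddr_t a_begin,
                              paddr_t a_end, paddr_t b_begin, paddr_t b_end,
                              uint32_t mode, struct mpool *ppool)
  {
          /* Allocate every page table level both regions will need. */
          if (!mm_vm_identity_prepare(t, a_begin, a_end, mode, ppool) ||
              !mm_vm_identity_prepare(t, b_begin, b_end, mode, ppool)) {
                  /* Nothing was committed; the address space is unchanged. */
                  return false;
          }

          /* With the table prepared, neither commit can fail. */
          mm_vm_identity_commit(t, a_begin, a_end, mode, NULL, ppool);
          mm_vm_identity_commit(t, b_begin, b_end, mode, NULL, ppool);
          return true;
  }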

Change-Id: I05fe433a62a538f38b3274e9418deeb01a053dc7
diff --git a/inc/hf/mm.h b/inc/hf/mm.h
index ecd205f..69fa78a 100644
--- a/inc/hf/mm.h
+++ b/inc/hf/mm.h
@@ -109,6 +109,10 @@
 void mm_vm_fini(struct mm_ptable *t, struct mpool *ppool);
 bool mm_vm_identity_map(struct mm_ptable *t, paddr_t begin, paddr_t end,
 			uint32_t mode, ipaddr_t *ipa, struct mpool *ppool);
+bool mm_vm_identity_prepare(struct mm_ptable *t, paddr_t begin, paddr_t end,
+			    uint32_t mode, struct mpool *ppool);
+void mm_vm_identity_commit(struct mm_ptable *t, paddr_t begin, paddr_t end,
+			   uint32_t mode, ipaddr_t *ipa, struct mpool *ppool);
 bool mm_vm_unmap(struct mm_ptable *t, paddr_t begin, paddr_t end,
 		 struct mpool *ppool);
 bool mm_vm_unmap_hypervisor(struct mm_ptable *t, struct mpool *ppool);
diff --git a/src/mm.c b/src/mm.c
index fbc91b4..30f4ca7 100644
--- a/src/mm.c
+++ b/src/mm.c
@@ -489,33 +489,67 @@
 	return true;
 }
 
+/**
+ * Prepares the given page table for the given address mapping such that it
+ * will be able to commit the change without failure. It does so by ensuring
+ * the smallest granularity needed is available. This remains valid provided
+ * subsequent operations do not decrease the granularity.
+ *
+ * In particular, multiple calls to this function will result in the
+ * corresponding calls to commit the changes succeeding.
+ */
+static bool mm_ptable_identity_prepare(struct mm_ptable *t, paddr_t pa_begin,
+				       paddr_t pa_end, uint64_t attrs,
+				       int flags, struct mpool *ppool)
+{
+	flags &= ~MM_FLAG_COMMIT;
+	return mm_ptable_identity_map(t, pa_begin, pa_end, attrs, flags, ppool);
+}
+
+/**
+ * Commits the given address mapping to the page table assuming the operation
+ * cannot fail. `mm_ptable_identity_prepare` must be used correctly before
+ * this to ensure this condition.
+ *
+ * Without the table being properly prepared, the commit may only partially
+ * complete if it runs out of memory resulting in an inconsistent state that
+ * isn't handled.
+ *
+ * Since the non-failure assumption is used in reasoning about the atomicity
+ * of higher level memory operations, any detected violations result in a panic.
+ *
+ * TODO: remove ppool argument to be sure no changes are made.
+ */
+static void mm_ptable_identity_commit(struct mm_ptable *t, paddr_t pa_begin,
+				      paddr_t pa_end, uint64_t attrs, int flags,
+				      struct mpool *ppool)
+{
+	CHECK(mm_ptable_identity_map(t, pa_begin, pa_end, attrs,
+				     flags | MM_FLAG_COMMIT, ppool));
+}
+
 /**
  * Updates the given table such that the given physical address range is mapped
  * or not mapped into the address space with the architecture-agnostic mode
- * provided. Tries first without committing, and then commits if that is
- * successful.
+ * provided.
+ *
+ * The page table is updated using the separate prepare and commit stages so
+ * that, on failure, a partial update of the address space cannot happen. The
+ * table may be left with extra internal tables but the address space is
+ * unchanged.
  */
 static bool mm_ptable_identity_update(struct mm_ptable *t, paddr_t pa_begin,
 				      paddr_t pa_end, uint64_t attrs, int flags,
 				      struct mpool *ppool)
 {
-	bool success;
-
-	CHECK(!(flags & MM_FLAG_COMMIT));
-
-	/*
-	 * Do it in two steps to prevent leaving the table in a halfway updated
-	 * state. In such a two-step implementation, the table may be left with
-	 * extra internal tables, but no different mapping on failure.
-	 */
-	success = mm_ptable_identity_map(t, pa_begin, pa_end, attrs, flags,
-					 ppool);
-	if (success) {
-		CHECK(mm_ptable_identity_map(t, pa_begin, pa_end, attrs,
-					     flags | MM_FLAG_COMMIT, ppool));
+	if (!mm_ptable_identity_prepare(t, pa_begin, pa_end, attrs, flags,
+					ppool)) {
+		return false;
 	}
 
-	return success;
+	mm_ptable_identity_commit(t, pa_begin, pa_end, attrs, flags, ppool);
+
+	return true;
 }
 
 /**
@@ -814,6 +848,40 @@
 }
 
 /**
+ * See `mm_ptable_identity_prepare`.
+ *
+ * This must be called before `mm_vm_identity_commit` for the same mapping.
+ */
+bool mm_vm_identity_prepare(struct mm_ptable *t, paddr_t begin, paddr_t end,
+			    uint32_t mode, struct mpool *ppool)
+{
+	int flags = mm_mode_to_flags(mode);
+
+	return mm_ptable_identity_prepare(t, begin, end,
+					  arch_mm_mode_to_stage2_attrs(mode),
+					  flags, ppool);
+}
+
+/**
+ * See `mm_ptable_identity_commit`.
+ *
+ * `mm_vm_identity_prepare` must be called before this for the same mapping.
+ */
+void mm_vm_identity_commit(struct mm_ptable *t, paddr_t begin, paddr_t end,
+			   uint32_t mode, ipaddr_t *ipa, struct mpool *ppool)
+{
+	int flags = mm_mode_to_flags(mode);
+
+	mm_ptable_identity_commit(t, begin, end,
+				  arch_mm_mode_to_stage2_attrs(mode), flags,
+				  ppool);
+
+	if (ipa != NULL) {
+		*ipa = ipa_from_pa(begin);
+	}
+}
+
+/**
  * Updates a VM's page table such that the given physical address range is
  * mapped in the address space at the corresponding address range in the
  * architecture-agnostic mode provided.
diff --git a/src/mm_test.cc b/src/mm_test.cc
index 05723b2..5989dfc 100644
--- a/src/mm_test.cc
+++ b/src/mm_test.cc
@@ -510,6 +510,193 @@
 	mm_vm_fini(&ptable, &ppool);
 }
 
+/**
+ * Preparing and committing an address range works the same as mapping it.
+ */
+TEST_F(mm, prepare_and_commit_first_page)
+{
+	constexpr uint32_t mode = 0;
+	const paddr_t page_begin = pa_init(0);
+	const paddr_t page_end = pa_add(page_begin, PAGE_SIZE);
+	struct mm_ptable ptable;
+	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
+	ASSERT_TRUE(mm_vm_identity_prepare(&ptable, page_begin, page_end, mode,
+					   &ppool));
+	mm_vm_identity_commit(&ptable, page_begin, page_end, mode, nullptr,
+			      &ppool);
+
+	auto tables = get_ptable(ptable);
+	EXPECT_THAT(tables, SizeIs(4));
+	ASSERT_THAT(TOP_LEVEL, Eq(2));
+
+	/* Check that the first page is mapped and nothing else. */
+	EXPECT_THAT(std::span(tables).last(3),
+		    Each(Each(arch_mm_absent_pte(TOP_LEVEL))));
+
+	auto table_l2 = tables.front();
+	EXPECT_THAT(table_l2.subspan(1), Each(arch_mm_absent_pte(TOP_LEVEL)));
+	ASSERT_TRUE(arch_mm_pte_is_table(table_l2[0], TOP_LEVEL));
+
+	auto table_l1 =
+		get_table(arch_mm_table_from_pte(table_l2[0], TOP_LEVEL));
+	EXPECT_THAT(table_l1.subspan(1),
+		    Each(arch_mm_absent_pte(TOP_LEVEL - 1)));
+	ASSERT_TRUE(arch_mm_pte_is_table(table_l1[0], TOP_LEVEL - 1));
+
+	auto table_l0 =
+		get_table(arch_mm_table_from_pte(table_l1[0], TOP_LEVEL - 1));
+	EXPECT_THAT(table_l0.subspan(1),
+		    Each(arch_mm_absent_pte(TOP_LEVEL - 2)));
+	ASSERT_TRUE(arch_mm_pte_is_block(table_l0[0], TOP_LEVEL - 2));
+	EXPECT_THAT(pa_addr(arch_mm_block_from_pte(table_l0[0], TOP_LEVEL - 2)),
+		    Eq(pa_addr(page_begin)));
+
+	mm_vm_fini(&ptable, &ppool);
+}
+
+/**
+ * Disjoint address ranges can be prepared and committed together.
+ */
+TEST_F(mm, prepare_and_commit_disjoint_regions)
+{
+	constexpr uint32_t mode = 0;
+	const paddr_t first_begin = pa_init(0);
+	const paddr_t first_end = pa_add(first_begin, PAGE_SIZE);
+	const paddr_t last_begin = pa_init(pa_addr(VM_MEM_END) - PAGE_SIZE);
+	const paddr_t last_end = VM_MEM_END;
+	struct mm_ptable ptable;
+	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
+	ASSERT_TRUE(mm_vm_identity_prepare(&ptable, first_begin, first_end,
+					   mode, &ppool));
+	ASSERT_TRUE(mm_vm_identity_prepare(&ptable, last_begin, last_end, mode,
+					   &ppool));
+	mm_vm_identity_commit(&ptable, first_begin, first_end, mode, nullptr,
+			      &ppool);
+	mm_vm_identity_commit(&ptable, last_begin, last_end, mode, nullptr,
+			      &ppool);
+
+	auto tables = get_ptable(ptable);
+	EXPECT_THAT(tables, SizeIs(4));
+	ASSERT_THAT(TOP_LEVEL, Eq(2));
+
+	/* Check that the first and last pages are mapped and nothing else. */
+	EXPECT_THAT(std::span(tables).subspan(1, 2),
+		    Each(Each(arch_mm_absent_pte(TOP_LEVEL))));
+
+	/* Check the first page. */
+	auto table0_l2 = tables.front();
+	EXPECT_THAT(table0_l2.subspan(1), Each(arch_mm_absent_pte(TOP_LEVEL)));
+	ASSERT_TRUE(arch_mm_pte_is_table(table0_l2[0], TOP_LEVEL));
+
+	auto table0_l1 =
+		get_table(arch_mm_table_from_pte(table0_l2[0], TOP_LEVEL));
+	EXPECT_THAT(table0_l1.subspan(1),
+		    Each(arch_mm_absent_pte(TOP_LEVEL - 1)));
+	ASSERT_TRUE(arch_mm_pte_is_table(table0_l1[0], TOP_LEVEL - 1));
+
+	auto table0_l0 =
+		get_table(arch_mm_table_from_pte(table0_l1[0], TOP_LEVEL - 1));
+	EXPECT_THAT(table0_l0.subspan(1),
+		    Each(arch_mm_absent_pte(TOP_LEVEL - 2)));
+	ASSERT_TRUE(arch_mm_pte_is_block(table0_l0[0], TOP_LEVEL - 2));
+	EXPECT_THAT(
+		pa_addr(arch_mm_block_from_pte(table0_l0[0], TOP_LEVEL - 2)),
+		Eq(pa_addr(first_begin)));
+
+	/* Check the last page. */
+	auto table3_l2 = tables.back();
+	EXPECT_THAT(table3_l2.first(table3_l2.size() - 1),
+		    Each(arch_mm_absent_pte(TOP_LEVEL)));
+	ASSERT_TRUE(arch_mm_pte_is_table(table3_l2.last(1)[0], TOP_LEVEL));
+
+	auto table3_l1 = get_table(
+		arch_mm_table_from_pte(table3_l2.last(1)[0], TOP_LEVEL));
+	EXPECT_THAT(table3_l1.first(table3_l1.size() - 1),
+		    Each(arch_mm_absent_pte(TOP_LEVEL - 1)));
+	ASSERT_TRUE(arch_mm_pte_is_table(table3_l1.last(1)[0], TOP_LEVEL - 1));
+
+	auto table3_l0 = get_table(
+		arch_mm_table_from_pte(table3_l1.last(1)[0], TOP_LEVEL - 1));
+	EXPECT_THAT(table3_l0.first(table3_l0.size() - 1),
+		    Each(arch_mm_absent_pte(TOP_LEVEL - 2)));
+	ASSERT_TRUE(arch_mm_pte_is_block(table3_l0.last(1)[0], TOP_LEVEL - 2));
+	EXPECT_THAT(pa_addr(arch_mm_block_from_pte(table3_l0.last(1)[0],
+						   TOP_LEVEL - 2)),
+		    Eq(pa_addr(last_begin)));
+
+	mm_vm_fini(&ptable, &ppool);
+}
+
+/**
+ * Overlapping address ranges can be prepared and committed together.
+ */
+TEST_F(mm, prepare_and_commit_overlapping_regions)
+{
+	constexpr uint32_t mode = 0;
+	const paddr_t low_begin = pa_init(0x80'0000'0000 - PAGE_SIZE);
+	const paddr_t high_begin = pa_add(low_begin, PAGE_SIZE);
+	const paddr_t map_end = pa_add(high_begin, PAGE_SIZE);
+	struct mm_ptable ptable;
+	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
+	ASSERT_TRUE(mm_vm_identity_prepare(&ptable, high_begin, map_end, mode,
+					   &ppool));
+	ASSERT_TRUE(mm_vm_identity_prepare(&ptable, low_begin, map_end, mode,
+					   &ppool));
+	mm_vm_identity_commit(&ptable, high_begin, map_end, mode, nullptr,
+			      &ppool);
+	mm_vm_identity_commit(&ptable, low_begin, map_end, mode, nullptr,
+			      &ppool);
+
+	auto tables = get_ptable(ptable);
+	EXPECT_THAT(tables, SizeIs(4));
+	EXPECT_THAT(std::span(tables).last(2),
+		    Each(Each(arch_mm_absent_pte(TOP_LEVEL))));
+	ASSERT_THAT(TOP_LEVEL, Eq(2));
+
+	/* Check only the last page of the first table is mapped. */
+	auto table0_l2 = tables.front();
+	EXPECT_THAT(table0_l2.first(table0_l2.size() - 1),
+		    Each(arch_mm_absent_pte(TOP_LEVEL)));
+	ASSERT_TRUE(arch_mm_pte_is_table(table0_l2.last(1)[0], TOP_LEVEL));
+
+	auto table0_l1 = get_table(
+		arch_mm_table_from_pte(table0_l2.last(1)[0], TOP_LEVEL));
+	EXPECT_THAT(table0_l1.first(table0_l1.size() - 1),
+		    Each(arch_mm_absent_pte(TOP_LEVEL - 1)));
+	ASSERT_TRUE(arch_mm_pte_is_table(table0_l1.last(1)[0], TOP_LEVEL - 1));
+
+	auto table0_l0 = get_table(
+		arch_mm_table_from_pte(table0_l1.last(1)[0], TOP_LEVEL - 1));
+	EXPECT_THAT(table0_l0.first(table0_l0.size() - 1),
+		    Each(arch_mm_absent_pte(TOP_LEVEL - 2)));
+	ASSERT_TRUE(arch_mm_pte_is_block(table0_l0.last(1)[0], TOP_LEVEL - 2));
+	EXPECT_THAT(pa_addr(arch_mm_block_from_pte(table0_l0.last(1)[0],
+						   TOP_LEVEL - 2)),
+		    Eq(pa_addr(low_begin)));
+
+	/* Check only the first page of the second table is mapped. */
+	auto table1_l2 = tables[1];
+	EXPECT_THAT(table1_l2.subspan(1), Each(arch_mm_absent_pte(TOP_LEVEL)));
+	ASSERT_TRUE(arch_mm_pte_is_table(table1_l2[0], TOP_LEVEL));
+
+	auto table1_l1 =
+		get_table(arch_mm_table_from_pte(table1_l2[0], TOP_LEVEL));
+	EXPECT_THAT(table1_l1.subspan(1),
+		    Each(arch_mm_absent_pte(TOP_LEVEL - 1)));
+	ASSERT_TRUE(arch_mm_pte_is_table(table1_l1[0], TOP_LEVEL - 1));
+
+	auto table1_l0 =
+		get_table(arch_mm_table_from_pte(table1_l1[0], TOP_LEVEL - 1));
+	EXPECT_THAT(table1_l0.subspan(1),
+		    Each(arch_mm_absent_pte(TOP_LEVEL - 2)));
+	ASSERT_TRUE(arch_mm_pte_is_block(table1_l0[0], TOP_LEVEL - 2));
+	EXPECT_THAT(
+		pa_addr(arch_mm_block_from_pte(table1_l0[0], TOP_LEVEL - 2)),
+		Eq(pa_addr(high_begin)));
+
+	mm_vm_fini(&ptable, &ppool);
+}
+
 /**
  * If nothing is mapped, unmapping the hypervisor has no effect.
  */