Convert logically unmapped modes to unmapping.

If an address range is invalid and unowned, it holds no extra
information, so it can be replaced with an absent entry.

The mapping functions are sometimes used to unmap addresses, for example
during memory sharing. This change means the resulting state of the page
table is the same as though an unmapping function had been used.

This has the side effect of canonicalizing the state for an unmapped
entry.

Change-Id: I1ff35d1dffb9720d5a18c00cd223879fa91c4b24
diff --git a/inc/hf/mm.h b/inc/hf/mm.h
index 613e6e5..ecd205f 100644
--- a/inc/hf/mm.h
+++ b/inc/hf/mm.h
@@ -60,8 +60,8 @@
  *  - !V  O  X : Owner of memory lent to a VM that has exclusive access.
  *
  *  - !V  O !X : Unused. Owner of shared memory always has access.
+ *  - !V !O  X : Unused. Next entry is used for invalid memory.
  *
- *  - !V !O  X : Invalid memory. Memory is unrelated to the VM.
  *  - !V !O !X : Invalid memory. Memory is unrelated to the VM.
  *
  *  Modes are selected so that owner of exclusive memory is the default.
@@ -70,6 +70,9 @@
 #define MM_MODE_UNOWNED UINT32_C(0x0020)
 #define MM_MODE_SHARED  UINT32_C(0x0040)
 
+/* The mask for a mode that is considered unmapped. */
+#define MM_MODE_UNMAPPED_MASK (MM_MODE_INVALID | MM_MODE_UNOWNED)
+
 #define MM_FLAG_COMMIT  0x01
 #define MM_FLAG_UNMAP   0x02
 #define MM_FLAG_STAGE1  0x04
diff --git a/src/mm.c b/src/mm.c
index 918359f..fbc91b4 100644
--- a/src/mm.c
+++ b/src/mm.c
@@ -801,6 +801,19 @@
 }
 
 /**
+ * Selects flags to pass to the page table manipulation operation based on the
+ * mapping mode.
+ */
+static int mm_mode_to_flags(uint32_t mode)
+{
+	if ((mode & MM_MODE_UNMAPPED_MASK) == MM_MODE_UNMAPPED_MASK) {
+		return MM_FLAG_UNMAP;
+	}
+
+	return 0;
+}
+
+/**
  * Updates a VM's page table such that the given physical address range is
  * mapped in the address space at the corresponding address range in the
  * architecture-agnostic mode provided.
@@ -808,7 +821,7 @@
 bool mm_vm_identity_map(struct mm_ptable *t, paddr_t begin, paddr_t end,
 			uint32_t mode, ipaddr_t *ipa, struct mpool *ppool)
 {
-	int flags = 0;
+	int flags = mm_mode_to_flags(mode);
 	bool success = mm_ptable_identity_update(
 		t, begin, end, arch_mm_mode_to_stage2_attrs(mode), flags,
 		ppool);
@@ -827,11 +840,9 @@
 bool mm_vm_unmap(struct mm_ptable *t, paddr_t begin, paddr_t end,
 		 struct mpool *ppool)
 {
-	return mm_ptable_identity_update(
-		t, begin, end,
-		arch_mm_mode_to_stage2_attrs(MM_MODE_UNOWNED | MM_MODE_INVALID |
-					     MM_MODE_SHARED),
-		MM_FLAG_UNMAP, ppool);
+	uint32_t mode = MM_MODE_UNMAPPED_MASK;
+
+	return mm_vm_identity_map(t, begin, end, mode, NULL, ppool);
 }
 
 /**
@@ -908,9 +919,11 @@
 void *mm_identity_map(struct mm_stage1_locked stage1_locked, paddr_t begin,
 		      paddr_t end, uint32_t mode, struct mpool *ppool)
 {
+	int flags = MM_FLAG_STAGE1 | mm_mode_to_flags(mode);
+
 	if (mm_ptable_identity_update(stage1_locked.ptable, begin, end,
-				      arch_mm_mode_to_stage1_attrs(mode),
-				      MM_FLAG_STAGE1, ppool)) {
+				      arch_mm_mode_to_stage1_attrs(mode), flags,
+				      ppool)) {
 		return ptr_from_va(va_from_pa(begin));
 	}
 
@@ -924,11 +937,9 @@
 bool mm_unmap(struct mm_stage1_locked stage1_locked, paddr_t begin, paddr_t end,
 	      struct mpool *ppool)
 {
-	return mm_ptable_identity_update(
-		stage1_locked.ptable, begin, end,
-		arch_mm_mode_to_stage1_attrs(MM_MODE_UNOWNED | MM_MODE_INVALID |
-					     MM_MODE_SHARED),
-		MM_FLAG_STAGE1 | MM_FLAG_UNMAP, ppool);
+	uint32_t mode = MM_MODE_UNMAPPED_MASK;
+
+	return mm_identity_map(stage1_locked, begin, end, mode, ppool);
 }
 
 /**
diff --git a/src/mm_test.cc b/src/mm_test.cc
index ae13df7..05723b2 100644
--- a/src/mm_test.cc
+++ b/src/mm_test.cc
@@ -486,6 +486,31 @@
 }
 
 /**
+ * Mapping with a mode that indicates unmapping results in the addresses being
+ * unmapped with absent entries.
+ */
+TEST_F(mm, map_to_unmap)
+{
+	constexpr uint32_t mode = 0;
+	const paddr_t l0_begin = pa_init(uintpaddr_t(524421) * PAGE_SIZE);
+	const paddr_t l0_end = pa_add(l0_begin, 17 * PAGE_SIZE);
+	const paddr_t l1_begin = pa_init(3 * mm_entry_size(1));
+	const paddr_t l1_end = pa_add(l1_begin, 5 * mm_entry_size(1));
+	struct mm_ptable ptable;
+	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
+	ASSERT_TRUE(mm_vm_identity_map(&ptable, l0_begin, l0_end, mode, nullptr,
+				       &ppool));
+	ASSERT_TRUE(mm_vm_identity_map(&ptable, l1_begin, l1_end, mode, nullptr,
+				       &ppool));
+	EXPECT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END,
+				       MM_MODE_UNMAPPED_MASK, nullptr, &ppool));
+	EXPECT_THAT(
+		get_ptable(ptable),
+		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
+	mm_vm_fini(&ptable, &ppool);
+}
+
+/**
  * If nothing is mapped, unmapping the hypervisor has no effect.
  */
 TEST_F(mm, vm_unmap_hypervisor_not_mapped)