mm: function to get the mode of a range of memory.

This supersedes mm_vm_is_mapped(), which amounted to checking that the
range was not mapped with MM_MODE_INVALID.
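
For example, the check mm_vm_is_mapped() performed can be expressed with
the new function, as the tests now do:

  int mode;
  bool is_mapped = mm_vm_get_mode(t, ipa, ipa_add(ipa, 1), &mode) &&
                   (mode & MM_MODE_INVALID) == 0;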

Change-Id: Ic3d5018a6d207d092a7f6bf853b015eba22945bb
diff --git a/inc/hf/addr.h b/inc/hf/addr.h
index c37601d..08c7e12 100644
--- a/inc/hf/addr.h
+++ b/inc/hf/addr.h
@@ -53,6 +53,14 @@
 }
 
 /**
+ * Advances a physical address.
+ */
+static inline paddr_t pa_add(paddr_t pa, size_t n)
+{
+	return pa_init(pa_addr(pa) + n);
+}
+
+/**
  * Initializes an intermediate physical address.
  */
 static inline ipaddr_t ipa_init(uintvaddr_t v)
@@ -69,6 +77,14 @@
 }
 
 /**
+ * Advances an intermediate physical address.
+ */
+static inline ipaddr_t ipa_add(ipaddr_t ipa, size_t n)
+{
+	return ipa_init(ipa_addr(ipa) + n);
+}
+
+/**
  * Initializes a virtual address.
  */
 static inline vaddr_t va_init(uintvaddr_t v)
@@ -85,14 +101,6 @@
 }
 
 /**
- * Advances a physical address.
- */
-static inline paddr_t pa_add(paddr_t pa, size_t n)
-{
-	return pa_init(pa_addr(pa) + n);
-}
-
-/**
  * Casts a physical address to a virtual address.
  */
 static inline vaddr_t va_from_pa(paddr_t pa)
diff --git a/inc/hf/arch/mm.h b/inc/hf/arch/mm.h
index fea5f65..d6c8211 100644
--- a/inc/hf/arch/mm.h
+++ b/inc/hf/arch/mm.h
@@ -134,6 +134,11 @@
 uint64_t arch_mm_mode_to_attrs(int mode);
 
 /**
+ * Converts the stage-2 block attributes back to the corresponding mode.
+ */
+int arch_mm_stage2_attrs_to_mode(uint64_t attrs);
+
+/**
  * Initializes the arch specific memory management state.
  */
 bool arch_mm_init(paddr_t table, bool first);
diff --git a/inc/hf/mm.h b/inc/hf/mm.h
index 8567acd..509d79e 100644
--- a/inc/hf/mm.h
+++ b/inc/hf/mm.h
@@ -109,7 +109,8 @@
 bool mm_vm_unmap(struct mm_ptable *t, paddr_t begin, paddr_t end, int mode,
 		 struct mpool *ppool);
 bool mm_vm_unmap_hypervisor(struct mm_ptable *t, int mode, struct mpool *ppool);
-bool mm_vm_is_mapped(struct mm_ptable *t, ipaddr_t ipa, int mode);
+bool mm_vm_get_mode(struct mm_ptable *t, ipaddr_t begin, ipaddr_t end,
+		    int *mode);
 
 bool mm_init(struct mpool *ppool);
 bool mm_cpu_init(void);
diff --git a/src/api.c b/src/api.c
index d8df3e4..a804a11 100644
--- a/src/api.c
+++ b/src/api.c
@@ -253,6 +253,15 @@
 }
 
 /**
+ * Checks that the mode indicates memory that is valid, owned and exclusive.
+ */
+static bool api_mode_valid_owned_and_exclusive(int mode)
+{
+	return (mode & (MM_MODE_INVALID | MM_MODE_UNOWNED | MM_MODE_SHARED)) ==
+	       0;
+}
+
+/**
  * Configures the VM to send/receive data through the specified pages. The pages
  * must not be shared.
  */
@@ -264,6 +273,7 @@
 	paddr_t pa_send_end;
 	paddr_t pa_recv_begin;
 	paddr_t pa_recv_end;
+	int mode;
 	int64_t ret;
 
 	/* Fail if addresses are not page-aligned. */
@@ -280,14 +290,17 @@
 		goto exit;
 	}
 
-	/*
-	 * TODO: Once memory sharing is implemented, we need to make sure that
-	 * these pages aren't and won't be shared.
-	 */
+	/* Ensure the pages are valid, owned and exclusive to the VM. */
+	if (!mm_vm_get_mode(&vm->ptable, send, ipa_add(send, PAGE_SIZE),
+			    &mode) ||
+	    !api_mode_valid_owned_and_exclusive(mode)) {
+		ret = -1;
+		goto exit;
+	}
 
-	/* Ensure the pages are accessible from the VM. */
-	if (!mm_vm_is_mapped(&vm->ptable, send, 0) ||
-	    !mm_vm_is_mapped(&vm->ptable, recv, 0)) {
+	if (!mm_vm_get_mode(&vm->ptable, recv, ipa_add(recv, PAGE_SIZE),
+			    &mode) ||
+	    !api_mode_valid_owned_and_exclusive(mode)) {
 		ret = -1;
 		goto exit;
 	}
diff --git a/src/arch/aarch64/mm.c b/src/arch/aarch64/mm.c
index 98eb251..5c5b281 100644
--- a/src/arch/aarch64/mm.c
+++ b/src/arch/aarch64/mm.c
@@ -64,6 +64,7 @@
 #define STAGE2_EXECUTE_EL0  UINT64_C(1)
 #define STAGE2_EXECUTE_NONE UINT64_C(2)
 #define STAGE2_EXECUTE_EL1  UINT64_C(3)
+#define STAGE2_EXECUTE_MASK UINT64_C(3)
 
 /* Table attributes only apply to stage 1 translations. */
 #define TABLE_NSTABLE  (UINT64_C(1) << 63)
@@ -301,6 +302,11 @@
 {
 	uint64_t attrs = 0;
 
+	/* Define the valid bit. */
+	if (!(mode & MM_MODE_INVALID)) {
+		attrs |= PTE_VALID;
+	}
+
 	if (mode & MM_MODE_STAGE1) {
 		attrs |= STAGE1_AF | STAGE1_SH(OUTER_SHAREABLE);
 
@@ -322,11 +328,6 @@
 		} else {
 			attrs |= STAGE1_ATTRINDX(STAGE1_NORMALINDX);
 		}
-
-		/* Define the valid bit. */
-		if (!(mode & MM_MODE_INVALID)) {
-			attrs |= PTE_VALID;
-		}
 	} else {
 		uint64_t access = 0;
 
@@ -372,16 +373,43 @@
 		if (!(mode & MM_MODE_SHARED)) {
 			attrs |= STAGE2_SW_EXCLUSIVE;
 		}
-
-		/* Define the validity bit. */
-		if (!(mode & MM_MODE_INVALID)) {
-			attrs |= PTE_VALID;
-		}
 	}
 
 	return attrs;
 }
 
+int arch_mm_stage2_attrs_to_mode(uint64_t attrs)
+{
+	int mode = 0;
+
+	if (attrs & STAGE2_S2AP(STAGE2_ACCESS_READ)) {
+		mode |= MM_MODE_R;
+	}
+
+	if (attrs & STAGE2_S2AP(STAGE2_ACCESS_WRITE)) {
+		mode |= MM_MODE_W;
+	}
+
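+	/* Only the "execute at all levels" encoding maps to MM_MODE_X. */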
+	if ((attrs & STAGE2_XN(STAGE2_EXECUTE_MASK)) ==
+	    STAGE2_XN(STAGE2_EXECUTE_ALL)) {
+		mode |= MM_MODE_X;
+	}
+
+	if (!(attrs & STAGE2_SW_OWNED)) {
+		mode |= MM_MODE_UNOWNED;
+	}
+
+	if (!(attrs & STAGE2_SW_EXCLUSIVE)) {
+		mode |= MM_MODE_SHARED;
+	}
+
+	if (!(attrs & PTE_VALID)) {
+		mode |= MM_MODE_INVALID;
+	}
+
+	return mode;
+}
+
 /**
  * Determines the maximum level supported by the given mode.
  */
diff --git a/src/arch/fake/mm.c b/src/arch/fake/mm.c
index f4d28c7..561aa4d 100644
--- a/src/arch/fake/mm.c
+++ b/src/arch/fake/mm.c
@@ -41,7 +41,7 @@
 
 pte_t arch_mm_absent_pte(uint8_t level)
 {
-	return ((uint64_t)(MM_MODE_INVALID | MM_MODE_UNOWNED)
+	return ((uint64_t)(MM_MODE_INVALID | MM_MODE_UNOWNED | MM_MODE_SHARED)
 		<< PTE_ATTR_MODE_SHIFT) >>
 	       PTE_LEVEL_SHIFT(level);
 }
@@ -103,8 +103,7 @@
 
 uint64_t arch_mm_pte_attrs(pte_t pte, uint8_t level)
 {
-	return ((pte << PTE_LEVEL_SHIFT(level)) & PTE_ATTR_MODE_MASK) >>
-	       PTE_ATTR_MODE_SHIFT;
+	return (pte << PTE_LEVEL_SHIFT(level)) & PTE_ATTR_MODE_MASK;
 }
 
 uint64_t arch_mm_combine_table_entry_attrs(uint64_t table_attrs,
@@ -138,6 +137,8 @@
 
 uint64_t arch_mm_mode_to_attrs(int mode)
 {
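+	/* Strip the flags that are not properties of the memory region. */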
+	mode &= ~MM_MODE_NOINVALIDATE;
+
 	/* Stage-2 ignores the device mode. */
 	if (!(mode & MM_MODE_STAGE1)) {
 		mode &= ~MM_MODE_D;
@@ -146,6 +147,11 @@
 	return ((uint64_t)mode << PTE_ATTR_MODE_SHIFT) & PTE_ATTR_MODE_MASK;
 }
 
+int arch_mm_stage2_attrs_to_mode(uint64_t attrs)
+{
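+	/* The fake attributes encode the mode directly so shift it back. */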
+	return attrs >> PTE_ATTR_MODE_SHIFT;
+}
+
 bool arch_mm_init(paddr_t table, bool first)
 {
 	/* No initialization required. */
diff --git a/src/mm.c b/src/mm.c
index 55340a1..e46902a 100644
--- a/src/mm.c
+++ b/src/mm.c
@@ -625,56 +625,99 @@
 }
 
 /**
- * Determines if the given address is valid in the address space of the given
- * page table by recursively traversing all levels of the page table.
+ * Gets the attributes applied to the given range of stage-2 addresses at the
+ * given level.
+ *
+ * The `got_attrs` argument is initially passed as false; once `attrs` has
+ * been set to the attributes of part of the range, it is passed as true.
+ *
+ * The value returned in `attrs` is only valid if the function returns true.
+ *
+ * Returns true if the whole range has the same attributes and false otherwise.
  */
-static bool mm_is_mapped_recursive(struct mm_page_table *table,
-				   ptable_addr_t addr, uint8_t level)
+static bool mm_ptable_get_attrs_level(struct mm_page_table *table,
+				      ptable_addr_t begin, ptable_addr_t end,
+				      uint8_t level, bool got_attrs,
+				      uint64_t *attrs)
 {
-	pte_t pte;
-	ptable_addr_t va_level_end = mm_level_end(addr, level);
+	pte_t *pte = &table->entries[mm_index(begin, level)];
+	ptable_addr_t level_end = mm_level_end(begin, level);
+	size_t entry_size = mm_entry_size(level);
 
-	/* It isn't mapped if it doesn't fit in the table. */
-	if (addr >= va_level_end) {
-		return false;
+	/* Cap end so that we don't go over the current level max. */
+	if (end > level_end) {
+		end = level_end;
 	}
 
-	pte = table->entries[mm_index(addr, level)];
+	/* Check that each entry has matching attributes. */
+	while (begin < end) {
+		if (arch_mm_pte_is_table(*pte, level)) {
+			if (!mm_ptable_get_attrs_level(
+				    mm_page_table_from_pa(
+					    arch_mm_table_from_pte(*pte,
+								   level)),
+				    begin, end, level - 1, got_attrs, attrs)) {
+				return false;
+			}
+			got_attrs = true;
+		} else {
+			if (!got_attrs) {
+				*attrs = arch_mm_pte_attrs(*pte, level);
+				got_attrs = true;
+			} else if (arch_mm_pte_attrs(*pte, level) != *attrs) {
+				return false;
+			}
+		}
 
-	if (!arch_mm_pte_is_valid(pte, level)) {
-		return false;
-	}
-
-	if (arch_mm_pte_is_table(pte, level)) {
-		return mm_is_mapped_recursive(
-			mm_page_table_from_pa(
-				arch_mm_table_from_pte(pte, level)),
-			addr, level - 1);
+		begin = mm_start_of_next_block(begin, entry_size);
+		pte++;
 	}
 
-	/* The entry is a valid block. */
-	return true;
+	return got_attrs;
 }
 
 /**
- * Determines if the given address is valid in the address space of the given
- * page table.
+ * Gets the attributes applied to the given range of addresses in the stage-2
+ * table.
+ *
+ * The value returned in `attrs` is only valid if the function returns true.
+ *
+ * Returns true if the whole range has the same attributes and false otherwise.
  */
-static bool mm_ptable_is_mapped(struct mm_ptable *t, ptable_addr_t addr,
-				int mode)
+static bool mm_vm_get_attrs(struct mm_ptable *t, ptable_addr_t begin,
+			    ptable_addr_t end, uint64_t *attrs)
 {
-	struct mm_page_table *tables = mm_page_table_from_pa(t->root);
-	uint8_t level = arch_mm_max_level(mode);
-	size_t index;
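+	/* The arch queries below only depend on MM_MODE_STAGE1 being clear. */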
+	int mode = 0;
+	uint8_t max_level = arch_mm_max_level(mode);
+	uint8_t root_level = max_level + 1;
+	size_t root_table_size = mm_entry_size(root_level);
+	ptable_addr_t ptable_end =
+		arch_mm_root_table_count(mode) * mm_entry_size(root_level);
+	struct mm_page_table *table;
+	bool got_attrs = false;
 
-	addr = mm_round_down_to_page(addr);
-	index = mm_index(addr, level + 1);
+	begin = mm_round_down_to_page(begin);
+	end = mm_round_up_to_page(end);
 
-	if (index >= arch_mm_root_table_count(mode)) {
+	/* Fail if the addresses are out of range. */
+	if (end > ptable_end) {
 		return false;
 	}
 
-	return mm_is_mapped_recursive(&tables[index], addr, level);
+	table = &mm_page_table_from_pa(t->root)[mm_index(begin, root_level)];
+	while (begin < end) {
+		if (!mm_ptable_get_attrs_level(table, begin, end, max_level,
+					       got_attrs, attrs)) {
+			return false;
+		}
+
+		got_attrs = true;
+		begin = mm_start_of_next_block(begin, root_table_size);
+		table++;
+	}
+
+	return got_attrs;
 }
 
 /**
@@ -772,12 +815,23 @@
 }
 
 /**
- * Checks whether the given intermediate physical addess is mapped in the given
- * page table of a VM.
+ * Gets the mode of the given range of intermediate physical addresses if they
+ * are mapped with the same mode.
+ *
+ * Returns true if the range is mapped with the same mode and false otherwise.
  */
-bool mm_vm_is_mapped(struct mm_ptable *t, ipaddr_t ipa, int mode)
+bool mm_vm_get_mode(struct mm_ptable *t, ipaddr_t begin, ipaddr_t end,
+		    int *mode)
 {
-	return mm_ptable_is_mapped(t, ipa_addr(ipa), mode & ~MM_MODE_STAGE1);
+	uint64_t attrs;
+	bool ret;
+
+	ret = mm_vm_get_attrs(t, ipa_addr(begin), ipa_addr(end), &attrs);
+	if (ret) {
+		*mode = arch_mm_stage2_attrs_to_mode(attrs);
+	}
+
+	return ret;
 }
 
 /**
diff --git a/src/mm_test.cc b/src/mm_test.cc
index 69164f4..c159bba 100644
--- a/src/mm_test.cc
+++ b/src/mm_test.cc
@@ -53,6 +53,16 @@
 }
 
 /**
+ * Checks whether the address is mapped in the address space.
+ */
+bool mm_vm_is_mapped(struct mm_ptable *t, ipaddr_t ipa)
+{
+	int mode;
+	return mm_vm_get_mode(t, ipa, ipa_add(ipa, 1), &mode) &&
+	       (mode & MM_MODE_INVALID) == 0;
+}
+
+/**
  * Get an STL representation of the page table.
  */
 std::span<pte_t, MM_PTE_PER_PAGE> get_table(paddr_t pa)
@@ -360,7 +370,7 @@
 	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(20), pa_init(10), mode,
 				       &ipa, &ppool));
 	EXPECT_THAT(ipa_addr(ipa), Eq(20));
-	EXPECT_TRUE(mm_vm_is_mapped(&ptable, ipa, mode));
+	EXPECT_TRUE(mm_vm_is_mapped(&ptable, ipa));
 	mm_ptable_fini(&ptable, mode, &ppool);
 }
 
@@ -689,9 +699,9 @@
 	constexpr int mode = 0;
 	struct mm_ptable ptable;
 	ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));
-	EXPECT_FALSE(mm_vm_is_mapped(&ptable, ipa_init(0), mode));
-	EXPECT_FALSE(mm_vm_is_mapped(&ptable, ipa_init(0x8123'2344), mode));
-	EXPECT_FALSE(mm_vm_is_mapped(&ptable, ipa_init(0x1e0'0000'0073), mode));
+	EXPECT_FALSE(mm_vm_is_mapped(&ptable, ipa_init(0)));
+	EXPECT_FALSE(mm_vm_is_mapped(&ptable, ipa_init(0x8123'2344)));
+	EXPECT_FALSE(mm_vm_is_mapped(&ptable, ipa_init(0x1e0'0000'0073)));
 	mm_ptable_fini(&ptable, mode, &ppool);
 }
 
@@ -705,9 +715,9 @@
 	ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));
 	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
 				       nullptr, &ppool));
-	EXPECT_TRUE(mm_vm_is_mapped(&ptable, ipa_init(0), mode));
-	EXPECT_TRUE(mm_vm_is_mapped(&ptable, ipa_init(0xf247'a7b3), mode));
-	EXPECT_TRUE(mm_vm_is_mapped(&ptable, ipa_init(0x1ff'7bfa'983b), mode));
+	EXPECT_TRUE(mm_vm_is_mapped(&ptable, ipa_init(0)));
+	EXPECT_TRUE(mm_vm_is_mapped(&ptable, ipa_init(0xf247'a7b3)));
+	EXPECT_TRUE(mm_vm_is_mapped(&ptable, ipa_init(0x1ff'7bfa'983b)));
 	mm_ptable_fini(&ptable, mode, &ppool);
 }
 
@@ -723,10 +733,10 @@
 	ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));
 	ASSERT_TRUE(mm_vm_identity_map(&ptable, page_begin, page_end, mode,
 				       nullptr, &ppool));
-	EXPECT_TRUE(mm_vm_is_mapped(&ptable, ipa_from_pa(page_begin), mode));
-	EXPECT_TRUE(mm_vm_is_mapped(
-		&ptable, ipa_from_pa(pa_add(page_begin, 127)), mode));
-	EXPECT_FALSE(mm_vm_is_mapped(&ptable, ipa_from_pa(page_end), mode));
+	EXPECT_TRUE(mm_vm_is_mapped(&ptable, ipa_from_pa(page_begin)));
+	EXPECT_TRUE(
+		mm_vm_is_mapped(&ptable, ipa_from_pa(pa_add(page_begin, 127))));
+	EXPECT_FALSE(mm_vm_is_mapped(&ptable, ipa_from_pa(page_end)));
 	mm_ptable_fini(&ptable, mode, &ppool);
 }
 
@@ -740,12 +750,95 @@
 	ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));
 	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
 				       nullptr, &ppool));
-	EXPECT_FALSE(mm_vm_is_mapped(&ptable, ipa_from_pa(VM_MEM_END), mode));
-	EXPECT_FALSE(
-		mm_vm_is_mapped(&ptable, ipa_init(0x1000'adb7'8123), mode));
+	EXPECT_FALSE(mm_vm_is_mapped(&ptable, ipa_from_pa(VM_MEM_END)));
+	EXPECT_FALSE(mm_vm_is_mapped(&ptable, ipa_init(0x1000'adb7'8123)));
 	EXPECT_FALSE(mm_vm_is_mapped(
-		&ptable, ipa_init(std::numeric_limits<uintpaddr_t>::max()),
-		mode));
+		&ptable, ipa_init(std::numeric_limits<uintpaddr_t>::max())));
+	mm_ptable_fini(&ptable, mode, &ppool);
+}
+
+/**
+ * The mode of unmapped addresses can be retrieved and is set to invalid,
+ * unowned and shared.
+ */
+TEST_F(mm, get_mode_empty)
+{
+	constexpr int mode = 0;
+	constexpr int default_mode =
+		MM_MODE_INVALID | MM_MODE_UNOWNED | MM_MODE_SHARED;
+	struct mm_ptable ptable;
+	int read_mode;
+	ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));
+
+	read_mode = 0;
+	EXPECT_TRUE(
+		mm_vm_get_mode(&ptable, ipa_init(0), ipa_init(20), &read_mode));
+	EXPECT_THAT(read_mode, Eq(default_mode));
+
+	read_mode = 0;
+	EXPECT_TRUE(mm_vm_get_mode(&ptable, ipa_init(0x3c97'654d),
+				   ipa_init(0x3c97'e000), &read_mode));
+	EXPECT_THAT(read_mode, Eq(default_mode));
+
+	read_mode = 0;
+	EXPECT_TRUE(mm_vm_get_mode(&ptable, ipa_init(0x5f'ffff'ffff),
+				   ipa_init(0x1ff'ffff'ffff), &read_mode));
+	EXPECT_THAT(read_mode, Eq(default_mode));
+
+	mm_ptable_fini(&ptable, mode, &ppool);
+}
+
+/**
+ * Gets the mode of a range made up of individual pages, one on either side of
+ * a root table boundary.
+ */
+TEST_F(mm, get_mode_pages_across_tables)
+{
+	constexpr int mode = MM_MODE_INVALID | MM_MODE_SHARED;
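+	/* Pick two pages that straddle a root table boundary. */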
+	const paddr_t map_begin = pa_init(0x180'0000'0000 - PAGE_SIZE);
+	const paddr_t map_end = pa_add(map_begin, 2 * PAGE_SIZE);
+	struct mm_ptable ptable;
+	int read_mode;
+	ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));
+	ASSERT_TRUE(mm_vm_identity_map(&ptable, map_begin, map_end, mode,
+				       nullptr, &ppool));
+
+	read_mode = 0;
+	EXPECT_TRUE(mm_vm_get_mode(&ptable, ipa_from_pa(map_begin),
+				   ipa_from_pa(pa_add(map_begin, PAGE_SIZE)),
+				   &read_mode));
+	EXPECT_THAT(read_mode, Eq(mode));
+
+	EXPECT_FALSE(mm_vm_get_mode(&ptable, ipa_init(0),
+				    ipa_from_pa(pa_add(map_begin, PAGE_SIZE)),
+				    &read_mode));
+
+	read_mode = 0;
+	EXPECT_TRUE(mm_vm_get_mode(&ptable, ipa_from_pa(map_begin),
+				   ipa_from_pa(map_end), &read_mode));
+	EXPECT_THAT(read_mode, Eq(mode));
+	mm_ptable_fini(&ptable, mode, &ppool);
+}
+
+/**
+ * Anything out of range fails to retrieve the mode.
+ */
+TEST_F(mm, get_mode_out_of_range)
+{
+	constexpr int mode = MM_MODE_UNOWNED;
+	struct mm_ptable ptable;
+	int read_mode;
+	ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));
+	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
+				       nullptr, &ppool));
+	EXPECT_FALSE(mm_vm_get_mode(&ptable, ipa_init(0),
+				    ipa_from_pa(pa_add(VM_MEM_END, 1)),
+				    &read_mode));
+	EXPECT_FALSE(mm_vm_get_mode(&ptable, ipa_from_pa(VM_MEM_END),
+				    ipa_from_pa(pa_add(VM_MEM_END, 1)),
+				    &read_mode));
+	EXPECT_FALSE(mm_vm_get_mode(&ptable, ipa_init(0x1'1234'1234'1234),
+				    ipa_init(0x2'0000'0000'0000), &read_mode));
 	mm_ptable_fini(&ptable, mode, &ppool);
 }