Model attributes in the fake architecture.
To enable unit tests for attributes, the fake architecture used in those
tests needs to model the attributes.
Change-Id: I5065f3e6d98bce4be9d446fc6fec59559a750d06
diff --git a/src/arch/aarch64/inc/hf/arch/mm.h b/src/arch/aarch64/inc/hf/arch/mm.h
index 8423c97..653538e 100644
--- a/src/arch/aarch64/inc/hf/arch/mm.h
+++ b/src/arch/aarch64/inc/hf/arch/mm.h
@@ -40,39 +40,39 @@
* also check that the specific implementation defines everything it needs to
* too.
*/
-pte_t arch_mm_absent_pte(int level);
-pte_t arch_mm_table_pte(int level, paddr_t pa);
-pte_t arch_mm_block_pte(int level, paddr_t pa, uint64_t attrs);
-bool arch_mm_is_block_allowed(int level);
+pte_t arch_mm_absent_pte(uint8_t level);
+pte_t arch_mm_table_pte(uint8_t level, paddr_t pa);
+pte_t arch_mm_block_pte(uint8_t level, paddr_t pa, uint64_t attrs);
+bool arch_mm_is_block_allowed(uint8_t level);
/**
* Determines if a PTE is present i.e. it contains information and therefore
* needs to exist in the page table. Any non-absent PTE is present.
*/
-bool arch_mm_pte_is_present(pte_t pte, int level);
+bool arch_mm_pte_is_present(pte_t pte, uint8_t level);
/**
* Determines if a PTE is valid i.e. it can affect the address space. Tables and
* valid blocks fall into this category. Invalid blocks do not as they hold
* information about blocks that are not in the address space.
*/
-bool arch_mm_pte_is_valid(pte_t pte, int level);
+bool arch_mm_pte_is_valid(pte_t pte, uint8_t level);
/**
* Determines if a PTE is a block and represents an address range, valid or
* invalid.
*/
-bool arch_mm_pte_is_block(pte_t pte, int level);
+bool arch_mm_pte_is_block(pte_t pte, uint8_t level);
/**
* Determines if a PTE represents a reference to a table of PTEs.
*/
-bool arch_mm_pte_is_table(pte_t pte, int level);
+bool arch_mm_pte_is_table(pte_t pte, uint8_t level);
paddr_t arch_mm_clear_pa(paddr_t pa);
-paddr_t arch_mm_block_from_pte(pte_t pte);
-paddr_t arch_mm_table_from_pte(pte_t pte);
-uint64_t arch_mm_pte_attrs(pte_t pte);
+paddr_t arch_mm_block_from_pte(pte_t pte, uint8_t level);
+paddr_t arch_mm_table_from_pte(pte_t pte, uint8_t level);
+uint64_t arch_mm_pte_attrs(pte_t pte, uint8_t level);
uint64_t arch_mm_combine_table_entry_attrs(uint64_t table_attrs,
uint64_t block_attrs);
void arch_mm_invalidate_stage1_range(vaddr_t va_begin, vaddr_t va_end);
diff --git a/src/arch/aarch64/mm.c b/src/arch/aarch64/mm.c
index a3809e0..c34f404 100644
--- a/src/arch/aarch64/mm.c
+++ b/src/arch/aarch64/mm.c
@@ -107,7 +107,7 @@
/**
* Returns the encoding of a page table entry that isn't present.
*/
-pte_t arch_mm_absent_pte(int level)
+pte_t arch_mm_absent_pte(uint8_t level)
{
(void)level;
return 0;
@@ -119,7 +119,7 @@
* The spec says that 'Table descriptors for stage 2 translations do not
* include any attribute field', so we don't take any attributes as arguments.
*/
-pte_t arch_mm_table_pte(int level, paddr_t pa)
+pte_t arch_mm_table_pte(uint8_t level, paddr_t pa)
{
/* This is the same for all levels on aarch64. */
(void)level;
@@ -131,7 +131,7 @@
*
* The level must allow block entries.
*/
-pte_t arch_mm_block_pte(int level, paddr_t pa, uint64_t attrs)
+pte_t arch_mm_block_pte(uint8_t level, paddr_t pa, uint64_t attrs)
{
pte_t pte = pa_addr(pa) | attrs;
if (level == 0) {
@@ -146,7 +146,7 @@
*
* Level 0 must allow block entries.
*/
-bool arch_mm_is_block_allowed(int level)
+bool arch_mm_is_block_allowed(uint8_t level)
{
return level <= 2;
}
@@ -155,7 +155,7 @@
* Determines if the given pte is present, i.e., if it is valid or it is invalid
* but still holds state about the memory so needs to be present in the table.
*/
-bool arch_mm_pte_is_present(pte_t pte, int level)
+bool arch_mm_pte_is_present(pte_t pte, uint8_t level)
{
return arch_mm_pte_is_valid(pte, level) || (pte & STAGE2_SW_OWNED) != 0;
}
@@ -164,7 +164,7 @@
* Determines if the given pte is valid, i.e., if it points to another table,
* to a page, or a block of pages that can be accessed.
*/
-bool arch_mm_pte_is_valid(pte_t pte, int level)
+bool arch_mm_pte_is_valid(pte_t pte, uint8_t level)
{
(void)level;
return (pte & PTE_VALID) != 0;
@@ -173,7 +173,7 @@
/**
* Determines if the given pte references a block of pages.
*/
-bool arch_mm_pte_is_block(pte_t pte, int level)
+bool arch_mm_pte_is_block(pte_t pte, uint8_t level)
{
/* We count pages at level 0 as blocks. */
return arch_mm_is_block_allowed(level) &&
@@ -185,7 +185,7 @@
/**
* Determines if the given pte references another table.
*/
-bool arch_mm_pte_is_table(pte_t pte, int level)
+bool arch_mm_pte_is_table(pte_t pte, uint8_t level)
{
return level != 0 && arch_mm_pte_is_valid(pte, level) &&
(pte & PTE_TABLE) != 0;
@@ -209,8 +209,9 @@
* Extracts the physical address of the block referred to by the given page
* table entry.
*/
-paddr_t arch_mm_block_from_pte(pte_t pte)
+paddr_t arch_mm_block_from_pte(pte_t pte, uint8_t level)
{
+ (void)level;
return pa_init(pte_addr(pte));
}
@@ -218,8 +219,9 @@
* Extracts the physical address of the page table referred to by the given page
* table entry.
*/
-paddr_t arch_mm_table_from_pte(pte_t pte)
+paddr_t arch_mm_table_from_pte(pte_t pte, uint8_t level)
{
+ (void)level;
return pa_init(pte_addr(pte));
}
@@ -227,8 +229,9 @@
* Extracts the architecture specific attributes applies to the given page table
* entry.
*/
-uint64_t arch_mm_pte_attrs(pte_t pte)
+uint64_t arch_mm_pte_attrs(pte_t pte, uint8_t level)
{
+ (void)level;
return pte & PTE_ATTR_MASK;
}
diff --git a/src/arch/fake/inc/hf/arch/mm.h b/src/arch/fake/inc/hf/arch/mm.h
index face8cf..13064b5 100644
--- a/src/arch/fake/inc/hf/arch/mm.h
+++ b/src/arch/fake/inc/hf/arch/mm.h
@@ -30,18 +30,18 @@
* also check that the specific implementation defines everything it needs to
* too.
*/
-pte_t arch_mm_absent_pte(int level);
-pte_t arch_mm_table_pte(int level, paddr_t pa);
-pte_t arch_mm_block_pte(int level, paddr_t pa, uint64_t attrs);
-bool arch_mm_is_block_allowed(int level);
-bool arch_mm_pte_is_present(pte_t pte, int level);
-bool arch_mm_pte_is_valid(pte_t pte, int level);
-bool arch_mm_pte_is_table(pte_t pte, int level);
-bool arch_mm_pte_is_block(pte_t pte, int level);
+pte_t arch_mm_absent_pte(uint8_t level);
+pte_t arch_mm_table_pte(uint8_t level, paddr_t pa);
+pte_t arch_mm_block_pte(uint8_t level, paddr_t pa, uint64_t attrs);
+bool arch_mm_is_block_allowed(uint8_t level);
+bool arch_mm_pte_is_present(pte_t pte, uint8_t level);
+bool arch_mm_pte_is_valid(pte_t pte, uint8_t level);
+bool arch_mm_pte_is_table(pte_t pte, uint8_t level);
+bool arch_mm_pte_is_block(pte_t pte, uint8_t level);
paddr_t arch_mm_clear_pa(paddr_t pa);
-paddr_t arch_mm_block_from_pte(pte_t pte);
-paddr_t arch_mm_table_from_pte(pte_t pte);
-uint64_t arch_mm_pte_attrs(pte_t pte);
+paddr_t arch_mm_block_from_pte(pte_t pte, uint8_t level);
+paddr_t arch_mm_table_from_pte(pte_t pte, uint8_t level);
+uint64_t arch_mm_pte_attrs(pte_t pte, uint8_t level);
uint64_t arch_mm_combine_table_entry_attrs(uint64_t table_attrs,
uint64_t block_attrs);
void arch_mm_invalidate_stage1_range(vaddr_t va_begin, vaddr_t va_end);
diff --git a/src/arch/fake/mm.c b/src/arch/fake/mm.c
index 721b230..61e0414 100644
--- a/src/arch/fake/mm.c
+++ b/src/arch/fake/mm.c
@@ -19,103 +19,92 @@
#include "hf/mm.h"
/*
- * Our fake architecture has page tables base on those of aarch64:
- *
- * - The highest level table is always 2, lowest level is 0.
- * - Blocks are allowed at all levels.
- *
- * There are four kinds of entry:
- *
- * 1. Absent: 0
- * 2. Page, at level 0: <page-aligned address> | <attrs> | 0x3
- * 3. Block, at level 2 or 1: <block-aligned address> | <attrs> | 0x1
- * 4. Subtable, at level 2 or 1: <subtable address> | 0x3
- *
- * <attrs> are always 0 for now.
+ * The fake architecture uses the mode flags to represent the attributes applied
+ * to memory. The flags are shifted to avoid equality of modes and attributes.
*/
+#define PTE_ATTR_MODE_SHIFT 48
+#define PTE_ATTR_MODE_MASK \
+ ((uint64_t)(MM_MODE_R | MM_MODE_W | MM_MODE_X | MM_MODE_D | \
+ MM_MODE_INVALID | MM_MODE_UNOWNED | MM_MODE_SHARED | \
+ MM_MODE_STAGE1) \
+ << PTE_ATTR_MODE_SHIFT)
-pte_t arch_mm_absent_pte(int level)
+/* The bit to distinguish a table from a block is the highest of the page bits.
+ */
+#define PTE_TABLE (UINT64_C(1) << (PAGE_BITS - 1))
+
+/* Mask for the address part of an entry. */
+#define PTE_ADDR_MASK (~(PTE_ATTR_MODE_MASK | (UINT64_C(1) << PAGE_BITS) - 1))
+
+/* Offset the bits of each level so they can't be misused. */
+#define PTE_LEVEL_SHIFT(lvl) ((lvl)*2)
+
+pte_t arch_mm_absent_pte(uint8_t level)
{
- (void)level;
- return 0;
+ return ((uint64_t)(MM_MODE_INVALID | MM_MODE_UNOWNED)
+ << PTE_ATTR_MODE_SHIFT) >>
+ PTE_LEVEL_SHIFT(level);
}
-pte_t arch_mm_table_pte(int level, paddr_t pa)
+pte_t arch_mm_table_pte(uint8_t level, paddr_t pa)
{
- /* This is the same for all levels. */
- (void)level;
- return pa_addr(pa) | 0x3;
+ return (pa_addr(pa) | PTE_TABLE) >> PTE_LEVEL_SHIFT(level);
}
-pte_t arch_mm_block_pte(int level, paddr_t pa, uint64_t attrs)
+pte_t arch_mm_block_pte(uint8_t level, paddr_t pa, uint64_t attrs)
{
- /* Single pages are encoded differently to larger blocks. */
- pte_t pte = pa_addr(pa) | attrs;
- if (level == 0) {
- pte |= 0x2;
- }
- return pte;
+ return (pa_addr(pa) | attrs) >> PTE_LEVEL_SHIFT(level);
}
-bool arch_mm_is_block_allowed(int level)
+bool arch_mm_is_block_allowed(uint8_t level)
{
- /* All levels can have blocks. */
(void)level;
return true;
}
-bool arch_mm_pte_is_present(pte_t pte, int level)
+bool arch_mm_pte_is_present(pte_t pte, uint8_t level)
{
- /* TODO: model attributes. */
- return arch_mm_pte_is_valid(pte, level);
+ return arch_mm_pte_is_valid(pte, level) ||
+ !(((pte << PTE_LEVEL_SHIFT(level)) >> PTE_ATTR_MODE_SHIFT) &
+ MM_MODE_UNOWNED);
}
-bool arch_mm_pte_is_valid(pte_t pte, int level)
+bool arch_mm_pte_is_valid(pte_t pte, uint8_t level)
{
- (void)level;
- return (pte & 0x1) != 0;
+ return !(((pte << PTE_LEVEL_SHIFT(level)) >> PTE_ATTR_MODE_SHIFT) &
+ MM_MODE_INVALID);
}
-bool arch_mm_pte_is_table(pte_t pte, int level)
+bool arch_mm_pte_is_block(pte_t pte, uint8_t level)
{
- /* Level 0 only contains pages so cannot be a table. */
- return level != 0 && (pte & 0x3) == 0x3;
+ return arch_mm_pte_is_present(pte, level) &&
+ !arch_mm_pte_is_table(pte, level);
}
-bool arch_mm_pte_is_block(pte_t pte, int level)
+bool arch_mm_pte_is_table(pte_t pte, uint8_t level)
{
- /* Single pages are encoded differently to larger blocks. */
- return (level == 0 ? (pte & 0x2) != 0
- : arch_mm_pte_is_present(pte, level) &&
- !arch_mm_pte_is_table(pte, level));
-}
-
-static uint64_t hf_arch_fake_mm_clear_pte_attrs(pte_t pte)
-{
- return pte & ~0x3;
+ return (pte << PTE_LEVEL_SHIFT(level)) & PTE_TABLE;
}
paddr_t arch_mm_clear_pa(paddr_t pa)
{
- /* This is assumed to round down to the page boundary. */
- return pa_init(hf_arch_fake_mm_clear_pte_attrs(pa_addr(pa)) &
- ~((1 << PAGE_BITS) - 1));
+ return pa_init(pa_addr(pa) & PTE_ADDR_MASK);
}
-paddr_t arch_mm_block_from_pte(pte_t pte)
+paddr_t arch_mm_block_from_pte(pte_t pte, uint8_t level)
{
- return pa_init(hf_arch_fake_mm_clear_pte_attrs(pte));
+ return pa_init((pte << PTE_LEVEL_SHIFT(level)) & PTE_ADDR_MASK);
}
-paddr_t arch_mm_table_from_pte(pte_t pte)
+paddr_t arch_mm_table_from_pte(pte_t pte, uint8_t level)
{
- return pa_init(hf_arch_fake_mm_clear_pte_attrs(pte));
+ return pa_init((pte << PTE_LEVEL_SHIFT(level)) & PTE_ADDR_MASK);
}
-uint64_t arch_mm_pte_attrs(pte_t pte)
+uint64_t arch_mm_pte_attrs(pte_t pte, uint8_t level)
{
- /* Attributes are not modelled fully. */
- return pte & 0x1;
+ return ((pte << PTE_LEVEL_SHIFT(level)) & PTE_ATTR_MODE_MASK) >>
+ PTE_ATTR_MODE_SHIFT;
}
uint64_t arch_mm_combine_table_entry_attrs(uint64_t table_attrs,
@@ -149,8 +138,7 @@
uint64_t arch_mm_mode_to_attrs(int mode)
{
- /* Attributes are not modelled fully. */
- return mode & MM_MODE_INVALID ? 0 : 0x1;
+ return ((uint64_t)mode << PTE_ATTR_MODE_SHIFT) & PTE_ATTR_MODE_MASK;
}
bool arch_mm_init(paddr_t table, bool first)
diff --git a/src/mm.c b/src/mm.c
index 18a2e78..dcf94a5 100644
--- a/src/mm.c
+++ b/src/mm.c
@@ -151,7 +151,7 @@
}
/* Recursively free any subtables. */
- table = mm_page_table_from_pa(arch_mm_table_from_pte(pte));
+ table = mm_page_table_from_pa(arch_mm_table_from_pte(pte, level));
for (i = 0; i < MM_PTE_PER_PAGE; ++i) {
mm_free_page_pte(table->entries[i], level - 1);
}
@@ -209,7 +209,7 @@
/* Just return pointer to table if it's already populated. */
if (arch_mm_pte_is_table(v, level)) {
- return mm_page_table_from_pa(arch_mm_table_from_pte(v));
+ return mm_page_table_from_pa(arch_mm_table_from_pte(v, level));
}
/* Allocate a new table. */
@@ -223,8 +223,8 @@
if (arch_mm_pte_is_block(v, level)) {
inc = mm_entry_size(level_below);
new_pte = arch_mm_block_pte(level_below,
- arch_mm_block_from_pte(v),
- arch_mm_pte_attrs(v));
+ arch_mm_block_from_pte(v, level),
+ arch_mm_pte_attrs(v, level));
} else {
inc = 0;
new_pte = arch_mm_absent_pte(level_below);
@@ -291,7 +291,7 @@
while (begin < end) {
if (unmap ? !arch_mm_pte_is_present(*pte, level)
: arch_mm_pte_is_block(*pte, level) &&
- arch_mm_pte_attrs(*pte) == attrs) {
+ arch_mm_pte_attrs(*pte, level) == attrs) {
/*
* If the entry is already mapped with the right
* attributes, or already absent in the case of
@@ -474,7 +474,7 @@
if (arch_mm_pte_is_table(table->entries[i], level)) {
mm_dump_table_recursive(
mm_page_table_from_pa(arch_mm_table_from_pte(
- table->entries[i])),
+ table->entries[i], level)),
level - 1, max_level);
}
}
@@ -502,7 +502,7 @@
static pte_t mm_table_pte_to_absent(pte_t entry, uint8_t level)
{
struct mm_page_table *table =
- mm_page_table_from_pa(arch_mm_table_from_pte(entry));
+ mm_page_table_from_pa(arch_mm_table_from_pte(entry, level));
/*
* Free the subtable. This is safe to do directly (rather than
@@ -533,16 +533,16 @@
return entry;
}
- table = mm_page_table_from_pa(arch_mm_table_from_pte(entry));
+ table = mm_page_table_from_pa(arch_mm_table_from_pte(entry, level));
/*
* Replace subtable with a single block, with equivalent
* attributes.
*/
- block_attrs = arch_mm_pte_attrs(table->entries[0]);
- table_attrs = arch_mm_pte_attrs(entry);
+ block_attrs = arch_mm_pte_attrs(table->entries[0], level - 1);
+ table_attrs = arch_mm_pte_attrs(entry, level);
combined_attrs =
arch_mm_combine_table_entry_attrs(table_attrs, block_attrs);
- block_address = arch_mm_block_from_pte(table->entries[0]);
+ block_address = arch_mm_block_from_pte(table->entries[0], level - 1);
/* Free the subtable. */
hfree(table);
/*
@@ -570,13 +570,13 @@
return entry;
}
- table = mm_page_table_from_pa(arch_mm_table_from_pte(entry));
+ table = mm_page_table_from_pa(arch_mm_table_from_pte(entry, level));
/*
* Check if all entries are blocks with the same flags or are all
* absent.
*/
- attrs = arch_mm_pte_attrs(table->entries[0]);
+ attrs = arch_mm_pte_attrs(table->entries[0], level);
for (i = 0; i < MM_PTE_PER_PAGE; ++i) {
/*
* First try to defrag the entry, in case it is a subtable.
@@ -593,7 +593,7 @@
* what we have so far.
*/
if (!arch_mm_pte_is_block(table->entries[i], level - 1) ||
- arch_mm_pte_attrs(table->entries[i]) != attrs) {
+ arch_mm_pte_attrs(table->entries[i], level) != attrs) {
identical_blocks_so_far = false;
}
}
@@ -653,7 +653,8 @@
if (arch_mm_pte_is_table(pte, level)) {
return mm_is_mapped_recursive(
- mm_page_table_from_pa(arch_mm_table_from_pte(pte)),
+ mm_page_table_from_pa(
+ arch_mm_table_from_pte(pte, level)),
addr, level - 1);
}
diff --git a/src/mm_test.cc b/src/mm_test.cc
index cd1e663..2a952dc 100644
--- a/src/mm_test.cc
+++ b/src/mm_test.cc
@@ -42,7 +42,6 @@
constexpr size_t TEST_HEAP_SIZE = PAGE_SIZE * 16;
const int TOP_LEVEL = arch_mm_max_level(0);
-const pte_t ABSENT_ENTRY = arch_mm_absent_pte(TOP_LEVEL);
const paddr_t VM_MEM_END = pa_init(0x200'0000'0000);
/**
@@ -102,8 +101,9 @@
constexpr int mode = MM_MODE_STAGE1;
struct mm_ptable ptable;
ASSERT_TRUE(mm_ptable_init(&ptable, mode));
- EXPECT_THAT(get_ptable(ptable, mode),
- AllOf(SizeIs(1), Each(Each(ABSENT_ENTRY))));
+ EXPECT_THAT(
+ get_ptable(ptable, mode),
+ AllOf(SizeIs(1), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
mm_ptable_fini(&ptable, mode);
}
@@ -115,8 +115,9 @@
constexpr int mode = 0;
struct mm_ptable ptable;
ASSERT_TRUE(mm_ptable_init(&ptable, mode));
- EXPECT_THAT(get_ptable(ptable, mode),
- AllOf(SizeIs(4), Each(Each(ABSENT_ENTRY))));
+ EXPECT_THAT(
+ get_ptable(ptable, mode),
+ AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
mm_ptable_fini(&ptable, mode);
}
@@ -138,20 +139,25 @@
ASSERT_THAT(TOP_LEVEL, Eq(2));
/* Check that the first page is mapped and nothing else. */
- EXPECT_THAT(std::span(tables).last(3), Each(Each(ABSENT_ENTRY)));
+ EXPECT_THAT(std::span(tables).last(3),
+ Each(Each(arch_mm_absent_pte(TOP_LEVEL))));
auto table_l2 = tables.front();
- EXPECT_THAT(table_l2.subspan(1), Each(ABSENT_ENTRY));
+ EXPECT_THAT(table_l2.subspan(1), Each(arch_mm_absent_pte(TOP_LEVEL)));
ASSERT_TRUE(arch_mm_pte_is_table(table_l2[0], TOP_LEVEL));
- auto table_l1 = get_table(arch_mm_table_from_pte(table_l2[0]));
- EXPECT_THAT(table_l1.subspan(1), Each(ABSENT_ENTRY));
+ auto table_l1 =
+ get_table(arch_mm_table_from_pte(table_l2[0], TOP_LEVEL));
+ EXPECT_THAT(table_l1.subspan(1),
+ Each(arch_mm_absent_pte(TOP_LEVEL - 1)));
ASSERT_TRUE(arch_mm_pte_is_table(table_l1[0], TOP_LEVEL - 1));
- auto table_l0 = get_table(arch_mm_table_from_pte(table_l1[0]));
- EXPECT_THAT(table_l0.subspan(1), Each(ABSENT_ENTRY));
+ auto table_l0 =
+ get_table(arch_mm_table_from_pte(table_l1[0], TOP_LEVEL - 1));
+ EXPECT_THAT(table_l0.subspan(1),
+ Each(arch_mm_absent_pte(TOP_LEVEL - 2)));
ASSERT_TRUE(arch_mm_pte_is_block(table_l0[0], TOP_LEVEL - 2));
- EXPECT_THAT(pa_addr(arch_mm_block_from_pte(table_l0[0])),
+ EXPECT_THAT(pa_addr(arch_mm_block_from_pte(table_l0[0], TOP_LEVEL - 2)),
Eq(pa_addr(page_begin)));
mm_ptable_fini(&ptable, mode);
@@ -178,20 +184,27 @@
ASSERT_THAT(TOP_LEVEL, Eq(2));
/* Check that the last page is mapped, and nothing else. */
- EXPECT_THAT(std::span(tables).first(3), Each(Each(ABSENT_ENTRY)));
+ EXPECT_THAT(std::span(tables).first(3),
+ Each(Each(arch_mm_absent_pte(TOP_LEVEL))));
auto table_l2 = tables.back();
- EXPECT_THAT(table_l2.first(table_l2.size() - 1), Each(ABSENT_ENTRY));
+ EXPECT_THAT(table_l2.first(table_l2.size() - 1),
+ Each(arch_mm_absent_pte(TOP_LEVEL)));
ASSERT_TRUE(arch_mm_pte_is_table(table_l2.last(1)[0], TOP_LEVEL));
- auto table_l1 = get_table(arch_mm_table_from_pte(table_l2.last(1)[0]));
- EXPECT_THAT(table_l1.first(table_l1.size() - 1), Each(ABSENT_ENTRY));
+ auto table_l1 = get_table(
+ arch_mm_table_from_pte(table_l2.last(1)[0], TOP_LEVEL));
+ EXPECT_THAT(table_l1.first(table_l1.size() - 1),
+ Each(arch_mm_absent_pte(TOP_LEVEL - 1)));
ASSERT_TRUE(arch_mm_pte_is_table(table_l1.last(1)[0], TOP_LEVEL - 1));
- auto table_l0 = get_table(arch_mm_table_from_pte(table_l1.last(1)[0]));
- EXPECT_THAT(table_l0.first(table_l0.size() - 1), Each(ABSENT_ENTRY));
+ auto table_l0 = get_table(
+ arch_mm_table_from_pte(table_l1.last(1)[0], TOP_LEVEL - 1));
+ EXPECT_THAT(table_l0.first(table_l0.size() - 1),
+ Each(arch_mm_absent_pte(TOP_LEVEL - 2)));
ASSERT_TRUE(arch_mm_pte_is_block(table_l0.last(1)[0], TOP_LEVEL - 2));
- EXPECT_THAT(pa_addr(arch_mm_block_from_pte(table_l0.last(1)[0])),
+ EXPECT_THAT(pa_addr(arch_mm_block_from_pte(table_l0.last(1)[0],
+ TOP_LEVEL - 2)),
Eq(0x200'0000'0000 - PAGE_SIZE));
mm_ptable_fini(&ptable, mode);
@@ -212,40 +225,50 @@
auto tables = get_ptable(ptable, mode);
EXPECT_THAT(tables, SizeIs(4));
- EXPECT_THAT(std::span(tables).last(2), Each(Each(ABSENT_ENTRY)));
+ EXPECT_THAT(std::span(tables).last(2),
+ Each(Each(arch_mm_absent_pte(TOP_LEVEL))));
ASSERT_THAT(TOP_LEVEL, Eq(2));
/* Check only the last page of the first table is mapped. */
auto table0_l2 = tables.front();
- EXPECT_THAT(table0_l2.first(table0_l2.size() - 1), Each(ABSENT_ENTRY));
+ EXPECT_THAT(table0_l2.first(table0_l2.size() - 1),
+ Each(arch_mm_absent_pte(TOP_LEVEL)));
ASSERT_TRUE(arch_mm_pte_is_table(table0_l2.last(1)[0], TOP_LEVEL));
- auto table0_l1 =
- get_table(arch_mm_table_from_pte(table0_l2.last(1)[0]));
- EXPECT_THAT(table0_l1.first(table0_l1.size() - 1), Each(ABSENT_ENTRY));
+ auto table0_l1 = get_table(
+ arch_mm_table_from_pte(table0_l2.last(1)[0], TOP_LEVEL));
+ EXPECT_THAT(table0_l1.first(table0_l1.size() - 1),
+ Each(arch_mm_absent_pte(TOP_LEVEL - 1)));
ASSERT_TRUE(arch_mm_pte_is_table(table0_l1.last(1)[0], TOP_LEVEL - 1));
- auto table0_l0 =
- get_table(arch_mm_table_from_pte(table0_l1.last(1)[0]));
- EXPECT_THAT(table0_l0.first(table0_l0.size() - 1), Each(ABSENT_ENTRY));
+ auto table0_l0 = get_table(
+ arch_mm_table_from_pte(table0_l1.last(1)[0], TOP_LEVEL - 1));
+ EXPECT_THAT(table0_l0.first(table0_l0.size() - 1),
+ Each(arch_mm_absent_pte(TOP_LEVEL - 2)));
ASSERT_TRUE(arch_mm_pte_is_block(table0_l0.last(1)[0], TOP_LEVEL - 2));
- EXPECT_THAT(pa_addr(arch_mm_block_from_pte(table0_l0.last(1)[0])),
+ EXPECT_THAT(pa_addr(arch_mm_block_from_pte(table0_l0.last(1)[0],
+ TOP_LEVEL - 2)),
Eq(pa_addr(map_begin)));
/* Checl only the first page of the second table is mapped. */
auto table1_l2 = tables[1];
- EXPECT_THAT(table1_l2.subspan(1), Each(ABSENT_ENTRY));
+ EXPECT_THAT(table1_l2.subspan(1), Each(arch_mm_absent_pte(TOP_LEVEL)));
ASSERT_TRUE(arch_mm_pte_is_table(table1_l2[0], TOP_LEVEL));
- auto table1_l1 = get_table(arch_mm_table_from_pte(table1_l2[0]));
- EXPECT_THAT(table1_l1.subspan(1), Each(ABSENT_ENTRY));
+ auto table1_l1 =
+ get_table(arch_mm_table_from_pte(table1_l2[0], TOP_LEVEL));
+ EXPECT_THAT(table1_l1.subspan(1),
+ Each(arch_mm_absent_pte(TOP_LEVEL - 1)));
ASSERT_TRUE(arch_mm_pte_is_table(table1_l1[0], TOP_LEVEL - 1));
- auto table1_l0 = get_table(arch_mm_table_from_pte(table1_l1[0]));
- EXPECT_THAT(table1_l0.subspan(1), Each(ABSENT_ENTRY));
+ auto table1_l0 =
+ get_table(arch_mm_table_from_pte(table1_l1[0], TOP_LEVEL - 1));
+ EXPECT_THAT(table1_l0.subspan(1),
+ Each(arch_mm_absent_pte(TOP_LEVEL - 2)));
ASSERT_TRUE(arch_mm_pte_is_block(table1_l0[0], TOP_LEVEL - 2));
- EXPECT_THAT(pa_addr(arch_mm_block_from_pte(table1_l0[0])),
- Eq(pa_addr(pa_add(map_begin, PAGE_SIZE))));
+ EXPECT_THAT(
+ pa_addr(arch_mm_block_from_pte(table1_l0[0], TOP_LEVEL - 2)),
+ Eq(pa_addr(pa_add(map_begin, PAGE_SIZE))));
mm_ptable_fini(&ptable, mode);
}
@@ -267,10 +290,10 @@
_1, TOP_LEVEL))))));
for (uint64_t i = 0; i < tables.size(); ++i) {
for (uint64_t j = 0; j < MM_PTE_PER_PAGE; ++j) {
- EXPECT_THAT(
- pa_addr(arch_mm_block_from_pte(tables[i][j])),
- Eq((i * mm_entry_size(TOP_LEVEL + 1)) +
- (j * mm_entry_size(TOP_LEVEL))))
+ EXPECT_THAT(pa_addr(arch_mm_block_from_pte(tables[i][j],
+ TOP_LEVEL)),
+ Eq((i * mm_entry_size(TOP_LEVEL + 1)) +
+ (j * mm_entry_size(TOP_LEVEL))))
<< "i=" << i << " j=" << j;
}
}
@@ -312,8 +335,9 @@
ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0x1234'5678),
pa_init(0x5000), mode, &ipa));
EXPECT_THAT(ipa_addr(ipa), Eq(0x1234'5678));
- EXPECT_THAT(get_ptable(ptable, mode),
- AllOf(SizeIs(4), Each(Each(ABSENT_ENTRY))));
+ EXPECT_THAT(
+ get_ptable(ptable, mode),
+ AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
mm_ptable_fini(&ptable, mode);
}
@@ -354,8 +378,9 @@
&ptable, pa_init(0),
pa_init(std::numeric_limits<uintpaddr_t>::max()), mode, &ipa));
EXPECT_THAT(ipa_addr(ipa), Eq(0));
- EXPECT_THAT(get_ptable(ptable, mode),
- AllOf(SizeIs(4), Each(Each(ABSENT_ENTRY))));
+ EXPECT_THAT(
+ get_ptable(ptable, mode),
+ AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
mm_ptable_fini(&ptable, mode);
}
@@ -391,8 +416,9 @@
ASSERT_TRUE(mm_vm_identity_map(
&ptable, VM_MEM_END, pa_init(0xf0'0000'0000'0000), mode, &ipa));
EXPECT_THAT(ipa_addr(ipa), Eq(pa_addr(VM_MEM_END)));
- EXPECT_THAT(get_ptable(ptable, mode),
- AllOf(SizeIs(4), Each(Each(ABSENT_ENTRY))));
+ EXPECT_THAT(
+ get_ptable(ptable, mode),
+ AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
mm_ptable_fini(&ptable, 0);
}
@@ -454,8 +480,9 @@
struct mm_ptable ptable;
ASSERT_TRUE(mm_ptable_init(&ptable, mode));
EXPECT_TRUE(mm_vm_unmap_hypervisor(&ptable, mode));
- EXPECT_THAT(get_ptable(ptable, mode),
- AllOf(SizeIs(4), Each(Each(ABSENT_ENTRY))));
+ EXPECT_THAT(
+ get_ptable(ptable, mode),
+ AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
mm_ptable_fini(&ptable, mode);
}
@@ -469,8 +496,9 @@
ASSERT_TRUE(mm_ptable_init(&ptable, mode));
EXPECT_TRUE(
mm_vm_unmap(&ptable, pa_init(12345), pa_init(987652), mode));
- EXPECT_THAT(get_ptable(ptable, mode),
- AllOf(SizeIs(4), Each(Each(ABSENT_ENTRY))));
+ EXPECT_THAT(
+ get_ptable(ptable, mode),
+ AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
mm_ptable_fini(&ptable, mode);
}
@@ -491,8 +519,9 @@
ASSERT_TRUE(
mm_vm_identity_map(&ptable, l1_begin, l1_end, mode, nullptr));
EXPECT_TRUE(mm_vm_unmap(&ptable, pa_init(0), VM_MEM_END, mode));
- EXPECT_THAT(get_ptable(ptable, mode),
- AllOf(SizeIs(4), Each(Each(ABSENT_ENTRY))));
+ EXPECT_THAT(
+ get_ptable(ptable, mode),
+ AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
mm_ptable_fini(&ptable, mode);
}
@@ -510,8 +539,9 @@
mm_vm_identity_map(&ptable, map_begin, map_end, mode, nullptr));
ASSERT_TRUE(mm_vm_unmap(&ptable, pa_add(map_begin, 93),
pa_add(map_begin, 99), mode));
- EXPECT_THAT(get_ptable(ptable, mode),
- AllOf(SizeIs(4), Each(Each(ABSENT_ENTRY))));
+ EXPECT_THAT(
+ get_ptable(ptable, mode),
+ AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
mm_ptable_fini(&ptable, mode);
}
@@ -528,8 +558,9 @@
ASSERT_TRUE(
mm_vm_identity_map(&ptable, map_begin, map_end, mode, nullptr));
ASSERT_TRUE(mm_vm_unmap(&ptable, map_begin, map_end, mode));
- EXPECT_THAT(get_ptable(ptable, mode),
- AllOf(SizeIs(4), Each(Each(ABSENT_ENTRY))));
+ EXPECT_THAT(
+ get_ptable(ptable, mode),
+ AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
mm_ptable_fini(&ptable, mode);
}
@@ -590,8 +621,9 @@
nullptr));
ASSERT_TRUE(mm_vm_unmap(&ptable, pa_add(page_begin, 100),
pa_add(page_begin, 50), mode));
- EXPECT_THAT(get_ptable(ptable, mode),
- AllOf(SizeIs(4), Each(Each(ABSENT_ENTRY))));
+ EXPECT_THAT(
+ get_ptable(ptable, mode),
+ AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
mm_ptable_fini(&ptable, mode);
}
@@ -637,8 +669,9 @@
mm_vm_identity_map(&ptable, l1_begin, l1_end, mode, nullptr));
ASSERT_TRUE(mm_vm_unmap(&ptable, l0_begin, l0_end, mode));
ASSERT_TRUE(mm_vm_unmap(&ptable, l1_begin, l1_end, mode));
- EXPECT_THAT(get_ptable(ptable, mode),
- AllOf(SizeIs(4), Each(Each(ABSENT_ENTRY))));
+ EXPECT_THAT(
+ get_ptable(ptable, mode),
+ AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
mm_ptable_fini(&ptable, MM_MODE_STAGE1);
}
@@ -719,8 +752,9 @@
struct mm_ptable ptable;
ASSERT_TRUE(mm_ptable_init(&ptable, mode));
mm_ptable_defrag(&ptable, mode);
- EXPECT_THAT(get_ptable(ptable, mode),
- AllOf(SizeIs(4), Each(Each(ABSENT_ENTRY))));
+ EXPECT_THAT(
+ get_ptable(ptable, mode),
+ AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
mm_ptable_fini(&ptable, mode);
}
@@ -744,8 +778,9 @@
ASSERT_TRUE(mm_vm_unmap(&ptable, l0_begin, l0_end, mode));
ASSERT_TRUE(mm_vm_unmap(&ptable, l1_begin, l1_end, mode));
mm_ptable_defrag(&ptable, 0);
- EXPECT_THAT(get_ptable(ptable, mode),
- AllOf(SizeIs(4), Each(Each(ABSENT_ENTRY))));
+ EXPECT_THAT(
+ get_ptable(ptable, mode),
+ AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
mm_ptable_fini(&ptable, mode);
}
diff --git a/test/arch/mm_test.c b/test/arch/mm_test.c
index 9bc947f..60f927b 100644
--- a/test/arch/mm_test.c
+++ b/test/arch/mm_test.c
@@ -160,39 +160,39 @@
* The address and attributes of a block must be preserved when encoding and
* decoding.
*/
-#define LEVEL_TEST(lvl) \
- TEST(arch_mm, block_addr_and_attrs_preserved_level##lvl) \
- { \
- uint8_t level = lvl; \
- paddr_t addr; \
- uint64_t attrs; \
- pte_t block_pte; \
- \
- /* Test doesn't apply if a block is not allowed. */ \
- if (!arch_mm_is_block_allowed(level)) { \
- return; \
- } \
- \
- addr = pa_init(0); \
- attrs = arch_mm_mode_to_attrs(0); \
- block_pte = arch_mm_block_pte(level, addr, attrs); \
- EXPECT_EQ(arch_mm_pte_attrs(block_pte), attrs); \
- EXPECT_EQ(pa_addr(arch_mm_block_from_pte(block_pte)), \
- pa_addr(addr)); \
- \
- addr = pa_init(PAGE_SIZE * 17); \
- attrs = arch_mm_mode_to_attrs(MM_MODE_INVALID); \
- block_pte = arch_mm_block_pte(level, addr, attrs); \
- EXPECT_EQ(arch_mm_pte_attrs(block_pte), attrs); \
- EXPECT_EQ(pa_addr(arch_mm_block_from_pte(block_pte)), \
- pa_addr(addr)); \
- \
- addr = pa_init(PAGE_SIZE * 500); \
- attrs = arch_mm_mode_to_attrs(MM_MODE_R | MM_MODE_W); \
- block_pte = arch_mm_block_pte(level, addr, attrs); \
- EXPECT_EQ(arch_mm_pte_attrs(block_pte), attrs); \
- EXPECT_EQ(pa_addr(arch_mm_block_from_pte(block_pte)), \
- pa_addr(addr)); \
+#define LEVEL_TEST(lvl) \
+ TEST(arch_mm, block_addr_and_attrs_preserved_level##lvl) \
+ { \
+ uint8_t level = lvl; \
+ paddr_t addr; \
+ uint64_t attrs; \
+ pte_t block_pte; \
+ \
+ /* Test doesn't apply if a block is not allowed. */ \
+ if (!arch_mm_is_block_allowed(level)) { \
+ return; \
+ } \
+ \
+ addr = pa_init(0); \
+ attrs = arch_mm_mode_to_attrs(0); \
+ block_pte = arch_mm_block_pte(level, addr, attrs); \
+ EXPECT_EQ(arch_mm_pte_attrs(block_pte, level), attrs); \
+ EXPECT_EQ(pa_addr(arch_mm_block_from_pte(block_pte, level)), \
+ pa_addr(addr)); \
+ \
+ addr = pa_init(PAGE_SIZE * 17); \
+ attrs = arch_mm_mode_to_attrs(MM_MODE_INVALID); \
+ block_pte = arch_mm_block_pte(level, addr, attrs); \
+ EXPECT_EQ(arch_mm_pte_attrs(block_pte, level), attrs); \
+ EXPECT_EQ(pa_addr(arch_mm_block_from_pte(block_pte, level)), \
+ pa_addr(addr)); \
+ \
+ addr = pa_init(PAGE_SIZE * 500); \
+ attrs = arch_mm_mode_to_attrs(MM_MODE_R | MM_MODE_W); \
+ block_pte = arch_mm_block_pte(level, addr, attrs); \
+ EXPECT_EQ(arch_mm_pte_attrs(block_pte, level), attrs); \
+ EXPECT_EQ(pa_addr(arch_mm_block_from_pte(block_pte, level)), \
+ pa_addr(addr)); \
}
EXPAND_LEVEL_TESTS
#undef LEVEL_TEST