Move aarch64 mm implementation out of header.

The linker can optimize this just as well, and we avoid polluting the
namespace and leaking implementation details from the header.

Change-Id: I63690b76a0dbf0565a6ddc1bfe155c1fe2d2ca9e
diff --git a/src/arch/aarch64/inc/hf/arch/mm.h b/src/arch/aarch64/inc/hf/arch/mm.h
index 6872f64..dd9c829 100644
--- a/src/arch/aarch64/inc/hf/arch/mm.h
+++ b/src/arch/aarch64/inc/hf/arch/mm.h
@@ -27,192 +27,27 @@
 #define PAGE_LEVEL_BITS 9
 
 /*
- * This mask actually includes everything other than the address bits: not just
- * the attributes but also some ignored bits, reserved bits, and the entry type
- * bits which distinguish between absent, table, block or page entries.
+ * TODO: move the arch_mm_* declarations to a shared header. That header can
+ * also check that the specific implementation defines everything it needs to.
  */
-#define ARCH_AARCH64_MM_PTE_ATTR_MASK \
-	(((UINT64_C(1) << PAGE_BITS) - 1) | ~((UINT64_C(1) << 48) - 1))
-
-/**
- * Returns the encoding of a page table entry that isn't present.
- */
-static inline pte_t arch_mm_absent_pte(int level)
-{
-	return 0;
-}
-
-/**
- * Converts a physical address to a table PTE.
- *
- * The spec says that 'Table descriptors for stage 2 translations do not
- * include any attribute field', so we don't take any attributes as arguments.
- */
-static inline pte_t arch_mm_table_pte(int level, paddr_t pa)
-{
-	/* This is the same for all levels on aarch64. */
-	(void)level;
-	return pa_addr(pa) | 0x3;
-}
-
-/**
- * Converts a physical address to a block PTE.
- *
- * The level must allow block entries.
- */
-static inline pte_t arch_mm_block_pte(int level, paddr_t pa, uint64_t attrs)
-{
-	pte_t pte = pa_addr(pa) | attrs | 0x1;
-	if (level == 0) {
-		/* A level 0 'block' is actually a page entry. */
-		pte |= 0x2;
-	}
-	return pte;
-}
-
-/**
- * Specifies whether block mappings are acceptable at the given level.
- *
- * Level 0 must allow block entries.
- */
-static inline bool arch_mm_is_block_allowed(int level)
-{
-	return level <= 2;
-}
-
-/**
- * Determines if the given pte is present, i.e., if it points to another table,
- * to a page, or a block of pages.
- */
-static inline bool arch_mm_pte_is_present(pte_t pte, int level)
-{
-	return (pte & 0x1) != 0;
-}
-
-/**
- * Determines if the given pte references another table.
- */
-static inline bool arch_mm_pte_is_table(pte_t pte, int level)
-{
-	return level != 0 && (pte & 0x3) == 0x3;
-}
-
-/**
- * Determines if the given pte references a block of pages.
- */
-static inline bool arch_mm_pte_is_block(pte_t pte, int level)
-{
-	/* We count pages at level 0 as blocks. */
-	return arch_mm_is_block_allowed(level) &&
-	       (pte & 0x3) == (level == 0 ? 0x3 : 0x1);
-}
-
-static inline uint64_t arch_aarch64_mm_clear_pte_attrs(pte_t pte)
-{
-	return pte & ~ARCH_AARCH64_MM_PTE_ATTR_MASK;
-}
-
-/**
- * Clears the given physical address, i.e., sets the ignored bits (from a page
- * table perspective) to zero.
- */
-static inline paddr_t arch_mm_clear_pa(paddr_t pa)
-{
-	return pa_init(arch_aarch64_mm_clear_pte_attrs(pa_addr(pa)));
-}
-
-/**
- * Extracts the physical address of the block referred to by the given page
- * table entry.
- */
-static inline paddr_t arch_mm_block_from_pte(pte_t pte)
-{
-	return pa_init(arch_aarch64_mm_clear_pte_attrs(pte));
-}
-
-/**
- * Extracts the physical address of the page table referred to by the given page
- * table entry.
- */
-static inline paddr_t arch_mm_table_from_pte(pte_t pte)
-{
-	return pa_init(arch_aarch64_mm_clear_pte_attrs(pte));
-}
-
-/**
- * Extracts the architecture specific attributes applies to the given page table
- * entry.
- */
-static inline uint64_t arch_mm_pte_attrs(pte_t pte)
-{
-	return pte & ARCH_AARCH64_MM_PTE_ATTR_MASK;
-}
-
-/**
- * Invalidates stage-1 TLB entries referring to the given virtual address range.
- */
-static inline void arch_mm_invalidate_stage1_range(vaddr_t va_begin,
-						   vaddr_t va_end)
-{
-	uintvaddr_t begin = va_addr(va_begin);
-	uintvaddr_t end = va_addr(va_end);
-	uintvaddr_t it;
-
-	begin >>= 12;
-	end >>= 12;
-
-	__asm__ volatile("dsb ishst");
-
-	for (it = begin; it < end; it += (UINT64_C(1) << (PAGE_BITS - 12))) {
-		__asm__("tlbi vae2is, %0" : : "r"(it));
-	}
-
-	__asm__ volatile("dsb ish");
-}
-
-/**
- * Invalidates stage-2 TLB entries referring to the given intermediate physical
- * address range.
- */
-static inline void arch_mm_invalidate_stage2_range(ipaddr_t va_begin,
-						   ipaddr_t va_end)
-{
-	uintpaddr_t begin = ipa_addr(va_begin);
-	uintpaddr_t end = ipa_addr(va_end);
-	uintpaddr_t it;
-
-	/* TODO: This only applies to the current VMID. */
-
-	begin >>= 12;
-	end >>= 12;
-
-	__asm__ volatile("dsb ishst");
-
-	for (it = begin; it < end; it += (UINT64_C(1) << (PAGE_BITS - 12))) {
-		__asm__("tlbi ipas2e1, %0" : : "r"(it));
-	}
-
-	__asm__ volatile(
-		"dsb ish\n"
-		"tlbi vmalle1is\n"
-		"dsb ish\n");
-}
-
-/**
- * Ensures that the range of data in the cache is written back so that it is
- * visible to all cores in the system.
- */
-void arch_mm_write_back_dcache(void *base, size_t size);
-
-uint64_t arch_mm_mode_to_attrs(int mode);
-bool arch_mm_init(paddr_t table, bool first);
-uint8_t arch_mm_max_level(int mode);
-uint8_t arch_mm_root_table_count(int mode);
-
-/**
- * Given the attrs from a table at some level and the attrs from all the blocks
- * in that table, return equivalent attrs to use for a block which will replace
- * the entire table.
- */
+pte_t arch_mm_absent_pte(int level);
+pte_t arch_mm_table_pte(int level, paddr_t pa);
+pte_t arch_mm_block_pte(int level, paddr_t pa, uint64_t attrs);
+bool arch_mm_is_block_allowed(int level);
+bool arch_mm_pte_is_present(pte_t pte, int level);
+bool arch_mm_pte_is_table(pte_t pte, int level);
+bool arch_mm_pte_is_block(pte_t pte, int level);
+paddr_t arch_mm_clear_pa(paddr_t pa);
+paddr_t arch_mm_block_from_pte(pte_t pte);
+paddr_t arch_mm_table_from_pte(pte_t pte);
+uint64_t arch_mm_pte_attrs(pte_t pte);
 uint64_t arch_mm_combine_table_entry_attrs(uint64_t table_attrs,
 					   uint64_t block_attrs);
+void arch_mm_invalidate_stage1_range(vaddr_t va_begin, vaddr_t va_end);
+void arch_mm_invalidate_stage2_range(ipaddr_t va_begin, ipaddr_t va_end);
+void arch_mm_write_back_dcache(void *base, size_t size);
+uint8_t arch_mm_max_level(int mode);
+uint8_t arch_mm_root_table_count(int mode);
+uint64_t arch_mm_mode_to_attrs(int mode);
+bool arch_mm_init(paddr_t table, bool first);
diff --git a/src/arch/aarch64/mm.c b/src/arch/aarch64/mm.c
index 366fc7b..568bfb3 100644
--- a/src/arch/aarch64/mm.c
+++ b/src/arch/aarch64/mm.c
@@ -86,9 +86,185 @@
 
 /* clang-format on */
 
+/*
+ * This mask actually includes everything other than the address bits: not just
+ * the attributes but also some ignored bits, reserved bits, and the entry type
+ * bits which distinguish between absent, table, block or page entries.
+ */
+#define PTE_ATTR_MASK \
+	(((UINT64_C(1) << PAGE_BITS) - 1) | ~((UINT64_C(1) << 48) - 1))
+
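+/*
+ * For illustration: with 4KiB granules (PAGE_BITS == 12) the mask works out
+ * to 0xffff000000000fff, i.e. everything below bit 12 plus everything at or
+ * above bit 48.
+ */
+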
 static uint8_t mm_s2_max_level;
 static uint8_t mm_s2_root_table_count;
 
+/**
+ * Returns the encoding of a page table entry that isn't present.
+ */
+pte_t arch_mm_absent_pte(int level)
+{
+	(void)level;
+	return 0;
+}
+
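+/*
+ * Note: any value with bit 0 clear reads as not-present (see
+ * arch_mm_pte_is_present below); zero is simply the natural encoding.
+ */
+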
+/**
+ * Converts a physical address to a table PTE.
+ *
+ * The spec says that 'Table descriptors for stage 2 translations do not
+ * include any attribute field', so we don't take any attributes as arguments.
+ */
+pte_t arch_mm_table_pte(int level, paddr_t pa)
+{
+	/* This is the same for all levels on aarch64. */
+	(void)level;
+	return pa_addr(pa) | 0x3;
+}
+
+/**
+ * Converts a physical address to a block PTE.
+ *
+ * The level must allow block entries.
+ */
+pte_t arch_mm_block_pte(int level, paddr_t pa, uint64_t attrs)
+{
+	pte_t pte = pa_addr(pa) | attrs | 0x1;
+	if (level == 0) {
+		/* A level 0 'block' is actually a page entry. */
+		pte |= 0x2;
+	}
+	return pte;
+}
+
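+/*
+ * A sketch of the resulting type bits (illustrative, not compiled): at
+ * level 2, arch_mm_block_pte(2, pa, attrs) has bits[1:0] == 0b01 (block),
+ * while at level 0 it has bits[1:0] == 0b11 (page), the same encoding a
+ * table descriptor uses at higher levels.
+ */
+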
+/**
+ * Specifies whether block mappings are acceptable at the given level.
+ *
+ * Level 0 must allow block entries.
+ */
+bool arch_mm_is_block_allowed(int level)
+{
+	return level <= 2;
+}
+
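+/*
+ * For scale, assuming PAGE_BITS == 12: a level-n leaf entry maps
+ * 2^(PAGE_BITS + n * PAGE_LEVEL_BITS) bytes, i.e. 4KiB pages at level 0,
+ * 2MiB blocks at level 1 and 1GiB blocks at level 2.
+ */
+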
+/**
+ * Determines if the given pte is present, i.e., if it points to another table,
+ * to a page, or a block of pages.
+ */
+bool arch_mm_pte_is_present(pte_t pte, int level)
+{
+	(void)level;
+	return (pte & 0x1) != 0;
+}
+
+/**
+ * Determines if the given pte references another table.
+ */
+bool arch_mm_pte_is_table(pte_t pte, int level)
+{
+	return level != 0 && (pte & 0x3) == 0x3;
+}
+
+/**
+ * Determines if the given pte references a block of pages.
+ */
+bool arch_mm_pte_is_block(pte_t pte, int level)
+{
+	/* We count pages at level 0 as blocks. */
+	return arch_mm_is_block_allowed(level) &&
+	       (pte & 0x3) == (level == 0 ? 0x3 : 0x1);
+}
+
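+/*
+ * Summary of the descriptor type encodings the predicates above rely on:
+ *
+ *   bits[1:0] == 0b00 or 0b10: absent at every level
+ *   bits[1:0] == 0b01:         block at levels 1 and 2
+ *   bits[1:0] == 0b11:         page at level 0, table at higher levels
+ */
+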
+/**
+ * Masks out the attribute, type and ignored bits, leaving only the address
+ * bits of the entry.
+ */
+static uint64_t arch_aarch64_mm_clear_pte_attrs(pte_t pte)
+{
+	return pte & ~PTE_ATTR_MASK;
+}
+
+/**
+ * Clears the given physical address, i.e., sets the ignored bits (from a page
+ * table perspective) to zero.
+ */
+paddr_t arch_mm_clear_pa(paddr_t pa)
+{
+	return pa_init(arch_aarch64_mm_clear_pte_attrs(pa_addr(pa)));
+}
+
+/**
+ * Extracts the physical address of the block referred to by the given page
+ * table entry.
+ */
+paddr_t arch_mm_block_from_pte(pte_t pte)
+{
+	return pa_init(arch_aarch64_mm_clear_pte_attrs(pte));
+}
+
+/**
+ * Extracts the physical address of the page table referred to by the given page
+ * table entry.
+ */
+paddr_t arch_mm_table_from_pte(pte_t pte)
+{
+	return pa_init(arch_aarch64_mm_clear_pte_attrs(pte));
+}
+
+/**
+ * Extracts the architecture-specific attributes applied to the given page
+ * table entry.
+ */
+uint64_t arch_mm_pte_attrs(pte_t pte)
+{
+	return pte & PTE_ATTR_MASK;
+}
+
+/**
+ * Invalidates stage-1 TLB entries referring to the given virtual address range.
+ */
+void arch_mm_invalidate_stage1_range(vaddr_t va_begin, vaddr_t va_end)
+{
+	uintvaddr_t begin = va_addr(va_begin);
+	uintvaddr_t end = va_addr(va_end);
+	uintvaddr_t it;
+
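+	/*
+	 * TLBI-by-VA operations take VA[55:12] in Xt[43:0], so the addresses
+	 * are shifted right by 12 rather than by PAGE_BITS.
+	 */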
+	begin >>= 12;
+	end >>= 12;
+
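+	/*
+	 * Ensure prior page table updates are observable before any entries
+	 * are invalidated.
+	 */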
+	__asm__ volatile("dsb ishst");
+
+	for (it = begin; it < end; it += (UINT64_C(1) << (PAGE_BITS - 12))) {
+		__asm__("tlbi vae2is, %0" : : "r"(it));
+	}
+
+	__asm__ volatile("dsb ish");
+}
+
+/**
+ * Invalidates stage-2 TLB entries referring to the given intermediate physical
+ * address range.
+ */
+void arch_mm_invalidate_stage2_range(ipaddr_t va_begin, ipaddr_t va_end)
+{
+	uintpaddr_t begin = ipa_addr(va_begin);
+	uintpaddr_t end = ipa_addr(va_end);
+	uintpaddr_t it;
+
+	/* TODO: This only applies to the current VMID. */
+
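+	/*
+	 * TLBI IPAS2E1 takes IPA[47:12] in Xt[35:0], hence the same shift by
+	 * 12 as for stage 1.
+	 */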
+	begin >>= 12;
+	end >>= 12;
+
+	__asm__ volatile("dsb ishst");
+
+	for (it = begin; it < end; it += (UINT64_C(1) << (PAGE_BITS - 12))) {
+		__asm__("tlbi ipas2e1, %0" : : "r"(it));
+	}
+
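+	/*
+	 * TLBI IPAS2E1 only targets entries used for stage 2 translation, so
+	 * also flush any combined stage 1 + stage 2 entries with a broadcast
+	 * stage 1 invalidate.
+	 */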
+	__asm__ volatile(
+		"dsb ish\n"
+		"tlbi vmalle1is\n"
+		"dsb ish\n");
+}
+
+/**
+ * Ensures that the range of data in the cache is written back so that it is
+ * visible to all cores in the system.
+ */
 void arch_mm_write_back_dcache(void *base, size_t size)
 {
 	/* Clean each data cache line that corresponds to data in the range. */
@@ -327,6 +503,11 @@
 	return true;
 }
 
+/**
+ * Given the attrs from a table at some level and the attrs from all the blocks
+ * in that table, returns equivalent attrs to use for a block which will replace
+ * the entire table.
+ */
 uint64_t arch_mm_combine_table_entry_attrs(uint64_t table_attrs,
 					   uint64_t block_attrs)
 {