Remove single-page mapping functions.

This was more code to maintain for no benefit. The removed functions were
specialized versions of the generic mapping, used only to map the PL011
UART, so they were also rarely run.

Their uses have been replaced with the generic mapping function, but if a
need for them can be shown later, they can be added back.
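
For reference, the single-page case maps directly onto the generic range
API by passing an end address one page past the start, as in the updated
call sites below (`mode` here abbreviates the flag combination used at
each call site):

    /* Previously: the specialized single-page helper. */
    mm_vm_identity_map_page(&vm->ptable, pa_init(PL011_BASE), mode, NULL);

    /* Now: the generic range mapping covering exactly one page. */
    mm_vm_identity_map(&vm->ptable, pa_init(PL011_BASE),
                       pa_add(pa_init(PL011_BASE), PAGE_SIZE), mode, NULL);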

Change-Id: I836007dd690dcc9dbdc79bd424127139f20fc0a5
diff --git a/inc/hf/mm.h b/inc/hf/mm.h
index c322b96..4df8899 100644
--- a/inc/hf/mm.h
+++ b/inc/hf/mm.h
@@ -62,8 +62,6 @@
 
 bool mm_vm_identity_map(struct mm_ptable *t, paddr_t begin, paddr_t end,
 			int mode, ipaddr_t *ipa);
-bool mm_vm_identity_map_page(struct mm_ptable *t, paddr_t begin, int mode,
-			     ipaddr_t *ipa);
 bool mm_vm_unmap(struct mm_ptable *t, paddr_t begin, paddr_t end, int mode);
 bool mm_vm_is_mapped(struct mm_ptable *t, ipaddr_t ipa, int mode);
 bool mm_vm_translate(struct mm_ptable *t, ipaddr_t ipa, paddr_t *pa);
diff --git a/src/load.c b/src/load.c
index eb6a9f9..c118b89 100644
--- a/src/load.c
+++ b/src/load.c
@@ -329,10 +329,11 @@
 
 		/* TODO: Remove this. */
 		/* Grant VM access to uart. */
-		mm_vm_identity_map_page(&vm->ptable, pa_init(PL011_BASE),
-					MM_MODE_R | MM_MODE_W | MM_MODE_D |
-						MM_MODE_NOINVALIDATE,
-					NULL);
+		mm_vm_identity_map(&vm->ptable, pa_init(PL011_BASE),
+				   pa_add(pa_init(PL011_BASE), PAGE_SIZE),
+				   MM_MODE_R | MM_MODE_W | MM_MODE_D |
+					   MM_MODE_NOINVALIDATE,
+				   NULL);
 
 		/* Grant the VM access to the memory. */
 		if (!mm_vm_identity_map(&vm->ptable, secondary_mem_begin,
diff --git a/src/mm.c b/src/mm.c
index 5785d64..210fa8e 100644
--- a/src/mm.c
+++ b/src/mm.c
@@ -382,43 +382,6 @@
 }
 
 /**
- * Updates the given table such that a single physical address page is mapped
- * into the address space with the corresponding address page in the provided
- * architecture-agnostic mode.
- */
-static bool mm_ptable_identity_map_page(struct mm_ptable *t, paddr_t pa,
-					int mode)
-{
-	size_t i;
-	uint64_t attrs = arch_mm_mode_to_attrs(mode);
-	pte_t *table = ptr_from_pa(t->table);
-	bool sync = !(mode & MM_MODE_NOSYNC);
-	ptable_addr_t addr;
-
-	pa = arch_mm_clear_pa(pa);
-	addr = pa_addr(pa);
-
-	for (i = arch_mm_max_level(mode); i > 0; i--) {
-		pte_t *pte = &table[mm_index(addr, i)];
-		if (arch_mm_pte_is_block(*pte, i) &&
-		    arch_mm_pte_attrs(*pte) == attrs) {
-			/* If the page is within a block that is already mapped
-			 * with the appropriate attributes, no need to do
-			 * anything more. */
-			return true;
-		}
-		table = mm_populate_table_pte(pte, i, sync);
-		if (!table) {
-			return false;
-		}
-	}
-
-	i = mm_index(addr, 0);
-	table[i] = arch_mm_block_pte(0, pa, attrs);
-	return true;
-}
-
-/**
  * Writes the given table to the debug log, calling itself recursively to
  * write sub-tables.
  */
@@ -684,24 +647,6 @@
 }
 
 /**
- * Updates a VM's page table such that the given physical address page is
- * mapped in the address space at the corresponding address page in the
- * architecture-agnostic mode provided.
- */
-bool mm_vm_identity_map_page(struct mm_ptable *t, paddr_t begin, int mode,
-			     ipaddr_t *ipa)
-{
-	bool success =
-		mm_ptable_identity_map_page(t, begin, mode & ~MM_MODE_STAGE1);
-
-	if (success && ipa != NULL) {
-		*ipa = ipa_from_pa(begin);
-	}
-
-	return success;
-}
-
-/**
  * Updates the VM's table such that the given physical address range is not
  * mapped in the address space.
  */
@@ -778,9 +723,10 @@
 
 	/* Map page for uart. */
 	/* TODO: We may not want to map this. */
-	mm_ptable_identity_map_page(&ptable, pa_init(PL011_BASE),
-				    MM_MODE_R | MM_MODE_W | MM_MODE_D |
-					    MM_MODE_NOSYNC | MM_MODE_STAGE1);
+	mm_ptable_identity_map(&ptable, pa_init(PL011_BASE),
+			       pa_add(pa_init(PL011_BASE), PAGE_SIZE),
+			       MM_MODE_R | MM_MODE_W | MM_MODE_D |
+				       MM_MODE_NOSYNC | MM_MODE_STAGE1);
 
 	/* Map each section. */
 	mm_identity_map(layout_text_begin(), layout_text_end(),
diff --git a/src/mm_test.cc b/src/mm_test.cc
index 23091d3..d4c1d32 100644
--- a/src/mm_test.cc
+++ b/src/mm_test.cc
@@ -282,68 +282,4 @@
 	}
 }
 
-/** Mapping a single page should result in just that page being mapped. */
-TEST(mm, vm_identity_map_page)
-{
-	auto test_heap = std::make_unique<uint8_t[]>(TEST_HEAP_SIZE);
-	halloc_init((size_t)test_heap.get(), TEST_HEAP_SIZE);
-
-	/* Start with an empty page table. */
-	pte_t *table = (pte_t *)halloc_aligned(PAGE_SIZE, PAGE_SIZE);
-	init_absent(table);
-	struct mm_ptable ptable;
-	ptable.table = pa_init((uintpaddr_t)table);
-
-	/* Try mapping the first page. */
-	ipaddr_t ipa = ipa_init(-1);
-	EXPECT_TRUE(mm_vm_identity_map_page(&ptable, pa_init(0), 0, &ipa));
-	EXPECT_THAT(ipa_addr(ipa), Eq(0));
-
-	/* Check that the first page is mapped, and nothing else. */
-	for (uint64_t i = 1; i < ENTRY_COUNT; ++i) {
-		EXPECT_THAT(table[i], Eq(ABSENT_ENTRY)) << "i=" << i;
-	}
-	ASSERT_TRUE(arch_mm_pte_is_table(table[0], TOP_LEVEL));
-	pte_t *subtable_a = (pte_t *)ptr_from_va(
-		va_from_pa(arch_mm_table_from_pte(table[0])));
-	for (uint64_t i = 1; i < ENTRY_COUNT; ++i) {
-		EXPECT_THAT(subtable_a[i], Eq(ABSENT_ENTRY)) << "i=" << i;
-	}
-	ASSERT_TRUE(arch_mm_pte_is_table(subtable_a[0], TOP_LEVEL - 1));
-	pte_t *subtable_aa = (pte_t *)ptr_from_va(
-		va_from_pa(arch_mm_table_from_pte(subtable_a[0])));
-	for (uint64_t i = 1; i < ENTRY_COUNT; ++i) {
-		EXPECT_THAT(subtable_aa[i], Eq(ABSENT_ENTRY)) << "i=" << i;
-	}
-	EXPECT_TRUE(arch_mm_pte_is_block(subtable_aa[0], TOP_LEVEL - 2));
-	EXPECT_THAT(pa_addr(arch_mm_block_from_pte(subtable_aa[0])), Eq(0));
-}
-
-/** Mapping a page that is already mapped should be a no-op. */
-TEST(mm, vm_identity_map_page_already_mapped)
-{
-	auto test_heap = std::make_unique<uint8_t[]>(TEST_HEAP_SIZE);
-	halloc_init((size_t)test_heap.get(), TEST_HEAP_SIZE);
-
-	/* Start with a full page table mapping everything. */
-	pte_t *table = (pte_t *)halloc_aligned(PAGE_SIZE, PAGE_SIZE);
-	init_blocks(table, TOP_LEVEL, pa_init(0), 0);
-	struct mm_ptable ptable;
-	ptable.table = pa_init((uintpaddr_t)table);
-
-	/* Try mapping the first page. */
-	ipaddr_t ipa = ipa_init(-1);
-	EXPECT_TRUE(mm_vm_identity_map_page(&ptable, pa_init(0), 0, &ipa));
-	EXPECT_THAT(ipa_addr(ipa), Eq(0));
-
-	/*
-	 * The table should still be full of blocks, with no subtables or
-	 * anything else.
-	 */
-	for (uint64_t i = 0; i < ENTRY_COUNT; ++i) {
-		EXPECT_TRUE(arch_mm_pte_is_block(table[i], TOP_LEVEL))
-			<< "i=" << i;
-	}
-}
-
 } /* namespace */