Quit pretending that IPAs aren't identity mapped.

Information about memory sharing will be stored in the page table
entries rather than in a new data structure, as introducing one would
add complexity and new potential for memory management issues.
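
As a rough sketch of that direction (nothing below is introduced by
this change), the sharing state could be carried as extra mode bits on
the entries; MM_MODE_UNOWNED, mm_vm_get_mode() and ipa_add() are
hypothetical names used only for illustration:

    /*
     * Sketch only: MM_MODE_UNOWNED, mm_vm_get_mode() and ipa_add() are
     * hypothetical and not part of this change. The point is that the
     * sharing state is read straight out of the page table entries,
     * with no side structure to keep in sync.
     */
    #define MM_MODE_UNOWNED 0x0100

    static bool may_return_memory(struct mm_ptable *t, ipaddr_t begin)
    {
            int mode;

            if (!mm_vm_get_mode(t, begin,
                                ipa_add(begin, PAGE_SIZE), &mode)) {
                    return false;
            }

            /* Only borrowed (unowned) memory may be handed back. */
            return (mode & MM_MODE_UNOWNED) != 0;
    }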

The information includes tracking whether memory is owned by a VM. If a
region of memory is borrowed by another VM, the memory can only be
passed back to the owner, and this is checked by looking in the page
table. Identity mapping is needed so we know where to look in the table
as, otherwise, searching the tables for a physical address is
impractical.
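
Because IPA == PA, a physical address handed back by a borrower can be
looked up directly in a VM's table by treating it as an IPA, instead of
scanning every entry for a matching output address. A minimal sketch
using the helpers touched by this change (ipa_init() is assumed to
exist in addr.h alongside pa_init()):

    static bool vm_maps_pa(struct mm_ptable *t, paddr_t pa)
    {
            /* Identity mapping: the PA doubles as the IPA to look up. */
            ipaddr_t ipa = ipa_init(pa_addr(pa));

            return mm_vm_is_mapped(t, ipa, 0);
    }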

Change-Id: I417376b3ecd31bc07518ac5c51b9fb7df2f4b3e1
diff --git a/inc/hf/addr.h b/inc/hf/addr.h
index 09a6c09..b67e1c2 100644
--- a/inc/hf/addr.h
+++ b/inc/hf/addr.h
@@ -117,6 +117,14 @@
 }
 
 /**
+ * Casts an intermediate physical address to a physical address.
+ */
+static inline paddr_t pa_from_ipa(ipaddr_t ipa)
+{
+	return pa_init(ipa_addr(ipa));
+}
+
+/**
  * Casts a pointer to a virtual address.
  */
 static inline vaddr_t va_from_ptr(const void *p)
diff --git a/inc/hf/mm.h b/inc/hf/mm.h
index 472e0e3..8567acd 100644
--- a/inc/hf/mm.h
+++ b/inc/hf/mm.h
@@ -110,7 +110,6 @@
 		 struct mpool *ppool);
 bool mm_vm_unmap_hypervisor(struct mm_ptable *t, int mode, struct mpool *ppool);
 bool mm_vm_is_mapped(struct mm_ptable *t, ipaddr_t ipa, int mode);
-bool mm_vm_translate(struct mm_ptable *t, ipaddr_t ipa, paddr_t *pa);
 
 bool mm_init(struct mpool *ppool);
 bool mm_cpu_init(void);
diff --git a/src/api.c b/src/api.c
index 7cbe1b1..4ab7416 100644
--- a/src/api.c
+++ b/src/api.c
@@ -212,26 +212,26 @@
 	 * these pages aren't and won't be shared.
 	 */
 
-	/*
-	 * Convert the intermediate physical addresses to physical address
-	 * provided the address was acessible from the VM which ensures that the
-	 * caller isn't trying to use another VM's memory.
-	 */
-	if (!mm_vm_translate(&vm->ptable, send, &pa_send_begin) ||
-	    !mm_vm_translate(&vm->ptable, recv, &pa_recv_begin)) {
+	/* Ensure the pages are accessible from the VM. */
+	if (!mm_vm_is_mapped(&vm->ptable, send, 0) ||
+	    !mm_vm_is_mapped(&vm->ptable, recv, 0)) {
 		ret = -1;
 		goto exit;
 	}
 
+	/* Convert to physical addresses. */
+	pa_send_begin = pa_from_ipa(send);
+	pa_send_end = pa_add(pa_send_begin, PAGE_SIZE);
+
+	pa_recv_begin = pa_from_ipa(recv);
+	pa_recv_end = pa_add(pa_recv_begin, PAGE_SIZE);
+
 	/* Fail if the same page is used for the send and receive pages. */
 	if (pa_addr(pa_send_begin) == pa_addr(pa_recv_begin)) {
 		ret = -1;
 		goto exit;
 	}
 
-	pa_send_end = pa_add(pa_send_begin, PAGE_SIZE);
-	pa_recv_end = pa_add(pa_recv_begin, PAGE_SIZE);
-
 	/* Map the send page as read-only in the hypervisor address space. */
 	vm->mailbox.send = mm_identity_map(pa_send_begin, pa_send_end,
 					   MM_MODE_R, &api_page_pool);
diff --git a/src/mm.c b/src/mm.c
index 16a5a93..ea239d0 100644
--- a/src/mm.c
+++ b/src/mm.c
@@ -807,22 +807,6 @@
 }
 
 /**
- * Translates an intermediate physical address to a physical address. Addresses
- * are currently identity mapped so this is a simple type convertion. Returns
- * true if the address was mapped in the table and the address was converted.
- */
-bool mm_vm_translate(struct mm_ptable *t, ipaddr_t ipa, paddr_t *pa)
-{
-	bool mapped = mm_vm_is_mapped(t, ipa, 0);
-
-	if (mapped) {
-		*pa = pa_init(ipa_addr(ipa));
-	}
-
-	return mapped;
-}
-
-/**
  * Updates the hypervisor page table such that the given physical address range
  * is mapped into the address space at the corresponding address range in the
  * architecture-agnostic mode provided.