Separate address types from memory management.
This allows extracting some code from the architecture specific headers.
Change-Id: I37f7d9955a10025ef491c4e2ca76a6ffaf123a6b
diff --git a/inc/addr.h b/inc/addr.h
new file mode 100644
index 0000000..24a910c
--- /dev/null
+++ b/inc/addr.h
@@ -0,0 +1,114 @@
+#ifndef _ADDR_H
+#define _ADDR_H
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "arch_addr.h"
+
+/* An opaque type for a physical address. */
+typedef struct {
+ uintpaddr_t pa;
+} paddr_t;
+
+/* An opaque type for an intermediate physical address. */
+typedef struct {
+ uintpaddr_t ipa;
+} ipaddr_t;
+
+/* An opaque type for a virtual address. */
+typedef struct {
+ uintvaddr_t va;
+} vaddr_t;
+
+/**
+ * Initializes a physical address.
+ */
+static inline paddr_t pa_init(uintpaddr_t p)
+{
+ return (paddr_t){.pa = p};
+}
+
+/**
+ * Extracts the absolute physical address.
+ */
+static inline uintpaddr_t pa_addr(paddr_t pa)
+{
+ return pa.pa;
+}
+
+/**
+ * Initializes an intermediate physical address.
+ */
+static inline ipaddr_t ipa_init(uintpaddr_t v)
+{
+ return (ipaddr_t){.ipa = v};
+}
+
+/**
+ * Extracts the absolute intermediate physical address.
+ */
+static inline uintpaddr_t ipa_addr(ipaddr_t ipa)
+{
+ return ipa.ipa;
+}
+
+/**
+ * Initializes a virtual address.
+ */
+static inline vaddr_t va_init(uintvaddr_t v)
+{
+ return (vaddr_t){.va = v};
+}
+
+/**
+ * Extracts the absolute virtual address.
+ */
+static inline uintvaddr_t va_addr(vaddr_t va)
+{
+ return va.va;
+}
+
+/**
+ * Advances a virtual address.
+ */
+static inline vaddr_t va_add(vaddr_t va, size_t n)
+{
+ return va_init(va_addr(va) + n);
+}
+
+/**
+ * Casts a physical address to a virtual address.
+ */
+static inline vaddr_t va_from_pa(paddr_t pa)
+{
+ return va_init(pa_addr(pa));
+}
+
+/**
+ * Casts a virtual address to a physical address.
+ */
+static inline paddr_t pa_from_va(vaddr_t va)
+{
+ return pa_init(va_addr(va));
+}
+
+/**
+ * Casts a pointer to a virtual address.
+ */
+static inline vaddr_t va_from_ptr(const void *p)
+{
+ return (vaddr_t){.va = (uintvaddr_t)p};
+}
+
+/**
+ * Casts a virtual address to a pointer. Only use when the virtual address is
+ * mapped for the calling context.
+ * TODO: check the mapping for a range and return a memiter?
+ */
+static inline void *ptr_from_va(vaddr_t va)
+{
+ return (void *)va_addr(va);
+}
+
+#endif /* _ADDR_H */
diff --git a/inc/mm.h b/inc/mm.h
index 1e00310..74158a7 100644
--- a/inc/mm.h
+++ b/inc/mm.h
@@ -4,6 +4,7 @@
#include <stdbool.h>
#include <stdint.h>
+#include "addr.h"
#include "arch_mm.h"
struct mm_ptable {
@@ -11,11 +12,6 @@
uint32_t id;
};
-/* An opaque type for an intermediate physical address from a VM. */
-typedef struct {
- uintpaddr_t ipa;
-} ipaddr_t;
-
#define PAGE_SIZE (1 << PAGE_BITS)
/* The following are arch-independent page mapping modes. */
@@ -61,46 +57,12 @@
void mm_defrag(void);
/**
- * Initializes an intermeditate physical address.
- */
-static inline ipaddr_t ipa_init(uintvaddr_t v)
-{
- return (ipaddr_t){.ipa = v};
-}
-
-/**
- * Extracts the absolute intermediate physical address.
- */
-static inline uintpaddr_t ipa_addr(ipaddr_t ipa)
-{
- return ipa.ipa;
-}
-
-/**
- * Converts a physical address to a virtual address. Addresses are currently
- * identity mapped so this is a simple type convertion.
- */
-static inline vaddr_t mm_va_from_pa(paddr_t pa)
-{
- return va_init(pa_addr(pa));
-}
-
-/**
- * Converts a virtual address to a physical address. Addresses are currently
- * identity mapped so this is a simple type convertion.
- */
-static inline paddr_t mm_pa_from_va(vaddr_t va)
-{
- return pa_init(va_addr(va));
-}
-
-/**
* Converts an intermediate physical address to a physical address. Addresses
* are currently identity mapped so this is a simple type convertion. Returns
* true if the address was mapped in the table and the address was converted.
*/
-static inline bool mm_pa_from_ipa(struct mm_ptable *t, ipaddr_t ipa,
- paddr_t *pa)
+static inline bool mm_ptable_translate_ipa(struct mm_ptable *t, ipaddr_t ipa,
+ paddr_t *pa)
{
/* TODO: the ptable functions map physical to virtual addresses but they
* should really be mapping to intermediate physical addresses.
@@ -115,13 +77,4 @@
return false;
}
-/**
- * Converts a virtual address to a pointer. Only use when the virtual address
- * is mapped for the calling context.
- */
-static inline void *mm_ptr_from_va(vaddr_t va)
-{
- return (void *)va_addr(va);
-}
-
#endif /* _MM_H */
diff --git a/src/api.c b/src/api.c
index e860e11..fd8f06d 100644
--- a/src/api.c
+++ b/src/api.c
@@ -142,15 +142,15 @@
* provided the address was acessible from the VM which ensures that the
* caller isn't trying to use another VM's memory.
*/
- if (!mm_pa_from_ipa(&vm->ptable, send, &pa_send) ||
- !mm_pa_from_ipa(&vm->ptable, recv, &pa_recv)) {
+ if (!mm_ptable_translate_ipa(&vm->ptable, send, &pa_send) ||
+ !mm_ptable_translate_ipa(&vm->ptable, recv, &pa_recv)) {
ret = -1;
goto exit;
}
- send_begin = mm_va_from_pa(pa_send);
+ send_begin = va_from_pa(pa_send);
send_end = va_add(send_begin, PAGE_SIZE);
- recv_begin = mm_va_from_pa(pa_recv);
+ recv_begin = va_from_pa(pa_recv);
recv_end = va_add(recv_begin, PAGE_SIZE);
/* Map the send page as read-only in the hypervisor address space. */
@@ -170,8 +170,8 @@
}
/* Save pointers to the pages. */
- vm->rpc.send = mm_ptr_from_va(send_begin);
- vm->rpc.recv = mm_ptr_from_va(recv_begin);
+ vm->rpc.send = ptr_from_va(send_begin);
+ vm->rpc.recv = ptr_from_va(recv_begin);
/* TODO: Notify any waiters. */
diff --git a/src/arch/aarch64/inc/arch_addr.h b/src/arch/aarch64/inc/arch_addr.h
new file mode 100644
index 0000000..92bc2fa
--- /dev/null
+++ b/src/arch/aarch64/inc/arch_addr.h
@@ -0,0 +1,18 @@
+#ifndef _ARCH_ADDR_H
+#define _ARCH_ADDR_H
+
+#include <stdint.h>
+
+#define PAGE_LEVEL_BITS 9
+#define PAGE_BITS 12
+
+/* Integer type large enough to hold a physical address. */
+typedef uintptr_t uintpaddr_t;
+
+/* Integer type large enough to hold a virtual address. */
+typedef uintptr_t uintvaddr_t;
+
+/* A page table entry. */
+typedef uint64_t pte_t;
+
+#endif /* _ARCH_ADDR_H */
diff --git a/src/arch/aarch64/inc/arch_mm.h b/src/arch/aarch64/inc/arch_mm.h
index ba44387..2bd80f2 100644
--- a/src/arch/aarch64/inc/arch_mm.h
+++ b/src/arch/aarch64/inc/arch_mm.h
@@ -3,69 +3,8 @@
#include <stdbool.h>
#include <stddef.h>
-#include <stdint.h>
-/* Integer type large enough to hold a physical address. */
-typedef uintptr_t uintpaddr_t;
-
-/* Integer type large enough to hold a virtual address. */
-typedef uintptr_t uintvaddr_t;
-
-/* A page table entry. */
-typedef uint64_t pte_t;
-
-/* An opaque type for a physical address. */
-typedef struct {
- uintpaddr_t pa;
-} paddr_t;
-
-/* An opaque type for a virtual address. */
-typedef struct {
- uintvaddr_t va;
-} vaddr_t;
-
-/**
- * Initializes a physical address.
- */
-static inline paddr_t pa_init(uintpaddr_t p)
-{
- return (paddr_t){.pa = p};
-}
-
-/**
- * Extracts the absolute physical address.
- */
-static inline uintpaddr_t pa_addr(paddr_t pa)
-{
- return pa.pa;
-}
-
-/**
- * Initializes a virtual address.
- */
-static inline vaddr_t va_init(uintvaddr_t v)
-{
- return (vaddr_t){.va = v};
-}
-
-/**
- * Extracts the absolute virtual address.
- */
-static inline uintvaddr_t va_addr(vaddr_t va)
-{
- return va.va;
-}
-
-/**
- * Advances a virtual address.
- */
-static inline vaddr_t va_add(vaddr_t va, size_t n)
-{
- return va_init(va_addr(va) + n);
-}
-
-#define PAGE_LEVEL_BITS 9
-#define PAGE_BITS 12
+#include "addr.h"
/**
* Converts a physical address to a table PTE.
diff --git a/src/fdt_handler.c b/src/fdt_handler.c
index 8121790..99ea4d4 100644
--- a/src/fdt_handler.c
+++ b/src/fdt_handler.c
@@ -174,14 +174,14 @@
bool ret = false;
/* Map the fdt header in. */
- if (!mm_identity_map(mm_va_from_pa(fdt_addr),
- va_init(pa_addr(fdt_addr) + fdt_header_size()),
+ if (!mm_identity_map(va_from_pa(fdt_addr),
+ va_add(va_from_pa(fdt_addr), fdt_header_size()),
MM_MODE_R)) {
dlog("Unable to map FDT header.\n");
goto err_unmap_fdt_header;
}
- fdt = mm_ptr_from_va(mm_va_from_pa(fdt_addr));
+ fdt = ptr_from_va(va_from_pa(fdt_addr));
if (!fdt_root_node(&n, fdt)) {
dlog("FDT failed validation.\n");
@@ -189,8 +189,8 @@
}
/* Map the rest of the fdt in. */
- if (!mm_identity_map(mm_va_from_pa(fdt_addr),
- va_init(pa_addr(fdt_addr) + fdt_total_size(fdt)),
+ if (!mm_identity_map(va_from_pa(fdt_addr),
+ va_add(va_from_pa(fdt_addr), fdt_total_size(fdt)),
MM_MODE_R)) {
dlog("Unable to map full FDT.\n");
goto err_unmap_fdt_header;
@@ -213,13 +213,13 @@
ret = true;
out_unmap_fdt:
- mm_unmap(mm_va_from_pa(fdt_addr),
- va_init(pa_addr(fdt_addr) + fdt_total_size(fdt)), 0);
+ mm_unmap(va_from_pa(fdt_addr),
+ va_add(va_from_pa(fdt_addr), fdt_total_size(fdt)), 0);
return ret;
err_unmap_fdt_header:
- mm_unmap(mm_va_from_pa(fdt_addr),
- va_init(pa_addr(fdt_addr) + fdt_header_size()), 0);
+ mm_unmap(va_from_pa(fdt_addr),
+ va_add(va_from_pa(fdt_addr), fdt_header_size()), 0);
return false;
}
@@ -230,14 +230,14 @@
bool ret = false;
/* Map the fdt header in. */
- if (!mm_identity_map(mm_va_from_pa(fdt_addr),
- va_init(pa_addr(fdt_addr) + fdt_header_size()),
+ if (!mm_identity_map(va_from_pa(fdt_addr),
+ va_add(va_from_pa(fdt_addr), fdt_header_size()),
MM_MODE_R)) {
dlog("Unable to map FDT header.\n");
return false;
}
- fdt = mm_ptr_from_va(mm_va_from_pa(fdt_addr));
+ fdt = ptr_from_va(va_from_pa(fdt_addr));
if (!fdt_root_node(&n, fdt)) {
dlog("FDT failed validation.\n");
@@ -245,9 +245,9 @@
}
/* Map the fdt (+ a page) in r/w mode in preparation for updating it. */
- if (!mm_identity_map(mm_va_from_pa(fdt_addr),
- va_init(pa_addr(fdt_addr) + fdt_total_size(fdt) +
- PAGE_SIZE),
+ if (!mm_identity_map(va_from_pa(fdt_addr),
+ va_add(va_from_pa(fdt_addr),
+ fdt_total_size(fdt) + PAGE_SIZE),
MM_MODE_R | MM_MODE_W)) {
dlog("Unable to map FDT in r/w mode.\n");
goto err_unmap_fdt_header;
@@ -291,9 +291,9 @@
out_unmap_fdt:
/* Unmap FDT. */
- if (!mm_unmap(mm_va_from_pa(fdt_addr),
- va_init(pa_addr(fdt_addr) + fdt_total_size(fdt) +
- PAGE_SIZE),
+ if (!mm_unmap(va_from_pa(fdt_addr),
+ va_add(va_from_pa(fdt_addr),
+ fdt_total_size(fdt) + PAGE_SIZE),
0)) {
dlog("Unable to unmap writable FDT.\n");
return false;
@@ -301,7 +301,7 @@
return ret;
err_unmap_fdt_header:
- mm_unmap(mm_va_from_pa(fdt_addr),
- va_init(pa_addr(fdt_addr) + fdt_header_size()), 0);
+ mm_unmap(va_from_pa(fdt_addr),
+ va_add(va_from_pa(fdt_addr), fdt_header_size()), 0);
return false;
}
diff --git a/src/load.c b/src/load.c
index d36ac21..6fb7f82 100644
--- a/src/load.c
+++ b/src/load.c
@@ -15,14 +15,14 @@
*/
static bool copy_to_unmaped(paddr_t to, const void *from, size_t size)
{
- vaddr_t begin = mm_va_from_pa(to);
+ vaddr_t begin = va_from_pa(to);
vaddr_t end = va_add(begin, size);
if (!mm_identity_map(begin, end, MM_MODE_W)) {
return false;
}
- memcpy(mm_ptr_from_va(begin), from, size);
+ memcpy(ptr_from_va(begin), from, size);
mm_unmap(begin, end, 0);
@@ -211,20 +211,18 @@
MM_MODE_NOINVALIDATE);
/* Grant the VM access to the memory. */
- if (!mm_ptable_identity_map(
- &secondary_vm[count].ptable,
- mm_va_from_pa(*mem_end),
- va_add(mm_va_from_pa(*mem_end), mem),
- MM_MODE_R | MM_MODE_W | MM_MODE_X |
- MM_MODE_NOINVALIDATE)) {
+ if (!mm_ptable_identity_map(&secondary_vm[count].ptable,
+ va_from_pa(*mem_end),
+ va_add(va_from_pa(*mem_end), mem),
+ MM_MODE_R | MM_MODE_W | MM_MODE_X |
+ MM_MODE_NOINVALIDATE)) {
dlog("Unable to initialise memory for vm %u\n", count);
continue;
}
/* Deny the primary VM access to this memory. */
- if (!mm_ptable_unmap(&primary_vm.ptable,
- mm_va_from_pa(*mem_end),
- va_add(mm_va_from_pa(*mem_end), mem),
+ if (!mm_ptable_unmap(&primary_vm.ptable, va_from_pa(*mem_end),
+ va_add(va_from_pa(*mem_end), mem),
MM_MODE_NOINVALIDATE)) {
dlog("Unable to unmap secondary VM from primary VM\n");
return false;
diff --git a/src/main.c b/src/main.c
index 5f81a89..d41b1b3 100644
--- a/src/main.c
+++ b/src/main.c
@@ -68,12 +68,12 @@
pa_addr(params.initrd_end) - 1);
/* Map initrd in, and initialise cpio parser. */
- if (!mm_identity_map(mm_va_from_pa(params.initrd_begin),
- mm_va_from_pa(params.initrd_end), MM_MODE_R)) {
+ if (!mm_identity_map(va_from_pa(params.initrd_begin),
+ va_from_pa(params.initrd_end), MM_MODE_R)) {
panic("unable to map initrd in");
}
- memiter_init(&cpio, mm_ptr_from_va(mm_va_from_pa(params.initrd_begin)),
+ memiter_init(&cpio, ptr_from_va(va_from_pa(params.initrd_begin)),
pa_addr(params.initrd_end) - pa_addr(params.initrd_begin));
/* Load all VMs. */
@@ -87,10 +87,8 @@
}
/* Prepare to run by updating bootparams as seens by primary VM. */
- update.initrd_begin =
- mm_pa_from_va(va_init((uintvaddr_t)primary_initrd.next));
- update.initrd_end =
- mm_pa_from_va(va_init((uintvaddr_t)primary_initrd.limit));
+ update.initrd_begin = pa_from_va(va_from_ptr(primary_initrd.next));
+ update.initrd_end = pa_from_va(va_from_ptr(primary_initrd.limit));
update.reserved_begin = new_mem_end;
update.reserved_end =
pa_init(pa_addr(params.mem_end) - pa_addr(new_mem_end));
diff --git a/src/mm.c b/src/mm.c
index 9ecc313..d240c55 100644
--- a/src/mm.c
+++ b/src/mm.c
@@ -207,8 +207,8 @@
uint64_t attrs = arch_mm_mode_to_attrs(mode);
int flags = (mode & MM_MODE_NOSYNC) ? 0 : MAP_FLAG_SYNC;
int level = arch_mm_max_level(mode);
- pte_t *table = mm_ptr_from_va(mm_va_from_pa(t->table));
- paddr_t paddr = arch_mm_clear_pa(mm_pa_from_va(begin));
+ pte_t *table = ptr_from_va(va_from_pa(t->table));
+ paddr_t paddr = arch_mm_clear_pa(pa_from_va(begin));
begin = arch_mm_clear_va(begin);
end = arch_mm_clear_va(va_add(end, PAGE_SIZE - 1));
@@ -241,18 +241,18 @@
{
int flags = (mode & MM_MODE_NOSYNC) ? 0 : MAP_FLAG_SYNC;
int level = arch_mm_max_level(mode);
- pte_t *table = mm_ptr_from_va(mm_va_from_pa(t->table));
+ pte_t *table = ptr_from_va(va_from_pa(t->table));
begin = arch_mm_clear_va(begin);
end = arch_mm_clear_va(va_add(end, PAGE_SIZE - 1));
/* Also do updates in two steps, similarly to mm_ptable_identity_map. */
- if (!mm_map_level(begin, end, mm_pa_from_va(begin), 0, table, level,
+ if (!mm_map_level(begin, end, pa_from_va(begin), 0, table, level,
flags)) {
return false;
}
- mm_map_level(begin, end, mm_pa_from_va(begin), 0, table, level,
+ mm_map_level(begin, end, pa_from_va(begin), 0, table, level,
flags | MAP_FLAG_COMMIT);
/* Invalidate the tlb. */
@@ -272,9 +272,9 @@
{
size_t i;
uint64_t attrs = arch_mm_mode_to_attrs(mode);
- pte_t *table = mm_ptr_from_va(mm_va_from_pa(t->table));
+ pte_t *table = ptr_from_va(va_from_pa(t->table));
bool sync = !(mode & MM_MODE_NOSYNC);
- paddr_t pa = arch_mm_clear_pa(mm_pa_from_va(va));
+ paddr_t pa = arch_mm_clear_pa(pa_from_va(va));
va = arch_mm_clear_va(va);
@@ -319,7 +319,7 @@
*/
void mm_ptable_dump(struct mm_ptable *t, int mode)
{
- pte_t *table = mm_ptr_from_va(mm_va_from_pa(t->table));
+ pte_t *table = ptr_from_va(va_from_pa(t->table));
int max_level = arch_mm_max_level(mode);
mm_dump_table_recursive(table, max_level, max_level);
}
@@ -386,7 +386,7 @@
*/
bool mm_ptable_is_mapped(struct mm_ptable *t, vaddr_t addr, int mode)
{
- pte_t *table = mm_ptr_from_va(mm_va_from_pa(t->table));
+ pte_t *table = ptr_from_va(va_from_pa(t->table));
int level = arch_mm_max_level(mode);
addr = arch_mm_clear_va(addr);