/* Source: gitiles blob 6ed2d1b2c8fdc4687653697f501b97db4ae8baa9 (web-export residue). */
#include "hf/load.h"
#include <stdbool.h>
#include "hf/api.h"
#include "hf/dlog.h"
#include "hf/memiter.h"
#include "hf/mm.h"
#include "hf/std.h"
#include "hf/vm.h"
/**
* Copies data to an unmapped location by mapping it for write, copying the
* data, then unmapping it.
*/
static bool copy_to_unmapped(paddr_t to, const void *from, size_t size)
{
	paddr_t end = pa_add(to, size);
	void *dest = mm_identity_map(to, end, MM_MODE_W);

	if (dest == NULL) {
		return false;
	}

	memcpy(dest, from, size);
	mm_unmap(to, end, 0);

	return true;
}
/**
 * Moves the kernel of the primary VM to its final destination.
 *
 * The destination is the first `kernel_align`-aligned address at or after the
 * end of the hypervisor's own image (bin_end), so the copy cannot clobber the
 * hypervisor itself.
 */
static bool relocate(const char *from, size_t size)
{
	/* TODO: This is a hack. We must read the alignment from the binary. */
	/* 512KiB: the alignment the kernel is assumed to require. */
	const size_t kernel_align = 0x80000;
	extern char bin_end[];
	size_t tmp = (size_t)&bin_end[0];
	paddr_t dest = pa_init((tmp + kernel_align - 1) & ~(kernel_align - 1));

	dlog("bin_end is at %p, copying to %p\n", &bin_end[0], pa_addr(dest));
	return copy_to_unmapped(dest, from, size);
}
/**
* Looks for a file in the given cpio archive. The filename is not
* null-terminated, so we use a memory iterator to represent it. The file, if
* found, is returned in the "it" argument.
*/
static bool memiter_find_file(const struct memiter *cpio,
const struct memiter *filename,
struct memiter *it)
{
const char *fname;
const void *fcontents;
size_t fsize;
struct memiter iter = *cpio;
while (cpio_next(&iter, &fname, &fcontents, &fsize)) {
if (memiter_iseq(filename, fname)) {
memiter_init(it, fcontents, fsize);
return true;
}
}
return false;
}
/**
* Looks for a file in the given cpio archive. The file, if found, is returned
* in the "it" argument.
*/
static bool find_file(const struct memiter *cpio, const char *name,
struct memiter *it)
{
const char *fname;
const void *fcontents;
size_t fsize;
struct memiter iter = *cpio;
while (cpio_next(&iter, &fname, &fcontents, &fsize)) {
if (!strcmp(fname, name)) {
memiter_init(it, fcontents, fsize);
return true;
}
}
return false;
}
/**
 * Loads the primary VM.
 *
 * Finds "vmlinuz" in the cpio archive and copies it to its run location,
 * locates "initrd.img" (returned to the caller via "initrd"), gives the VM an
 * identity map of the first 1TB of address space minus the hypervisor's own
 * pages, and starts vcpu 0 at the relocated kernel with "kernel_arg" as its
 * argument.
 *
 * Returns false if either file is missing or any initialisation/mapping step
 * fails.
 */
// TODO: kernel_arg is a size_t???
bool load_primary(const struct memiter *cpio, size_t kernel_arg,
struct memiter *initrd)
{
struct memiter it;
if (!find_file(cpio, "vmlinuz", &it)) {
dlog("Unable to find vmlinuz\n");
return false;
}
/* [it.next, it.limit) spans the kernel image within the archive. */
if (!relocate(it.next, it.limit - it.next)) {
dlog("Unable to relocate kernel for primary vm.\n");
return false;
}
if (!find_file(cpio, "initrd.img", initrd)) {
dlog("Unable to find initrd.img\n");
return false;
}
{
/*
 * Recompute the kernel entry point by rounding this function's own
 * address up to a 0x80000 (512KiB) boundary. This mirrors the
 * bin_end-based rounding in relocate() and only agrees with it when
 * load_primary and bin_end fall in the same 512KiB window.
 * NOTE(review): fragile — confirm it matches relocate()'s destination.
 */
uintpaddr_t tmp = (uintpaddr_t)&load_primary;
tmp = (tmp + 0x80000 - 1) & ~(0x80000 - 1);
/* Primary VM gets id 0 and all cpus. */
if (!vm_init(&primary_vm, 0, MAX_CPUS)) {
dlog("Unable to initialise primary vm\n");
return false;
}
/* Map the 1TB of memory. */
/* TODO: We should do a whitelist rather than a blacklist. */
if (!mm_vm_identity_map(
&primary_vm.ptable, pa_init(0),
pa_init(UINT64_C(1024) * 1024 * 1024 * 1024),
MM_MODE_R | MM_MODE_W | MM_MODE_X |
MM_MODE_NOINVALIDATE,
NULL)) {
dlog("Unable to initialise memory for primary vm\n");
return false;
}
/* The primary must not be able to touch the hypervisor's own pages. */
if (!mm_ptable_unmap_hypervisor(&primary_vm.ptable,
MM_MODE_NOINVALIDATE)) {
dlog("Unable to unmap hypervisor from primary vm\n");
return false;
}
vm_start_vcpu(&primary_vm, 0, ipa_init(tmp), kernel_arg);
}
return true;
}
/**
 * Loads all secondary VMs into the memory range [mem_begin, *mem_end).
 * "mem_end" is updated to reflect the fact that some of the memory isn't
 * available to the primary VM anymore.
 *
 * The manifest is the "vms.txt" file in the cpio archive; each entry is read
 * as <memory bytes> <vcpu count> <kernel filename> — presumably
 * whitespace-separated, per the memiter_parse_* helpers (TODO: confirm).
 *
 * Each VM's memory is carved from the top of the range downwards. An entry
 * that fails to load is skipped rather than aborting; the only hard failure
 * (returning false) is being unable to unmap a secondary's memory from the
 * primary VM.
 */
bool load_secondary(const struct memiter *cpio, paddr_t mem_begin,
paddr_t *mem_end)
{
struct memiter it;
struct memiter str;
uint64_t mem;
uint64_t cpu;
uint32_t count;
/* A missing manifest just means there are no secondary VMs to load. */
if (!find_file(cpio, "vms.txt", &it)) {
dlog("vms.txt is missing\n");
return true;
}
/* Round the last address down to the page size. */
*mem_end = pa_init(pa_addr(*mem_end) & ~(PAGE_SIZE - 1));
for (count = 0;
memiter_parse_uint(&it, &mem) && memiter_parse_uint(&it, &cpu) &&
memiter_parse_str(&it, &str) && count < MAX_VMS;
count++) {
struct memiter kernel;
paddr_t secondary_mem_begin;
paddr_t secondary_mem_end;
ipaddr_t secondary_entry;
if (!memiter_find_file(cpio, &str, &kernel)) {
dlog("Unable to load kernel for vm %u\n", count);
continue;
}
/* Round up to page size. */
mem = (mem + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
if (mem > pa_addr(*mem_end) - pa_addr(mem_begin)) {
/* NOTE(review): "%u" with a uint64_t — check dlog's handling. */
dlog("Not enough memory for vm %u (%u bytes)\n", count,
mem);
continue;
}
if (mem < kernel.limit - kernel.next) {
dlog("Kernel is larger than available memory for vm "
"%u\n",
count);
continue;
}
/* Take this VM's chunk from the top of the remaining range. */
secondary_mem_end = *mem_end;
*mem_end = pa_init(pa_addr(*mem_end) - mem);
secondary_mem_begin = *mem_end;
if (!copy_to_unmapped(*mem_end, kernel.next,
kernel.limit - kernel.next)) {
dlog("Unable to copy kernel for vm %u\n", count);
continue;
}
/* Secondary VM ids start at 1; 0 is the primary (see load_primary). */
if (!vm_init(&secondary_vm[count], count + 1, cpu)) {
dlog("Unable to initialise vm %u\n", count);
continue;
}
/* TODO: Remove this. */
/* Grant VM access to uart. */
mm_vm_identity_map_page(&secondary_vm[count].ptable,
pa_init(PL011_BASE),
MM_MODE_R | MM_MODE_W | MM_MODE_D |
MM_MODE_NOINVALIDATE,
NULL);
/* Grant the VM access to the memory. The IPA of the start of the
 * mapped range is returned through secondary_entry. */
if (!mm_vm_identity_map(&secondary_vm[count].ptable,
secondary_mem_begin, secondary_mem_end,
MM_MODE_R | MM_MODE_W | MM_MODE_X |
MM_MODE_NOINVALIDATE,
&secondary_entry)) {
dlog("Unable to initialise memory for vm %u\n", count);
continue;
}
/* Deny the primary VM access to this memory. */
if (!mm_vm_unmap(&primary_vm.ptable, secondary_mem_begin,
secondary_mem_end, MM_MODE_NOINVALIDATE)) {
dlog("Unable to unmap secondary VM from primary VM\n");
return false;
}
/* pa_addr(*mem_end) here equals secondary_mem_begin.
 * NOTE(review): "%u"/"0x%x" with 64-bit values — check dlog. */
dlog("Loaded VM%u with %u vcpus, entry at 0x%x\n", count, cpu,
pa_addr(*mem_end));
vm_start_vcpu(&secondary_vm[count], 0, secondary_entry, 0);
}
/*
 * NOTE(review): "continue" in the loop body still executes count++, so
 * entries that failed to load are counted here and leave gaps of
 * partially-initialised secondary_vm slots — confirm this is intended.
 */
secondary_vm_count = count;
return true;
}