Add support for multiple host CPUs.
Change-Id: I0fd2fd85732e8e7beeacc595a9ca92c3cd98e73a
diff --git a/inc/cpu.h b/inc/cpu.h
index c1ab1c6..d5bd29b 100644
--- a/inc/cpu.h
+++ b/inc/cpu.h
@@ -52,8 +52,9 @@
size_t cpu_index(struct cpu *c);
void cpu_irq_enable(struct cpu *c);
void cpu_irq_disable(struct cpu *c);
-bool cpu_on(struct cpu *c);
+bool cpu_on(struct cpu *c, size_t entry, size_t arg);
void cpu_off(struct cpu *c);
+struct cpu *cpu_find(size_t id);
void vcpu_init(struct vcpu *vcpu, struct vm *vm);
void vcpu_on(struct vcpu *vcpu);
diff --git a/inc/mm.h b/inc/mm.h
index df027dd..a6f637e 100644
--- a/inc/mm.h
+++ b/inc/mm.h
@@ -50,6 +50,7 @@
bool mm_ptable_unmap_hypervisor(struct mm_ptable *t, int mode);
bool mm_init(void);
+bool mm_cpu_init(void);
bool mm_map(vaddr_t begin, vaddr_t end, paddr_t paddr, int mode);
bool mm_unmap(vaddr_t begin, vaddr_t end, int mode);
void mm_defrag(void);
diff --git a/src/api.c b/src/api.c
index ae429f0..93b14c2 100644
--- a/src/api.c
+++ b/src/api.c
@@ -94,7 +94,7 @@
}
/**
- * Puts current vcpu in wait for interrupt mode, and returns to the primary
+ * Puts the current vcpu in wait for interrupt mode, and returns to the primary
* vm.
*/
struct vcpu *api_wait_for_interrupt(void)
@@ -132,7 +132,7 @@
/*
* Check that both pages are acessible from the VM, i.e., ensure that
- * the caller isn't try to use another VM's memory.
+ * the caller isn't trying to use another VM's memory.
*/
if (!mm_ptable_is_mapped(&vm->ptable, recv, 0) ||
!mm_ptable_is_mapped(&vm->ptable, send, 0)) {
diff --git a/src/arch/aarch64/handler.c b/src/arch/aarch64/handler.c
index 3eec780..2e5e75e 100644
--- a/src/arch/aarch64/handler.c
+++ b/src/arch/aarch64/handler.c
@@ -2,6 +2,7 @@
#include "arch_api.h"
#include "cpu.h"
#include "dlog.h"
+#include "psci.h"
#include "vm.h"
#include "msr.h"
@@ -11,6 +12,9 @@
struct vcpu *new;
};
+int32_t smc(size_t arg0, size_t arg1, size_t arg2, size_t arg3);
+void cpu_entry(struct cpu *c);
+
void irq_current(void)
{
dlog("IRQ from current\n");
@@ -50,24 +54,121 @@
}
}
+/**
+ * Handles PSCI requests received via HVC or SMC instructions from the primary
+ * VM only.
+ *
+ * Returns true if the request was a PSCI one, false otherwise.
+ */
+static bool psci_handler(uint32_t func, size_t arg0, size_t arg1, size_t arg2,
+ long *ret)
+{
+ struct cpu *c;
+ int32_t sret;
+
+ switch (func & ~PSCI_CONVENTION_MASK) {
+ case PSCI_VERSION:
+ /* Version is 0.2. */
+ *ret = 2;
+ break;
+
+ case PSCI_MIGRATE_INFO_TYPE:
+		/* Trusted OS is not present or does not require migration. */
+ *ret = 2;
+ break;
+
+ case PSCI_SYSTEM_OFF:
+ smc(PSCI_SYSTEM_OFF, 0, 0, 0);
+ for (;;) {
+ }
+ break;
+
+ case PSCI_SYSTEM_RESET:
+ smc(PSCI_SYSTEM_RESET, 0, 0, 0);
+ for (;;) {
+ }
+ break;
+
+ case PSCI_AFFINITY_INFO:
+ c = cpu_find(arg0);
+ if (!c) {
+ *ret = PSCI_RETURN_INVALID_PARAMETERS;
+ break;
+ }
+
+ if (arg1 != 0) {
+ *ret = PSCI_RETURN_NOT_SUPPORTED;
+ break;
+ }
+
+ sl_lock(&c->lock);
+ if (c->is_on) {
+ *ret = 0; /* ON */
+ } else {
+ *ret = 1; /* OFF */
+ }
+ sl_unlock(&c->lock);
+ break;
+
+ case PSCI_CPU_OFF:
+ cpu_off(cpu());
+ smc(PSCI_CPU_OFF, 0, 0, 0);
+ for (;;) {
+ }
+ break;
+
+ case PSCI_CPU_ON:
+ c = cpu_find(arg0);
+ if (!c) {
+ *ret = PSCI_RETURN_INVALID_PARAMETERS;
+ break;
+ }
+
+ if (cpu_on(c, arg1, arg2)) {
+ *ret = PSCI_RETURN_ALREADY_ON;
+ break;
+ }
+
+ /*
+ * There's a race when turning a CPU on when it's in the
+ * process of turning off. We need to loop here while it is
+ * reported that the CPU is on (because it's about to turn
+ * itself off).
+ */
+ do {
+ sret = smc(PSCI_CPU_ON, arg0, (size_t)&cpu_entry,
+ (size_t)c);
+ } while (sret == PSCI_RETURN_ALREADY_ON);
+
+ if (sret == PSCI_RETURN_SUCCESS) {
+ *ret = PSCI_RETURN_SUCCESS;
+ } else {
+ dlog("Unexpected return from PSCI_CPU_ON: 0x%x\n",
+ sret);
+ *ret = PSCI_RETURN_INTERNAL_FAILURE;
+ }
+ break;
+
+ default:
+ return false;
+ }
+
+ return true;
+}
+
struct hvc_handler_return hvc_handler(size_t arg0, size_t arg1, size_t arg2,
size_t arg3)
{
- (void)arg3;
-
struct hvc_handler_return ret;
ret.new = NULL;
- switch (arg0) {
- case 0x84000000: /* PSCI_VERSION */
- ret.user_ret = 2;
- break;
+ if (cpu()->current->vm == &primary_vm &&
+ psci_handler(arg0, arg1, arg2, arg3, &ret.user_ret)) {
+ return ret;
+ }
- case 0x84000006: /* PSCI_MIGRATE */
- ret.user_ret = 2;
- break;
-
+ switch ((uint32_t)arg0 & ~PSCI_CONVENTION_MASK) {
case HF_VM_GET_COUNT:
ret.user_ret = api_vm_get_count();
break;
@@ -119,6 +220,7 @@
{
struct cpu *c = cpu();
struct vcpu *vcpu = c->current;
+ long ret;
switch (esr >> 26) {
case 0x01: /* EC = 000001, WFI or WFE. */
@@ -159,6 +261,18 @@
/* do nothing */
}
+ case 0x17: /* EC = 010111, SMC instruction. */
+ if (vcpu->vm != &primary_vm ||
+ !psci_handler(vcpu->regs.r[0], vcpu->regs.r[1],
+ vcpu->regs.r[2], vcpu->regs.r[3], &ret)) {
+ dlog("Unsupported SMC call: 0x%x\n", vcpu->regs.r[0]);
+ ret = -1;
+ }
+
+ /* Skip the SMC instruction. */
+ vcpu->regs.pc += (esr & (1u << 25)) ? 4 : 2;
+ break;
+
default:
dlog("Unknown lower sync exception pc=0x%x, esr=0x%x, "
"ec=0x%x\n",
@@ -168,5 +282,7 @@
}
}
+ vcpu->regs.r[0] = ret;
+
return NULL;
}
diff --git a/src/arch/aarch64/inc/arch_cpu.h b/src/arch/aarch64/inc/arch_cpu.h
index 1a3055f..6acee6e 100644
--- a/src/arch/aarch64/inc/arch_cpu.h
+++ b/src/arch/aarch64/inc/arch_cpu.h
@@ -65,13 +65,19 @@
(0xf << 6); /* DAIF bits set; disable interrupts. */
r->pc = pc;
r->r[0] = arg;
+ /* TODO: Determine if we need to set TSW. */
r->lazy.hcr_el2 = (1u << 31) | /* RW bit. */
+ (1u << 21) | /* TACR, trap access to ACTRL_EL1. */
+ (1u << 19) | /* TSC, trap SMC instructions. */
+ (1u << 20) | /* TIDCP, trap impl-defined funct. */
(1u << 2) | /* PTW, Protected Table Walk. */
(1u << 0); /* VM: enable stage-2 translation. */
if (!is_primary) {
- r->lazy.hcr_el2 |= (7u << 3) | /* AMO, IMO, FMO bits. */
- (3u << 13); /* TWI, TWE bits. */
+ r->lazy.hcr_el2 |= (7u << 3) | /* AMO, IMO, FMO bits. */
+ (1u << 9) | /* FB bit. */
+ (1u << 10) | /* BSU bits set to inner-sh. */
+ (3u << 13); /* TWI, TWE bits. */
}
}
@@ -92,29 +98,4 @@
r->lazy.hcr_el2 &= ~(1u << 7);
}
-/* TODO: Figure out what to do with this. */
-int32_t smc(size_t arg0, size_t arg1, size_t arg2, size_t arg3);
-
-static inline void arch_cpu_on(size_t id, void *ctx)
-{
- void cpu_entry(void *ctx);
- int32_t ret;
-
- /*
- * There's a race when turning a CPU on when it's in the process of
- * turning off. We need to loop here while it is reported that the CPU
- * is on (because it's about to turn itself off).
- */
- do {
- /* CPU_ON */
- ret = smc(0xC4000003, id, (size_t)&cpu_entry, (size_t)ctx);
- } while (ret == -4); /* ALREADY_ON */
-}
-
-static inline void arch_cpu_off(void)
-{
- /* CPU_OFF */
- smc(0xC4000002, 0, 0, 0);
-}
-
#endif /* _ARCH_CPU_H */
diff --git a/src/arch/aarch64/inc/arch_mm.h b/src/arch/aarch64/inc/arch_mm.h
index 3473f37..8662f78 100644
--- a/src/arch/aarch64/inc/arch_mm.h
+++ b/src/arch/aarch64/inc/arch_mm.h
@@ -176,7 +176,7 @@
}
uint64_t arch_mm_mode_to_attrs(int mode);
-bool arch_mm_init(paddr_t table);
+bool arch_mm_init(paddr_t table, bool first);
int arch_mm_max_level(int mode);
#endif /* _ARCH_MM_H */
diff --git a/src/arch/aarch64/inc/psci.h b/src/arch/aarch64/inc/psci.h
new file mode 100644
index 0000000..22f859f
--- /dev/null
+++ b/src/arch/aarch64/inc/psci.h
@@ -0,0 +1,45 @@
+#ifndef _PSCI_H
+#define _PSCI_H
+
+/* clang-format off */
+
+#define PSCI_CONVENTION_MASK (1u << 30)
+
+/* The following are function identifiers for PSCI. */
+#define PSCI_VERSION 0x84000000
+#define PSCI_CPU_SUSPEND 0x84000001
+#define PSCI_CPU_OFF 0x84000002
+#define PSCI_CPU_ON 0x84000003
+#define PSCI_AFFINITY_INFO 0x84000004
+#define PSCI_MIGRATE 0x84000005
+#define PSCI_MIGRATE_INFO_TYPE 0x84000006
+#define PSCI_MIGRATE_INFO_UP_CPU 0x84000007
+#define PSCI_SYSTEM_OFF 0x84000008
+#define PSCI_SYSTEM_RESET 0x84000009
+#define PSCI_FEATURES 0x8400000a
+#define PSCI_CPU_FREEZE 0x8400000b
+#define PSCI_CPU_DEFAULT_SUSPEND 0x8400000c
+#define PSCI_NODE_HW_STATE 0x8400000d
+#define PSCI_SYSTEM_SUSPEND 0x8400000e
+#define PSCI_SET_SUSPEND_MODE        0x8400000f
+#define PSCI_STAT_RESIDENCY 0x84000010
+#define PSCI_STAT_COUNT 0x84000011
+#define PSCI_SYSTEM_RESET2 0x84000012
+#define PSCI_MEM_PROTECT 0x84000013
+#define PSCI_MEM_PROTECT_CHECK_RANGE 0x84000014
+
+/* The following are return codes for PSCI. */
+#define PSCI_RETURN_SUCCESS 0
+#define PSCI_RETURN_NOT_SUPPORTED -1
+#define PSCI_RETURN_INVALID_PARAMETERS -2
+#define PSCI_RETURN_DENIED -3
+#define PSCI_RETURN_ALREADY_ON -4
+#define PSCI_RETURN_ON_PENDING -5
+#define PSCI_RETURN_INTERNAL_FAILURE -6
+#define PSCI_RETURN_NOT_PRESENT      -7
+#define PSCI_RETURN_DISABLED         -8
+#define PSCI_RETURN_INVALID_ADDRESS  -9
+
+/* clang-format on */
+
+#endif /* _PSCI_H */
diff --git a/src/arch/aarch64/mm.c b/src/arch/aarch64/mm.c
index c5c1b4f..53506bc 100644
--- a/src/arch/aarch64/mm.c
+++ b/src/arch/aarch64/mm.c
@@ -144,7 +144,7 @@
return mm_max_s2_level;
}
-bool arch_mm_init(paddr_t table)
+bool arch_mm_init(paddr_t table, bool first)
{
static const int pa_bits_table[16] = {32, 36, 40, 42, 44, 48};
uint64_t features = read_msr(id_aa64mmfr0_el1);
@@ -165,7 +165,9 @@
return false;
}
- dlog("Supported bits in physical address: %d\n", pa_bits);
+ if (first) {
+ dlog("Supported bits in physical address: %d\n", pa_bits);
+ }
/*
* Determine sl0 based on the number of bits. The maximum value is given
@@ -179,7 +181,9 @@
sl0 = 1;
}
- dlog("Number of page table levels: %d\n", mm_max_s2_level + 1);
+ if (first) {
+ dlog("Number of page table levels: %d\n", mm_max_s2_level + 1);
+ }
v = (1u << 31) | /* RES1. */
((features & 0xf) << 16) | /* PS, matching features. */
diff --git a/src/cpu.c b/src/cpu.c
index 581bc11..6cd0a2b 100644
--- a/src/cpu.c
+++ b/src/cpu.c
@@ -1,5 +1,6 @@
#include "cpu.h"
+#include "api.h"
#include "arch_cpu.h"
#include "dlog.h"
#include "std.h"
@@ -24,14 +25,14 @@
for (i = 0; i < MAX_CPUS; i++) {
struct cpu *c = cpus + i;
cpu_init(c);
- c->id = i; /* TODO: Initialize ID. */
+ c->id = i; /* TODO: Initialize ID based on fdt. */
c->stack_bottom = callstacks + STACK_SIZE * (i + 1);
}
}
size_t cpu_index(struct cpu *c)
{
- return cpus - c;
+ return c - cpus;
}
void cpu_init(struct cpu *c)
@@ -60,7 +61,7 @@
/**
* Turns CPU on and returns the previous state.
*/
-bool cpu_on(struct cpu *c)
+bool cpu_on(struct cpu *c, size_t entry, size_t arg)
{
bool prev;
@@ -70,23 +71,38 @@
sl_unlock(&c->lock);
if (!prev) {
- /* The CPU is currently off, we need to turn it on. */
- arch_cpu_on(c->id, c);
+ struct vcpu *vcpu = primary_vm.vcpus + cpu_index(c);
+ arch_regs_init(&vcpu->regs, entry, arg, true);
+ vcpu_on(vcpu);
}
return prev;
}
-/*
- * This must be called only from the same CPU.
+/**
+ * Prepares the CPU for turning itself off.
*/
void cpu_off(struct cpu *c)
{
sl_lock(&c->lock);
c->is_on = false;
sl_unlock(&c->lock);
+}
- arch_cpu_off();
+/**
+ * Searches for a CPU based on its id.
+ */
+struct cpu *cpu_find(size_t id)
+{
+ size_t i;
+
+ for (i = 0; i < MAX_CPUS; i++) {
+ if (cpus[i].id == id) {
+ return cpus + i;
+ }
+ }
+
+ return NULL;
}
void vcpu_init(struct vcpu *vcpu, struct vm *vm)
@@ -95,7 +111,8 @@
sl_init(&vcpu->lock);
vcpu->vm = vm;
vcpu->state = vcpu_state_off;
- /* TODO: Initialize vmid register. */
+ /* TODO: This needs to be moved to arch-dependent code. */
+ vcpu->regs.lazy.vmpidr_el2 = vcpu - vm->vcpus;
}
void vcpu_on(struct vcpu *vcpu)
diff --git a/src/load.c b/src/load.c
index 1b75008..7ab012e 100644
--- a/src/load.c
+++ b/src/load.c
@@ -135,8 +135,6 @@
vm_start_vcpu(&primary_vm, 0, tmp, kernel_arg, true);
}
- vm_set_current(&primary_vm);
-
return true;
}
diff --git a/src/main.c b/src/main.c
index 8862ba8..7938fec 100644
--- a/src/main.c
+++ b/src/main.c
@@ -120,5 +120,11 @@
dlog("Starting up cpu %d\n", cpu_index(c));
+ if (!mm_cpu_init()) {
+ panic("mm_cpu_init failed");
+ }
+
+ vm_set_current(&primary_vm);
+
return primary_vm.vcpus + cpu_index(c);
}
diff --git a/src/mm.c b/src/mm.c
index 00c5d3f..f2062c8 100644
--- a/src/mm.c
+++ b/src/mm.c
@@ -461,7 +461,12 @@
mm_map((vaddr_t)data_begin, (vaddr_t)data_end, (paddr_t)data_begin,
MM_MODE_R | MM_MODE_W | MM_MODE_NOSYNC);
- return arch_mm_init((paddr_t)ptable.table);
+ return arch_mm_init((paddr_t)ptable.table, true);
+}
+
+bool mm_cpu_init(void)
+{
+ return arch_mm_init((paddr_t)ptable.table, false);
}
/**