Store current vcpu instead of cpu in tpidr_el2.
This avoids an extra indirection (the cpu->current load) on the most common paths, such as the exception handlers that look up the current vcpu.
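
For illustration, the lookup change amounts to the following sketch
(hypothetical helpers current_before/current_after; read_msr as used in
handler.c):

    /* Before: tpidr_el2 held the cpu pointer, so finding the current
     * vcpu cost a second load through cpu->current. */
    static struct vcpu *current_before(void)
    {
            struct cpu *c = (struct cpu *)read_msr(tpidr_el2);
            return c->current;
    }

    /* After: tpidr_el2 holds the vcpu pointer itself; the cpu is
     * still reachable through the new vcpu->cpu back-pointer. */
    static struct vcpu *current_after(void)
    {
            return (struct vcpu *)read_msr(tpidr_el2);
    }

The same saving shows up in the assembly handlers, where the
"ldr xN, [xN, #CPU_CURRENT]" that followed each "mrs xN, tpidr_el2"
is dropped.
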
Change-Id: I2ea71ee1a56ee8b94f7f516465081e88e82d8539
diff --git a/inc/hf/api.h b/inc/hf/api.h
index 514ac2f..5359f86 100644
--- a/inc/hf/api.h
+++ b/inc/hf/api.h
@@ -22,15 +22,19 @@
#include "vmapi/hf/call.h"
int64_t api_vm_get_count(void);
-int64_t api_vcpu_get_count(uint32_t vm_id);
+int64_t api_vcpu_get_count(uint32_t vm_id, const struct vcpu *current);
struct hf_vcpu_run_return api_vcpu_run(uint32_t vm_id, uint32_t vcpu_idx,
+ const struct vcpu *current,
struct vcpu **next);
-int64_t api_vm_configure(ipaddr_t send, ipaddr_t recv);
+int64_t api_vm_configure(ipaddr_t send, ipaddr_t recv,
+ const struct vcpu *current);
-int64_t api_mailbox_send(uint32_t vm_id, size_t size, struct vcpu **next);
+int64_t api_mailbox_send(uint32_t vm_id, size_t size, struct vcpu *current,
+ struct vcpu **next);
struct hf_mailbox_receive_return api_mailbox_receive(bool block,
+ struct vcpu *current,
struct vcpu **next);
-int64_t api_mailbox_clear(void);
+int64_t api_mailbox_clear(const struct vcpu *current);
-struct vcpu *api_wait_for_interrupt(void);
-struct vcpu *api_yield(void);
+struct vcpu *api_wait_for_interrupt(struct vcpu *current);
+struct vcpu *api_yield(struct vcpu *current);
diff --git a/inc/hf/cpu.h b/inc/hf/cpu.h
index 809c21e..1d3d640 100644
--- a/inc/hf/cpu.h
+++ b/inc/hf/cpu.h
@@ -45,6 +45,7 @@
struct vcpu {
struct spinlock lock;
enum vcpu_state state;
+ struct cpu *cpu;
struct vm *vm;
struct vcpu *mailbox_next;
struct arch_regs regs;
@@ -55,8 +56,6 @@
/* CPU identifier. Doesn't have to be contiguous. */
size_t id;
- struct vcpu *current;
-
/* Pointer to bottom of the stack. */
void *stack_bottom;
diff --git a/src/api.c b/src/api.c
index 3b8d921..8761e61 100644
--- a/src/api.c
+++ b/src/api.c
@@ -34,26 +34,24 @@
* to cause HF_VCPU_RUN to return and the primary VM to regain control of the
* cpu.
*/
-static struct vcpu *api_switch_to_primary(struct hf_vcpu_run_return primary_ret,
+static struct vcpu *api_switch_to_primary(struct vcpu *current,
+ struct hf_vcpu_run_return primary_ret,
enum vcpu_state secondary_state)
{
- struct vcpu *vcpu = cpu()->current;
struct vm *primary = vm_get(HF_PRIMARY_VM_ID);
- struct vcpu *next = &primary->vcpus[cpu_index(cpu())];
+ struct vcpu *next = &primary->vcpus[cpu_index(current->cpu)];
/* Switch back to primary VM. */
vm_set_current(primary);
- /*
- * Set the return value for the primary VM's call to HF_VCPU_RUN.
- */
+ /* Set the return value for the primary VM's call to HF_VCPU_RUN. */
arch_regs_set_retval(&next->regs,
hf_vcpu_run_return_encode(primary_ret));
- /* Mark the vcpu as waiting. */
- sl_lock(&vcpu->lock);
- vcpu->state = secondary_state;
- sl_unlock(&vcpu->lock);
+ /* Mark the current vcpu as waiting. */
+ sl_lock(&current->lock);
+ current->state = secondary_state;
+ sl_unlock(&current->lock);
return next;
}
@@ -62,24 +60,25 @@
* Returns to the primary vm leaving the current vcpu ready to be scheduled
* again.
*/
-struct vcpu *api_yield(void)
+struct vcpu *api_yield(struct vcpu *current)
{
struct hf_vcpu_run_return ret = {
.code = HF_VCPU_RUN_YIELD,
};
- return api_switch_to_primary(ret, vcpu_state_ready);
+ return api_switch_to_primary(current, ret, vcpu_state_ready);
}
/**
* Puts the current vcpu in wait for interrupt mode, and returns to the primary
* vm.
*/
-struct vcpu *api_wait_for_interrupt(void)
+struct vcpu *api_wait_for_interrupt(struct vcpu *current)
{
struct hf_vcpu_run_return ret = {
.code = HF_VCPU_RUN_WAIT_FOR_INTERRUPT,
};
- return api_switch_to_primary(ret, vcpu_state_blocked_interrupt);
+ return api_switch_to_primary(current, ret,
+ vcpu_state_blocked_interrupt);
}
/**
@@ -93,12 +92,12 @@
/**
* Returns the number of vcpus configured in the given VM.
*/
-int64_t api_vcpu_get_count(uint32_t vm_id)
+int64_t api_vcpu_get_count(uint32_t vm_id, const struct vcpu *current)
{
struct vm *vm;
/* Only the primary VM needs to know about vcpus for scheduling. */
- if (cpu()->current->vm->id != HF_PRIMARY_VM_ID) {
+ if (current->vm->id != HF_PRIMARY_VM_ID) {
return -1;
}
@@ -114,6 +113,7 @@
* Runs the given vcpu of the given vm.
*/
struct hf_vcpu_run_return api_vcpu_run(uint32_t vm_id, uint32_t vcpu_idx,
+ const struct vcpu *current,
struct vcpu **next)
{
struct vm *vm;
@@ -123,7 +123,7 @@
};
/* Only the primary VM can switch vcpus. */
- if (cpu()->current->vm->id != HF_PRIMARY_VM_ID) {
+ if (current->vm->id != HF_PRIMARY_VM_ID) {
goto out;
}
@@ -149,6 +149,7 @@
if (vcpu->state != vcpu_state_ready) {
ret.code = HF_VCPU_RUN_WAIT_FOR_INTERRUPT;
} else {
+ vcpu->cpu = current->cpu;
vcpu->state = vcpu_state_running;
vm_set_current(vm);
*next = vcpu;
@@ -164,9 +165,10 @@
* Configures the VM to send/receive data through the specified pages. The pages
* must not be shared.
*/
-int64_t api_vm_configure(ipaddr_t send, ipaddr_t recv)
+int64_t api_vm_configure(ipaddr_t send, ipaddr_t recv,
+ const struct vcpu *current)
{
- struct vm *vm = cpu()->current->vm;
+ struct vm *vm = current->vm;
paddr_t pa_send_begin;
paddr_t pa_send_end;
paddr_t pa_recv_begin;
@@ -246,9 +248,10 @@
* Copies data from the sender's send buffer to the recipient's receive buffer
* and notifies the recipient.
*/
-int64_t api_mailbox_send(uint32_t vm_id, size_t size, struct vcpu **next)
+int64_t api_mailbox_send(uint32_t vm_id, size_t size, struct vcpu *current,
+ struct vcpu **next)
{
- struct vm *from = cpu()->current->vm;
+ struct vm *from = current->vm;
struct vm *to;
const void *from_buf;
uint16_t vcpu;
@@ -378,7 +381,7 @@
}
/* Switch to primary for scheduling and return success to the sender. */
- *next = api_switch_to_primary(primary_ret, vcpu_state_ready);
+ *next = api_switch_to_primary(current, primary_ret, vcpu_state_ready);
return 0;
}
@@ -389,10 +392,10 @@
* No new messages can be received until the mailbox has been cleared.
*/
struct hf_mailbox_receive_return api_mailbox_receive(bool block,
+ struct vcpu *current,
struct vcpu **next)
{
- struct vcpu *vcpu = cpu()->current;
- struct vm *vm = vcpu->vm;
+ struct vm *vm = current->vm;
struct hf_mailbox_receive_return ret = {
.vm_id = HF_INVALID_VM_ID,
};
@@ -420,17 +423,16 @@
goto out;
}
- sl_lock(&vcpu->lock);
- vcpu->state = vcpu_state_blocked_mailbox;
+ sl_lock(&current->lock);
+ current->state = vcpu_state_blocked_mailbox;
/* Push vcpu into waiter list. */
- vcpu->mailbox_next = vm->mailbox.recv_waiter;
- vm->mailbox.recv_waiter = vcpu;
- sl_unlock(&vcpu->lock);
+ current->mailbox_next = vm->mailbox.recv_waiter;
+ vm->mailbox.recv_waiter = current;
+ sl_unlock(&current->lock);
/* Switch back to primary vm to block. */
- *next = api_wait_for_interrupt();
-
+ *next = api_wait_for_interrupt(current);
out:
sl_unlock(&vm->lock);
@@ -442,9 +444,9 @@
* must have copied out all data they wish to preserve as new messages will
* overwrite the old and will arrive asynchronously.
*/
-int64_t api_mailbox_clear(void)
+int64_t api_mailbox_clear(const struct vcpu *current)
{
- struct vm *vm = cpu()->current->vm;
+ struct vm *vm = current->vm;
int64_t ret;
sl_lock(&vm->lock);
diff --git a/src/arch/aarch64/cpu_entry.S b/src/arch/aarch64/cpu_entry.S
index 39e1378..2c1047d 100644
--- a/src/arch/aarch64/cpu_entry.S
+++ b/src/arch/aarch64/cpu_entry.S
@@ -6,9 +6,6 @@
/* Disable interrupts. */
msr DAIFSet, #0xf
- /* Save pointer to CPU struct for later reference. */
- msr tpidr_el2, x0
-
/* Use SPx (instead of SP0). */
msr spsel, #1
@@ -21,7 +18,7 @@
add x30, x30, :lo12:vector_table_el2
msr vbar_el2, x30
- /* Call into C code. */
+ /* Call into C code; x0 holds the cpu pointer. */
bl cpu_main
/* Run the vcpu returned by cpu_main. */
diff --git a/src/arch/aarch64/entry.S b/src/arch/aarch64/entry.S
index 7b9d648..ac705b0 100644
--- a/src/arch/aarch64/entry.S
+++ b/src/arch/aarch64/entry.S
@@ -66,4 +66,3 @@
/* Branch to the entry point for the specific image. */
4: b image_entry
-
diff --git a/src/arch/aarch64/exceptions.S b/src/arch/aarch64/exceptions.S
index 9fec775..8864f5f 100644
--- a/src/arch/aarch64/exceptions.S
+++ b/src/arch/aarch64/exceptions.S
@@ -87,7 +87,6 @@
/* Get the current vcpu. */
mrs x0, tpidr_el2
- ldr x0, [x0, #CPU_CURRENT]
/* Save volatile registers. */
add x0, x0, #VCPU_REGS
@@ -114,7 +113,6 @@
bl irq_lower
mrs x1, tpidr_el2
- ldr x1, [x1, #CPU_CURRENT]
cbnz x0, vcpu_switch
/* vcpu is not changing. */
@@ -220,9 +218,8 @@
.globl vcpu_restore_all_and_run
vcpu_restore_all_and_run:
- /* Update cpu()->current. */
- mrs x2, tpidr_el2
- str x0, [x2, #CPU_CURRENT]
+ /* Update current(). */
+ msr tpidr_el2, x0
/* Get a pointer to the lazy registers. */
add x0, x0, #VCPU_LAZY
@@ -318,7 +315,6 @@
slow_sync_lower_64:
/* Get the current vcpu. */
mrs x18, tpidr_el2
- ldr x18, [x18, #CPU_CURRENT]
/* Save volatile registers. */
add x18, x18, #VCPU_REGS
@@ -348,7 +344,6 @@
/* Switch to the vcpu returned by sync_lower_exception. */
mrs x1, tpidr_el2
- ldr x1, [x1, #CPU_CURRENT]
cbnz x0, vcpu_switch
/* vcpu is not changing. */
@@ -358,7 +353,6 @@
sync_lower_64_switch:
/* We'll have to switch, so save volatile state before doing so. */
mrs x18, tpidr_el2
- ldr x18, [x18, #CPU_CURRENT]
/* Store zeroes in volatile register storage, except x0. */
add x18, x18, #VCPU_REGS
diff --git a/src/arch/aarch64/handler.c b/src/arch/aarch64/handler.c
index 8fa5733..7eba337 100644
--- a/src/arch/aarch64/handler.c
+++ b/src/arch/aarch64/handler.c
@@ -32,6 +32,11 @@
int32_t smc(size_t arg0, size_t arg1, size_t arg2, size_t arg3);
void cpu_entry(struct cpu *c);
+static struct vcpu *current(void)
+{
+ return (struct vcpu *)read_msr(tpidr_el2);
+}
+
void irq_current(void)
{
dlog("IRQ from current\n");
@@ -128,7 +133,7 @@
break;
case PSCI_CPU_OFF:
- cpu_off(cpu());
+ cpu_off(current()->cpu);
smc(PSCI_CPU_OFF, 0, 0, 0);
for (;;) {
}
@@ -180,7 +185,7 @@
ret.new = NULL;
- if (cpu()->current->vm->id == HF_PRIMARY_VM_ID) {
+ if (current()->vm->id == HF_PRIMARY_VM_ID) {
int32_t psci_ret;
if (psci_handler(arg0, arg1, arg2, arg3, &psci_ret)) {
ret.user_ret = psci_ret;
@@ -194,29 +199,31 @@
break;
case HF_VCPU_GET_COUNT:
- ret.user_ret = api_vcpu_get_count(arg1);
+ ret.user_ret = api_vcpu_get_count(arg1, current());
break;
case HF_VCPU_RUN:
ret.user_ret = hf_vcpu_run_return_encode(
- api_vcpu_run(arg1, arg2, &ret.new));
+ api_vcpu_run(arg1, arg2, current(), &ret.new));
break;
case HF_VM_CONFIGURE:
- ret.user_ret = api_vm_configure(ipa_init(arg1), ipa_init(arg2));
+ ret.user_ret = api_vm_configure(ipa_init(arg1), ipa_init(arg2),
+ current());
break;
case HF_MAILBOX_SEND:
- ret.user_ret = api_mailbox_send(arg1, arg2, &ret.new);
+ ret.user_ret =
+ api_mailbox_send(arg1, arg2, current(), &ret.new);
break;
case HF_MAILBOX_RECEIVE:
ret.user_ret = hf_mailbox_receive_return_encode(
- api_mailbox_receive(arg1, &ret.new));
+ api_mailbox_receive(arg1, current(), &ret.new));
break;
case HF_MAILBOX_CLEAR:
- ret.user_ret = api_mailbox_clear();
+ ret.user_ret = api_mailbox_clear(current());
break;
default:
@@ -231,13 +238,12 @@
/* TODO: Only switch if we know the interrupt was not for the secondary
* VM. */
/* Switch back to primary VM, interrupts will be handled there. */
- return api_yield();
+ return api_yield(current());
}
struct vcpu *sync_lower_exception(uint64_t esr)
{
- struct cpu *c = cpu();
- struct vcpu *vcpu = c->current;
+ struct vcpu *vcpu = current();
int32_t ret;
switch (esr >> 26) {
@@ -246,7 +252,7 @@
if (esr & 1) {
return NULL;
}
- return api_wait_for_interrupt();
+ return api_wait_for_interrupt(current());
case 0x24: /* EC = 100100, Data abort. */
dlog("Data abort: pc=0x%x, esr=0x%x, ec=0x%x", vcpu->regs.pc,
diff --git a/src/arch/aarch64/inc/hf/arch/cpu.h b/src/arch/aarch64/inc/hf/arch/cpu.h
index 28061a6..e74c796 100644
--- a/src/arch/aarch64/inc/hf/arch/cpu.h
+++ b/src/arch/aarch64/inc/hf/arch/cpu.h
@@ -56,13 +56,6 @@
} lazy;
};
-static inline struct cpu *cpu(void)
-{
- struct cpu *p;
- __asm__ volatile("mrs %0, tpidr_el2" : "=r"(p));
- return p;
-}
-
static inline void arch_irq_disable(void)
{
__asm__ volatile("msr DAIFSet, #0xf");
diff --git a/src/arch/aarch64/offsets.c b/src/arch/aarch64/offsets.c
index 2c2db4f..558b956 100644
--- a/src/arch/aarch64/offsets.c
+++ b/src/arch/aarch64/offsets.c
@@ -21,7 +21,6 @@
#include "hf/cpu.h"
#include "hf/decl_offsets.h"
-DECL(CPU_CURRENT, struct cpu, current);
DECL(CPU_STACK_BOTTOM, struct cpu, stack_bottom);
DECL(VCPU_REGS, struct vcpu, regs);
DECL(VCPU_LAZY, struct vcpu, regs.lazy);
diff --git a/src/arch/fake/inc/hf/arch/cpu.h b/src/arch/fake/inc/hf/arch/cpu.h
index 421a7f7..270942e 100644
--- a/src/arch/fake/inc/hf/arch/cpu.h
+++ b/src/arch/fake/inc/hf/arch/cpu.h
@@ -25,12 +25,6 @@
uint16_t vcpu_index;
};
-static inline struct cpu *cpu(void)
-{
- /* TODO: */
- return NULL;
-}
-
static inline void arch_irq_disable(void)
{
/* TODO */
diff --git a/src/main.c b/src/main.c
index df27ea2..db191a8 100644
--- a/src/main.c
+++ b/src/main.c
@@ -128,10 +128,10 @@
* The entry point of CPUs when they are turned on. It is supposed to initialise
* all state and return the first vCPU to run.
*/
-struct vcpu *cpu_main(void)
+struct vcpu *cpu_main(struct cpu *c)
{
- struct cpu *c = cpu();
struct vm *primary;
+ struct vcpu *vcpu;
/*
* Do global one-time initialisation just once. We avoid using atomics
@@ -152,5 +152,7 @@
primary = vm_get(HF_PRIMARY_VM_ID);
vm_set_current(primary);
- return &primary->vcpus[cpu_index(c)];
+ vcpu = &primary->vcpus[cpu_index(c)];
+ vcpu->cpu = c;
+ return vcpu;
}