Move locking out of vcpu_on to callers.

Also introduces vcpu_lock and related functions for locking a vcpu struct.

Change-Id: I870b9fb7786f7e31ff424c9debbb0dbf72a770e4
diff --git a/inc/hf/cpu.h b/inc/hf/cpu.h
index 45dbb4f..3c5db07 100644
--- a/inc/hf/cpu.h
+++ b/inc/hf/cpu.h
@@ -93,6 +93,11 @@
 	bool regs_available;
 };
 
+/** Encapsulates a vCPU whose lock is held. */
+struct vcpu_locked {
+	struct vcpu *vcpu;
+};
+
 /* TODO: Update alignment such that cpus are in different cache lines. */
 struct cpu {
 	/** CPU identifier. Doesn't have to be contiguous. */
@@ -123,8 +128,10 @@
 void cpu_off(struct cpu *c);
 struct cpu *cpu_find(uint64_t id);
 
+struct vcpu_locked vcpu_lock(struct vcpu *vcpu);
+void vcpu_unlock(struct vcpu_locked *locked);
 void vcpu_init(struct vcpu *vcpu, struct vm *vm);
-void vcpu_on(struct vcpu *vcpu, ipaddr_t entry, uintreg_t arg);
+void vcpu_on(struct vcpu_locked vcpu, ipaddr_t entry, uintreg_t arg);
 size_t vcpu_index(const struct vcpu *vcpu);
 void vcpu_secondary_reset_and_start(struct vcpu *vcpu, ipaddr_t entry,
 				    uintreg_t arg);
diff --git a/src/cpu.c b/src/cpu.c
index 2c7bef2..7a2ffe2 100644
--- a/src/cpu.c
+++ b/src/cpu.c
@@ -124,8 +124,11 @@
 	if (!prev) {
 		struct vm *vm = vm_get(HF_PRIMARY_VM_ID);
 		struct vcpu *vcpu = &vm->vcpus[cpu_index(c)];
+		struct vcpu_locked vcpu_locked;
 
-		vcpu_on(vcpu, entry, arg);
+		vcpu_locked = vcpu_lock(vcpu);
+		vcpu_on(vcpu_locked, entry, arg);
+		vcpu_unlock(&vcpu_locked);
 	}
 
 	return prev;
@@ -157,6 +160,30 @@
 	return NULL;
 }
 
+/**
+ * Locks the given vCPU and returns a vcpu_locked wrapper holding it.
+ */
+struct vcpu_locked vcpu_lock(struct vcpu *vcpu)
+{
+	struct vcpu_locked locked = {
+		.vcpu = vcpu,
+	};
+
+	sl_lock(&vcpu->lock);
+
+	return locked;
+}
+
+/**
+ * Unlocks a vCPU previously locked with vcpu_lock, and updates `locked` to
+ * reflect the fact that the vCPU is no longer locked.
+ */
+void vcpu_unlock(struct vcpu_locked *locked)
+{
+	sl_unlock(&locked->vcpu->lock);
+	locked->vcpu = NULL;
+}
+
 void vcpu_init(struct vcpu *vcpu, struct vm *vm)
 {
 	memset_s(vcpu, sizeof(*vcpu), 0, sizeof(*vcpu));
@@ -166,13 +193,14 @@
 	vcpu->state = VCPU_STATE_OFF;
 }
 
-void vcpu_on(struct vcpu *vcpu, ipaddr_t entry, uintreg_t arg)
+/**
+ * Initialise the registers for the given vCPU and set the state to
+ * VCPU_STATE_READY. The caller must hold the vCPU lock while calling this.
+ */
+void vcpu_on(struct vcpu_locked vcpu, ipaddr_t entry, uintreg_t arg)
 {
-	arch_regs_set_pc_arg(&vcpu->regs, entry, arg);
-
-	sl_lock(&vcpu->lock);
-	vcpu->state = VCPU_STATE_READY;
-	sl_unlock(&vcpu->lock);
+	arch_regs_set_pc_arg(&vcpu.vcpu->regs, entry, arg);
+	vcpu.vcpu->state = VCPU_STATE_READY;
 }
 
 size_t vcpu_index(const struct vcpu *vcpu)
@@ -186,10 +214,12 @@
 void vcpu_secondary_reset_and_start(struct vcpu *vcpu, ipaddr_t entry,
 				    uintreg_t arg)
 {
+	struct vcpu_locked vcpu_locked;
 	struct vm *vm = vcpu->vm;
 
 	assert(vm->id != HF_PRIMARY_VM_ID);
 
+	vcpu_locked = vcpu_lock(vcpu);
 	/*
 	 * Set vCPU registers to a clean state ready for boot. As this is a
 	 * secondary which can migrate between pCPUs, the ID of the vCPU is
@@ -198,7 +228,8 @@
 	 */
 	arch_regs_reset(&vcpu->regs, false, vm->id, vcpu_index(vcpu),
 			vm->ptable.root);
-	vcpu_on(vcpu, entry, arg);
+	vcpu_on(vcpu_locked, entry, arg);
+	vcpu_unlock(&vcpu_locked);
 }
 
 /**
diff --git a/src/load.c b/src/load.c
index 56751aa..aa1f19d 100644
--- a/src/load.c
+++ b/src/load.c
@@ -131,6 +131,7 @@
 
 	{
 		struct vm *vm;
+		struct vcpu_locked vcpu_locked;
 
 		if (!vm_init(MAX_CPUS, ppool, &vm)) {
 			dlog("Unable to initialise primary vm\n");
@@ -157,7 +158,9 @@
 			return false;
 		}
 
-		vcpu_on(&vm->vcpus[0], ipa_from_pa(primary_begin), kernel_arg);
+		vcpu_locked = vcpu_lock(&vm->vcpus[0]);
+		vcpu_on(vcpu_locked, ipa_from_pa(primary_begin), kernel_arg);
+		vcpu_unlock(&vcpu_locked);
 	}
 
 	return true;