Accommodate the primary being an indexable VM.
Now that the primary VM is addressable for RPCs, it is given the ID 0 by
convention. The scheduling needs to ignore the primary VM as it is
already handled by the traditional scheduler.
Bug: 116705004
Change-Id: I84f813afe7bf5013305ae70fc5fdad6a243fafd2
diff --git a/main.c b/main.c
index 2b35114..d39d17d 100644
--- a/main.c
+++ b/main.c
@@ -11,7 +11,7 @@
struct hf_vcpu {
spinlock_t lock;
- uint32_t vm_index;
+ struct hf_vm *vm;
uint32_t vcpu_index;
struct task_struct *task;
struct hrtimer timer;
@@ -19,6 +19,7 @@
};
struct hf_vm {
+ uint32_t id;
long vcpu_count;
struct hf_vcpu *vcpu;
};
@@ -63,7 +64,7 @@
spin_unlock_irqrestore(&vcpu->lock, flags);
/* Call into hafnium to run vcpu. */
- ret = hf_vcpu_run(vcpu->vm_index, vcpu->vcpu_index);
+ ret = hf_vcpu_run(vcpu->vm->id, vcpu->vcpu_index);
/* A negative return value indicates that this vcpu needs to
* sleep for the given number of nanoseconds.
@@ -94,8 +95,8 @@
/* Wake up another vcpu. */
case HF_VCPU_RUN_WAKE_UP:
{
- struct hf_vm *vm = &hf_vms[vcpu->vm_index];
long target = HF_VCPU_RUN_DATA(ret);
+ struct hf_vm *vm = vcpu->vm;
if (target < vm->vcpu_count)
wake_up_process(vm->vcpu[target].task);
}
@@ -189,11 +190,12 @@
/* Copy data to send buffer. */
memcpy(page_address(hf_send_page), buf, count);
- ret = hf_rpc_request(0, count);
+
+ vm = &hf_vms[0];
+ ret = hf_rpc_request(vm->id, count);
if (ret < 0)
return -EAGAIN;
- vm = &hf_vms[0];
if (ret > vm->vcpu_count)
return -EINVAL;
@@ -259,12 +261,13 @@
/* Get the number of VMs and allocate storage for them. */
ret = hf_vm_get_count();
- if (ret < 0) {
+ if (ret < 1) {
pr_err("Unable to retrieve number of VMs: %ld\n", ret);
return ret;
}
- hf_vm_count = ret;
+ /* Only track the secondary VMs. */
+ hf_vm_count = ret - 1;
hf_vms = kmalloc(sizeof(struct hf_vm) * hf_vm_count, GFP_KERNEL);
if (!hf_vms)
return -ENOMEM;
@@ -273,9 +276,12 @@
for (i = 0; i < hf_vm_count; i++) {
struct hf_vm *vm = &hf_vms[i];
- ret = hf_vcpu_get_count(i);
+ /* Adjust the ID as only the secondaries are tracked. */
+ vm->id = i + 1;
+
+ ret = hf_vcpu_get_count(vm->id);
if (ret < 0) {
- pr_err("HF_VCPU_GET_COUNT failed for vm=%ld: %ld", i,
+ pr_err("HF_VCPU_GET_COUNT failed for vm=%d: %ld", vm->id,
ret);
hf_free_resources(i);
return ret;
@@ -285,8 +291,8 @@
vm->vcpu = kmalloc(sizeof(struct hf_vcpu) * vm->vcpu_count,
GFP_KERNEL);
if (!vm->vcpu) {
- pr_err("No memory for %ld vcpus for vm %ld",
- vm->vcpu_count, i);
+ pr_err("No memory for %ld vcpus for vm %d",
+ vm->vcpu_count, vm->id);
hf_free_resources(i);
return -ENOMEM;
}
@@ -295,11 +301,11 @@
for (j = 0; j < vm->vcpu_count; j++) {
struct hf_vcpu *vcpu = &vm->vcpu[j];
vcpu->task = kthread_create(hf_vcpu_thread, vcpu,
- "vcpu_thread_%ld_%ld",
- i, j);
+ "vcpu_thread_%d_%ld",
+ vm->id, j);
if (IS_ERR(vcpu->task)) {
- pr_err("Error creating task (vm=%ld,vcpu=%ld)"
- ": %ld\n", i, j, PTR_ERR(vcpu->task));
+ pr_err("Error creating task (vm=%d,vcpu=%ld)"
+ ": %ld\n", vm->id, j, PTR_ERR(vcpu->task));
vm->vcpu_count = j;
hf_free_resources(i + 1);
return PTR_ERR(vcpu->task);
@@ -307,7 +313,7 @@
get_task_struct(vcpu->task);
spin_lock_init(&vcpu->lock);
- vcpu->vm_index = i;
+ vcpu->vm = vm;
vcpu->vcpu_index = j;
vcpu->pending_irq = false;
}
@@ -322,8 +328,10 @@
/* Dump vm/vcpu count info. */
pr_info("Hafnium successfully loaded with %ld VMs:\n", hf_vm_count);
- for (i = 0; i < hf_vm_count; i++)
- pr_info("\tVM %ld: %ld vCPUS\n", i, hf_vms[i].vcpu_count);
+ for (i = 0; i < hf_vm_count; i++) {
+ struct hf_vm *vm = &hf_vms[i];
+ pr_info("\tVM %d: %ld vCPUS\n", vm->id, vm->vcpu_count);
+ }
/* Create the sysfs interface to interrupt vcpus. */
hf_sysfs_obj = kobject_create_and_add("hafnium", kernel_kobj);