Implement API to inject virtual interrupts into VMs.

This doesn't yet work properly in multiple-PE configurations, and doesn't have
any concept of priorities.
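
A rough usage sketch (the interrupt ID, VM ID and vCPU index below are
made-up example values):

  /* Secondary VM: enable virtual interrupt 3 for the calling vCPU. */
  hf_enable_interrupt(3, true);

  /* Secondary VM, in its virtual IRQ handler: find out which interrupt
   * fired and mark it as no longer pending. */
  uint32_t intid = hf_get_and_acknowledge_interrupt();
  if (intid != HF_INVALID_INTID) {
          /* ... handle intid ... */
  }

  /* Primary VM: inject virtual interrupt 3 into vCPU 0 of VM 1. */
  hf_inject_interrupt(1, 0, 3);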

Bug: 117270899
Change-Id: Id62c59d78d0604b934aeca75ea459248db660488
diff --git a/inc/hf/api.h b/inc/hf/api.h
index 5359f86..692fbac 100644
--- a/inc/hf/api.h
+++ b/inc/hf/api.h
@@ -38,3 +38,9 @@
 
 struct vcpu *api_wait_for_interrupt(struct vcpu *current);
 struct vcpu *api_yield(struct vcpu *current);
+
+int64_t api_enable_interrupt(uint32_t intid, bool enable, struct vcpu *current);
+uint32_t api_get_and_acknowledge_interrupt(struct vcpu *current);
+int64_t api_inject_interrupt(uint32_t target_vm_id, uint32_t target_vcpu_idx,
+			     uint32_t intid, struct vcpu *current,
+			     struct vcpu **next);
diff --git a/inc/hf/cpu.h b/inc/hf/cpu.h
index 60261fe..d2029dd 100644
--- a/inc/hf/cpu.h
+++ b/inc/hf/cpu.h
@@ -25,6 +25,11 @@
 #include "hf/addr.h"
 #include "hf/spinlock.h"
 
+#include "vmapi/hf/types.h"
+
+/** The number of bits in each element of the interrupt bitfields. */
+#define INTERRUPT_REGISTER_BITS 32
+
 enum vcpu_state {
 	/** The vcpu is switched off. */
 	vcpu_state_off,
@@ -42,6 +47,13 @@
 	vcpu_state_blocked_interrupt,
 };
 
+struct interrupts {
+	/** Bitfield keeping track of which interrupts are enabled. */
+	uint32_t interrupt_enabled[HF_NUM_INTIDS / INTERRUPT_REGISTER_BITS];
+	/** Bitfield keeping track of which interrupts are pending. */
+	uint32_t interrupt_pending[HF_NUM_INTIDS / INTERRUPT_REGISTER_BITS];
+};
+
 struct vcpu {
 	struct spinlock lock;
 	enum vcpu_state state;
@@ -49,6 +61,7 @@
 	struct vm *vm;
 	struct vcpu *mailbox_next;
 	struct arch_regs regs;
+	struct interrupts interrupts;
 };
 
 /* TODO: Update alignment such that cpus are in different cache lines. */
diff --git a/inc/hf/std.h b/inc/hf/std.h
index 55922fa..39d3d08 100644
--- a/inc/hf/std.h
+++ b/inc/hf/std.h
@@ -27,6 +27,8 @@
 size_t strlen(const char *str);
 int strcmp(const char *a, const char *b);
 
+/* Count trailing zero bits; the result is undefined if x is 0. */
+#define ctz(x) __builtin_ctz(x)
+
 #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
 
 #define be16toh(v) __builtin_bswap16(v)
diff --git a/inc/vmapi/hf/call.h b/inc/vmapi/hf/call.h
index 102fd6c..7de8160 100644
--- a/inc/vmapi/hf/call.h
+++ b/inc/vmapi/hf/call.h
@@ -16,6 +16,8 @@
 
 #pragma once
 
+#include "hf/arch/cpu.h"
+
 #include "hf/abi.h"
 #include "hf/types.h"
 
@@ -23,13 +25,16 @@
 /* clang-format off */
 
 /* TODO: Define constants below according to spec. */
-#define HF_VCPU_RUN         0xff00
-#define HF_VM_GET_COUNT     0xff01
-#define HF_VCPU_GET_COUNT   0xff02
-#define HF_VM_CONFIGURE     0xff03
-#define HF_MAILBOX_SEND     0xff04
-#define HF_MAILBOX_RECEIVE  0xff05
-#define HF_MAILBOX_CLEAR    0xff06
+#define HF_VCPU_RUN                      0xff00
+#define HF_VM_GET_COUNT                  0xff01
+#define HF_VCPU_GET_COUNT                0xff02
+#define HF_VM_CONFIGURE                  0xff03
+#define HF_MAILBOX_SEND                  0xff04
+#define HF_MAILBOX_RECEIVE               0xff05
+#define HF_MAILBOX_CLEAR                 0xff06
+#define HF_ENABLE_INTERRUPT              0xff07
+#define HF_GET_AND_ACKNOWLEDGE_INTERRUPT 0xff08
+#define HF_INJECT_INTERRUPT              0xff09
 
 /** The amount of data that can be sent to a mailbox. */
 #define HF_MAILBOX_SIZE 4096
@@ -118,3 +123,39 @@
 {
 	return hf_call(HF_MAILBOX_CLEAR, 0, 0, 0);
 }
+
+/**
+ * Enables or disables a given interrupt ID for the calling vCPU.
+ *
+ * Returns 0 on success, or -1 if the intid is invalid.
+ */
+static inline int64_t hf_enable_interrupt(uint32_t intid, bool enable)
+{
+	return hf_call(HF_ENABLE_INTERRUPT, intid, enable, 0);
+}
+
+/**
+ * Gets the ID of the next pending interrupt (if any) and acknowledges it.
+ *
+ * Returns HF_INVALID_INTID if there are no pending interrupts.
+ */
+static inline uint32_t hf_get_and_acknowledge_interrupt(void)
+{
+	return hf_call(HF_GET_AND_ACKNOWLEDGE_INTERRUPT, 0, 0, 0);
+}
+
+/**
+ * Injects a virtual interrupt of the given ID into the given target vCPU.
+ * This doesn't cause the vCPU to be run immediately; the interrupt will be
+ * taken when the vCPU is next run, which is up to the scheduler.
+ *
+ * Returns 0 on success, or -1 if the target VM or vCPU doesn't exist, the
+ * interrupt ID is invalid, or the caller is not allowed to inject interrupts
+ * into the target VM.
+ */
+static inline int64_t hf_inject_interrupt(uint32_t target_vm_id,
+					  uint32_t target_vcpu_idx,
+					  uint32_t intid)
+{
+	return hf_call(HF_INJECT_INTERRUPT, target_vm_id, target_vcpu_idx,
+		       intid);
+}
diff --git a/inc/vmapi/hf/types.h b/inc/vmapi/hf/types.h
index 45aa257..697a990 100644
--- a/inc/vmapi/hf/types.h
+++ b/inc/vmapi/hf/types.h
@@ -39,3 +39,9 @@
 /* Invalid values for fields to indicate absence or errors. */
 #define HF_INVALID_VM_ID 0xffffffff
 #define HF_INVALID_VCPU 0xffff
+
+/** The number of virtual interrupt IDs which are supported. */
+#define HF_NUM_INTIDS 64
+
+/** Interrupt ID returned when there is no interrupt pending. */
+#define HF_INVALID_INTID 0xffffffff
diff --git a/src/api.c b/src/api.c
index 526e299..dac5388 100644
--- a/src/api.c
+++ b/src/api.c
@@ -18,6 +18,9 @@
 
 #include <assert.h>
 
+#include "hf/arch/cpu.h"
+
+#include "hf/dlog.h"
 #include "hf/std.h"
 #include "hf/vm.h"
 
@@ -450,3 +453,176 @@
 
 	return ret;
 }
+
+/**
+ * Enables or disables a given interrupt ID for the calling vCPU.
+ *
+ * Returns 0 on success, or -1 if the intid is invalid.
+ */
+int64_t api_enable_interrupt(uint32_t intid, bool enable, struct vcpu *current)
+{
+	uint32_t intid_index = intid / INTERRUPT_REGISTER_BITS;
+	uint32_t intid_mask = 1u << (intid % INTERRUPT_REGISTER_BITS);
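+	/* For example, intid 35 maps to bit 3 of word 1 (35 = 32 + 3). */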
+	if (intid >= HF_NUM_INTIDS) {
+		return -1;
+	}
+
+	sl_lock(&current->lock);
+	if (enable) {
+		current->interrupts.interrupt_enabled[intid_index] |=
+			intid_mask;
+		/* If it is pending, trigger a virtual IRQ. */
+		if (current->interrupts.interrupt_pending[intid_index] &
+		    intid_mask) {
+			arch_regs_set_virtual_interrupt(&current->regs, true);
+		}
+	} else {
+		current->interrupts.interrupt_enabled[intid_index] &=
+			~intid_mask;
+	}
+
+	sl_unlock(&current->lock);
+	return 0;
+}
+
+/**
+ * Returns the ID of the next pending interrupt for the calling vCPU, and
+ * acknowledges it (i.e. marks it as no longer pending). Returns
+ * HF_INVALID_INTID if there are no pending interrupts.
+ */
+uint32_t api_get_and_acknowledge_interrupt(struct vcpu *current)
+{
+	uint8_t i;
+	uint32_t first_interrupt = HF_INVALID_INTID;
+	bool interrupts_remain = false;
+
+	/*
+	 * Find the first enabled and pending interrupt ID, mark it as no
+	 * longer pending, and return it.
+	 */
+	sl_lock(&current->lock);
+	for (i = 0; i < HF_NUM_INTIDS / INTERRUPT_REGISTER_BITS; ++i) {
+		uint32_t enabled_and_pending =
+			current->interrupts.interrupt_enabled[i] &
+			current->interrupts.interrupt_pending[i];
+		if (enabled_and_pending == 0) {
+			continue;
+		}
+
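+		/*
+		 * If an interrupt was already found, the set bits in this word
+		 * just mean that more enabled interrupts are still pending.
+		 */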
+		if (first_interrupt != HF_INVALID_INTID) {
+			interrupts_remain = true;
+			break;
+		}
+
+		uint8_t bit_index = ctz(enabled_and_pending);
+		/* Mark it as no longer pending. */
+		current->interrupts.interrupt_pending[i] &= ~(1u << bit_index);
+		first_interrupt = i * INTERRUPT_REGISTER_BITS + bit_index;
+
+		enabled_and_pending = current->interrupts.interrupt_enabled[i] &
+				      current->interrupts.interrupt_pending[i];
+		if (enabled_and_pending != 0) {
+			interrupts_remain = true;
+			break;
+		}
+	}
+	/*
+	 * If there are no more enabled and pending interrupts left, clear the
+	 * VI bit.
+	 */
+	arch_regs_set_virtual_interrupt(&current->regs, interrupts_remain);
+
+	sl_unlock(&current->lock);
+	return first_interrupt;
+}
+
+/**
+ * Returns whether the current vCPU is allowed to inject an interrupt into
+ * the given VM.
+ */
+static inline bool is_injection_allowed(uint32_t target_vm_id,
+					struct vcpu *current)
+{
+	uint32_t current_vm_id = current->vm->id;
+	/*
+	 * The primary VM is allowed to inject interrupts into any VM. Secondary
+	 * VMs are only allowed to inject interrupts into their own vCPUs.
+	 */
+	return current_vm_id == HF_PRIMARY_VM_ID ||
+	       current_vm_id == target_vm_id;
+}
+
+/**
+ * Injects a virtual interrupt of the given ID into the given target vCPU.
+ * This doesn't cause the vCPU to be run immediately; the interrupt will be
+ * taken when the vCPU is next run, which is up to the scheduler.
+ *
+ * Returns 0 on success, or -1 if the target VM or vCPU doesn't exist, the
+ * interrupt ID is invalid, or the current VM is not allowed to inject
+ * interrupts into the target VM.
+ */
+int64_t api_inject_interrupt(uint32_t target_vm_id, uint32_t target_vcpu_idx,
+			     uint32_t intid, struct vcpu *current,
+			     struct vcpu **next)
+{
+	uint32_t intid_index = intid / INTERRUPT_REGISTER_BITS;
+	uint32_t intid_mask = 1u << (intid % INTERRUPT_REGISTER_BITS);
+	struct vcpu *target_vcpu;
+	struct vm *target_vm = vm_get(target_vm_id);
+
+	if (intid >= HF_NUM_INTIDS) {
+		return -1;
+	}
+	if (target_vm == NULL) {
+		return -1;
+	}
+	if (target_vcpu_idx >= target_vm->vcpu_count) {
+		/* The requested vcpu must exist. */
+		return -1;
+	}
+	if (!is_injection_allowed(target_vm_id, current)) {
+		return -1;
+	}
+	target_vcpu = &target_vm->vcpus[target_vcpu_idx];
+
+	dlog("Injecting IRQ %d for VM %d VCPU %d from VM %d CPU %d\n", intid,
+	     target_vm_id, target_vcpu_idx, current->vm->id, current->cpu->id);
+
+	sl_lock(&target_vcpu->lock);
+
+	/* Make it pending. */
+	target_vcpu->interrupts.interrupt_pending[intid_index] |= intid_mask;
+
+	/* If it is enabled, change state and trigger a virtual IRQ. */
+	if (target_vcpu->interrupts.interrupt_enabled[intid_index] &
+	    intid_mask) {
+		dlog("IRQ %d is enabled for VM %d VCPU %d, setting VI.\n",
+		     intid, target_vm_id, target_vcpu_idx);
+		arch_regs_set_virtual_interrupt(&target_vcpu->regs, true);
+
+		if (target_vcpu->state == vcpu_state_blocked_interrupt) {
+			dlog("Changing state from blocked_interrupt to "
+			     "ready.\n");
+			target_vcpu->state = vcpu_state_ready;
+		}
+
+		if (current->vm->id != HF_PRIMARY_VM_ID &&
+		    current != target_vcpu) {
+			/*
+			 * Switch to the primary so that it can switch to the
+			 * target.
+			 */
+			struct hf_vcpu_run_return ret = {
+				.code = HF_VCPU_RUN_WAKE_UP,
+				.wake_up.vm_id = target_vm_id,
+				.wake_up.vcpu = target_vcpu_idx,
+			};
+			*next = api_switch_to_primary(current, ret,
+						      vcpu_state_ready);
+		}
+	}
+
+	sl_unlock(&target_vcpu->lock);
+
+	return 0;
+}
diff --git a/src/arch/aarch64/BUILD.gn b/src/arch/aarch64/BUILD.gn
index 8909fb8..b6efde9 100644
--- a/src/arch/aarch64/BUILD.gn
+++ b/src/arch/aarch64/BUILD.gn
@@ -25,6 +25,7 @@
   ]
 
   sources += [
+    "cpu.c",
     "handler.c",
     "mm.c",
     "offsets.c",
diff --git a/src/arch/aarch64/cpu.c b/src/arch/aarch64/cpu.c
new file mode 100644
index 0000000..ee7e056
--- /dev/null
+++ b/src/arch/aarch64/cpu.c
@@ -0,0 +1,41 @@
+/*
+ * Copyright 2018 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "hf/cpu.h"
+
+#include <stdbool.h>
+#include <stddef.h>
+
+#include "hf/arch/cpu.h"
+
+#include "hf/addr.h"
+#include "hf/dlog.h"
+
+#include "msr.h"
+
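+/* Virtual IRQ pending bit (VI) in HCR_EL2. */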
+#define HCR_EL2_VI (1u << 7)
+
+void arch_regs_set_virtual_interrupt(struct arch_regs *r, bool enable)
+{
+	if (enable) {
+		r->lazy.hcr_el2 |= HCR_EL2_VI;
+	} else {
+		r->lazy.hcr_el2 &= ~HCR_EL2_VI;
+	}
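+	/*
+	 * If these are the registers of the vCPU currently running on this
+	 * physical CPU, the saved copy won't be reloaded until the next vCPU
+	 * switch, so apply the change to the live HCR_EL2 as well.
+	 */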
+	if (&current()->regs == r) {
+		write_msr(hcr_el2, r->lazy.hcr_el2);
+	}
+}
diff --git a/src/arch/aarch64/handler.c b/src/arch/aarch64/handler.c
index bdc3d46..d04a368 100644
--- a/src/arch/aarch64/handler.c
+++ b/src/arch/aarch64/handler.c
@@ -32,11 +32,6 @@
 int32_t smc(uintreg_t arg0, uintreg_t arg1, uintreg_t arg2, uintreg_t arg3);
 void cpu_entry(struct cpu *c);
 
-static struct vcpu *current(void)
-{
-	return (struct vcpu *)read_msr(tpidr_el2);
-}
-
 void irq_current_exception(uintreg_t elr, uintreg_t spsr)
 {
 	(void)elr;
@@ -254,6 +249,19 @@
 		ret.user_ret = api_mailbox_clear(current());
 		break;
 
+	case HF_ENABLE_INTERRUPT:
+		ret.user_ret = api_enable_interrupt(arg1, arg2, current());
+		break;
+
+	case HF_GET_AND_ACKNOWLEDGE_INTERRUPT:
+		ret.user_ret = api_get_and_acknowledge_interrupt(current());
+		break;
+
+	case HF_INJECT_INTERRUPT:
+		ret.user_ret = api_inject_interrupt(arg1, arg2, arg3, current(),
+						    &ret.new);
+		break;
+
 	default:
 		ret.user_ret = -1;
 	}
diff --git a/src/arch/aarch64/inc/hf/arch/cpu.h b/src/arch/aarch64/inc/hf/arch/cpu.h
index 9fb334f..175551f 100644
--- a/src/arch/aarch64/inc/hf/arch/cpu.h
+++ b/src/arch/aarch64/inc/hf/arch/cpu.h
@@ -124,3 +124,5 @@
 {
 	r->r[0] = v;
 }
+
+void arch_regs_set_virtual_interrupt(struct arch_regs *r, bool enable);
diff --git a/src/arch/aarch64/msr.h b/src/arch/aarch64/msr.h
index fbc7188..9d09d7a 100644
--- a/src/arch/aarch64/msr.h
+++ b/src/arch/aarch64/msr.h
@@ -33,3 +33,8 @@
 				 :                            \
 				 : "rZ"((uintreg_t)(value))); \
 	} while (0)
+
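+/** Returns the vCPU that is currently running on this physical CPU. */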
+static inline struct vcpu *current(void)
+{
+	return (struct vcpu *)read_msr(tpidr_el2);
+}
diff --git a/src/arch/fake/inc/hf/arch/cpu.h b/src/arch/fake/inc/hf/arch/cpu.h
index d8ede27..8cac590 100644
--- a/src/arch/fake/inc/hf/arch/cpu.h
+++ b/src/arch/fake/inc/hf/arch/cpu.h
@@ -16,6 +16,7 @@
 
 #pragma once
 
+#include <stdbool.h>
 #include <stdint.h>
 
 #include "hf/addr.h"
@@ -25,6 +26,7 @@
 struct arch_regs {
 	uintreg_t r[5];
 	uintreg_t vcpu_index;
+	bool virtual_interrupt;
 };
 
 static inline void arch_irq_disable(void)
@@ -48,6 +50,7 @@
 	(void)pc;
 	r->r[0] = arg;
 }
+
 static inline void arch_regs_set_vcpu_index(struct arch_regs *r, uint16_t index)
 {
 	r->vcpu_index = index;
@@ -57,3 +60,9 @@
 {
 	r->r[0] = v;
 }
+
+static inline void arch_regs_set_virtual_interrupt(struct arch_regs *r,
+						   bool enable)
+{
+	r->virtual_interrupt = enable;
+}