Set or clear VI bit in hvc_handler just before returning, using count.

A count of interrupts which are both enabled and pending is kept for this
purpose, and is updated whenever the enabled or pending bits change. This
handles the case where the target vCPU was already running on a different
physical CPU when an interrupt was injected and so had to be kicked by the
primary.

Also add comments explaining what happens in case that the target of an
injected interrupt is already running on a different physical CPU.

Also, add a test for the case of injecting an interrupt whose ID is
not enabled, then enabling it later.

Bug: 117270899
Change-Id: I200f547a5a72332a5e24b5109a3e6e7b66c0b59e
diff --git a/inc/hf/cpu.h b/inc/hf/cpu.h
index b477dec..10bb01f 100644
--- a/inc/hf/cpu.h
+++ b/inc/hf/cpu.h
@@ -52,6 +52,12 @@
 	uint32_t interrupt_enabled[HF_NUM_INTIDS / INTERRUPT_REGISTER_BITS];
 	/** Bitfield keeping track of which interrupts are pending. */
 	uint32_t interrupt_pending[HF_NUM_INTIDS / INTERRUPT_REGISTER_BITS];
+	/**
+	 * The number of interrupts which are currently both enabled and
+	 * pending, i.e. the number of bits set in interrupt_enabled &
+	 * interrupt_pending.
+	 */
+	uint32_t enabled_and_pending_count;
 };
 
 struct retval_state {
diff --git a/inc/vmapi/hf/call.h b/inc/vmapi/hf/call.h
index a7c58a2..a8904b6 100644
--- a/inc/vmapi/hf/call.h
+++ b/inc/vmapi/hf/call.h
@@ -167,8 +167,13 @@
  * This doesn't cause the vCPU to actually be run immediately; it will be taken
  * when the vCPU is next run, which is up to the scheduler.
  *
- * Returns 0 on success, or -1 if the target VM or vCPU doesn't exist or
- * the interrupt ID is invalid.
+ * Returns:
+ *  - -1 on failure because the target VM or vCPU doesn't exist, the interrupt
+ *    ID is invalid, or the current VM is not allowed to inject interrupts to
+ *    the target VM.
+ *  - 0 on success if no further action is needed.
+ *  - 1 if it was called by the primary VM and the primary VM now needs to wake
+ *    up or kick the target vCPU.
  */
 static inline int64_t hf_inject_interrupt(uint32_t target_vm_id,
 					  uint32_t target_vcpu_idx,
diff --git a/src/api.c b/src/api.c
index 7e6e4e6..e122107 100644
--- a/src/api.c
+++ b/src/api.c
@@ -556,14 +556,26 @@
 
 	sl_lock(&current->lock);
 	if (enable) {
+		/*
+		 * If it is pending and was not enabled before, increment the
+		 * count.
+		 */
+		if (current->interrupts.interrupt_pending[intid_index] &
+		    ~current->interrupts.interrupt_enabled[intid_index] &
+		    intid_mask) {
+			current->interrupts.enabled_and_pending_count++;
+		}
 		current->interrupts.interrupt_enabled[intid_index] |=
 			intid_mask;
-		/* If it is pending, change state and trigger a virtual IRQ. */
-		if (current->interrupts.interrupt_pending[intid_index] &
-		    intid_mask) {
-			arch_regs_set_virtual_interrupt(&current->regs, true);
-		}
 	} else {
+		/*
+		 * If it is pending and was enabled before, decrement the count.
+		 */
+		if (current->interrupts.interrupt_pending[intid_index] &
+		    current->interrupts.interrupt_enabled[intid_index] &
+		    intid_mask) {
+			current->interrupts.enabled_and_pending_count--;
+		}
 		current->interrupts.interrupt_enabled[intid_index] &=
 			~intid_mask;
 	}
@@ -581,7 +593,6 @@
 {
 	uint8_t i;
 	uint32_t first_interrupt = HF_INVALID_INTID;
-	bool interrupts_remain = false;
 
 	/*
 	 * Find the first enabled and pending interrupt ID, return it, and
@@ -592,32 +603,19 @@
 		uint32_t enabled_and_pending =
 			current->interrupts.interrupt_enabled[i] &
 			current->interrupts.interrupt_pending[i];
-		if (enabled_and_pending == 0) {
-			continue;
-		}
-
-		if (first_interrupt != HF_INVALID_INTID) {
-			interrupts_remain = true;
-			break;
-		}
-
-		uint8_t bit_index = ctz(enabled_and_pending);
-		/* Mark it as no longer pending. */
-		current->interrupts.interrupt_pending[i] &= ~(1u << bit_index);
-		first_interrupt = i * INTERRUPT_REGISTER_BITS + bit_index;
-
-		enabled_and_pending = current->interrupts.interrupt_enabled[i] &
-				      current->interrupts.interrupt_pending[i];
 		if (enabled_and_pending != 0) {
-			interrupts_remain = true;
+			uint8_t bit_index = ctz(enabled_and_pending);
+			/*
+			 * Mark it as no longer pending and decrement the count.
+			 */
+			current->interrupts.interrupt_pending[i] &=
+				~(1u << bit_index);
+			current->interrupts.enabled_and_pending_count--;
+			first_interrupt =
+				i * INTERRUPT_REGISTER_BITS + bit_index;
 			break;
 		}
 	}
-	/*
-	 * If there are no more enabled and pending interrupts left, clear the
-	 * VI bit.
-	 */
-	arch_regs_set_virtual_interrupt(&current->regs, interrupts_remain);
 
 	sl_unlock(&current->lock);
 	return first_interrupt;
@@ -644,9 +642,13 @@
  * This doesn't cause the vCPU to actually be run immediately; it will be taken
  * when the vCPU is next run, which is up to the scheduler.
  *
- * Returns 0 on success, or -1 if the target VM or vCPU doesn't exist, the
- * interrupt ID is invalid, or the current VM is not allowed to inject
- * interrupts to the target VM.
+ * Returns:
+ *  - -1 on failure because the target VM or vCPU doesn't exist, the interrupt
+ *    ID is invalid, or the current VM is not allowed to inject interrupts to
+ *    the target VM.
+ *  - 0 on success if no further action is needed.
+ *  - 1 if it was called by the primary VM and the primary VM now needs to wake
+ *    up or kick the target vCPU.
  */
 int64_t api_inject_interrupt(uint32_t target_vm_id, uint32_t target_vcpu_idx,
 			     uint32_t intid, struct vcpu *current,
@@ -657,6 +659,7 @@
 	struct vcpu *target_vcpu;
 	struct vm *target_vm = vm_get(target_vm_id);
 	bool need_vm_lock;
+	int64_t ret = 0;
 
 	if (intid >= HF_NUM_INTIDS) {
 		return -1;
@@ -688,86 +691,105 @@
 	 * lock until we are done, so nothing should change in such as way that
 	 * we need the VM lock after all.
 	 */
-	need_vm_lock = (target_vcpu->interrupts.interrupt_enabled[intid_index] &
-			intid_mask) &&
-		       target_vcpu->state == vcpu_state_blocked_mailbox;
+	need_vm_lock =
+		(target_vcpu->interrupts.interrupt_enabled[intid_index] &
+		 ~target_vcpu->interrupts.interrupt_pending[intid_index] &
+		 intid_mask) &&
+		target_vcpu->state == vcpu_state_blocked_mailbox;
 	if (need_vm_lock) {
 		sl_unlock(&target_vcpu->lock);
 		sl_lock(&target_vm->lock);
 		sl_lock(&target_vcpu->lock);
 	}
 
-	/* Make it pending. */
-	target_vcpu->interrupts.interrupt_pending[intid_index] |= intid_mask;
+	/*
+	 * We only need to change state and (maybe) trigger a virtual IRQ if it
+	 * is enabled and was not previously pending. Otherwise we can skip
+	 * everything except setting the pending bit.
+	 *
+	 * If you change this logic make sure to update the need_vm_lock logic
+	 * above to match.
+	 */
+	if (!(target_vcpu->interrupts.interrupt_enabled[intid_index] &
+	      ~target_vcpu->interrupts.interrupt_pending[intid_index] &
+	      intid_mask)) {
+		goto out;
+	}
+
+	/* Increment the count. */
+	target_vcpu->interrupts.enabled_and_pending_count++;
 
 	/*
-	 * If it is enabled, change state and trigger a virtual IRQ. If you
-	 * change this logic make sure to update the need_vm_lock logic above to
-	 * match.
+	 * Only need to update state if there was not already an
+	 * interrupt enabled and pending.
 	 */
-	if (target_vcpu->interrupts.interrupt_enabled[intid_index] &
-	    intid_mask) {
-		dlog("IRQ %d is enabled for VM %d VCPU %d, setting VI.\n",
-		     intid, target_vm_id, target_vcpu_idx);
-		arch_regs_set_virtual_interrupt(&target_vcpu->regs, true);
+	if (target_vcpu->interrupts.enabled_and_pending_count != 1) {
+		goto out;
+	}
 
-		if (target_vcpu->state == vcpu_state_blocked_interrupt) {
-			target_vcpu->state = vcpu_state_ready;
-		} else if (target_vcpu->state == vcpu_state_blocked_mailbox) {
-			/*
-			 * If you change this logic make sure to update the
-			 * need_vm_lock logic above to match.
-			 */
-			target_vcpu->state = vcpu_state_ready;
+	if (target_vcpu->state == vcpu_state_blocked_interrupt) {
+		target_vcpu->state = vcpu_state_ready;
+	} else if (target_vcpu->state == vcpu_state_blocked_mailbox) {
+		/*
+		 * If you change this logic make sure to update the need_vm_lock
+		 * logic above to match.
+		 */
+		target_vcpu->state = vcpu_state_ready;
 
-			/* Take target vCPU out of mailbox recv_waiter list. */
+		/* Take target vCPU out of mailbox recv_waiter list. */
+		/*
+		 * TODO: Consider using a doubly-linked list for
+		 * the receive waiter list to avoid the linear
+		 * search here.
+		 */
+		struct vcpu **previous_next_pointer =
+			&target_vm->mailbox.recv_waiter;
+		while (*previous_next_pointer != NULL &&
+		       *previous_next_pointer != target_vcpu) {
 			/*
-			 * TODO: Consider using a doubly-linked list for the
-			 * receive waiter list to avoid the linear search here.
+			 * TODO(qwandor): Do we need to lock the vCPUs somehow
+			 * while we walk the linked list, or is the VM lock
+			 * enough?
 			 */
-			struct vcpu **previous_next_pointer =
-				&target_vm->mailbox.recv_waiter;
-			while (*previous_next_pointer != NULL &&
-			       *previous_next_pointer != target_vcpu) {
-				/*
-				 * TODO(qwandor): Do we need to lock the vCPUs
-				 * somehow while we walk the linked list, or is
-				 * the VM lock enough?
-				 */
-				previous_next_pointer =
-					&(*previous_next_pointer)->mailbox_next;
-			}
-			if (*previous_next_pointer == NULL) {
-				dlog("Target VCPU state is "
-				     "vcpu_state_blocked_mailbox but is not in "
-				     "VM mailbox waiter list. This should "
-				     "never happen.\n");
-			} else {
-				*previous_next_pointer =
-					target_vcpu->mailbox_next;
-			}
+			previous_next_pointer =
+				&(*previous_next_pointer)->mailbox_next;
 		}
-
-		if (current->vm->id != HF_PRIMARY_VM_ID &&
-		    current != target_vcpu) {
-			/*
-			 * Switch to the primary so that it can switch to the
-			 * target.
-			 */
-			struct hf_vcpu_run_return ret = {
-				.code = HF_VCPU_RUN_WAKE_UP,
-				.wake_up.vm_id = target_vm_id,
-				.wake_up.vcpu = target_vcpu_idx,
-			};
-			*next = api_switch_to_primary(current, ret,
-						      vcpu_state_ready);
+		if (*previous_next_pointer == NULL) {
+			dlog("Target VCPU state is vcpu_state_blocked_mailbox "
+			     "but is not in VM mailbox waiter list. This "
+			     "should never happen.\n");
+		} else {
+			*previous_next_pointer = target_vcpu->mailbox_next;
 		}
 	}
 
+	if (current->vm->id == HF_PRIMARY_VM_ID) {
+		/*
+		 * If the call came from the primary VM, let it know that it
+		 * should run or kick the target vCPU.
+		 */
+		ret = 1;
+	} else if (current != target_vcpu) {
+		/*
+		 * Switch to the primary so that it can switch to the target, or
+		 * kick it if it is already running on a different physical CPU.
+		 */
+		struct hf_vcpu_run_return ret = {
+			.code = HF_VCPU_RUN_WAKE_UP,
+			.wake_up.vm_id = target_vm_id,
+			.wake_up.vcpu = target_vcpu_idx,
+		};
+		*next = api_switch_to_primary(current, ret, vcpu_state_ready);
+	}
+
+out:
+	/* Either way, make it pending. */
+	target_vcpu->interrupts.interrupt_pending[intid_index] |= intid_mask;
+
 	sl_unlock(&target_vcpu->lock);
 	if (need_vm_lock) {
 		sl_unlock(&target_vm->lock);
 	}
 
-	return 0;
+	return ret;
 }
diff --git a/src/arch/aarch64/BUILD.gn b/src/arch/aarch64/BUILD.gn
index 7254faf..e9d2926 100644
--- a/src/arch/aarch64/BUILD.gn
+++ b/src/arch/aarch64/BUILD.gn
@@ -38,7 +38,6 @@
 # Implementation of the arch interface for aarch64.
 source_set("arch") {
   sources = [
-    "cpu.c",
     "mm.c",
   ]
 }
diff --git a/src/arch/aarch64/cpu.c b/src/arch/aarch64/cpu.c
deleted file mode 100644
index ee7e056..0000000
--- a/src/arch/aarch64/cpu.c
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
- * Copyright 2018 Google LLC
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     https://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "hf/cpu.h"
-
-#include <stdbool.h>
-#include <stddef.h>
-
-#include "hf/arch/cpu.h"
-
-#include "hf/addr.h"
-#include "hf/dlog.h"
-
-#include "msr.h"
-
-#define HCR_EL2_VI (1u << 7)
-
-void arch_regs_set_virtual_interrupt(struct arch_regs *r, bool enable)
-{
-	if (enable) {
-		r->lazy.hcr_el2 |= HCR_EL2_VI;
-	} else {
-		r->lazy.hcr_el2 &= ~HCR_EL2_VI;
-	}
-	if (&current()->regs == r) {
-		write_msr(hcr_el2, r->lazy.hcr_el2);
-	}
-}
diff --git a/src/arch/aarch64/handler.c b/src/arch/aarch64/handler.c
index 990b212..39b3010 100644
--- a/src/arch/aarch64/handler.c
+++ b/src/arch/aarch64/handler.c
@@ -25,6 +25,8 @@
 #include "psci.h"
 #include "smc.h"
 
+#define HCR_EL2_VI (1u << 7)
+
 struct hvc_handler_return {
 	uintreg_t user_ret;
 	struct vcpu *new;
@@ -32,6 +34,11 @@
 
 void cpu_entry(struct cpu *c);
 
+static inline struct vcpu *current(void)
+{
+	return (struct vcpu *)read_msr(tpidr_el2);
+}
+
 void irq_current_exception(uintreg_t elr, uintreg_t spsr)
 {
 	(void)elr;
@@ -201,6 +208,33 @@
 	return true;
 }
 
+/**
+ * Sets or clears the VI bit in the HCR_EL2 register saved in the given
+ * arch_regs.
+ */
+static void set_virtual_interrupt(struct arch_regs *r, bool enable)
+{
+	if (enable) {
+		r->lazy.hcr_el2 |= HCR_EL2_VI;
+	} else {
+		r->lazy.hcr_el2 &= ~HCR_EL2_VI;
+	}
+}
+
+/**
+ * Sets or clears the VI bit in the HCR_EL2 register.
+ */
+static void set_virtual_interrupt_current(bool enable)
+{
+	uintreg_t hcr_el2 = read_msr(hcr_el2);
+	if (enable) {
+		hcr_el2 |= HCR_EL2_VI;
+	} else {
+		hcr_el2 &= ~HCR_EL2_VI;
+	}
+	write_msr(hcr_el2, hcr_el2);
+}
+
 struct hvc_handler_return hvc_handler(uintreg_t arg0, uintreg_t arg1,
 				      uintreg_t arg2, uintreg_t arg3)
 {
@@ -275,6 +309,24 @@
 		ret.user_ret = -1;
 	}
 
+	/* Set or clear VI bit. */
+	if (ret.new == NULL) {
+		/*
+		 * Not switching vCPUs, set the bit for the current vCPU
+		 * directly in the register.
+		 */
+		set_virtual_interrupt_current(
+			current()->interrupts.enabled_and_pending_count > 0);
+	} else {
+		/*
+		 * About to switch vCPUs, set the bit for the vCPU to which we
+		 * are switching in the saved copy of the register.
+		 */
+		set_virtual_interrupt(
+			&ret.new->regs,
+			ret.new->interrupts.enabled_and_pending_count > 0);
+	}
+
 	return ret;
 }
 
diff --git a/src/arch/aarch64/inc/hf/arch/cpu.h b/src/arch/aarch64/inc/hf/arch/cpu.h
index 11f07d7..12a7207 100644
--- a/src/arch/aarch64/inc/hf/arch/cpu.h
+++ b/src/arch/aarch64/inc/hf/arch/cpu.h
@@ -137,5 +137,3 @@
 {
 	r->r[0] = v;
 }
-
-void arch_regs_set_virtual_interrupt(struct arch_regs *r, bool enable);
diff --git a/src/arch/aarch64/msr.h b/src/arch/aarch64/msr.h
index 9d09d7a..fbc7188 100644
--- a/src/arch/aarch64/msr.h
+++ b/src/arch/aarch64/msr.h
@@ -33,8 +33,3 @@
 				 :                            \
 				 : "rZ"((uintreg_t)(value))); \
 	} while (0)
-
-static inline struct vcpu *current(void)
-{
-	return (struct vcpu *)read_msr(tpidr_el2);
-}
diff --git a/src/arch/fake/inc/hf/arch/cpu.h b/src/arch/fake/inc/hf/arch/cpu.h
index cd8a47c..291a005 100644
--- a/src/arch/fake/inc/hf/arch/cpu.h
+++ b/src/arch/fake/inc/hf/arch/cpu.h
@@ -60,9 +60,3 @@
 {
 	r->r[0] = v;
 }
-
-static inline void arch_regs_set_virtual_interrupt(struct arch_regs *r,
-						   bool enable)
-{
-	r->virtual_interrupt = enable;
-}
diff --git a/test/vmapi/inc/constants.h b/test/vmapi/inc/constants.h
index 1be98cd..aa30def 100644
--- a/test/vmapi/inc/constants.h
+++ b/test/vmapi/inc/constants.h
@@ -15,5 +15,6 @@
  */
 
 #define SELF_INTERRUPT_ID 5
-#define EXTERNAL_INTERRUPT_ID 7
+#define EXTERNAL_INTERRUPT_ID_A 7
 #define EXTERNAL_INTERRUPT_ID_B 8
+#define EXTERNAL_INTERRUPT_ID_C 9
diff --git a/test/vmapi/primary_with_secondaries.c b/test/vmapi/primary_with_secondaries.c
index 10b06d0..621e713 100644
--- a/test/vmapi/primary_with_secondaries.c
+++ b/test/vmapi/primary_with_secondaries.c
@@ -333,7 +333,7 @@
 	EXPECT_EQ(run_res.code, HF_VCPU_RUN_WAIT_FOR_INTERRUPT);
 
 	/* Inject the interrupt and wait for a message. */
-	hf_inject_interrupt(INTERRUPTIBLE_VM_ID, 0, EXTERNAL_INTERRUPT_ID);
+	hf_inject_interrupt(INTERRUPTIBLE_VM_ID, 0, EXTERNAL_INTERRUPT_ID_A);
 	run_res = hf_vcpu_run(INTERRUPTIBLE_VM_ID, 0);
 	EXPECT_EQ(run_res.code, HF_VCPU_RUN_MESSAGE);
 	EXPECT_EQ(run_res.message.size, sizeof(expected_response));
@@ -343,7 +343,7 @@
 	EXPECT_EQ(hf_mailbox_clear(), 0);
 
 	/* Inject the interrupt again, and wait for the same message. */
-	hf_inject_interrupt(INTERRUPTIBLE_VM_ID, 0, EXTERNAL_INTERRUPT_ID);
+	hf_inject_interrupt(INTERRUPTIBLE_VM_ID, 0, EXTERNAL_INTERRUPT_ID_A);
 	run_res = hf_vcpu_run(INTERRUPTIBLE_VM_ID, 0);
 	EXPECT_EQ(run_res.code, HF_VCPU_RUN_MESSAGE);
 	EXPECT_EQ(run_res.message.size, sizeof(expected_response));
@@ -369,7 +369,7 @@
 	EXPECT_EQ(run_res.code, HF_VCPU_RUN_WAIT_FOR_INTERRUPT);
 
 	/* Inject the interrupt and wait for a message. */
-	hf_inject_interrupt(INTERRUPTIBLE_VM_ID, 0, EXTERNAL_INTERRUPT_ID);
+	hf_inject_interrupt(INTERRUPTIBLE_VM_ID, 0, EXTERNAL_INTERRUPT_ID_A);
 	run_res = hf_vcpu_run(INTERRUPTIBLE_VM_ID, 0);
 	EXPECT_EQ(run_res.code, HF_VCPU_RUN_MESSAGE);
 	EXPECT_EQ(run_res.message.size, sizeof(expected_response));
@@ -407,7 +407,7 @@
 	EXPECT_EQ(run_res.code, HF_VCPU_RUN_WAIT_FOR_INTERRUPT);
 
 	/* Inject the interrupt and wait for a message. */
-	hf_inject_interrupt(INTERRUPTIBLE_VM_ID, 0, EXTERNAL_INTERRUPT_ID);
+	hf_inject_interrupt(INTERRUPTIBLE_VM_ID, 0, EXTERNAL_INTERRUPT_ID_A);
 	run_res = hf_vcpu_run(INTERRUPTIBLE_VM_ID, 0);
 	EXPECT_EQ(run_res.code, HF_VCPU_RUN_MESSAGE);
 	EXPECT_EQ(run_res.message.size, sizeof(expected_response));
@@ -428,3 +428,40 @@
 		  0);
 	EXPECT_EQ(hf_mailbox_clear(), 0);
 }
+
+/**
+ * Inject an interrupt which the target VM has not enabled, and then send a
+ * message telling it to enable that interrupt ID. It should then (and only
+ * then) send a message back.
+ */
+TEST(interrupts, inject_interrupt_disabled)
+{
+	const char expected_response[] = "Got IRQ 09.";
+	const char message[] = "Enable interrupt C";
+	struct hf_vcpu_run_return run_res;
+
+	/* Configure mailbox pages. */
+	EXPECT_EQ(hf_vm_configure(send_page_addr, recv_page_addr), 0);
+	run_res = hf_vcpu_run(INTERRUPTIBLE_VM_ID, 0);
+	EXPECT_EQ(run_res.code, HF_VCPU_RUN_WAIT_FOR_INTERRUPT);
+
+	/* Inject the interrupt and expect not to get a message. */
+	hf_inject_interrupt(INTERRUPTIBLE_VM_ID, 0, EXTERNAL_INTERRUPT_ID_C);
+	run_res = hf_vcpu_run(INTERRUPTIBLE_VM_ID, 0);
+	EXPECT_EQ(run_res.code, HF_VCPU_RUN_WAIT_FOR_INTERRUPT);
+	EXPECT_EQ(hf_mailbox_clear(), -1);
+
+	/*
+	 * Now send a message to the secondary to enable the interrupt ID, and
+	 * expect the response from the interrupt we sent before.
+	 */
+	memcpy(send_page, message, sizeof(message));
+	EXPECT_EQ(hf_mailbox_send(INTERRUPTIBLE_VM_ID, sizeof(message)), 0);
+	run_res = hf_vcpu_run(INTERRUPTIBLE_VM_ID, 0);
+	EXPECT_EQ(run_res.code, HF_VCPU_RUN_MESSAGE);
+	EXPECT_EQ(run_res.message.size, sizeof(expected_response));
+	EXPECT_EQ(
+		memcmp(recv_page, expected_response, sizeof(expected_response)),
+		0);
+	EXPECT_EQ(hf_mailbox_clear(), 0);
+}
diff --git a/test/vmapi/secondaries/interruptible.c b/test/vmapi/secondaries/interruptible.c
index 73c6da0..34ccd1d 100644
--- a/test/vmapi/secondaries/interruptible.c
+++ b/test/vmapi/secondaries/interruptible.c
@@ -77,19 +77,27 @@
 
 	exception_setup();
 	hf_enable_interrupt(SELF_INTERRUPT_ID, true);
-	hf_enable_interrupt(EXTERNAL_INTERRUPT_ID, true);
+	hf_enable_interrupt(EXTERNAL_INTERRUPT_ID_A, true);
 	hf_enable_interrupt(EXTERNAL_INTERRUPT_ID_B, true);
 	arch_irq_enable();
 
 	/* Loop, echo messages back to the sender. */
 	for (;;) {
 		const char ping_message[] = "Ping";
+		const char enable_message[] = "Enable interrupt C";
 		received_message = mailbox_receive_retry();
-		if (received_message.vm_id == 0 && received_message.size == 5 &&
+		if (received_message.vm_id == HF_PRIMARY_VM_ID &&
+		    received_message.size == sizeof(ping_message) &&
 		    memcmp(recv_page, ping_message, sizeof(ping_message)) ==
 			    0) {
 			/* Interrupt ourselves */
 			hf_inject_interrupt(4, 0, SELF_INTERRUPT_ID);
+		} else if (received_message.vm_id == HF_PRIMARY_VM_ID &&
+			   received_message.size == sizeof(enable_message) &&
+			   memcmp(recv_page, enable_message,
+				  sizeof(enable_message)) == 0) {
+			/* Enable interrupt ID C. */
+			hf_enable_interrupt(EXTERNAL_INTERRUPT_ID_C, true);
 		} else {
 			dlog("Got unexpected message from VM %d, size %d.\n",
 			     received_message.vm_id, received_message.size);