Bring back state update in api_switch_to_primary.

We need it to take vCPUs from the running state to the ready state,
otherwise they can never run again. It is safe to do so because we
already require that
VM locks be acquired before vCPU locks (when they are held concurrently):
api_mailbox_send already does this.
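
For reference, a rough sketch of the required ordering ("to" and
"to_vcpu" are illustrative names, not the exact api_mailbox_send code):

	/* vm::lock is always taken first... */
	sl_lock(&to->lock);
	/* ...and vcpu::lock only while vm::lock is still held. */
	sl_lock(&to_vcpu->lock);
	/* ... inspect/update the vcpu ... */
	sl_unlock(&to_vcpu->lock);
	sl_unlock(&to->lock);

api_switch_to_primary only takes the current vCPU's lock and never a VM
lock, so updating the vCPU state there cannot invert this order.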

Also added a test to catch errors when sending/receiving messages leaves
a vCPU in a non-runnable state.

Change-Id: I18d9aed89224876abbb2df2039437bad43603ab1
diff --git a/src/api.c b/src/api.c
index 39d0da0..526e299 100644
--- a/src/api.c
+++ b/src/api.c
@@ -23,6 +23,14 @@
 
 #include "vmapi/hf/call.h"
 
+/*
+ * To eliminate the risk of deadlocks, we define a partial order for the
+ * acquisition of locks held concurrently by the same physical CPU. Our current
+ * ordering requirements are as follows:
+ *
+ * vm::lock -> vcpu::lock
+ */
+
 static_assert(HF_MAILBOX_SIZE == PAGE_SIZE,
 	      "Currently, a page is mapped for the send and receive buffers so "
 	      "the maximum request is the size of a page.");
@@ -35,7 +43,8 @@
  * cpu.
  */
 static struct vcpu *api_switch_to_primary(struct vcpu *current,
-					  struct hf_vcpu_run_return primary_ret)
+					  struct hf_vcpu_run_return primary_ret,
+					  enum vcpu_state secondary_state)
 {
 	struct vm *primary = vm_get(HF_PRIMARY_VM_ID);
 	struct vcpu *next = &primary->vcpus[cpu_index(current->cpu)];
@@ -44,6 +53,11 @@
 	arch_regs_set_retval(&next->regs,
 			     hf_vcpu_run_return_encode(primary_ret));
 
+	/* Move the current vcpu into the requested secondary state. */
+	sl_lock(&current->lock);
+	current->state = secondary_state;
+	sl_unlock(&current->lock);
+
 	return next;
 }
 
@@ -56,7 +70,7 @@
 	struct hf_vcpu_run_return ret = {
 		.code = HF_VCPU_RUN_YIELD,
 	};
-	return api_switch_to_primary(current, ret);
+	return api_switch_to_primary(current, ret, vcpu_state_ready);
 }
 
 /**
@@ -68,13 +82,8 @@
 	struct hf_vcpu_run_return ret = {
 		.code = HF_VCPU_RUN_WAIT_FOR_INTERRUPT,
 	};
-
-	/* Mark the current vcpu as waiting for interrupt. */
-	sl_lock(&current->lock);
-	current->state = vcpu_state_blocked_interrupt;
-	sl_unlock(&current->lock);
-
-	return api_switch_to_primary(current, ret);
+	return api_switch_to_primary(current, ret,
+				     vcpu_state_blocked_interrupt);
 }
 
 /**
@@ -301,7 +310,8 @@
 			.code = HF_VCPU_RUN_MESSAGE,
 			.message.size = size,
 		};
-		*next = api_switch_to_primary(current, primary_ret);
+		*next = api_switch_to_primary(current, primary_ret,
+					      vcpu_state_ready);
 		ret = 0;
 		goto out;
 	}
@@ -350,7 +360,8 @@
 			.wake_up.vm_id = to->id,
 			.wake_up.vcpu = vcpu,
 		};
-		*next = api_switch_to_primary(current, primary_ret);
+		*next = api_switch_to_primary(current, primary_ret,
+					      vcpu_state_ready);
 		ret = 0;
 	}
 
diff --git a/test/vm/primary_with_secondaries.c b/test/vm/primary_with_secondaries.c
index 6a37090..3aa6800 100644
--- a/test/vm/primary_with_secondaries.c
+++ b/test/vm/primary_with_secondaries.c
@@ -43,6 +43,43 @@
 /* clang-format on */
 
 /**
+ * Reverses the order of the elements in the given array.
+ */
+void reverse(char *s, size_t len)
+{
+	size_t i;
+
+	for (i = 0; i < len / 2; i++) {
+		char t = s[i];
+		s[i] = s[len - 1 - i];
+		s[len - 1 - i] = t;
+	}
+}
+
+/**
+ * Finds the next lexicographic permutation of the given array, if there is one.
+ */
+void next_permutation(char *s, size_t len)
+{
+	size_t i, j;
+
+	for (i = len - 2; i < len; i--) {
+		const char t = s[i];
+		if (t >= s[i + 1]) {
+			continue;
+		}
+
+		for (j = len - 1; t >= s[j]; j--) {
+		}
+
+		s[i] = s[j];
+		s[j] = t;
+		reverse(s + i + 1, len - i - 1);
+		return;
+	}
+}
+
+/**
  * Confirm there are 3 secondary VMs as well as this primary VM.
  */
 TEST(hf_vm_get_count, three_secondary_vms)
@@ -182,6 +219,35 @@
 }
 
 /**
+ * Repeatedly send a message and receive it back from the echo VM.
+ */
+TEST(mailbox, repeated_echo)
+{
+	char message[] = "Echo this back to me!";
+	struct hf_vcpu_run_return run_res;
+	uint8_t i;
+
+	/* Configure mailbox pages. */
+	EXPECT_EQ(hf_vm_configure(send_page_addr, recv_page_addr), 0);
+
+	for (i = 0; i < 100; i++) {
+		/* Run secondary until it reaches the wait for messages. */
+		run_res = hf_vcpu_run(ECHO_VM_ID, 0);
+		EXPECT_EQ(run_res.code, HF_VCPU_RUN_WAIT_FOR_INTERRUPT);
+
+		/* Set the message, echo it and check it didn't change. */
+		next_permutation(message, sizeof(message) - 1);
+		memcpy(send_page, message, sizeof(message));
+		EXPECT_EQ(hf_mailbox_send(ECHO_VM_ID, sizeof(message)), 0);
+		run_res = hf_vcpu_run(ECHO_VM_ID, 0);
+		EXPECT_EQ(run_res.code, HF_VCPU_RUN_MESSAGE);
+		EXPECT_EQ(run_res.message.size, sizeof(message));
+		EXPECT_EQ(memcmp(recv_page, message, sizeof(message)), 0);
+		EXPECT_EQ(hf_mailbox_clear(), 0);
+	}
+}
+
+/**
  * Send a message to relay_a which will forward it to relay_b where it will be
  * sent back here.
  */