Add API for communication between primary and secondary VMs.
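
The new hypercalls let the primary VM exchange page-sized messages with
secondary VMs: each VM registers a page-aligned send and receive buffer with
HF_VM_CONFIGURE, the primary copies a request into a secondary's receive
buffer with HF_RPC_REQUEST, the secondary picks it up with HF_RPC_READ_REQUEST
and answers with HF_RPC_REPLY, and HF_RPC_ACK retires the current message so
the next one can be delivered.

The sketch below shows how the primary VM might drive one request/response
round trip. It is illustrative only: this patch adds just the hypervisor side,
so the hf_call() wrapper, the register convention it assumes (function ID in
x0, arguments in x1/x2, result back in x0, mirroring the dispatch in
src/arch/aarch64/handler.c), the identity-mapped buffer addresses and the
rpc_example() helper are assumptions rather than part of the change.

#include <stddef.h>
#include <stdint.h>

/* Constants mirroring src/arch/aarch64/inc/arch_api.h. */
#define HF_VCPU_RESPONSE_READY 0x03
#define HF_VCPU_RUN            0xff00
#define HF_VM_CONFIGURE        0xff03
#define HF_RPC_REQUEST         0xff04
#define HF_RPC_ACK             0xff06

#define PAGE_SIZE 4096

/* Hypothetical guest-side hypercall wrapper; not part of this patch. */
static inline int64_t hf_call(uint64_t id, uint64_t a1, uint64_t a2)
{
        register uint64_t x0 __asm__("x0") = id;
        register uint64_t x1 __asm__("x1") = a1;
        register uint64_t x2 __asm__("x2") = a2;

        __asm__ volatile("hvc #0" : "+r"(x0) : "r"(x1), "r"(x2) : "memory");

        return (int64_t)x0;
}

static char send_page[PAGE_SIZE] __attribute__((aligned(PAGE_SIZE)));
static char recv_page[PAGE_SIZE] __attribute__((aligned(PAGE_SIZE)));

/*
 * One-shot example: sends `len` bytes already placed in send_page to
 * secondary VM `vm_idx` and waits for the reply to land in recv_page.
 * Returns the reply length, or -1 on failure.
 */
static int64_t rpc_example(uint32_t vm_idx, size_t len)
{
        int64_t run;

        /*
         * Register the buffers. This is only allowed once per VM, so a real
         * driver would do it at init rather than per request.
         */
        if (hf_call(HF_VM_CONFIGURE, (uint64_t)send_page,
                    (uint64_t)recv_page) != 0) {
                return -1;
        }

        /* Ask the hypervisor to copy the request to the target VM. */
        if (hf_call(HF_RPC_REQUEST, vm_idx, len) < 0) {
                return -1;
        }

        /*
         * Run the target's vcpu 0 until it produces a reply. Simplified: a
         * real driver would sleep on HF_VCPU_WAIT_FOR_INTERRUPT rather than
         * spin.
         */
        do {
                run = hf_call(HF_VCPU_RUN, vm_idx, 0);
        } while ((run & 0xff) != HF_VCPU_RESPONSE_READY);

        /* The reply is now in recv_page; ack it so the next one can land. */
        hf_call(HF_RPC_ACK, 0, 0);

        return run >> 8;
}

On the secondary side the mirror image is HF_VM_CONFIGURE once, then a loop of
HF_RPC_READ_REQUEST (blocking), handling the data in its receive buffer, and
HF_RPC_REPLY with the ack flag set to retire the request.
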
diff --git a/inc/api.h b/inc/api.h
index edcd9e5..851c2da 100644
--- a/inc/api.h
+++ b/inc/api.h
@@ -9,9 +9,18 @@
 extern uint32_t secondary_vm_count;
 extern struct vm primary_vm;
 
+struct vcpu *api_switch_to_primary(size_t primary_retval,
+				   enum vcpu_state secondary_state);
+
 int32_t api_vm_get_count(void);
 int32_t api_vcpu_get_count(uint32_t vm_idx);
 int32_t api_vcpu_run(uint32_t vm_idx, uint32_t vcpu_idx, struct vcpu **next);
 struct vcpu *api_wait_for_interrupt(void);
+int32_t api_vm_configure(paddr_t send, paddr_t recv);
+
+int32_t api_rpc_request(uint32_t vm_idx, size_t size);
+int32_t api_rpc_read_request(bool block, struct vcpu **next);
+int32_t api_rpc_reply(size_t size, bool ack, struct vcpu **next);
+int32_t api_rpc_ack(void);
 
 #endif /* _API_H */
diff --git a/inc/cpu.h b/inc/cpu.h
index 30433ad..c1ab1c6 100644
--- a/inc/cpu.h
+++ b/inc/cpu.h
@@ -8,11 +8,20 @@
 #include "arch_cpu.h"
 #include "spinlock.h"
 
+enum vcpu_state {
+	vcpu_state_off,
+	vcpu_state_ready,
+	vcpu_state_running,
+	vcpu_state_blocked_rpc,
+	vcpu_state_blocked_interrupt,
+};
+
 struct vcpu {
 	struct spinlock lock;
-	bool is_on;
-	struct arch_regs regs;
+	enum vcpu_state state;
 	struct vm *vm;
+	struct vcpu *rpc_next;
+	struct arch_regs regs;
 };
 
 /* TODO: Update alignment such that cpus are in different cache lines. */
diff --git a/inc/mm.h b/inc/mm.h
index 7eb3169..df027dd 100644
--- a/inc/mm.h
+++ b/inc/mm.h
@@ -45,6 +45,7 @@
 		   paddr_t paddr, int mode);
 bool mm_ptable_map_page(struct mm_ptable *t, vaddr_t va, paddr_t pa, int mode);
 bool mm_ptable_unmap(struct mm_ptable *t, vaddr_t begin, vaddr_t end, int mode);
+bool mm_ptable_is_mapped(struct mm_ptable *t, vaddr_t addr, int mode);
 void mm_ptable_defrag(struct mm_ptable *t, int mode);
 bool mm_ptable_unmap_hypervisor(struct mm_ptable *t, int mode);
 
diff --git a/inc/vm.h b/inc/vm.h
index 847d5b7..2fb3bbf 100644
--- a/inc/vm.h
+++ b/inc/vm.h
@@ -4,7 +4,23 @@
 #include "cpu.h"
 #include "mm.h"
 
+enum rpc_state {
+	rpc_state_idle,
+	rpc_state_pending,
+	rpc_state_inflight,
+};
+
+struct rpc {
+	enum rpc_state state;
+	int16_t recv_bytes;
+	void *recv;
+	const void *send;
+	struct vcpu *recv_waiter;
+};
+
 struct vm {
+	struct spinlock lock;
+	struct rpc rpc;
 	struct mm_ptable ptable;
 	uint32_t vcpu_count;
 	struct vcpu vcpus[MAX_CPUS];
diff --git a/src/api.c b/src/api.c
index 0030473..ae429f0 100644
--- a/src/api.c
+++ b/src/api.c
@@ -1,6 +1,7 @@
 #include "api.h"
 
 #include "arch_api.h"
+#include "std.h"
 #include "vm.h"
 
 struct vm secondary_vm[MAX_VMS];
@@ -8,6 +9,32 @@
 struct vm primary_vm;
 
 /**
+ * Switches the physical CPU back to the corresponding vcpu of the primary VM.
+ */
+struct vcpu *api_switch_to_primary(size_t primary_retval,
+				   enum vcpu_state secondary_state)
+{
+	struct vcpu *vcpu = cpu()->current;
+	struct vcpu *next = &primary_vm.vcpus[cpu_index(cpu())];
+
+	/* Switch back to primary VM. */
+	vm_set_current(&primary_vm);
+
+	/*
+	 * Indicate to the primary VM, via the return value of its HF_VCPU_RUN
+	 * call, why this vcpu stopped running.
+	 */
+	arch_regs_set_retval(&next->regs, primary_retval);
+
+	/* Move the vcpu into the state requested by the caller. */
+	sl_lock(&vcpu->lock);
+	vcpu->state = secondary_state;
+	sl_unlock(&vcpu->lock);
+
+	return next;
+}
+
+/**
  * Returns the number of VMs configured to run.
  */
 int32_t api_vm_get_count(void)
@@ -32,8 +59,9 @@
  */
 int32_t api_vcpu_run(uint32_t vm_idx, uint32_t vcpu_idx, struct vcpu **next)
 {
-	struct vm *vm = secondary_vm + vm_idx;
+	struct vm *vm;
 	struct vcpu *vcpu;
+	int32_t ret;
 
 	/* Only the primary VM can switch vcpus. */
 	if (cpu()->current->vm != &primary_vm) {
@@ -44,15 +72,25 @@
 		return HF_VCPU_WAIT_FOR_INTERRUPT;
 	}
 
-	vcpu = vm->vcpus + vcpu_idx;
-	if (vcpu_idx >= vm->vcpu_count || !vcpu->is_on) {
+	vm = secondary_vm + vm_idx;
+	if (vcpu_idx >= vm->vcpu_count) {
 		return HF_VCPU_WAIT_FOR_INTERRUPT;
 	}
 
-	vm_set_current(vm);
-	*next = vcpu;
+	vcpu = vm->vcpus + vcpu_idx;
 
-	return HF_VCPU_YIELD;
+	sl_lock(&vcpu->lock);
+	if (vcpu->state != vcpu_state_ready) {
+		ret = HF_VCPU_WAIT_FOR_INTERRUPT;
+	} else {
+		vcpu->state = vcpu_state_running;
+		vm_set_current(vm);
+		*next = vcpu;
+		ret = HF_VCPU_YIELD;
+	}
+	sl_unlock(&vcpu->lock);
+
+	return ret;
 }
 
 /**
@@ -61,16 +99,289 @@
  */
 struct vcpu *api_wait_for_interrupt(void)
 {
-	struct vcpu *vcpu = &primary_vm.vcpus[cpu_index(cpu())];
+	return api_switch_to_primary(HF_VCPU_WAIT_FOR_INTERRUPT,
+				     vcpu_state_blocked_interrupt);
+}
 
-	/* Switch back to primary VM. */
-	vm_set_current(&primary_vm);
+/**
+ * Configures the VM to send/receive data through the specified pages. The pages
+ * must not be shared.
+ */
+int32_t api_vm_configure(paddr_t send, paddr_t recv)
+{
+	struct vm *vm = cpu()->current->vm;
+	int32_t ret;
+
+	/* Fail if addresses are not page-aligned. */
+	if ((recv & (PAGE_SIZE - 1)) || (send & (PAGE_SIZE - 1))) {
+		return -1;
+	}
+
+	sl_lock(&vm->lock);
+
+	/* We only allow these to be set up once. */
+	if (vm->rpc.recv || vm->rpc.send) {
+		ret = -1;
+		goto exit;
+	}
 
 	/*
-	 * Inidicate to primary VM that this vcpu blocked waiting for an
-	 * interrupt.
+	 * TODO: Once memory sharing is implemented, we need to make sure that
+	 * these pages aren't and won't be shared.
 	 */
-	arch_regs_set_retval(&vcpu->regs, HF_VCPU_WAIT_FOR_INTERRUPT);
 
-	return vcpu;
+	/*
+	 * Check that both pages are accessible from the VM, i.e., ensure that
+	 * the caller isn't trying to use another VM's memory.
+	 */
+	if (!mm_ptable_is_mapped(&vm->ptable, recv, 0) ||
+	    !mm_ptable_is_mapped(&vm->ptable, send, 0)) {
+		ret = -1;
+		goto exit;
+	}
+
+	/* Map the send page as read-only in the hypervisor address space. */
+	if (!mm_map((vaddr_t)send, (vaddr_t)send + PAGE_SIZE, send,
+		    MM_MODE_R)) {
+		ret = -1;
+		goto exit;
+	}
+
+	/*
+	 * Map the receive page as writable in the hypervisor address space. On
+	 * failure, unmap the send page before returning.
+	 */
+	if (!mm_map((vaddr_t)recv, (vaddr_t)recv + PAGE_SIZE, recv,
+		    MM_MODE_W)) {
+		mm_unmap((vaddr_t)send, (vaddr_t)send + PAGE_SIZE, 0);
+		ret = -1;
+		goto exit;
+	}
+
+	/* Save pointers to the pages. */
+	vm->rpc.send = (const void *)(vaddr_t)send;
+	vm->rpc.recv = (void *)(vaddr_t)recv;
+
+	/* TODO: Notify any waiters. */
+
+	ret = 0;
+exit:
+	sl_unlock(&vm->lock);
+
+	return ret;
+}
+
+/**
+ * Sends an RPC request from the primary VM to a secondary VM. Data is copied
+ * from the caller's send buffer to the destination's receive buffer.
+ */
+int32_t api_rpc_request(uint32_t vm_idx, size_t size)
+{
+	struct vm *from = cpu()->current->vm;
+	struct vm *to;
+	const void *from_buf;
+	int32_t ret;
+
+	/* Basic argument validation. */
+	if (size > PAGE_SIZE || vm_idx >= secondary_vm_count) {
+		return -1;
+	}
+
+	/* Only the primary VM can make calls. */
+	if (from != &primary_vm) {
+		return -1;
+	}
+
+	/*
+	 * Check that the sender has configured its send buffer. It is safe to
+	 * use from_buf after releasing the lock because the buffer cannot be
+	 * modified once it's configured.
+	 */
+	sl_lock(&from->lock);
+	from_buf = from->rpc.send;
+	sl_unlock(&from->lock);
+	if (!from_buf) {
+		return -1;
+	}
+
+	to = secondary_vm + vm_idx;
+	sl_lock(&to->lock);
+
+	if (to->rpc.state != rpc_state_idle || !to->rpc.recv) {
+		/* Fail if the target isn't currently ready to receive data. */
+		ret = -1;
+	} else {
+		/* Copy data. */
+		memcpy(to->rpc.recv, from_buf, size);
+		to->rpc.recv_bytes = size;
+
+		if (!to->rpc.recv_waiter) {
+			to->rpc.state = rpc_state_pending;
+			ret = 0;
+		} else {
+			struct vcpu *to_vcpu = to->rpc.recv_waiter;
+
+			to->rpc.state = rpc_state_inflight;
+
+			/*
+			 * Take target vcpu out of waiter list and mark as ready
+			 * to run again.
+			 */
+			sl_lock(&to_vcpu->lock);
+			to->rpc.recv_waiter = to_vcpu->rpc_next;
+			to_vcpu->state = vcpu_state_ready;
+			arch_regs_set_retval(&to_vcpu->regs, size);
+			sl_unlock(&to_vcpu->lock);
+
+			ret = to_vcpu - to->vcpus + 1;
+		}
+	}
+
+	sl_unlock(&to->lock);
+
+	return ret;
+}
+
+/**
+ * Reads a request sent by a previous call to api_rpc_request. If one isn't
+ * available, this function can optionally block the caller until one becomes
+ * available.
+ *
+ * Once the caller has finished handling a request, it must acknowledge it by
+ * calling either api_rpc_reply or api_rpc_ack. No new requests can be accepted
+ * until the current one is acknowledged.
+ */
+int32_t api_rpc_read_request(bool block, struct vcpu **next)
+{
+	struct vcpu *vcpu = cpu()->current;
+	struct vm *vm = vcpu->vm;
+	int32_t ret;
+
+	/* Only secondary VMs can receive calls. */
+	if (vm == &primary_vm) {
+		return -1;
+	}
+
+	sl_lock(&vm->lock);
+	if (vm->rpc.state == rpc_state_pending) {
+		ret = vm->rpc.recv_bytes;
+		vm->rpc.state = rpc_state_inflight;
+	} else if (!block) {
+		ret = -1;
+	} else {
+		sl_lock(&vcpu->lock);
+		vcpu->state = vcpu_state_blocked_rpc;
+
+		/* Push vcpu into waiter list. */
+		vcpu->rpc_next = vm->rpc.recv_waiter;
+		vm->rpc.recv_waiter = vcpu;
+		sl_unlock(&vcpu->lock);
+
+		/* Switch back to primary vm. */
+		*next = &primary_vm.vcpus[cpu_index(cpu())];
+		vm_set_current(&primary_vm);
+
+		/*
+		 * Indicate to primary VM that this vcpu blocked waiting for an
+		 * interrupt.
+		 */
+		arch_regs_set_retval(&(*next)->regs,
+				     HF_VCPU_WAIT_FOR_INTERRUPT);
+		ret = 0;
+	}
+	sl_unlock(&vm->lock);
+
+	return ret;
+}
+
+/**
+ * Sends a reply from a secondary VM to the primary VM. Data is copied from the
+ * caller's send buffer to the destination's receive buffer.
+ *
+ * It can optionally acknowledge the pending request.
+ */
+int32_t api_rpc_reply(size_t size, bool ack, struct vcpu **next)
+{
+	struct vm *from = cpu()->current->vm;
+	struct vm *to;
+	const void *from_buf;
+	/* Basic argument validation. */
+	if (size > PAGE_SIZE) {
+		return -1;
+	}
+
+	/* Only the secondary VM can send responses. */
+	if (from == &primary_vm) {
+		return -1;
+	}
+
+	/* Acknowledge the current pending request if requested. */
+	if (ack) {
+		api_rpc_ack();
+	}
+
+	/*
+	 * Check that the sender has configured its send buffer. It is safe to
+	 * use from_buf after releasing the lock because the buffer cannot be
+	 * modified once it's configured.
+	 */
+	sl_lock(&from->lock);
+	from_buf = from->rpc.send;
+	sl_unlock(&from->lock);
+	if (!from_buf) {
+		return -1;
+	}
+
+	to = &primary_vm;
+	sl_lock(&to->lock);
+
+	if (to->rpc.state != rpc_state_idle || !to->rpc.recv) {
+		/*
+		 * Fail if the target isn't currently ready to receive a
+		 * response.
+		 */
+		sl_unlock(&to->lock);
+		return -1;
+	}
+
+	/* Copy data. */
+	memcpy(to->rpc.recv, from_buf, size);
+	to->rpc.recv_bytes = size;
+	to->rpc.state = rpc_state_inflight;
+	sl_unlock(&to->lock);
+
+	/*
+	 * Switch back to the primary VM so that it is aware that a response
+	 * was received, but leave the current vcpu runnable.
+	 */
+	*next = api_switch_to_primary((size << 8) | HF_VCPU_RESPONSE_READY,
+				      vcpu_state_ready);
+
+	return 0;
+}
+
+/**
+ * Acknowledges that either a request or a reply has been received and handled.
+ * After this call completes, the caller will be able to receive additional
+ * requests or replies.
+ */
+int32_t api_rpc_ack(void)
+{
+	struct vm *vm = cpu()->current->vm;
+	int32_t ret;
+
+	sl_lock(&vm->lock);
+	if (vm->rpc.state != rpc_state_inflight) {
+		ret = -1;
+	} else {
+		ret = 0;
+		vm->rpc.state = rpc_state_idle;
+	}
+	sl_unlock(&vm->lock);
+
+	if (ret == 0) {
+		/* TODO: Notify waiters, if any. */
+	}
+
+	return ret;
 }
diff --git a/src/arch/aarch64/handler.c b/src/arch/aarch64/handler.c
index 6c00067..3eec780 100644
--- a/src/arch/aarch64/handler.c
+++ b/src/arch/aarch64/handler.c
@@ -38,8 +38,9 @@
 		}
 
 	default:
-		dlog("Unknown sync exception pc=0x%x, esr=0x%x, ec=0x%x\n", elr,
-		     esr, esr >> 26);
+		dlog("Unknown current sync exception pc=0x%x, esr=0x%x, "
+		     "ec=0x%x\n",
+		     elr, esr, esr >> 26);
 		for (;;) {
 			/* do nothing */
 		}
@@ -79,6 +80,26 @@
 		ret.user_ret = api_vcpu_run(arg1, arg2, &ret.new);
 		break;
 
+	case HF_VM_CONFIGURE:
+		ret.user_ret = api_vm_configure(arg1, arg2);
+		break;
+
+	case HF_RPC_REQUEST:
+		ret.user_ret = api_rpc_request(arg1, arg2);
+		break;
+
+	case HF_RPC_READ_REQUEST:
+		ret.user_ret = api_rpc_read_request(arg1, &ret.new);
+		break;
+
+	case HF_RPC_ACK:
+		ret.user_ret = api_rpc_ack();
+		break;
+
+	case HF_RPC_REPLY:
+		ret.user_ret = api_rpc_reply(arg1, arg2, &ret.new);
+		break;
+
 	default:
 		ret.user_ret = -1;
 	}
@@ -90,10 +111,8 @@
 {
 	/* TODO: Only switch if we know the interrupt was not for the secondary
 	 * VM. */
-
 	/* Switch back to primary VM, interrupts will be handled there. */
-	vm_set_current(&primary_vm);
-	return &primary_vm.vcpus[cpu_index(cpu())];
+	return api_switch_to_primary(HF_VCPU_YIELD, vcpu_state_ready);
 }
 
 struct vcpu *sync_lower_exception(uint64_t esr)
@@ -124,8 +143,25 @@
 			/* do nothing */
 		}
 
+	case 0x20: /* EC = 100000, Instruction abort. */
+		dlog("Instruction abort: pc=0x%x, esr=0x%x, ec=0x%x",
+		     vcpu->regs.pc, esr, esr >> 26);
+		if (!(esr & (1u << 10))) { /* Check FnV bit. */
+			dlog(", far=0x%x, hpfar=0x%x", read_msr(far_el2),
+			     read_msr(hpfar_el2) << 8);
+		} else {
+			dlog(", far=invalid");
+		}
+
+		dlog(", vttbr_el2=0x%x", read_msr(vttbr_el2));
+		dlog("\n");
+		for (;;) {
+			/* do nothing */
+		}
+
 	default:
-		dlog("Unknown sync exception pc=0x%x, esr=0x%x, ec=0x%x\n",
+		dlog("Unknown lower sync exception pc=0x%x, esr=0x%x, "
+		     "ec=0x%x\n",
 		     vcpu->regs.pc, esr, esr >> 26);
 		for (;;) {
 			/* do nothing */
diff --git a/src/arch/aarch64/inc/arch_api.h b/src/arch/aarch64/inc/arch_api.h
index 82b1429..51a9880 100644
--- a/src/arch/aarch64/inc/arch_api.h
+++ b/src/arch/aarch64/inc/arch_api.h
@@ -8,11 +8,17 @@
 #define HF_VCPU_YIELD              0x00
 #define HF_VCPU_WAIT_FOR_INTERRUPT 0x01
 #define HF_VCPU_WAKE_UP            0x02
+#define HF_VCPU_RESPONSE_READY     0x03
 
 /* TODO: Define constants below according to spec. */
-#define HF_VCPU_RUN       0xff00
-#define HF_VM_GET_COUNT   0xff01
-#define HF_VCPU_GET_COUNT 0xff02
+#define HF_VCPU_RUN         0xff00
+#define HF_VM_GET_COUNT     0xff01
+#define HF_VCPU_GET_COUNT   0xff02
+#define HF_VM_CONFIGURE     0xff03
+#define HF_RPC_REQUEST      0xff04
+#define HF_RPC_READ_REQUEST 0xff05
+#define HF_RPC_ACK          0xff06
+#define HF_RPC_REPLY        0xff07
 
 /* clang-format on */
 
diff --git a/src/cpu.c b/src/cpu.c
index cab40b8..581bc11 100644
--- a/src/cpu.c
+++ b/src/cpu.c
@@ -94,19 +94,20 @@
 	memset(vcpu, 0, sizeof(*vcpu));
 	sl_init(&vcpu->lock);
 	vcpu->vm = vm;
+	vcpu->state = vcpu_state_off;
 	/* TODO: Initialize vmid register. */
 }
 
 void vcpu_on(struct vcpu *vcpu)
 {
 	sl_lock(&vcpu->lock);
-	vcpu->is_on = true;
+	vcpu->state = vcpu_state_ready;
 	sl_unlock(&vcpu->lock);
 }
 
 void vcpu_off(struct vcpu *vcpu)
 {
 	sl_lock(&vcpu->lock);
-	vcpu->is_on = false;
+	vcpu->state = vcpu_state_off;
 	sl_unlock(&vcpu->lock);
 }
diff --git a/src/mm.c b/src/mm.c
index afdbf50..00c5d3f 100644
--- a/src/mm.c
+++ b/src/mm.c
@@ -341,6 +341,50 @@
 }
 
 /**
+ * Determines if the given virtual address is mapped in the given page table
+ * by recursively traversing all levels of the page table.
+ */
+static bool mm_is_mapped_recursive(const pte_t *table, vaddr_t addr, int level)
+{
+	pte_t pte;
+	vaddr_t va_level_end = mm_level_end(addr, level);
+
+	/* It isn't mapped if it doesn't fit in the table. */
+	if (addr >= va_level_end) {
+		return false;
+	}
+
+	pte = table[mm_index(addr, level)];
+
+	if (level == 0) {
+		return arch_mm_pte_is_present(pte);
+	}
+
+	if (arch_mm_is_block_allowed(level) && arch_mm_pte_is_block(pte)) {
+		return true;
+	}
+
+	if (arch_mm_pte_is_table(pte)) {
+		return mm_is_mapped_recursive(arch_mm_pte_to_table(pte), addr,
+					      level - 1);
+	}
+
+	return false;
+}
+
+/**
+ * Determines if the given virtual address is mapped in the given page table.
+ */
+bool mm_ptable_is_mapped(struct mm_ptable *t, vaddr_t addr, int mode)
+{
+	int level = arch_mm_max_level(mode);
+
+	addr = arch_mm_clear_va(addr);
+
+	return mm_is_mapped_recursive(t->table, addr, level);
+}
+
+/**
  * Initialises the given page table.
  */
 bool mm_ptable_init(struct mm_ptable *t, uint32_t id, int mode)
diff --git a/src/vm.c b/src/vm.c
index b1d26c9..cadda86 100644
--- a/src/vm.c
+++ b/src/vm.c
@@ -1,12 +1,16 @@
 #include "vm.h"
 
 #include "cpu.h"
+#include "std.h"
 
 bool vm_init(struct vm *vm, uint32_t id, uint32_t vcpu_count)
 {
 	uint32_t i;
 
+	memset(vm, 0, sizeof(*vm));
+
 	vm->vcpu_count = vcpu_count;
+	vm->rpc.state = rpc_state_idle;
 
 	/* Do basic initialization of vcpus. */
 	for (i = 0; i < vcpu_count; i++) {