Subtract HF_VM_ID_OFFSET when indexing wait entries, so raw VM IDs cannot index past the end of the wait_entries array.

Change-Id: I52a1c4a69eb65d7bd7b63988057aa7d4c254ea1e
diff --git a/inc/hf/vm.h b/inc/hf/vm.h
index 7986701..8d5727e 100644
--- a/inc/hf/vm.h
+++ b/inc/hf/vm.h
@@ -137,6 +137,8 @@
 struct two_vm_locked vm_lock_both(struct vm *vm1, struct vm *vm2);
 void vm_unlock(struct vm_locked *locked);
 struct vcpu *vm_get_vcpu(struct vm *vm, spci_vcpu_index_t vcpu_index);
+struct wait_entry *vm_get_wait_entry(struct vm *vm, spci_vm_id_t for_vm);
+spci_vm_id_t vm_id_for_wait_entry(struct vm *vm, struct wait_entry *entry);
 
 bool vm_identity_map(struct vm_locked vm_locked, paddr_t begin, paddr_t end,
 		     uint32_t mode, struct mpool *ppool, ipaddr_t *ipa);
diff --git a/src/api.c b/src/api.c
index 5a65f06..d131ca1 100644
--- a/src/api.c
+++ b/src/api.c
@@ -875,7 +875,7 @@
 		 */
 		if (notify) {
 			struct wait_entry *entry =
-				&from->wait_entries[to.vm->id];
+				vm_get_wait_entry(from, to.vm->id);
 
 			/* Append waiter only if it's not there yet. */
 			if (list_empty(&entry->wait_links)) {
@@ -1161,7 +1161,7 @@
 	entry = CONTAINER_OF(vm->mailbox.ready_list.next, struct wait_entry,
 			     ready_links);
 	list_remove(&entry->ready_links);
-	ret = entry - vm->wait_entries;
+	ret = vm_id_for_wait_entry(vm, entry);
 
 exit:
 	sl_unlock(&vm->lock);
diff --git a/src/vm.c b/src/vm.c
index da2460b..3d71c51 100644
--- a/src/vm.c
+++ b/src/vm.c
@@ -149,6 +149,30 @@
 }
 
 /**
+ * Gets `vm`'s wait entry for waiting on the `for_vm`.
+ */
+struct wait_entry *vm_get_wait_entry(struct vm *vm, spci_vm_id_t for_vm)
+{
+	uint16_t index;
+
+	CHECK(for_vm >= HF_VM_ID_OFFSET);
+	index = for_vm - HF_VM_ID_OFFSET;
+	CHECK(index < MAX_VMS);
+
+	return &vm->wait_entries[index];
+}
+
+/**
+ * Gets the ID of the VM which the given VM's wait entry is for.
+ */
+spci_vm_id_t vm_id_for_wait_entry(struct vm *vm, struct wait_entry *entry)
+{
+	uint16_t index = entry - vm->wait_entries;
+
+	return index + HF_VM_ID_OFFSET;
+}
+
+/**
  * Map a range of addresses to the VM in both the MMU and the IOMMU.
  *
  * mm_vm_defrag should always be called after a series of page table updates,