Let hvc_handler return 4 registers rather than just one.
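
hvc_handler now returns struct hvc_handler_return by value. Because the
struct is larger than 16 bytes, AAPCS64 returns it via memory: the caller
reserves a block and passes its address in x8, the indirect result location
register. The exception stub therefore reserves 48 bytes of stack (three
16-byte pushes of zeroes) before the call, then pops user_ret into x0-x3
and the new vcpu pointer into x4. A minimal sketch of the types involved
(the uintreg_t width and the res1-res3 field names are assumptions; only
res0 and the struct layout appear in this change):

    #include <stdint.h>

    typedef uint64_t uintreg_t; /* assumed: aarch64 register width */

    struct vcpu; /* opaque to this sketch */

    typedef struct smc_res {
            uintreg_t res0; /* the only field this change touches */
            uintreg_t res1; /* res1-res3 are assumed names for the */
            uintreg_t res2; /* remaining three result registers */
            uintreg_t res3;
    } smc_res_t;

    struct hvc_handler_return {
            smc_res_t user_ret; /* popped into x0-x3 by the stub */
            struct vcpu *new;   /* popped into x4; non-NULL means switch */
    };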

Bug: 132395846
Change-Id: Ibc86ba2f24fdfcb55f08aae48e5769cea31c2cd5
diff --git a/src/arch/aarch64/hypervisor/exceptions.S b/src/arch/aarch64/hypervisor/exceptions.S
index 1c4d642..55de601 100644
--- a/src/arch/aarch64/hypervisor/exceptions.S
+++ b/src/arch/aarch64/hypervisor/exceptions.S
@@ -143,25 +143,45 @@
 	b.ne slow_sync_lower
 
 	/*
+	 * Make room for hvc_handler_return on the stack, and point x8 (the
+	 * indirect result location register in the AAPCS64 standard) to it.
+	 * hvc_handler_return is returned through this pointer, per rule B.3 of
+	 * section 5.4.2 and section 5.5, because it is larger than 16 bytes.
+	 */
+	stp xzr, xzr, [sp, #-16]!
+	stp xzr, xzr, [sp, #-16]!
+	stp xzr, xzr, [sp, #-16]!
+	mov x8, sp
+
+	/*
 	 * Save x29 and x30, which are not saved by the callee, then jump to
 	 * HVC handler.
 	 */
 	stp x29, x30, [sp, #-16]!
 	bl hvc_handler
 	ldp x29, x30, [sp], #16
-	cbnz x1, sync_lower_switch
 
-	/* Zero out all volatile registers (except x0) and return. */
+	/* Pop the hvc_handler_return off the stack. */
+	ldp x0, x1, [sp], #16
+	ldp x2, x3, [sp], #16
+	ldr x4, [sp], #16
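+	/* x0-x3 now hold user_ret; x4 holds the new vcpu pointer. */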
+
+	cbnz x4, sync_lower_switch
+
+	/*
+	 * Zero out volatile registers (except x0-x3, which contain results) and
+	 * return.
+	 */
 	stp xzr, xzr, [sp, #-16]!
-	ldp x1, x2, [sp]
-	ldp x3, x4, [sp]
-	ldp x5, x6, [sp]
-	ldp x7, x8, [sp]
-	ldp x9, x10, [sp]
-	ldp x11, x12, [sp]
-	ldp x13, x14, [sp]
-	ldp x15, x16, [sp], #16
-	mov x17, xzr
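+	/* Each ldp reloads the zeroes just pushed; the last one pops them. */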
+	ldp x4, x5, [sp]
+	ldp x6, x7, [sp]
+	ldp x8, x9, [sp]
+	ldp x10, x11, [sp]
+	ldp x12, x13, [sp]
+	ldp x14, x15, [sp]
+	ldp x16, x17, [sp], #16
 
 	/* Restore x18, which was saved on the stack. */
 	ldr x18, [sp], #16
@@ -261,9 +281,9 @@
 	/* We'll have to switch, so save volatile state before doing so. */
 	mrs x18, tpidr_el2
 
-	/* Store zeroes in volatile register storage, except x0. */
-	stp x0, xzr, [x18, #VCPU_REGS + 8 * 0]
-	stp xzr, xzr, [x18, #VCPU_REGS + 8 * 2]
+	/* Store zeroes in volatile register storage, except x0-x3. */
+	stp x0, x1, [x18, #VCPU_REGS + 8 * 0]
+	stp x2, x3, [x18, #VCPU_REGS + 8 * 2]
 	stp xzr, xzr, [x18, #VCPU_REGS + 8 * 4]
 	stp xzr, xzr, [x18, #VCPU_REGS + 8 * 6]
 	stp xzr, xzr, [x18, #VCPU_REGS + 8 * 8]
@@ -283,7 +303,7 @@
 	stp x2, x3, [x18, #VCPU_REGS + 8 * 31]
 
 	/* Save lazy state, then switch to new vcpu. */
-	mov x0, x1
+	mov x0, x4
 
 	/* Intentional fallthrough. */
 /**
diff --git a/src/arch/aarch64/hypervisor/handler.c b/src/arch/aarch64/hypervisor/handler.c
index 1282b81..d896c80 100644
--- a/src/arch/aarch64/hypervisor/handler.c
+++ b/src/arch/aarch64/hypervisor/handler.c
@@ -50,7 +50,7 @@
 #define GET_NEXT_PC_INC(esr) (((esr) & (1u << 25)) ? 4 : 2)
 
 struct hvc_handler_return {
-	uintreg_t user_ret;
+	smc_res_t user_ret;
 	struct vcpu *new;
 };
 
@@ -370,76 +370,77 @@
 
 	ret.new = NULL;
 
-	if (psci_handler(current(), arg0, arg1, arg2, arg3, &ret.user_ret,
+	if (psci_handler(current(), arg0, arg1, arg2, arg3, &ret.user_ret.res0,
 			 &ret.new)) {
 		return ret;
 	}
 
-	if (spci_handler(arg0, arg1, arg2, arg3, &ret.user_ret, &ret.new)) {
+	if (spci_handler(arg0, arg1, arg2, arg3, &ret.user_ret.res0,
+			 &ret.new)) {
 		update_vi(ret.new);
 		return ret;
 	}
 
 	switch ((uint32_t)arg0) {
 	case HF_VM_GET_ID:
-		ret.user_ret = api_vm_get_id(current());
+		ret.user_ret.res0 = api_vm_get_id(current());
 		break;
 
 	case HF_VM_GET_COUNT:
-		ret.user_ret = api_vm_get_count();
+		ret.user_ret.res0 = api_vm_get_count();
 		break;
 
 	case HF_VCPU_GET_COUNT:
-		ret.user_ret = api_vcpu_get_count(arg1, current());
+		ret.user_ret.res0 = api_vcpu_get_count(arg1, current());
 		break;
 
 	case HF_VCPU_RUN:
-		ret.user_ret = hf_vcpu_run_return_encode(
+		ret.user_ret.res0 = hf_vcpu_run_return_encode(
 			api_vcpu_run(arg1, arg2, current(), &ret.new));
 		break;
 
 	case HF_VM_CONFIGURE:
-		ret.user_ret = api_vm_configure(ipa_init(arg1), ipa_init(arg2),
-						current(), &ret.new);
+		ret.user_ret.res0 = api_vm_configure(
+			ipa_init(arg1), ipa_init(arg2), current(), &ret.new);
 		break;
 
 	case HF_MAILBOX_CLEAR:
-		ret.user_ret = api_mailbox_clear(current(), &ret.new);
+		ret.user_ret.res0 = api_mailbox_clear(current(), &ret.new);
 		break;
 
 	case HF_MAILBOX_WRITABLE_GET:
-		ret.user_ret = api_mailbox_writable_get(current());
+		ret.user_ret.res0 = api_mailbox_writable_get(current());
 		break;
 
 	case HF_MAILBOX_WAITER_GET:
-		ret.user_ret = api_mailbox_waiter_get(arg1, current());
+		ret.user_ret.res0 = api_mailbox_waiter_get(arg1, current());
 		break;
 
 	case HF_INTERRUPT_ENABLE:
-		ret.user_ret = api_interrupt_enable(arg1, arg2, current());
+		ret.user_ret.res0 = api_interrupt_enable(arg1, arg2, current());
 		break;
 
 	case HF_INTERRUPT_GET:
-		ret.user_ret = api_interrupt_get(current());
+		ret.user_ret.res0 = api_interrupt_get(current());
 		break;
 
 	case HF_INTERRUPT_INJECT:
-		ret.user_ret = api_interrupt_inject(arg1, arg2, arg3, current(),
-						    &ret.new);
+		ret.user_ret.res0 = api_interrupt_inject(arg1, arg2, arg3,
+							 current(), &ret.new);
 		break;
 
 	case HF_SHARE_MEMORY:
-		ret.user_ret =
+		ret.user_ret.res0 =
 			api_share_memory(arg1 >> 32, ipa_init(arg2), arg3,
 					 arg1 & 0xffffffff, current());
 		break;
 
 	case HF_DEBUG_LOG:
-		ret.user_ret = api_debug_log(arg1, current());
+		ret.user_ret.res0 = api_debug_log(arg1, current());
 		break;
 
 	default:
-		ret.user_ret = -1;
+		ret.user_ret.res0 = -1;
 	}
 
 	update_vi(ret.new);