HVC calls return a 64-bit value.
64 bits provides the space required to return useful information; 32-bit
systems will need to use multiple registers to return the data.
Change-Id: Iaa94772e693b7aff04a12ad4a9628f355d44c0e8
diff --git a/driver/linux b/driver/linux
index b722f95..bb7ae41 160000
--- a/driver/linux
+++ b/driver/linux
@@ -1 +1 @@
-Subproject commit b722f95ee1dfd26dc4d54628aa343170a7d3b2de
+Subproject commit bb7ae41a9456dc4e051468e61320cac0d5248f87
diff --git a/inc/hf/api.h b/inc/hf/api.h
index 42c371b..a936558 100644
--- a/inc/hf/api.h
+++ b/inc/hf/api.h
@@ -3,15 +3,15 @@
#include "hf/cpu.h"
#include "hf/vm.h"
-int32_t api_vm_get_count(void);
-int32_t api_vcpu_get_count(uint32_t vm_id);
-int32_t api_vcpu_run(uint32_t vm_id, uint32_t vcpu_idx, struct vcpu **next);
-int32_t api_vm_configure(ipaddr_t send, ipaddr_t recv);
+int64_t api_vm_get_count(void);
+int64_t api_vcpu_get_count(uint32_t vm_id);
+int64_t api_vcpu_run(uint32_t vm_id, uint32_t vcpu_idx, struct vcpu **next);
+int64_t api_vm_configure(ipaddr_t send, ipaddr_t recv);
-int32_t api_rpc_request(uint32_t vm_id, size_t size);
-int32_t api_rpc_read_request(bool block, struct vcpu **next);
-int32_t api_rpc_reply(size_t size, bool ack, struct vcpu **next);
-int32_t api_rpc_ack(void);
+int64_t api_rpc_request(uint32_t vm_id, size_t size);
+int64_t api_rpc_read_request(bool block, struct vcpu **next);
+int64_t api_rpc_reply(size_t size, bool ack, struct vcpu **next);
+int64_t api_rpc_ack(void);
struct vcpu *api_wait_for_interrupt(void);
struct vcpu *api_yield(void);
diff --git a/inc/vmapi/hf/call.h b/inc/vmapi/hf/call.h
index 418f0c0..0650868 100644
--- a/inc/vmapi/hf/call.h
+++ b/inc/vmapi/hf/call.h
@@ -51,12 +51,12 @@
* This function must be implemented to trigger the architecture specific
* mechanism to call to the hypervisor.
*/
-size_t hf_call(size_t arg0, size_t arg1, size_t arg2, size_t arg3);
+int64_t hf_call(size_t arg0, size_t arg1, size_t arg2, size_t arg3);
/**
* Runs the given vcpu of the given vm.
*/
-static inline int32_t hf_vcpu_run(uint32_t vm_id, uint32_t vcpu_idx)
+static inline int64_t hf_vcpu_run(uint32_t vm_id, uint32_t vcpu_idx)
{
return hf_call(HF_VCPU_RUN, vm_id, vcpu_idx, 0);
}
@@ -64,7 +64,7 @@
/**
* Returns the number of secondary VMs.
*/
-static inline int32_t hf_vm_get_count(void)
+static inline int64_t hf_vm_get_count(void)
{
return hf_call(HF_VM_GET_COUNT, 0, 0, 0);
}
@@ -72,7 +72,7 @@
/**
* Returns the number of VCPUs configured in the given secondary VM.
*/
-static inline int32_t hf_vcpu_get_count(uint32_t vm_id)
+static inline int64_t hf_vcpu_get_count(uint32_t vm_id)
{
return hf_call(HF_VCPU_GET_COUNT, vm_id, 0, 0);
}
@@ -81,7 +81,7 @@
* Configures the pages to send/receive data through. The pages must not be
* shared.
*/
-static inline int32_t hf_vm_configure(hf_ipaddr_t send, hf_ipaddr_t recv)
+static inline int64_t hf_vm_configure(hf_ipaddr_t send, hf_ipaddr_t recv)
{
return hf_call(HF_VM_CONFIGURE, send, recv, 0);
}
@@ -90,7 +90,7 @@
* Called by the primary VM to send an RPC request to a secondary VM. Data is
* copied from the caller's send buffer to the destination's receive buffer.
*/
-static inline int32_t hf_rpc_request(uint32_t vm_id, size_t size)
+static inline int64_t hf_rpc_request(uint32_t vm_id, size_t size)
{
return hf_call(HF_RPC_REQUEST, vm_id, size, 0);
}
@@ -104,7 +104,7 @@
* either calling api_rpc_reply or api_rpc_ack. No new requests can be accepted
* until the current one is acknowledged.
*/
-static inline int32_t hf_rpc_read_request(bool block)
+static inline int64_t hf_rpc_read_request(bool block)
{
return hf_call(HF_RPC_READ_REQUEST, block, 0, 0);
}
@@ -114,7 +114,7 @@
* After this call completes, the caller will be able to receive additional
* requests or replies.
*/
-static inline int32_t hf_rpc_ack(void)
+static inline int64_t hf_rpc_ack(void)
{
return hf_call(HF_RPC_ACK, 0, 0, 0);
}
@@ -125,7 +125,7 @@
*
* It can optionally acknowledge the pending request.
*/
-static inline int32_t hf_rpc_reply(size_t size, bool ack)
+static inline int64_t hf_rpc_reply(size_t size, bool ack)
{
return hf_call(HF_RPC_REPLY, size, ack, 0);
}
diff --git a/src/api.c b/src/api.c
index 6b9e37c..11a2632 100644
--- a/src/api.c
+++ b/src/api.c
@@ -41,7 +41,7 @@
/**
* Returns the number of VMs configured to run.
*/
-int32_t api_vm_get_count(void)
+int64_t api_vm_get_count(void)
{
return vm_get_count();
}
@@ -49,7 +49,7 @@
/**
* Returns the number of vcpus configured in the given VM.
*/
-int32_t api_vcpu_get_count(uint32_t vm_id)
+int64_t api_vcpu_get_count(uint32_t vm_id)
{
struct vm *vm;
@@ -69,11 +69,11 @@
/**
* Runs the given vcpu of the given vm.
*/
-int32_t api_vcpu_run(uint32_t vm_id, uint32_t vcpu_idx, struct vcpu **next)
+int64_t api_vcpu_run(uint32_t vm_id, uint32_t vcpu_idx, struct vcpu **next)
{
struct vm *vm;
struct vcpu *vcpu;
- int32_t ret;
+ int64_t ret;
/* Only the primary VM can switch vcpus. */
if (cpu()->current->vm->id != HF_PRIMARY_VM_ID) {
@@ -119,14 +119,14 @@
* Configures the VM to send/receive data through the specified pages. The pages
* must not be shared.
*/
-int32_t api_vm_configure(ipaddr_t send, ipaddr_t recv)
+int64_t api_vm_configure(ipaddr_t send, ipaddr_t recv)
{
struct vm *vm = cpu()->current->vm;
paddr_t pa_send_begin;
paddr_t pa_send_end;
paddr_t pa_recv_begin;
paddr_t pa_recv_end;
- int32_t ret;
+ int64_t ret;
/* Fail if addresses are not page-aligned. */
if ((ipa_addr(send) & (PAGE_SIZE - 1)) ||
@@ -199,12 +199,12 @@
* Sends an RPC request from the primary VM to a secondary VM. Data is copied
* from the caller's send buffer to the destination's receive buffer.
*/
-int32_t api_rpc_request(uint32_t vm_id, size_t size)
+int64_t api_rpc_request(uint32_t vm_id, size_t size)
{
struct vm *from = cpu()->current->vm;
struct vm *to;
const void *from_buf;
- int32_t ret;
+ int64_t ret;
/* Basic argument validation. */
if (size > HF_RPC_REQUEST_MAX_SIZE) {
@@ -285,12 +285,12 @@
* either calling api_rpc_reply or api_rpc_ack. No new requests can be accepted
* until the current one is acknowledged.
*/
-int32_t api_rpc_read_request(bool block, struct vcpu **next)
+int64_t api_rpc_read_request(bool block, struct vcpu **next)
{
struct vcpu *vcpu = cpu()->current;
struct vm *vm = vcpu->vm;
struct vm *primary = vm_get(HF_PRIMARY_VM_ID);
- int32_t ret;
+ int64_t ret;
/* Only the secondary VMs can receive calls. */
if (vm->id == HF_PRIMARY_VM_ID) {
@@ -337,7 +337,7 @@
*
* It can optionally acknowledge the pending request.
*/
-int32_t api_rpc_reply(size_t size, bool ack, struct vcpu **next)
+int64_t api_rpc_reply(size_t size, bool ack, struct vcpu **next)
{
struct vm *from = cpu()->current->vm;
struct vm *to;
@@ -404,10 +404,10 @@
* After this call completes, the caller will be able to receive additional
* requests or replies.
*/
-int32_t api_rpc_ack(void)
+int64_t api_rpc_ack(void)
{
struct vm *vm = cpu()->current->vm;
- int32_t ret;
+ int64_t ret;
sl_lock(&vm->lock);
if (vm->rpc.state != rpc_state_inflight) {
diff --git a/src/arch/aarch64/handler.c b/src/arch/aarch64/handler.c
index 88aaf38..b982bf9 100644
--- a/src/arch/aarch64/handler.c
+++ b/src/arch/aarch64/handler.c
@@ -9,7 +9,7 @@
#include "psci.h"
struct hvc_handler_return {
- long user_ret;
+ uint64_t user_ret;
struct vcpu *new;
};
@@ -62,7 +62,7 @@
* Returns true if the request was a PSCI one, false otherwise.
*/
static bool psci_handler(uint32_t func, size_t arg0, size_t arg1, size_t arg2,
- long *ret)
+ int32_t *ret)
{
struct cpu *c;
int32_t sret;
@@ -164,9 +164,12 @@
ret.new = NULL;
- if (cpu()->current->vm->id == HF_PRIMARY_VM_ID &&
- psci_handler(arg0, arg1, arg2, arg3, &ret.user_ret)) {
- return ret;
+ if (cpu()->current->vm->id == HF_PRIMARY_VM_ID) {
+ int32_t psci_ret;
+ if (psci_handler(arg0, arg1, arg2, arg3, &psci_ret)) {
+ ret.user_ret = psci_ret;
+ return ret;
+ }
}
switch ((uint32_t)arg0 & ~PSCI_CONVENTION_MASK) {
@@ -221,7 +224,7 @@
{
struct cpu *c = cpu();
struct vcpu *vcpu = c->current;
- long ret;
+ int32_t ret;
switch (esr >> 26) {
case 0x01: /* EC = 000001, WFI or WFE. */