Implement SPCI_RXTX_MAP to replace hf_vm_configure.
Bug: 132421502
Change-Id: I699f412f5090dead6a4c0dbfdc12f40b5aa8fe20
diff --git a/driver/linux b/driver/linux
index 73c3279..2c6e751 160000
--- a/driver/linux
+++ b/driver/linux
@@ -1 +1 @@
-Subproject commit 73c32791927ae5f7703ae0126003d156550d16ae
+Subproject commit 2c6e751257503312fdfa992a3740da79de6f85a5
diff --git a/inc/hf/api.h b/inc/hf/api.h
index 91f02b2..292f036 100644
--- a/inc/hf/api.h
+++ b/inc/hf/api.h
@@ -28,8 +28,6 @@
spci_vcpu_count_t api_vcpu_get_count(spci_vm_id_t vm_id,
const struct vcpu *current);
void api_regs_state_saved(struct vcpu *vcpu);
-int64_t api_vm_configure(ipaddr_t send, ipaddr_t recv, struct vcpu *current,
- struct vcpu **next);
int64_t api_mailbox_writable_get(const struct vcpu *current);
int64_t api_mailbox_waiter_get(spci_vm_id_t vm_id, const struct vcpu *current);
int64_t api_share_memory(spci_vm_id_t vm_id, ipaddr_t addr, size_t size,
@@ -55,6 +53,9 @@
struct spci_value api_spci_msg_recv(bool block, struct vcpu *current,
struct vcpu **next);
struct spci_value api_spci_rx_release(struct vcpu *current, struct vcpu **next);
+struct spci_value api_spci_rxtx_map(ipaddr_t send, ipaddr_t recv,
+ uint32_t page_count, struct vcpu *current,
+ struct vcpu **next);
void api_yield(struct vcpu *current, struct vcpu **next);
struct spci_value api_spci_version(void);
struct spci_value api_spci_id_get(const struct vcpu *current);
diff --git a/inc/vmapi/hf/abi.h b/inc/vmapi/hf/abi.h
index 434c7c9..ac6fe4b 100644
--- a/inc/vmapi/hf/abi.h
+++ b/inc/vmapi/hf/abi.h
@@ -25,17 +25,16 @@
/* TODO: Define constants below according to spec. */
#define HF_VM_GET_COUNT 0xff01
#define HF_VCPU_GET_COUNT 0xff02
-#define HF_VM_CONFIGURE 0xff03
-#define HF_MAILBOX_WRITABLE_GET 0xff04
-#define HF_MAILBOX_WAITER_GET 0xff05
-#define HF_INTERRUPT_ENABLE 0xff06
-#define HF_INTERRUPT_GET 0xff07
-#define HF_INTERRUPT_INJECT 0xff08
-#define HF_SHARE_MEMORY 0xff09
+#define HF_MAILBOX_WRITABLE_GET 0xff03
+#define HF_MAILBOX_WAITER_GET 0xff04
+#define HF_INTERRUPT_ENABLE 0xff05
+#define HF_INTERRUPT_GET 0xff06
+#define HF_INTERRUPT_INJECT 0xff07
+#define HF_SHARE_MEMORY 0xff08
/* Custom SPCI-like calls returned from SPCI_RUN. */
-#define HF_SPCI_RUN_WAIT_FOR_INTERRUPT 0xff0a
-#define HF_SPCI_RUN_WAKE_UP 0xff0b
+#define HF_SPCI_RUN_WAIT_FOR_INTERRUPT 0xff09
+#define HF_SPCI_RUN_WAKE_UP 0xff0a
/* This matches what Trusty and its ATF module currently use. */
#define HF_DEBUG_LOG 0xbd000000
diff --git a/inc/vmapi/hf/call.h b/inc/vmapi/hf/call.h
index ad12122..6ec1694 100644
--- a/inc/vmapi/hf/call.h
+++ b/inc/vmapi/hf/call.h
@@ -83,14 +83,24 @@
* shared.
*
* Returns:
- * - -1 on failure.
- * - 0 on success if no further action is needed.
- * - 1 if it was called by the primary VM and the primary VM now needs to wake
- * up or kick waiters.
+ * - SPCI_ERROR SPCI_INVALID_PARAMETERS if the given addresses are not properly
+ * aligned or are the same.
+ * - SPCI_ERROR SPCI_NO_MEMORY if the hypervisor was unable to map the buffers
 * due to insufficient page table memory.
+ * - SPCI_ERROR SPCI_DENIED if the pages are already mapped or are not owned by
+ * the caller.
+ * - SPCI_SUCCESS on success if no further action is needed.
+ * - SPCI_RX_RELEASE if it was called by the primary VM and the primary VM now
+ * needs to wake up or kick waiters.
*/
-static inline int64_t hf_vm_configure(hf_ipaddr_t send, hf_ipaddr_t recv)
+static inline struct spci_value spci_rxtx_map(hf_ipaddr_t send,
+ hf_ipaddr_t recv)
{
- return hf_call(HF_VM_CONFIGURE, send, recv, 0);
+ return spci_call(
+ (struct spci_value){.func = SPCI_RXTX_MAP_32,
+ .arg1 = send,
+ .arg2 = recv,
+ .arg3 = HF_MAILBOX_SIZE / SPCI_PAGE_SIZE});
}
/**
diff --git a/inc/vmapi/hf/spci.h b/inc/vmapi/hf/spci.h
index fe8e7df..1c3ae01 100644
--- a/inc/vmapi/hf/spci.h
+++ b/inc/vmapi/hf/spci.h
@@ -73,6 +73,13 @@
#define SPCI_SLEEP_INDEFINITE 0
+/**
+ * For use where the SPCI specification refers explicitly to '4K pages'. Not to
+ * be confused with PAGE_SIZE, which is the translation granule Hafnium is
+ * configured to use.
+ */
+#define SPCI_PAGE_SIZE 4096
+
/* The maximum length possible for a single message. */
#define SPCI_MSG_PAYLOAD_MAX HF_MAILBOX_SIZE
diff --git a/src/api.c b/src/api.c
index afb227b..bdade61 100644
--- a/src/api.c
+++ b/src/api.c
@@ -604,31 +604,31 @@
* after they've succeeded. If a secondary VM is running and there are waiters,
* it also switches back to the primary VM for it to wake waiters up.
*/
-static int64_t api_waiter_result(struct vm_locked locked_vm,
- struct vcpu *current, struct vcpu **next)
+static struct spci_value api_waiter_result(struct vm_locked locked_vm,
+ struct vcpu *current,
+ struct vcpu **next)
{
struct vm *vm = locked_vm.vm;
- struct spci_value ret = {
- .func = SPCI_RX_RELEASE_32,
- };
if (list_empty(&vm->mailbox.waiter_list)) {
/* No waiters, nothing else to do. */
- return 0;
+ return (struct spci_value){.func = SPCI_SUCCESS_32};
}
if (vm->id == HF_PRIMARY_VM_ID) {
/* The caller is the primary VM. Tell it to wake up waiters. */
- return 1;
+ return (struct spci_value){.func = SPCI_RX_RELEASE_32};
}
/*
* Switch back to the primary VM, informing it that there are waiters
* that need to be notified.
*/
- *next = api_switch_to_primary(current, ret, VCPU_STATE_READY);
+ *next = api_switch_to_primary(
+ current, (struct spci_value){.func = SPCI_RX_RELEASE_32},
+ VCPU_STATE_READY);
- return 0;
+ return (struct spci_value){.func = SPCI_SUCCESS_32};
}
/**
@@ -768,14 +768,19 @@
* must not be shared.
*
* Returns:
- * - -1 on failure.
- * - 0 on success if no further action is needed.
- * - 1 if it was called by the primary VM and the primary VM now needs to wake
- * up or kick waiters. Waiters should be retrieved by calling
- * hf_mailbox_waiter_get.
+ * - SPCI_ERROR SPCI_INVALID_PARAMETERS if the given addresses are not properly
+ * aligned or are the same.
+ * - SPCI_ERROR SPCI_NO_MEMORY if the hypervisor was unable to map the buffers
 * due to insufficient page table memory.
+ * - SPCI_ERROR SPCI_DENIED if the pages are already mapped or are not owned by
+ * the caller.
+ * - SPCI_SUCCESS on success if no further action is needed.
+ * - SPCI_RX_RELEASE if it was called by the primary VM and the primary VM now
+ * needs to wake up or kick waiters.
*/
-int64_t api_vm_configure(ipaddr_t send, ipaddr_t recv, struct vcpu *current,
- struct vcpu **next)
+struct spci_value api_spci_rxtx_map(ipaddr_t send, ipaddr_t recv,
+ uint32_t page_count, struct vcpu *current,
+ struct vcpu **next)
{
struct vm *vm = current->vm;
struct vm_locked vm_locked;
@@ -785,24 +790,29 @@
paddr_t pa_recv_end;
uint32_t orig_send_mode;
uint32_t orig_recv_mode;
- int64_t ret;
+ struct spci_value ret;
+
+ /* Hafnium only supports a fixed size of RX/TX buffers. */
+ if (page_count != HF_MAILBOX_SIZE / SPCI_PAGE_SIZE) {
+ return spci_error(SPCI_INVALID_PARAMETERS);
+ }
/* Fail if addresses are not page-aligned. */
if (!is_aligned(ipa_addr(send), PAGE_SIZE) ||
!is_aligned(ipa_addr(recv), PAGE_SIZE)) {
- return -1;
+ return spci_error(SPCI_INVALID_PARAMETERS);
}
/* Convert to physical addresses. */
pa_send_begin = pa_from_ipa(send);
- pa_send_end = pa_add(pa_send_begin, PAGE_SIZE);
+ pa_send_end = pa_add(pa_send_begin, HF_MAILBOX_SIZE);
pa_recv_begin = pa_from_ipa(recv);
- pa_recv_end = pa_add(pa_recv_begin, PAGE_SIZE);
+ pa_recv_end = pa_add(pa_recv_begin, HF_MAILBOX_SIZE);
/* Fail if the same page is used for the send and receive pages. */
if (pa_addr(pa_send_begin) == pa_addr(pa_recv_begin)) {
- return -1;
+ return spci_error(SPCI_INVALID_PARAMETERS);
}
/*
@@ -817,7 +827,8 @@
/* We only allow these to be setup once. */
if (vm->mailbox.send || vm->mailbox.recv) {
- goto fail;
+ ret = spci_error(SPCI_DENIED);
+ goto exit;
}
/*
@@ -829,28 +840,27 @@
!api_mode_valid_owned_and_exclusive(orig_send_mode) ||
(orig_send_mode & MM_MODE_R) == 0 ||
(orig_send_mode & MM_MODE_W) == 0) {
- goto fail;
+ ret = spci_error(SPCI_DENIED);
+ goto exit;
}
if (!mm_vm_get_mode(&vm->ptable, recv, ipa_add(recv, PAGE_SIZE),
&orig_recv_mode) ||
!api_mode_valid_owned_and_exclusive(orig_recv_mode) ||
(orig_recv_mode & MM_MODE_R) == 0) {
- goto fail;
+ ret = spci_error(SPCI_DENIED);
+ goto exit;
}
if (!api_vm_configure_pages(vm_locked, pa_send_begin, pa_send_end,
orig_send_mode, pa_recv_begin, pa_recv_end,
orig_recv_mode)) {
- goto fail;
+ ret = spci_error(SPCI_NO_MEMORY);
+ goto exit;
}
/* Tell caller about waiters, if any. */
ret = api_waiter_result(vm_locked, current, next);
- goto exit;
-
-fail:
- ret = -1;
exit:
vm_unlock(&vm_locked);
@@ -1254,11 +1264,7 @@
break;
case MAILBOX_STATE_READ:
- if (api_waiter_result(locked, current, next)) {
- ret = (struct spci_value){.func = SPCI_RX_RELEASE_32};
- } else {
- ret = (struct spci_value){.func = SPCI_SUCCESS_32};
- }
+ ret = api_waiter_result(locked, current, next);
vm->mailbox.state = MAILBOX_STATE_EMPTY;
break;
}
diff --git a/src/arch/aarch64/hypervisor/handler.c b/src/arch/aarch64/hypervisor/handler.c
index cf98bdb..737ed83 100644
--- a/src/arch/aarch64/hypervisor/handler.c
+++ b/src/arch/aarch64/hypervisor/handler.c
@@ -327,6 +327,11 @@
case SPCI_RX_RELEASE_32:
*args = api_spci_rx_release(current(), next);
return true;
+ case SPCI_RXTX_MAP_32:
+ *args = api_spci_rxtx_map(ipa_init(args->arg1),
+ ipa_init(args->arg2), args->arg3,
+ current(), next);
+ return true;
case SPCI_YIELD_32:
api_yield(current(), next);
@@ -448,11 +453,6 @@
vcpu->regs.r[0] = api_vcpu_get_count(args.arg1, vcpu);
break;
- case HF_VM_CONFIGURE:
- vcpu->regs.r[0] = api_vm_configure(
- ipa_init(args.arg1), ipa_init(args.arg2), vcpu, &next);
- break;
-
case HF_MAILBOX_WRITABLE_GET:
vcpu->regs.r[0] = api_mailbox_writable_get(vcpu);
break;
diff --git a/test/hftest/service.c b/test/hftest/service.c
index e0ce071..94adea8 100644
--- a/test/hftest/service.c
+++ b/test/hftest/service.c
@@ -98,7 +98,7 @@
/* Prepare the context. */
/* Set up the mailbox. */
- hf_vm_configure(send_addr, recv_addr);
+ spci_rxtx_map(send_addr, recv_addr);
/* Receive the name of the service to run. */
ret = spci_msg_wait();
diff --git a/test/linux/hftest_socket.c b/test/linux/hftest_socket.c
index 2e89d2b..1e55551 100644
--- a/test/linux/hftest_socket.c
+++ b/test/linux/hftest_socket.c
@@ -66,7 +66,7 @@
/* Prepare the context. */
/* Set up the mailbox. */
- hf_vm_configure(send_addr, recv_addr);
+ spci_rxtx_map(send_addr, recv_addr);
spci_rx_release();
diff --git a/test/vmapi/arch/aarch64/gicv3/busy_secondary.c b/test/vmapi/arch/aarch64/gicv3/busy_secondary.c
index 3bf660b..e04905a 100644
--- a/test/vmapi/arch/aarch64/gicv3/busy_secondary.c
+++ b/test/vmapi/arch/aarch64/gicv3/busy_secondary.c
@@ -38,7 +38,8 @@
SET_UP(busy_secondary)
{
system_setup();
- EXPECT_EQ(hf_vm_configure(send_page_addr, recv_page_addr), 0);
+ EXPECT_EQ(spci_rxtx_map(send_page_addr, recv_page_addr).func,
+ SPCI_SUCCESS_32);
SERVICE_SELECT(SERVICE_VM0, "busy", send_buffer);
}
diff --git a/test/vmapi/arch/aarch64/gicv3/gicv3.c b/test/vmapi/arch/aarch64/gicv3/gicv3.c
index a96f55a..e2cc67e 100644
--- a/test/vmapi/arch/aarch64/gicv3/gicv3.c
+++ b/test/vmapi/arch/aarch64/gicv3/gicv3.c
@@ -89,7 +89,8 @@
{
struct spci_value run_res;
- EXPECT_EQ(hf_vm_configure(send_page_addr, recv_page_addr), 0);
+ EXPECT_EQ(spci_rxtx_map(send_page_addr, recv_page_addr).func,
+ SPCI_SUCCESS_32);
SERVICE_SELECT(SERVICE_VM0, "read_systemreg_ctlr", send_buffer);
run_res = spci_run(SERVICE_VM0, 0);
@@ -105,7 +106,8 @@
{
struct spci_value run_res;
- EXPECT_EQ(hf_vm_configure(send_page_addr, recv_page_addr), 0);
+ EXPECT_EQ(spci_rxtx_map(send_page_addr, recv_page_addr).func,
+ SPCI_SUCCESS_32);
SERVICE_SELECT(SERVICE_VM0, "write_systemreg_ctlr", send_buffer);
run_res = spci_run(SERVICE_VM0, 0);
@@ -121,7 +123,8 @@
{
struct spci_value run_res;
- EXPECT_EQ(hf_vm_configure(send_page_addr, recv_page_addr), 0);
+ EXPECT_EQ(spci_rxtx_map(send_page_addr, recv_page_addr).func,
+ SPCI_SUCCESS_32);
SERVICE_SELECT(SERVICE_VM0, "write_systemreg_sre", send_buffer);
run_res = spci_run(SERVICE_VM0, 0);
diff --git a/test/vmapi/arch/aarch64/gicv3/timer_secondary.c b/test/vmapi/arch/aarch64/gicv3/timer_secondary.c
index b339987..9996a2d 100644
--- a/test/vmapi/arch/aarch64/gicv3/timer_secondary.c
+++ b/test/vmapi/arch/aarch64/gicv3/timer_secondary.c
@@ -28,7 +28,8 @@
{
system_setup();
- EXPECT_EQ(hf_vm_configure(send_page_addr, recv_page_addr), 0);
+ EXPECT_EQ(spci_rxtx_map(send_page_addr, recv_page_addr).func,
+ SPCI_SUCCESS_32);
SERVICE_SELECT(SERVICE_VM0, "timer", send_buffer);
interrupt_enable(VIRTUAL_TIMER_IRQ, true);
diff --git a/test/vmapi/primary_only/faults.c b/test/vmapi/primary_only/faults.c
index bf56b15..c56d314 100644
--- a/test/vmapi/primary_only/faults.c
+++ b/test/vmapi/primary_only/faults.c
@@ -70,7 +70,8 @@
sl_lock(&s.lock);
/* Configure the VM's buffers. */
- EXPECT_EQ(hf_vm_configure((hf_ipaddr_t)&tx[0], (hf_ipaddr_t)&rx[0]), 0);
+ EXPECT_EQ(spci_rxtx_map((hf_ipaddr_t)&tx[0], (hf_ipaddr_t)&rx[0]).func,
+ SPCI_SUCCESS_32);
/* Tell other CPU to stop and wait for it. */
s.done = true;
diff --git a/test/vmapi/primary_with_secondaries/no_services.c b/test/vmapi/primary_with_secondaries/no_services.c
index 9d43cd7..33b83e0 100644
--- a/test/vmapi/primary_with_secondaries/no_services.c
+++ b/test/vmapi/primary_with_secondaries/no_services.c
@@ -110,15 +110,15 @@
/**
* The configured send/receive addresses can't be device memory.
*/
-TEST(hf_vm_configure, fails_with_device_memory)
+TEST(spci_rxtx_map, fails_with_device_memory)
{
- EXPECT_EQ(hf_vm_configure(PAGE_SIZE, PAGE_SIZE * 2), -1);
+ EXPECT_SPCI_ERROR(spci_rxtx_map(PAGE_SIZE, PAGE_SIZE * 2), SPCI_DENIED);
}
/**
* The configured send/receive addresses can't be unaligned.
*/
-TEST(hf_vm_configure, fails_with_unaligned_pointer)
+TEST(spci_rxtx_map, fails_with_unaligned_pointer)
{
uint8_t maybe_aligned[2];
hf_ipaddr_t unaligned_addr = (hf_ipaddr_t)&maybe_aligned[1];
@@ -127,36 +127,44 @@
/* Check the the address is unaligned. */
ASSERT_EQ(unaligned_addr & 1, 1);
- EXPECT_EQ(hf_vm_configure(aligned_addr, unaligned_addr), -1);
- EXPECT_EQ(hf_vm_configure(unaligned_addr, aligned_addr), -1);
- EXPECT_EQ(hf_vm_configure(unaligned_addr, unaligned_addr), -1);
+ EXPECT_SPCI_ERROR(spci_rxtx_map(aligned_addr, unaligned_addr),
+ SPCI_INVALID_PARAMETERS);
+ EXPECT_SPCI_ERROR(spci_rxtx_map(unaligned_addr, aligned_addr),
+ SPCI_INVALID_PARAMETERS);
+ EXPECT_SPCI_ERROR(spci_rxtx_map(unaligned_addr, unaligned_addr),
+ SPCI_INVALID_PARAMETERS);
}
/**
* The configured send/receive addresses can't be the same page.
*/
-TEST(hf_vm_configure, fails_with_same_page)
+TEST(spci_rxtx_map, fails_with_same_page)
{
- EXPECT_EQ(hf_vm_configure(send_page_addr, send_page_addr), -1);
- EXPECT_EQ(hf_vm_configure(recv_page_addr, recv_page_addr), -1);
+ EXPECT_SPCI_ERROR(spci_rxtx_map(send_page_addr, send_page_addr),
+ SPCI_INVALID_PARAMETERS);
+ EXPECT_SPCI_ERROR(spci_rxtx_map(recv_page_addr, recv_page_addr),
+ SPCI_INVALID_PARAMETERS);
}
/**
* The configuration of the send/receive addresses can only happen once.
*/
-TEST(hf_vm_configure, fails_if_already_succeeded)
+TEST(spci_rxtx_map, fails_if_already_succeeded)
{
- EXPECT_EQ(hf_vm_configure(send_page_addr, recv_page_addr), 0);
- EXPECT_EQ(hf_vm_configure(send_page_addr, recv_page_addr), -1);
+ EXPECT_EQ(spci_rxtx_map(send_page_addr, recv_page_addr).func,
+ SPCI_SUCCESS_32);
+ EXPECT_SPCI_ERROR(spci_rxtx_map(send_page_addr, recv_page_addr),
+ SPCI_DENIED);
}
/**
* The configuration of the send/receive address is successful with valid
* arguments.
*/
-TEST(hf_vm_configure, succeeds)
+TEST(spci_rxtx_map, succeeds)
{
- EXPECT_EQ(hf_vm_configure(send_page_addr, recv_page_addr), 0);
+ EXPECT_EQ(spci_rxtx_map(send_page_addr, recv_page_addr).func,
+ SPCI_SUCCESS_32);
}
/**
diff --git a/test/vmapi/primary_with_secondaries/util.c b/test/vmapi/primary_with_secondaries/util.c
index 419ec6c..eaa2dd1 100644
--- a/test/vmapi/primary_with_secondaries/util.c
+++ b/test/vmapi/primary_with_secondaries/util.c
@@ -34,7 +34,8 @@
struct mailbox_buffers set_up_mailbox(void)
{
- ASSERT_EQ(hf_vm_configure(send_page_addr, recv_page_addr), 0);
+ ASSERT_EQ(spci_rxtx_map(send_page_addr, recv_page_addr).func,
+ SPCI_SUCCESS_32);
return (struct mailbox_buffers){
.send = send_page,
.recv = recv_page,