| /* |
| * Copyright 2018 The Hafnium Authors. |
| * |
| * Licensed under the Apache License, Version 2.0 (the "License"); |
| * you may not use this file except in compliance with the License. |
| * You may obtain a copy of the License at |
| * |
| * https://www.apache.org/licenses/LICENSE-2.0 |
| * |
| * Unless required by applicable law or agreed to in writing, software |
| * distributed under the License is distributed on an "AS IS" BASIS, |
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
| * See the License for the specific language governing permissions and |
| * limitations under the License. |
| */ |
| |
| #include "hf/arch/vm/interrupts.h" |
| |
| #include "hf/mm.h" |
| #include "hf/std.h" |
| |
| #include "vmapi/hf/call.h" |
| |
| #include "primary_with_secondary.h" |
| #include "test/hftest.h" |
| #include "test/vmapi/exception_handler.h" |
| #include "test/vmapi/spci.h" |
| |
/*
 * Page-sized, page-aligned buffer used as the memory region that the services
 * below donate or lend to other VMs.
 */
alignas(PAGE_SIZE) static uint8_t page[PAGE_SIZE];
| |
| TEST_SERVICE(memory_increment) |
| { |
| /* Loop, writing message to the shared memory. */ |
| for (;;) { |
| size_t i; |
| void *recv_buf = SERVICE_RECV_BUFFER(); |
| void *send_buf = SERVICE_SEND_BUFFER(); |
| |
| struct spci_value ret = spci_msg_wait(); |
| spci_vm_id_t sender = retrieve_memory_from_message( |
| recv_buf, send_buf, ret, NULL); |
| struct spci_retrieved_memory_region *memory_region = |
| (struct spci_retrieved_memory_region *)recv_buf; |
| struct spci_receiver_address_range *range = |
| spci_retrieved_memory_region_first_receiver_range( |
| memory_region); |
| uint8_t *ptr = |
| (uint8_t *)spci_memory_region_constituent_get_address( |
| &range->constituents[0]); |
| |
| /* Allow the memory to be populated. */ |
| EXPECT_EQ(spci_yield().func, SPCI_SUCCESS_32); |
| |
| /* Increment each byte of memory. */ |
| for (i = 0; i < PAGE_SIZE; ++i) { |
| ++ptr[i]; |
| } |
| |
| /* Signal completion and reset. */ |
| EXPECT_EQ(spci_rx_release().func, SPCI_SUCCESS_32); |
| spci_msg_send(hf_vm_get_id(), sender, sizeof(ptr), 0); |
| } |
| } |
| |
| TEST_SERVICE(give_memory_and_fault) |
| { |
| void *send_buf = SERVICE_SEND_BUFFER(); |
| struct spci_memory_region_constituent constituents[] = { |
| spci_memory_region_constituent_init((uint64_t)&page, 1), |
| }; |
| |
| /* Give memory to the primary. */ |
| send_memory_and_retrieve_request( |
| SPCI_MEM_DONATE_32, send_buf, hf_vm_get_id(), HF_PRIMARY_VM_ID, |
| constituents, ARRAY_SIZE(constituents), |
| SPCI_MEMORY_REGION_FLAG_CLEAR, SPCI_MEMORY_RW_X); |
| |
| exception_setup(NULL, exception_handler_yield_data_abort); |
| |
| /* Try using the memory that isn't valid unless it's been returned. */ |
| page[16] = 123; |
| |
| FAIL("Exception not generated by invalid access."); |
| } |
| |
| TEST_SERVICE(lend_memory_and_fault) |
| { |
| void *send_buf = SERVICE_SEND_BUFFER(); |
| struct spci_memory_region_constituent constituents[] = { |
| spci_memory_region_constituent_init((uint64_t)&page, 1), |
| }; |
| |
| /* Lend memory to the primary. */ |
| send_memory_and_retrieve_request( |
| SPCI_MEM_LEND_32, send_buf, hf_vm_get_id(), HF_PRIMARY_VM_ID, |
| constituents, ARRAY_SIZE(constituents), |
| SPCI_MEMORY_REGION_FLAG_CLEAR, SPCI_MEMORY_RW_X); |
| |
| exception_setup(NULL, exception_handler_yield_data_abort); |
| |
| /* Try using the memory that isn't valid unless it's been returned. */ |
| page[633] = 180; |
| |
| FAIL("Exception not generated by invalid access."); |
| } |
| |
| TEST_SERVICE(spci_memory_return) |
| { |
| struct spci_value ret = spci_msg_wait(); |
| uint8_t *ptr; |
| size_t i; |
| void *recv_buf = SERVICE_RECV_BUFFER(); |
| void *send_buf = SERVICE_SEND_BUFFER(); |
| |
| exception_setup(NULL, exception_handler_yield_data_abort); |
| |
| spci_vm_id_t sender = |
| retrieve_memory_from_message(recv_buf, send_buf, ret, NULL); |
| struct spci_retrieved_memory_region *memory_region = |
| (struct spci_retrieved_memory_region *)recv_buf; |
| struct spci_receiver_address_range *range = |
| spci_retrieved_memory_region_first_receiver_range( |
| memory_region); |
| ptr = (uint8_t *)spci_memory_region_constituent_get_address( |
| &range->constituents[0]); |
| |
| /* Check that one has access to the shared region. */ |
| for (i = 0; i < PAGE_SIZE; ++i) { |
| ptr[i]++; |
| } |
| |
| /* Give the memory back and notify the sender. */ |
| send_memory_and_retrieve_request( |
| SPCI_MEM_DONATE_32, send_buf, hf_vm_get_id(), sender, |
| range->constituents, range->constituent_count, 0, |
| SPCI_MEMORY_RW_X); |
| EXPECT_EQ(spci_rx_release().func, SPCI_SUCCESS_32); |
| |
| /* |
| * Try and access the memory which will cause a fault unless the memory |
| * has been shared back again. |
| */ |
| ptr[0] = 123; |
| |
| FAIL("Exception not generated by invalid access."); |
| } |
| |
| /** |
| * Attempt to modify above the upper bound of a memory region sent to us. |
| */ |
| TEST_SERVICE(spci_check_upper_bound) |
| { |
| struct spci_retrieved_memory_region *memory_region; |
| struct spci_receiver_address_range *range; |
| uint8_t *ptr; |
| uint8_t index; |
| |
| void *recv_buf = SERVICE_RECV_BUFFER(); |
| void *send_buf = SERVICE_SEND_BUFFER(); |
| struct spci_value ret = spci_msg_wait(); |
| |
| exception_setup(NULL, exception_handler_yield_data_abort); |
| |
| retrieve_memory_from_message(recv_buf, send_buf, ret, NULL); |
| memory_region = (struct spci_retrieved_memory_region *)recv_buf; |
| range = spci_retrieved_memory_region_first_receiver_range( |
| memory_region); |
| |
| /* Choose which constituent we want to test. */ |
| index = *(uint8_t *)spci_memory_region_constituent_get_address( |
| &range->constituents[0]); |
| ptr = (uint8_t *)spci_memory_region_constituent_get_address( |
| &range->constituents[index]); |
| EXPECT_EQ(spci_rx_release().func, SPCI_SUCCESS_32); |
| |
| /* |
| * Check that we can't access out of bounds after the region sent to us. |
| * This should trigger the exception handler. |
| */ |
| ptr[PAGE_SIZE]++; |
| |
| FAIL("Exception not generated by access out of bounds."); |
| } |
| |
| /** |
| * Attempt to modify below the lower bound of a memory region sent to us. |
| */ |
| TEST_SERVICE(spci_check_lower_bound) |
| { |
| struct spci_retrieved_memory_region *memory_region; |
| struct spci_receiver_address_range *range; |
| uint8_t *ptr; |
| uint8_t index; |
| |
| void *recv_buf = SERVICE_RECV_BUFFER(); |
| void *send_buf = SERVICE_SEND_BUFFER(); |
| struct spci_value ret = spci_msg_wait(); |
| |
| exception_setup(NULL, exception_handler_yield_data_abort); |
| |
| retrieve_memory_from_message(recv_buf, send_buf, ret, NULL); |
| memory_region = (struct spci_retrieved_memory_region *)recv_buf; |
| range = spci_retrieved_memory_region_first_receiver_range( |
| memory_region); |
| |
| /* Choose which constituent we want to test. */ |
| index = *(uint8_t *)spci_memory_region_constituent_get_address( |
| &range->constituents[0]); |
| ptr = (uint8_t *)spci_memory_region_constituent_get_address( |
| &range->constituents[index]); |
| EXPECT_EQ(spci_rx_release().func, SPCI_SUCCESS_32); |
| |
| /* |
| * Check that we can't access out of bounds before the region sent to |
| * us. This should trigger the exception handler. |
| */ |
| ptr[-1]++; |
| |
| FAIL("Exception not generated by access out of bounds."); |
| } |
| |
| /** |
| * Attempt to donate memory and then modify. |
| */ |
| TEST_SERVICE(spci_donate_secondary_and_fault) |
| { |
| uint8_t *ptr; |
| void *recv_buf = SERVICE_RECV_BUFFER(); |
| void *send_buf = SERVICE_SEND_BUFFER(); |
| |
| struct spci_value ret = spci_msg_wait(); |
| spci_vm_id_t sender = |
| retrieve_memory_from_message(recv_buf, send_buf, ret, NULL); |
| struct spci_retrieved_memory_region *memory_region = |
| (struct spci_retrieved_memory_region *)recv_buf; |
| struct spci_receiver_address_range *range = |
| spci_retrieved_memory_region_first_receiver_range( |
| memory_region); |
| |
| ASSERT_EQ(sender, HF_PRIMARY_VM_ID); |
| exception_setup(NULL, exception_handler_yield_data_abort); |
| |
| ptr = (uint8_t *)spci_memory_region_constituent_get_address( |
| &range->constituents[0]); |
| |
| /* Donate memory to next VM. */ |
| send_memory_and_retrieve_request( |
| SPCI_MEM_DONATE_32, send_buf, hf_vm_get_id(), SERVICE_VM2, |
| range->constituents, range->constituent_count, 0, |
| SPCI_MEMORY_RW_X); |
| EXPECT_EQ(spci_rx_release().func, SPCI_SUCCESS_32); |
| |
| /* Ensure that we are unable to modify memory any more. */ |
| ptr[0] = 'c'; |
| |
| FAIL("Exception not generated by invalid access."); |
| } |
| |
| /** |
| * Attempt to donate memory twice from VM. |
| */ |
| TEST_SERVICE(spci_donate_twice) |
| { |
| uint32_t msg_size; |
| void *recv_buf = SERVICE_RECV_BUFFER(); |
| void *send_buf = SERVICE_SEND_BUFFER(); |
| |
| struct spci_value ret = spci_msg_wait(); |
| spci_vm_id_t sender = |
| retrieve_memory_from_message(recv_buf, send_buf, ret, NULL); |
| struct spci_retrieved_memory_region *memory_region = |
| (struct spci_retrieved_memory_region *)recv_buf; |
| struct spci_receiver_address_range *range = |
| spci_retrieved_memory_region_first_receiver_range( |
| memory_region); |
| struct spci_memory_region_constituent constituent = |
| range->constituents[0]; |
| |
| EXPECT_EQ(spci_rx_release().func, SPCI_SUCCESS_32); |
| |
| /* Yield to allow attempt to re donate from primary. */ |
| spci_yield(); |
| |
| /* Give the memory back and notify the sender. */ |
| send_memory_and_retrieve_request(SPCI_MEM_DONATE_32, send_buf, |
| hf_vm_get_id(), sender, &constituent, |
| 1, 0, SPCI_MEMORY_RW_X); |
| |
| /* Attempt to donate the memory to another VM. */ |
| msg_size = spci_memory_region_init( |
| send_buf, hf_vm_get_id(), SERVICE_VM2, &constituent, 1, 0, 0, |
| SPCI_MEMORY_RW_X, SPCI_MEMORY_NORMAL_MEM, |
| SPCI_MEMORY_CACHE_WRITE_BACK, SPCI_MEMORY_OUTER_SHAREABLE); |
| EXPECT_SPCI_ERROR(spci_mem_donate(msg_size, msg_size, 0), |
| SPCI_INVALID_PARAMETERS); |
| |
| spci_yield(); |
| } |
| |
| /** |
| * Continually receive memory, check if we have access and ensure it is not |
| * changed by a third party. |
| */ |
| TEST_SERVICE(spci_memory_receive) |
| { |
| void *recv_buf = SERVICE_RECV_BUFFER(); |
| void *send_buf = SERVICE_SEND_BUFFER(); |
| |
| for (;;) { |
| struct spci_value ret = spci_msg_wait(); |
| struct spci_retrieved_memory_region *memory_region; |
| struct spci_receiver_address_range *range; |
| uint8_t *ptr; |
| |
| retrieve_memory_from_message(recv_buf, send_buf, ret, NULL); |
| memory_region = (struct spci_retrieved_memory_region *)recv_buf; |
| range = spci_retrieved_memory_region_first_receiver_range( |
| memory_region); |
| ptr = (uint8_t *)spci_memory_region_constituent_get_address( |
| &range->constituents[0]); |
| |
| EXPECT_EQ(spci_rx_release().func, SPCI_SUCCESS_32); |
| ptr[0] = 'd'; |
| spci_yield(); |
| |
| /* Ensure memory has not changed. */ |
| EXPECT_EQ(ptr[0], 'd'); |
| spci_yield(); |
| } |
| } |
| |
| /** |
| * Receive memory and attempt to donate from primary VM. |
| */ |
| TEST_SERVICE(spci_donate_invalid_source) |
| { |
| uint32_t msg_size; |
| void *recv_buf = SERVICE_RECV_BUFFER(); |
| void *send_buf = SERVICE_SEND_BUFFER(); |
| |
| struct spci_value ret = spci_msg_wait(); |
| spci_vm_id_t sender = |
| retrieve_memory_from_message(recv_buf, send_buf, ret, NULL); |
| struct spci_retrieved_memory_region *memory_region = |
| (struct spci_retrieved_memory_region *)recv_buf; |
| struct spci_receiver_address_range *range = |
| spci_retrieved_memory_region_first_receiver_range( |
| memory_region); |
| |
| /* Give the memory back and notify the sender. */ |
| send_memory_and_retrieve_request( |
| SPCI_MEM_DONATE_32, send_buf, hf_vm_get_id(), sender, |
| range->constituents, range->constituent_count, 0, |
| SPCI_MEMORY_RW_X); |
| |
| /* Fail to donate the memory from the primary to VM2. */ |
| msg_size = spci_memory_region_init( |
| send_buf, HF_PRIMARY_VM_ID, SERVICE_VM2, range->constituents, |
| range->constituent_count, 0, 0, SPCI_MEMORY_RW_X, |
| SPCI_MEMORY_NORMAL_MEM, SPCI_MEMORY_CACHE_WRITE_BACK, |
| SPCI_MEMORY_OUTER_SHAREABLE); |
| EXPECT_EQ(spci_rx_release().func, SPCI_SUCCESS_32); |
| EXPECT_SPCI_ERROR(spci_mem_donate(msg_size, msg_size, 0), |
| SPCI_INVALID_PARAMETERS); |
| spci_yield(); |
| } |
| |
/**
 * Repeatedly retrieve lent memory made up of (at least) two constituent
 * ranges, write to both ranges, then relinquish the memory and check that any
 * further access faults (yielding via the data-abort handler).
 */
TEST_SERVICE(spci_memory_lend_relinquish)
{
	exception_setup(NULL, exception_handler_yield_data_abort);

	/* Loop, giving memory back to the sender. */
	for (;;) {
		uint8_t *ptr;
		uint8_t *ptr2;
		uint32_t count;
		uint32_t count2;
		size_t i;
		spci_memory_handle_t handle;

		void *recv_buf = SERVICE_RECV_BUFFER();
		void *send_buf = SERVICE_SEND_BUFFER();

		struct spci_value ret = spci_msg_wait();
		spci_vm_id_t sender = retrieve_memory_from_message(
			recv_buf, send_buf, ret, &handle);
		struct spci_retrieved_memory_region *memory_region =
			(struct spci_retrieved_memory_region *)recv_buf;
		struct spci_receiver_address_range *range =
			spci_retrieved_memory_region_first_receiver_range(
				memory_region);
		struct spci_memory_region_constituent *constituents =
			range->constituents;

		/*
		 * NOTE(review): assumes the sender always supplies at least
		 * two constituents — confirm against the primary-side test
		 * that drives this service.
		 */
		ptr = (uint8_t *)spci_memory_region_constituent_get_address(
			&constituents[0]);
		count = constituents[0].page_count;
		ptr2 = (uint8_t *)spci_memory_region_constituent_get_address(
			&constituents[1]);
		count2 = constituents[1].page_count;
		/* Relevant information read, mailbox can be cleared. */
		EXPECT_EQ(spci_rx_release().func, SPCI_SUCCESS_32);

		/* Check that one has access to the shared region. */
		for (i = 0; i < PAGE_SIZE * count; ++i) {
			ptr[i]++;
		}
		for (i = 0; i < PAGE_SIZE * count2; ++i) {
			ptr2[i]++;
		}

		/* Give the memory back and notify the sender. */
		*(struct spci_mem_relinquish *)send_buf =
			(struct spci_mem_relinquish){.handle = handle,
						     .sender = hf_vm_get_id()};
		EXPECT_EQ(spci_mem_relinquish().func, SPCI_SUCCESS_32);
		EXPECT_EQ(spci_msg_send(hf_vm_get_id(), sender, 0, 0).func,
			  SPCI_SUCCESS_32);

		/*
		 * Try and access the memory which will cause a fault unless the
		 * memory has been shared back again.
		 */
		ptr[0] = 123;
	}
}
| |
| /** |
| * Ensure that we can't relinquish donated memory. |
| */ |
| TEST_SERVICE(spci_memory_donate_relinquish) |
| { |
| for (;;) { |
| size_t i; |
| spci_memory_handle_t handle; |
| struct spci_retrieved_memory_region *memory_region; |
| struct spci_receiver_address_range *range; |
| uint8_t *ptr; |
| |
| void *recv_buf = SERVICE_RECV_BUFFER(); |
| void *send_buf = SERVICE_SEND_BUFFER(); |
| struct spci_value ret = spci_msg_wait(); |
| |
| retrieve_memory_from_message(recv_buf, send_buf, ret, &handle); |
| memory_region = (struct spci_retrieved_memory_region *)recv_buf; |
| range = spci_retrieved_memory_region_first_receiver_range( |
| memory_region); |
| |
| ptr = (uint8_t *)spci_memory_region_constituent_get_address( |
| &range->constituents[0]); |
| |
| /* Check that we have access to the shared region. */ |
| for (i = 0; i < PAGE_SIZE; ++i) { |
| ptr[i]++; |
| } |
| |
| /* |
| * Attempt to relinquish the memory, which should fail because |
| * it was donated not lent. |
| */ |
| *(struct spci_mem_relinquish *)send_buf = |
| (struct spci_mem_relinquish){.handle = handle, |
| .sender = hf_vm_get_id()}; |
| EXPECT_EQ(spci_rx_release().func, SPCI_SUCCESS_32); |
| EXPECT_SPCI_ERROR(spci_mem_relinquish(), |
| SPCI_INVALID_PARAMETERS); |
| |
| /* Ensure we still have access to the memory. */ |
| ptr[0] = 123; |
| |
| spci_yield(); |
| } |
| } |
| |
| /** |
| * Receive memory that has been shared, try to relinquish it with the clear flag |
| * set (and expect to fail), and then relinquish without any flags. |
| */ |
| TEST_SERVICE(spci_memory_share_relinquish_clear) |
| { |
| exception_setup(NULL, exception_handler_yield_data_abort); |
| |
| /* Loop, receiving memory and relinquishing it. */ |
| for (;;) { |
| spci_memory_handle_t handle; |
| |
| void *recv_buf = SERVICE_RECV_BUFFER(); |
| void *send_buf = SERVICE_SEND_BUFFER(); |
| |
| struct spci_value ret = spci_msg_wait(); |
| spci_vm_id_t sender = retrieve_memory_from_message( |
| recv_buf, send_buf, ret, &handle); |
| |
| /* |
| * Mailbox can be cleared, we don't actually care what the |
| * memory region is. |
| */ |
| EXPECT_EQ(spci_rx_release().func, SPCI_SUCCESS_32); |
| |
| /* Trying to relinquish the memory and clear it should fail. */ |
| *(struct spci_mem_relinquish *)send_buf = |
| (struct spci_mem_relinquish){ |
| .handle = handle, |
| .sender = hf_vm_get_id(), |
| .flags = SPCI_MEMORY_REGION_FLAG_CLEAR}; |
| EXPECT_SPCI_ERROR(spci_mem_relinquish(), |
| SPCI_INVALID_PARAMETERS); |
| |
| /* Give the memory back and notify the sender. */ |
| *(struct spci_mem_relinquish *)send_buf = |
| (struct spci_mem_relinquish){.handle = handle, |
| .sender = hf_vm_get_id()}; |
| EXPECT_EQ(spci_mem_relinquish().func, SPCI_SUCCESS_32); |
| EXPECT_EQ(spci_msg_send(hf_vm_get_id(), sender, 0, 0).func, |
| SPCI_SUCCESS_32); |
| } |
| } |
| |
| /** |
| * Receive memory and attempt to donate from primary VM. |
| */ |
| TEST_SERVICE(spci_lend_invalid_source) |
| { |
| spci_memory_handle_t handle; |
| uint32_t msg_size; |
| |
| void *recv_buf = SERVICE_RECV_BUFFER(); |
| void *send_buf = SERVICE_SEND_BUFFER(); |
| struct spci_value ret = spci_msg_wait(); |
| spci_vm_id_t sender = |
| retrieve_memory_from_message(recv_buf, send_buf, ret, &handle); |
| struct spci_retrieved_memory_region *memory_region = |
| (struct spci_retrieved_memory_region *)recv_buf; |
| struct spci_receiver_address_range *range = |
| spci_retrieved_memory_region_first_receiver_range( |
| memory_region); |
| |
| /* Give the memory back and notify the sender. */ |
| *(struct spci_mem_relinquish *)send_buf = (struct spci_mem_relinquish){ |
| .handle = handle, .sender = hf_vm_get_id()}; |
| EXPECT_EQ(spci_mem_relinquish().func, SPCI_SUCCESS_32); |
| EXPECT_EQ(spci_msg_send(hf_vm_get_id(), sender, 0, 0).func, |
| SPCI_SUCCESS_32); |
| |
| /* Ensure we cannot lend from the primary to another secondary. */ |
| msg_size = spci_memory_region_init( |
| send_buf, HF_PRIMARY_VM_ID, SERVICE_VM2, range->constituents, |
| range->constituent_count, 0, 0, SPCI_MEMORY_RW_X, |
| SPCI_MEMORY_NORMAL_MEM, SPCI_MEMORY_CACHE_WRITE_BACK, |
| SPCI_MEMORY_OUTER_SHAREABLE); |
| EXPECT_SPCI_ERROR(spci_mem_lend(msg_size, msg_size, 0), |
| SPCI_INVALID_PARAMETERS); |
| |
| /* Ensure we cannot share from the primary to another secondary. */ |
| msg_size = spci_memory_region_init( |
| send_buf, HF_PRIMARY_VM_ID, SERVICE_VM2, range->constituents, |
| range->constituent_count, 0, 0, SPCI_MEMORY_RW_X, |
| SPCI_MEMORY_NORMAL_MEM, SPCI_MEMORY_CACHE_WRITE_BACK, |
| SPCI_MEMORY_OUTER_SHAREABLE); |
| EXPECT_EQ(spci_rx_release().func, SPCI_SUCCESS_32); |
| EXPECT_SPCI_ERROR(spci_mem_share(msg_size, msg_size, 0), |
| SPCI_INVALID_PARAMETERS); |
| |
| spci_yield(); |
| } |
| |
| /** |
| * Attempt to execute an instruction from the lent memory. |
| */ |
| TEST_SERVICE(spci_memory_lend_relinquish_X) |
| { |
| exception_setup(NULL, exception_handler_yield_instruction_abort); |
| |
| for (;;) { |
| spci_memory_handle_t handle; |
| void *recv_buf = SERVICE_RECV_BUFFER(); |
| void *send_buf = SERVICE_SEND_BUFFER(); |
| struct spci_value ret = spci_msg_wait(); |
| spci_vm_id_t sender = retrieve_memory_from_message( |
| recv_buf, send_buf, ret, &handle); |
| struct spci_retrieved_memory_region *memory_region = |
| (struct spci_retrieved_memory_region *)recv_buf; |
| struct spci_receiver_address_range *range = |
| spci_retrieved_memory_region_first_receiver_range( |
| memory_region); |
| struct spci_memory_region_constituent *constituents = |
| range->constituents; |
| uint64_t *ptr = |
| (uint64_t *)spci_memory_region_constituent_get_address( |
| &constituents[0]); |
| |
| EXPECT_EQ(spci_rx_release().func, SPCI_SUCCESS_32); |
| |
| /* |
| * Verify that the instruction in memory is the encoded RET |
| * instruction. |
| */ |
| EXPECT_EQ(*ptr, 0xD65F03C0); |
| /* Try to execute instruction from the shared memory region. */ |
| __asm__ volatile("blr %0" ::"r"(ptr)); |
| |
| /* Release the memory again. */ |
| *(struct spci_mem_relinquish *)send_buf = |
| (struct spci_mem_relinquish){.handle = handle, |
| .sender = hf_vm_get_id()}; |
| EXPECT_EQ(spci_mem_relinquish().func, SPCI_SUCCESS_32); |
| EXPECT_EQ(spci_msg_send(hf_vm_get_id(), sender, 0, 0).func, |
| SPCI_SUCCESS_32); |
| } |
| } |
| |
| /** |
| * Attempt to read and write to a shared page. |
| */ |
| TEST_SERVICE(spci_memory_lend_relinquish_RW) |
| { |
| exception_setup(NULL, exception_handler_yield_data_abort); |
| |
| for (;;) { |
| spci_memory_handle_t handle; |
| uint8_t *ptr; |
| size_t i; |
| |
| void *recv_buf = SERVICE_RECV_BUFFER(); |
| void *send_buf = SERVICE_SEND_BUFFER(); |
| struct spci_value ret = spci_msg_wait(); |
| spci_vm_id_t sender = retrieve_memory_from_message( |
| recv_buf, send_buf, ret, &handle); |
| struct spci_retrieved_memory_region *memory_region = |
| (struct spci_retrieved_memory_region *)recv_buf; |
| struct spci_receiver_address_range *range = |
| spci_retrieved_memory_region_first_receiver_range( |
| memory_region); |
| struct spci_memory_region_constituent constituent_copy = |
| range->constituents[0]; |
| |
| EXPECT_EQ(spci_rx_release().func, SPCI_SUCCESS_32); |
| |
| ptr = (uint8_t *)spci_memory_region_constituent_get_address( |
| &constituent_copy); |
| |
| /* Check that we have read access. */ |
| for (i = 0; i < PAGE_SIZE; ++i) { |
| EXPECT_EQ(ptr[i], 'b'); |
| } |
| |
| /* Return control to primary, to verify shared access. */ |
| spci_yield(); |
| |
| /* Attempt to modify the memory. */ |
| for (i = 0; i < PAGE_SIZE; ++i) { |
| ptr[i]++; |
| } |
| |
| /* Give the memory back and notify the sender. */ |
| *(struct spci_mem_relinquish *)send_buf = |
| (struct spci_mem_relinquish){.handle = handle, |
| .sender = hf_vm_get_id()}; |
| EXPECT_EQ(spci_mem_relinquish().func, SPCI_SUCCESS_32); |
| EXPECT_EQ(spci_msg_send(hf_vm_get_id(), sender, 0, 0).func, |
| SPCI_SUCCESS_32); |
| } |
| } |
| |
| TEST_SERVICE(spci_memory_lend_twice) |
| { |
| struct spci_value ret = spci_msg_wait(); |
| uint8_t *ptr; |
| uint32_t msg_size; |
| size_t i; |
| |
| void *recv_buf = SERVICE_RECV_BUFFER(); |
| void *send_buf = SERVICE_SEND_BUFFER(); |
| struct spci_retrieved_memory_region *memory_region; |
| struct spci_receiver_address_range *range; |
| struct spci_memory_region_constituent constituent_copy; |
| |
| retrieve_memory_from_message(recv_buf, send_buf, ret, NULL); |
| memory_region = (struct spci_retrieved_memory_region *)recv_buf; |
| range = spci_retrieved_memory_region_first_receiver_range( |
| memory_region); |
| constituent_copy = range->constituents[0]; |
| |
| EXPECT_EQ(spci_rx_release().func, SPCI_SUCCESS_32); |
| |
| ptr = (uint8_t *)spci_memory_region_constituent_get_address( |
| &constituent_copy); |
| |
| /* Check that we have read access. */ |
| for (i = 0; i < PAGE_SIZE; ++i) { |
| EXPECT_EQ(ptr[i], 'b'); |
| } |
| |
| /* Attempt to modify the memory. */ |
| for (i = 0; i < PAGE_SIZE; ++i) { |
| ptr[i]++; |
| } |
| |
| for (i = 1; i < PAGE_SIZE * 2; i++) { |
| uint64_t address = (uint64_t)ptr + i; |
| constituent_copy.address_high = address << 32; |
| constituent_copy.address_low = (uint32_t)address; |
| |
| /* Fail to lend or share the memory from the primary. */ |
| msg_size = spci_memory_region_init( |
| send_buf, HF_PRIMARY_VM_ID, SERVICE_VM2, |
| &constituent_copy, 1, 0, 0, SPCI_MEMORY_RW_X, |
| SPCI_MEMORY_NORMAL_MEM, SPCI_MEMORY_CACHE_WRITE_BACK, |
| SPCI_MEMORY_OUTER_SHAREABLE); |
| EXPECT_SPCI_ERROR(spci_mem_lend(msg_size, msg_size, 0), |
| SPCI_INVALID_PARAMETERS); |
| msg_size = spci_memory_region_init( |
| send_buf, HF_PRIMARY_VM_ID, SERVICE_VM2, |
| &constituent_copy, 1, 0, 0, SPCI_MEMORY_RW_X, |
| SPCI_MEMORY_NORMAL_MEM, SPCI_MEMORY_CACHE_WRITE_BACK, |
| SPCI_MEMORY_OUTER_SHAREABLE); |
| EXPECT_SPCI_ERROR(spci_mem_share(msg_size, msg_size, 0), |
| SPCI_INVALID_PARAMETERS); |
| } |
| |
| /* Return control to primary. */ |
| spci_yield(); |
| } |