Use uint32_t rather than int for memory modes.
Change-Id: I12b4d0cb0582d80bf86ca5dd99c7e462d776320f
diff --git a/inc/hf/arch/mm.h b/inc/hf/arch/mm.h
index a7a155a..e8cfa36 100644
--- a/inc/hf/arch/mm.h
+++ b/inc/hf/arch/mm.h
@@ -146,17 +146,17 @@
/**
* Converts the mode into stage-1 attributes for a block PTE.
*/
-uint64_t arch_mm_mode_to_stage1_attrs(int mode);
+uint64_t arch_mm_mode_to_stage1_attrs(uint32_t mode);
/**
* Converts the mode into stage-2 attributes for a block PTE.
*/
-uint64_t arch_mm_mode_to_stage2_attrs(int mode);
+uint64_t arch_mm_mode_to_stage2_attrs(uint32_t mode);
/**
* Converts the stage-2 block attributes back to the corresponding mode.
*/
-int arch_mm_stage2_attrs_to_mode(uint64_t attrs);
+uint32_t arch_mm_stage2_attrs_to_mode(uint64_t attrs);
/**
* Initializes the arch specific memory management.
diff --git a/inc/hf/cpu.h b/inc/hf/cpu.h
index 56f7982..7535454 100644
--- a/inc/hf/cpu.h
+++ b/inc/hf/cpu.h
@@ -67,7 +67,7 @@
ipaddr_t ipaddr;
vaddr_t vaddr;
vaddr_t pc;
- int mode;
+ uint32_t mode;
};
struct vcpu {
diff --git a/inc/hf/mm.h b/inc/hf/mm.h
index 44b51b8..613e6e5 100644
--- a/inc/hf/mm.h
+++ b/inc/hf/mm.h
@@ -33,10 +33,10 @@
#define MM_PTE_PER_PAGE (PAGE_SIZE / sizeof(pte_t))
/* The following are arch-independent page mapping modes. */
-#define MM_MODE_R 0x0001 /* read */
-#define MM_MODE_W 0x0002 /* write */
-#define MM_MODE_X 0x0004 /* execute */
-#define MM_MODE_D 0x0008 /* device */
+#define MM_MODE_R UINT32_C(0x0001) /* read */
+#define MM_MODE_W UINT32_C(0x0002) /* write */
+#define MM_MODE_X UINT32_C(0x0004) /* execute */
+#define MM_MODE_D UINT32_C(0x0008) /* device */
/*
* Memory in stage-1 is either valid (present) or invalid (absent).
@@ -66,9 +66,9 @@
*
* Modes are selected so that owner of exclusive memory is the default.
*/
-#define MM_MODE_INVALID 0x0010
-#define MM_MODE_UNOWNED 0x0020
-#define MM_MODE_SHARED 0x0040
+#define MM_MODE_INVALID UINT32_C(0x0010)
+#define MM_MODE_UNOWNED UINT32_C(0x0020)
+#define MM_MODE_SHARED UINT32_C(0x0040)
#define MM_FLAG_COMMIT 0x01
#define MM_FLAG_UNMAP 0x02
@@ -105,19 +105,19 @@
bool mm_vm_init(struct mm_ptable *t, struct mpool *ppool);
void mm_vm_fini(struct mm_ptable *t, struct mpool *ppool);
bool mm_vm_identity_map(struct mm_ptable *t, paddr_t begin, paddr_t end,
- int mode, ipaddr_t *ipa, struct mpool *ppool);
+ uint32_t mode, ipaddr_t *ipa, struct mpool *ppool);
bool mm_vm_unmap(struct mm_ptable *t, paddr_t begin, paddr_t end,
struct mpool *ppool);
bool mm_vm_unmap_hypervisor(struct mm_ptable *t, struct mpool *ppool);
void mm_vm_defrag(struct mm_ptable *t, struct mpool *ppool);
void mm_vm_dump(struct mm_ptable *t);
bool mm_vm_get_mode(struct mm_ptable *t, ipaddr_t begin, ipaddr_t end,
- int *mode);
+ uint32_t *mode);
struct mm_stage1_locked mm_lock_stage1(void);
void mm_unlock_stage1(struct mm_stage1_locked *lock);
void *mm_identity_map(struct mm_stage1_locked stage1_locked, paddr_t begin,
- paddr_t end, int mode, struct mpool *ppool);
+ paddr_t end, uint32_t mode, struct mpool *ppool);
bool mm_unmap(struct mm_stage1_locked stage1_locked, paddr_t begin, paddr_t end,
struct mpool *ppool);
void mm_defrag(struct mm_stage1_locked stage1_locked, struct mpool *ppool);
diff --git a/inc/hf/spci_internal.h b/inc/hf/spci_internal.h
index 2bb1457..38bfd73 100644
--- a/inc/hf/spci_internal.h
+++ b/inc/hf/spci_internal.h
@@ -27,10 +27,10 @@
#define SPCI_VERSION_MAJOR_OFFSET 16
struct spci_mem_transitions {
- int orig_from_mode;
- int orig_to_mode;
- int from_mode;
- int to_mode;
+ uint32_t orig_from_mode;
+ uint32_t orig_to_mode;
+ uint32_t from_mode;
+ uint32_t to_mode;
};
/* TODO: Add device attributes: GRE, cacheability, shareability. */
@@ -69,6 +69,6 @@
bool spci_msg_check_transition(struct vm *to, struct vm *from,
enum spci_memory_share share,
- int *orig_from_mode, ipaddr_t begin,
+ uint32_t *orig_from_mode, ipaddr_t begin,
ipaddr_t end, uint32_t memory_to_attributes,
- int *from_mode, int *to_mode);
+ uint32_t *from_mode, uint32_t *to_mode);
diff --git a/src/api.c b/src/api.c
index e7c2d01..d009538 100644
--- a/src/api.c
+++ b/src/api.c
@@ -576,7 +576,7 @@
/**
* Check that the mode indicates memory that is valid, owned and exclusive.
*/
-static bool api_mode_valid_owned_and_exclusive(int mode)
+static bool api_mode_valid_owned_and_exclusive(uint32_t mode)
{
return (mode & (MM_MODE_D | MM_MODE_INVALID | MM_MODE_UNOWNED |
MM_MODE_SHARED)) == 0;
@@ -682,8 +682,9 @@
*/
static bool api_vm_configure_pages(struct vm_locked vm_locked,
paddr_t pa_send_begin, paddr_t pa_send_end,
- int orig_send_mode, paddr_t pa_recv_begin,
- paddr_t pa_recv_end, int orig_recv_mode)
+ uint32_t orig_send_mode,
+ paddr_t pa_recv_begin, paddr_t pa_recv_end,
+ uint32_t orig_recv_mode)
{
bool ret;
struct mpool local_page_pool;
@@ -765,8 +766,8 @@
paddr_t pa_send_end;
paddr_t pa_recv_begin;
paddr_t pa_recv_end;
- int orig_send_mode;
- int orig_recv_mode;
+ uint32_t orig_send_mode;
+ uint32_t orig_recv_mode;
int64_t ret;
/* Fail if addresses are not page-aligned. */
@@ -1451,9 +1452,9 @@
{
struct vm *to = to_locked.vm;
struct vm *from = from_locked.vm;
- int orig_from_mode;
- int from_mode;
- int to_mode;
+ uint32_t orig_from_mode;
+ uint32_t from_mode;
+ uint32_t to_mode;
struct mpool local_page_pool;
struct spci_value ret;
paddr_t pa_begin;
@@ -1546,9 +1547,9 @@
{
struct vm *from = current->vm;
struct vm *to;
- int orig_from_mode;
- int from_mode;
- int to_mode;
+ uint32_t orig_from_mode;
+ uint32_t from_mode;
+ uint32_t to_mode;
ipaddr_t begin;
ipaddr_t end;
paddr_t pa_begin;
@@ -1636,7 +1637,7 @@
* owning VM.
*/
if (orig_from_mode & MM_MODE_UNOWNED) {
- int orig_to_mode;
+ uint32_t orig_to_mode;
if (share != HF_MEMORY_GIVE ||
!mm_vm_get_mode(&to->ptable, begin, end, &orig_to_mode) ||
diff --git a/src/arch/aarch64/hypervisor/handler.c b/src/arch/aarch64/hypervisor/handler.c
index 8132406..77b10e3 100644
--- a/src/arch/aarch64/hypervisor/handler.c
+++ b/src/arch/aarch64/hypervisor/handler.c
@@ -528,7 +528,8 @@
* instruction and data aborts, but not necessarily for other exception reasons.
*/
static struct vcpu_fault_info fault_info_init(uintreg_t esr,
- const struct vcpu *vcpu, int mode)
+ const struct vcpu *vcpu,
+ uint32_t mode)
{
uint32_t fsc = esr & 0x3f;
struct vcpu_fault_info r;
diff --git a/src/arch/aarch64/mm.c b/src/arch/aarch64/mm.c
index d8dda85..f93bd8a 100644
--- a/src/arch/aarch64/mm.c
+++ b/src/arch/aarch64/mm.c
@@ -402,7 +402,7 @@
dsb(sy);
}
-uint64_t arch_mm_mode_to_stage1_attrs(int mode)
+uint64_t arch_mm_mode_to_stage1_attrs(uint32_t mode)
{
uint64_t attrs = 0;
@@ -435,7 +435,7 @@
return attrs;
}
-uint64_t arch_mm_mode_to_stage2_attrs(int mode)
+uint64_t arch_mm_mode_to_stage2_attrs(uint32_t mode)
{
uint64_t attrs = 0;
uint64_t access = 0;
@@ -493,9 +493,9 @@
return attrs;
}
-int arch_mm_stage2_attrs_to_mode(uint64_t attrs)
+uint32_t arch_mm_stage2_attrs_to_mode(uint64_t attrs)
{
- int mode = 0;
+ uint32_t mode = 0;
if (attrs & STAGE2_S2AP(STAGE2_ACCESS_READ)) {
mode |= MM_MODE_R;
diff --git a/src/arch/fake/mm.c b/src/arch/fake/mm.c
index 68a2383..d0066fb 100644
--- a/src/arch/fake/mm.c
+++ b/src/arch/fake/mm.c
@@ -147,17 +147,17 @@
return 4;
}
-uint64_t arch_mm_mode_to_stage1_attrs(int mode)
+uint64_t arch_mm_mode_to_stage1_attrs(uint32_t mode)
{
return ((uint64_t)mode << PTE_ATTR_MODE_SHIFT) & PTE_ATTR_MODE_MASK;
}
-uint64_t arch_mm_mode_to_stage2_attrs(int mode)
+uint64_t arch_mm_mode_to_stage2_attrs(uint32_t mode)
{
return ((uint64_t)mode << PTE_ATTR_MODE_SHIFT) & PTE_ATTR_MODE_MASK;
}
-int arch_mm_stage2_attrs_to_mode(uint64_t attrs)
+uint32_t arch_mm_stage2_attrs_to_mode(uint64_t attrs)
{
return attrs >> PTE_ATTR_MODE_SHIFT;
}
diff --git a/src/cpu.c b/src/cpu.c
index 823826c..aeb1127 100644
--- a/src/cpu.c
+++ b/src/cpu.c
@@ -299,8 +299,8 @@
struct vcpu_fault_info *f)
{
struct vm *vm = current->vm;
- int mode;
- int mask = f->mode | MM_MODE_INVALID;
+ uint32_t mode;
+ uint32_t mask = f->mode | MM_MODE_INVALID;
bool resume;
sl_lock(&vm->lock);
diff --git a/src/mm.c b/src/mm.c
index b3d06e2..f6c2481 100644
--- a/src/mm.c
+++ b/src/mm.c
@@ -784,7 +784,7 @@
* architecture-agnostic mode provided.
*/
bool mm_vm_identity_map(struct mm_ptable *t, paddr_t begin, paddr_t end,
- int mode, ipaddr_t *ipa, struct mpool *ppool)
+ uint32_t mode, ipaddr_t *ipa, struct mpool *ppool)
{
int flags = 0;
bool success = mm_ptable_identity_update(
@@ -847,7 +847,7 @@
* Returns true if the range is mapped with the same mode and false otherwise.
*/
bool mm_vm_get_mode(struct mm_ptable *t, ipaddr_t begin, ipaddr_t end,
- int *mode)
+ uint32_t *mode)
{
uint64_t attrs;
bool ret;
@@ -884,7 +884,7 @@
* architecture-agnostic mode provided.
*/
void *mm_identity_map(struct mm_stage1_locked stage1_locked, paddr_t begin,
- paddr_t end, int mode, struct mpool *ppool)
+ paddr_t end, uint32_t mode, struct mpool *ppool)
{
if (mm_ptable_identity_update(stage1_locked.ptable, begin, end,
arch_mm_mode_to_stage1_attrs(mode),
diff --git a/src/mm_test.cc b/src/mm_test.cc
index 71c3876..4a523c8 100644
--- a/src/mm_test.cc
+++ b/src/mm_test.cc
@@ -57,7 +57,7 @@
*/
bool mm_vm_is_mapped(struct mm_ptable *t, ipaddr_t ipa)
{
- int mode;
+ uint32_t mode;
return mm_vm_get_mode(t, ipa, ipa_add(ipa, 1), &mode) &&
(mode & MM_MODE_INVALID) == 0;
}
@@ -137,7 +137,7 @@
*/
TEST_F(mm, map_first_page)
{
- constexpr int mode = 0;
+ constexpr uint32_t mode = 0;
const paddr_t page_begin = pa_init(0);
const paddr_t page_end = pa_add(page_begin, PAGE_SIZE);
struct mm_ptable ptable;
@@ -180,7 +180,7 @@
*/
TEST_F(mm, map_round_to_page)
{
- constexpr int mode = 0;
+ constexpr uint32_t mode = 0;
const paddr_t map_begin = pa_init(0x200'0000'0000 - PAGE_SIZE + 23);
const paddr_t map_end = pa_add(map_begin, 268);
ipaddr_t ipa = ipa_init(-1);
@@ -226,7 +226,7 @@
*/
TEST_F(mm, map_across_tables)
{
- constexpr int mode = 0;
+ constexpr uint32_t mode = 0;
const paddr_t map_begin = pa_init(0x80'0000'0000 - PAGE_SIZE);
const paddr_t map_end = pa_add(map_begin, 2 * PAGE_SIZE);
struct mm_ptable ptable;
@@ -289,7 +289,7 @@
*/
TEST_F(mm, map_all_at_top_level)
{
- constexpr int mode = 0;
+ constexpr uint32_t mode = 0;
struct mm_ptable ptable;
ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
@@ -317,7 +317,7 @@
*/
TEST_F(mm, map_already_mapped)
{
- constexpr int mode = 0;
+ constexpr uint32_t mode = 0;
ipaddr_t ipa = ipa_init(-1);
struct mm_ptable ptable;
ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
@@ -339,7 +339,7 @@
*/
TEST_F(mm, map_reverse_range)
{
- constexpr int mode = 0;
+ constexpr uint32_t mode = 0;
ipaddr_t ipa = ipa_init(-1);
struct mm_ptable ptable;
ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
@@ -361,7 +361,7 @@
*/
TEST_F(mm, map_reverse_range_quirk)
{
- constexpr int mode = 0;
+ constexpr uint32_t mode = 0;
ipaddr_t ipa = ipa_init(-1);
struct mm_ptable ptable;
ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
@@ -381,7 +381,7 @@
*/
TEST_F(mm, map_last_address_quirk)
{
- constexpr int mode = 0;
+ constexpr uint32_t mode = 0;
ipaddr_t ipa = ipa_init(-1);
struct mm_ptable ptable;
ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
@@ -402,7 +402,7 @@
*/
TEST_F(mm, map_clamp_to_range)
{
- constexpr int mode = 0;
+ constexpr uint32_t mode = 0;
struct mm_ptable ptable;
ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0),
@@ -421,7 +421,7 @@
*/
TEST_F(mm, map_ignore_out_of_range)
{
- constexpr int mode = 0;
+ constexpr uint32_t mode = 0;
ipaddr_t ipa = ipa_init(-1);
struct mm_ptable ptable;
ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
@@ -441,7 +441,7 @@
*/
TEST_F(mm, map_block_replaces_table)
{
- constexpr int mode = 0;
+ constexpr uint32_t mode = 0;
const paddr_t page_begin = pa_init(34567 * PAGE_SIZE);
const paddr_t page_end = pa_add(page_begin, PAGE_SIZE);
struct mm_ptable ptable;
@@ -463,7 +463,7 @@
*/
TEST_F(mm, map_does_not_defrag)
{
- constexpr int mode = 0;
+ constexpr uint32_t mode = 0;
const paddr_t page_begin = pa_init(12000 * PAGE_SIZE);
const paddr_t page_end = pa_add(page_begin, PAGE_SIZE);
struct mm_ptable ptable;
@@ -518,7 +518,7 @@
*/
TEST_F(mm, unmap_all)
{
- constexpr int mode = 0;
+ constexpr uint32_t mode = 0;
const paddr_t l0_begin = pa_init(uintpaddr_t(524421) * PAGE_SIZE);
const paddr_t l0_end = pa_add(l0_begin, 17 * PAGE_SIZE);
const paddr_t l1_begin = pa_init(3 * mm_entry_size(1));
@@ -541,7 +541,7 @@
*/
TEST_F(mm, unmap_round_to_page)
{
- constexpr int mode = 0;
+ constexpr uint32_t mode = 0;
const paddr_t map_begin = pa_init(0x160'0000'0000 + PAGE_SIZE);
const paddr_t map_end = pa_add(map_begin, PAGE_SIZE);
struct mm_ptable ptable;
@@ -561,7 +561,7 @@
*/
TEST_F(mm, unmap_across_tables)
{
- constexpr int mode = 0;
+ constexpr uint32_t mode = 0;
const paddr_t map_begin = pa_init(0x180'0000'0000 - PAGE_SIZE);
const paddr_t map_end = pa_add(map_begin, 2 * PAGE_SIZE);
struct mm_ptable ptable;
@@ -580,7 +580,7 @@
*/
TEST_F(mm, unmap_out_of_range)
{
- constexpr int mode = 0;
+ constexpr uint32_t mode = 0;
struct mm_ptable ptable;
ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
@@ -600,7 +600,7 @@
*/
TEST_F(mm, unmap_reverse_range)
{
- constexpr int mode = 0;
+ constexpr uint32_t mode = 0;
struct mm_ptable ptable;
ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
@@ -623,7 +623,7 @@
*/
TEST_F(mm, unmap_reverse_range_quirk)
{
- constexpr int mode = 0;
+ constexpr uint32_t mode = 0;
const paddr_t page_begin = pa_init(0x180'0000'0000);
const paddr_t page_end = pa_add(page_begin, PAGE_SIZE);
struct mm_ptable ptable;
@@ -647,7 +647,7 @@
*/
TEST_F(mm, unmap_last_address_quirk)
{
- constexpr int mode = 0;
+ constexpr uint32_t mode = 0;
struct mm_ptable ptable;
ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
@@ -667,7 +667,7 @@
*/
TEST_F(mm, unmap_does_not_defrag)
{
- constexpr int mode = 0;
+ constexpr uint32_t mode = 0;
const paddr_t l0_begin = pa_init(5555 * PAGE_SIZE);
const paddr_t l0_end = pa_add(l0_begin, 13 * PAGE_SIZE);
const paddr_t l1_begin = pa_init(666 * mm_entry_size(1));
@@ -704,7 +704,7 @@
*/
TEST_F(mm, is_mapped_all)
{
- constexpr int mode = 0;
+ constexpr uint32_t mode = 0;
struct mm_ptable ptable;
ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
@@ -720,7 +720,7 @@
*/
TEST_F(mm, is_mapped_page)
{
- constexpr int mode = 0;
+ constexpr uint32_t mode = 0;
const paddr_t page_begin = pa_init(0x100'0000'0000);
const paddr_t page_end = pa_add(page_begin, PAGE_SIZE);
struct mm_ptable ptable;
@@ -739,7 +739,7 @@
*/
TEST_F(mm, is_mapped_out_of_range)
{
- constexpr int mode = 0;
+ constexpr uint32_t mode = 0;
struct mm_ptable ptable;
ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
@@ -760,7 +760,7 @@
-	constexpr int default_mode =
+	constexpr uint32_t default_mode =
MM_MODE_INVALID | MM_MODE_UNOWNED | MM_MODE_SHARED;
struct mm_ptable ptable;
- int read_mode;
+ uint32_t read_mode;
ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
read_mode = 0;
@@ -787,11 +787,11 @@
*/
TEST_F(mm, get_mode_pages_across_tables)
{
- constexpr int mode = MM_MODE_INVALID | MM_MODE_SHARED;
+ constexpr uint32_t mode = MM_MODE_INVALID | MM_MODE_SHARED;
const paddr_t map_begin = pa_init(0x180'0000'0000 - PAGE_SIZE);
const paddr_t map_end = pa_add(map_begin, 2 * PAGE_SIZE);
struct mm_ptable ptable;
- int read_mode;
+ uint32_t read_mode;
ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
ASSERT_TRUE(mm_vm_identity_map(&ptable, map_begin, map_end, mode,
nullptr, &ppool));
@@ -818,9 +818,9 @@
*/
TEST_F(mm, get_mode_out_of_range)
{
- constexpr int mode = MM_MODE_UNOWNED;
+ constexpr uint32_t mode = MM_MODE_UNOWNED;
struct mm_ptable ptable;
- int read_mode;
+ uint32_t read_mode;
ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
nullptr, &ppool));
@@ -855,7 +855,7 @@
*/
TEST_F(mm, defrag_empty_subtables)
{
- constexpr int mode = 0;
+ constexpr uint32_t mode = 0;
const paddr_t l0_begin = pa_init(120000 * PAGE_SIZE);
const paddr_t l0_end = pa_add(l0_begin, PAGE_SIZE);
const paddr_t l1_begin = pa_init(3 * mm_entry_size(1));
@@ -881,7 +881,7 @@
*/
TEST_F(mm, defrag_block_subtables)
{
- constexpr int mode = 0;
+ constexpr uint32_t mode = 0;
const paddr_t begin = pa_init(39456 * mm_entry_size(1));
const paddr_t middle = pa_add(begin, 67 * PAGE_SIZE);
const paddr_t end = pa_add(begin, 4 * mm_entry_size(1));
diff --git a/src/spci_architected_message.c b/src/spci_architected_message.c
index 605a7a9..7d05ef5 100644
--- a/src/spci_architected_message.c
+++ b/src/spci_architected_message.c
@@ -124,7 +124,8 @@
static bool spci_msg_get_next_state(
const struct spci_mem_transitions *transitions,
uint32_t transition_count, uint32_t memory_to_attributes,
- int orig_from_mode, int orig_to_mode, int *from_mode, int *to_mode)
+ uint32_t orig_from_mode, uint32_t orig_to_mode, uint32_t *from_mode,
+ uint32_t *to_mode)
{
const uint32_t state_mask =
MM_MODE_INVALID | MM_MODE_UNOWNED | MM_MODE_SHARED;
@@ -166,11 +167,11 @@
*/
bool spci_msg_check_transition(struct vm *to, struct vm *from,
enum spci_memory_share share,
- int *orig_from_mode, ipaddr_t begin,
+ uint32_t *orig_from_mode, ipaddr_t begin,
ipaddr_t end, uint32_t memory_to_attributes,
- int *from_mode, int *to_mode)
+ uint32_t *from_mode, uint32_t *to_mode)
{
- int orig_to_mode;
+ uint32_t orig_to_mode;
const struct spci_mem_transitions *mem_transition_table;
uint32_t transition_table_size;
diff --git a/test/hftest/inc/hftest.h b/test/hftest/inc/hftest.h
index af581e1..6752665 100644
--- a/test/hftest/inc/hftest.h
+++ b/test/hftest/inc/hftest.h
@@ -99,7 +99,7 @@
bool hftest_mm_init(void);
/** Adds stage-1 identity mapping for pages covering bytes [base, base+size). */
-void hftest_mm_identity_map(const void *base, size_t size, int mode);
+void hftest_mm_identity_map(const void *base, size_t size, uint32_t mode);
void hftest_mm_vcpu_init(void);
diff --git a/test/hftest/mm.c b/test/hftest/mm.c
index 988c26e..4446b24 100644
--- a/test/hftest/mm.c
+++ b/test/hftest/mm.c
@@ -59,7 +59,7 @@
return true;
}
-void hftest_mm_identity_map(const void *base, size_t size, int mode)
+void hftest_mm_identity_map(const void *base, size_t size, uint32_t mode)
{
struct mm_stage1_locked stage1_locked = get_stage1_locked();
paddr_t start = pa_from_va(va_from_ptr(base));
diff --git a/test/vmapi/arch/aarch64/gicv3/gicv3.c b/test/vmapi/arch/aarch64/gicv3/gicv3.c
index 2027f8d..a2418d0 100644
--- a/test/vmapi/arch/aarch64/gicv3/gicv3.c
+++ b/test/vmapi/arch/aarch64/gicv3/gicv3.c
@@ -50,7 +50,7 @@
void system_setup()
{
- const int mode = MM_MODE_R | MM_MODE_W | MM_MODE_D;
+ const uint32_t mode = MM_MODE_R | MM_MODE_W | MM_MODE_D;
hftest_mm_identity_map((void *)GICD_BASE, PAGE_SIZE, mode);
hftest_mm_identity_map((void *)GICR_BASE, PAGE_SIZE, mode);
hftest_mm_identity_map((void *)SGI_BASE, PAGE_SIZE, mode);