Introduce clang-format
An automated and opinionated style for code. We can decide to change the
style and have the source updated by the clang-format tool by running:
make format
I've based the style on the Google style with exceptions to better match
the current source style.
Change-Id: I43f85c7d4ce02ca999805558b25fcab2e43859c6
diff --git a/.clang-format b/.clang-format
new file mode 100644
index 0000000..016cbec
--- /dev/null
+++ b/.clang-format
@@ -0,0 +1,9 @@
+BasedOnStyle: Google
+IndentWidth: 8
+ContinuationIndentWidth: 8
+UseTab: Always
+BreakBeforeBraces: Linux
+AllowShortFunctionsOnASingleLine: None
+AllowShortIfStatementsOnASingleLine: false
+AllowShortLoopsOnASingleLine: false
+IndentCaseLabels: false
diff --git a/Makefile b/Makefile
index 99b2a17..5419741 100644
--- a/Makefile
+++ b/Makefile
@@ -157,4 +157,8 @@
clean:
rm -rf $(ROOT_DIR)out
+format:
+	find $(ROOT_DIR)src/ -name '*.c' -o -name '*.h' | xargs clang-format -style file -i
+	find $(ROOT_DIR)inc/ -name '*.c' -o -name '*.h' | xargs clang-format -style file -i
+
-include $(patsubst %,%.d,$(GLOBAL_OBJS),$(GLOBAL_OFFSETS))
diff --git a/inc/alloc.h b/inc/alloc.h
index 4277495..4570b1a 100644
--- a/inc/alloc.h
+++ b/inc/alloc.h
@@ -9,4 +9,4 @@
void *halloc_aligned(size_t size, size_t align);
void *halloc_aligned_nosync(size_t size, size_t align);
-#endif /* _ALLOC_H */
+#endif /* _ALLOC_H */
diff --git a/inc/api.h b/inc/api.h
index edc9797..edcd9e5 100644
--- a/inc/api.h
+++ b/inc/api.h
@@ -14,4 +14,4 @@
int32_t api_vcpu_run(uint32_t vm_idx, uint32_t vcpu_idx, struct vcpu **next);
struct vcpu *api_wait_for_interrupt(void);
-#endif /* _API_H */
+#endif /* _API_H */
diff --git a/inc/arch.h b/inc/arch.h
index 84624fb..195d149 100644
--- a/inc/arch.h
+++ b/inc/arch.h
@@ -5,4 +5,4 @@
void arch_putchar(char c);
-#endif /* _ARCH_H */
+#endif /* _ARCH_H */
diff --git a/inc/cpio.h b/inc/cpio.h
index cc244ce..a563ae6 100644
--- a/inc/cpio.h
+++ b/inc/cpio.h
@@ -16,7 +16,7 @@
void cpio_init(struct cpio *c, const void *buf, size_t size);
void cpio_init_iter(struct cpio *c, struct cpio_iter *iter);
-bool cpio_next(struct cpio_iter *iter, const char **name,
- const void **contents, size_t *size);
+bool cpio_next(struct cpio_iter *iter, const char **name, const void **contents,
+ size_t *size);
-#endif /* _CPIO_H */
+#endif /* _CPIO_H */
diff --git a/inc/cpu.h b/inc/cpu.h
index 43bd356..0dc561b 100644
--- a/inc/cpu.h
+++ b/inc/cpu.h
@@ -50,4 +50,4 @@
void vcpu_on(struct vcpu *v);
void vcpu_off(struct vcpu *v);
-#endif /* _CPU_H */
+#endif /* _CPU_H */
diff --git a/inc/decl_offsets.h b/inc/decl_offsets.h
index e0da1e8..fbcff34 100644
--- a/inc/decl_offsets.h
+++ b/inc/decl_offsets.h
@@ -2,9 +2,8 @@
#define _DECL_OFFSETS_H
#define DECL(name, type, field) \
- const size_t DEFINE_OFFSET__##name = offsetof(type, field)
+ const size_t DEFINE_OFFSET__##name = offsetof(type, field)
-#define DECL_SIZE(name, type) \
- const size_t DEFINE_OFFSET__name = sizeof(type)
+#define DECL_SIZE(name, type) const size_t DEFINE_OFFSET__name = sizeof(type)
-#endif /* _DECL_OFFSETS_H */
+#endif /* _DECL_OFFSETS_H */
diff --git a/inc/dlog.h b/inc/dlog.h
index d4c08c0..fca8103 100644
--- a/inc/dlog.h
+++ b/inc/dlog.h
@@ -9,4 +9,4 @@
void dlog_init(void (*pchar)(char));
-#endif /* _DLOG_H */
+#endif /* _DLOG_H */
diff --git a/inc/fdt.h b/inc/fdt.h
index fcc1716..44d629e 100644
--- a/inc/fdt.h
+++ b/inc/fdt.h
@@ -23,7 +23,7 @@
bool fdt_read_property(const struct fdt_node *node, const char *name,
const char **buf, uint32_t *size);
-void fdt_add_mem_reservation(struct fdt_header *hdr,
- uint64_t addr, uint64_t len);
+void fdt_add_mem_reservation(struct fdt_header *hdr, uint64_t addr,
+ uint64_t len);
-#endif /* _FDT_H */
+#endif /* _FDT_H */
diff --git a/inc/mm.h b/inc/mm.h
index 1af7496..82066cd 100644
--- a/inc/mm.h
+++ b/inc/mm.h
@@ -40,4 +40,4 @@
bool mm_ptable_unmap(struct mm_ptable *t, vaddr_t begin, vaddr_t end, int mode);
void mm_ptable_defrag(struct mm_ptable *t);
-#endif /* _MM_H */
+#endif /* _MM_H */
diff --git a/inc/spinlock.h b/inc/spinlock.h
index 7761980..dca3efb 100644
--- a/inc/spinlock.h
+++ b/inc/spinlock.h
@@ -7,7 +7,10 @@
atomic_flag v;
};
-#define SPINLOCK_INIT {.v = ATOMIC_FLAG_INIT}
+#define SPINLOCK_INIT \
+ { \
+ .v = ATOMIC_FLAG_INIT \
+ }
static inline void sl_init(struct spinlock *l)
{
@@ -16,7 +19,8 @@
static inline void sl_lock(struct spinlock *l)
{
- while (atomic_flag_test_and_set_explicit(&l->v, memory_order_acquire));
+ while (atomic_flag_test_and_set_explicit(&l->v, memory_order_acquire))
+ ;
}
static inline void sl_unlock(struct spinlock *l)
@@ -24,4 +28,4 @@
atomic_flag_clear_explicit(&l->v, memory_order_release);
}
-#endif /* _SPINLOCK_H */
+#endif /* _SPINLOCK_H */
diff --git a/inc/std.h b/inc/std.h
index 7c10200..d7cc124 100644
--- a/inc/std.h
+++ b/inc/std.h
@@ -48,10 +48,11 @@
#define htole32(v) __builtin_bswap32(v)
#define htole64(v) __builtin_bswap64(v)
-#else /* __BYTE_ORDER__ != __ORDER_LITTLE_ENDIAN__ && __BYTE_ORDER__ != __ORDER_BIG_ENDIAN__ */
+#else /* __BYTE_ORDER__ != __ORDER_LITTLE_ENDIAN__ && \
+ __BYTE_ORDER__ != __ORDER_BIG_ENDIAN__ */
#error "Unsupported byte order"
#endif
-#endif /* STD_H */
+#endif /* STD_H */
diff --git a/inc/vm.h b/inc/vm.h
index 88ae8ac..eb6a386 100644
--- a/inc/vm.h
+++ b/inc/vm.h
@@ -13,4 +13,4 @@
void vm_start_vcpu(struct vm *vm, size_t index, size_t entry, size_t arg,
bool is_primary);
-#endif /* _VM_H */
+#endif /* _VM_H */
diff --git a/src/arch/aarch64/handler.c b/src/arch/aarch64/handler.c
index 27ceacf..7fe2ead 100644
--- a/src/arch/aarch64/handler.c
+++ b/src/arch/aarch64/handler.c
@@ -14,27 +14,34 @@
void irq_current(void)
{
dlog("IRQ from current\n");
- for (;;);
+ for (;;)
+ ;
}
void sync_current_exception(uint64_t esr, uint64_t elr)
{
switch (esr >> 26) {
case 0x25: /* EC = 100101, Data abort. */
- dlog("Data abort: pc=0x%x, esr=0x%x, ec=0x%x", elr, esr, esr >> 26);
+ dlog("Data abort: pc=0x%x, esr=0x%x, ec=0x%x", elr, esr,
+ esr >> 26);
if (!(esr & (1u << 10))) /* Check FnV bit. */
- dlog(", far=0x%x, hpfar=0x%x", read_msr(far_el2), read_msr(hpfar_el2) << 8);
+ dlog(", far=0x%x, hpfar=0x%x", read_msr(far_el2),
+ read_msr(hpfar_el2) << 8);
else
dlog(", far=invalid");
dlog("\n");
- for (;;);
+ for (;;)
+ ;
default:
- dlog("Unknown sync exception pc=0x%x, esr=0x%x, ec=0x%x\n", elr, esr, esr >> 26);
- for (;;);
+ dlog("Unknown sync exception pc=0x%x, esr=0x%x, ec=0x%x\n", elr,
+ esr, esr >> 26);
+ for (;;)
+ ;
}
- for (;;);
+ for (;;)
+ ;
}
struct hvc_handler_return hvc_handler(size_t arg0, size_t arg1, size_t arg2,
@@ -95,18 +102,23 @@
return api_wait_for_interrupt();
case 0x24: /* EC = 100100, Data abort. */
- dlog("Data abort: pc=0x%x, esr=0x%x, ec=0x%x", vcpu->regs.pc, esr, esr >> 26);
+ dlog("Data abort: pc=0x%x, esr=0x%x, ec=0x%x", vcpu->regs.pc,
+ esr, esr >> 26);
if (!(esr & (1u << 10))) /* Check FnV bit. */
- dlog(", far=0x%x, hpfar=0x%x", read_msr(far_el2), read_msr(hpfar_el2) << 8);
+ dlog(", far=0x%x, hpfar=0x%x", read_msr(far_el2),
+ read_msr(hpfar_el2) << 8);
else
dlog(", far=invalid");
dlog("\n");
- for (;;);
+ for (;;)
+ ;
default:
- dlog("Unknown sync exception pc=0x%x, esr=0x%x, ec=0x%x\n", vcpu->regs.pc, esr, esr >> 26);
- for (;;);
+ dlog("Unknown sync exception pc=0x%x, esr=0x%x, ec=0x%x\n",
+ vcpu->regs.pc, esr, esr >> 26);
+ for (;;)
+ ;
}
return NULL;
diff --git a/src/arch/aarch64/inc/arch_api.h b/src/arch/aarch64/inc/arch_api.h
index 733d818..82b1429 100644
--- a/src/arch/aarch64/inc/arch_api.h
+++ b/src/arch/aarch64/inc/arch_api.h
@@ -1,6 +1,9 @@
#ifndef _ARCH_API_H
#define _ARCH_API_H
+/* Keep macro alignment */
+/* clang-format off */
+
/* Return values for vcpu_run() hypervisor call. */
#define HF_VCPU_YIELD 0x00
#define HF_VCPU_WAIT_FOR_INTERRUPT 0x01
@@ -11,4 +14,6 @@
#define HF_VM_GET_COUNT 0xff01
#define HF_VCPU_GET_COUNT 0xff02
-#endif /* _ARCH_API_H */
+/* clang-format on */
+
+#endif /* _ARCH_API_H */
diff --git a/src/arch/aarch64/inc/arch_barriers.h b/src/arch/aarch64/inc/arch_barriers.h
index 4d4cf08..1cf5224 100644
--- a/src/arch/aarch64/inc/arch_barriers.h
+++ b/src/arch/aarch64/inc/arch_barriers.h
@@ -16,4 +16,4 @@
__asm__ volatile("isb");
}
-#endif /* _ARCH_BARRIERS_H */
+#endif /* _ARCH_BARRIERS_H */
diff --git a/src/arch/aarch64/inc/arch_cpu.h b/src/arch/aarch64/inc/arch_cpu.h
index 9612078..e199b0d 100644
--- a/src/arch/aarch64/inc/arch_cpu.h
+++ b/src/arch/aarch64/inc/arch_cpu.h
@@ -63,21 +63,21 @@
__asm volatile("msr DAIFClr, #0xf");
}
-static inline
-void arch_regs_init(struct arch_regs *r, size_t pc, size_t arg, bool is_primary)
+static inline void arch_regs_init(struct arch_regs *r, size_t pc, size_t arg,
+ bool is_primary)
{
/* TODO: Use constant here. */
- r->spsr = 5 | /* M bits, set to EL1h. */
+ r->spsr = 5 | /* M bits, set to EL1h. */
(0xf << 6); /* DAIF bits set; disable interrupts. */
r->pc = pc;
r->r[0] = arg;
- r->lazy.hcr_el2 = (1u << 31) | /* RW bit. */
- (1u << 2) | /* PTW, Protected Table Walk. */
- (1u << 0); /* VM: enable stage-2 translation. */
+ r->lazy.hcr_el2 = (1u << 31) | /* RW bit. */
+ (1u << 2) | /* PTW, Protected Table Walk. */
+ (1u << 0); /* VM: enable stage-2 translation. */
if (!is_primary)
- r->lazy.hcr_el2 |= (7u << 3) | /* AMO, IMO, FMO bits. */
- (3u << 13); /* TWI, TWE bits. */
+ r->lazy.hcr_el2 |= (7u << 3) | /* AMO, IMO, FMO bits. */
+ (3u << 13); /* TWI, TWE bits. */
}
static inline void arch_regs_set_retval(struct arch_regs *r, size_t v)
@@ -124,9 +124,9 @@
static inline void arch_set_vm_mm(struct arch_page_table *table)
{
- __asm volatile("msr vttbr_el2, %0" : : "r" ((size_t)table));
+ __asm volatile("msr vttbr_el2, %0" : : "r"((size_t)table));
}
void arch_vptable_init(struct arch_page_table *table);
-#endif /* _ARCH_CPU_H */
+#endif /* _ARCH_CPU_H */
diff --git a/src/arch/aarch64/inc/arch_mm.h b/src/arch/aarch64/inc/arch_mm.h
index c65c488..4ea627d 100644
--- a/src/arch/aarch64/inc/arch_mm.h
+++ b/src/arch/aarch64/inc/arch_mm.h
@@ -180,12 +180,13 @@
for (it = begin; it < end; it += (1ull << (PAGE_BITS - 12)))
__asm__("tlbi ipas2e1, %0" : : "r"(it));
- __asm__ volatile("dsb ish\n"
- "tlbi vmalle1is\n"
- "dsb ish\n");
+ __asm__ volatile(
+ "dsb ish\n"
+ "tlbi vmalle1is\n"
+ "dsb ish\n");
}
uint64_t arch_mm_mode_to_attrs(int mode);
void arch_mm_init(paddr_t table);
-#endif /* _ARCH_MM_H */
+#endif /* _ARCH_MM_H */
diff --git a/src/arch/aarch64/inc/io.h b/src/arch/aarch64/inc/io.h
index 16f3112..45b2924 100644
--- a/src/arch/aarch64/inc/io.h
+++ b/src/arch/aarch64/inc/io.h
@@ -31,4 +31,4 @@
io_write(addr, v);
}
-#endif /* _IO_H */
+#endif /* _IO_H */
diff --git a/src/arch/aarch64/mm.c b/src/arch/aarch64/mm.c
index 225f1ae..1331cbf 100644
--- a/src/arch/aarch64/mm.c
+++ b/src/arch/aarch64/mm.c
@@ -1,7 +1,10 @@
-#include "arch_cpu.h"
#include "mm.h"
+#include "arch_cpu.h"
#include "msr.h"
+/* Keep macro alignment */
+/* clang-format off */
+
#define NON_SHAREABLE 0ull
#define OUTER_SHAREABLE 2ull
#define INNER_SHAREABLE 3ull
@@ -51,23 +54,25 @@
#define STAGE2_ACCESS_READ 1ull
#define STAGE2_ACCESS_WRITE 2ull
+/* clang-format on */
+
void arch_vptable_init(struct arch_page_table *table)
{
uint64_t i;
/* TODO: Check each bit. */
for (i = 0; i < 512; i++) {
- table->entry0[i] = 1 |
- (i << 30) | /* Address */
- (1 << 10) | /* Access flag. */
- (0 << 8) | /* sh: non-shareable. this preserves EL1. */
- (3 << 6) | /* rw */
+ table->entry0[i] =
+ 1 | (i << 30) | /* Address */
+ (1 << 10) | /* Access flag. */
+ (0 << 8) | /* sh: non-shareable. this preserves EL1. */
+ (3 << 6) | /* rw */
(0xf << 2); /* normal mem; preserves EL0/1. */
- table->entry1[i] = 1 |
- ((i+512) << 30) | /* Address */
- (1 << 10) | /* Access flag. */
- (0 << 8) | /* sh: non-shareable. this preserves EL1. */
- (3 << 6) | /* rw */
+ table->entry1[i] =
+ 1 | ((i + 512) << 30) | /* Address */
+ (1 << 10) | /* Access flag. */
+ (0 << 8) | /* sh: non-shareable. this preserves EL1. */
+ (3 << 6) | /* rw */
(0xf << 2); /* normal mem; preserves EL0/1. */
table->first[i] = 0;
}
@@ -131,15 +136,14 @@
void arch_mm_init(paddr_t table)
{
- uint64_t v =
- (1u << 31) | /* RES1. */
- (4 << 16) | /* PS: 44 bits. */
- (0 << 14) | /* TG0: 4 KB granule. */
- (3 << 12) | /* SH0: inner shareable. */
- (1 << 10) | /* ORGN0: normal, cacheable ... */
- (1 << 8) | /* IRGN0: normal, cacheable ... */
- (2 << 6) | /* SL0: Start at level 0. */
- (20 << 0); /* T0SZ: 44-bit input address size. */
+ uint64_t v = (1u << 31) | /* RES1. */
+ (4 << 16) | /* PS: 44 bits. */
+ (0 << 14) | /* TG0: 4 KB granule. */
+ (3 << 12) | /* SH0: inner shareable. */
+ (1 << 10) | /* ORGN0: normal, cacheable ... */
+ (1 << 8) | /* IRGN0: normal, cacheable ... */
+ (2 << 6) | /* SL0: Start at level 0. */
+ (20 << 0); /* T0SZ: 44-bit input address size. */
write_msr(vtcr_el2, v);
/*
@@ -147,40 +151,37 @@
* 0xff -> Normal memory, Inner/Outer Write-Back Non-transient,
* Write-Alloc, Read-Alloc.
*/
- write_msr(mair_el2,
- (0 << (8 * STAGE1_DEVICEINDX)) |
- (0xff << (8 * STAGE1_NORMALINDX)));
+ write_msr(mair_el2, (0 << (8 * STAGE1_DEVICEINDX)) |
+ (0xff << (8 * STAGE1_NORMALINDX)));
write_msr(ttbr0_el2, table);
/*
* Configure tcr_el2.
*/
- v =
- (1 << 20) | /* TBI, top byte ignored. */
- (2 << 16) | /* PS, Physical Address Size, 40 bits, 1TB. */
- (0 << 14) | /* TG0, granule size, 4KB. */
- (3 << 12) | /* SH0, inner shareable. */
- (1 << 10) | /* ORGN0, normal mem, WB RA WA Cacheable. */
- (1 << 8) | /* IRGN0, normal mem, WB RA WA Cacheable. */
- (25 << 0) | /* T0SZ, input address is 2^39 bytes. */
- 0;
+ v = (1 << 20) | /* TBI, top byte ignored. */
+ (2 << 16) | /* PS, Physical Address Size, 40 bits, 1TB. */
+ (0 << 14) | /* TG0, granule size, 4KB. */
+ (3 << 12) | /* SH0, inner shareable. */
+ (1 << 10) | /* ORGN0, normal mem, WB RA WA Cacheable. */
+ (1 << 8) | /* IRGN0, normal mem, WB RA WA Cacheable. */
+ (25 << 0) | /* T0SZ, input address is 2^39 bytes. */
+ 0;
write_msr(tcr_el2, v);
- v =
- (1 << 0) | /* M, enable stage 1 EL2 MMU. */
- (1 << 1) | /* A, enable alignment check faults. */
- (1 << 2) | /* C, data cache enable. */
- (1 << 3) | /* SA, enable stack alignment check. */
- (3 << 4) | /* RES1 bits. */
- (1 << 11) | /* RES1 bit. */
- (1 << 12) | /* I, instruction cache enable. */
- (1 << 16) | /* RES1 bit. */
- (1 << 18) | /* RES1 bit. */
- (1 << 19) | /* WXN bit, writable execute never . */
- (3 << 22) | /* RES1 bits. */
- (3 << 28) | /* RES1 bits. */
- 0;
+ v = (1 << 0) | /* M, enable stage 1 EL2 MMU. */
+ (1 << 1) | /* A, enable alignment check faults. */
+ (1 << 2) | /* C, data cache enable. */
+ (1 << 3) | /* SA, enable stack alignment check. */
+ (3 << 4) | /* RES1 bits. */
+ (1 << 11) | /* RES1 bit. */
+ (1 << 12) | /* I, instruction cache enable. */
+ (1 << 16) | /* RES1 bit. */
+ (1 << 18) | /* RES1 bit. */
+ (1 << 19) | /* WXN bit, writable execute never . */
+ (3 << 22) | /* RES1 bits. */
+ (3 << 28) | /* RES1 bits. */
+ 0;
__asm volatile("dsb sy");
__asm volatile("isb");
diff --git a/src/arch/aarch64/msr.h b/src/arch/aarch64/msr.h
index e242cc2..30916db 100644
--- a/src/arch/aarch64/msr.h
+++ b/src/arch/aarch64/msr.h
@@ -3,17 +3,16 @@
#include <stddef.h>
-#define read_msr(name) \
- __extension__({ \
- size_t __v; \
- __asm volatile("mrs %0, " #name : "=r" (__v)); \
- __v; \
+#define read_msr(name) \
+ __extension__({ \
+ size_t __v; \
+ __asm volatile("mrs %0, " #name : "=r"(__v)); \
+ __v; \
})
-#define write_msr(name, value) \
- do { \
- __asm volatile("msr " #name ", %x0" \
- : : "rZ" ((size_t)value)); \
+#define write_msr(name, value) \
+ do { \
+ __asm volatile("msr " #name ", %x0" : : "rZ"((size_t)value)); \
} while (0)
-#endif /* _MSR_H */
+#endif /* _MSR_H */
diff --git a/src/arch/aarch64/pl011.c b/src/arch/aarch64/pl011.c
index 02df2eb..24a3460 100644
--- a/src/arch/aarch64/pl011.c
+++ b/src/arch/aarch64/pl011.c
@@ -20,7 +20,8 @@
arch_putchar('\r');
/* Wait until there is room in the tx buffer. */
- while (io_read(PL011_BASE + UARTFR) & UARTFR_TXFF);
+ while (io_read(PL011_BASE + UARTFR) & UARTFR_TXFF)
+ ;
dmb();
@@ -30,5 +31,6 @@
dmb();
/* Wait until the UART is no longer busy. */
- while (io_read_mb(PL011_BASE + UARTFR) & UARTFR_BUSY);
+ while (io_read_mb(PL011_BASE + UARTFR) & UARTFR_BUSY)
+ ;
}
diff --git a/src/cpio.c b/src/cpio.c
index c4add22..e8e95ec 100644
--- a/src/cpio.c
+++ b/src/cpio.c
@@ -32,8 +32,8 @@
iter->size_left = c->total_size;
}
-bool cpio_next(struct cpio_iter *iter, const char **name,
- const void **contents, size_t *size)
+bool cpio_next(struct cpio_iter *iter, const char **name, const void **contents,
+ size_t *size)
{
const struct cpio_header *h = iter->cur;
size_t size_left;
@@ -70,7 +70,8 @@
*size = filelen;
iter->cur = (struct cpio_header *)((char *)*contents + filelen);
- iter->cur = (struct cpio_header *)(char *)(((size_t)iter->cur + 1) & ~1);
+ iter->cur =
+ (struct cpio_header *)(char *)(((size_t)iter->cur + 1) & ~1);
iter->size_left = size_left;
return true;
diff --git a/src/dlog.c b/src/dlog.c
index c4d49ce..0af7eb5 100644
--- a/src/dlog.c
+++ b/src/dlog.c
@@ -1,13 +1,16 @@
#include "dlog.h"
+#include <stdarg.h>
#include <stdbool.h>
#include <stddef.h>
-#include <stdarg.h>
#include "arch.h"
#include "spinlock.h"
#include "std.h"
+/* Keep macro alignment */
+/* clang-format off */
+
#define FLAG_SPACE 0x01
#define FLAG_ZERO 0x02
#define FLAG_MINUS 0x04
@@ -16,6 +19,8 @@
#define FLAG_UPPER 0x20
#define FLAG_NEG 0x40
+/* clang-format on */
+
/*
* Prints a raw string to the debug log and returns its length.
*/
@@ -200,27 +205,23 @@
/* Handle the format specifier. */
switch (p[1]) {
- case 's':
- {
- char *str = va_arg(args, char *);
- print_string(str, str, w, flags, ' ');
- }
+ case 's': {
+ char *str = va_arg(args, char *);
+ print_string(str, str, w, flags, ' ');
p++;
- break;
+ } break;
case 'd':
- case 'i':
- {
- int v = va_arg(args, int);
- if (v < 0) {
- flags |= FLAG_NEG;
- v = -v;
- }
-
- print_num((size_t)v, 10, w, flags);
+ case 'i': {
+ int v = va_arg(args, int);
+ if (v < 0) {
+ flags |= FLAG_NEG;
+ v = -v;
}
+
+ print_num((size_t)v, 10, w, flags);
p++;
- break;
+ } break;
case 'X':
flags |= FLAG_UPPER;
diff --git a/src/fdt.c b/src/fdt.c
index d20b82e..162b606 100644
--- a/src/fdt.c
+++ b/src/fdt.c
@@ -78,8 +78,8 @@
return false;
}
-static bool fdt_tokenizer_bytes(struct fdt_tokenizer *t,
- const char **res, size_t size)
+static bool fdt_tokenizer_bytes(struct fdt_tokenizer *t, const char **res,
+ size_t size)
{
const char *next = t->cur + size;
if (next > t->end)
@@ -197,7 +197,8 @@
const char *name;
const char *buf;
uint32_t size;
- while (fdt_next_property(t, &name, &buf, &size));
+ while (fdt_next_property(t, &name, &buf, &size))
+ ;
}
static bool fdt_skip_node(struct fdt_tokenizer *t)
@@ -320,9 +321,11 @@
depth++;
while (fdt_next_property(&t, &name, &buf, &size)) {
size_t i;
- dlog("%*sproperty: \"%s\" (", 2 * depth, "", name);
+ dlog("%*sproperty: \"%s\" (", 2 * depth, "",
+ name);
for (i = 0; i < size; i++)
- dlog("%s%02x", i == 0 ? "" : " ", buf[i]);
+ dlog("%s%02x", i == 0 ? "" : " ",
+ buf[i]);
dlog(")\n");
}
}
@@ -338,9 +341,12 @@
dlog("fdt: off_mem_rsvmap=%u\n", be32toh(hdr->off_mem_rsvmap));
{
- struct fdt_reserve_entry *e = (struct fdt_reserve_entry *)((size_t)hdr + be32toh(hdr->off_mem_rsvmap));
+ struct fdt_reserve_entry *e =
+ (struct fdt_reserve_entry
+ *)((size_t)hdr + be32toh(hdr->off_mem_rsvmap));
while (e->address || e->size) {
- dlog("Entry: %p (0x%x bytes)\n", be64toh(e->address), be64toh(e->size));
+ dlog("Entry: %p (0x%x bytes)\n", be64toh(e->address),
+ be64toh(e->size));
e++;
}
}
@@ -351,10 +357,14 @@
/* TODO: Clean this up. */
char *begin = (char *)hdr + be32toh(hdr->off_mem_rsvmap);
struct fdt_reserve_entry *e = (struct fdt_reserve_entry *)begin;
- hdr->totalsize = htobe32(be32toh(hdr->totalsize) + sizeof(struct fdt_reserve_entry));
- hdr->off_dt_struct = htobe32(be32toh(hdr->off_dt_struct) + sizeof(struct fdt_reserve_entry));
- hdr->off_dt_strings = htobe32(be32toh(hdr->off_dt_strings) + sizeof(struct fdt_reserve_entry));
- memmove(begin + sizeof(struct fdt_reserve_entry), begin, be32toh(hdr->totalsize) - be32toh(hdr->off_mem_rsvmap));
+ hdr->totalsize = htobe32(be32toh(hdr->totalsize) +
+ sizeof(struct fdt_reserve_entry));
+ hdr->off_dt_struct = htobe32(be32toh(hdr->off_dt_struct) +
+ sizeof(struct fdt_reserve_entry));
+ hdr->off_dt_strings = htobe32(be32toh(hdr->off_dt_strings) +
+ sizeof(struct fdt_reserve_entry));
+ memmove(begin + sizeof(struct fdt_reserve_entry), begin,
+ be32toh(hdr->totalsize) - be32toh(hdr->off_mem_rsvmap));
e->address = htobe64(addr);
e->size = htobe64(len);
}
diff --git a/src/main.c b/src/main.c
index 0c18d79..e7639f5 100644
--- a/src/main.c
+++ b/src/main.c
@@ -45,7 +45,7 @@
}
static bool fdt_read_number(const struct fdt_node *node, const char *name,
- uint64_t *value)
+ uint64_t *value)
{
const char *data;
uint32_t size;
@@ -161,8 +161,8 @@
/* Traverse all memory ranges within this node. */
while (size >= entry_size) {
uint64_t addr = convert_number(data, address_size);
- uint64_t len = convert_number(data + address_size,
- size_size);
+ uint64_t len =
+ convert_number(data + address_size, size_size);
if (len > *block_size) {
/* Remember the largest range we've found. */
@@ -321,8 +321,8 @@
return false;
}
-static bool load_secondary(struct cpio *c,
- uint64_t mem_start, uint64_t *mem_size)
+static bool load_secondary(struct cpio *c, uint64_t mem_start,
+ uint64_t *mem_size)
{
struct memiter it;
struct memiter str;
@@ -335,10 +335,10 @@
return false;
}
- for (count = 0; memiter_parse_uint(&it, &mem) &&
- memiter_parse_uint(&it, &cpu) &&
- memiter_parse_str(&it, &str) &&
- count < MAX_VMS; count++) {
+ for (count = 0;
+ memiter_parse_uint(&it, &mem) && memiter_parse_uint(&it, &cpu) &&
+ memiter_parse_str(&it, &str) && count < MAX_VMS;
+ count++) {
struct memiter kernel;
if (!memiter_find_file(c, &str, &kernel)) {
@@ -353,7 +353,9 @@
}
if (mem < kernel.limit - kernel.next) {
- dlog("Kernel is larger than available memory for vm %u\n", count);
+ dlog("Kernel is larger than available memory for vm "
+ "%u\n",
+ count);
continue;
}
@@ -367,8 +369,8 @@
dlog("Loaded VM%u with %u vcpus, entry at 0x%x\n", count, cpu,
mem_start + *mem_size);
vm_init(secondary_vm + count, cpu);
- vm_start_vcpu(secondary_vm + count, 0,
- mem_start + *mem_size, 0, false);
+ vm_start_vcpu(secondary_vm + count, 0, mem_start + *mem_size, 0,
+ false);
}
secondary_vm_count = count;
@@ -439,28 +441,29 @@
if (!mm_ptable_init(&ptable, MM_MODE_NOSYNC | MM_MODE_STAGE1)) {
dlog("Unable to allocate memory for page table.\n");
- for (;;);
+ for (;;)
+ ;
}
dlog("text: 0x%x - 0x%x\n", text_begin, text_end);
dlog("rodata: 0x%x - 0x%x\n", rodata_begin, rodata_end);
dlog("data: 0x%x - 0x%x\n", data_begin, data_end);
- /* Map page for uart. */
- mm_ptable_map_page(&ptable, PL011_BASE, PL011_BASE,
+ /* Map page for uart. */
+ mm_ptable_map_page(&ptable, PL011_BASE, PL011_BASE,
MM_MODE_R | MM_MODE_W | MM_MODE_D | MM_MODE_NOSYNC |
- MM_MODE_STAGE1);
+ MM_MODE_STAGE1);
- /* Map each section. */
- mm_ptable_map(&ptable, (vaddr_t)text_begin, (vaddr_t)text_end,
+ /* Map each section. */
+ mm_ptable_map(&ptable, (vaddr_t)text_begin, (vaddr_t)text_end,
(paddr_t)text_begin,
MM_MODE_X | MM_MODE_NOSYNC | MM_MODE_STAGE1);
- mm_ptable_map(&ptable, (vaddr_t)rodata_begin, (vaddr_t)rodata_end,
+ mm_ptable_map(&ptable, (vaddr_t)rodata_begin, (vaddr_t)rodata_end,
(paddr_t)rodata_begin,
MM_MODE_R | MM_MODE_NOSYNC | MM_MODE_STAGE1);
- mm_ptable_map(&ptable, (vaddr_t)data_begin, (vaddr_t)data_end,
+ mm_ptable_map(&ptable, (vaddr_t)data_begin, (vaddr_t)data_end,
(paddr_t)data_begin,
MM_MODE_R | MM_MODE_W | MM_MODE_NOSYNC | MM_MODE_STAGE1);
@@ -476,8 +479,7 @@
/* Map in the fdt header. */
if (!mm_ptable_map(&ptable, (vaddr_t)fdt,
(vaddr_t)fdt + fdt_header_size(),
- (paddr_t)fdt,
- MM_MODE_R | MM_MODE_STAGE1)) {
+ (paddr_t)fdt, MM_MODE_R | MM_MODE_STAGE1)) {
dlog("Unable to map FDT header.\n");
break;
}
@@ -488,8 +490,7 @@
*/
if (!mm_ptable_map(&ptable, (vaddr_t)fdt,
(vaddr_t)fdt + fdt_total_size(fdt),
- (paddr_t)fdt,
- MM_MODE_R | MM_MODE_STAGE1)) {
+ (paddr_t)fdt, MM_MODE_R | MM_MODE_STAGE1)) {
dlog("Unable to map FDT.\n");
break;
}
@@ -515,11 +516,11 @@
cpio_init(&c, (void *)begin, end - begin);
/* Map the fdt in r/w mode in preparation for extending it. */
- if (!mm_ptable_map(&ptable, (vaddr_t)fdt,
- (vaddr_t)fdt + fdt_total_size(fdt) +
- PAGE_SIZE,
- (paddr_t)fdt,
- MM_MODE_R | MM_MODE_W | MM_MODE_STAGE1)) {
+ if (!mm_ptable_map(
+ &ptable, (vaddr_t)fdt,
+ (vaddr_t)fdt + fdt_total_size(fdt) + PAGE_SIZE,
+ (paddr_t)fdt,
+ MM_MODE_R | MM_MODE_W | MM_MODE_STAGE1)) {
dlog("Unable to map FDT in r/w mode.\n");
break;
}
@@ -532,9 +533,10 @@
mem_size - new_mem_size);
/* Unmap FDT. */
- if (!mm_ptable_unmap(&ptable, (vaddr_t)fdt,
- (vaddr_t)fdt + fdt_total_size(fdt) +
- PAGE_SIZE, MM_MODE_STAGE1)) {
+ if (!mm_ptable_unmap(
+ &ptable, (vaddr_t)fdt,
+ (vaddr_t)fdt + fdt_total_size(fdt) + PAGE_SIZE,
+ MM_MODE_STAGE1)) {
dlog("Unable to unmap the FDT.\n");
break;
}
diff --git a/src/mm.c b/src/mm.c
index 4a72d56..8bfd6f4 100644
--- a/src/mm.c
+++ b/src/mm.c
@@ -6,9 +6,14 @@
#include "alloc.h"
#include "dlog.h"
+/* Keep macro alignment */
+/* clang-format off */
+
#define MAP_FLAG_SYNC 0x01
#define MAP_FLAG_COMMIT 0x02
+/* clang-format on */
+
/**
* Calculates the size of the address space represented by a page table entry at
* the given level.
@@ -58,7 +63,7 @@
/* Allocate a new table. */
ntable = (sync_alloc ? halloc_aligned : halloc_aligned_nosync)(
- PAGE_SIZE, PAGE_SIZE);
+ PAGE_SIZE, PAGE_SIZE);
if (!ntable) {
dlog("Failed to allocate memory for page table\n");
return NULL;
@@ -142,12 +147,12 @@
mm_free_page_pte(pte, level, sync);
}
} else {
- pte_t *nt = mm_populate_table_pte(table + i, level,
- sync);
+ pte_t *nt =
+ mm_populate_table_pte(table + i, level, sync);
if (!nt)
return false;
- if (!mm_map_level(va, va_end, pa, attrs, nt, level-1,
+ if (!mm_map_level(va, va_end, pa, attrs, nt, level - 1,
flags))
return false;
}