[REFACTOR] Hafnium style fixes and other minor fixes.

Change-Id: I8f10a1d82f0de9efc43894a3a7cdd09bbbcfc6ec
diff --git a/build/image/image.ld b/build/image/image.ld
index f092458..c42ab5f 100644
--- a/build/image/image.ld
+++ b/build/image/image.ld
@@ -189,8 +189,8 @@
 	image_end = .;
 
 	/*
-	 * Calculate sizes of the the binary file and image loaded into memory
-	 * as well as the text, read-only and read-write data sections.
+	 * Calculate sizes of the binary file and image loaded into memory as
+	 * well as the text, read-only and read-write data sections.
 	 */
 	bin_size = ABSOLUTE(bin_end - ORIGIN_ADDRESS);
 	image_size = ABSOLUTE(image_end - ORIGIN_ADDRESS);
diff --git a/inc/hf/cpu.h b/inc/hf/cpu.h
index fe1c7f4..af7391f 100644
--- a/inc/hf/cpu.h
+++ b/inc/hf/cpu.h
@@ -18,7 +18,7 @@
 
 #include "hf/arch/cpu.h"
 
-/* TODO: Update alignment such that cpus are in different cache lines. */
+/* TODO: Fix alignment such that `cpu` structs are in different cache lines. */
 struct cpu {
 	/** CPU identifier. Doesn't have to be contiguous. */
 	cpu_id_t id;
@@ -29,7 +29,7 @@
 	/** See api.c for the partial ordering on locks. */
 	struct spinlock lock;
 
-	/** Determines whether or not the cpu is currently on. */
+	/** Determines whether the CPU is currently on. */
 	bool is_on;
 };
 
diff --git a/inc/hf/vcpu.h b/inc/hf/vcpu.h
index 869bf23..87d5e8c 100644
--- a/inc/hf/vcpu.h
+++ b/inc/hf/vcpu.h
@@ -25,22 +25,22 @@
 #define INTERRUPT_REGISTER_BITS 32
 
 enum vcpu_state {
-	/** The vcpu is switched off. */
+	/** The vCPU is switched off. */
 	VCPU_STATE_OFF,
 
-	/** The vcpu is ready to be run. */
+	/** The vCPU is ready to be run. */
 	VCPU_STATE_READY,
 
-	/** The vcpu is currently running. */
+	/** The vCPU is currently running. */
 	VCPU_STATE_RUNNING,
 
-	/** The vcpu is waiting for a message. */
+	/** The vCPU is waiting for a message. */
 	VCPU_STATE_BLOCKED_MAILBOX,
 
-	/** The vcpu is waiting for an interrupt. */
+	/** The vCPU is waiting for an interrupt. */
 	VCPU_STATE_BLOCKED_INTERRUPT,
 
-	/** The vcpu has aborted. */
+	/** The vCPU has aborted. */
 	VCPU_STATE_ABORTED,
 };
 
diff --git a/inc/vmapi/hf/call.h b/inc/vmapi/hf/call.h
index bd1c0bd..f2455e2 100644
--- a/inc/vmapi/hf/call.h
+++ b/inc/vmapi/hf/call.h
@@ -52,7 +52,7 @@
 }
 
 /**
- * Returns the number of VCPUs configured in the given secondary VM.
+ * Returns the number of vCPUs configured in the given secondary VM.
  */
 static inline spci_vcpu_count_t hf_vcpu_get_count(spci_vm_id_t vm_id)
 {
@@ -70,7 +70,7 @@
 }
 
 /**
- * Hints that the vcpu is willing to yield its current use of the physical CPU.
+ * Hints that the vCPU is willing to yield its current use of the physical CPU.
  * This call always returns SPCI_SUCCESS.
  */
 static inline struct spci_value spci_yield(void)
@@ -140,7 +140,7 @@
  * If no message is immediately available and there are no enabled and pending
  * interrupts (irrespective of whether interrupts are enabled globally), then
  * this will block until a message is available or an enabled interrupt becomes
- * pending. This matches the behaviour of the WFI instruction on aarch64, except
+ * pending. This matches the behaviour of the WFI instruction on AArch64, except
  * that a message becoming available is also treated like a wake-up event.
  *
  * Returns:
@@ -277,7 +277,7 @@
  * SPCI interfaces.
  *
  * Returns:
- *  - SPCI_SUCCESS in .func if the the optional interface with function_id is
+ *  - SPCI_SUCCESS in .func if the optional interface with function_id is
  * implemented.
  *  - SPCI_ERROR in .func if the optional interface with function_id is not
  * implemented.
diff --git a/src/api.c b/src/api.c
index a5d6ef8..7aa573c 100644
--- a/src/api.c
+++ b/src/api.c
@@ -59,7 +59,7 @@
 }
 
 /**
- * Switches the physical CPU back to the corresponding vcpu of the primary VM.
+ * Switches the physical CPU back to the corresponding vCPU of the primary VM.
  *
  * This triggers the scheduling logic to run. Run in the context of secondary VM
  * to cause SPCI_RUN to return and the primary VM to regain control of the CPU.
@@ -110,7 +110,7 @@
 	/* Set the return value for the primary VM's call to HF_VCPU_RUN. */
 	arch_regs_set_retval(&next->regs, primary_ret);
 
-	/* Mark the current vcpu as waiting. */
+	/* Mark the current vCPU as waiting. */
 	sl_lock(&current->lock);
 	current->state = secondary_state;
 	sl_unlock(&current->lock);
@@ -119,7 +119,7 @@
 }
 
 /**
- * Returns to the primary vm and signals that the vcpu still has work to do so.
+ * Returns to the primary VM and signals that the vCPU still has work to do.
  */
 struct vcpu *api_preempt(struct vcpu *current)
 {
@@ -132,7 +132,7 @@
 }
 
 /**
- * Puts the current vcpu in wait for interrupt mode, and returns to the primary
+ * Puts the current vCPU in wait for interrupt mode, and returns to the primary
- * vm.
+ * VM.
  */
 struct vcpu *api_wait_for_interrupt(struct vcpu *current)
@@ -166,8 +166,8 @@
 }
 
 /**
- * Returns to the primary vm to allow this cpu to be used for other tasks as the
- * vcpu does not have work to do at this moment. The current vcpu is marked as
+ * Returns to the primary VM to allow this CPU to be used for other tasks as the
+ * vCPU does not have work to do at this moment. The current vCPU is marked as
  * ready to be scheduled again.
  */
 void api_yield(struct vcpu *current, struct vcpu **next)
@@ -178,7 +178,7 @@
 	};
 
 	if (current->vm->id == HF_PRIMARY_VM_ID) {
-		/* Noop on the primary as it makes the scheduling decisions. */
+		/* No-op on the primary as it makes the scheduling decisions. */
 		return;
 	}
 
@@ -249,7 +249,7 @@
 {
 	struct vm *vm;
 
-	/* Only the primary VM needs to know about vcpus for scheduling. */
+	/* Only the primary VM needs to know about vCPUs for scheduling. */
 	if (current->vm->id != HF_PRIMARY_VM_ID) {
 		return 0;
 	}
@@ -264,8 +264,8 @@
 
 /**
  * This function is called by the architecture-specific context switching
- * function to indicate that register state for the given vcpu has been saved
- * and can therefore be used by other pcpus.
+ * function to indicate that register state for the given vCPU has been saved
+ * and can therefore be used by other pCPUs.
  */
 void api_regs_state_saved(struct vcpu *vcpu)
 {
@@ -375,7 +375,7 @@
 }
 
 /**
- * Prepares the vcpu to run by updating its state and fetching whether a return
+ * Prepares the vCPU to run by updating its state and fetching whether a return
  * value needs to be forced onto the vCPU.
  */
 static bool api_vcpu_prepare_run(const struct vcpu *current, struct vcpu *vcpu,
@@ -522,13 +522,13 @@
 	struct vcpu *vcpu;
 	struct spci_value ret = spci_error(SPCI_INVALID_PARAMETERS);
 
-	/* Only the primary VM can switch vcpus. */
+	/* Only the primary VM can switch vCPUs. */
 	if (current->vm->id != HF_PRIMARY_VM_ID) {
 		ret.arg2 = SPCI_DENIED;
 		goto out;
 	}
 
-	/* Only secondary VM vcpus can be run. */
+	/* Only secondary VM vCPUs can be run. */
 	if (vm_id == HF_PRIMARY_VM_ID) {
 		goto out;
 	}
@@ -1088,7 +1088,7 @@
 
 	/*
 	 * The primary VM will receive messages as a status code from running
-	 * vcpus and must not call this function.
+	 * vCPUs and must not call this function.
 	 */
 	if (vm->id == HF_PRIMARY_VM_ID) {
 		return spci_error(SPCI_NOT_SUPPORTED);
@@ -1377,7 +1377,7 @@
 	}
 
 	if (target_vcpu_idx >= target_vm->vcpu_count) {
-		/* The requested vcpu must exist. */
+		/* The requested vCPU must exist. */
 		return -1;
 	}
 
@@ -1387,7 +1387,7 @@
 
 	target_vcpu = vm_get_vcpu(target_vm, target_vcpu_idx);
 
-	dlog("Injecting IRQ %d for VM %d VCPU %d from VM %d VCPU %d\n", intid,
+	dlog("Injecting IRQ %d for VM %d vCPU %d from VM %d vCPU %d\n", intid,
 	     target_vm_id, target_vcpu_idx, current->vm->id, current->cpu->id);
 	return internal_interrupt_inject(target_vcpu, intid, current, next);
 }
diff --git a/src/arch/aarch64/hypervisor/exceptions.S b/src/arch/aarch64/hypervisor/exceptions.S
index 2f47c00..ed218cb 100644
--- a/src/arch/aarch64/hypervisor/exceptions.S
+++ b/src/arch/aarch64/hypervisor/exceptions.S
@@ -18,7 +18,7 @@
 #include "exception_macros.S"
 
 /**
- * Saves the volatile registers into the register buffer of the current vcpu.
+ * Saves the volatile registers into the register buffer of the current vCPU.
  */
 .macro save_volatile_to_vcpu
 	/*
@@ -27,7 +27,7 @@
 	 */
 	str x18, [sp, #-16]!
 
-	/* Get the current vcpu. */
+	/* Get the current vCPU. */
 	mrs x18, tpidr_el2
 	stp x0, x1, [x18, #VCPU_REGS + 8 * 0]
 	stp x2, x3, [x18, #VCPU_REGS + 8 * 2]
@@ -40,7 +40,7 @@
 	stp x16, x17, [x18, #VCPU_REGS + 8 * 16]
 	stp x29, x30, [x18, #VCPU_REGS + 8 * 29]
 
-	/* x18 was saved on the stack, so we move it to vcpu regs buffer. */
+	/* x18 was saved on the stack, so we move it to the vCPU regs buffer. */
 	ldr x0, [sp], #16
 	str x0, [x18, #VCPU_REGS + 8 * 18]
 
@@ -52,11 +52,11 @@
 
 /**
  * This is a generic handler for exceptions taken at a lower EL. It saves the
- * volatile registers to the current vcpu and calls the C handler, which can
+ * volatile registers to the current vCPU and calls the C handler, which can
  * select one of two paths: (a) restore volatile registers and return, or
- * (b) switch to a different vcpu. In the latter case, the handler needs to save
+ * (b) switch to a different vCPU. In the latter case, the handler needs to save
  * all non-volatile registers (they haven't been saved yet), then restore all
- * registers from the new vcpu.
+ * registers from the new vCPU.
  */
 .macro lower_exception handler:req
 	save_volatile_to_vcpu
@@ -64,10 +64,10 @@
 	/* Call C handler. */
 	bl \handler
 
-	/* Switch vcpu if requested by handler. */
+	/* Switch vCPU if requested by handler. */
 	cbnz x0, vcpu_switch
 
-	/* vcpu is not changing. */
+	/* vCPU is not changing. */
 	mrs x0, tpidr_el2
 	b vcpu_restore_volatile_and_run
 .endm
@@ -90,10 +90,10 @@
 	mrs x0, esr_el2
 	bl sync_lower_exception
 
-	/* Switch vcpu if requested by handler. */
+	/* Switch vCPU if requested by handler. */
 	cbnz x0, vcpu_switch
 
-	/* vcpu is not changing. */
+	/* vCPU is not changing. */
 	mrs x0, tpidr_el2
 	b vcpu_restore_volatile_and_run
 .endm
@@ -180,7 +180,7 @@
 	 * can clobber non-volatile registers that are used by the msr/mrs,
 	 * which results in the wrong value being read or written.
 	 */
-	/* Get the current vcpu. */
+	/* Get the current vCPU. */
 	mrs x18, tpidr_el2
 	stp x19, x20, [x18, #VCPU_REGS + 8 * 19]
 	stp x21, x22, [x18, #VCPU_REGS + 8 * 21]
@@ -193,18 +193,18 @@
 	bl handle_system_register_access
 	cbnz x0, vcpu_switch
 
-	/* vcpu is not changing. */
+	/* vCPU is not changing. */
 	mrs x0, tpidr_el2
 	b vcpu_restore_nonvolatile_and_run
 
 /**
- * Switch to a new vcpu.
+ * Switch to a new vCPU.
  *
- * All volatile registers from the old vcpu have already been saved. We need
- * to save only non-volatile ones from the old vcpu, and restore all from the
+ * All volatile registers from the old vCPU have already been saved. We need
+ * to save only non-volatile ones from the old vCPU, and restore all from the
  * new one.
  *
- * x0 is a pointer to the new vcpu.
+ * x0 is a pointer to the new vCPU.
  */
 vcpu_switch:
 	/* Save non-volatile registers. */
@@ -320,7 +320,7 @@
 	mrs x4, fpcr
 	stp x3, x4, [x28], #32
 
-	/* Save new vcpu pointer in non-volatile register. */
+	/* Save new vCPU pointer in non-volatile register. */
 	mov x19, x0
 
 	/*
@@ -334,7 +334,7 @@
 	/* Intentional fallthrough. */
 .global vcpu_restore_all_and_run
 vcpu_restore_all_and_run:
-	/* Update pointer to current vcpu. */
+	/* Update pointer to current vCPU. */
 	msr tpidr_el2, x0
 
 	/* Restore peripheral registers. */
@@ -495,9 +495,9 @@
 
 	/* Intentional fallthrough. */
 /**
- * Restore volatile registers and run the given vcpu.
+ * Restore volatile registers and run the given vCPU.
  *
- * x0 is a pointer to the target vcpu.
+ * x0 is a pointer to the target vCPU.
  */
 vcpu_restore_volatile_and_run:
 	ldp x4, x5, [x0, #VCPU_REGS + 8 * 4]
diff --git a/src/arch/aarch64/hypervisor/handler.c b/src/arch/aarch64/hypervisor/handler.c
index 21c6d94..53ae4d0 100644
--- a/src/arch/aarch64/hypervisor/handler.c
+++ b/src/arch/aarch64/hypervisor/handler.c
@@ -306,8 +306,8 @@
 			  args->arg4, args->arg5, args->arg6, args->arg7);
 
 	/*
-	 * Preserve the value passed by the caller, rather than the client_id we
-	 * generated. Note that this would also overwrite any return value that
+	 * Preserve the value passed by the caller, rather than the generated
+	 * client_id. Note that this would also overwrite any return value that
 	 * may be in x7, but the SMCs that we are forwarding are legacy calls
 	 * from before SMCCC 1.2 so won't have more than 4 return values anyway.
 	 */
@@ -731,7 +731,7 @@
 }
 
 /**
- * Handles EC = 011000, msr, mrs instruction traps.
+ * Handles EC = 011000, MSR, MRS instruction traps.
- * Returns non-null ONLY if the access failed and the vcpu is changing.
+ * Returns non-null ONLY if the access failed and the vCPU is changing.
  */
 struct vcpu *handle_system_register_access(uintreg_t esr_el2)
diff --git a/src/arch/aarch64/hypervisor/vm.c b/src/arch/aarch64/hypervisor/vm.c
index 97c8746..619ee65 100644
--- a/src/arch/aarch64/hypervisor/vm.c
+++ b/src/arch/aarch64/hypervisor/vm.c
@@ -40,7 +40,7 @@
 		vm->arch.trapped_features |= HF_FEATURE_PERFMON;
 
 		/*
-		 * TODO(b/132395845):  Access to RAS registers is not trapped at
+		 * TODO(b/132395845): Access to RAS registers is not trapped at
 		 * the moment for the primary VM, only for the secondaries. RAS
 		 * register access isn't needed now, but it might be
 		 * required for debugging. When Hafnium introduces debug vs
diff --git a/src/cpu.c b/src/cpu.c
index 92d17fd..f8beed6 100644
--- a/src/cpu.c
+++ b/src/cpu.c
@@ -46,7 +46,7 @@
  * TOCTOU issues while Hafnium performs actions on information that would
  * otherwise be re-writable by the VM.
  *
- * Each buffer is owned by a single cpu. The buffer can only be used for
+ * Each buffer is owned by a single CPU. The buffer can only be used for
- * spci_msg_send. The information stored in the buffer is only valid during the
+ * spci_msg_send. The information stored in the buffer is only valid while the
  * spci_msg_send request is performed.
  */
@@ -158,7 +158,7 @@
 }
 
 /**
- * Searches for a CPU based on its id.
+ * Searches for a CPU based on its ID.
  */
 struct cpu *cpu_find(cpu_id_t id)
 {
diff --git a/src/load.c b/src/load.c
index 9f311a9..d69b36d 100644
--- a/src/load.c
+++ b/src/load.c
@@ -234,7 +234,7 @@
 		goto out;
 	}
 
-	dlog("Loaded with %u vcpus, entry at %#x.\n",
+	dlog("Loaded with %u vCPUs, entry at %#x.\n",
 	     manifest_vm->secondary.vcpu_count, pa_addr(mem_begin));
 
 	vcpu = vm_get_vcpu(vm, 0);
diff --git a/src/vm.c b/src/vm.c
index 3d71c51..856509f 100644
--- a/src/vm.c
+++ b/src/vm.c
@@ -64,7 +64,7 @@
 		list_init(&vm->wait_entries[i].ready_links);
 	}
 
-	/* Do basic initialization of vcpus. */
+	/* Do basic initialization of vCPUs. */
 	for (i = 0; i < vcpu_count; i++) {
 		vcpu_init(vm_get_vcpu(vm, i), vm);
 	}
diff --git a/test/inc/test/vmapi/exception_handler.h b/test/inc/test/vmapi/exception_handler.h
index 1703af6..07ef312 100644
--- a/test/inc/test/vmapi/exception_handler.h
+++ b/test/inc/test/vmapi/exception_handler.h
@@ -26,8 +26,8 @@
 
 void exception_handler_reset(void);
 
-void exception_handler_send_num_exceptions(void);
+void exception_handler_send_exception_count(void);
 
-int exception_handler_receive_num_exceptions(
+int exception_handler_receive_exception_count(
 	const struct spci_value *send_res,
 	const struct spci_memory_region *recv_buf);
diff --git a/test/vmapi/common/exception_handler.c b/test/vmapi/common/exception_handler.c
index 87e61ec..c58a5b8 100644
--- a/test/vmapi/common/exception_handler.c
+++ b/test/vmapi/common/exception_handler.c
@@ -24,22 +24,22 @@
 /**
  * Tracks the number of times the exception handler has been invoked.
  */
-static int exception_handler_num_exceptions = 0;
+static int exception_handler_exception_count = 0;
 
 /**
  * Sends the number of exceptions handled to the Primary VM.
  */
-void exception_handler_send_num_exceptions(void)
+void exception_handler_send_exception_count(void)
 {
 	void *send_buf = SERVICE_SEND_BUFFER();
 
-	dlog("Sending num_exceptions %d to primary VM\n",
-	     exception_handler_num_exceptions);
+	dlog("Sending exception_count %d to primary VM\n",
+	     exception_handler_exception_count);
 	memcpy_s(send_buf, SPCI_MSG_PAYLOAD_MAX,
-		 (const void *)&exception_handler_num_exceptions,
-		 sizeof(exception_handler_num_exceptions));
+		 (const void *)&exception_handler_exception_count,
+		 sizeof(exception_handler_exception_count));
 	EXPECT_EQ(spci_msg_send(hf_vm_get_id(), HF_PRIMARY_VM_ID,
-				sizeof(exception_handler_num_exceptions), 0)
+				sizeof(exception_handler_exception_count), 0)
 			  .func,
 		  SPCI_SUCCESS_32);
 }
@@ -47,16 +47,16 @@
 /**
  * Receives the number of exceptions handled.
  */
-int exception_handler_receive_num_exceptions(
+int exception_handler_receive_exception_count(
 	const struct spci_value *send_res,
 	const struct spci_memory_region *recv_buf)
 {
-	int num_exceptions = *((const int *)recv_buf);
+	int exception_count = *((const int *)recv_buf);
 
 	EXPECT_EQ(send_res->func, SPCI_MSG_SEND_32);
-	EXPECT_EQ(spci_msg_send_size(*send_res), sizeof(num_exceptions));
+	EXPECT_EQ(spci_msg_send_size(*send_res), sizeof(exception_count));
 	EXPECT_EQ(spci_rx_release().func, SPCI_SUCCESS_32);
-	return num_exceptions;
+	return exception_count;
 }
 
 /**
@@ -66,7 +66,7 @@
 bool exception_handler_skip_instruction(void)
 {
 	dlog("%s function is triggered!\n", __func__);
-	++exception_handler_num_exceptions;
+	++exception_handler_exception_count;
 
 	/* Skip instruction that triggered the exception. */
 	uint64_t next_pc = read_msr(elr_el1);
@@ -84,9 +84,9 @@
 bool exception_handler_yield(void)
 {
 	dlog("%s function is triggered!\n", __func__);
-	++exception_handler_num_exceptions;
+	++exception_handler_exception_count;
 
-	exception_handler_send_num_exceptions();
+	exception_handler_send_exception_count();
 
 	/* Indicate that elr_el1 should not be restored. */
 	return true;
@@ -97,7 +97,7 @@
  */
 int exception_handler_get_num(void)
 {
-	return exception_handler_num_exceptions;
+	return exception_handler_exception_count;
 }
 
 /**
@@ -105,5 +105,5 @@
  */
 void exception_handler_reset(void)
 {
-	exception_handler_num_exceptions = 0;
+	exception_handler_exception_count = 0;
 }
diff --git a/test/vmapi/primary_only/faults.c b/test/vmapi/primary_only/faults.c
index d9c1b46..34b7e15 100644
--- a/test/vmapi/primary_only/faults.c
+++ b/test/vmapi/primary_only/faults.c
@@ -59,7 +59,7 @@
 	sl_init(&s.lock);
 	s.done = false;
 
-	/* Start secondary cpu while holding lock. */
+	/* Start secondary CPU while holding lock. */
 	sl_lock(&s.lock);
 	EXPECT_EQ(
 		hftest_cpu_start(hftest_get_cpu_id(1), other_stack,
diff --git a/test/vmapi/primary_only/primary_only.c b/test/vmapi/primary_only/primary_only.c
index 7b4bba1..ab26f1d 100644
--- a/test/vmapi/primary_only/primary_only.c
+++ b/test/vmapi/primary_only/primary_only.c
@@ -48,7 +48,7 @@
 }
 
 /**
- * Confirm the primary has at least one vcpu.
+ * Confirm the primary has at least one vCPU.
  */
 TEST(hf_vcpu_get_count, primary_has_at_least_one)
 {
@@ -56,7 +56,7 @@
 }
 
 /**
- * Confirm an error is returned when getting the vcpu count of a non-existant
+ * Confirm an error is returned when getting the vCPU count of a non-existent
  * VM.
  */
 TEST(hf_vcpu_get_count, no_secondary_vms)
@@ -65,7 +65,7 @@
 }
 
 /**
- * Confirm an error is returned when getting the vcpu count for a reserved ID.
+ * Confirm an error is returned when getting the vCPU count for a reserved ID.
  */
 TEST(hf_vcpu_get_count, reserved_vm_id)
 {
@@ -77,7 +77,7 @@
 }
 
 /**
- * Confirm an error is returned when getting the vcpu count of a VM with an ID
+ * Confirm an error is returned when getting the vCPU count of a VM with an ID
  * that is likely to be far outside the resource limit.
  */
 TEST(hf_vcpu_get_count, large_invalid_vm_id)
@@ -86,7 +86,7 @@
 }
 
 /**
- * Confirm it is an error when running a vcpu from the primary VM.
+ * Confirm it is an error when running a vCPU from the primary VM.
  */
 TEST(spci_run, cannot_run_primary)
 {
@@ -95,7 +95,7 @@
 }
 
 /**
- * Confirm it is an error when running a vcpu from a non-existant secondary VM.
+ * Confirm it is an error when running a vCPU from a non-existent secondary VM.
  */
 TEST(spci_run, cannot_run_absent_secondary)
 {
@@ -123,7 +123,7 @@
 }
 
 /**
- * Confirm a new cpu can be started to execute in parallel.
+ * Confirm a new CPU can be started to execute in parallel.
  */
 TEST(cpus, start)
 {
diff --git a/test/vmapi/primary_with_secondaries/boot.c b/test/vmapi/primary_with_secondaries/boot.c
index 7236b34..e098538 100644
--- a/test/vmapi/primary_with_secondaries/boot.c
+++ b/test/vmapi/primary_with_secondaries/boot.c
@@ -48,7 +48,7 @@
 	SERVICE_SELECT(SERVICE_VM1, "boot_memory_overrun", mb.send);
 
 	run_res = spci_run(SERVICE_VM1, 0);
-	EXPECT_EQ(exception_handler_receive_num_exceptions(&run_res, mb.recv),
+	EXPECT_EQ(exception_handler_receive_exception_count(&run_res, mb.recv),
 		  1);
 }
 
@@ -63,6 +63,6 @@
 	SERVICE_SELECT(SERVICE_VM1, "boot_memory_underrun", mb.send);
 
 	run_res = spci_run(SERVICE_VM1, 0);
-	EXPECT_EQ(exception_handler_receive_num_exceptions(&run_res, mb.recv),
+	EXPECT_EQ(exception_handler_receive_exception_count(&run_res, mb.recv),
 		  1);
 }
diff --git a/test/vmapi/primary_with_secondaries/memory_sharing.c b/test/vmapi/primary_with_secondaries/memory_sharing.c
index 08a8f25..a697e06 100644
--- a/test/vmapi/primary_with_secondaries/memory_sharing.c
+++ b/test/vmapi/primary_with_secondaries/memory_sharing.c
@@ -34,7 +34,7 @@
 static void check_cannot_send_memory(
 	struct mailbox_buffers mb, uint32_t mode,
 	struct spci_memory_region_constituent constituents[],
-	int num_constituents, int32_t avoid_vm)
+	int constituent_count, int32_t avoid_vm)
 
 {
 	enum spci_memory_access access[] = {SPCI_MEMORY_RO_NX, SPCI_MEMORY_RO_X,
@@ -68,7 +68,7 @@
 						spci_memory_region_init(
 							mb.send, vms[i],
 							constituents,
-							num_constituents, 0, 0,
+							constituent_count, 0, 0,
 							access[j],
 							SPCI_MEMORY_NORMAL_MEM,
 							cacheability[l],
@@ -84,7 +84,7 @@
 						spci_memory_region_init(
 							mb.send, vms[i],
 							constituents,
-							num_constituents, 0, 0,
+							constituent_count, 0, 0,
 							access[j],
 							SPCI_MEMORY_DEVICE_MEM,
 							device[l],
@@ -106,11 +106,11 @@
 static void check_cannot_lend_memory(
 	struct mailbox_buffers mb,
 	struct spci_memory_region_constituent constituents[],
-	int num_constituents, int32_t avoid_vm)
+	int constituent_count, int32_t avoid_vm)
 
 {
 	check_cannot_send_memory(mb, SPCI_MSG_SEND_LEGACY_MEMORY_LEND,
-				 constituents, num_constituents, avoid_vm);
+				 constituents, constituent_count, avoid_vm);
 }
 
 /**
@@ -119,11 +119,11 @@
 static void check_cannot_share_memory(
 	struct mailbox_buffers mb,
 	struct spci_memory_region_constituent constituents[],
-	int num_constituents, int32_t avoid_vm)
+	int constituent_count, int32_t avoid_vm)
 
 {
 	check_cannot_send_memory(mb, SPCI_MSG_SEND_LEGACY_MEMORY_SHARE,
-				 constituents, num_constituents, avoid_vm);
+				 constituents, constituent_count, avoid_vm);
 }
 
 /**
@@ -134,7 +134,7 @@
 static void check_cannot_donate_memory(
 	struct mailbox_buffers mb,
 	struct spci_memory_region_constituent constituents[],
-	int num_constituents, int32_t avoid_vm)
+	int constituent_count, int32_t avoid_vm)
 {
 	uint32_t vms[] = {HF_PRIMARY_VM_ID, SERVICE_VM1, SERVICE_VM2};
 
@@ -146,7 +146,7 @@
 			continue;
 		}
 		msg_size = spci_memory_region_init(
-			mb.send, vms[i], constituents, num_constituents, 0, 0,
+			mb.send, vms[i], constituents, constituent_count, 0, 0,
 			SPCI_MEMORY_RW_X, SPCI_MEMORY_NORMAL_MEM,
 			SPCI_MEMORY_CACHE_WRITE_BACK,
 			SPCI_MEMORY_OUTER_SHAREABLE);
@@ -164,7 +164,7 @@
 static void check_cannot_relinquish_memory(
 	struct mailbox_buffers mb,
 	struct spci_memory_region_constituent constituents[],
-	int num_constituents)
+	int constituent_count)
 {
 	uint32_t vms[] = {HF_PRIMARY_VM_ID, SERVICE_VM1, SERVICE_VM2};
 
@@ -173,8 +173,9 @@
 	for (i = 0; i < ARRAY_SIZE(vms); ++i) {
 		for (j = 0; j < ARRAY_SIZE(vms); ++j) {
 			uint32_t msg_size = spci_memory_region_init(
-				mb.send, vms[i], constituents, num_constituents,
-				0, 0, SPCI_MEMORY_RW_X, SPCI_MEMORY_NORMAL_MEM,
+				mb.send, vms[i], constituents,
+				constituent_count, 0, 0, SPCI_MEMORY_RW_X,
+				SPCI_MEMORY_NORMAL_MEM,
 				SPCI_MEMORY_CACHE_WRITE_BACK,
 				SPCI_MEMORY_OUTER_SHAREABLE);
 			EXPECT_SPCI_ERROR(
@@ -274,7 +275,7 @@
 	EXPECT_EQ(spci_rx_release().func, SPCI_SUCCESS_32);
 
 	run_res = spci_run(SERVICE_VM1, 0);
-	EXPECT_EQ(exception_handler_receive_num_exceptions(&run_res, mb.recv),
+	EXPECT_EQ(exception_handler_receive_exception_count(&run_res, mb.recv),
 		  1);
 }
 
@@ -340,7 +341,7 @@
 	}
 
 	run_res = spci_run(SERVICE_VM1, 0);
-	EXPECT_EQ(exception_handler_receive_num_exceptions(&run_res, mb.recv),
+	EXPECT_EQ(exception_handler_receive_exception_count(&run_res, mb.recv),
 		  1);
 }
 
@@ -417,7 +418,7 @@
 	EXPECT_EQ(spci_rx_release().func, SPCI_SUCCESS_32);
 
 	run_res = spci_run(SERVICE_VM1, 0);
-	EXPECT_EQ(exception_handler_receive_num_exceptions(&run_res, mb.recv),
+	EXPECT_EQ(exception_handler_receive_exception_count(&run_res, mb.recv),
 		  1);
 }
 
@@ -457,7 +458,7 @@
 	EXPECT_EQ(spci_rx_release().func, SPCI_SUCCESS_32);
 
 	run_res = spci_run(SERVICE_VM1, 0);
-	EXPECT_EQ(exception_handler_receive_num_exceptions(&run_res, mb.recv),
+	EXPECT_EQ(exception_handler_receive_exception_count(&run_res, mb.recv),
 		  1);
 }
 
@@ -547,7 +548,7 @@
 		  SPCI_SUCCESS_32);
 
 	run_res = spci_run(SERVICE_VM1, 0);
-	EXPECT_EQ(exception_handler_receive_num_exceptions(&run_res, mb.recv),
+	EXPECT_EQ(exception_handler_receive_exception_count(&run_res, mb.recv),
 		  1);
 }
 
@@ -580,7 +581,7 @@
 	EXPECT_EQ(spci_rx_release().func, SPCI_SUCCESS_32);
 
 	run_res = spci_run(SERVICE_VM1, 0);
-	EXPECT_EQ(exception_handler_receive_num_exceptions(&run_res, mb.recv),
+	EXPECT_EQ(exception_handler_receive_exception_count(&run_res, mb.recv),
 		  1);
 }
 
@@ -613,7 +614,7 @@
 	EXPECT_EQ(spci_rx_release().func, SPCI_SUCCESS_32);
 
 	run_res = spci_run(SERVICE_VM1, 0);
-	EXPECT_EQ(exception_handler_receive_num_exceptions(&run_res, mb.recv),
+	EXPECT_EQ(exception_handler_receive_exception_count(&run_res, mb.recv),
 		  1);
 }
 
@@ -654,7 +655,7 @@
 		  SPCI_SUCCESS_32);
 
 	run_res = spci_run(SERVICE_VM1, 0);
-	EXPECT_EQ(exception_handler_receive_num_exceptions(&run_res, mb.recv),
+	EXPECT_EQ(exception_handler_receive_exception_count(&run_res, mb.recv),
 		  1);
 
 	/* Use different memory regions for verifying the second constituent. */
@@ -677,7 +678,7 @@
 		  SPCI_SUCCESS_32);
 
 	run_res = spci_run(SERVICE_VM1, 0);
-	EXPECT_EQ(exception_handler_receive_num_exceptions(&run_res, mb.recv),
+	EXPECT_EQ(exception_handler_receive_exception_count(&run_res, mb.recv),
 		  1);
 }
 
@@ -718,7 +719,7 @@
 		  SPCI_SUCCESS_32);
 
 	run_res = spci_run(SERVICE_VM1, 0);
-	EXPECT_EQ(exception_handler_receive_num_exceptions(&run_res, mb.recv),
+	EXPECT_EQ(exception_handler_receive_exception_count(&run_res, mb.recv),
 		  1);
 
 	/* Use different memory regions for verifying the second constituent. */
@@ -745,7 +746,7 @@
 	 * NOTE: This generates two exceptions, one for the page fault, and one
 	 * for accessing a region past the lower bound.
 	 */
-	EXPECT_EQ(exception_handler_receive_num_exceptions(&run_res, mb.recv),
+	EXPECT_EQ(exception_handler_receive_exception_count(&run_res, mb.recv),
 		  2);
 }
 
@@ -796,7 +797,7 @@
 		  SPCI_SUCCESS_32);
 
 	run_res = spci_run(SERVICE_VM1, 0);
-	EXPECT_EQ(exception_handler_receive_num_exceptions(&run_res, mb.recv),
+	EXPECT_EQ(exception_handler_receive_exception_count(&run_res, mb.recv),
 		  1);
 }
 
@@ -846,7 +847,7 @@
 
 	/* Try to access memory in VM1. */
 	run_res = spci_run(SERVICE_VM1, 0);
-	EXPECT_EQ(exception_handler_receive_num_exceptions(&run_res, mb.recv),
+	EXPECT_EQ(exception_handler_receive_exception_count(&run_res, mb.recv),
 		  1);
 
 	/* Ensure that memory in VM2 remains the same. */
@@ -1208,7 +1209,7 @@
 	EXPECT_EQ(run_res.func, SPCI_YIELD_32);
 
 	run_res = spci_run(SERVICE_VM1, 0);
-	EXPECT_EQ(exception_handler_receive_num_exceptions(&run_res, mb.recv),
+	EXPECT_EQ(exception_handler_receive_exception_count(&run_res, mb.recv),
 		  1);
 }
 
@@ -1281,7 +1282,7 @@
 	}
 
 	run_res = spci_run(SERVICE_VM1, 0);
-	EXPECT_EQ(exception_handler_receive_num_exceptions(&run_res, mb.recv),
+	EXPECT_EQ(exception_handler_receive_exception_count(&run_res, mb.recv),
 		  1);
 }
 
@@ -1353,7 +1354,7 @@
 	}
 
 	run_res = spci_run(SERVICE_VM1, 0);
-	EXPECT_EQ(exception_handler_receive_num_exceptions(&run_res, mb.recv),
+	EXPECT_EQ(exception_handler_receive_exception_count(&run_res, mb.recv),
 		  1);
 }
 
@@ -1404,7 +1405,7 @@
 		  SPCI_SUCCESS_32);
 
 	run_res = spci_run(SERVICE_VM1, 0);
-	EXPECT_EQ(exception_handler_receive_num_exceptions(&run_res, mb.recv),
+	EXPECT_EQ(exception_handler_receive_exception_count(&run_res, mb.recv),
 		  1);
 }
 
@@ -1455,7 +1456,7 @@
 		  SPCI_SUCCESS_32);
 
 	run_res = spci_run(SERVICE_VM1, 0);
-	EXPECT_EQ(exception_handler_receive_num_exceptions(&run_res, mb.recv),
+	EXPECT_EQ(exception_handler_receive_exception_count(&run_res, mb.recv),
 		  1);
 }
 
@@ -1798,7 +1799,7 @@
 		  SPCI_SUCCESS_32);
 
 	run_res = spci_run(SERVICE_VM1, 0);
-	EXPECT_EQ(exception_handler_receive_num_exceptions(&run_res, mb.recv),
+	EXPECT_EQ(exception_handler_receive_exception_count(&run_res, mb.recv),
 		  1);
 
 	/* Use different memory regions for verifying the second constituent. */
@@ -1822,7 +1823,7 @@
 		  SPCI_SUCCESS_32);
 
 	run_res = spci_run(SERVICE_VM2, 0);
-	EXPECT_EQ(exception_handler_receive_num_exceptions(&run_res, mb.recv),
+	EXPECT_EQ(exception_handler_receive_exception_count(&run_res, mb.recv),
 		  1);
 }
 
@@ -1864,7 +1865,7 @@
 		  SPCI_SUCCESS_32);
 
 	run_res = spci_run(SERVICE_VM1, 0);
-	EXPECT_EQ(exception_handler_receive_num_exceptions(&run_res, mb.recv),
+	EXPECT_EQ(exception_handler_receive_exception_count(&run_res, mb.recv),
 		  1);
 
 	/* Use different memory regions for verifying the second constituent. */
@@ -1888,6 +1889,6 @@
 		  SPCI_SUCCESS_32);
 
 	run_res = spci_run(SERVICE_VM2, 0);
-	EXPECT_EQ(exception_handler_receive_num_exceptions(&run_res, mb.recv),
+	EXPECT_EQ(exception_handler_receive_exception_count(&run_res, mb.recv),
 		  1);
 }
diff --git a/test/vmapi/primary_with_secondaries/no_services.c b/test/vmapi/primary_with_secondaries/no_services.c
index 2e06c78..2757df2 100644
--- a/test/vmapi/primary_with_secondaries/no_services.c
+++ b/test/vmapi/primary_with_secondaries/no_services.c
@@ -52,7 +52,7 @@
 }
 
 /**
- * Confirm that secondary VM has 1 VCPU.
+ * Confirm that secondary VM has 1 vCPU.
  */
 TEST(hf_vcpu_get_count, secondary_has_one_vcpu)
 {
@@ -60,7 +60,7 @@
 }
 
 /**
- * Confirm an error is returned when getting the vcpu count for a reserved ID.
+ * Confirm an error is returned when getting the vCPU count for a reserved ID.
  */
 TEST(hf_vcpu_get_count, reserved_vm_id)
 {
@@ -72,7 +72,7 @@
 }
 
 /**
- * Confirm it is an error to query how many VCPUs are assigned to a nonexistent
+ * Confirm it is an error to query how many vCPUs are assigned to a nonexistent
  * secondary VM.
  */
 TEST(hf_vcpu_get_count, large_invalid_vm_id)
@@ -99,7 +99,7 @@
 }
 
 /**
- * Can only run a vcpu that exists.
+ * Can only run a vCPU that exists.
  */
 TEST(spci_run, cannot_run_absent_vcpu)
 {
@@ -124,7 +124,7 @@
 	hf_ipaddr_t unaligned_addr = (hf_ipaddr_t)&maybe_aligned[1];
 	hf_ipaddr_t aligned_addr = (hf_ipaddr_t)send_page;
 
-	/* Check the the address is unaligned. */
+	/* Check that the address is unaligned. */
 	ASSERT_EQ(unaligned_addr & 1, 1);
 
 	EXPECT_SPCI_ERROR(spci_rxtx_map(aligned_addr, unaligned_addr),
diff --git a/test/vmapi/primary_with_secondaries/services/BUILD.gn b/test/vmapi/primary_with_secondaries/services/BUILD.gn
index 71235d1..d4a78c2 100644
--- a/test/vmapi/primary_with_secondaries/services/BUILD.gn
+++ b/test/vmapi/primary_with_secondaries/services/BUILD.gn
@@ -14,7 +14,7 @@
 
 import("//build/image/image.gni")
 
-# Service to expose race conditions when running a vcpu.
+# Service to expose race conditions when running a vCPU.
 source_set("check_state") {
   testonly = true
   public_configs = [ "//test/hftest:hftest_config" ]
diff --git a/test/vmapi/primary_with_secondaries/services/check_state.c b/test/vmapi/primary_with_secondaries/services/check_state.c
index 1b8db1f..da27cc6 100644
--- a/test/vmapi/primary_with_secondaries/services/check_state.c
+++ b/test/vmapi/primary_with_secondaries/services/check_state.c
@@ -33,7 +33,7 @@
 }
 
 /**
- * This service repeatedly takes the following steps: sets the per-cpu pointer
+ * This service repeatedly takes the following steps: sets the per-CPU pointer
  * to some value, makes a hypervisor call, check that the value is still what it
  * was set to.
  *
diff --git a/test/vmapi/primary_with_secondaries/unmapped.c b/test/vmapi/primary_with_secondaries/unmapped.c
index 8b5af23..0b9c116 100644
--- a/test/vmapi/primary_with_secondaries/unmapped.c
+++ b/test/vmapi/primary_with_secondaries/unmapped.c
@@ -32,7 +32,7 @@
 	SERVICE_SELECT(SERVICE_VM1, "data_unmapped", mb.send);
 
 	run_res = spci_run(SERVICE_VM1, 0);
-	EXPECT_EQ(exception_handler_receive_num_exceptions(&run_res, mb.recv),
+	EXPECT_EQ(exception_handler_receive_exception_count(&run_res, mb.recv),
 		  1);
 }
 
@@ -55,7 +55,7 @@
 	EXPECT_EQ(spci_rx_release().func, SPCI_SUCCESS_32);
 
 	run_res = spci_run(SERVICE_VM1, 0);
-	EXPECT_EQ(exception_handler_receive_num_exceptions(&run_res, mb.recv),
+	EXPECT_EQ(exception_handler_receive_exception_count(&run_res, mb.recv),
 		  1);
 }
 
@@ -70,7 +70,7 @@
 	SERVICE_SELECT(SERVICE_VM1, "instruction_unmapped", mb.send);
 
 	run_res = spci_run(SERVICE_VM1, 0);
-	EXPECT_EQ(exception_handler_receive_num_exceptions(&run_res, mb.recv),
+	EXPECT_EQ(exception_handler_receive_exception_count(&run_res, mb.recv),
 		  1);
 }
 
@@ -93,6 +93,6 @@
 	EXPECT_EQ(spci_rx_release().func, SPCI_SUCCESS_32);
 
 	run_res = spci_run(SERVICE_VM1, 0);
-	EXPECT_EQ(exception_handler_receive_num_exceptions(&run_res, mb.recv),
+	EXPECT_EQ(exception_handler_receive_exception_count(&run_res, mb.recv),
 		  1);
 }