Enable MMU and caching in VM API tests

VM API tests are failing on real hardware because VMs are not seeing
data written by the hypervisor. The reason for this is that Hafnium has
data caching enabled while the test VMs do not. Solve this discrepancy
by enabling data caching in the VMs too, which requires enabling stage-1
MMU translation.

The entire address space is identity-mapped with read-write-execute
permissions. Only GIC tests currently require custom device mappings.

Implementation shares ptable management code from src/mm.c and
src/arch/mm.c.

Bug: 138985026
Test: ./kokoro/ubuntu/build.sh
Change-Id: Ib9f599c448d70296a6ca869ddbb51abfcc55148d
diff --git a/build/toolchain/embedded.gni b/build/toolchain/embedded.gni
index d6a6102..a1eaeed 100644
--- a/build/toolchain/embedded.gni
+++ b/build/toolchain/embedded.gni
@@ -350,6 +350,9 @@
                              "toolchain_args",
                            ])
     cpu = "${invoker.cpu}+nofp"
+
+    # Add a macro so files can tell whether they are not being built for a VM.
+    extra_defines = " -DVM_TOOLCHAIN=0"
   }
 
   # Toolchain for building test VMs which run under Hafnium.
@@ -369,5 +372,8 @@
     # Nonsense values because they are required but shouldn't be used.
     heap_pages = 0
     max_vms = 0
+
+    # Add a macro so files can tell whether they are being built for a VM.
+    extra_defines = " -DVM_TOOLCHAIN=1"
   }
 }
diff --git a/inc/hf/mm.h b/inc/hf/mm.h
index f7c4d9d..8b2fb8f 100644
--- a/inc/hf/mm.h
+++ b/inc/hf/mm.h
@@ -32,7 +32,6 @@
 #define PAGE_SIZE (1 << PAGE_BITS)
 #define MM_PTE_PER_PAGE (PAGE_SIZE / sizeof(pte_t))
 
-
 /* The following are arch-independent page mapping modes. */
 #define MM_MODE_R 0x0001 /* read */
 #define MM_MODE_W 0x0002 /* write */
@@ -71,6 +70,10 @@
 #define MM_MODE_UNOWNED 0x0020
 #define MM_MODE_SHARED  0x0040
 
+#define MM_FLAG_COMMIT  0x01
+#define MM_FLAG_UNMAP   0x02
+#define MM_FLAG_STAGE1  0x04
+
 /* clang-format on */
 
 struct mm_page_table {
@@ -86,6 +89,9 @@
 	paddr_t root;
 };
 
+/** The type of addresses stored in the page table. */
+typedef uintvaddr_t ptable_addr_t;
+
 /** Represents the currently locked stage-1 page table of the hypervisor. */
 struct mm_stage1_locked {
 	struct mm_ptable *ptable;
@@ -93,6 +99,9 @@
 
 void mm_vm_enable_invalidation(void);
 
+bool mm_ptable_init(struct mm_ptable *t, int flags, struct mpool *ppool);
+ptable_addr_t mm_ptable_addr_space_end(int flags);
+
 bool mm_vm_init(struct mm_ptable *t, struct mpool *ppool);
 void mm_vm_fini(struct mm_ptable *t, struct mpool *ppool);
 bool mm_vm_identity_map(struct mm_ptable *t, paddr_t begin, paddr_t end,
diff --git a/src/BUILD.gn b/src/BUILD.gn
index f5108ec..8c5b6e5 100644
--- a/src/BUILD.gn
+++ b/src/BUILD.gn
@@ -17,10 +17,8 @@
 
 # The hypervisor image.
 hypervisor("hafnium") {
-  sources = [
-    "layout.c",
-  ]
   deps = [
+    ":layout",
     ":src_not_testable_yet",
   ]
 }
@@ -50,8 +48,6 @@
     "abort.c",
     "api.c",
     "cpu.c",
-    "mm.c",
-    "mpool.c",
     "panic.c",
     "spci_architected_message.c",
     "vm.c",
@@ -61,6 +57,7 @@
     ":fdt",
     ":fdt_handler",
     ":memiter",
+    ":mm",
     ":std",
     "//src/arch/${plat_arch}/hypervisor",
     plat_console,
@@ -71,6 +68,19 @@
   }
 }
 
+source_set("layout") {
+  sources = [
+    "layout.c",
+  ]
+}
+
+source_set("mm") {
+  sources = [
+    "mm.c",
+    "mpool.c",
+  ]
+}
+
 # Standard library functions.
 source_set("std") {
   sources = [
diff --git a/src/arch/aarch64/hftest/BUILD.gn b/src/arch/aarch64/hftest/BUILD.gn
index 4c5dace..e5bcedc 100644
--- a/src/arch/aarch64/hftest/BUILD.gn
+++ b/src/arch/aarch64/hftest/BUILD.gn
@@ -79,3 +79,9 @@
     "//src/arch/aarch64/hftest:hf_call",
   ]
 }
+
+source_set("mm") {
+  sources = [
+    "mm.c",
+  ]
+}
diff --git a/src/arch/aarch64/hftest/mm.c b/src/arch/aarch64/hftest/mm.c
new file mode 100644
index 0000000..6673d90
--- /dev/null
+++ b/src/arch/aarch64/hftest/mm.c
@@ -0,0 +1,91 @@
+/*
+ * Copyright 2019 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "hf/mm.h"
+
+#include "hf/arch/barriers.h"
+
+#include "hf/dlog.h"
+
+#include "../msr.h"
+
+#define STAGE1_DEVICEINDX UINT64_C(0)
+#define STAGE1_NORMALINDX UINT64_C(1)
+
+/**
+ * Initialize MMU for a test running in EL1.
+ */
+bool arch_vm_mm_init(paddr_t table)
+{
+	static const int pa_bits_table[16] = {32, 36, 40, 42, 44, 48};
+	uint64_t features = read_msr(id_aa64mmfr0_el1);
+	uint64_t v;
+	int pa_bits = pa_bits_table[features & 0xf];
+
+	/* Check that 4KB granules are supported. */
+	if ((features >> 28) & 0xf) {
+		dlog("4KB granules are not supported\n");
+		return false;
+	}
+
+	/* Check the physical address range. */
+	if (!pa_bits) {
+		dlog("Unsupported value of id_aa64mmfr0_el1.PARange: %x\n",
+		     features & 0xf);
+		return false;
+	}
+
+	/*
+	 * 0    -> Device-nGnRnE memory
+	 * 0xff -> Normal memory, Inner/Outer Write-Back Non-transient,
+	 *         Write-Alloc, Read-Alloc.
+	 */
+	v = (0 << (8 * STAGE1_DEVICEINDX)) | (0xff << (8 * STAGE1_NORMALINDX));
+	write_msr(mair_el1, v);
+
+	write_msr(ttbr0_el1, pa_addr(table));
+
+	v = (1 << 20) |		       /* TBI? NOTE(review): bit 20 is TBI in TCR_EL2; in TCR_EL1, TBI0 is bit 37 — verify. */
+	    ((features & 0xf) << 16) | /* PS? NOTE(review): PS is [18:16] in TCR_EL2; in TCR_EL1, IPS is [34:32] — verify. */
+	    (0 << 14) |		       /* TG0, granule size, 4KB. */
+	    (3 << 12) |		       /* SH0, inner shareable. */
+	    (1 << 10) | /* ORGN0, normal mem, WB RA WA Cacheable. */
+	    (1 << 8) |  /* IRGN0, normal mem, WB RA WA Cacheable. */
+	    (25 << 0) | /* T0SZ, input address is 2^39 bytes. */
+	    0;
+	write_msr(tcr_el1, v);
+
+	v = (1 << 0) |  /* M, enable stage 1 EL1 MMU. */
+	    (1 << 1) |  /* A, enable alignment check faults. */
+	    (1 << 2) |  /* C, data cache enable. */
+	    (1 << 3) |  /* SA, enable stack alignment check. */
+	    (3 << 4) |  /* RES1 bits. */
+	    (1 << 11) | /* RES1 bit. */
+	    (1 << 12) | /* I, instruction cache enable. */
+	    (1 << 16) | /* RES1 bit. */
+	    (1 << 18) | /* RES1 bit. */
+	    (0 << 19) | /* WXN bit, writable execute never. */
+	    (3 << 22) | /* RES1 bits. */
+	    (3 << 28) | /* RES1 bits. */
+	    0;
+
+	dsb(sy);
+	isb();
+	write_msr(sctlr_el1, v);
+	isb();
+
+	return true;
+}
diff --git a/src/arch/aarch64/inc/hf/arch/vm/mm.h b/src/arch/aarch64/inc/hf/arch/vm/mm.h
new file mode 100644
index 0000000..8bbb70e
--- /dev/null
+++ b/src/arch/aarch64/inc/hf/arch/vm/mm.h
@@ -0,0 +1,21 @@
+/*
+ * Copyright 2019 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include "hf/mm.h"
+
+bool arch_vm_mm_init(paddr_t table);
diff --git a/src/arch/aarch64/mm.c b/src/arch/aarch64/mm.c
index 191368e..1106aa6 100644
--- a/src/arch/aarch64/mm.c
+++ b/src/arch/aarch64/mm.c
@@ -266,14 +266,22 @@
 	 * there are too many, it is quicker to invalidate all TLB entries.
 	 */
 	if ((end - begin) > (MAX_TLBI_OPS * PAGE_SIZE)) {
-		tlbi(alle2);
+		if (VM_TOOLCHAIN == 1) {
+			tlbi(vmalle1is);
+		} else {
+			tlbi(alle2);
+		}
 	} else {
 		begin >>= 12;
 		end >>= 12;
 		/* Invalidate stage-1 TLB, one page from the range at a time. */
 		for (it = begin; it < end;
 		     it += (UINT64_C(1) << (PAGE_BITS - 12))) {
-			tlbi_reg(vae2is, it);
+			if (VM_TOOLCHAIN == 1) {
+				tlbi_reg(vae1is, it);
+			} else {
+				tlbi_reg(vae2is, it);
+			}
 		}
 	}
 
diff --git a/src/mm.c b/src/mm.c
index 2a85484..995e77d 100644
--- a/src/mm.c
+++ b/src/mm.c
@@ -32,9 +32,6 @@
  * contain only 1-1 mappings, aligned on the block boundaries.
  */
 
-/* The type of addresses stored in the page table. */
-typedef uintvaddr_t ptable_addr_t;
-
 /*
  * For stage 2, the input is an intermediate physical addresses rather than a
  * virtual address so:
@@ -46,15 +43,6 @@
 	"are the same size. It looks like that assumption might not be holding "
 	"so we need to check that everything is going to be ok.");
 
-/* Keep macro alignment */
-/* clang-format off */
-
-#define MM_FLAG_COMMIT       0x01
-#define MM_FLAG_UNMAP        0x02
-#define MM_FLAG_STAGE1       0x04
-
-/* clang-format on */
-
 static struct mm_ptable ptable;
 static struct spinlock ptable_lock;
 
@@ -210,9 +198,19 @@
 }
 
 /**
+ * Returns the first address which cannot be encoded in page tables given by
+ * `flags`. It is the exclusive end of the address space created by the tables.
+ */
+ptable_addr_t mm_ptable_addr_space_end(int flags)
+{
+	return mm_root_table_count(flags) *
+	       mm_entry_size(mm_max_level(flags) + 1);
+}
+
+/**
  * Initialises the given page table.
  */
-static bool mm_ptable_init(struct mm_ptable *t, int flags, struct mpool *ppool)
+bool mm_ptable_init(struct mm_ptable *t, int flags, struct mpool *ppool)
 {
 	uint8_t i;
 	size_t j;
@@ -492,8 +490,7 @@
 				      struct mpool *ppool)
 {
 	uint8_t root_level = mm_max_level(flags) + 1;
-	ptable_addr_t ptable_end =
-		mm_root_table_count(flags) * mm_entry_size(root_level);
+	ptable_addr_t ptable_end = mm_ptable_addr_space_end(flags);
 	ptable_addr_t end = mm_round_up_to_page(pa_addr(pa_end));
 	ptable_addr_t begin = pa_addr(arch_mm_clear_pa(pa_begin));
 
diff --git a/test/hftest/BUILD.gn b/test/hftest/BUILD.gn
index e08c62a..9a5b452 100644
--- a/test/hftest/BUILD.gn
+++ b/test/hftest/BUILD.gn
@@ -42,6 +42,7 @@
   ]
 
   deps = [
+    ":mm",
     "//src:dlog",
     "//src:memiter",
     "//src:panic",
@@ -91,6 +92,7 @@
 
   deps = [
     ":common",
+    ":mm",
     "//src:dlog",
     "//src:fdt",
     "//src:memiter",
@@ -116,3 +118,20 @@
     "//src:std",
   ]
 }
+
+source_set("mm") {
+  testonly = true
+
+  public_configs = [ ":hftest_config" ]
+
+  sources = [
+    "hftest_mm.c",
+  ]
+
+  deps = [
+    "//src:layout",
+    "//src:mm",
+    "//src/arch/${plat_arch}:arch",
+    "//src/arch/${plat_arch}/hftest:mm",
+  ]
+}
diff --git a/test/hftest/hftest_mm.c b/test/hftest/hftest_mm.c
new file mode 100644
index 0000000..36b977b
--- /dev/null
+++ b/test/hftest/hftest_mm.c
@@ -0,0 +1,100 @@
+/*
+ * Copyright 2019 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "hf/arch/vm/mm.h"
+#include "hf/arch/vm/power_mgmt.h"
+
+#include "hftest.h"
+
+/* Number of pages reserved for page tables. Increase if necessary. */
+#define PTABLE_PAGES 3
+
+alignas(alignof(struct mm_page_table)) static char ptable_buf
+	[sizeof(struct mm_page_table) * PTABLE_PAGES];
+
+static struct mpool ppool;
+static struct mm_ptable ptable;
+
+static struct mm_stage1_locked get_stage1_locked(void)
+{
+	return (struct mm_stage1_locked){.ptable = &ptable};
+}
+
+bool hftest_mm_init(void)
+{
+	struct mm_stage1_locked stage1_locked;
+
+	mpool_init(&ppool, sizeof(struct mm_page_table));
+	if (!mpool_add_chunk(&ppool, ptable_buf, sizeof(ptable_buf))) {
+		HFTEST_FAIL(true, "Failed to add buffer to page-table pool.");
+	}
+
+	if (!mm_ptable_init(&ptable, MM_FLAG_STAGE1, &ppool)) {
+		HFTEST_FAIL(true, "Unable to allocate memory for page table.");
+	}
+
+	stage1_locked = get_stage1_locked();
+	mm_identity_map(stage1_locked, pa_init(0),
+			pa_init(mm_ptable_addr_space_end(MM_FLAG_STAGE1)),
+			MM_MODE_R | MM_MODE_W | MM_MODE_X, &ppool);
+
+	return arch_vm_mm_init(ptable.root);
+}
+
+void hftest_mm_identity_map(const void *base, size_t size, int mode)
+{
+	struct mm_stage1_locked stage1_locked = get_stage1_locked();
+	paddr_t start = pa_from_va(va_from_ptr(base));
+	paddr_t end = pa_add(start, size);
+
+	if (mm_identity_map(stage1_locked, start, end, mode, &ppool) != base) {
+		FAIL("Could not add new page table mapping. Try increasing "
+		     "size of the page table buffer.");
+	}
+}
+
+struct cpu_start_state {
+	void (*entry)(uintptr_t arg);
+	uintreg_t arg;
+	struct spinlock lock;
+};
+
+static void cpu_entry(uintptr_t arg)
+{
+	struct cpu_start_state *s = (struct cpu_start_state *)arg;
+	struct cpu_start_state local = *s;
+
+	sl_unlock(&s->lock);
+	ASSERT_TRUE(arch_vm_mm_init(ptable.root));
+	local.entry(local.arg);
+}
+
+bool hftest_cpu_start(uintptr_t id, void *stack, size_t stack_size,
+		      void (*entry)(uintptr_t arg), uintptr_t arg)
+{
+	struct cpu_start_state s =
+		(struct cpu_start_state){.entry = entry, .arg = arg};
+
+	sl_init(&s.lock);
+	sl_lock(&s.lock);
+	if (!cpu_start(id, stack, stack_size, &cpu_entry, (uintptr_t)&s)) {
+		return false;
+	}
+
+	/* Wait until cpu_entry unlocks the lock before freeing stack memory. */
+	sl_lock(&s.lock);
+	return true;
+}
diff --git a/test/hftest/hftest_service.c b/test/hftest/hftest_service.c
index 5b9fafb..72efa7d 100644
--- a/test/hftest/hftest_service.c
+++ b/test/hftest/hftest_service.c
@@ -18,6 +18,7 @@
 #include <stdint.h>
 
 #include "hf/memiter.h"
+#include "hf/mm.h"
 #include "hf/spci.h"
 #include "hf/std.h"
 
@@ -82,6 +83,17 @@
 	hftest_test_fn service;
 	struct hftest_context *ctx;
 
+	/*
+	 * Initialize the stage-1 MMU and identity-map the entire address space.
+	 */
+	if (!hftest_mm_init()) {
+		HFTEST_LOG_FAILURE();
+		HFTEST_LOG(HFTEST_LOG_INDENT "Memory initialization failed");
+		for (;;) {
+			/* Hang if memory init failed. */
+		}
+	}
+
 	struct spci_message *recv_msg = (struct spci_message *)recv;
 
 	/* Prepare the context. */
diff --git a/test/hftest/inc/hftest.h b/test/hftest/inc/hftest.h
index bcffe5c..62eb84c 100644
--- a/test/hftest/inc/hftest.h
+++ b/test/hftest/inc/hftest.h
@@ -90,6 +90,20 @@
  */
 #define HFTEST_LOG_INDENT "    "
 
+/** Initializes stage-1 MMU for tests running in a VM. */
+bool hftest_mm_init(void);
+
+/** Adds stage-1 identity mapping for pages covering bytes [base, base+size). */
+void hftest_mm_identity_map(const void *base, size_t size, int mode);
+
+/**
+ * Starts the CPU with the given ID. It will start at the provided entry point
+ * with the provided argument. It is a wrapper around the generic cpu_start()
+ * and takes care of MMU initialization.
+ */
+bool hftest_cpu_start(uintptr_t id, void *stack, size_t stack_size,
+		      void (*entry)(uintptr_t arg), uintptr_t arg);
+
 uintptr_t hftest_get_cpu_id(size_t index);
 
 /* Above this point is the public API. Now include the implementation. */
diff --git a/test/hftest/standalone_main.c b/test/hftest/standalone_main.c
index c1bfa1a..63edf1e 100644
--- a/test/hftest/standalone_main.c
+++ b/test/hftest/standalone_main.c
@@ -19,6 +19,7 @@
 
 #include "hf/fdt.h"
 #include "hf/memiter.h"
+#include "hf/mm.h"
 
 #include "hftest.h"
 #include "hftest_common.h"
@@ -36,6 +37,14 @@
 	struct memiter bootargs_iter;
 	struct memiter command;
 
+	/*
+	 * Initialize the stage-1 MMU and identity-map the entire address space.
+	 */
+	if ((VM_TOOLCHAIN == 1) && !hftest_mm_init()) {
+		HFTEST_LOG("Memory initialization failed.");
+		return;
+	}
+
 	hftest_use_list(hftest_begin, hftest_end - hftest_begin);
 
 	if (!fdt_root_node(&n, fdt)) {
diff --git a/test/vmapi/gicv3/gicv3.c b/test/vmapi/gicv3/gicv3.c
index 571f978..47923e0 100644
--- a/test/vmapi/gicv3/gicv3.c
+++ b/test/vmapi/gicv3/gicv3.c
@@ -50,6 +50,11 @@
 
 void system_setup()
 {
+	const int mode = MM_MODE_R | MM_MODE_W | MM_MODE_D;
+	hftest_mm_identity_map((void *)GICD_BASE, PAGE_SIZE, mode);
+	hftest_mm_identity_map((void *)GICR_BASE, PAGE_SIZE, mode);
+	hftest_mm_identity_map((void *)SGI_BASE, PAGE_SIZE, mode);
+
 	exception_setup(irq);
 	interrupt_gic_setup();
 }
diff --git a/test/vmapi/primary_only/faults.c b/test/vmapi/primary_only/faults.c
index 1adeb5b..bf56b15 100644
--- a/test/vmapi/primary_only/faults.c
+++ b/test/vmapi/primary_only/faults.c
@@ -61,9 +61,10 @@
 
 	/* Start secondary cpu while holding lock. */
 	sl_lock(&s.lock);
-	EXPECT_EQ(cpu_start(hftest_get_cpu_id(1), other_stack,
-			    sizeof(other_stack), rx_reader, (uintptr_t)&s),
-		  true);
+	EXPECT_EQ(
+		hftest_cpu_start(hftest_get_cpu_id(1), other_stack,
+				 sizeof(other_stack), rx_reader, (uintptr_t)&s),
+		true);
 
 	/* Wait for CPU to release the lock. */
 	sl_lock(&s.lock);
diff --git a/test/vmapi/primary_only/primary_only.c b/test/vmapi/primary_only/primary_only.c
index a7b29c8..4bede95 100644
--- a/test/vmapi/primary_only/primary_only.c
+++ b/test/vmapi/primary_only/primary_only.c
@@ -117,10 +117,10 @@
 
 	/* Start secondary while holding lock. */
 	sl_lock(&lock);
-	EXPECT_EQ(
-		cpu_start(hftest_get_cpu_id(1), other_stack,
-			  sizeof(other_stack), vm_cpu_entry, (uintptr_t)&lock),
-		true);
+	EXPECT_EQ(hftest_cpu_start(hftest_get_cpu_id(1), other_stack,
+				   sizeof(other_stack), vm_cpu_entry,
+				   (uintptr_t)&lock),
+		  true);
 
 	/* Wait for CPU to release the lock. */
 	sl_lock(&lock);
diff --git a/test/vmapi/primary_with_secondaries/run_race.c b/test/vmapi/primary_with_secondaries/run_race.c
index 49c95e3..63ba81d 100644
--- a/test/vmapi/primary_with_secondaries/run_race.c
+++ b/test/vmapi/primary_with_secondaries/run_race.c
@@ -88,8 +88,8 @@
 	SERVICE_SELECT(SERVICE_VM0, "check_state", mb.send);
 
 	/* Start second vCPU. */
-	ASSERT_TRUE(cpu_start(hftest_get_cpu_id(1), stack, sizeof(stack),
-			      vm_cpu_entry, (uintptr_t)&mb));
+	ASSERT_TRUE(hftest_cpu_start(hftest_get_cpu_id(1), stack, sizeof(stack),
+				     vm_cpu_entry, (uintptr_t)&mb));
 
 	/* Run on a loop until the secondary VM is done. */
 	EXPECT_TRUE(run_loop(&mb));
diff --git a/test/vmapi/primary_with_secondaries/services/abort.c b/test/vmapi/primary_with_secondaries/services/abort.c
index e37d7e9..3d11bd2 100644
--- a/test/vmapi/primary_with_secondaries/services/abort.c
+++ b/test/vmapi/primary_with_secondaries/services/abort.c
@@ -49,12 +49,18 @@
 
 TEST_SERVICE(straddling_instruction_abort)
 {
+	/*
+	 * Get a function pointer which, when branched to, will attempt to
+	 * execute a 4-byte instruction straddling two pages.
+	 */
 	int (*f)(void) = (int (*)(void))(&pages[PAGE_SIZE - 2]);
 
-	/* Give some memory to the primary VM so that it's unmapped. */
+	/* Give second page to the primary VM so that it's unmapped. */
 	ASSERT_EQ(hf_share_memory(HF_PRIMARY_VM_ID,
 				  (hf_ipaddr_t)(&pages[PAGE_SIZE]), PAGE_SIZE,
 				  HF_MEMORY_GIVE),
 		  0);
+
+	/* Branch to instruction whose 2 bytes are now in an unmapped page. */
 	f();
 }
diff --git a/test/vmapi/primary_with_secondaries/services/smp.c b/test/vmapi/primary_with_secondaries/services/smp.c
index 863919d..44dd505 100644
--- a/test/vmapi/primary_with_secondaries/services/smp.c
+++ b/test/vmapi/primary_with_secondaries/services/smp.c
@@ -73,8 +73,8 @@
 
 	/* Start second vCPU. */
 	dlog("Secondary starting second vCPU.\n");
-	ASSERT_TRUE(
-		cpu_start(1, stack, sizeof(stack), vm_cpu_entry, ARG_VALUE));
+	ASSERT_TRUE(hftest_cpu_start(1, stack, sizeof(stack), vm_cpu_entry,
+				     ARG_VALUE));
 	dlog("Secondary started second vCPU.\n");
 
 	/* Check that vCPU statuses are as expected. */