Move driver/linux into the main repository rather than a submodule.
Change-Id: I3e49fbd0b52379ff1729eefc31467fe8487529e4
diff --git a/.clang-format b/.clang-format
new file mode 100644
index 0000000..c83c7f7
--- /dev/null
+++ b/.clang-format
@@ -0,0 +1,24 @@
+BasedOnStyle: Google
+
+IndentWidth: 8
+ContinuationIndentWidth: 8
+UseTab: Always
+
+BreakBeforeBraces: Linux
+AllowShortFunctionsOnASingleLine: None
+AllowShortIfStatementsOnASingleLine: false
+AllowShortLoopsOnASingleLine: false
+IndentCaseLabels: false
+
+IncludeBlocks: Regroup
+IncludeCategories:
+ - Regex: '^<[[:alnum:].]+>'
+ Priority: 1
+ - Regex: '^"hf/arch/'
+ Priority: 2
+ - Regex: '^"hf/'
+ Priority: 3
+ - Regex: '^"vmapi/'
+ Priority: 4
+ - Regex: '.*'
+ Priority: 5
diff --git a/.clang-tidy b/.clang-tidy
new file mode 100644
index 0000000..38278ee
--- /dev/null
+++ b/.clang-tidy
@@ -0,0 +1,4 @@
+Checks: 'readability-*,portability-*,performance-*,misc-*,bugprone-*,modernize-*,google-runtime-int,-modernize-deprecated-headers,-clang-analyzer-valist.Uninitialized,-readability-magic-numbers'
+HeaderFilterRegex: '^(?!third_party).+'
+FormatStyle: file
+WarningsAsErrors: '*'
diff --git a/.gitignore b/.gitignore
index 8fd2089..9e90f42 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,7 +1,9 @@
-*.ko
-*.mod.c
-*.o
-.*o.cmd
-.tmp_versions/
-Module.symvers
-modules.order
+out/
+.repo/
+
+# Ignore project/ subfolders which are not part of the public Hafnium tree.
+# When these are checked out using a `repo` manifest Git otherwise treats them
+# as untracked files and the repository as dirty.
+!project/
+project/*/
+!project/reference/
diff --git a/.gitmodules b/.gitmodules
new file mode 100644
index 0000000..13fb56f
--- /dev/null
+++ b/.gitmodules
@@ -0,0 +1,16 @@
+[submodule "prebuilts"]
+ path = prebuilts
+ url = https://hafnium.googlesource.com/hafnium/prebuilts
+ shallow = true
+[submodule "project/reference"]
+ path = project/reference
+ url = https://hafnium.googlesource.com/hafnium/project/reference
+ shallow = true
+[submodule "third_party/googletest"]
+ path = third_party/googletest
+ url = https://hafnium.googlesource.com/hafnium/third_party/googletest
+ shallow = true
+[submodule "third_party/linux"]
+ path = third_party/linux
+ url = https://hafnium.googlesource.com/hafnium/third_party/linux
+ shallow = true
diff --git a/.gn b/.gn
new file mode 100644
index 0000000..b5eecc7
--- /dev/null
+++ b/.gn
@@ -0,0 +1,19 @@
+# Copyright 2018 The Hafnium Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# The location of the build configuration file.
+buildconfig = "//build/BUILDCONFIG.gn"
+
+# Force use of Python 3
+script_executable = "python3"
diff --git a/.vscode/c_cpp_properties.json b/.vscode/c_cpp_properties.json
new file mode 100644
index 0000000..6a99fb2
--- /dev/null
+++ b/.vscode/c_cpp_properties.json
@@ -0,0 +1,17 @@
+{
+ "configurations": [
+ {
+ "name": "Linux",
+ "includePath": [
+ "${workspaceFolder}/**"
+ ],
+ "defines": [],
+ "compilerPath": "${workspaceFolder}/prebuilts/linux-x64/clang/bin/clang",
+ "cStandard": "c11",
+ "cppStandard": "c++17",
+ "intelliSenseMode": "clang-x64",
+ "compileCommands": "${workspaceFolder}/out/reference/compile_commands.json"
+ }
+ ],
+ "version": 4
+}
diff --git a/.vscode/launch.json b/.vscode/launch.json
new file mode 100644
index 0000000..1e0e217
--- /dev/null
+++ b/.vscode/launch.json
@@ -0,0 +1,29 @@
+{
+ // Use IntelliSense to learn about possible attributes.
+ // Hover to view descriptions of existing attributes.
+ // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387
+ "version": "0.2.0",
+ "configurations": [
+ {
+ "name": "(gdb) QEMU",
+ "type": "cppdbg",
+ "request": "launch",
+ "program": "out/reference/qemu_aarch64_clang/hafnium.elf",
+ "miDebuggerServerAddress": "localhost:1234",
+ "MIMode": "gdb",
+ "miDebuggerPath": "/usr/bin/gdb-multiarch",
+ "cwd": "${workspaceRoot}",
+ "setupCommands": [
+ {
+ "description": "Enable pretty-printing for gdb",
+ "text": "-enable-pretty-printing",
+ "ignoreFailures": true
+ },
+ {
+ // Update this with whatever VMs you want to debug.
+ "text": "add-symbol-file ${workspaceRoot}/out/reference/qemu_aarch64_vm_clang/obj/test/vmapi/primary_with_secondaries/services/service_vm3.elf 0x43d00000+0xc4",
+ }
+ ]
+ },
+ ]
+}
diff --git a/.vscode/settings.json b/.vscode/settings.json
new file mode 100644
index 0000000..3475316
--- /dev/null
+++ b/.vscode/settings.json
@@ -0,0 +1,24 @@
+{
+ "files.associations": {
+ "constants.h": "c",
+ "hftest.h": "c",
+ "primary_with_secondary.h": "c",
+ "cpu.h": "c",
+ "call.h": "c",
+ "mpool.h": "c",
+ "atomic": "c",
+ "assert.h": "c",
+ "dlog.h": "c",
+ "stdarg.h": "c",
+ "primary_only.h": "c",
+ "fdt_handler.h": "c",
+ "spinlock.h": "c",
+ "offsets.h": "c",
+ "barriers.h": "c",
+ "spci.h": "c",
+ "spci_internal.h": "c",
+ "interrupts_gicv3.h": "c",
+ "interrupts.h": "c"
+ },
+ "C_Cpp.errorSquiggles": "Disabled"
+}
diff --git a/.vscode/tasks.json b/.vscode/tasks.json
new file mode 100644
index 0000000..23d9330
--- /dev/null
+++ b/.vscode/tasks.json
@@ -0,0 +1,51 @@
+{
+ // See https://go.microsoft.com/fwlink/?LinkId=733558
+ // for the documentation about the tasks.json format
+ "version": "2.0.0",
+ "tasks": [
+ {
+ "label": "make",
+ "type": "shell",
+ "command": "make",
+ "group": {
+ "kind": "build",
+ "isDefault": true
+ },
+ "problemMatcher": []
+ },
+ {
+ "label": "format",
+ "type": "shell",
+ "command": "make format",
+ "problemMatcher": []
+ },
+ {
+ "label": "check",
+ "type": "shell",
+ "command": "make check",
+ "problemMatcher": []
+ },
+ {
+ "label": "build.sh",
+ "type": "shell",
+ "command": "kokoro/ubuntu/build.sh",
+ "problemMatcher": []
+ },
+ {
+ "label": "test",
+ "type": "shell",
+ "command": "make && kokoro/ubuntu/test.sh"
+ },
+ {
+ "label": "test on FVP",
+ "type": "shell",
+ "command": "make && kokoro/ubuntu/test.sh --fvp"
+ },
+ {
+ "label": "push",
+ "type": "shell",
+ "command": "git push origin HEAD:refs/for/master",
+ "problemMatcher": []
+ }
+ ]
+}
diff --git a/AUTHORS b/AUTHORS
index 7c577d2..2d9ea28 100644
--- a/AUTHORS
+++ b/AUTHORS
@@ -5,4 +5,5 @@
# of contributors, see the revision history in source control.
#
# Please keep the list sorted alphabetically.
+Arm Ltd.
Google LLC
diff --git a/BUILD.gn b/BUILD.gn
index 6c7d688..7b1bb73 100644
--- a/BUILD.gn
+++ b/BUILD.gn
@@ -1,4 +1,4 @@
-# Copyright 2019 The Hafnium Authors.
+# Copyright 2018 The Hafnium Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -12,10 +12,35 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-import("//build/linux/linux.gni")
+# The root of the build redirects to a project build file so each project can
+# select the artifacts it needs to build.
-linux_kernel_module("linux") {
- module_name = "hafnium"
- kernel_target = "//third_party/linux:linux"
- kernel_dir = "//third_party/linux"
+# The root of the build.
+group("root") {
+ deps = [
+ "//project/${project}:root",
+ ]
+}
+
+# The root of the build for test artifacts.
+group("test_root") {
+ testonly = true
+
+ deps = [
+ "//project/${project}:test_root",
+ ]
+}
+
+group("update_prebuilts") {
+ deps = [
+ "//third_party/linux",
+ ]
+}
+
+group("default") {
+ testonly = true
+ deps = [
+ ":root",
+ ":test_root",
+ ]
}
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
new file mode 100644
index 0000000..1efa7e5
--- /dev/null
+++ b/CONTRIBUTING.md
@@ -0,0 +1,54 @@
+# How to Contribute
+
+We'd love to accept your patches and contributions to this project. There are
+just a few small guidelines you need to follow.
+
+## Contributor License Agreement
+
+Contributions to this project must be accompanied by a Contributor License
+Agreement. You (or your employer) retain the copyright to your contribution;
+this simply gives us permission to use and redistribute your contributions as
+part of the project. Head over to <https://cla.developers.google.com/> to see
+your current agreements on file or to sign a new one.
+
+You generally only need to submit a CLA once, so if you've already submitted one
+(even if it was for a different project), you probably don't need to do it
+again.
+
+## Style guide
+
+Submissions should follow the Hafnium [style guide](docs/StyleGuide.md).
+
+## Code reviews
+
+All submissions, including submissions by project members, require review. We
+use [Gerrit](https://hafnium-review.googlesource.com) for this purpose.
+
+To submit a change:
+
+1. Create an account in the
+ [Gerrit UI](https://hafnium-review.googlesource.com).
+2. Follow the [getting started](docs/GettingStarted.md) instructions to clone
+ the Hafnium repositories and set up the necessary commit hook.
+3. Make your change.
+4. Run our autoformatter with `make format`.
+5. Commit as usual. If you make a change in a submodule you will also need to
+ commit a change in the main repository to update the submodule version.
+6. Run the [tests](docs/Testing.md) and other presubmit checks with
+ `kokoro/ubuntu/build.sh`, ensure they all pass.
+7. Upload the change to Gerrit with `git push origin HEAD:refs/for/master`. If
+ you have changed submodules then you'll need to push them as well.
+8. If you changed submodules, then add a matching 'topic' from the Gerrit UI
+ for all your changes (submodules and the main repository) so that they can
+ be reviewed and submitted together.
+9. Wait 10-15 minutes for our presubmit tests to run, and make sure a 'Kokoro
+ +1' comment shows up in Gerrit indicating that they have passed. If not,
+ follow the links to find the errors, fix them and try again.
+10. From the Gerrit UI add one or more reviewers. Looking at who has modified
+ the same files frequently recently is usually a good way to pick a reviewer,
+ but if you're not sure then you can add hafnium-team@google.com.
+
+## Community Guidelines
+
+This project follows
+[Google's Open Source Community Guidelines](https://opensource.google.com/conduct/).
diff --git a/LICENSE b/LICENSE
index d159169..d645695 100644
--- a/LICENSE
+++ b/LICENSE
@@ -1,339 +1,202 @@
- GNU GENERAL PUBLIC LICENSE
- Version 2, June 1991
- Copyright (C) 1989, 1991 Free Software Foundation, Inc.,
- 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- Everyone is permitted to copy and distribute verbatim copies
- of this license document, but changing it is not allowed.
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
- Preamble
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
- The licenses for most software are designed to take away your
-freedom to share and change it. By contrast, the GNU General Public
-License is intended to guarantee your freedom to share and change free
-software--to make sure the software is free for all its users. This
-General Public License applies to most of the Free Software
-Foundation's software and to any other program whose authors commit to
-using it. (Some other Free Software Foundation software is covered by
-the GNU Lesser General Public License instead.) You can apply it to
-your programs, too.
+ 1. Definitions.
- When we speak of free software, we are referring to freedom, not
-price. Our General Public Licenses are designed to make sure that you
-have the freedom to distribute copies of free software (and charge for
-this service if you wish), that you receive source code or can get it
-if you want it, that you can change the software or use pieces of it
-in new free programs; and that you know you can do these things.
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
- To protect your rights, we need to make restrictions that forbid
-anyone to deny you these rights or to ask you to surrender the rights.
-These restrictions translate to certain responsibilities for you if you
-distribute copies of the software, or if you modify it.
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
- For example, if you distribute copies of such a program, whether
-gratis or for a fee, you must give the recipients all the rights that
-you have. You must make sure that they, too, receive or can get the
-source code. And you must show them these terms so they know their
-rights.
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
- We protect your rights with two steps: (1) copyright the software, and
-(2) offer you this license which gives you legal permission to copy,
-distribute and/or modify the software.
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
- Also, for each author's protection and ours, we want to make certain
-that everyone understands that there is no warranty for this free
-software. If the software is modified by someone else and passed on, we
-want its recipients to know that what they have is not the original, so
-that any problems introduced by others will not reflect on the original
-authors' reputations.
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
- Finally, any free program is threatened constantly by software
-patents. We wish to avoid the danger that redistributors of a free
-program will individually obtain patent licenses, in effect making the
-program proprietary. To prevent this, we have made it clear that any
-patent must be licensed for everyone's free use or not licensed at all.
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
- The precise terms and conditions for copying, distribution and
-modification follow.
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
- GNU GENERAL PUBLIC LICENSE
- TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
- 0. This License applies to any program or other work which contains
-a notice placed by the copyright holder saying it may be distributed
-under the terms of this General Public License. The "Program", below,
-refers to any such program or work, and a "work based on the Program"
-means either the Program or any derivative work under copyright law:
-that is to say, a work containing the Program or a portion of it,
-either verbatim or with modifications and/or translated into another
-language. (Hereinafter, translation is included without limitation in
-the term "modification".) Each licensee is addressed as "you".
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
-Activities other than copying, distribution and modification are not
-covered by this License; they are outside its scope. The act of
-running the Program is not restricted, and the output from the Program
-is covered only if its contents constitute a work based on the
-Program (independent of having been made by running the Program).
-Whether that is true depends on what the Program does.
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
- 1. You may copy and distribute verbatim copies of the Program's
-source code as you receive it, in any medium, provided that you
-conspicuously and appropriately publish on each copy an appropriate
-copyright notice and disclaimer of warranty; keep intact all the
-notices that refer to this License and to the absence of any warranty;
-and give any other recipients of the Program a copy of this License
-along with the Program.
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
-You may charge a fee for the physical act of transferring a copy, and
-you may at your option offer warranty protection in exchange for a fee.
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
- 2. You may modify your copy or copies of the Program or any portion
-of it, thus forming a work based on the Program, and copy and
-distribute such modifications or work under the terms of Section 1
-above, provided that you also meet all of these conditions:
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
- a) You must cause the modified files to carry prominent notices
- stating that you changed the files and the date of any change.
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
- b) You must cause any work that you distribute or publish, that in
- whole or in part contains or is derived from the Program or any
- part thereof, to be licensed as a whole at no charge to all third
- parties under the terms of this License.
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
- c) If the modified program normally reads commands interactively
- when run, you must cause it, when started running for such
- interactive use in the most ordinary way, to print or display an
- announcement including an appropriate copyright notice and a
- notice that there is no warranty (or else, saying that you provide
- a warranty) and that users may redistribute the program under
- these conditions, and telling the user how to view a copy of this
- License. (Exception: if the Program itself is interactive but
- does not normally print such an announcement, your work based on
- the Program is not required to print an announcement.)
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
-These requirements apply to the modified work as a whole. If
-identifiable sections of that work are not derived from the Program,
-and can be reasonably considered independent and separate works in
-themselves, then this License, and its terms, do not apply to those
-sections when you distribute them as separate works. But when you
-distribute the same sections as part of a whole which is a work based
-on the Program, the distribution of the whole must be on the terms of
-this License, whose permissions for other licensees extend to the
-entire whole, and thus to each and every part regardless of who wrote it.
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
-Thus, it is not the intent of this section to claim rights or contest
-your rights to work written entirely by you; rather, the intent is to
-exercise the right to control the distribution of derivative or
-collective works based on the Program.
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
-In addition, mere aggregation of another work not based on the Program
-with the Program (or with a work based on the Program) on a volume of
-a storage or distribution medium does not bring the other work under
-the scope of this License.
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
- 3. You may copy and distribute the Program (or a work based on it,
-under Section 2) in object code or executable form under the terms of
-Sections 1 and 2 above provided that you also do one of the following:
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
- a) Accompany it with the complete corresponding machine-readable
- source code, which must be distributed under the terms of Sections
- 1 and 2 above on a medium customarily used for software interchange; or,
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
- b) Accompany it with a written offer, valid for at least three
- years, to give any third party, for a charge no more than your
- cost of physically performing source distribution, a complete
- machine-readable copy of the corresponding source code, to be
- distributed under the terms of Sections 1 and 2 above on a medium
- customarily used for software interchange; or,
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
- c) Accompany it with the information you received as to the offer
- to distribute corresponding source code. (This alternative is
- allowed only for noncommercial distribution and only if you
- received the program in object code or executable form with such
- an offer, in accord with Subsection b above.)
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
-The source code for a work means the preferred form of the work for
-making modifications to it. For an executable work, complete source
-code means all the source code for all modules it contains, plus any
-associated interface definition files, plus the scripts used to
-control compilation and installation of the executable. However, as a
-special exception, the source code distributed need not include
-anything that is normally distributed (in either source or binary
-form) with the major components (compiler, kernel, and so on) of the
-operating system on which the executable runs, unless that component
-itself accompanies the executable.
+ END OF TERMS AND CONDITIONS
-If distribution of executable or object code is made by offering
-access to copy from a designated place, then offering equivalent
-access to copy the source code from the same place counts as
-distribution of the source code, even though third parties are not
-compelled to copy the source along with the object code.
+ APPENDIX: How to apply the Apache License to your work.
- 4. You may not copy, modify, sublicense, or distribute the Program
-except as expressly provided under this License. Any attempt
-otherwise to copy, modify, sublicense or distribute the Program is
-void, and will automatically terminate your rights under this License.
-However, parties who have received copies, or rights, from you under
-this License will not have their licenses terminated so long as such
-parties remain in full compliance.
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
- 5. You are not required to accept this License, since you have not
-signed it. However, nothing else grants you permission to modify or
-distribute the Program or its derivative works. These actions are
-prohibited by law if you do not accept this License. Therefore, by
-modifying or distributing the Program (or any work based on the
-Program), you indicate your acceptance of this License to do so, and
-all its terms and conditions for copying, distributing or modifying
-the Program or works based on it.
+ Copyright [yyyy] [name of copyright owner]
- 6. Each time you redistribute the Program (or any work based on the
-Program), the recipient automatically receives a license from the
-original licensor to copy, distribute or modify the Program subject to
-these terms and conditions. You may not impose any further
-restrictions on the recipients' exercise of the rights granted herein.
-You are not responsible for enforcing compliance by third parties to
-this License.
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
- 7. If, as a consequence of a court judgment or allegation of patent
-infringement or for any other reason (not limited to patent issues),
-conditions are imposed on you (whether by court order, agreement or
-otherwise) that contradict the conditions of this License, they do not
-excuse you from the conditions of this License. If you cannot
-distribute so as to satisfy simultaneously your obligations under this
-License and any other pertinent obligations, then as a consequence you
-may not distribute the Program at all. For example, if a patent
-license would not permit royalty-free redistribution of the Program by
-all those who receive copies directly or indirectly through you, then
-the only way you could satisfy both it and this License would be to
-refrain entirely from distribution of the Program.
+ http://www.apache.org/licenses/LICENSE-2.0
-If any portion of this section is held invalid or unenforceable under
-any particular circumstance, the balance of the section is intended to
-apply and the section as a whole is intended to apply in other
-circumstances.
-
-It is not the purpose of this section to induce you to infringe any
-patents or other property right claims or to contest validity of any
-such claims; this section has the sole purpose of protecting the
-integrity of the free software distribution system, which is
-implemented by public license practices. Many people have made
-generous contributions to the wide range of software distributed
-through that system in reliance on consistent application of that
-system; it is up to the author/donor to decide if he or she is willing
-to distribute software through any other system and a licensee cannot
-impose that choice.
-
-This section is intended to make thoroughly clear what is believed to
-be a consequence of the rest of this License.
-
- 8. If the distribution and/or use of the Program is restricted in
-certain countries either by patents or by copyrighted interfaces, the
-original copyright holder who places the Program under this License
-may add an explicit geographical distribution limitation excluding
-those countries, so that distribution is permitted only in or among
-countries not thus excluded. In such case, this License incorporates
-the limitation as if written in the body of this License.
-
- 9. The Free Software Foundation may publish revised and/or new versions
-of the General Public License from time to time. Such new versions will
-be similar in spirit to the present version, but may differ in detail to
-address new problems or concerns.
-
-Each version is given a distinguishing version number. If the Program
-specifies a version number of this License which applies to it and "any
-later version", you have the option of following the terms and conditions
-either of that version or of any later version published by the Free
-Software Foundation. If the Program does not specify a version number of
-this License, you may choose any version ever published by the Free Software
-Foundation.
-
- 10. If you wish to incorporate parts of the Program into other free
-programs whose distribution conditions are different, write to the author
-to ask for permission. For software which is copyrighted by the Free
-Software Foundation, write to the Free Software Foundation; we sometimes
-make exceptions for this. Our decision will be guided by the two goals
-of preserving the free status of all derivatives of our free software and
-of promoting the sharing and reuse of software generally.
-
- NO WARRANTY
-
- 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
-FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN
-OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
-PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
-OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
-MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS
-TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE
-PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
-REPAIR OR CORRECTION.
-
- 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
-WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
-REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
-INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
-OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
-TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
-YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
-PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
-POSSIBILITY OF SUCH DAMAGES.
-
- END OF TERMS AND CONDITIONS
-
- How to Apply These Terms to Your New Programs
-
- If you develop a new program, and you want it to be of the greatest
-possible use to the public, the best way to achieve this is to make it
-free software which everyone can redistribute and change under these terms.
-
- To do so, attach the following notices to the program. It is safest
-to attach them to the start of each source file to most effectively
-convey the exclusion of warranty; and each file should have at least
-the "copyright" line and a pointer to where the full notice is found.
-
- <one line to give the program's name and a brief idea of what it does.>
- Copyright (C) <year> <name of author>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License along
- with this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-
-Also add information on how to contact you by electronic and paper mail.
-
-If the program is interactive, make it output a short notice like this
-when it starts in an interactive mode:
-
- Gnomovision version 69, Copyright (C) year name of author
- Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
- This is free software, and you are welcome to redistribute it
- under certain conditions; type `show c' for details.
-
-The hypothetical commands `show w' and `show c' should show the appropriate
-parts of the General Public License. Of course, the commands you use may
-be called something other than `show w' and `show c'; they could even be
-mouse-clicks or menu items--whatever suits your program.
-
-You should also get your employer (if you work as a programmer) or your
-school, if any, to sign a "copyright disclaimer" for the program, if
-necessary. Here is a sample; alter the names:
-
- Yoyodyne, Inc., hereby disclaims all copyright interest in the program
- `Gnomovision' (which makes passes at compilers) written by James Hacker.
-
- <signature of Ty Coon>, 1 April 1989
- Ty Coon, President of Vice
-
-This General Public License does not permit incorporating your program into
-proprietary programs. If your program is a subroutine library, you may
-consider it more useful to permit linking proprietary applications with the
-library. If this is what you want to do, use the GNU Lesser General
-Public License instead of this License.
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/Makefile b/Makefile
index 0add537..9f39365 100644
--- a/Makefile
+++ b/Makefile
@@ -1,45 +1,125 @@
# Copyright 2018 The Hafnium Authors.
#
-# This program is free software; you can redistribute it and/or
-# modify it under the terms of the GNU General Public License
-# version 2 as published by the Free Software Foundation.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
-# By default, assume this was checked out as a submodule of the Hafnium repo
-# and that Linux was checked out along side that checkout. These paths can be
-# overridden if that assumption is incorrect.
-HAFNIUM_PATH ?= $(CURDIR)/../..
+# Select the project to build.
+PROJECT ?= reference
-ifneq ($(KERNELRELEASE),)
+# If HAFNIUM_HERMETIC_BUILD is "true" (not default), invoke `make` inside
+# a container. The 'run_in_container.sh' script will set the variable value to
+# 'inside' to avoid recursion.
+ifeq ($(HAFNIUM_HERMETIC_BUILD),true)
-obj-m += hafnium.o
+# TODO: This is not ideal as (a) we invoke the container once per command-line
+# target, and (b) we cannot pass `make` arguments to the script. We could
+# consider creating a bash alias for `make` to invoke the script directly.
-hafnium-y += main.o
-hafnium-y += vmlib/aarch64/call.o
-hafnium-y += vmlib/spci.o
-
-ccflags-y = -I$(HAFNIUM_PATH)/inc/vmapi -I$(M)/inc
-
-else
-
-KERNEL_PATH ?= $(HAFNIUM_PATH)/third_party/linux
-ARCH ?= arm64
-CROSS_COMPILE ?= aarch64-linux-gnu-
-CHECKPATCH ?= $(KERNEL_PATH)/scripts/checkpatch.pl -q
-
+# Need to define at least one non-default target.
all:
- cp -r $(HAFNIUM_PATH)/vmlib/ $(CURDIR)
- make -C $(KERNEL_PATH) HAFNIUM_PATH=$(HAFNIUM_PATH) M=$(CURDIR) O=$(O) ARCH=$(ARCH) CROSS_COMPILE=$(CROSS_COMPILE) modules
+ @$(CURDIR)/build/run_in_container.sh make PROJECT=$(PROJECT) $@
+# Catch-all target.
+.DEFAULT:
+ @$(CURDIR)/build/run_in_container.sh make PROJECT=$(PROJECT) $@
+
+else # HAFNIUM_HERMETIC_BUILD
+
+# Set path to prebuilts used in the build.
+UNAME_S := $(shell uname -s | tr '[:upper:]' '[:lower:]')
+PREBUILTS := $(CURDIR)/prebuilts/$(UNAME_S)-x64
+GN ?= $(PREBUILTS)/gn/gn
+NINJA ?= $(PREBUILTS)/ninja/ninja
+export PATH := $(PREBUILTS)/clang/bin:$(PATH)
+
+
+CHECKPATCH := $(CURDIR)/third_party/linux/scripts/checkpatch.pl \
+ --ignore BRACES,SPDX_LICENSE_TAG,VOLATILE,SPLIT_STRING,AVOID_EXTERNS,USE_SPINLOCK_T,NEW_TYPEDEFS,INITIALISED_STATIC,FILE_PATH_CHANGES,EMBEDDED_FUNCTION_NAME,SINGLE_STATEMENT_DO_WHILE_MACRO,MACRO_WITH_FLOW_CONTROL --quiet
+
+# Specifies the grep pattern for ignoring specific files in checkpatch.
+# C++ headers, *.hh, are automatically excluded.
+# Separate the different items in the list with a grep or (\|).
+# debug_el1.c : uses XMACROS, which checkpatch doesn't understand.
+# perfmon.c : uses XMACROS, which checkpatch doesn't understand.
+# feature_id.c : uses XMACROS, which checkpatch doesn't understand.
+CHECKPATCH_IGNORE := "src/arch/aarch64/hypervisor/debug_el1.c\|src/arch/aarch64/hypervisor/perfmon.c\|src/arch/aarch64/hypervisor/feature_id.c"
+
+OUT ?= out/$(PROJECT)
+OUT_DIR = $(OUT)
+
+.PHONY: all
+all: $(OUT_DIR)/build.ninja
+ @$(NINJA) -C $(OUT_DIR)
+
+$(OUT_DIR)/build.ninja:
+ @$(GN) --export-compile-commands gen --args='project="$(PROJECT)"' $(OUT_DIR)
+
+.PHONY: clean
clean:
- make -C $(KERNEL_PATH) HAFNIUM_PATH=$(HAFNIUM_PATH) M=$(CURDIR) O=$(O) clean
- rm -rf vmlib
+ @$(NINJA) -C $(OUT_DIR) -t clean
+.PHONY: clobber
+clobber:
+ rm -rf $(OUT)
+
+# see .clang-format.
+.PHONY: format
+format:
+ @echo "Formatting..."
+ @find src/ -name \*.c -o -name \*.cc -o -name \*.h | xargs -r clang-format -style file -i
+ @find inc/ -name \*.c -o -name \*.cc -o -name \*.h | xargs -r clang-format -style file -i
+ @find test/ -name \*.c -o -name \*.cc -o -name \*.h | xargs -r clang-format -style file -i
+ @find project/ -name \*.c -o -name \*.cc -o -name \*.h | xargs -r clang-format -style file -i
+ @find . \( -name \*.gn -o -name \*.gni \) | xargs -n1 $(GN) format
+
+.PHONY: checkpatch
checkpatch:
- $(CHECKPATCH) -f main.c
+ @find src/ -name \*.c -o -name \*.h | grep -v $(CHECKPATCH_IGNORE) | xargs $(CHECKPATCH) -f
+ @find inc/ -name \*.c -o -name \*.h | grep -v $(CHECKPATCH_IGNORE) | xargs $(CHECKPATCH) -f
+ # TODO: enable for test/
+ @find project/ -name \*.c -o -name \*.h | grep -v $(CHECKPATCH_IGNORE) | xargs $(CHECKPATCH) -f
-endif
+# see .clang-tidy.
+.PHONY: tidy
+tidy: $(OUT_DIR)/build.ninja
+ @$(NINJA) -C $(OUT_DIR)
+ @echo "Tidying..."
+ # TODO: enable readability-magic-numbers once there are fewer violations.
+ # TODO: enable for c++ tests as it currently gives spurious errors.
+ @find src/ \( -name \*.c \) | xargs clang-tidy -p $(OUT_DIR) -fix
+ @find test/ \( -name \*.c \) | xargs clang-tidy -p $(OUT_DIR) -fix
+
+.PHONY: check
+check: $(OUT_DIR)/build.ninja
+ @$(NINJA) -C $(OUT_DIR)
+ @echo "Checking..."
+ # TODO: enable for c++ tests as it currently gives spurious errors.
+ @find src/ \( -name \*.c \) | xargs clang-check -p $(OUT_DIR) -analyze -fix-what-you-can
+ @find test/ \( -name \*.c \) | xargs clang-check -p $(OUT_DIR) -analyze -fix-what-you-can
+
+.PHONY: license
+license:
+ @find src/ -name \*.S -o -name \*.c -o -name \*.cc -o -name \*.h -o -name \*.dts | xargs -n1 python build/license.py --style c
+ @find inc/ -name \*.S -o -name \*.c -o -name \*.cc -o -name \*.h -o -name \*.dts | xargs -n1 python build/license.py --style c
+ @find test/ -name \*.S -o -name \*.c -o -name \*.cc -o -name \*.h -o -name \*.dts | xargs -n1 python build/license.py --style c
+ @find build/ -name \*.py| xargs -n1 python build/license.py --style hash
+ @find test/ -name \*.py| xargs -n1 python build/license.py --style hash
+ @find . \( -name \*.gn -o -name \*.gni \) | xargs -n1 python build/license.py --style hash
+
+.PHONY: update-prebuilts
+update-prebuilts: prebuilts/linux-aarch64/linux/vmlinuz
+
+prebuilts/linux-aarch64/linux/vmlinuz: $(OUT_DIR)/build.ninja
+ @$(NINJA) -C $(OUT_DIR) "third_party/linux"
+ cp out/reference/obj/third_party/linux/linux.bin $@
+
+endif # HAFNIUM_HERMETIC_BUILD
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..5ee74bf
--- /dev/null
+++ b/README.md
@@ -0,0 +1,28 @@
+# Hafnium
+
+Hafnium is a hypervisor, initially supporting aarch64 (64-bit Armv8 CPUs).
+
+Get in touch and keep up-to-date at
+[hafnium-discuss@googlegroups.com](https://groups.google.com/forum/#!forum/hafnium-discuss).
+
+## Getting started
+
+To jump in and build Hafnium, follow the
+[getting started](docs/GettingStarted.md) instructions.
+
+If you want to contribute to the project, see details of
+[how we accept contributions](CONTRIBUTING.md).
+
+## Documentation
+
+More documentation is available on:
+
+* [Hafnium architecture](docs/Architecture.md)
+* [Code structure](docs/CodeStructure.md)
+* [Hafnium test infrastructure](docs/Testing.md)
+* [Running Hafnium under the Arm Fixed Virtual Platform](docs/FVP.md)
+* [How to build a RAM disk containing VMs for Hafnium to run](docs/HafniumRamDisk.md)
+* [Building Hafnium hermetically with Docker](docs/HermeticBuild.md)
+* [The interface Hafnium provides to VMs](docs/VmInterface.md)
+* [Scheduler VM expectations](docs/SchedulerExpectations.md)
+* [Hafnium coding style](docs/StyleGuide.md)
diff --git a/build/BUILD.gn b/build/BUILD.gn
new file mode 100644
index 0000000..8ce649c
--- /dev/null
+++ b/build/BUILD.gn
@@ -0,0 +1,58 @@
+# Copyright 2018 The Hafnium Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import("//build/toolchain/platform.gni")
+
+# Default language and error reporting configuration.
+config("compiler_defaults") {
+ cflags = [
+ "-g",
+ "-Wall",
+ "-O2",
+
+ #"-Wextra",
+ "-Wpedantic",
+ "-Werror",
+
+ "-fstack-protector-all",
+ ]
+
+ cflags_c = [ "-std=c11" ]
+
+ cflags_cc = [ "-std=c++2a" ]
+}
+
+# Platform configuration.
+config("platform") {
+ include_dirs = [
+ "//inc",
+ "//inc/vmapi",
+ "//src/arch/${plat_arch}/inc",
+
+ # Auto-generated headers using the 'offset_size_header' build rule.
+ "${root_gen_dir}/offset_size_header",
+ ]
+
+ defines = [
+ "HEAP_PAGES=${plat_heap_pages}",
+ "MAX_CPUS=${plat_max_cpus}",
+ "MAX_VMS=${plat_max_vms}",
+ ]
+
+ if (is_debug) {
+ defines += [ "DEBUG=1" ]
+ } else {
+ defines += [ "DEBUG=0" ]
+ }
+}
diff --git a/build/BUILDCONFIG.gn b/build/BUILDCONFIG.gn
new file mode 100644
index 0000000..134ddcd
--- /dev/null
+++ b/build/BUILDCONFIG.gn
@@ -0,0 +1,72 @@
+# Copyright 2018 The Hafnium Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Configuration of the build toolchain.
+declare_args() {
+ # The name of the project being built.
+ project = "reference"
+
+ # Enable extra debugging.
+ is_debug = true
+
+ # Whether to build against the platform for embedded images consisting of
+ # include paths and defines. This is also used for host targets that simulate
+ # an embedded image.
+ use_platform = false
+}
+
+# Check that we support the attempted build.
+assert(host_os == "linux", "Only linux builds are currently supported.")
+
+# Setup the standard variables.
+if (target_os == "") {
+ target_os = host_os
+}
+if (target_cpu == "") {
+ target_cpu = host_cpu
+}
+if (current_os == "") {
+ current_os = target_os
+}
+if (current_cpu == "") {
+ current_cpu = target_cpu
+}
+
+assert(target_os == host_os, "Cross compiles not yet supported.")
+assert(target_cpu == host_cpu, "Cross compiles not yet supported.")
+
+# All binary targets will get this list of configs by default.
+_shared_binary_target_configs = [ "//build:compiler_defaults" ]
+
+# If it's not building a host utility, it's building against the platform so apply the configuration.
+if (use_platform) {
+ _shared_binary_target_configs += [ "//build:platform" ]
+}
+
+# Apply that default list to the binary target types.
+set_defaults("executable") {
+ configs = _shared_binary_target_configs
+}
+set_defaults("static_library") {
+ configs = _shared_binary_target_configs
+}
+set_defaults("shared_library") {
+ configs = _shared_binary_target_configs
+}
+set_defaults("source_set") {
+ configs = _shared_binary_target_configs
+}
+
+# The default toolchain is the target toolchain for building utilities and tests.
+set_default_toolchain("//build/toolchain:host_clang")
diff --git a/build/docker/Dockerfile b/build/docker/Dockerfile
new file mode 100644
index 0000000..1eb6b2d
--- /dev/null
+++ b/build/docker/Dockerfile
@@ -0,0 +1,42 @@
+# Copyright 2019 The Hafnium Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#
+# Base container image to be uploaded to Google Cloud Platform as
+# "eu.gcr.io/hafnium-build/hafnium_ci". Each user derives their own container
+# with local user permissions from this base image. It should contain everything
+# needed to build and test Hafnium.
+#
+FROM launcher.gcr.io/google/ubuntu1804
+LABEL maintainer="Hafnium Team <hafnium-team+build@google.com>"
+
+# Install dependencies. Clear APT cache at the end to save space.
+ENV DEBIAN_FRONTEND=noninteractive
+RUN apt-get update \
+ && apt-get install -y \
+ bc `# for Linux headers` \
+ bison \
+ build-essential \
+ cpio \
+ flex \
+ git \
+ libpixman-1-0 `# for QEMU` \
+ libsdl2-2.0-0 `# for QEMU` \
+ libglib2.0 `# for QEMU` \
+ libssl-dev `# for Linux headers` \
+ python \
+ python-git `# for Linux checkpatch` \
+ python-ply `# for Linux checkpatch` \
+ strace `# for strace_open.sh` \
+ && rm -rf /var/lib/apt/lists/*
diff --git a/build/docker/Dockerfile.local b/build/docker/Dockerfile.local
new file mode 100644
index 0000000..67eb92f
--- /dev/null
+++ b/build/docker/Dockerfile.local
@@ -0,0 +1,35 @@
+# Copyright 2019 The Hafnium Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#
+# Container derived from the base image hosted on Google Cloud Platform.
+# It sets up a user with the same UID/GID as the local user, so that generated
+# files can be accessed by the host.
+# Please keep the diff between base and local images as small as possible.
+#
+FROM eu.gcr.io/hafnium-build/hafnium_ci
+ARG LOCAL_UID=1000
+ARG LOCAL_GID=1000
+
+RUN addgroup \
+ --gid "${LOCAL_GID}" \
+ hafnium \
+ && adduser \
+ --disabled-password \
+ --gecos "" \
+ --uid "${LOCAL_UID}" \
+ --shell "/bin/bash" \
+ --ingroup hafnium \
+ hafnium
+USER hafnium
\ No newline at end of file
diff --git a/build/docker/build.sh b/build/docker/build.sh
new file mode 100755
index 0000000..6ba9467
--- /dev/null
+++ b/build/docker/build.sh
@@ -0,0 +1,24 @@
+#!/usr/bin/env bash
+# Copyright 2019 The Hafnium Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+set -euo pipefail
+
+SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
+source "${SCRIPT_DIR}/common.inc"
+
+${DOCKER} build \
+ --pull \
+ -f "${SCRIPT_DIR}/Dockerfile" \
+ -t "${CONTAINER_TAG}" \
+ "${SCRIPT_DIR}"
diff --git a/build/docker/common.inc b/build/docker/common.inc
new file mode 100644
index 0000000..0d1e1db
--- /dev/null
+++ b/build/docker/common.inc
@@ -0,0 +1,21 @@
+# Copyright 2019 The Hafnium Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+CONTAINER_TAG="eu.gcr.io/hafnium-build/hafnium_ci"
+
+if [[ ! -v DOCKER ]]
+then
+ DOCKER="$(which docker)" \
+ || (echo "ERROR: Could not find Docker binary" 1>&2; exit 1)
+fi
\ No newline at end of file
diff --git a/build/docker/publish.sh b/build/docker/publish.sh
new file mode 100755
index 0000000..57ad16a
--- /dev/null
+++ b/build/docker/publish.sh
@@ -0,0 +1,23 @@
+#!/usr/bin/env bash
+# Copyright 2019 The Hafnium Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+set -euo pipefail
+
+SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
+source "${SCRIPT_DIR}/common.inc"
+
+# Requires for the user to be an owner of the GCP 'hafnium-build' project and
+# have gcloud SDK installed and authenticated.
+
+${DOCKER} push "${CONTAINER_TAG}"
diff --git a/build/image/check_elf.py b/build/image/check_elf.py
new file mode 100644
index 0000000..1dc0cec
--- /dev/null
+++ b/build/image/check_elf.py
@@ -0,0 +1,92 @@
+#!/usr/bin/env python3
+#
+# Copyright 2019 The Hafnium Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Check ELF file for assembly-level regressions.
+
+Objdumps the given ELF file and detects known assembly patterns, checking for
+regressions on bugs such as CPU erratas. Throws an exception if a broken pattern
+is detected.
+"""
+
+import argparse
+import os
+import re
+import subprocess
+import sys
+
+HF_ROOT = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
+CLANG_ROOT = os.path.join(HF_ROOT, "prebuilts", "linux-x64", "clang")
+OBJDUMP = os.path.join(CLANG_ROOT, "bin", "llvm-objdump")
+
+def check_eret_speculation_barrier(objdump_stdout):
+ """
+ Some ARM64 CPUs speculatively execute instructions after ERET.
+ Check that every ERET is followed by DSB NSH and ISB.
+ """
+ found_eret = False
+
+ STATE_DEFAULT = 1
+ STATE_EXPECT_DSB_NSH = 2
+ STATE_EXPECT_ISB = 3
+
+ REGEX_ERET = re.compile(r"^\s*[0-9a-f]+:\s*e0 03 9f d6\s+eret$")
+ REGEX_DSB_NSH = re.compile(r"^\s*[0-9a-f]+:\s*9f 37 03 d5\s*dsb\s+nsh$")
+ REGEX_ISB = re.compile(r"^\s*[0-9a-f]+:\s*df 3f 03 d5\s+isb$")
+
+ state = STATE_DEFAULT
+ for line in objdump_stdout:
+ if state == STATE_DEFAULT:
+ if re.match(REGEX_ERET, line):
+ found_eret = True
+ state = STATE_EXPECT_DSB_NSH
+ elif state == STATE_EXPECT_DSB_NSH:
+ if re.match(REGEX_DSB_NSH, line):
+ state = STATE_EXPECT_ISB
+ else:
+ raise Exception("ERET not followed by DSB NSH")
+ elif state == STATE_EXPECT_ISB:
+ if re.match(REGEX_ISB, line):
+ state = STATE_DEFAULT
+ else:
+ raise Exception("ERET not followed by ISB")
+
+ # Ensure that at least one instance was found, otherwise the regexes are
+ # probably wrong.
+ if not found_eret:
+ raise Exception("Could not find any ERET instructions")
+
+def Main():
+ parser = argparse.ArgumentParser()
+ parser.add_argument("input_elf",
+ help="ELF file to analyze")
+ parser.add_argument("stamp_file",
+ help="file to be touched if successful")
+ args = parser.parse_args()
+
+ objdump_stdout = subprocess.check_output([
+ OBJDUMP, "-d", args.input_elf ])
+ objdump_stdout = objdump_stdout.decode("utf-8").splitlines()
+
+ check_eret_speculation_barrier(objdump_stdout)
+
+ # Touch `stamp_file`.
+ with open(args.stamp_file, "w"):
+ pass
+
+ return 0
+
+if __name__ == "__main__":
+ sys.exit(Main())
diff --git a/build/image/convert_to_binary.py b/build/image/convert_to_binary.py
new file mode 100644
index 0000000..978d359
--- /dev/null
+++ b/build/image/convert_to_binary.py
@@ -0,0 +1,43 @@
+#!/usr/bin/env python3
+#
+# Copyright 2018 The Hafnium Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Convert a file to binary format.
+
+Calls objcopy to convert a file into raw binary format.
+"""
+
+import argparse
+import os
+import subprocess
+import sys
+
+HF_ROOT = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
+CLANG_ROOT = os.path.join(HF_ROOT, "prebuilts", "linux-x64", "clang")
+OBJCOPY = os.path.join(CLANG_ROOT, "bin", "llvm-objcopy")
+
+def Main():
+ parser = argparse.ArgumentParser()
+ parser.add_argument("--input", required=True)
+ parser.add_argument("--output", required=True)
+ args = parser.parse_args()
+ subprocess.check_call([
+ OBJCOPY, "-O", "binary", args.input, args.output
+ ])
+ return 0
+
+
+if __name__ == "__main__":
+ sys.exit(Main())
diff --git a/build/image/dtc.py b/build/image/dtc.py
new file mode 100755
index 0000000..c0caff8
--- /dev/null
+++ b/build/image/dtc.py
@@ -0,0 +1,74 @@
+#!/usr/bin/env python3
+#
+# Copyright 2019 The Hafnium Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Wrapper around Device Tree Compiler (dtc)"""
+
+import argparse
+import os
+import subprocess
+import sys
+
+HF_ROOT = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
+DTC_ROOT = os.path.join(HF_ROOT, "prebuilts", "linux-x64", "dtc")
+DTC = os.path.join(DTC_ROOT, "dtc")
+FDTOVERLAY = os.path.join(DTC_ROOT, "fdtoverlay")
+
+def cmd_compile(args):
+ exec_args = [
+ DTC,
+ "-I", "dts", "-O", "dtb",
+ "--out-version", "17",
+ ]
+
+ if args.output_file:
+ exec_args += [ "-o", args.output_file ]
+ if args.input_file:
+ exec_args += [ args.input_file ]
+
+ return subprocess.call(exec_args)
+
+def cmd_overlay(args):
+ exec_args = [
+ FDTOVERLAY,
+ "-i", args.base_dtb,
+ "-o", args.output_dtb,
+ ] + args.overlay_dtb
+ return subprocess.call(exec_args)
+
+def main():
+ parser = argparse.ArgumentParser()
+ subparsers = parser.add_subparsers(dest="command")
+
+ parser_compile = subparsers.add_parser("compile", help="compile DTS to DTB")
+ parser_compile.add_argument("-i", "--input-file")
+ parser_compile.add_argument("-o", "--output-file")
+
+ parser_overlay = subparsers.add_parser("overlay", help="merge DTBs")
+ parser_overlay.add_argument("output_dtb")
+ parser_overlay.add_argument("base_dtb")
+ parser_overlay.add_argument("overlay_dtb", nargs='*')
+
+ args = parser.parse_args()
+
+ if args.command == "compile":
+ return cmd_compile(args)
+ elif args.command == "overlay":
+ return cmd_overlay(args)
+ else:
+        raise Exception("Unknown command: {}".format(args.command))
+
+if __name__ == "__main__":
+ sys.exit(main())
diff --git a/build/image/generate_initrd.py b/build/image/generate_initrd.py
new file mode 100644
index 0000000..901bc68
--- /dev/null
+++ b/build/image/generate_initrd.py
@@ -0,0 +1,64 @@
+#!/usr/bin/env python3
+#
+# Copyright 2018 The Hafnium Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Generate an initial RAM disk for the hypervisor.
+
+Packages the VMs, initrds for the VMs and the list of secondary VMs (vms.txt)
+into an initial RAM disk image.
+"""
+
+import argparse
+import os
+import shutil
+import subprocess
+import sys
+
+def Main():
+    parser = argparse.ArgumentParser()
+    parser.add_argument("-f", "--file",
+        action="append", nargs=2,
+        metavar=("NAME", "PATH"),
+        help="File at host location PATH to be added to the RAM disk as NAME")
+    parser.add_argument("-s", "--staging", required=True)
+    parser.add_argument("-o", "--output", required=True)
+    args = parser.parse_args()
+
+    # Create staging folder if needed.
+    if not os.path.isdir(args.staging):
+        os.makedirs(args.staging)
+
+    # Copy files into the staging folder.
+    staged_files = []
+    for name, path in args.file:
+        assert name not in staged_files
+        shutil.copyfile(path, os.path.join(args.staging, name))
+        staged_files.append(name)
+
+    # Package files into an initial RAM disk.
+    with open(args.output, "w") as initrd:
+        # Move into the staging directory so the file names taken by cpio don't
+        # include the path.
+        os.chdir(args.staging)
+        cpio = subprocess.Popen(
+            ["cpio", "--create"],
+            stdin=subprocess.PIPE,
+            stdout=initrd,
+            stderr=subprocess.PIPE)
+        cpio.communicate(input="\n".join(staged_files).encode("utf-8"))
+        return cpio.returncode
+
+if __name__ == "__main__":
+    sys.exit(Main())
diff --git a/build/image/generate_linux_initrd.py b/build/image/generate_linux_initrd.py
new file mode 100644
index 0000000..c93e19a
--- /dev/null
+++ b/build/image/generate_linux_initrd.py
@@ -0,0 +1,47 @@
+#!/usr/bin/env python3
+#
+# Copyright 2019 The Hafnium Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Generate an initial RAM disk for a Linux VM."""
+
+import argparse
+import os
+import shutil
+import subprocess
+import sys
+
+def Main():
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--staging", required=True)
+    parser.add_argument("--output", required=True)
+    args = parser.parse_args()
+    # Package files into an initial RAM disk.
+    with open(args.output, "w") as initrd:
+        # Move into the staging directory so the file names taken by cpio don't
+        # include the path.
+        os.chdir(args.staging)
+        staged_files = [os.path.join(root, filename)
+            for (root, dirs, files) in os.walk(".") for filename in files + dirs]
+        cpio = subprocess.Popen(
+            ["cpio", "--create", "--format=newc"],
+            stdin=subprocess.PIPE,
+            stdout=initrd,
+            stderr=subprocess.PIPE)
+        cpio.communicate(input="\n".join(staged_files).encode("utf-8"))
+        return cpio.returncode
+
+
+if __name__ == "__main__":
+    sys.exit(Main())
diff --git a/build/image/image.gni b/build/image/image.gni
new file mode 100644
index 0000000..7860784
--- /dev/null
+++ b/build/image/image.gni
@@ -0,0 +1,308 @@
+# Copyright 2018 The Hafnium Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import("//build/toolchain/embedded.gni")
+
+# Build image, link to an ELF file then convert to plain binary.
+template("image_binary") {
+ assert(defined(invoker.image_name),
+ "image_binary() must specify an \"image_name\" value")
+
+ output_root = ""
+ if (defined(invoker.output_path)) {
+ output_root += "${invoker.output_path}/"
+ }
+ output_root += invoker.image_name
+
+ file_root = "${root_out_dir}/${output_root}"
+ elf_file = "${file_root}.elf"
+ bin_file = "${file_root}.bin"
+
+ elf_target = "${target_name}__elf"
+ checked_elf_target = "${target_name}__checked_elf"
+
+ # Link objects together
+ executable(elf_target) {
+ forward_variables_from(invoker,
+ [
+ "cflags",
+ "cflags_c",
+ "defines",
+ "deps",
+ "include_dirs",
+ "public_configs",
+ "public_deps",
+ "sources",
+ "testonly",
+ ])
+ output_name = "${output_root}.elf"
+ inputs = [
+ rebase_path("//build/image/image.ld"),
+ ]
+ ldflags = [
+ "-T",
+ rebase_path("//build/image/image.ld"),
+ ]
+ visibility = [
+ ":${checked_elf_target}",
+ ":${invoker.target_name}",
+ ]
+ }
+
+ # Analyze the generated ELF file and check that assembly-level fixes, e.g.
+ # for CPU errata, have been properly applied.
+ action(checked_elf_target) {
+ forward_variables_from(invoker, [ "testonly" ])
+ stamp_file = elf_file + "_check.stamp"
+
+ script = "//build/image/check_elf.py"
+ deps = [
+ ":${elf_target}",
+ ]
+ args = [
+ rebase_path(elf_file),
+ rebase_path(stamp_file),
+ ]
+ outputs = [
+ stamp_file,
+ ]
+ visibility = [ ":${invoker.target_name}" ]
+ }
+
+ action(target_name) {
+ forward_variables_from(invoker, [ "testonly" ])
+
+ script = "//build/image/convert_to_binary.py"
+
+ if (defined(invoker.check_binary) && invoker.check_binary == true) {
+ deps = [
+ ":${checked_elf_target}",
+ ]
+ } else {
+ deps = [
+ ":${elf_target}",
+ ]
+ }
+ args = [
+ "--input",
+ rebase_path(elf_file),
+ "--output",
+ rebase_path(bin_file),
+ ]
+ outputs = [
+ bin_file,
+ ]
+ }
+}
+
+# Helper to build a hypervisor image
+template("hypervisor") {
+ image_binary(target_name) {
+ forward_variables_from(invoker,
+ [
+ "cflags",
+ "cflags_c",
+ "defines",
+ "deps",
+ "public_deps",
+ "sources",
+ "testonly",
+ ])
+ image_name = target_name
+
+ # Perform checks on the generated binary to prevent regressing on some
+    # classes of bugs, typically CPU errata.
+ check_binary = true
+ }
+}
+
+# Helper to build a virtual machine kernel
+template("vm_kernel") {
+ image_binary(target_name) {
+ forward_variables_from(invoker,
+ [
+ "cflags",
+ "cflags_c",
+ "defines",
+ "deps",
+ "include_dirs",
+ "public_configs",
+ "public_deps",
+ "sources",
+ "testonly",
+ ])
+ output_path = rebase_path(".", root_out_dir, target_out_dir)
+ image_name = target_name
+ }
+}
+
+# Build the initial RAM disk for the Linux VM.
+template("linux_initrd") {
+ initrd_base = "${target_out_dir}/${target_name}/initrd"
+ initrd_file = "${initrd_base}.img"
+ initrd_staging = "${initrd_base}"
+
+ copy("${target_name}__staging") {
+ forward_variables_from(invoker,
+ [
+ "testonly",
+ "sources",
+ "deps",
+ ])
+ outputs = [
+ "${initrd_staging}/{{source_file_part}}",
+ ]
+ }
+
+ action(target_name) {
+ forward_variables_from(invoker, [ "testonly" ])
+ script = "//build/image/generate_linux_initrd.py"
+ args = [
+ "--staging",
+ rebase_path(initrd_staging),
+ "--output",
+ rebase_path(initrd_file),
+ ]
+ deps = [
+ ":${target_name}__staging",
+ ]
+ outputs = [
+ initrd_file,
+ ]
+ }
+}
+
+template("device_tree") {
+ action_foreach(target_name) {
+ forward_variables_from(invoker,
+ [
+ "testonly",
+ "sources",
+ "deps",
+ ])
+ script = "//build/image/dtc.py"
+
+ if (defined(invoker.output_pattern)) {
+ output_pattern = invoker.output_pattern
+ } else {
+ output_pattern = "${target_out_dir}/{{source_name_part}}.dtb"
+ }
+
+ outputs = [
+ output_pattern,
+ ]
+ args = [
+ "compile",
+ "-i",
+ "{{source}}",
+ "-o",
+ rebase_path(output_pattern),
+ ]
+ }
+}
+
+template("incbin") {
+ source_set(target_name) {
+ forward_variables_from(invoker,
+ [
+ "testonly",
+ "deps",
+ ])
+
+ sources = [
+ "//build/image/incbin.S",
+ ]
+ inputs = invoker.sources
+ defines = [
+ "SECTION_NAME=" + invoker.section,
+ "FILE_PATH=\"" + rebase_path(inputs[0]) + "\"",
+ ]
+ }
+}
+
+# Build the initial RAM disk for the hypervisor.
+template("initrd") {
+ assert(defined(invoker.primary_name),
+ "initrd() must specify a \"primary_name\" value")
+ assert(defined(invoker.primary_vm),
+ "initrd() must specify a \"primary_vm\" value")
+
+ manifest_target = "${target_name}__manifest"
+ base_out_dir = "${target_out_dir}/${target_name}"
+
+ # Generate manifest.dtbo
+ device_tree(manifest_target) {
+ sources = [
+ invoker.manifest,
+ ]
+ output_pattern = "${base_out_dir}/{{source_name_part}}.dtbo"
+ }
+
+ action(target_name) {
+ forward_variables_from(invoker, [ "testonly" ])
+ script = "//build/image/generate_initrd.py"
+
+ initrd_file = "${base_out_dir}/initrd.img"
+ initrd_staging = "${base_out_dir}/initrd"
+
+ # Cannot get target outputs here as they are defined in a different file.
+ primary_vm_image = get_label_info(invoker.primary_vm, "target_out_dir") +
+ "/" + get_label_info(invoker.primary_vm, "name") + ".bin"
+
+ deps = [
+ ":${manifest_target}",
+ invoker.primary_vm,
+ ]
+ args = [
+ "--file",
+ invoker.primary_name,
+ rebase_path(primary_vm_image),
+ "--staging",
+ rebase_path(initrd_staging),
+ "--output",
+ rebase_path(initrd_file),
+ ]
+
+ if (defined(invoker.primary_initrd)) {
+ deps += [ invoker.primary_initrd ]
+ primary_initrd_outputs = get_target_outputs(invoker.primary_initrd)
+ args += [
+ "--file",
+ "initrd.img",
+ rebase_path(primary_initrd_outputs[0]),
+ ]
+ }
+
+ # Add the info about the secondary VMs. The information about the VMs is
+ # encoded in lists with the following elements:
+ #
+ # 1. File name for the VM image.
+ # 2. Build target for the VM.
+ if (defined(invoker.secondary_vms)) {
+ foreach(vm, invoker.secondary_vms) {
+ deps += [ vm[1] ]
+ args += [
+ "--file",
+ vm[0],
+ rebase_path(get_label_info(vm[1], "target_out_dir") + "/" +
+ get_label_info(vm[1], "name") + ".bin"),
+ ]
+ }
+ }
+
+ outputs = [
+ initrd_file,
+ ]
+ }
+}
diff --git a/build/image/image.ld b/build/image/image.ld
new file mode 100644
index 0000000..c42ab5f
--- /dev/null
+++ b/build/image/image.ld
@@ -0,0 +1,197 @@
+/*
+ * Copyright 2018 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * Code will start running at this symbol which is placed at the start of the
+ * image.
+ */
+ENTRY(entry)
+
+/*
+ * The following would be useful to check that .init code is not called back
+ * into once it has completed but it isn't supported by ld.lld.
+ *
+ * NOCROSSREFS_TO(.init .text)
+ */
+
+SECTIONS
+{
+ /*
+ * Set the image origin to a platform specific address. The images are
+ * relocatable but some platforms, e.g. QEMU, load to the same address
+ * and it makes debugging easier if the addresses match the symbols.
+ */
+ . = ORIGIN_ADDRESS;
+
+ /*
+ * Collect together the code. This is page aligned so it can be mapped
+ * as executable-only.
+ */
+ text_begin = .;
+ .init : {
+ *(.init.entry)
+ *(.init.*)
+ }
+ .text : {
+ *(.text.*)
+ }
+ text_size = ABSOLUTE(. - text_begin);
+ . = ALIGN(4096);
+ text_end = .;
+
+ /*
+ * Collect together read-only data including relocations at the end
+ * which are applied by the entry code. This is page aligned so it can
+ * be mapped as read-only and non-executable.
+ */
+ . = ALIGN(4096);
+ rodata_begin = .;
+ .rodata : {
+ *(.rodata.*)
+ }
+ /*
+ * .rela contains Elf64_Rela entries which contain 8-byte fields so
+ * should be 8-byte aligned.
+ */
+ . = ALIGN(8);
+ rela_begin = .;
+ .rela : {
+ *(.rela.*)
+ }
+ rela_end = .;
+ /*
+ * The linker doesn't allow .dynsym and .dynstr to be discarded, see
+ * /DISCARD/ below, so make sure they don't get in the way.
+ */
+ .dynsym : {
+ *(.dynsym.*)
+ }
+ .dynstr : {
+ *(.dynstr.*)
+ }
+ /*
+ * The hftest framework adds test descriptors in the .hftest section
+ * which is examined at runtime to discover the available tests. The
+ * input sections are named after the test they include so sorting here
+ * means they are stored sorted by the name of the test suite and then
+ * by test case names. To ensure tests aren't accidentally included in
+ * images that are not meant to have them, the assertion checks for a
+ * marker to signal tests are allowed.
+ */
+ . = ALIGN(8);
+ hftest_begin = .;
+ .hftest : {
+ KEEP(*(SORT(.hftest.*)))
+ }
+ hftest_end = .;
+ ASSERT((SIZEOF(.hftest) == (DEFINED(hftest_enable) ? SIZEOF(.hftest) : 0)),
+ "Error: Image includes .hftest section but not HFTEST_ENABLE().")
+ rodata_size = ABSOLUTE(. - rodata_begin);
+ . = ALIGN(4096);
+ rodata_end = .;
+
+ /*
+ * A platform may choose to link blobs such as the FDT or the initrd
+ * into the image rather than having them loaded separately. These are
+ * placed at the end of the image and will not be mapped automatically
+ * on boot so they can be treated as if they were loaded as separate
+ * blobs. They are page aligned so they can be mapped individually.
+ *
+ * TODO: remove this when the loader can reliably deliver both the
+ * binary and a separate blob for the initrd.
+ */
+ . = ALIGN(4096);
+ initrd_begin = .;
+ .initrd : {
+ KEEP(*(.plat.initrd))
+ }
+ initrd_end = .;
+ . = ALIGN(4096);
+ fdt_begin = .;
+ .fdt : {
+ KEEP(*(.plat.fdt))
+ }
+ fdt_end = .;
+
+ /*
+ * Collect together the read-write data including .bss at the end which
+ * will be zero'd by the entry code. This is page aligned so it can be
+ * mapped as non-executable.
+ */
+ . = ALIGN(4096);
+ data_begin = .;
+ .data : {
+ *(.data)
+ }
+ /*
+ * Global offset table used for relocations. This is where relocation
+ * fix-ups are applied.
+ */
+ .got : {
+ *(.got.*)
+ }
+ /*
+ * The linker doesn't allow .dynamic to be discarded, see /DISCARD/
+ * below, so make sure it doesn't get in the way.
+ */
+ .dynamic : {
+ *(.dynamic.*)
+ }
+ /* Everything beyond this point will not be included in the binary. */
+ bin_end = .;
+ /* The entry point code assumes that .bss is 16-byte aligned. */
+ . = ALIGN(16);
+ bss_begin = .;
+ .bss : {
+ *(.bss)
+ *(COMMON)
+ }
+ . = ALIGN(16);
+ bss_end = .;
+ data_size = ABSOLUTE(. - data_begin);
+ . = ALIGN(4096);
+ data_end = .;
+
+ /*
+ * Remove unused sections from the image.
+ */
+ /DISCARD/ : {
+ /* The image loads itself so doesn't need these sections. */
+ /* ld.lld doesn't allow these to be discarded.
+ *(.dynsym)
+ *(.dynstr)
+ *(.dynamic)
+ */
+ *(.gnu.hash)
+ *(.hash)
+ *(.interp)
+ }
+
+ /*
+ * Make note of some useful values.
+ */
+
+ /* Note the first page not used in the image. */
+ . = ALIGN(4096);
+ image_end = .;
+
+ /*
+ * Calculate sizes of the binary file and image loaded into memory as
+ * well as the text, read-only and read-write data sections.
+ */
+ bin_size = ABSOLUTE(bin_end - ORIGIN_ADDRESS);
+ image_size = ABSOLUTE(image_end - ORIGIN_ADDRESS);
+}
diff --git a/build/image/incbin.S b/build/image/incbin.S
new file mode 100644
index 0000000..9a19d87
--- /dev/null
+++ b/build/image/incbin.S
@@ -0,0 +1,18 @@
+/*
+ * Copyright 2019 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+.section SECTION_NAME, "a"
+.incbin FILE_PATH
diff --git a/build/license.py b/build/license.py
new file mode 100644
index 0000000..ee1b5e9
--- /dev/null
+++ b/build/license.py
@@ -0,0 +1,63 @@
+#!/usr/bin/env python3
+#
+# Copyright 2018 The Hafnium Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Add license header to source files.
+
+If the file doesn't have the license header, add it with the appropriate comment
+style.
+"""
+
+import argparse
+import datetime
+import re
+import sys
+
+
+apache2 = """{comment} Copyright {year} The Hafnium Authors.
+{comment}
+{comment} Licensed under the Apache License, Version 2.0 (the "License");
+{comment} you may not use this file except in compliance with the License.
+{comment} You may obtain a copy of the License at
+{comment}
+{comment} https://www.apache.org/licenses/LICENSE-2.0
+{comment}
+{comment} Unless required by applicable law or agreed to in writing, software
+{comment} distributed under the License is distributed on an "AS IS" BASIS,
+{comment} WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+{comment} See the License for the specific language governing permissions and
+{comment} limitations under the License."""
+
+def Main():
+ parser = argparse.ArgumentParser()
+ parser.add_argument("file")
+ parser.add_argument("--style", choices=["c", "hash"], required=True)
+ args = parser.parse_args()
+ header = "/*\n" if args.style == "c" else ""
+ year = str(datetime.datetime.now().year)
+ header += apache2.format(comment=" *" if args.style == "c" else "#", year=year)
+ header += "\n */" if args.style == "c" else ""
+ header += "\n\n"
+ header_regex = re.escape(header).replace(year, r"\d\d\d\d")
+ with open(args.file, "r") as f:
+ contents = f.read()
+ if re.search(header_regex, contents):
+ return
+ with open(args.file, "w") as f:
+ f.write(header)
+ f.write(contents)
+
+if __name__ == "__main__":
+ sys.exit(Main())
diff --git a/build/linux/copy_dirs.py b/build/linux/copy_dirs.py
new file mode 100644
index 0000000..3e8ccf6
--- /dev/null
+++ b/build/linux/copy_dirs.py
@@ -0,0 +1,51 @@
+#!/usr/bin/env python3
+#
+# Copyright 2019 The Hafnium Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Copies all files inside one folder to another, preserving subfolders."""
+
+import argparse
+import os
+import shutil
+import sys
+
+def main():
+    parser = argparse.ArgumentParser()
+    parser.add_argument("source_folder",
+        help="directory to be copied from")
+    parser.add_argument("destination_folder",
+        help="directory to be copied into")
+    parser.add_argument("stamp_file",
+        help="stamp file to be touched")
+    args = parser.parse_args()
+
+    # Walk the subfolders of the source directory and copy individual files.
+    # Not using shutil.copytree() because it never overwrites files.
+    for root, _, files in os.walk(args.source_folder):
+        for f in files:
+            abs_src_path = os.path.join(root, f)
+            rel_path = os.path.relpath(abs_src_path, args.source_folder)
+            abs_dst_path = os.path.join(args.destination_folder, rel_path)
+            abs_dst_folder = os.path.dirname(abs_dst_path)
+            if not os.path.isdir(abs_dst_folder):
+                os.makedirs(abs_dst_folder)
+            shutil.copyfile(abs_src_path, abs_dst_path)
+
+    # Touch `stamp_file` so the build system sees a fresh output timestamp.
+    with open(args.stamp_file, "w"):
+        pass
+
+if __name__ == "__main__":
+    sys.exit(main())
diff --git a/build/linux/gen_depfile.py b/build/linux/gen_depfile.py
new file mode 100755
index 0000000..12fbd51
--- /dev/null
+++ b/build/linux/gen_depfile.py
@@ -0,0 +1,50 @@
+#!/usr/bin/env python3
+#
+# Copyright 2019 The Hafnium Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Generate a depfile for a folder."""
+
+import argparse
+import os
+import sys
+
+def main():
+    parser = argparse.ArgumentParser()
+    parser.add_argument("root_dir", help="input directory")
+    parser.add_argument("stamp_file", help="stamp file to be touched")
+    parser.add_argument("dep_file", help="depfile to be written")
+    args = parser.parse_args()
+
+    # Compile list of all files in the folder, relative to `root_dir`.
+    sources = []
+    for root, _, files in os.walk(args.root_dir):
+        sources.extend([ os.path.join(root, f) for f in files ])
+    sources = sorted(sources)
+
+    # Write `dep_file` as a Makefile rule for `stamp_file`.
+    with open(args.dep_file, "w") as f:
+        f.write(args.stamp_file)
+        f.write(":")
+        for source in sources:
+            f.write(" ")
+            f.write(source)
+        f.write("\n")
+
+    # Touch `stamp_file`.
+    with open(args.stamp_file, "w"):
+        pass
+
+if __name__ == "__main__":
+    sys.exit(main())
diff --git a/build/linux/linux.gni b/build/linux/linux.gni
new file mode 100644
index 0000000..f76b291
--- /dev/null
+++ b/build/linux/linux.gni
@@ -0,0 +1,186 @@
+# Copyright 2019 The Hafnium Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+template("source_dir") {
+ action("${target_name}") {
+ depfile = "${target_out_dir}/${target_name}.d"
+ outputs = [
+ "$target_out_dir/${target_name}.script.stamp",
+ ]
+
+ script = "//build/linux/gen_depfile.py"
+ args = [
+ rebase_path(invoker.path, root_build_dir),
+ rebase_path(outputs[0], root_build_dir),
+ rebase_path(depfile, root_build_dir),
+ ]
+ }
+}
+
+template("source_dir_copy") {
+ source_dir_target = "${target_name}__source_dir"
+
+ source_dir(source_dir_target) {
+ path = invoker.path
+ }
+
+ action("${target_name}") {
+ script = "//build/linux/copy_dirs.py"
+ outputs = [
+ "$target_out_dir/${target_name}.script.stamp",
+ ]
+ args = [
+ rebase_path(invoker.path),
+ rebase_path(target_out_dir),
+ rebase_path(outputs[0]),
+ ]
+ deps = [
+ ":${source_dir_target}",
+ ]
+ }
+}
+
+template("linux_kernel") {
+ source_target = "${target_name}__source"
+ defconfig_target = "${target_name}__defconfig"
+ prebuilt_target = "${target_name}__prebuilt"
+
+ kernel_dir = "./"
+
+ # Args to build/make.py to start the Linux build.
+ shared_args = [
+ "--directory",
+ rebase_path(kernel_dir),
+
+ # TODO: Build with toolchain cc instead of a hardcoded one.
+ "CC=" + rebase_path("//prebuilts/linux-x64/clang/bin/clang"),
+ "LD=" +
+ rebase_path("//prebuilts/linux-x64/gcc/bin/aarch64-linux-android-ld"),
+ "AR=" +
+ rebase_path("//prebuilts/linux-x64/gcc/bin/aarch64-linux-android-ar"),
+ "NM=" + rebase_path("//prebuilts/linux-x64/clang/bin/llvm-nm"),
+ "OBJCOPY=" + rebase_path(
+ "//prebuilts/linux-x64/gcc/bin/aarch64-linux-android-objcopy"),
+ "OBJDUMP=" + rebase_path("//prebuilts/linux-x64/clang/bin/llvm-objdump"),
+ "STRIP=" + rebase_path("//prebuilts/linux-x64/clang/bin/llvm-strip"),
+ "ARCH=arm64",
+ "CROSS_COMPILE=aarch64-linux-gnu-",
+
+ # Build out-of-tree in `target_out_dir`.
+ "O=" + rebase_path(target_out_dir),
+
+ # TODO: Remove/replace.
+ "-j24",
+ ]
+
+ # Subtarget which generates a depfile with all files in the Linux tree
+ # and gets invalidated if any of them change.
+ source_dir(source_target) {
+ path = kernel_dir
+ }
+
+ # Subtarget which runs `defconfig` and `modules_prepare`. Used by targets
+ # which do not require the whole kernel to have been built.
+ action(defconfig_target) {
+ script = "//build/make.py"
+ args = shared_args + [
+ "defconfig",
+ "modules_prepare",
+ ]
+
+ # We never use the output but GN requires each target to have one, and for
+ # its timestamp to change after a recompile. Use the .config file.
+ outputs = [
+ "${target_out_dir}/.config",
+ ]
+ deps = [
+ ":${source_target}",
+ ]
+ }
+
+ action(target_name) {
+ script = "//build/make.py"
+ output_file = "${target_out_dir}/${target_name}.bin"
+ args = shared_args + [
+ "--copy_out_file",
+ rebase_path("${target_out_dir}/arch/arm64/boot/Image"),
+ rebase_path(output_file),
+ ]
+ outputs = [
+ output_file,
+ ]
+ deps = [
+ ":${defconfig_target}",
+ ":${source_target}",
+ ]
+ }
+
+ # Subtarget for a prebuilt image, if defined.
+ if (defined(invoker.prebuilt)) {
+ copy(prebuilt_target) {
+ sources = [
+ invoker.prebuilt,
+ ]
+ outputs = [
+ "${target_out_dir}/${prebuilt_target}.bin",
+ ]
+ }
+ }
+}
+
+template("linux_kernel_module") {
+  # Out-of-tree modules cannot be built outside of their directory.
+  # To avoid parallel builds under different toolchains clashing with one
+  # another, work around this by copying the source files to `target_out_dir`.
+
+ source_target = "${target_name}__source"
+
+ module_dir = "./"
+
+ source_dir_copy(source_target) {
+ path = module_dir
+ }
+
+ action(target_name) {
+ forward_variables_from(invoker, [ "testonly" ])
+ script = "//build/make.py"
+ args = [
+ "--directory",
+ rebase_path(target_out_dir),
+ "HAFNIUM_PATH=" + rebase_path("//"),
+ "KERNEL_PATH=" + rebase_path(invoker.kernel_dir),
+ "O=" +
+ rebase_path(get_label_info(invoker.kernel_target, "target_out_dir")),
+ "CC=" + rebase_path("//prebuilts/linux-x64/clang/bin/clang"),
+ "LD=" +
+ rebase_path("//prebuilts/linux-x64/gcc/bin/aarch64-linux-android-ld"),
+ "AR=" +
+ rebase_path("//prebuilts/linux-x64/gcc/bin/aarch64-linux-android-ar"),
+ "NM=" + rebase_path("//prebuilts/linux-x64/clang/bin/llvm-nm"),
+ "OBJCOPY=" + rebase_path(
+ "//prebuilts/linux-x64/gcc/bin/aarch64-linux-android-objcopy"),
+ "OBJDUMP=" + rebase_path("//prebuilts/linux-x64/clang/bin/llvm-objdump"),
+ "STRIP=" + rebase_path("//prebuilts/linux-x64/clang/bin/llvm-strip"),
+ "ARCH=arm64",
+ "CROSS_COMPILE=aarch64-linux-gnu-",
+ ]
+ outputs = [
+ "${target_out_dir}/${invoker.module_name}.ko",
+ ]
+ deps = [
+ ":${source_target}",
+ "${invoker.kernel_target}__defconfig",
+ ]
+ }
+}
diff --git a/build/make.py b/build/make.py
new file mode 100644
index 0000000..15cce9b
--- /dev/null
+++ b/build/make.py
@@ -0,0 +1,45 @@
+#!/usr/bin/env python3
+#
+# Copyright 2019 The Hafnium Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Runs make to build a target."""
+
+import argparse
+import os
+import shutil
+import subprocess
+import sys
+
+
+def Main():
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--directory", required=True)
+    parser.add_argument("--copy_out_file", nargs=2,
+        help="Copy file after building. Takes two params: <src> <dest>")
+    args, make_args = parser.parse_known_args()
+
+    os.chdir(args.directory)
+    os.environ["PWD"] = os.getcwd()
+    status = subprocess.call(["make"] + make_args)
+    if status != 0:
+        return status
+
+    if args.copy_out_file is not None:
+        shutil.copyfile(args.copy_out_file[0], args.copy_out_file[1])
+    return 0
+
+
+if __name__ == "__main__":
+    sys.exit(Main())
diff --git a/build/parse_strace_open.py b/build/parse_strace_open.py
new file mode 100755
index 0000000..a971220
--- /dev/null
+++ b/build/parse_strace_open.py
@@ -0,0 +1,205 @@
+#!/usr/bin/env python3
+#
+# Copyright 2019 The Hafnium Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Script which parses the output of `strace` and dumping a list of files
+that were touched by the traced processes outside of whitelisted folders.
+It assumes that strace was invoked with the following arguments:
+ -e trace=%file,chdir,%process record required syscalls
+ -qq silence 'exit code' records
+ -o <file> output format is different when writing
+ to a file from printing to the console
+"""
+
+import argparse
+import os
+import sys
+
+FORK_SYSCALLS = [
+ "clone",
+ "fork",
+ "vfork",
+ ]
+OPEN_SYSCALLS = [
+ "access",
+ "creat",
+ "lstat",
+ "mkdir",
+ "open",
+ "openat",
+ "readlink",
+ "stat",
+ ]
+
+def get_unfinished(line):
+ pos = line.find("<unfinished ...>")
+ if pos < 0:
+ return None
+ else:
+ return line[:pos]
+
+def get_resumed(line):
+ pos = line.find(" resumed>")
+ if pos < 0:
+ return None
+ else:
+ return line[pos + len(" resumed>"):]
+
+def merge_unfinished_lines(lines):
+ """Process input lines and merge those split by an interrupting syscall."""
+ # Lines in the order they were started being written.
+ finished = []
+
+ # Pending unfinished lines. Map from PID to index in `finished`.
+ cursor = {}
+
+ for line in lines:
+ pid = int(line.split()[0])
+
+ resumed = get_resumed(line)
+ if resumed is not None:
+ assert(pid in cursor)
+ unfinished = get_unfinished(resumed)
+ if unfinished is not None:
+ finished[cursor[pid]] += unfinished
+ else:
+ finished[cursor[pid]] += resumed
+ del(cursor[pid])
+ else:
+ assert(pid not in cursor)
+ unfinished = get_unfinished(line)
+ if unfinished is not None:
+ # Line is unfinished. Store its location to `cursor`.
+ cursor[pid] = len(finished)
+ finished += [ unfinished ]
+ else:
+ finished += [ line ]
+ return finished
+
+def abs_path(cwd, path):
+ """If `path` is relative, resolve it against the current working directory.
+ Also normalize the resulting path."""
+ if path[0] != '/':
+ path = os.path.join(cwd, path)
+ path = os.path.abspath(path)
+ # while '//' in path:
+ # path = path.replace('//', '/')
+ path = os.path.realpath(path)
+ return path
+
+def get_touched_files(lines, orig_cwd):
+ """Parse strace output and return all files that an open()-like syscall was
+ called on."""
+ files = set()
+
+ # Map from PID to the current working directory.
+ cwd = {}
+
+ # Map from PID to executable name
+ executable = {}
+
+ # Map from PID to the PID of the process which forked it.
+ fork_of = {}
+
+ first_pid = True
+ for line in lines:
+ # Split line: <pid> <syscall info>
+ line = line.split()
+ pid = int(line[0])
+ syscall = " ".join(line[1:])
+
+ # If seeing a PID for the first time, derive its working directory
+ # from its parent.
+ if pid not in cwd:
+ if first_pid:
+ # Very first line of strace output. Set working directory from
+ # command line arguments (should match cwd of strace).
+ first_pid = False
+ cwd[pid] = orig_cwd
+ else:
+ # There should have been a fork/clone syscall which spawned this
+ # process. Inherit its working directory.
+ cwd[pid] = cwd[fork_of[pid]]
+
+ # We are looking for lines which match:
+ # name(arg1, arg2, ..., argN) = result
+ left_bracket = syscall.find("(")
+ right_bracket = syscall.rfind(")")
+ assign_sign = syscall.rfind("=")
+ if left_bracket < 0 or right_bracket < 0 or assign_sign < right_bracket:
+ continue
+
+ syscall_name = syscall[:left_bracket]
+ syscall_result = syscall[assign_sign+2:]
+
+ syscall_args = syscall[left_bracket+1:right_bracket].split(",")
+ syscall_args = list(map(lambda x: x.strip(), syscall_args))
+
+ if syscall_name in FORK_SYSCALLS:
+ # If this is a fork, keep track of the parent-child relationship.
+ # The child's PID is the syscall's return code.
+ new_pid = int(syscall_result)
+ fork_of[new_pid] = pid
+ executable[new_pid] = executable[pid]
+ elif syscall_name == "chdir":
+ # If this is a change of working directory, keep track of it.
+ # It is in the first argument in quotes.
+ new_dir = syscall_args[0][1:-1]
+ cwd[pid] = abs_path(cwd[pid], new_dir)
+ elif syscall_name == "execve":
+ # If this is executing a new program, record its name.
+ # It is in the first argument in quotes.
+ binary_name = syscall_args[0][1:-1]
+ executable[pid] = binary_name
+ elif syscall_name in OPEN_SYSCALLS:
+ # If this is a syscall touching a file, record the path.
+ # We ignore the result code, i.e. record the path even if the
+ # syscall failed to open it.
+ arg_idx = 0
+ if syscall_name == "openat":
+ # openat() can open a file (second arg) relative to a given
+            # folder (first arg). We only support passing AT_FDCWD, i.e.
+ # resolve against the current working directory.
+ arg_idx = 1
+ assert(syscall_args[0] == "AT_FDCWD")
+ fname = abs_path(cwd[pid], syscall_args[arg_idx][1:-1])
+ # Record the file and the name of the program which touched it.
+ files.add((fname, executable[pid]))
+ return files
+
+def filter_results(files, root_dir):
+ """Remove paths which are whitelisted from the results."""
+ # Anything in the Hafnium directory is allowed.
+ files = filter(lambda x: not x[0].startswith(root_dir + "/"), files)
+ # Clang puts intermediate files in /tmp.
+ files = filter(lambda x: not x[0].startswith("/tmp/"), files)
+ return list(files)
+
+def main(args):
+ parser = argparse.ArgumentParser()
+ parser.add_argument("root_dir",
+ help="Root directory of Hafnium, cwd of strace")
+ args, make_args = parser.parse_known_args()
+
+ stdin = map(lambda x: x.strip(), sys.stdin.readlines())
+ stdin = merge_unfinished_lines(stdin)
+ files = get_touched_files(stdin, args.root_dir)
+ files = filter_results(files, args.root_dir)
+ files = sorted(list(files))
+
+ print("\n".join(map(lambda x: "{} ({})".format(x[0], x[1]), files)))
+
+if __name__ == "__main__":
+ main(sys.argv)
diff --git a/build/run_in_container.sh b/build/run_in_container.sh
new file mode 100755
index 0000000..4090c38
--- /dev/null
+++ b/build/run_in_container.sh
@@ -0,0 +1,117 @@
+#!/usr/bin/env bash
+# Copyright 2019 The Hafnium Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+set -euo pipefail
+
+SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
+ROOT_DIR="$(dirname ${SCRIPT_DIR})"
+
+source "${SCRIPT_DIR}/docker/common.inc"
+
+if [ "${HAFNIUM_HERMETIC_BUILD:-}" == "inside" ]
+then
+ echo "ERROR: Invoked $0 recursively" 1>&2
+ exit 1
+fi
+
+# Set up a temp directory and register a cleanup function on exit.
+TMP_DIR="$(mktemp -d)"
+function cleanup() {
+ rm -rf "${TMP_DIR}"
+}
+trap cleanup EXIT
+
+# Build local image and write its hash to a temporary file.
+IID_FILE="${TMP_DIR}/imgid.txt"
+"${DOCKER}" build \
+ --build-arg LOCAL_UID="$(id -u)" \
+ --build-arg LOCAL_GID="$(id -g)" \
+ --iidfile="${IID_FILE}" \
+ -f "${SCRIPT_DIR}/docker/Dockerfile.local" \
+ "${SCRIPT_DIR}/docker"
+IMAGE_ID="$(cat ${IID_FILE})"
+
+# Parse command line arguments
+INTERACTIVE=false
+ALLOW_PTRACE=false
+while true
+do
+ case "${1:-}" in
+ -i)
+ INTERACTIVE=true
+ shift
+ ;;
+ -p)
+ ALLOW_PTRACE=true
+ shift
+ ;;
+ -*)
+ echo "ERROR: Unknown command line flag: $1" 1>&2
+ echo "Usage: $0 [-i] [-p] <command>"
+ exit 1
+ ;;
+ *)
+ break
+ ;;
+ esac
+done
+
+ARGS=()
+# Run with a pseudo-TTY for nicer logging.
+ARGS+=(-t)
+# Run interactive if this script was invoked with '-i'.
+if [ "${INTERACTIVE}" == "true" ]
+then
+ ARGS+=(-i)
+fi
+# Allow ptrace() syscall if invoked with '-p'.
+if [ "${ALLOW_PTRACE}" == "true" ]
+then
+ echo "WARNING: Docker seccomp profile is disabled!" 1>&2
+ ARGS+=(--cap-add=SYS_PTRACE --security-opt seccomp=unconfined)
+fi
+# Propagate "HAFNIUM_*" environment variables.
+# Note: Cannot use `env | while` because the loop would run inside a child
+# process and would not have any effect on variables in the parent.
+while read -r ENV_LINE
+do
+ VAR_NAME="$(echo ${ENV_LINE} | cut -d= -f1)"
+ case "${VAR_NAME}" in
+ HAFNIUM_HERMETIC_BUILD)
+ # Skip this one. It will be overridden below.
+ ;;
+ HAFNIUM_*)
+ ARGS+=(-e "${ENV_LINE}")
+ ;;
+ esac
+done <<< "$(env)"
+# Set environment variable informing the build that we are running inside
+# a container.
+ARGS+=(-e HAFNIUM_HERMETIC_BUILD=inside)
+# Bind-mount the Hafnium root directory. We mount it at the same absolute
+# location so that all paths match across the host and guest.
+ARGS+=(-v "${ROOT_DIR}":"${ROOT_DIR}")
+# Make all files outside of the Hafnium directory read-only to ensure that all
+# generated files are written there.
+ARGS+=(--read-only)
+# Mount a writable /tmp folder. Required by LLVM/Clang for intermediate files.
+ARGS+=(--tmpfs /tmp)
+# Set working directory.
+ARGS+=(-w "${ROOT_DIR}")
+
+echo "Running in container: $*" 1>&2
+${DOCKER} run \
+ ${ARGS[@]} \
+ "${IMAGE_ID}" \
+ /bin/bash -c "$*"
diff --git a/build/strace_open.sh b/build/strace_open.sh
new file mode 100755
index 0000000..9a9f431
--- /dev/null
+++ b/build/strace_open.sh
@@ -0,0 +1,58 @@
+#!/usr/bin/env bash
+# Copyright 2019 The Hafnium Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+set -euxo pipefail
+
+SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
+SCRIPT_PATH="${SCRIPT_DIR}/$(basename "${BASH_SOURCE[0]}")"
+ROOT_DIR="$(realpath ${SCRIPT_DIR}/..)"
+
+if [ "${HAFNIUM_HERMETIC_BUILD:-}" == "true" ]
+then
+ exec "${ROOT_DIR}/build/run_in_container.sh" -p ${SCRIPT_PATH} $@
+fi
+
+if [ $# != 1 ]
+then
+ echo "Usage: $0 <output_file>" 1>&2
+ exit 1
+fi
+
+MAKE="$(which make)"
+STRACE="$(which strace)"
+
+# Set up a temp directory and register a cleanup function on exit.
+TMP_DIR="$(mktemp -d)"
+function cleanup() {
+ rm -rf "${TMP_DIR}"
+}
+trap cleanup EXIT
+
+STRACE_LOG="${TMP_DIR}/strace.log"
+
+echo "Building with strace"
+pushd ${ROOT_DIR}
+${MAKE} clobber
+${STRACE} \
+ -o "${STRACE_LOG}" \
+ -f \
+ -qq \
+ -e trace=%file,chdir,%process \
+ ${MAKE}
+popd
+
+echo "Processing strace output"
+"${SCRIPT_DIR}/parse_strace_open.py" ${ROOT_DIR} < "${STRACE_LOG}" > "$1"
diff --git a/build/toolchain/BUILD.gn b/build/toolchain/BUILD.gn
new file mode 100644
index 0000000..07a35e2
--- /dev/null
+++ b/build/toolchain/BUILD.gn
@@ -0,0 +1,46 @@
+# Copyright 2018 The Hafnium Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import("//build/toolchain/embedded.gni")
+import("//build/toolchain/host.gni")
+
+host_toolchain("host") {
+ use_platform = false
+}
+
+# Toolchain for building tests which run under Linux under Hafnium.
+embedded_clang_toolchain("aarch64_linux_clang") {
+ target = "aarch64-linux-musleabi"
+
+ # TODO: Remove //inc/system if we can stop using the version of stdatomic.h
+ # from the Android prebuilt Clang.
+ extra_cflags =
+ "-nostdinc -isystem" +
+ rebase_path("//prebuilts/linux-aarch64/musl/include") + " -isystem" +
+ rebase_path("//prebuilts/linux-x64/clang/lib64/clang/9.0.3/include") +
+ " -isystem" + rebase_path("//inc/system")
+ extra_defines = "-D_LIBCPP_HAS_MUSL_LIBC=1 -D_GNU_SOURCE=1"
+ extra_ldflags = "-no-pie -lc --library-path=" +
+ rebase_path("//prebuilts/linux-aarch64/musl/lib/") + " " +
+ rebase_path("//prebuilts/linux-aarch64/musl/lib/crt1.o") +
+ " " + rebase_path(
+ "//prebuilts/linux-x64/clang/lib64/clang/9.0.3/lib/linux/libclang_rt.builtins-aarch64-android.a")
+ toolchain_args = {
+ use_platform = true
+ plat_arch = "fake"
+ plat_boot_flow = "//src/arch/fake:boot_flow"
+ plat_console = "//src/arch/fake:console"
+ plat_iommu = "//src/iommu:absent"
+ }
+}
diff --git a/build/toolchain/embedded.gni b/build/toolchain/embedded.gni
new file mode 100644
index 0000000..fa4aeae
--- /dev/null
+++ b/build/toolchain/embedded.gni
@@ -0,0 +1,356 @@
+# Copyright 2018 The Hafnium Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+declare_args() {
+ # Set by arch toolchain. Prefix for binutils tools.
+ tool_prefix = ""
+
+ # Enable link time optimizations
+ use_lto = true
+}
+
+# Template for embedded toolchains; there is no support for C++ or libraries.
+# Instead, use source_set to group source together.
+template("embedded_cc_toolchain") {
+ toolchain(target_name) {
+ assert(defined(invoker.cc), "\"cc\" must be defined for ${target_name}.")
+ assert(defined(invoker.ld), "\"ld\" must be defined for ${target_name}.")
+
+ # Collect extra flags from the toolchain.
+ extra_defines = ""
+ extra_cflags = "-ffunction-sections -fdata-sections"
+ if (use_lto) {
+ extra_cflags += " -flto"
+ }
+ extra_ldflags = "-pie --gc-sections"
+
+ if (defined(invoker.extra_defines)) {
+ extra_defines += " ${invoker.extra_defines}"
+ }
+ if (defined(invoker.extra_cflags)) {
+ extra_cflags += " ${invoker.extra_cflags}"
+ }
+ if (defined(invoker.extra_ldflags)) {
+ extra_ldflags += " ${invoker.extra_ldflags}"
+ }
+
+ # Define the tools.
+ tool("cc") {
+ depfile = "{{output}}.d"
+ command = "${invoker.cc} -MMD -MF $depfile ${extra_defines} {{defines}} {{include_dirs}} ${extra_cflags} {{cflags}} {{cflags_c}} -c {{source}} -o {{output}}"
+ depsformat = "gcc"
+ description = "CC {{output}}"
+ outputs = [
+ "{{source_out_dir}}/{{target_output_name}}.{{source_name_part}}.o",
+ ]
+ }
+
+ tool("asm") {
+ depfile = "{{output}}.d"
+ command = "${invoker.cc} -MMD -MF $depfile ${extra_defines} {{defines}} {{include_dirs}} ${extra_cflags} {{asmflags}} -c {{source}} -o {{output}}"
+ depsformat = "gcc"
+ description = "ASM {{output}}"
+ outputs = [
+ "{{source_out_dir}}/{{target_output_name}}.{{source_name_part}}.o",
+ ]
+ }
+
+ tool("link") {
+ outfile = "{{output_dir}}/{{target_output_name}}{{output_extension}}"
+ rspfile = "$outfile.rsp"
+ command = "${invoker.ld} ${extra_ldflags} {{ldflags}} -o $outfile --start-group @$rspfile --end-group"
+ description = "LINK $outfile"
+ default_output_dir = "{{root_out_dir}}"
+ rspfile_content = "{{inputs}}"
+ outputs = [
+ outfile,
+ ]
+ }
+
+ tool("alink") {
+ outfile = "{{target_out_dir}}/{{target_output_name}}.a"
+ command = "rm -f $outfile && ${invoker.ar} -rc $outfile {{inputs}}"
+ description = "ALINK $outfile"
+ outputs = [
+ outfile,
+ ]
+ }
+
+ tool("stamp") {
+ command = "touch {{output}}"
+ description = "STAMP {{output}}"
+ }
+
+ tool("copy") {
+ command = "cp -af {{source}} {{output}}"
+ description = "COPY {{source}} {{output}}"
+ }
+
+ toolchain_args = {
+ forward_variables_from(invoker.toolchain_args, "*")
+ }
+ }
+}
+
+# Specialize for clang.
+template("embedded_clang_toolchain") {
+ assert(defined(invoker.target),
+ "\"target\" must be defined for ${target_name}.")
+ assert(defined(invoker.extra_defines),
+ "\"extra_defines\" must be defined for ${target_name}")
+ assert(defined(invoker.extra_cflags),
+ "\"extra_cflags\" must be defined for ${target_name}")
+ assert(defined(invoker.extra_ldflags),
+ "\"extra_ldflags\" must be defined for ${target_name}")
+
+ embedded_cc_toolchain(target_name) {
+ cc = "clang"
+ ld = "ld.lld"
+ ar = "llvm-ar"
+
+ forward_variables_from(invoker,
+ [
+ "extra_defines",
+ "extra_cflags",
+ "extra_ldflags",
+ ])
+
+ # TODO: Remove //inc/system if we can stop using the version of stdatomic.h
+ # from the Android prebuilt Clang.
+ extra_cflags +=
+ " -target ${invoker.target} -fcolor-diagnostics -nostdinc -isystem" +
+ rebase_path("//prebuilts/linux-x64/clang/lib64/clang/9.0.3/include") +
+ " -isystem" + rebase_path("//inc/system")
+ extra_ldflags +=
+ " -O2 -lto-O2 --icf=all --fatal-warnings --color-diagnostics"
+
+ toolchain_args = {
+ if (defined(invoker.toolchain_args)) {
+ forward_variables_from(invoker.toolchain_args, "*")
+ }
+ }
+ }
+}
+
+# Specialize for mixed toolchain with clang and bfd linker.
+template("embedded_clang_bfd_toolchain") {
+ assert(defined(invoker.target),
+ "\"target\" must be defined for ${target_name}.")
+ assert(defined(invoker.tool_prefix),
+ "\"tool_prefix\" must be defined for ${target_name}.")
+ assert(defined(invoker.extra_defines),
+ "\"extra_defines\" must be defined for ${target_name}")
+ assert(defined(invoker.extra_cflags),
+ "\"extra_cflags\" must be defined for ${target_name}")
+ assert(defined(invoker.extra_ldflags),
+ "\"extra_ldflags\" must be defined for ${target_name}")
+
+ embedded_cc_toolchain(target_name) {
+ cc = "clang"
+ ld = "${invoker.tool_prefix}ld.bfd"
+ ar = "llvm-ar"
+
+ forward_variables_from(invoker,
+ [
+ "extra_defines",
+ "extra_cflags",
+ "extra_ldflags",
+ ])
+ extra_cflags += " -target ${invoker.target} -fcolor-diagnostics"
+ extra_ldflags += " --fatal-warnings"
+ if (use_lto) {
+ extra_ldflags += " -O2 -lto-O2 --icf=all"
+ }
+
+ toolchain_args = {
+ if (defined(invoker.toolchain_args)) {
+ forward_variables_from(invoker.toolchain_args, "*")
+ }
+ }
+ }
+}
+
+# Expand to clang variants.
+template("embedded_platform_toolchain") {
+ assert(defined(invoker.arch), "\"arch\" must be defined for ${target_name}.")
+ assert(defined(invoker.target),
+ "\"target\" must be defined for ${target_name}.")
+ assert(defined(invoker.tool_prefix),
+ "\"tool_prefix\" must be defined for ${target_name}.")
+ assert(defined(invoker.origin_address),
+ "\"origin_address\" must be defined for ${target_name}.")
+ assert(defined(invoker.heap_pages),
+ "\"heap_pages\" must be defined for ${target_name}.")
+ assert(defined(invoker.max_cpus),
+ "\"max_cpus\" must be defined for ${target_name}.")
+ assert(defined(invoker.max_vms),
+ "\"max_vms\" must be defined for ${target_name}.")
+ assert(defined(invoker.platform_name),
+ "\"platform_name\" must be defined for ${target_name}.")
+
+ extra_defines = ""
+ extra_cflags = "-fno-builtin -ffreestanding -fpic"
+ extra_ldflags = "--defsym=ORIGIN_ADDRESS=${invoker.origin_address}"
+ if (defined(invoker.extra_defines)) {
+ extra_defines += " ${invoker.extra_defines}"
+ }
+ if (defined(invoker.extra_cflags)) {
+ extra_cflags += " ${invoker.extra_cflags}"
+ }
+ if (defined(invoker.extra_ldflags)) {
+ extra_ldflags += " ${invoker.extra_ldflags}"
+ }
+ toolchain_args = {
+ use_platform = true
+ plat_name = invoker.platform_name
+ plat_arch = invoker.arch
+ plat_heap_pages = invoker.heap_pages
+ plat_max_cpus = invoker.max_cpus
+ plat_max_vms = invoker.max_vms
+ if (defined(invoker.toolchain_args)) {
+ forward_variables_from(invoker.toolchain_args, "*")
+ }
+ }
+
+ embedded_clang_toolchain("${target_name}_clang") {
+ target = invoker.target
+ }
+
+ embedded_clang_bfd_toolchain("${target_name}_clang_bfd") {
+ target = invoker.target
+ tool_prefix = invoker.tool_prefix
+ }
+}
+
+# Specialize for different architectures.
+
+template("aarch64_common_toolchain") {
+ assert(defined(invoker.cpu), "\"cpu\" must be defined for ${target_name}.")
+ assert(defined(invoker.target),
+ "\"target\" must be defined for ${target_name}")
+ assert(defined(invoker.tool_prefix),
+ "\"tool_prefix\" must be defined for ${target_name}")
+ assert(defined(invoker.origin_address),
+ "\"origin_address\" must be defined for ${target_name}.")
+ assert(defined(invoker.console),
+ "\"console\" must be defined for ${target_name}.")
+ assert(defined(invoker.heap_pages),
+ "\"heap_pages\" must be defined for ${target_name}.")
+ assert(defined(invoker.max_cpus),
+ "\"max_cpus\" must be defined for ${target_name}.")
+ assert(defined(invoker.max_vms),
+ "\"max_vms\" must be defined for ${target_name}.")
+ if (invoker.gic_version == 3 || invoker.gic_version == 4) {
+ assert(defined(invoker.gicd_base_address),
+ "\"gicd_base_address\" must be defined for ${target_name}.")
+ assert(defined(invoker.gicr_base_address),
+ "\"gicr_base_address\" must be defined for ${target_name}.")
+ }
+ assert(defined(invoker.platform_name),
+ "\"platform_name\" must be defined for ${target_name}.")
+
+ embedded_platform_toolchain(target_name) {
+ forward_variables_from(invoker,
+ [
+ "origin_address",
+ "heap_pages",
+ "max_cpus",
+ "max_vms",
+ "platform_name",
+ "extra_ldflags",
+ ])
+ arch = "aarch64"
+ target = invoker.target
+ tool_prefix = invoker.tool_prefix
+ extra_cflags = "-mcpu=${invoker.cpu} -mstrict-align"
+ if (defined(invoker.extra_cflags)) {
+ extra_cflags += " ${invoker.extra_cflags}"
+ }
+
+ extra_defines = ""
+ if (defined(invoker.extra_defines)) {
+ extra_defines += " ${invoker.extra_defines}"
+ }
+
+ if (invoker.gic_version > 0) {
+ extra_defines += " -DGIC_VERSION=${invoker.gic_version}"
+ }
+ if (invoker.gic_version == 3 || invoker.gic_version == 4) {
+ extra_defines += " -DGICD_BASE=${invoker.gicd_base_address} -DGICR_BASE=${invoker.gicr_base_address}"
+ }
+
+ toolchain_args = {
+ plat_boot_flow = invoker.boot_flow
+ plat_console = invoker.console
+ plat_iommu = invoker.iommu
+ forward_variables_from(invoker.toolchain_args, "*")
+ }
+ }
+}
+
+template("aarch64_toolchain") {
+ aarch64_common_toolchain("${target_name}") {
+ forward_variables_from(invoker, "*")
+ target = "aarch64-none-eabi"
+ tool_prefix = "aarch64-linux-gnu-" # TODO: this isn't right for bare metal but it works.
+ platform_name = target_name
+ }
+}
+
+template("aarch64_toolchains") {
+ aarch64_toolchain("${target_name}") {
+ forward_variables_from(invoker,
+ [
+ "origin_address",
+ "boot_flow",
+ "console",
+ "iommu",
+ "gic_version",
+ "gicd_base_address",
+ "gicr_base_address",
+ "heap_pages",
+ "max_cpus",
+ "max_vms",
+ "toolchain_args",
+ ])
+ cpu = "${invoker.cpu}+nofp"
+
+ # Add a macro so files can tell whether they are not being built for a VM.
+ extra_defines = " -DVM_TOOLCHAIN=0"
+ }
+
+ # Toolchain for building test VMs which run under Hafnium.
+ aarch64_toolchain("${target_name}_vm") {
+ forward_variables_from(invoker,
+ [
+ "origin_address",
+ "gic_version",
+ "gicd_base_address",
+ "gicr_base_address",
+ "max_cpus",
+ "toolchain_args",
+ ])
+ cpu = "${invoker.cpu}+fp"
+ boot_flow = "//src/arch/fake:boot_flow"
+ console = "//src/arch/aarch64/hftest:console"
+ iommu = "//src/iommu:absent"
+
+ # Nonsense values because they are required but shouldn't be used.
+ heap_pages = 0
+ max_vms = 0
+
+ # Add a macro so files can tell whether they are being built for a VM.
+ extra_defines = " -DVM_TOOLCHAIN=1"
+ }
+}
diff --git a/build/toolchain/gen_offset_size_header.py b/build/toolchain/gen_offset_size_header.py
new file mode 100755
index 0000000..72c4c93
--- /dev/null
+++ b/build/toolchain/gen_offset_size_header.py
@@ -0,0 +1,61 @@
+#!/usr/bin/env python3
+#
+# Copyright 2019 The Hafnium Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Generate a header file with definitions of constants parsed from a binary."""
+
+import argparse
+import os
+import re
+import subprocess
+import sys
+
+HF_ROOT = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
+BINUTILS_ROOT = os.path.join(HF_ROOT, "prebuilts", "linux-x64", "gcc", "bin")
+STRINGS = os.path.join(BINUTILS_ROOT, "aarch64-linux-android-strings")
+
+PROLOGUE = """
+/**
+ * This file was auto-generated by {}.
+ * Changes will be overwritten.
+ */
+
+#pragma once
+
+""".format(__file__)
+
+def main():
+ parser = argparse.ArgumentParser()
+ parser.add_argument("bin_file",
+ help="binary file to be parsed for definitions of constants")
+ parser.add_argument("out_file", help="output file");
+ args = parser.parse_args()
+
+ # Regex for finding definitions: <HAFNIUM_DEFINE name #value />
+ regex = re.compile(r'<HAFNIUM_DEFINE\s([A-Za-z0-9_]+)\s#([0-9]+) />')
+
+ # Extract strings from the input binary file.
+ stdout = subprocess.check_output([ STRINGS, args.bin_file ])
+ stdout = str(stdout).split(os.linesep)
+
+ with open(args.out_file, "w") as f:
+ f.write(PROLOGUE)
+ for line in stdout:
+ for match in regex.findall(line):
+ f.write("#define {} ({})\n".format(
+ match[0], match[1]))
+
+if __name__ == "__main__":
+ sys.exit(main())
diff --git a/build/toolchain/host.gni b/build/toolchain/host.gni
new file mode 100644
index 0000000..feffa11
--- /dev/null
+++ b/build/toolchain/host.gni
@@ -0,0 +1,162 @@
+# Copyright 2018 The Hafnium Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Template for host toolchains.
+template("host_cc_toolchain") {
+ toolchain(target_name) {
+ assert(defined(invoker.ar), "\"ar\" must be defined for ${target_name}.")
+ assert(defined(invoker.cc), "\"cc\" must be defined for ${target_name}.")
+ assert(defined(invoker.cxx), "\"cxx\" must be defined for ${target_name}.")
+
+ # Collect extra flags from the toolchain.
+ extra_defines = ""
+ extra_cflags = ""
+ extra_ldflags = ""
+ if (defined(invoker.extra_defines)) {
+ extra_defines += " ${invoker.extra_defines}"
+ }
+ if (defined(invoker.extra_cflags)) {
+ extra_cflags += " ${invoker.extra_cflags}"
+ }
+ if (defined(invoker.extra_ldflags)) {
+ extra_ldflags += " ${invoker.extra_ldflags}"
+ }
+
+ tool("cc") {
+ depfile = "{{output}}.d"
+ command = "${invoker.cc} -MMD -MF $depfile ${extra_defines} {{defines}} {{include_dirs}} ${extra_cflags} {{cflags}} {{cflags_c}} -c {{source}} -o {{output}}"
+ depsformat = "gcc"
+ description = "CC {{output}}"
+ outputs = [
+ "{{source_out_dir}}/{{target_output_name}}.{{source_name_part}}.o",
+ ]
+ }
+
+ tool("cxx") {
+ depfile = "{{output}}.d"
+ command = "${invoker.cxx} -MMD -MF $depfile ${extra_defines} {{defines}} {{include_dirs}} ${extra_cflags} {{cflags}} {{cflags_cc}} -c {{source}} -o {{output}}"
+ depsformat = "gcc"
+ description = "CXX {{output}}"
+ outputs = [
+ "{{source_out_dir}}/{{target_output_name}}.{{source_name_part}}.o",
+ ]
+ }
+
+ tool("alink") {
+ rspfile = "{{output}}.rsp"
+ command = "rm -f {{output}} && ${invoker.ar} rcs {{output}} @$rspfile"
+ description = "AR {{target_output_name}}{{output_extension}}"
+ rspfile_content = "{{inputs}}"
+ outputs = [
+ "{{target_out_dir}}/{{target_output_name}}{{output_extension}}",
+ ]
+ default_output_extension = ".a"
+ output_prefix = "lib"
+ }
+
+ tool("solink") {
+ soname = "{{target_output_name}}{{output_extension}}" # e.g. "libfoo.so".
+ sofile = "{{output_dir}}/$soname"
+ rspfile = soname + ".rsp"
+
+ command = "${invoker.cxx} -shared ${extra_ldflags} {{ldflags}} -o $sofile -Wl,-soname=$soname @$rspfile"
+ rspfile_content = "-Wl,--whole-archive {{inputs}} {{solibs}} -Wl,--no-whole-archive {{libs}}"
+
+ description = "SOLINK $soname"
+
+ # Use this for {{output_extension}} expansions unless a target manually
+ # overrides it (in which case {{output_extension}} will be what the target
+ # specifies).
+ default_output_extension = ".so"
+
+ # Use this for {{output_dir}} expansions unless a target manually overrides
+ # it (in which case {{output_dir}} will be what the target specifies).
+ default_output_dir = "{{root_out_dir}}"
+
+ outputs = [
+ sofile,
+ ]
+ link_output = sofile
+ depend_output = sofile
+ output_prefix = "lib"
+ }
+
+ tool("link") {
+ outfile = "{{output_dir}}/{{target_output_name}}{{output_extension}}"
+ rspfile = "$outfile.rsp"
+ command = "${invoker.cxx} ${extra_ldflags} {{ldflags}} -o $outfile -Wl,--start-group @$rspfile {{solibs}} -Wl,--end-group {{libs}}"
+ description = "LINK $outfile"
+ default_output_dir = "{{root_out_dir}}"
+ rspfile_content = "{{inputs}}"
+ outputs = [
+ outfile,
+ ]
+ }
+
+ tool("stamp") {
+ command = "touch {{output}}"
+ description = "STAMP {{output}}"
+ }
+
+ tool("copy") {
+ command = "cp -af {{source}} {{output}}"
+ description = "COPY {{source}} {{output}}"
+ }
+
+ if (defined(invoker.toolchain_args)) {
+ toolchain_args = {
+ forward_variables_from(invoker.toolchain_args, "*")
+ }
+ }
+ }
+}
+
+template("host_toolchain") {
+ assert(defined(invoker.use_platform),
+ "\"use_platform\" must be defined for ${target_name}.")
+ if (invoker.use_platform) {
+ assert(defined(invoker.heap_pages),
+ "\"heap_pages\" must be defined for ${target_name}.")
+ assert(defined(invoker.max_cpus),
+ "\"max_cpus\" must be defined for ${target_name}.")
+ assert(defined(invoker.max_vms),
+ "\"max_vms\" must be defined for ${target_name}.")
+ }
+
+ # Specialize for clang.
+ host_cc_toolchain("${target_name}_clang") {
+ ar = "llvm-ar"
+ cc = "clang -fcolor-diagnostics"
+ cxx = "clang++ -fcolor-diagnostics -stdlib=libc++"
+
+ # TODO: remove the need for this
+ extra_defines = "-DPL011_BASE=0"
+
+ if (invoker.use_platform) {
+ toolchain_args = {
+ use_platform = true
+
+ # When building for the ${target_name}, use the fake architecture to make things
+ # testable.
+ plat_arch = "fake"
+ plat_boot_flow = "//src/arch/fake:boot_flow"
+ plat_console = "//src/arch/fake:console"
+ plat_iommu = "//src/iommu:absent"
+ plat_heap_pages = invoker.heap_pages
+ plat_max_cpus = invoker.max_cpus
+ plat_max_vms = invoker.max_vms
+ }
+ }
+ }
+}
diff --git a/build/toolchain/offset_size_header.gni b/build/toolchain/offset_size_header.gni
new file mode 100644
index 0000000..b409284
--- /dev/null
+++ b/build/toolchain/offset_size_header.gni
@@ -0,0 +1,84 @@
+# Copyright 2019 The Hafnium Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This template auto-generates a C header file with "#define" constants, e.g.
+# struct sizes and member offsets.
+#
+# It uses a trick similar to other projects, e.g. Linux, where the integer
+# constant is used as an immediate in an inline assembly block. The source file
+# is compiled and the constant extracted by a script which generates the header
+# file. For easy grep-ing, the constant is compiled into a '.ascii' string,
+# surrounded by magic strings, and extracted using the 'strings' binutils tool.
+#
+# To guarantee correctness, the same source file is compiled again as part
+# of the parent target but this time the declarations are converted to
+# static_asserts to check the values at compile time.
+template("offset_size_header") {
+ target_lib = "${target_name}__lib"
+ target_header = "${target_name}__header"
+
+ # Compile source files into binaries that contain strings with definitions
+ # of constants.
+ static_library(target_lib) {
+ forward_variables_from(invoker,
+ [
+ "sources",
+ "deps",
+ "test_only",
+ ])
+ defines = [ "GENERATE_BINARY" ]
+
+ # Disable LTO to force emitting assembly.
+ cflags = [ "-fno-lto" ]
+ }
+
+ # Extract strings from the static library, parse definitions and generate
+ # a header file.
+ action(target_header) {
+ forward_variables_from(invoker, [ "test_only" ])
+ lib_file = "${target_out_dir}/${target_lib}.a"
+ out_file = "${root_gen_dir}/offset_size_header/${invoker.path}"
+
+ script = "//build/toolchain/gen_offset_size_header.py"
+ args = [
+ rebase_path(lib_file, root_build_dir),
+ rebase_path(out_file, root_build_dir),
+ ]
+ deps = [
+ ":$target_lib",
+ ]
+ outputs = [
+ out_file,
+ ]
+ }
+
+ # This source_set will be compiled into the target that depends on this one.
+ # This generates static_asserts which check the constants in the generated
+ # header against compile-time structs.
+ source_set(target_name) {
+ forward_variables_from(invoker,
+ [
+ "sources",
+ "test_only",
+ ])
+ cflags = [
+ "-include",
+ invoker.path,
+ ]
+ defines = [ "VERIFY_HEADER" ]
+ deps = [
+ ":$target_header",
+ ]
+ }
+}
diff --git a/build/toolchain/platform.gni b/build/toolchain/platform.gni
new file mode 100644
index 0000000..7ff4b5a
--- /dev/null
+++ b/build/toolchain/platform.gni
@@ -0,0 +1,40 @@
+# Copyright 2018 The Hafnium Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Configuration of the build for the platform.
+declare_args() {
+ # The name of the platform.
+ plat_name = ""
+
+ # The architecture of the platform.
+ plat_arch = ""
+
+ # Boot flow driver to be used by the platform, specified as build target.
+ plat_boot_flow = ""
+
+ # Console driver to be used for the platform, specified as build target.
+ plat_console = ""
+
+ # IOMMU driver to be used for the platform, specified as build target.
+ plat_iommu = ""
+
+ # The number of pages to allocate for the hypervisor heap.
+ plat_heap_pages = 0
+
+ # The maximum number of CPUs available on the platform.
+ plat_max_cpus = 1
+
+ # The maximum number of VMs required for the platform.
+ plat_max_vms = 0
+}
diff --git a/docs/Architecture.md b/docs/Architecture.md
new file mode 100644
index 0000000..e0261d8
--- /dev/null
+++ b/docs/Architecture.md
@@ -0,0 +1,199 @@
+# Hafnium architecture
+
+The purpose of Hafnium is to provide memory isolation between a set of security
+domains, to better separate untrusted code from security-critical code. It is
+implemented as a type-1 hypervisor, where each security domain is a VM.
+
+On AArch64 (currently the only supported architecture) it runs at EL2, while the
+VMs it manages run at EL1 (and user space applications within those VMs at EL0).
+A Secure Monitor such as
+[Trusted Firmware-A](https://www.trustedfirmware.org/about/) runs underneath it
+at EL3.
+
+Hafnium provides memory isolation between these VMs by managing their stage 2
+page tables, and using IOMMUs to restrict how DMA devices can be used to access
+memory. It must also prevent them from accessing system resources in a way which
+would allow them to escape this containment. It also provides:
+
+* Means for VMs to communicate with each other through message passing and
+ memory sharing, according to the Arm
+ [Secure Partition Communication Interface (SPCI)](https://developer.arm.com/docs/den0077/a).
+* Emulation of some basic hardware features such as timers.
+* A simple paravirtualised interrupt controller for secondary VMs, as they
+ don't have access to hardware interrupts.
+* A simple logging API for bringup and low-level debugging of VMs.
+
+See the [VM interface](VmInterface.md) documentation for more details.
+
+Hafnium makes a distinction between a **primary VM**, which would typically run
+the main user-facing operating system such as Android, and a number of
+**secondary VMs** which are smaller and exist to provide various services to the
+primary VM. The primary VM typically owns the majority of the system resources,
+and is likely to be more latency-sensitive as it is running user-facing tasks.
+Some of the differences between primary and secondary VMs are explained below.
+
+[TOC]
+
+## Security model
+
+Hafnium runs a set of VMs without trusting any of them. Neither do the VMs trust
+each other. Hafnium aims to prevent malicious software running in one VM from
+compromising any of the other VMs. Specifically, we guarantee
+**confidentiality** and **memory integrity** of each VM: no other VM should be
+able to read or modify the memory that belongs to a VM without that VM's
+consent.
+
+We do not make any guarantees of **availability** of VMs, except for the primary
+VM. In other words, a compromised primary VM may prevent secondary VMs from
+running, but not gain unauthorised access to their memory. A compromised
+secondary VM should not be able to prevent the primary VM or other secondary VMs
+from running.
+
+## Design principles
+
+Hafnium is designed with the following principles in mind:
+
+* Open design
+ * Hafnium is developed as open source, available for all to use,
+ contribute and scrutinise.
+* Economy of mechanism
+ * Hafnium strives to be as small and simple as possible, to reduce the
+ attack surface.
+ * This also makes Hafnium more amenable to formal verification.
+* Least privilege
+ * Each VM is a separate security domain and is given access only to what
+ it needs, to reduce the impact if it is compromised.
+ * Everything that doesn't strictly need to be part of Hafnium itself (in
+ EL2) should be moved to a VM (in EL1).
+* Defence in depth
+ * Hafnium provides an extra layer of security isolation on top of those
+ provided by the OS kernel, to better isolate sensitive workloads from
+ untrusted code.
+
+## VM model
+
+A [VM](../inc/hf/vm.h) in Hafnium consists of:
+
+* A set of memory pages owned by and/or available to the VM, stored in the
+ stage 2 page table managed by Hafnium.
+* One or more vCPUs. (The primary VM always has the same number of vCPUs as
+ the system has physical CPUs; secondary VMs have a configurable number.)
+* A one page TX buffer used for sending messages to other VMs.
+* A one page RX buffer used for receiving messages from other VMs.
+* Some configuration information (VM ID, whitelist of allowed SMCs).
+* Some internal state maintained by Hafnium (locks, mailbox wait lists,
+ mailbox state, log buffer).
+
+Each [vCPU](../inc/hf/vcpu.h) also has:
+
+* A set of saved registers, for when it isn't being run on a physical CPU.
+* A current state (switched off, ready to run, running, waiting for a message
+ or interrupt, aborted).
+* A set of virtual interrupts which may be enabled and/or pending.
+* Some internal locking state.
+
+VMs and their vCPUs are configured statically from a [manifest](Manifest.md)
+read at boot time. There is no way to create or destroy VMs at run time.
+
+## System resources
+
+### CPU
+
+Unlike many other type-1 hypervisors, Hafnium does not include a scheduler.
+Instead, we rely on the primary VM to handle scheduling, calling Hafnium when it
+wants to run a secondary VM's vCPU. This is because:
+
+* In line with our design principles of _economy of mechanism_ and _least
+ privilege_, we prefer to avoid complexity in Hafnium and instead rely on VMs
+ to handle complex tasks.
+* According to our security model, we don't guarantee availability of
+ secondary VMs, so it is acceptable for a compromised primary VM to deny CPU
+ time to secondary VMs.
+* A lot of effort has been put into making the Linux scheduler work well to
+ maintain a responsive user experience without jank, manage power
+ efficiently, and handle heterogeneous CPU architectures such as big.LITTLE.
+ We would rather avoid re-implementing this.
+
+Hafnium therefore maintains a 1:1 mapping of physical CPUs to vCPUs for the
+primary VM, and allows the primary VM to control the power state of physical
+CPUs directly through the standard Arm Power State Coordination Interface
+(PSCI). The primary VM should then create kernel threads for each secondary VM
+vCPU and schedule them to run the vCPUs according to the
+[interface expectations defined by Hafnium](SchedulerExpectations.md). PSCI
+calls made by secondary VMs are handled by Hafnium, to change the state of the
+VM's vCPUs. In the case of (Android) Linux running in the primary VM this is
+handled by the Hafnium kernel module.
+
+#### Example
+
+For example, considering a simple system with a single physical CPU, and a
+single secondary VM with one vCPU, where the primary VM kernel has created
+**thread 1** to run the secondary VM's vCPU while **thread 2** is some other
+normal thread:
+
+![scheduler example sequence diagram](scheduler.png)
+
+1. Scheduler chooses thread 1 to run.
+2. Scheduler runs thread 1, and configures a physical timer to expire once the
+ quantum runs out.
+3. Thread 1 is responsible for running a vCPU, so it asks Hafnium to run it.
+4. Hafnium switches to the secondary VM vCPU.
+5. Eventually the quantum runs out and the physical timer interrupts the CPU.
+6. Hafnium traps the interrupt. Physical interrupts are owned by the primary
+ VM, so it switches back to the primary VM.
+7. The interrupt handler in the primary VM gets invoked, and calls the
+ scheduler.
+8. Scheduler chooses a different thread to run (thread 2).
+9. Scheduler runs thread 2.
+
+### Memory
+
+At boot time each VM owns a mutually exclusive subset of memory pages, as
+configured by the [manifest](Manifest.md). These pages are all identity mapped
+in the stage 2 page table which Hafnium manages for the VM, so that it has full
+access to use them however it wishes.
+
+Hafnium maintains state of which VM **owns** each page, and which VMs have
+**access** to it. It does this using the stage 2 page tables of the VMs, with
+some extra application-defined bits in the page table entries. A VM may share,
+lend or donate memory pages to another VM using the appropriate SPCI requests. A
+given page of memory may never be shared with more than two VMs, either in terms
+of ownership or access. Thus, the following states are possible for each page,
+for some values of X and Y:
+
+* Owned by VM X, accessible only by VM X
+ * This is the initial state for each page, and also the state of a page
+ that has been donated.
+* Owned by VM X, accessible only by VM Y
+ * This state is reached when a page is lent.
+* Owned by VM X, accessible by VMs X and Y
+ * This state is reached when a page is shared.
+
+For now, in the interests of simplicity, Hafnium always uses identity mapping in
+all page tables it manages (stage 2 page tables for VMs, and stage 1 for itself)
+– i.e. the IPA (intermediate physical address) is always equal to the PA
+(physical address) in the stage 2 page table, if it is mapped at all.
+
+### Devices
+
+From Hafnium's point of view a device consists of:
+
+* An MMIO address range (i.e. a set of pages).
+* A set of interrupts that the device may generate.
+* Some IOMMU configuration associated with the device.
+
+For now, each device is associated with exactly one VM, which is statically
+assigned at boot time (through the manifest) and cannot be changed at runtime.
+
+Hafnium is responsible for mapping the device's MMIO pages into the owning VM's
+stage 2 page table with the appropriate attributes, and for configuring the
+IOMMU so that the device can only access the memory that is accessible by its
+owning VM. This needs to be kept in sync as the VM's memory access changes with
+memory sharing operations. Hafnium may also need to re-initialise the IOMMU if
+the device is powered off and powered on again.
+
+The primary VM is responsible for forwarding interrupts to the owning VM, in
+case the device is owned by a secondary VM. This does mean that a compromised
+primary VM may choose not to forward interrupts, or to inject spurious
+interrupts, but this is consistent with our security model that secondary VMs
+are not guaranteed any level of availability.
diff --git a/docs/CodeStructure.md b/docs/CodeStructure.md
new file mode 100644
index 0000000..b3fa8d6
--- /dev/null
+++ b/docs/CodeStructure.md
@@ -0,0 +1,75 @@
+# Code structure
+
+The Hafnium repository contains Hafnium itself, along with unit tests and
+integration tests, a small client library for VMs, a Linux kernel module for the
+primary VM, prebuilt binaries of tools needed for building it and running tests.
+Everything is built with [GN](https://gn.googlesource.com/gn/).
+
+Hafnium can be built for an **architecture**, currently including:
+
+* `aarch64`: 64-bit Armv8
+* `fake`: A dummy architecture used for running unit tests on the host system.
+
+And for a **platform**, such as:
+
+* `aem_v8a_fvp`: The Arm [Fixed Virtual Platform](FVP.md) emulator.
+* `qemu_aarch64`: QEMU emulating an AArch64 device.
+* `rpi4`: A Raspberry Pi 4 board.
+
+Each platform has a single associated architecture.
+
+The source tree is organised as follows:
+
+* [`build`](../build): Common GN configuration, build scripts, and linker
+ script.
+* [`docs`](.): Documentation
+* [`driver/linux`](../driver/linux): Linux kernel driver for Hafnium, for use
+ in the primary VM.
+* [`inc`](../inc): Header files...
+ * [`hf`](../inc/hf): ... internal to Hafnium
+ * [`arch`](../inc/hf/arch): Architecture-dependent modules, which have
+ a common interface but separate implementations per architecture.
+ This includes details of CPU initialisation, exception handling,
+ timers, page table management, and other system registers.
+ * [`plat`](../inc/hf/plat): Platform-dependent modules, which have a
+ common interface but separate implementations per platform. This
+ includes details of the boot flow, and a UART driver for the debug
+ log console.
+ * [`system`](../inc/system): ... headers which are included by the
+ `stdatomic.h` that we use from Android Clang but are not actually
+ needed, so we provide dummy empty versions.
+ * [`vmapi/hf`](../inc/vmapi/hf): ... for the interface exposed to VMs.
+* [`kokoro`](../kokoro): Scripts and configuration for continuous integration
+ and presubmit checks.
+* [`prebuilts`](../prebuilts): Prebuilt binaries needed for building Hafnium
+ or running tests.
+* [`project`](../project): Configuration and extra code for each **project**.
+ A project is a set of one or more _platforms_ (see above) that are built
+ together. Hafnium comes with the [`reference`](../project/reference) project
+ for running it on some common emulators and development boards. To port
+ Hafnium to a new board, you can create a new project under this directory
+ with the platform or platforms you want to add, without affecting the core
+ Hafnium code.
+* [`src`](../src): Source code for Hafnium itself in C and assembly, and
+ [unit tests](Testing.md) in C++.
+ * [`arch`](../src/arch): Implementation of architecture-dependent modules.
+* [`test`](../test): [Integration tests](Testing.md)
+ * [`arch`](../test/arch): Tests for components of Hafnium that need to be
+ run on a real architecture.
+ * [`hftest`](../test/hftest): A simple test framework that supports
+ running tests standalone on bare metal, in VMs under Hafnium, or as
+ user-space binaries under Linux under Hafnium.
+ * [`linux`](../test/linux): Tests which are run in a Linux VM under
+ Hafnium.
+ * [`vmapi`](../test/vmapi): Tests which are run in minimal test VMs under
+ Hafnium.
+ * [`arch`](../test/vmapi/arch): Tests which rely on specific
+ architectural details such as the GIC version.
+ * [`primary_only`](../test/vmapi/primary_only): Tests which run only a
+ single (primary) VM.
+ * [`primary_with_secondaries`](../test/vmapi/primary_with_secondaries):
+ Tests which run with a primary VM and one or more secondary VMs to
+ test how they interact.
+* [`third_party`](../third_party): Third party code needed for building
+ Hafnium.
+* [`vmlib`](../vmlib): A small client library for VMs running under Hafnium.
diff --git a/docs/FVP.md b/docs/FVP.md
new file mode 100644
index 0000000..0db0bef
--- /dev/null
+++ b/docs/FVP.md
@@ -0,0 +1,40 @@
+# Running Hafnium under Arm FVP
+
+Arm offers a series of emulators known as Fixed Virtual Platforms (FVPs), which
+simulate various processors. They are generally more accurate to the hardware
+than QEMU, at the cost of being considerably slower. We support running
+[tests](Testing.md) on the FVP as well as QEMU.
+
+## Set up
+
+1. Download the
+ [Armv8-A Base Platform FVP](https://developer.arm.com/products/system-design/fixed-virtual-platforms)
+ from Arm.
+1. Unzip it to a directory called `fvp` alongside the root directory of your
+ Hafnium checkout.
+
+## Running tests
+
+To run tests with the FVP instead of QEMU, from the root directory of your
+Hafnium checkout:
+
+```shell
+$ make && kokoro/ubuntu/test.sh --fvp
+```
+
+See the `fvp` function in
+[`hftest.py`](http://cs/hafnium/test/hftest/hftest.py?q=symbol:fvp) for details
+on how this works.
+
+## Other resources
+
+When running tests under the FVP we also use a prebuilt version of TF-A, which
+is checked in under
+[`prebuilts/linux-aarch64/arm-trusted-firmware/`](https://hafnium.googlesource.com/hafnium/prebuilts/+/refs/heads/master/linux-aarch64/arm-trusted-firmware/).
+The
+[README](https://hafnium.googlesource.com/hafnium/prebuilts/+/refs/heads/master/linux-aarch64/arm-trusted-firmware/README.md)
+there has details on how it was built. The source code is available from the
+[Arm Trusted Firmware site](https://developer.trustedfirmware.org/dashboard/view/6/).
+
+Documentation of the FVP (including memory maps) is
+[available from Arm](https://static.docs.arm.com/100966/1101/fast_models_fvp_rg_100966_1101_00_en.pdf).
diff --git a/docs/GettingStarted.md b/docs/GettingStarted.md
new file mode 100644
index 0000000..43ef6c5
--- /dev/null
+++ b/docs/GettingStarted.md
@@ -0,0 +1,95 @@
+# Getting started
+
+[TOC]
+
+## Getting the source code
+
+```shell
+git clone --recurse-submodules https://hafnium.googlesource.com/hafnium && (cd hafnium && f=`git rev-parse --git-dir`/hooks/commit-msg ; curl -Lo $f https://gerrit-review.googlesource.com/tools/hooks/commit-msg ; chmod +x $f)
+```
+
+To upload a commit for review:
+
+```shell
+git push origin HEAD:refs/for/master
+```
+
+Browse source at https://hafnium.googlesource.com/hafnium. Review CLs at
+https://hafnium-review.googlesource.com/.
+
+See details of [how to contribute](../CONTRIBUTING.md).
+
+## Compiling the hypervisor
+
+Install prerequisites:
+
+```shell
+sudo apt install make libssl-dev flex bison
+```
+
+By default, the hypervisor is built with clang for a few target platforms along
+with tests. Each project in the `project` directory specifies a root
+configurations of the build. Adding a project is the preferred way to extend
+support to new platforms. The target project that is built is selected by the
+`PROJECT` make variable, the default project is 'reference'.
+
+```shell
+make PROJECT=<project_name>
+```
+
+The compiled image can be found under `out/<project>`, for example the QEMU
+image is at `out/reference/qemu_aarch64_clang/hafnium.bin`.
+
+## Running on QEMU
+
+You will need at least version 2.9 of QEMU. The following command line can be
+used to run Hafnium on it:
+
+```shell
+qemu-system-aarch64 -M virt,gic_version=3 -cpu cortex-a57 -nographic -machine virtualization=true -kernel out/reference/qemu_aarch64_clang/hafnium.bin
+```
+
+Though it is admittedly not very useful because it doesn't have any virtual
+machines to run. Follow the [Hafnium RAM disk](HafniumRamDisk.md) instructions
+to create an initial RAM disk for Hafnium with Linux as the primary VM.
+
+Next, you need to create a manifest which will describe the VM to Hafnium.
+Follow the [Manifest](Manifest.md) instructions and build a DTBO with:
+```
+/dts-v1/;
+/plugin/;
+
+&{/} {
+ hypervisor {
+ compatible = "hafnium,hafnium";
+ vm1 {
+ debug_name = "Linux VM";
+ kernel_filename = "vmlinuz";
+ ramdisk_filename = "initrd.img";
+ };
+ };
+};
+```
+
+Dump the DTB used by QEMU:
+```shell
+qemu-system-aarch64 -M virt,gic_version=3 -cpu cortex-a57 -nographic -machine virtualization=true -kernel out/reference/qemu_aarch64_clang/hafnium.bin -initrd initrd.img -append "rdinit=/sbin/init" -machine dumpdtb=qemu.dtb
+```
+and follow instructions in [Manifest](Manifest.md) to overlay it with the manifest.
+
+The following command line will run Hafnium, with the RAM disk just created,
+which will then boot into the primary Linux VM:
+
+```shell
+qemu-system-aarch64 -M virt,gic_version=3 -cpu cortex-a57 -nographic -machine virtualization=true -kernel out/reference/qemu_aarch64_clang/hafnium.bin -initrd initrd.img -append "rdinit=/sbin/init" -dtb qemu_with_manifest.dtb
+```
+
+## Running tests
+
+After building, presubmit tests can be run with the following command line:
+
+```shell
+./kokoro/ubuntu/test.sh
+```
+
+Read about [testing](Testing.md) for more details about the tests.
diff --git a/docs/HafniumRamDisk.md b/docs/HafniumRamDisk.md
new file mode 100644
index 0000000..c09568d
--- /dev/null
+++ b/docs/HafniumRamDisk.md
@@ -0,0 +1,20 @@
+# Hafnium RAM disk
+
+Hafnium expects to find the following files in the root directory of its RAM
+disk:
+
+* `vmlinuz` -- the kernel of the primary VM.
+* `initrd.img` -- the initial ramdisk of the primary VM.
+* kernels for the secondary VMs, whose names are described in the manifest.
+
+Follow the [preparing Linux](PreparingLinux.md) instructions to produce
+`vmlinuz` and `initrd.img` for a basic Linux primary VM.
+
+## Create a RAM disk for Hafnium
+
+Assuming that a subdirectory called `initrd` contains the files listed in the
+previous section, we can build `initrd.img` with the following command:
+
+```shell
+cd initrd; find . | cpio -o > ../initrd.img; cd -
+```
diff --git a/docs/HermeticBuild.md b/docs/HermeticBuild.md
new file mode 100644
index 0000000..1d3f66a
--- /dev/null
+++ b/docs/HermeticBuild.md
@@ -0,0 +1,95 @@
+# Hermetic build
+
+Hafnium build is not hermetic as it uses some system tools and libraries, e.g.
+`bison` and `libssl`. To ensure consistency and repeatability, the team
+maintains and periodically publishes a container image as the reference build
+environment. The image is hosted on Google Cloud Platform as
+`eu.gcr.io/hafnium-build/hafnium_ci`.
+
+Building inside a container is always enabled only for Kokoro pre-submit tests
+but can be enabled for local builds too. It is disabled by default as it
+requires the use of Docker which currently supports rootless containers only in
+nightly builds. As rootless container tools mature, Hafnium may change the
+default settings. For now, running the hermetic build locally is intended
+primarily to reproduce issues in pre-submit tests.
+
+[TOC]
+
+## Installing Docker
+
+### Stable
+
+If you don't mind running a Docker daemon with root privileges on your system,
+you can follow the [official guide](https://docs.docker.com/install/) to install
+Docker, or [go/installdocker](https://goto.google.com/installdocker) if you are
+a Googler.
+
+Because the daemon runs as root, files generated by the container are owned by
+root as well. To work around this, the build will automatically derive a local
+container image from the base container, adding user `hafnium` with the same
+UID/GID as the local user.
+
+### Nightly with rootless
+
+The latest nightly version of Docker has support for running containers with
+user namespaces, thus eliminating the need for a daemon with root privileges. It
+can be installed into the local user's `bin` directory with a script:
+
+```shell
+curl -fsSL https://get.docker.com/rootless -o get-docker.sh
+sh get-docker.sh
+```
+
+The script will also walk you through the installation of dependencies, changes
+to system configuration files and environment variable values needed by the
+client to discover the rootless daemon.
+
+## Enabling for local builds
+
+Hermetic builds are controlled by the `HAFNIUM_HERMETIC_BUILD` environment
+variable. Setting it to `true` instructs the build to run commands inside the
+container. Any other value disables the feature.
+
+To always enable hermetic builds, put this line in your `~/.bashrc`:
+
+```shell
+export HAFNIUM_HERMETIC_BUILD=true
+```
+
+When you now run `make`, you should see the following line:
+
+```shell
+$ make
+Running in container: make all
+...
+```
+
+## Running commands inside the container
+
+An arbitrary command can be executed inside the container with
+`build/run_in_container.sh [-i] <command> ...`. This is done automatically
+inside `Makefile` and `kokoro/ubuntu/build.sh` which detect whether they are
+already running inside the container and respawn themselves using
+`run_in_container.sh` if not.
+
+For example, you can spawn a shell with:
+
+```shell
+./build/run_in_container.sh -i bash
+```
+
+## Building container image
+
+The container image is defined in `build/docker/Dockerfile` and can be built
+locally:
+
+```shell
+./build/docker/build.sh
+```
+
+Owners of the `hafnium-build` GCP repository can publish the new image (requires
+[go/cloud-sdk](https://goto.google.com/cloud-sdk) installed and authenticated):
+
+```shell
+./build/docker/publish.sh
+```
diff --git a/docs/Manifest.md b/docs/Manifest.md
new file mode 100644
index 0000000..8552fb1
--- /dev/null
+++ b/docs/Manifest.md
@@ -0,0 +1,98 @@
+# Hafnium Manifest
+
+[TOC]
+
+## Format
+
+The format of the manifest is a simple DeviceTree overlay:
+
+```
+/dts-v1/;
+/plugin/;
+
+&{/} {
+ hypervisor {
+ compatible = "hafnium,hafnium";
+
+ vm1 {
+ debug_name = "name";
+ kernel_filename = "vmlinuz";
+ ramdisk_filename = "initrd.img";
+ };
+
+ vm2 {
+ debug_name = "name";
+ kernel_filename = "filename";
+ vcpu_count = <N>;
+ mem_size = <M>;
+ };
+ ...
+ };
+};
+```
+
+Note: `&{/}` is a syntactic sugar expanded by the DTC compiler. Make sure to
+use the DTC in `prebuilts/` as the version packaged with your OS may not support
+it yet.
+
+## Example
+
+The following manifest defines a primary VM with two secondary VMs. The first
+secondary VM has 1MB of memory, 2 CPUs and kernel image called `kernel0`
+(matches filename in Hafnium's [ramdisk](HafniumRamDisk.md)). The second has 2MB
+of memory, 4 CPUs and, by omitting the `kernel_filename` property, a kernel
+preloaded into memory. The primary VM is given all remaining memory, the same
+number of CPUs as the hardware, a kernel image called `vmlinuz` and a ramdisk
+`initrd.img`. Secondaries cannot have a ramdisk.
+
+```
+/dts-v1/;
+/plugin/;
+
+&{/} {
+ hypervisor {
+ compatible = "hafnium,hafnium";
+
+ vm1 {
+ debug_name = "primary VM";
+ kernel_filename = "vmlinuz";
+ ramdisk_filename = "initrd.img";
+
+ smc_whitelist = <
+ 0x04000000
+ 0x3200ffff
+ >;
+ };
+
+ vm2 {
+ debug_name = "secondary VM 1";
+ kernel_filename = "kernel0";
+ vcpu_count = <2>;
+ mem_size = <0x100000>;
+
+ smc_whitelist_permissive;
+ };
+
+ vm3 {
+ debug_name = "secondary VM 2";
+ vcpu_count = <4>;
+ mem_size = <0x200000>;
+ };
+ };
+};
+```
+
+## Compiling
+
+Hafnium expects the manifest as part of the board FDT, i.e. DeviceTree in binary
+format (DTB).
+
+First, compile the manifest into a DTBO (binary overlay):
+```shell
+prebuilts/linux-x64/dtc/dtc -I dts -O dtb --out-version 17 -o manifest.dtbo <manifest_source_file>
+```
+
+Then overlay it with the DTB of your board:
+```shell
+prebuilts/linux-x64/dtc/fdtoverlay -i <board DTB> -o <output DTB> manifest.dtbo
+```
diff --git a/docs/PreparingLinux.md b/docs/PreparingLinux.md
new file mode 100644
index 0000000..3332096
--- /dev/null
+++ b/docs/PreparingLinux.md
@@ -0,0 +1,88 @@
+# Preparing Linux
+
+To boot Linux, a kernel image (`vmlinuz`) and a suitable initial RAM disk
+(`initrd.img`) need to be created.
+
+[TOC]
+
+## Build the kernel
+
+The Linux kernel for the primary VM can be built using the following
+command-line:
+
+```shell
+git clone https://github.com/torvalds/linux.git
+cd linux
+ARCH=arm64 CROSS_COMPILE=aarch64-linux-gnu- make defconfig
+ARCH=arm64 CROSS_COMPILE=aarch64-linux-gnu- make -j24
+```
+
+The compiled image is stored in `arch/arm64/boot/Image`. This will later be
+copied to the Hafnium RAM disk's root as `vmlinuz`.
+
+## Build the kernel Module
+
+From the Hafnium root directory, the following commands can be used to compile
+the kernel module, replacing `<kernel-path>` with the path to the kernel checked
+out in the previous section:
+
+```shell
+cd hafnium/driver/linux/
+ARCH=arm64 CROSS_COMPILE=aarch64-linux-gnu- KERNEL_PATH=<kernel-path> make
+```
+
+The compiled module is called `hafnium.ko`, and will later be copied into the
+RAM disk for Linux.
+
+## Build Busybox
+
+To make Linux useful, it needs a shell. These following instructions will
+construct a file system for the Linux RAM disk with the BusyBox shell as the
+init process.
+
+```shell
+git clone git://busybox.net/busybox.git
+cd busybox
+ARCH=arm64 CROSS_COMPILE=aarch64-linux-gnu- make defconfig
+ARCH=arm64 CROSS_COMPILE=aarch64-linux-gnu- make menuconfig
+```
+
+At this point you should ensure that the option `Settings > Build static binary
+(no shared libs)` is selected. Then you can proceed with the following commands:
+
+```shell
+ARCH=arm64 CROSS_COMPILE=aarch64-linux-gnu- make -j24
+ARCH=arm64 CROSS_COMPILE=aarch64-linux-gnu- make install
+cd _install
+mkdir proc
+mkdir sys
+mkdir -p etc/init.d
+cat <<EOF > etc/init.d/rcS
+#!/bin/sh
+mount -t proc none /proc
+mount -t sysfs none /sys
+EOF
+chmod u+x etc/init.d/rcS
+grep -v tty ../examples/inittab > ./etc/inittab
+```
+
+## Create a RAM disk for Linux
+
+At this point you can copy into the current directory additional files you may
+want in the RAM disk, for example, the kernel module built in the previous
+section. Assuming the BusyBox root directory is in the same parent directory as
+the Hafnium root directory:
+
+```shell
+cp ../../hafnium/driver/linux/hafnium.ko .
+```
+
+Then run the following commands:
+
+```shell
+find . | cpio -o -H newc | gzip > ../initrd.img
+cd ..
+```
+
+The resulting file is `initrd.img`. It should be copied to the Hafnium RAM
+disk's root.
diff --git a/docs/README.md b/docs/README.md
new file mode 100644
index 0000000..635da70
--- /dev/null
+++ b/docs/README.md
@@ -0,0 +1,20 @@
+# Documentation index
+
+* [README](../README.md)
+* [Contributing](../CONTRIBUTING.md)
+* [Style guide](StyleGuide.md)
+
+## Building and running Hafnium
+* [Getting started](GettingStarted.md)
+* [Building a RAM disk with VMs for Hafnium to run](HafniumRamDisk.md)
+* [Manifest format](Manifest.md)
+* [Building a Linux image for Hafnium](PreparingLinux.md)
+* [Building Hafnium hermetically with Docker](HermeticBuild.md)
+* [Running Hafnium under Arm FVP](FVP.md)
+
+## Design details
+* [Architecture](Architecture.md)
+* [Code structure](CodeStructure.md)
+* [Test infrastructure](Testing.md)
+* [The interface Hafnium provides to VMs](VmInterface.md)
+* [Scheduler VM expectations](SchedulerExpectations.md)
diff --git a/docs/SchedulerExpectations.md b/docs/SchedulerExpectations.md
new file mode 100644
index 0000000..7dfe193
--- /dev/null
+++ b/docs/SchedulerExpectations.md
@@ -0,0 +1,107 @@
+# Scheduler VM expectations
+
+Hafnium requires there to be a special 'primary' or 'scheduler' VM which is
+responsible for scheduling the other VMs. There are some particular expectations
+on this VM that are required for the rest of the system to function normally.
+
+[TOC]
+
+## Scheduling
+
+The scheduler VM is responsible for scheduling the vCPUs of all the other VMs.
+It should request information about the VMs in the system using the
+`SPCI_PARTITION_INFO_GET` function, and then schedule their vCPUs as it wishes.
+The recommended way of doing this is to create a kernel thread for each vCPU,
+which will repeatedly run that vCPU by calling `SPCI_RUN`.
+
+`SPCI_RUN` will return one of several possible functions, which must be handled
+as follows:
+
+### `SPCI_INTERRUPT`
+
+The vCPU has been preempted but still has work to do. If the scheduling quantum
+has not expired, the scheduler MUST call `hf_vcpu_run` on the vCPU to allow it
+to continue.
+
+### `SPCI_YIELD`
+
+The vCPU has voluntarily yielded the CPU. The scheduler SHOULD take a scheduling
+decision to give cycles to those that need them but MUST call `hf_vcpu_run` on
+the vCPU at a later point.
+
+### `SPCI_MSG_WAIT`
+
+The vCPU is blocked waiting for a message. The scheduler MUST take it off the
+run queue and not call `SPCI_RUN` on the vCPU until it has either:
+
+* injected an interrupt
+* sent it a message
+* received `HF_SPCI_RUN_WAKE_UP` for it from another vCPU
+* the timeout provided in `w2` is not `SPCI_SLEEP_INDEFINITE` and the
+ specified duration has expired.
+
+### `SPCI_MSG_SEND`
+
+A message has been sent by the vCPU. If the recipient is the scheduler VM itself
+then it can handle it as it pleases. Otherwise the scheduler MUST run a vCPU
+from the recipient VM and priority SHOULD be given to those vCPUs that are
+waiting for a message. The scheduler should call `SPCI_RUN` again on the
+sending VM as usual.
+
+### `SPCI_RX_RELEASE`
+
+The vCPU has made the mailbox writable and there are pending waiters. The
+scheduler MUST call `hf_mailbox_waiter_get()` repeatedly and notify all waiters
+by injecting an `HF_MAILBOX_WRITABLE_INTID` interrupt. The scheduler should call
+`SPCI_RUN` again on the sending VM as usual.
+
+### `HF_SPCI_RUN_WAIT_FOR_INTERRUPT`
+
+_This is a Hafnium-specific function not part of the SPCI standard._
+
+The vCPU is blocked waiting for an interrupt. The scheduler MUST take it off the
+run queue and not call `SPCI_RUN` on the vCPU until it has either:
+
+* injected an interrupt
+* received `HF_SPCI_RUN_WAKE_UP` for it from another vCPU
+* the timeout provided in `w2` is not `SPCI_SLEEP_INDEFINITE` and the
+ specified duration has expired.
+
+### `HF_SPCI_RUN_WAKE_UP`
+
+_This is a Hafnium-specific function not part of the SPCI standard._
+
+Hafnium would like `hf_vcpu_run` to be called on another vCPU, specified by
+`hf_vcpu_run_return.wake_up`. The scheduler MUST either wake the vCPU in
+question up if it is blocked, or preempt and re-run it if it is already running
+somewhere. This gives Hafnium a chance to update any CPU state which might have
+changed. The scheduler should call `SPCI_RUN` again on the sending VM as usual.
+
+### `SPCI_ERROR`
+
+#### `SPCI_ABORTED`
+
+The vCPU has aborted, triggering the whole VM to abort. The scheduler MUST treat
+this the same as `HF_SPCI_RUN_WAKE_UP` for all the other vCPUs of the VM. For
+this vCPU the scheduler SHOULD either never call `SPCI_RUN` on the vCPU again,
+or treat it the same as `HF_SPCI_RUN_WAIT_FOR_INTERRUPT`.
+
+#### Any other error code
+
+This should not happen if the scheduler VM has called `SPCI_RUN` correctly, but
+in case there is some other error it should be logged. The scheduler SHOULD
+either try again or suspend the vCPU indefinitely.
+
+## Interrupt handling
+
+The scheduler VM is responsible for handling all hardware interrupts. Many of
+these will be intended for the scheduler VM itself and it can handle them as
+usual. However, it must also:
+
+* Enable, handle and ignore interrupts for the non-secure hypervisor physical
+ timer (PPI 10, IRQ 26).
+* Forward interrupts intended for secondary VMs to an appropriate vCPU of the
+ VM by calling `hf_interrupt_inject` and then running the vCPU as usual with
+ `SPCI_RUN`. (If the vCPU is already running at the time that
+ `hf_interrupt_inject` is called then it must be preempted and run again so
+ that Hafnium can inject the interrupt.)
diff --git a/docs/StyleGuide.md b/docs/StyleGuide.md
new file mode 100644
index 0000000..3cdd290
--- /dev/null
+++ b/docs/StyleGuide.md
@@ -0,0 +1,83 @@
+# Style guide
+
+Hafnium's coding style has been based on the
+[Linux style](https://www.kernel.org/doc/html/v4.17/process/coding-style.html)
+with explicit modifications:
+
+* Always use braces for conditionals and loops. (No SSL `goto fail;`, thanks.)
+
+Following this, we generally fall back to the subset of the
+[Google C++ style guide](https://google.github.io/styleguide/cppguide.html) that
+is applicable to C.
+
+We try to automate this where possible with clang-format and clang-tidy but that
+doesn't capture everything we'd like today. Where the style enforced by this
+tooling conflicts with what is in this document we accept what the tooling
+requires, and try to improve it if possible.
+
+[TOC]
+
+## Clarifications
+
+* Yes, it does mean all variables are declared, C90-style, at the top of
+ scope, even those loop induction variables.
+* Linux encourages no braces around single-statement branches. We follow
+ Google and require braces around all scope blocks.
+
+## Naming symbols
+
+* Arch-specific functions should start with `arch_`.
+* Platform-specific functions should start with `plat_`.
+* Non-static functions should generally start with the name of the file they
+ are declared in (after the `arch_` or `plat_` prefix if appropriate), though
+ there are quite a few exceptions to this rule.
+* Prefer `x_count` over `num_x`.
+
+## Prose
+
+These rules apply to comments and other natural language text.
+
+* Capitalize acronyms.
+ * CPU, vCPU, VM, EL2, SPCI, QEMU
+* Spell out Hafnium in full, not Hf.
+* Use single spaces.
+* Sentences end with full stops.
+* If the comment fits on one line use `/* */`, otherwise space it out:
+
+ ```
+ /*
+ * Informative long comment
+ * with extra information.
+ */
+ ```
+
+* Doc-ish comments start with `/**`.
+
+ * Use for:
+ * Function definitions (not declarations)
+ * Struct declarations
+ * Enum values
+ * Do not use for:
+ * Macros
+ * Definitions of globals
+
+* References to code symbols use backticks, e.g. `` `my_symbol` ``.
+
+## Coding practices
+
+* Function macros should be functions instead, that way you get types.
+* Lock ordering is described at the top of *api.c*.
+* Use opaque types to avoid implicit casts when it will help avoid mistakes.
+ e.g. *addr.h*
+* Avoid inline casting. C doesn't give much protection so be formal about the
+ transformations. e.g. *addr.h*
+* If a function acquires a resource, there must be a single exit path to free
+ the resource. Tracking down multiple exit points is hard and requires
+ duplicated code which is harder. This may require splitting functions into
+ subfunctions. Early exit is okay if there aren't any clean up tasks.
+* Don't use function pointers. It makes analysis hard and is often a target of
+ attacks.
+* Be liberal with CHECK. Use it to assert pre-/post- conditions.
+* No self-modifying code.
+* Build targets should include all the direct dependencies for their sources,
+ where possible, rather than relying on transitive dependencies.
diff --git a/docs/Testing.md b/docs/Testing.md
new file mode 100644
index 0000000..d79aafc
--- /dev/null
+++ b/docs/Testing.md
@@ -0,0 +1,82 @@
+# Testing
+
+[TOC]
+
+## Overview
+
+Hafnium has 4 main kinds of tests:
+
+1. Host tests
+ * Unit tests of core functionality, e.g. page table manipulation.
+ * Source in `src/*_test.cc`.
+ * Using the [Google Test](https://github.com/google/googletest) framework,
+ built against 'fake' architecture (`src/arch/fake`).
+1. Arch tests
+ * Architecture-specific unit tests, e.g. MMU setup.
+ * Source under `test/arch`.
+ * Using our own _hftest_ framework, with `standalone_main.c`.
+ * Build own hypervisor image, run in EL2.
+1. VM API tests
+ * Exercise hypervisor API from both primary and secondary VMs.
+ * Source under `test/vmapi`.
+ * Tests are run from the primary VM under a normal build of the Hafnium
+ hypervisor, possibly communicating with a helper service in one or more
+ secondary VMs.
+ * Using our own _hftest_ framework, with `standalone_main.c` for the
+ primary VM and `hftest_service.c` for secondary VMs.
+ * Build own primary and secondary VMs, run in EL1 under actual Hafnium
+ image.
+1. Linux tests
+ * Exercise the Hafnium Linux kernel module.
+ * Source under `test/linux`.
+ * Tests are run from userspace (PID 1) under Linux in the primary VM under
+ Hafnium, possibly with other secondary VMs.
+ * Using our own _hftest_ framework, with `linux_main.c`.
+
+Host tests run directly on the host machine where they are built, whereas the
+other 3 types can run under an emulator such as QEMU, or on real hardware.
+
+## Presubmit
+
+Presubmit builds everything, runs all tests and checks the source for formatting
+and lint errors. This can be run locally with:
+
+```shell
+./kokoro/ubuntu/build.sh
+```
+
+Or to just run the tests after having built everything manually run:
+
+```shell
+./kokoro/ubuntu/test.sh
+```
+
+## QEMU tests
+
+These tests boot Hafnium on QEMU and the VMs make calls to Hafnium to test its
+behaviour. They can also be run on the Arm [FVP](FVP.md) and in some cases on
+real hardware.
+
+### hftest
+
+Having a framework for tests makes them easier to read and write. _hftest_ is a
+framework to meet the needs of VM based tests for Hafnium. It consists of:
+
+* assertions
+* test declarations
+* base VM image
+* driver script
+
+Assertions should be familiar from other testing libraries. They make use of
+C11's `_Generic` expressions for type genericity.
+
+Test declarations name the test and the suite that the test is part of.
+Declarations are converted into descriptors stored in the `.hftest` section of
+the VM image which allows the image to inspect the structure of the tests it
+contains. The linker sorts the descriptors by their symbol name which is how
+descriptors from the same suite are grouped together for easier parsing.
+
+The base VM image offers a command line interface, via the bootargs, to query
+the tests in the image and to run specific tests. The driver script uses this
+interface to execute tests, each with a fresh QEMU boot to give a fresh
+environment.
diff --git a/docs/VmInterface.md b/docs/VmInterface.md
new file mode 100644
index 0000000..47de251
--- /dev/null
+++ b/docs/VmInterface.md
@@ -0,0 +1,177 @@
+# VM interface
+
+This page provides an overview of the interface Hafnium provides to VMs. Hafnium
+makes a distinction between the 'primary VM', which controls scheduling and has
+more direct access to some hardware, and 'secondary VMs' which exist mostly to
+provide services to the primary VM, and have a more paravirtualised interface.
+The intention is that the primary VM can run a mostly unmodified operating
+system (such as Linux) with the addition of a Hafnium driver which
+[fulfils certain expectations](SchedulerExpectations.md), while secondary VMs
+will run more specialised trusted OSes or bare-metal code which is designed with
+Hafnium in mind.
+
+The interface documented here is what is planned for the first release of
+Hafnium, not necessarily what is currently implemented.
+
+[TOC]
+
+## CPU scheduling
+
+The primary VM will have one vCPU for each physical CPU, and control the
+scheduling.
+
+Secondary VMs will have a configurable number of vCPUs, scheduled on arbitrary
+physical CPUs at the whims of the primary VM scheduler.
+
+All VMs will start with a single active vCPU. Subsequent vCPUs can be started
+through PSCI.
+
+## PSCI
+
+The primary VM will be able to control the physical CPUs through the following
+PSCI 1.1 calls, which will be forwarded to the underlying implementation in EL3:
+
+* PSCI_VERSION
+* PSCI_FEATURES
+* PSCI_SYSTEM_OFF
+* PSCI_SYSTEM_RESET
+* PSCI_AFFINITY_INFO
+* PSCI_CPU_SUSPEND
+* PSCI_CPU_OFF
+* PSCI_CPU_ON
+
+All other PSCI calls are unsupported.
+
+Secondary VMs will be able to control their vCPUs through the following PSCI 1.1
+calls, which will be implemented by Hafnium:
+
+* PSCI_VERSION
+* PSCI_FEATURES
+* PSCI_AFFINITY_INFO
+* PSCI_CPU_SUSPEND
+* PSCI_CPU_OFF
+* PSCI_CPU_ON
+
+All other PSCI calls are unsupported.
+
+## Hardware timers
+
+The primary VM will have access to both the physical and virtual EL1 timers
+through the usual control registers (`CNT[PV]_TVAL_EL0` and `CNT[PV]_CTL_EL0`).
+
+Secondary VMs will have access to the virtual timer only, which will be emulated
+with help from the kernel driver in the primary VM.
+
+## Interrupts
+
+The primary VM will have direct access to control the physical GIC, and receive
+all interrupts (other than anything already trapped by TrustZone). It will be
+responsible for forwarding any necessary interrupts to secondary VMs. The
+Interrupt Translation Service (ITS) will be disabled by Hafnium so that it
+cannot be used to circumvent access controls.
+
+Secondary VMs will have access to a simple paravirtualized interrupt controller
+through two hypercalls: one to enable or disable a given virtual interrupt ID,
+and one to get and acknowledge the next pending interrupt. There is no concept
+of interrupt priorities or a distinction between edge and level triggered
+interrupts. Secondary VMs may also inject interrupts into their own vCPUs.
+
+## Performance counters
+
+VMs will be blocked from accessing performance counter registers (for the
+performance monitor extensions described in chapter D5 of the Armv8-A reference
+manual) in production, to prevent them from being used as a side channel to leak
+data between VMs.
+
+Hafnium may allow VMs to use them in debug builds.
+
+## Debug registers
+
+VMs will be blocked from accessing debug registers in production builds, to
+prevent them from being used to circumvent access controls.
+
+Hafnium may allow VMs to use these registers in debug builds.
+
+## RAS Extension registers
+
+Secondary VMs will be blocked from using registers associated with the RAS
+Extension.
+
+## Asynchronous message passing
+
+VMs will be able to send messages of up to 4 KiB to each other asynchronously,
+with no queueing, as specified by SPCI.
+
+## Memory
+
+VMs will statically be given access to mutually-exclusive regions of the
+physical address space at boot. This includes MMIO space for controlling
+devices, plus a fixed amount of RAM for secondaries, and all remaining address
+space to the primary. Note that this means that only one VM can control any
+given page of MMIO registers for a device.
+
+VMs may choose to donate or share their memory with other VMs at runtime. Any
+given page may be shared with at most 2 VMs at once (including the original
+owning VM). Memory which has been donated or shared may not be forcefully
+reclaimed, but the VM with which it was shared may choose to return it.
+
+## Cache
+
+VMs will be blocked from using cache maintenance instructions that operate by
+set/way. These operations are difficult to virtualize, and could expose the
+system to side-channel attacks.
+
+## Logging
+
+VMs may send a character to a shared log by means of a hypercall or SMC call.
+These log messages will be buffered per VM to make complete lines, then output
+to a Hafnium-owned UART and saved in a shared ring buffer which may be extracted
+from RAM dumps. VM IDs will be prepended to these logs.
+
+This log API is intended for use in early bringup and low-level debugging. No
+sensitive data should be logged through it. Higher level logs can be sent to the
+primary VM through the asynchronous message passing mechanism described above,
+or through shared memory.
+
+## Configuration
+
+Hafnium will read configuration from a flattened device tree blob (FDT). This
+may either be the same device tree used for the other details of the system or a
+separate minimal one just for Hafnium. This will include at least:
+
+* The available RAM.
+* The number of secondary VMs, how many vCPUs each should have, how much
+ memory to assign to each of them, and where to load their initial images.
+ (Most likely the initial image will be a minimal loader supplied with
+ Hafnium which will validate and load the rest of the image from the primary
+ later on.)
+* Which devices exist on the system, their details (MMIO regions, interrupts
+ and SYSMMU details), and which VM each is assigned to.
+ * A single physical device may be split into multiple logical ‘devices’
+ from Hafnium’s point of view if necessary to have different VMs own
+ different parts of it.
+* A whitelist of which SMC calls each VM is allowed to make.
+
+## Failure handling
+
+If a secondary VM tries to do something it shouldn't, Hafnium will either inject
+a fault or kill it and inform the primary VM. The primary VM may choose to
+restart the system or to continue without the secondary VM.
+
+If the primary VM tries to do something it shouldn't, Hafnium will either inject
+a fault or restart the system.
+
+## TrustZone communication
+
+The primary VM will be able to communicate with a TEE running in TrustZone
+either through SPCI messages or through whitelisted SMC calls, and through
+shared memory.
+
+## Other SMC calls
+
+Other than the PSCI calls described above and those used to communicate with
+Hafnium, all other SMC calls will be blocked by default. Hafnium will allow SMC
+calls to be whitelisted on a per-VM, per-function ID basis, as part of the
+static configuration described above. These whitelisted SMC calls will be
+forwarded to the EL3 handler with the client ID (as described by the SMCCC) set
+to the calling VM's ID.
diff --git a/docs/scheduler.png b/docs/scheduler.png
new file mode 100644
index 0000000..afce871
--- /dev/null
+++ b/docs/scheduler.png
Binary files differ
diff --git a/driver/linux/.gitignore b/driver/linux/.gitignore
new file mode 100644
index 0000000..8fd2089
--- /dev/null
+++ b/driver/linux/.gitignore
@@ -0,0 +1,7 @@
+*.ko
+*.mod.c
+*.o
+.*o.cmd
+.tmp_versions/
+Module.symvers
+modules.order
diff --git a/driver/linux/BUILD.gn b/driver/linux/BUILD.gn
new file mode 100644
index 0000000..6c7d688
--- /dev/null
+++ b/driver/linux/BUILD.gn
@@ -0,0 +1,21 @@
+# Copyright 2019 The Hafnium Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import("//build/linux/linux.gni")
+
+linux_kernel_module("linux") {
+ module_name = "hafnium"
+ kernel_target = "//third_party/linux:linux"
+ kernel_dir = "//third_party/linux"
+}
diff --git a/driver/linux/LICENSE b/driver/linux/LICENSE
new file mode 100644
index 0000000..d159169
--- /dev/null
+++ b/driver/linux/LICENSE
@@ -0,0 +1,339 @@
+ GNU GENERAL PUBLIC LICENSE
+ Version 2, June 1991
+
+ Copyright (C) 1989, 1991 Free Software Foundation, Inc.,
+ 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The licenses for most software are designed to take away your
+freedom to share and change it. By contrast, the GNU General Public
+License is intended to guarantee your freedom to share and change free
+software--to make sure the software is free for all its users. This
+General Public License applies to most of the Free Software
+Foundation's software and to any other program whose authors commit to
+using it. (Some other Free Software Foundation software is covered by
+the GNU Lesser General Public License instead.) You can apply it to
+your programs, too.
+
+ When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+this service if you wish), that you receive source code or can get it
+if you want it, that you can change the software or use pieces of it
+in new free programs; and that you know you can do these things.
+
+ To protect your rights, we need to make restrictions that forbid
+anyone to deny you these rights or to ask you to surrender the rights.
+These restrictions translate to certain responsibilities for you if you
+distribute copies of the software, or if you modify it.
+
+ For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must give the recipients all the rights that
+you have. You must make sure that they, too, receive or can get the
+source code. And you must show them these terms so they know their
+rights.
+
+ We protect your rights with two steps: (1) copyright the software, and
+(2) offer you this license which gives you legal permission to copy,
+distribute and/or modify the software.
+
+ Also, for each author's protection and ours, we want to make certain
+that everyone understands that there is no warranty for this free
+software. If the software is modified by someone else and passed on, we
+want its recipients to know that what they have is not the original, so
+that any problems introduced by others will not reflect on the original
+authors' reputations.
+
+ Finally, any free program is threatened constantly by software
+patents. We wish to avoid the danger that redistributors of a free
+program will individually obtain patent licenses, in effect making the
+program proprietary. To prevent this, we have made it clear that any
+patent must be licensed for everyone's free use or not licensed at all.
+
+ The precise terms and conditions for copying, distribution and
+modification follow.
+
+ GNU GENERAL PUBLIC LICENSE
+ TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+ 0. This License applies to any program or other work which contains
+a notice placed by the copyright holder saying it may be distributed
+under the terms of this General Public License. The "Program", below,
+refers to any such program or work, and a "work based on the Program"
+means either the Program or any derivative work under copyright law:
+that is to say, a work containing the Program or a portion of it,
+either verbatim or with modifications and/or translated into another
+language. (Hereinafter, translation is included without limitation in
+the term "modification".) Each licensee is addressed as "you".
+
+Activities other than copying, distribution and modification are not
+covered by this License; they are outside its scope. The act of
+running the Program is not restricted, and the output from the Program
+is covered only if its contents constitute a work based on the
+Program (independent of having been made by running the Program).
+Whether that is true depends on what the Program does.
+
+ 1. You may copy and distribute verbatim copies of the Program's
+source code as you receive it, in any medium, provided that you
+conspicuously and appropriately publish on each copy an appropriate
+copyright notice and disclaimer of warranty; keep intact all the
+notices that refer to this License and to the absence of any warranty;
+and give any other recipients of the Program a copy of this License
+along with the Program.
+
+You may charge a fee for the physical act of transferring a copy, and
+you may at your option offer warranty protection in exchange for a fee.
+
+ 2. You may modify your copy or copies of the Program or any portion
+of it, thus forming a work based on the Program, and copy and
+distribute such modifications or work under the terms of Section 1
+above, provided that you also meet all of these conditions:
+
+ a) You must cause the modified files to carry prominent notices
+ stating that you changed the files and the date of any change.
+
+ b) You must cause any work that you distribute or publish, that in
+ whole or in part contains or is derived from the Program or any
+ part thereof, to be licensed as a whole at no charge to all third
+ parties under the terms of this License.
+
+ c) If the modified program normally reads commands interactively
+ when run, you must cause it, when started running for such
+ interactive use in the most ordinary way, to print or display an
+ announcement including an appropriate copyright notice and a
+ notice that there is no warranty (or else, saying that you provide
+ a warranty) and that users may redistribute the program under
+ these conditions, and telling the user how to view a copy of this
+ License. (Exception: if the Program itself is interactive but
+ does not normally print such an announcement, your work based on
+ the Program is not required to print an announcement.)
+
+These requirements apply to the modified work as a whole. If
+identifiable sections of that work are not derived from the Program,
+and can be reasonably considered independent and separate works in
+themselves, then this License, and its terms, do not apply to those
+sections when you distribute them as separate works. But when you
+distribute the same sections as part of a whole which is a work based
+on the Program, the distribution of the whole must be on the terms of
+this License, whose permissions for other licensees extend to the
+entire whole, and thus to each and every part regardless of who wrote it.
+
+Thus, it is not the intent of this section to claim rights or contest
+your rights to work written entirely by you; rather, the intent is to
+exercise the right to control the distribution of derivative or
+collective works based on the Program.
+
+In addition, mere aggregation of another work not based on the Program
+with the Program (or with a work based on the Program) on a volume of
+a storage or distribution medium does not bring the other work under
+the scope of this License.
+
+ 3. You may copy and distribute the Program (or a work based on it,
+under Section 2) in object code or executable form under the terms of
+Sections 1 and 2 above provided that you also do one of the following:
+
+ a) Accompany it with the complete corresponding machine-readable
+ source code, which must be distributed under the terms of Sections
+ 1 and 2 above on a medium customarily used for software interchange; or,
+
+ b) Accompany it with a written offer, valid for at least three
+ years, to give any third party, for a charge no more than your
+ cost of physically performing source distribution, a complete
+ machine-readable copy of the corresponding source code, to be
+ distributed under the terms of Sections 1 and 2 above on a medium
+ customarily used for software interchange; or,
+
+ c) Accompany it with the information you received as to the offer
+ to distribute corresponding source code. (This alternative is
+ allowed only for noncommercial distribution and only if you
+ received the program in object code or executable form with such
+ an offer, in accord with Subsection b above.)
+
+The source code for a work means the preferred form of the work for
+making modifications to it. For an executable work, complete source
+code means all the source code for all modules it contains, plus any
+associated interface definition files, plus the scripts used to
+control compilation and installation of the executable. However, as a
+special exception, the source code distributed need not include
+anything that is normally distributed (in either source or binary
+form) with the major components (compiler, kernel, and so on) of the
+operating system on which the executable runs, unless that component
+itself accompanies the executable.
+
+If distribution of executable or object code is made by offering
+access to copy from a designated place, then offering equivalent
+access to copy the source code from the same place counts as
+distribution of the source code, even though third parties are not
+compelled to copy the source along with the object code.
+
+ 4. You may not copy, modify, sublicense, or distribute the Program
+except as expressly provided under this License. Any attempt
+otherwise to copy, modify, sublicense or distribute the Program is
+void, and will automatically terminate your rights under this License.
+However, parties who have received copies, or rights, from you under
+this License will not have their licenses terminated so long as such
+parties remain in full compliance.
+
+ 5. You are not required to accept this License, since you have not
+signed it. However, nothing else grants you permission to modify or
+distribute the Program or its derivative works. These actions are
+prohibited by law if you do not accept this License. Therefore, by
+modifying or distributing the Program (or any work based on the
+Program), you indicate your acceptance of this License to do so, and
+all its terms and conditions for copying, distributing or modifying
+the Program or works based on it.
+
+ 6. Each time you redistribute the Program (or any work based on the
+Program), the recipient automatically receives a license from the
+original licensor to copy, distribute or modify the Program subject to
+these terms and conditions. You may not impose any further
+restrictions on the recipients' exercise of the rights granted herein.
+You are not responsible for enforcing compliance by third parties to
+this License.
+
+ 7. If, as a consequence of a court judgment or allegation of patent
+infringement or for any other reason (not limited to patent issues),
+conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot
+distribute so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you
+may not distribute the Program at all. For example, if a patent
+license would not permit royalty-free redistribution of the Program by
+all those who receive copies directly or indirectly through you, then
+the only way you could satisfy both it and this License would be to
+refrain entirely from distribution of the Program.
+
+If any portion of this section is held invalid or unenforceable under
+any particular circumstance, the balance of the section is intended to
+apply and the section as a whole is intended to apply in other
+circumstances.
+
+It is not the purpose of this section to induce you to infringe any
+patents or other property right claims or to contest validity of any
+such claims; this section has the sole purpose of protecting the
+integrity of the free software distribution system, which is
+implemented by public license practices. Many people have made
+generous contributions to the wide range of software distributed
+through that system in reliance on consistent application of that
+system; it is up to the author/donor to decide if he or she is willing
+to distribute software through any other system and a licensee cannot
+impose that choice.
+
+This section is intended to make thoroughly clear what is believed to
+be a consequence of the rest of this License.
+
+ 8. If the distribution and/or use of the Program is restricted in
+certain countries either by patents or by copyrighted interfaces, the
+original copyright holder who places the Program under this License
+may add an explicit geographical distribution limitation excluding
+those countries, so that distribution is permitted only in or among
+countries not thus excluded. In such case, this License incorporates
+the limitation as if written in the body of this License.
+
+ 9. The Free Software Foundation may publish revised and/or new versions
+of the General Public License from time to time. Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+Each version is given a distinguishing version number. If the Program
+specifies a version number of this License which applies to it and "any
+later version", you have the option of following the terms and conditions
+either of that version or of any later version published by the Free
+Software Foundation. If the Program does not specify a version number of
+this License, you may choose any version ever published by the Free Software
+Foundation.
+
+ 10. If you wish to incorporate parts of the Program into other free
+programs whose distribution conditions are different, write to the author
+to ask for permission. For software which is copyrighted by the Free
+Software Foundation, write to the Free Software Foundation; we sometimes
+make exceptions for this. Our decision will be guided by the two goals
+of preserving the free status of all derivatives of our free software and
+of promoting the sharing and reuse of software generally.
+
+ NO WARRANTY
+
+ 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
+FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN
+OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
+PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
+OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS
+TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE
+PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
+REPAIR OR CORRECTION.
+
+ 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
+REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
+INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
+OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
+TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
+YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
+PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGES.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+convey the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+ <one line to give the program's name and a brief idea of what it does.>
+ Copyright (C) <year> <name of author>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License along
+ with this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+Also add information on how to contact you by electronic and paper mail.
+
+If the program is interactive, make it output a short notice like this
+when it starts in an interactive mode:
+
+ Gnomovision version 69, Copyright (C) year name of author
+ Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+ This is free software, and you are welcome to redistribute it
+ under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License. Of course, the commands you use may
+be called something other than `show w' and `show c'; they could even be
+mouse-clicks or menu items--whatever suits your program.
+
+You should also get your employer (if you work as a programmer) or your
+school, if any, to sign a "copyright disclaimer" for the program, if
+necessary. Here is a sample; alter the names:
+
+ Yoyodyne, Inc., hereby disclaims all copyright interest in the program
+ `Gnomovision' (which makes passes at compilers) written by James Hacker.
+
+ <signature of Ty Coon>, 1 April 1989
+ Ty Coon, President of Vice
+
+This General Public License does not permit incorporating your program into
+proprietary programs. If your program is a subroutine library, you may
+consider it more useful to permit linking proprietary applications with the
+library. If this is what you want to do, use the GNU Lesser General
+Public License instead of this License.
diff --git a/driver/linux/Makefile b/driver/linux/Makefile
new file mode 100644
index 0000000..0add537
--- /dev/null
+++ b/driver/linux/Makefile
@@ -0,0 +1,45 @@
+# Copyright 2018 The Hafnium Authors.
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# version 2 as published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+
+# By default, assume this was checked out as a submodule of the Hafnium repo
+# and that Linux was checked out alongside that checkout. These paths can be
+# overridden if that assumption is incorrect.
+HAFNIUM_PATH ?= $(CURDIR)/../..
+
+ifneq ($(KERNELRELEASE),)
+
+obj-m += hafnium.o
+
+hafnium-y += main.o
+hafnium-y += vmlib/aarch64/call.o
+hafnium-y += vmlib/spci.o
+
+ccflags-y = -I$(HAFNIUM_PATH)/inc/vmapi -I$(M)/inc
+
+else
+
+KERNEL_PATH ?= $(HAFNIUM_PATH)/third_party/linux
+ARCH ?= arm64
+CROSS_COMPILE ?= aarch64-linux-gnu-
+CHECKPATCH ?= $(KERNEL_PATH)/scripts/checkpatch.pl -q
+
+all:
+	cp -r $(HAFNIUM_PATH)/vmlib/ $(CURDIR)
+	$(MAKE) -C $(KERNEL_PATH) HAFNIUM_PATH=$(HAFNIUM_PATH) M=$(CURDIR) O=$(O) ARCH=$(ARCH) CROSS_COMPILE=$(CROSS_COMPILE) modules
+
+clean:
+	$(MAKE) -C $(KERNEL_PATH) HAFNIUM_PATH=$(HAFNIUM_PATH) M=$(CURDIR) O=$(O) clean
+	rm -rf vmlib
+
+checkpatch:
+	$(CHECKPATCH) -f main.c
+
+endif
diff --git a/inc/uapi/hf/socket.h b/driver/linux/inc/uapi/hf/socket.h
similarity index 100%
rename from inc/uapi/hf/socket.h
rename to driver/linux/inc/uapi/hf/socket.h
diff --git a/main.c b/driver/linux/main.c
similarity index 100%
rename from main.c
rename to driver/linux/main.c
diff --git a/inc/hf/abort.h b/inc/hf/abort.h
new file mode 100644
index 0000000..f5548e0
--- /dev/null
+++ b/inc/hf/abort.h
@@ -0,0 +1,21 @@
+/*
+ * Copyright 2019 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <stdnoreturn.h>
+
+noreturn void abort(void);
diff --git a/inc/hf/addr.h b/inc/hf/addr.h
new file mode 100644
index 0000000..7845aa9
--- /dev/null
+++ b/inc/hf/addr.h
@@ -0,0 +1,159 @@
+/*
+ * Copyright 2018 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "hf/arch/types.h"
+
+/** An opaque type for a physical address. */
+typedef struct {
+ uintpaddr_t pa;
+} paddr_t;
+
+/** An opaque type for an intermediate physical address. */
+typedef struct {
+ uintpaddr_t ipa;
+} ipaddr_t;
+
+/** An opaque type for a virtual address. */
+typedef struct {
+ uintvaddr_t va;
+} vaddr_t;
+
+/**
+ * Constructs a physical address wrapping the raw value `p`.
+ */
+static inline paddr_t pa_init(uintpaddr_t p)
+{
+	return (paddr_t){p};
+}
+
+/**
+ * Extracts the absolute physical address as a raw integer value.
+ */
+static inline uintpaddr_t pa_addr(paddr_t pa)
+{
+	return pa.pa;
+}
+
+/**
+ * Returns the physical address `n` bytes past `pa`.
+ */
+static inline paddr_t pa_add(paddr_t pa, size_t n)
+{
+	return (paddr_t){.pa = pa.pa + n};
+}
+
+/**
+ * Computes the number of bytes from `start` up to (and excluding) `end`.
+ */
+static inline size_t pa_difference(paddr_t start, paddr_t end)
+{
+	return (size_t)(end.pa - start.pa);
+}
+
+/**
+ * Initializes an intermediate physical address.
+ */
+static inline ipaddr_t ipa_init(uintpaddr_t ipa)
+{
+	return (ipaddr_t){.ipa = ipa};
+}
+
+/**
+ * Extracts the absolute intermediate physical address as a raw integer value.
+ */
+static inline uintpaddr_t ipa_addr(ipaddr_t ipa)
+{
+	return ipa.ipa;
+}
+
+/**
+ * Returns the intermediate physical address `n` bytes past `ipa`.
+ */
+static inline ipaddr_t ipa_add(ipaddr_t ipa, size_t n)
+{
+	return (ipaddr_t){.ipa = ipa.ipa + n};
+}
+
+/**
+ * Constructs a virtual address wrapping the raw value `v`.
+ */
+static inline vaddr_t va_init(uintvaddr_t v)
+{
+	return (vaddr_t){v};
+}
+
+/**
+ * Extracts the absolute virtual address as a raw integer value.
+ */
+static inline uintvaddr_t va_addr(vaddr_t va)
+{
+	return va.va;
+}
+
+/**
+ * Reinterprets a physical address as a virtual address.
+ */
+static inline vaddr_t va_from_pa(paddr_t pa)
+{
+	return va_init(pa.pa);
+}
+
+/**
+ * Reinterprets a physical address as an intermediate physical address.
+ */
+static inline ipaddr_t ipa_from_pa(paddr_t pa)
+{
+	return ipa_init(pa.pa);
+}
+
+/**
+ * Reinterprets a virtual address as a physical address.
+ */
+static inline paddr_t pa_from_va(vaddr_t va)
+{
+	return pa_init(va.va);
+}
+
+/**
+ * Reinterprets an intermediate physical address as a physical address.
+ */
+static inline paddr_t pa_from_ipa(ipaddr_t ipa)
+{
+	return pa_init(ipa.ipa);
+}
+
+/**
+ * Converts a pointer into an opaque virtual address.
+ */
+static inline vaddr_t va_from_ptr(const void *p)
+{
+	return va_init((uintvaddr_t)p);
+}
+
+/**
+ * Converts a virtual address back into a pointer. Only valid when the
+ * virtual address is mapped for the calling context.
+ * TODO: check the mapping for a range and return a memiter?
+ */
+static inline void *ptr_from_va(vaddr_t va)
+{
+	return (void *)va.va;
+}
diff --git a/inc/hf/api.h b/inc/hf/api.h
new file mode 100644
index 0000000..cfbff86
--- /dev/null
+++ b/inc/hf/api.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright 2018 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include "hf/cpu.h"
+#include "hf/mpool.h"
+#include "hf/vm.h"
+
+#include "vmapi/hf/call.h"
+#include "vmapi/hf/spci.h"
+
+void api_init(struct mpool *ppool);
+spci_vm_count_t api_vm_get_count(void);
+spci_vcpu_count_t api_vcpu_get_count(spci_vm_id_t vm_id,
+ const struct vcpu *current);
+void api_regs_state_saved(struct vcpu *vcpu);
+int64_t api_mailbox_writable_get(const struct vcpu *current);
+int64_t api_mailbox_waiter_get(spci_vm_id_t vm_id, const struct vcpu *current);
+int64_t api_debug_log(char c, struct vcpu *current);
+
+struct vcpu *api_preempt(struct vcpu *current);
+struct vcpu *api_wait_for_interrupt(struct vcpu *current);
+struct vcpu *api_vcpu_off(struct vcpu *current);
+struct vcpu *api_abort(struct vcpu *current);
+struct vcpu *api_wake_up(struct vcpu *current, struct vcpu *target_vcpu);
+
+int64_t api_interrupt_enable(uint32_t intid, bool enable, struct vcpu *current);
+uint32_t api_interrupt_get(struct vcpu *current);
+int64_t api_interrupt_inject(spci_vm_id_t target_vm_id,
+ spci_vcpu_index_t target_vcpu_idx, uint32_t intid,
+ struct vcpu *current, struct vcpu **next);
+
+struct spci_value api_spci_msg_send(spci_vm_id_t sender_vm_id,
+ spci_vm_id_t receiver_vm_id, uint32_t size,
+ uint32_t attributes, struct vcpu *current,
+ struct vcpu **next);
+struct spci_value api_spci_msg_recv(bool block, struct vcpu *current,
+ struct vcpu **next);
+struct spci_value api_spci_rx_release(struct vcpu *current, struct vcpu **next);
+struct spci_value api_spci_rxtx_map(ipaddr_t send, ipaddr_t recv,
+ uint32_t page_count, struct vcpu *current,
+ struct vcpu **next);
+void api_yield(struct vcpu *current, struct vcpu **next);
+struct spci_value api_spci_version(void);
+struct spci_value api_spci_id_get(const struct vcpu *current);
+struct spci_value api_spci_features(uint32_t function_id);
+struct spci_value api_spci_run(spci_vm_id_t vm_id, spci_vcpu_index_t vcpu_idx,
+ const struct vcpu *current, struct vcpu **next);
diff --git a/inc/hf/arch/cpu.h b/inc/hf/arch/cpu.h
new file mode 100644
index 0000000..a7fe5a8
--- /dev/null
+++ b/inc/hf/arch/cpu.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright 2018 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <stdbool.h>
+#include <stddef.h>
+#include <stdint.h>
+
+#include "hf/arch/types.h"
+
+#include "hf/addr.h"
+#include "hf/vcpu.h"
+
+#include "vmapi/hf/spci.h"
+
+/**
+ * Reset the register values other than the PC and argument which are set with
+ * `arch_regs_set_pc_arg()`.
+ */
+void arch_regs_reset(struct vcpu *vcpu);
+
+/**
+ * Updates the given registers so that when a vCPU runs, it starts off at the
+ * given address (pc) with the given argument.
+ *
+ * This function must only be called on an arch_regs that is known not to be in
+ * by any other physical CPU.
+ */
+void arch_regs_set_pc_arg(struct arch_regs *r, ipaddr_t pc, uintreg_t arg);
+
+/**
+ * Updates the register holding the return value of a function.
+ *
+ * This function must only be called on an arch_regs that is known not to be in
+ * by any other physical CPU.
+ */
+void arch_regs_set_retval(struct arch_regs *r, struct spci_value v);
+
+/**
+ * Initialize and reset CPU-wide register values.
+ */
+void arch_cpu_init(void);
diff --git a/inc/hf/arch/init.h b/inc/hf/arch/init.h
new file mode 100644
index 0000000..0ec80a5
--- /dev/null
+++ b/inc/hf/arch/init.h
@@ -0,0 +1,25 @@
+/*
+ * Copyright 2019 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+/**
+ * Performs arch specific boot time initialization.
+ *
+ * It must only be called once, on first boot and must be called as early as
+ * possible.
+ */
+void arch_one_time_init(void);
diff --git a/inc/hf/arch/irq.h b/inc/hf/arch/irq.h
new file mode 100644
index 0000000..c1ee943
--- /dev/null
+++ b/inc/hf/arch/irq.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright 2019 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+/**
+ * Disables interrupts.
+ */
+void arch_irq_disable(void);
+
+/**
+ * Enables interrupts.
+ */
+void arch_irq_enable(void);
diff --git a/inc/hf/arch/mm.h b/inc/hf/arch/mm.h
new file mode 100644
index 0000000..e8cfa36
--- /dev/null
+++ b/inc/hf/arch/mm.h
@@ -0,0 +1,164 @@
+/*
+ * Copyright 2018 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <stdbool.h>
+#include <stddef.h>
+
+#include "hf/addr.h"
+
+/*
+ * A page table entry (PTE) will take one of the following forms:
+ *
+ * 1. absent : There is no mapping.
+ * 2. invalid block : Represents a block that is not in the address space.
+ * 3. valid block : Represents a block that is in the address space.
+ * 4. table : Represents a reference to a table of PTEs.
+ */
+
+/**
+ * Creates an absent PTE.
+ */
+pte_t arch_mm_absent_pte(uint8_t level);
+
+/**
+ * Creates a table PTE.
+ */
+pte_t arch_mm_table_pte(uint8_t level, paddr_t pa);
+
+/**
+ * Creates a block PTE.
+ */
+pte_t arch_mm_block_pte(uint8_t level, paddr_t pa, uint64_t attrs);
+
+/**
+ * Checks whether a block is allowed at the given level of the page table.
+ */
+bool arch_mm_is_block_allowed(uint8_t level);
+
+/**
+ * Determines if a PTE is present i.e. it contains information and therefore
+ * needs to exist in the page table. Any non-absent PTE is present.
+ */
+bool arch_mm_pte_is_present(pte_t pte, uint8_t level);
+
+/**
+ * Determines if a PTE is valid i.e. it can affect the address space. Tables and
+ * valid blocks fall into this category. Invalid blocks do not as they hold
+ * information about blocks that are not in the address space.
+ */
+bool arch_mm_pte_is_valid(pte_t pte, uint8_t level);
+
+/**
+ * Determines if a PTE is a block and represents an address range, valid or
+ * invalid.
+ */
+bool arch_mm_pte_is_block(pte_t pte, uint8_t level);
+
+/**
+ * Determines if a PTE represents a reference to a table of PTEs.
+ */
+bool arch_mm_pte_is_table(pte_t pte, uint8_t level);
+
+/**
+ * Clears the bits of an address that are ignored by the page table. In effect,
+ * the address is rounded down to the start of the corresponding PTE range.
+ */
+paddr_t arch_mm_clear_pa(paddr_t pa);
+
+/**
+ * Extracts the start address of the PTE range.
+ */
+paddr_t arch_mm_block_from_pte(pte_t pte, uint8_t level);
+
+/**
+ * Extracts the address of the table referenced by the PTE.
+ */
+paddr_t arch_mm_table_from_pte(pte_t pte, uint8_t level);
+
+/**
+ * Extracts the attributes of the PTE.
+ */
+uint64_t arch_mm_pte_attrs(pte_t pte, uint8_t level);
+
+/**
+ * Merges the attributes of a block into those of its containing table.
+ */
+uint64_t arch_mm_combine_table_entry_attrs(uint64_t table_attrs,
+ uint64_t block_attrs);
+
+/**
+ * Invalidates the given range of stage-1 TLB.
+ */
+void arch_mm_invalidate_stage1_range(vaddr_t va_begin, vaddr_t va_end);
+
+/**
+ * Invalidates the given range of stage-2 TLB.
+ */
+void arch_mm_invalidate_stage2_range(ipaddr_t va_begin, ipaddr_t va_end);
+
+/**
+ * Writes back the given range of virtual memory to such a point that all cores
+ * and devices will see the updated values. The corresponding cache lines are
+ * also invalidated.
+ */
+void arch_mm_flush_dcache(void *base, size_t size);
+
+/**
+ * Gets the maximum level allowed in the page table for stage-1.
+ */
+uint8_t arch_mm_stage1_max_level(void);
+
+/**
+ * Gets the maximum level allowed in the page table for stage-2.
+ */
+uint8_t arch_mm_stage2_max_level(void);
+
+/**
+ * Gets the number of concatenated page tables used at the root for stage-1.
+ *
+ * Tables are concatenated at the root to avoid introducing another level in the
+ * page table meaning the table is shallow and wide. Each level is an extra
+ * memory access when walking the table so keeping it shallow reduces the memory
+ * accesses to aid performance.
+ */
+uint8_t arch_mm_stage1_root_table_count(void);
+
+/**
+ * Gets the number of concatenated page tables used at the root for stage-2.
+ */
+uint8_t arch_mm_stage2_root_table_count(void);
+
+/**
+ * Converts the mode into stage-1 attributes for a block PTE.
+ */
+uint64_t arch_mm_mode_to_stage1_attrs(uint32_t mode);
+
+/**
+ * Converts the mode into stage-2 attributes for a block PTE.
+ */
+uint64_t arch_mm_mode_to_stage2_attrs(uint32_t mode);
+
+/**
+ * Converts the stage-2 block attributes back to the corresponding mode.
+ */
+uint32_t arch_mm_stage2_attrs_to_mode(uint64_t attrs);
+
+/**
+ * Initializes the arch specific memory management.
+ */
+bool arch_mm_init(paddr_t table);
diff --git a/inc/hf/arch/plat/smc.h b/inc/hf/arch/plat/smc.h
new file mode 100644
index 0000000..29493fc
--- /dev/null
+++ b/inc/hf/arch/plat/smc.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright 2019 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include "vmapi/hf/spci.h"
+
+/**
+ * Called after an SMC has been forwarded. `args` contains the arguments passed
+ * to the SMC and `ret` contains the return values that will be set in the vCPU
+ * registers after this call returns.
+ */
+void plat_smc_post_forward(struct spci_value args, struct spci_value *ret);
diff --git a/inc/hf/arch/std.h b/inc/hf/arch/std.h
new file mode 100644
index 0000000..9fd3ec7
--- /dev/null
+++ b/inc/hf/arch/std.h
@@ -0,0 +1,110 @@
+/*
+ * Copyright 2018 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <stddef.h>
+#include <stdint.h>
+
+#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
+
+int memcmp(const void *a, const void *b, size_t n);
+
+int strcmp(const char *a, const char *b);
+
+#define ctz(x) __builtin_ctz(x)
+
+/* Compatibility with old compilers */
+#ifndef __has_builtin
+#define __has_builtin(x) 0
+#endif
+
+/**
+ * Check whether the value `v` is aligned to the boundary `a`,
+ * with `a` power of 2. `a` is evaluated in `uintptr_t` width.
+ */
+#if __has_builtin(__builtin_is_aligned)
+#define is_aligned(v, a) __builtin_is_aligned((v), (a))
+#else
+#define is_aligned(v, a) (((uintptr_t)(v) & ((uintptr_t)(a)-1)) == 0)
+#endif
+
+/**
+ * Align up the value `v` to the boundary `a`, with `a` power of 2.
+ */
+#if __has_builtin(__builtin_align_up)
+#define align_up(v, a) __builtin_align_up((v), (a))
+#else
+#define align_up(v, a) (((uintptr_t)(v) + ((uintptr_t)(a)-1)) & ~((uintptr_t)(a)-1))
+#endif
+
+/**
+ * Align down the value `v` to the boundary `a`, with `a` power of 2.
+ */
+#if __has_builtin(__builtin_align_down)
+#define align_down(v, a) __builtin_align_down((v), (a))
+#else
+#define align_down(v, a) ((uintptr_t)(v) & ~((uintptr_t)(a)-1))
+#endif
+
+#ifndef be16toh
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+
+#define be16toh(v) __builtin_bswap16(v)
+#define be32toh(v) __builtin_bswap32(v)
+#define be64toh(v) __builtin_bswap64(v)
+
+#define htobe16(v) __builtin_bswap16(v)
+#define htobe32(v) __builtin_bswap32(v)
+#define htobe64(v) __builtin_bswap64(v)
+
+#define le16toh(v) (v)
+#define le32toh(v) (v)
+#define le64toh(v) (v)
+
+#define htole16(v) (v)
+#define htole32(v) (v)
+#define htole64(v) (v)
+
+#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+
+#define be16toh(v) (v)
+#define be32toh(v) (v)
+#define be64toh(v) (v)
+
+#define htobe16(v) (v)
+#define htobe32(v) (v)
+#define htobe64(v) (v)
+
+#define le16toh(v) __builtin_bswap16(v)
+#define le32toh(v) __builtin_bswap32(v)
+#define le64toh(v) __builtin_bswap64(v)
+
+#define htole16(v) __builtin_bswap16(v)
+#define htole32(v) __builtin_bswap32(v)
+#define htole64(v) __builtin_bswap64(v)
+
+#else
+
+/*
+ * __BYTE_ORDER__ != __ORDER_LITTLE_ENDIAN__ &&
+ * __BYTE_ORDER__ != __ORDER_BIG_ENDIAN__
+ */
+
+#error "Unsupported byte order"
+
+#endif
+#endif
diff --git a/inc/hf/arch/timer.h b/inc/hf/arch/timer.h
new file mode 100644
index 0000000..cd2a433
--- /dev/null
+++ b/inc/hf/arch/timer.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright 2018 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <stdbool.h>
+#include <stdint.h>
+
+#include "hf/arch/types.h"
+
+/**
+ * Sets the bit to mask virtual timer interrupts.
+ */
+void arch_timer_mask(struct arch_regs *regs);
+
+/**
+ * Checks whether the virtual timer is enabled and its interrupt not masked.
+ */
+bool arch_timer_enabled(struct arch_regs *regs);
+
+/**
+ * Returns the number of nanoseconds remaining on the virtual timer as stored in
+ * the given `arch_regs`, or 0 if it has already expired. This is undefined if
+ * the timer is not enabled.
+ */
+uint64_t arch_timer_remaining_ns(struct arch_regs *regs);
+
+/**
+ * Returns whether the timer is ready to fire: i.e. it is enabled, not masked,
+ * and the condition is met.
+ */
+bool arch_timer_pending(struct arch_regs *regs);
+
+/**
+ * Checks whether the virtual timer is enabled and its interrupt not masked, for
+ * the currently active vCPU.
+ */
+bool arch_timer_enabled_current(void);
+
+/**
+ * Disable the virtual timer for the currently active vCPU.
+ */
+void arch_timer_disable_current(void);
+
+/**
+ * Returns the number of nanoseconds remaining on the virtual timer of the
+ * currently active vCPU, or 0 if it has already expired. This is undefined if
+ * the timer is not enabled.
+ */
+uint64_t arch_timer_remaining_ns_current(void);
diff --git a/inc/hf/arch/vm.h b/inc/hf/arch/vm.h
new file mode 100644
index 0000000..5612388
--- /dev/null
+++ b/inc/hf/arch/vm.h
@@ -0,0 +1,24 @@
+/*
+ * Copyright 2019 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include "hf/vm.h"
+
+/**
+ * Set architecture-specific features for the specified VM.
+ */
+void arch_vm_features_set(struct vm *vm);
diff --git a/inc/hf/boot_flow.h b/inc/hf/boot_flow.h
new file mode 100644
index 0000000..39d7060
--- /dev/null
+++ b/inc/hf/boot_flow.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright 2019 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include "hf/boot_params.h"
+#include "hf/manifest.h"
+#include "hf/memiter.h"
+#include "hf/mm.h"
+
+bool boot_flow_init(const struct fdt_node *fdt_root, struct manifest *manifest,
+ struct boot_params *boot_params);
+
+bool boot_flow_update(struct mm_stage1_locked stage1_locked,
+ const struct manifest *manifest,
+ struct boot_params_update *p, struct memiter *cpio,
+ struct mpool *ppool);
diff --git a/inc/hf/boot_params.h b/inc/hf/boot_params.h
new file mode 100644
index 0000000..a55ee73
--- /dev/null
+++ b/inc/hf/boot_params.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright 2018 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <stdbool.h>
+
+#include "hf/arch/cpu.h"
+
+#include "hf/fdt.h"
+#include "hf/mm.h"
+#include "hf/mpool.h"
+
+#define MAX_MEM_RANGES 20
+
+struct mem_range {
+ paddr_t begin;
+ paddr_t end;
+};
+
+struct boot_params {
+ cpu_id_t cpu_ids[MAX_CPUS];
+ size_t cpu_count;
+ struct mem_range mem_ranges[MAX_MEM_RANGES];
+ size_t mem_ranges_count;
+ paddr_t initrd_begin;
+ paddr_t initrd_end;
+ uintreg_t kernel_arg;
+};
+
+struct boot_params_update {
+ struct mem_range reserved_ranges[MAX_MEM_RANGES];
+ size_t reserved_ranges_count;
+ paddr_t initrd_begin;
+ paddr_t initrd_end;
+};
diff --git a/inc/hf/check.h b/inc/hf/check.h
new file mode 100644
index 0000000..41e005f
--- /dev/null
+++ b/inc/hf/check.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2019 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include "hf/panic.h"
+
+/**
+ * Only use to check assumptions which, if false, mean the system is in a bad
+ * state and it is unsafe to continue.
+ *
+ * Do not use if the condition could ever be legitimately false e.g. when
+ * processing external inputs.
+ */
+#define CHECK(x) \
+ do { \
+ if (!(x)) { \
+ panic("assertion failed (%s) at %s:%d", #x, __FILE__, \
+ __LINE__); \
+ } \
+ } while (0)
diff --git a/inc/hf/cpio.h b/inc/hf/cpio.h
new file mode 100644
index 0000000..aebe1d5
--- /dev/null
+++ b/inc/hf/cpio.h
@@ -0,0 +1,25 @@
+/*
+ * Copyright 2018 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <stdbool.h>
+
+#include "hf/memiter.h"
+#include "hf/string.h"
+
+bool cpio_get_file(const struct memiter *cpio, const struct string *name,
+ struct memiter *it);
diff --git a/inc/hf/cpu.h b/inc/hf/cpu.h
new file mode 100644
index 0000000..af7391f
--- /dev/null
+++ b/inc/hf/cpu.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright 2018 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include "hf/arch/cpu.h"
+
+/* TODO: Fix alignment such that `cpu` structs are in different cache lines. */
+struct cpu {
+ /** CPU identifier. Doesn't have to be contiguous. */
+ cpu_id_t id;
+
+ /** Pointer to bottom of the stack. */
+ void *stack_bottom;
+
+ /** See api.c for the partial ordering on locks. */
+ struct spinlock lock;
+
+ /** Determines whether the CPU is currently on. */
+ bool is_on;
+};
+
+void cpu_module_init(const cpu_id_t *cpu_ids, size_t count);
+
+size_t cpu_index(struct cpu *c);
+bool cpu_on(struct cpu *c, ipaddr_t entry, uintreg_t arg);
+void cpu_off(struct cpu *c);
+struct cpu *cpu_find(cpu_id_t id);
+uint8_t *cpu_get_buffer(cpu_id_t cpu_id);
+uint32_t cpu_get_buffer_size(cpu_id_t cpu_id);
diff --git a/inc/hf/dlog.h b/inc/hf/dlog.h
new file mode 100644
index 0000000..f8c0080
--- /dev/null
+++ b/inc/hf/dlog.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2018 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <stdarg.h>
+#include <stddef.h>
+
+#include "hf/spci.h"
+
+#define DLOG_BUFFER_SIZE 8192
+
+extern size_t dlog_buffer_offset;
+extern char dlog_buffer[];
+
+#if DEBUG
+void dlog_enable_lock(void);
+void dlog(const char *fmt, ...);
+void vdlog(const char *fmt, va_list args);
+#else
+#define dlog_enable_lock()
+#define dlog(...)
+#define vdlog(fmt, args)
+#endif
+
+void dlog_flush_vm_buffer(spci_vm_id_t id, char buffer[], size_t length);
diff --git a/inc/hf/fdt.h b/inc/hf/fdt.h
new file mode 100644
index 0000000..8f72856
--- /dev/null
+++ b/inc/hf/fdt.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright 2018 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <stdbool.h>
+#include <stddef.h>
+#include <stdint.h>
+
+struct fdt_node {
+ const struct fdt_header *hdr;
+ const char *begin;
+ const char *end;
+ const char *strs;
+};
+
+size_t fdt_header_size(void);
+uint32_t fdt_total_size(struct fdt_header *hdr);
+void fdt_dump(const struct fdt_header *hdr);
+bool fdt_root_node(struct fdt_node *node, const struct fdt_header *hdr);
+bool fdt_find_child(struct fdt_node *node, const char *child);
+bool fdt_first_child(struct fdt_node *node, const char **child_name);
+bool fdt_next_sibling(struct fdt_node *node, const char **sibling_name);
+bool fdt_read_property(const struct fdt_node *node, const char *name,
+ const char **buf, uint32_t *size);
+bool fdt_parse_number(const char *data, uint32_t size, uint64_t *value);
+
+void fdt_add_mem_reservation(struct fdt_header *hdr, uint64_t addr,
+ uint64_t len);
diff --git a/inc/hf/fdt_handler.h b/inc/hf/fdt_handler.h
new file mode 100644
index 0000000..70521b5
--- /dev/null
+++ b/inc/hf/fdt_handler.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2018 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include "hf/boot_params.h"
+#include "hf/fdt.h"
+#include "hf/mm.h"
+#include "hf/mpool.h"
+
+struct fdt_header *fdt_map(struct mm_stage1_locked stage1_locked,
+ paddr_t fdt_addr, struct fdt_node *n,
+ struct mpool *ppool);
+bool fdt_unmap(struct mm_stage1_locked stage1_locked, struct fdt_header *fdt,
+ struct mpool *ppool);
+bool fdt_find_cpus(const struct fdt_node *root, cpu_id_t *cpu_ids,
+ size_t *cpu_count);
+bool fdt_find_memory_ranges(const struct fdt_node *root, struct boot_params *p);
+bool fdt_find_initrd(const struct fdt_node *root, paddr_t *begin, paddr_t *end);
+
+/** Apply an update to the FDT. */
+bool fdt_patch(struct mm_stage1_locked stage1_locked, paddr_t fdt_addr,
+ struct boot_params_update *p, struct mpool *ppool);
diff --git a/inc/hf/io.h b/inc/hf/io.h
new file mode 100644
index 0000000..f4a80a4
--- /dev/null
+++ b/inc/hf/io.h
@@ -0,0 +1,294 @@
+/*
+ * Copyright 2019 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "hf/arch/barriers.h"
+
+#include "hf/check.h"
+
+/* Opaque types for different sized fields of memory mapped IO. */
+
+typedef struct {
+ volatile uint8_t *ptr;
+} io8_t;
+
+typedef struct {
+ volatile uint16_t *ptr;
+} io16_t;
+
+typedef struct {
+ volatile uint32_t *ptr;
+} io32_t;
+
+typedef struct {
+ volatile uint64_t *ptr;
+} io64_t;
+
+typedef struct {
+ volatile uint8_t *base;
+ size_t count;
+} io8_array_t;
+
+typedef struct {
+ volatile uint16_t *base;
+ size_t count;
+} io16_array_t;
+
+typedef struct {
+ volatile uint32_t *base;
+ size_t count;
+} io32_array_t;
+
+typedef struct {
+ volatile uint64_t *base;
+ size_t count;
+} io64_array_t;
+
+/* Constructors for literals. */
+
+#define IO8_C(addr) ((io8_t){.ptr = (volatile uint8_t *)(addr)})
+#define IO16_C(addr) ((io16_t){.ptr = (volatile uint16_t *)(addr)})
+#define IO32_C(addr) ((io32_t){.ptr = (volatile uint32_t *)(addr)})
+#define IO64_C(addr) ((io64_t){.ptr = (volatile uint64_t *)(addr)})
+
+#define IO8_ARRAY_C(addr, cnt) \
+ ((io8_array_t){.base = (volatile uint8_t *)(addr), .count = (cnt)})
+#define IO16_ARRAY_C(addr, cnt) \
+ ((io16_array_t){.base = (volatile uint16_t *)(addr), .count = (cnt)})
+#define IO32_ARRAY_C(addr, cnt) \
+ ((io32_array_t){.base = (volatile uint32_t *)(addr), .count = (cnt)})
+#define IO64_ARRAY_C(addr, cnt) \
+ ((io64_array_t){.base = (volatile uint64_t *)(addr), .count = (cnt)})
+
+/** Read from memory-mapped IO. */
+
+static inline uint8_t io_read8(io8_t io)
+{
+ return *io.ptr;
+}
+
+static inline uint16_t io_read16(io16_t io)
+{
+ return *io.ptr;
+}
+
+static inline uint32_t io_read32(io32_t io)
+{
+ return *io.ptr;
+}
+
+static inline uint64_t io_read64(io64_t io)
+{
+ return *io.ptr;
+}
+
+static inline uint8_t io_read8_array(io8_array_t io, size_t n)
+{
+ CHECK(n < io.count);
+ return io.base[n];
+}
+
+static inline uint16_t io_read16_array(io16_array_t io, size_t n)
+{
+ CHECK(n < io.count);
+ return io.base[n];
+}
+
+static inline uint32_t io_read32_array(io32_array_t io, size_t n)
+{
+ CHECK(n < io.count);
+ return io.base[n];
+}
+
+static inline uint64_t io_read64_array(io64_array_t io, size_t n)
+{
+ CHECK(n < io.count);
+ return io.base[n];
+}
+
+/**
+ * Read from memory-mapped IO with memory barrier.
+ *
+ * The read is ordered before subsequent memory accesses.
+ */
+
+static inline uint8_t io_read8_mb(io8_t io)
+{
+ uint8_t v = io_read8(io);
+
+ data_sync_barrier();
+ return v;
+}
+
+static inline uint16_t io_read16_mb(io16_t io)
+{
+ uint16_t v = io_read16(io);
+
+ data_sync_barrier();
+ return v;
+}
+
+static inline uint32_t io_read32_mb(io32_t io)
+{
+ uint32_t v = io_read32(io);
+
+ data_sync_barrier();
+ return v;
+}
+
+static inline uint64_t io_read64_mb(io64_t io)
+{
+ uint64_t v = io_read64(io);
+
+ data_sync_barrier();
+ return v;
+}
+
+static inline uint8_t io_read8_array_mb(io8_array_t io, size_t n)
+{
+ uint8_t v = io_read8_array(io, n);
+
+ data_sync_barrier();
+ return v;
+}
+
+static inline uint16_t io_read16_array_mb(io16_array_t io, size_t n)
+{
+ uint16_t v = io_read16_array(io, n);
+
+ data_sync_barrier();
+ return v;
+}
+
+static inline uint32_t io_read32_array_mb(io32_array_t io, size_t n)
+{
+ uint32_t v = io_read32_array(io, n);
+
+ data_sync_barrier();
+ return v;
+}
+
+static inline uint64_t io_read64_array_mb(io64_array_t io, size_t n)
+{
+ uint64_t v = io_read64_array(io, n);
+
+ data_sync_barrier();
+ return v;
+}
+
+/* Write to memory-mapped IO. */
+
+static inline void io_write8(io8_t io, uint8_t v)
+{
+ *io.ptr = v;
+}
+
+static inline void io_write16(io16_t io, uint16_t v)
+{
+ *io.ptr = v;
+}
+
+static inline void io_write32(io32_t io, uint32_t v)
+{
+ *io.ptr = v;
+}
+
+static inline void io_write64(io64_t io, uint64_t v)
+{
+ *io.ptr = v;
+}
+
+static inline void io_write8_array(io8_array_t io, size_t n, uint8_t v)
+{
+ CHECK(n < io.count);
+ io.base[n] = v;
+}
+
+static inline void io_write16_array(io16_array_t io, size_t n, uint16_t v)
+{
+ CHECK(n < io.count);
+ io.base[n] = v;
+}
+
+static inline void io_write32_array(io32_array_t io, size_t n, uint32_t v)
+{
+ CHECK(n < io.count);
+ io.base[n] = v;
+}
+
+static inline void io_write64_array(io64_array_t io, size_t n, uint64_t v)
+{
+ CHECK(n < io.count);
+ io.base[n] = v;
+}
+
+/*
+ * Write to memory-mapped IO with memory barrier.
+ *
+ * The write is ordered after previous memory accesses.
+ */
+
+static inline void io_write8_mb(io8_t io, uint8_t v)
+{
+ data_sync_barrier();
+ io_write8(io, v);
+}
+
+static inline void io_write16_mb(io16_t io, uint16_t v)
+{
+ data_sync_barrier();
+ io_write16(io, v);
+}
+
+static inline void io_write32_mb(io32_t io, uint32_t v)
+{
+ data_sync_barrier();
+ io_write32(io, v);
+}
+
+static inline void io_write64_mb(io64_t io, uint64_t v)
+{
+ data_sync_barrier();
+ io_write64(io, v);
+}
+
+static inline void io_write8_array_mb(io8_array_t io, size_t n, uint8_t v)
+{
+ data_sync_barrier();
+ io_write8_array(io, n, v);
+}
+
+static inline void io_write16_array_mb(io16_array_t io, size_t n, uint16_t v)
+{
+ data_sync_barrier();
+ io_write16_array(io, n, v);
+}
+
+static inline void io_write32_array_mb(io32_array_t io, size_t n, uint32_t v)
+{
+ data_sync_barrier();
+ io_write32_array(io, n, v);
+}
+
+static inline void io_write64_array_mb(io64_array_t io, size_t n, uint64_t v)
+{
+ data_sync_barrier();
+ io_write64_array(io, n, v);
+}
diff --git a/inc/hf/layout.h b/inc/hf/layout.h
new file mode 100644
index 0000000..4193abf
--- /dev/null
+++ b/inc/hf/layout.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2018 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include "hf/addr.h"
+
+paddr_t layout_text_begin(void);
+paddr_t layout_text_end(void);
+
+paddr_t layout_rodata_begin(void);
+paddr_t layout_rodata_end(void);
+
+paddr_t layout_data_begin(void);
+paddr_t layout_data_end(void);
+
+paddr_t layout_initrd_begin(void);
+paddr_t layout_initrd_end(void);
+
+paddr_t layout_fdt_begin(void);
+paddr_t layout_fdt_end(void);
+
+paddr_t layout_image_end(void);
+
+paddr_t layout_primary_begin(void);
diff --git a/inc/hf/list.h b/inc/hf/list.h
new file mode 100644
index 0000000..30fefa9
--- /dev/null
+++ b/inc/hf/list.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright 2018 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <stdbool.h>
+#include <stddef.h>
+
+struct list_entry {
+ struct list_entry *next;
+ struct list_entry *prev;
+};
+
+#define LIST_INIT(l) \
+ { \
+ .next = &l, .prev = &l \
+ }
+#define CONTAINER_OF(ptr, type, field) \
+ ((type *)((char *)ptr - offsetof(type, field)))
+
+static inline void list_init(struct list_entry *e)
+{
+ e->next = e;
+ e->prev = e;
+}
+
+static inline void list_append(struct list_entry *l, struct list_entry *e)
+{
+ e->next = l;
+ e->prev = l->prev;
+
+ e->next->prev = e;
+ e->prev->next = e;
+}
+
+static inline void list_prepend(struct list_entry *l, struct list_entry *e)
+{
+ e->next = l->next;
+ e->prev = l;
+
+ e->next->prev = e;
+ e->prev->next = e;
+}
+
+static inline bool list_empty(struct list_entry *l)
+{
+ return l->next == l;
+}
+
+static inline void list_remove(struct list_entry *e)
+{
+ e->prev->next = e->next;
+ e->next->prev = e->prev;
+ list_init(e);
+}
diff --git a/inc/hf/load.h b/inc/hf/load.h
new file mode 100644
index 0000000..b76dc1d
--- /dev/null
+++ b/inc/hf/load.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2018 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "hf/boot_params.h"
+#include "hf/cpio.h"
+#include "hf/manifest.h"
+#include "hf/memiter.h"
+#include "hf/mm.h"
+#include "hf/mpool.h"
+
+bool load_vms(struct mm_stage1_locked stage1_locked,
+ const struct manifest *manifest, const struct memiter *cpio,
+ const struct boot_params *params,
+ struct boot_params_update *update, struct mpool *ppool);
diff --git a/inc/hf/manifest.h b/inc/hf/manifest.h
new file mode 100644
index 0000000..0e628e5
--- /dev/null
+++ b/inc/hf/manifest.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright 2019 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include "hf/fdt.h"
+#include "hf/memiter.h"
+#include "hf/spci.h"
+#include "hf/string.h"
+#include "hf/vm.h"
+
+/**
+ * Holds information about one of the VMs described in the manifest.
+ */
+struct manifest_vm {
+ /* Properties defined for both primary and secondary VMs. */
+ struct string debug_name;
+ struct string kernel_filename;
+ struct smc_whitelist smc_whitelist;
+
+ union {
+ /* Properties specific to the primary VM. */
+ struct {
+ struct string ramdisk_filename;
+ } primary;
+ /* Properties specific to secondary VMs. */
+ struct {
+ uint64_t mem_size;
+ spci_vcpu_count_t vcpu_count;
+ } secondary;
+ };
+};
+
+/**
+ * Hafnium manifest parsed from FDT.
+ */
+struct manifest {
+ spci_vm_count_t vm_count;
+ struct manifest_vm vm[MAX_VMS];
+};
+
+enum manifest_return_code {
+ MANIFEST_SUCCESS = 0,
+ MANIFEST_ERROR_NO_HYPERVISOR_FDT_NODE,
+ MANIFEST_ERROR_NOT_COMPATIBLE,
+ MANIFEST_ERROR_RESERVED_VM_ID,
+ MANIFEST_ERROR_NO_PRIMARY_VM,
+ MANIFEST_ERROR_TOO_MANY_VMS,
+ MANIFEST_ERROR_PROPERTY_NOT_FOUND,
+ MANIFEST_ERROR_MALFORMED_STRING,
+ MANIFEST_ERROR_STRING_TOO_LONG,
+ MANIFEST_ERROR_MALFORMED_STRING_LIST,
+ MANIFEST_ERROR_MALFORMED_INTEGER,
+ MANIFEST_ERROR_INTEGER_OVERFLOW,
+ MANIFEST_ERROR_MALFORMED_INTEGER_LIST,
+ MANIFEST_ERROR_MALFORMED_BOOLEAN,
+};
+
+enum manifest_return_code manifest_init(struct manifest *manifest,
+ const struct fdt_node *fdt_root);
+
+const char *manifest_strerror(enum manifest_return_code ret_code);
diff --git a/inc/hf/memiter.h b/inc/hf/memiter.h
new file mode 100644
index 0000000..e3f0db8
--- /dev/null
+++ b/inc/hf/memiter.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright 2018 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <stdbool.h>
+#include <stddef.h>
+#include <stdint.h>
+
+struct memiter {
+ const char *next;
+ const char *limit;
+};
+
+void memiter_init(struct memiter *it, const void *data, size_t size);
+bool memiter_parse_uint(struct memiter *it, uint64_t *value);
+bool memiter_parse_str(struct memiter *it, struct memiter *str);
+bool memiter_iseq(const struct memiter *it, const char *str);
+bool memiter_advance(struct memiter *it, size_t v);
+
+const void *memiter_base(const struct memiter *it);
+size_t memiter_size(const struct memiter *it);
diff --git a/inc/hf/mm.h b/inc/hf/mm.h
new file mode 100644
index 0000000..43d4c79
--- /dev/null
+++ b/inc/hf/mm.h
@@ -0,0 +1,131 @@
+/*
+ * Copyright 2018 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <stdalign.h>
+#include <stdbool.h>
+#include <stdint.h>
+
+#include "hf/arch/mm.h"
+
+#include "hf/addr.h"
+#include "hf/mpool.h"
+#include "hf/static_assert.h"
+
+/* Keep macro alignment */
+/* clang-format off */
+
+#define PAGE_SIZE (1 << PAGE_BITS)
+#define MM_PTE_PER_PAGE (PAGE_SIZE / sizeof(pte_t))
+
+/* The following are arch-independent page mapping modes. */
+#define MM_MODE_R UINT32_C(0x0001) /* read */
+#define MM_MODE_W UINT32_C(0x0002) /* write */
+#define MM_MODE_X UINT32_C(0x0004) /* execute */
+#define MM_MODE_D UINT32_C(0x0008) /* device */
+
+/*
+ * Memory in stage-1 is either valid (present) or invalid (absent).
+ *
+ * Memory in stage-2 has more states to track sharing, borrowing and giving of
+ * memory. The states are made up of three parts:
+ *
+ * 1. V = valid/invalid : Whether the memory is part of the VM's address
+ * space. A fault will be generated if accessed when
+ * invalid.
+ * 2. O = owned/unowned : Whether the memory is owned by the VM.
+ * 3. X = exclusive/shared : Whether access is exclusive to the VM or shared
+ * with at most one other.
+ *
+ * These parts compose to form the following state:
+ *
+ * - V O X : Owner of memory with exclusive access.
+ * - V O !X : Owner of memory with access shared with at most one other VM.
+ * - V !O X : Borrower of memory with exclusive access.
+ * - V !O !X : Borrower of memory where access is shared with the owner.
+ * - !V O X : Owner of memory lent to a VM that has exclusive access.
+ *
+ * - !V O !X : Unused. Owner of shared memory always has access.
+ * - !V !O X : Unused. Next entry is used for invalid memory.
+ *
+ * - !V !O !X : Invalid memory. Memory is unrelated to the VM.
+ *
+ * Modes are selected so that owner of exclusive memory is the default.
+ */
+#define MM_MODE_INVALID UINT32_C(0x0010)
+#define MM_MODE_UNOWNED UINT32_C(0x0020)
+#define MM_MODE_SHARED UINT32_C(0x0040)
+
+/* The mask for a mode that is considered unmapped. */
+#define MM_MODE_UNMAPPED_MASK (MM_MODE_INVALID | MM_MODE_UNOWNED)
+
+#define MM_FLAG_COMMIT 0x01
+#define MM_FLAG_UNMAP 0x02
+#define MM_FLAG_STAGE1 0x04
+
+/* clang-format on */
+
+struct mm_page_table {
+ alignas(PAGE_SIZE) pte_t entries[MM_PTE_PER_PAGE];
+};
+static_assert(sizeof(struct mm_page_table) == PAGE_SIZE,
+ "A page table must take exactly one page.");
+static_assert(alignof(struct mm_page_table) == PAGE_SIZE,
+ "A page table must be page aligned.");
+
+struct mm_ptable {
+ /** Address of the root of the page table. */
+ paddr_t root;
+};
+
+/** The type of addresses stored in the page table. */
+typedef uintvaddr_t ptable_addr_t;
+
+/** Represents the currently locked stage-1 page table of the hypervisor. */
+struct mm_stage1_locked {
+ struct mm_ptable *ptable;
+};
+
+void mm_vm_enable_invalidation(void);
+
+bool mm_ptable_init(struct mm_ptable *t, int flags, struct mpool *ppool);
+ptable_addr_t mm_ptable_addr_space_end(int flags);
+
+bool mm_vm_init(struct mm_ptable *t, struct mpool *ppool);
+void mm_vm_fini(struct mm_ptable *t, struct mpool *ppool);
+bool mm_vm_identity_map(struct mm_ptable *t, paddr_t begin, paddr_t end,
+ uint32_t mode, struct mpool *ppool, ipaddr_t *ipa);
+bool mm_vm_identity_prepare(struct mm_ptable *t, paddr_t begin, paddr_t end,
+ uint32_t mode, struct mpool *ppool);
+void mm_vm_identity_commit(struct mm_ptable *t, paddr_t begin, paddr_t end,
+ uint32_t mode, struct mpool *ppool, ipaddr_t *ipa);
+bool mm_vm_unmap(struct mm_ptable *t, paddr_t begin, paddr_t end,
+ struct mpool *ppool);
+void mm_vm_defrag(struct mm_ptable *t, struct mpool *ppool);
+void mm_vm_dump(struct mm_ptable *t);
+bool mm_vm_get_mode(struct mm_ptable *t, ipaddr_t begin, ipaddr_t end,
+ uint32_t *mode);
+
+struct mm_stage1_locked mm_lock_stage1(void);
+void mm_unlock_stage1(struct mm_stage1_locked *lock);
+void *mm_identity_map(struct mm_stage1_locked stage1_locked, paddr_t begin,
+ paddr_t end, uint32_t mode, struct mpool *ppool);
+bool mm_unmap(struct mm_stage1_locked stage1_locked, paddr_t begin, paddr_t end,
+ struct mpool *ppool);
+void mm_defrag(struct mm_stage1_locked stage1_locked, struct mpool *ppool);
+
+bool mm_init(struct mpool *ppool);
diff --git a/inc/hf/mpool.h b/inc/hf/mpool.h
new file mode 100644
index 0000000..c6d6150
--- /dev/null
+++ b/inc/hf/mpool.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2018 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <stdbool.h>
+#include <stddef.h>
+
+#include "hf/spinlock.h"
+
+struct mpool {
+ struct spinlock lock;
+ size_t entry_size;
+ struct mpool_chunk *chunk_list;
+ struct mpool_entry *entry_list;
+ struct mpool *fallback;
+};
+
+void mpool_enable_locks(void);
+void mpool_init(struct mpool *p, size_t entry_size);
+void mpool_init_from(struct mpool *p, struct mpool *from);
+void mpool_init_with_fallback(struct mpool *p, struct mpool *fallback);
+void mpool_fini(struct mpool *p);
+bool mpool_add_chunk(struct mpool *p, void *begin, size_t size);
+void *mpool_alloc(struct mpool *p);
+void *mpool_alloc_contiguous(struct mpool *p, size_t count, size_t align);
+void mpool_free(struct mpool *p, void *ptr);
diff --git a/inc/hf/offset_size_header.h b/inc/hf/offset_size_header.h
new file mode 100644
index 0000000..7d96950
--- /dev/null
+++ b/inc/hf/offset_size_header.h
@@ -0,0 +1,78 @@
+/*
+ * Copyright 2019 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * This header file is intended for use by files compiled with the
+ * 'offset_size_header' build rule. See overview in 'offset_size_header.gni'.
+ */
+
+#pragma once
+
+#if (defined GENERATE_BINARY) && (defined VERIFY_HEADER)
+#error Only one action can be specified
+
+#elif defined GENERATE_BINARY
+
+/**
+ * File being compiled to generate binaries with definitions of constants.
+ */
+
+/**
+ * Emit a function with an embedded string in the format:
+ * <HAFNIUM_DEFINE name #value />
+ * These will be recognized by a script that generates the header file.
+ */
+#define DEFINE(sym, val) \
+ void gen_header__##sym(void) \
+ { \
+ __asm__ volatile( \
+ "\n" \
+ ".ascii \"\\n<HAFNIUM_DEFINE " #sym \
+ " %0 />\\n\"\n" \
+ ".align 8\n" /* Align epilogue */ \
+ : \
+ : "i"(val)); \
+ }
+
+#define DEFINE_SIZEOF(sym, type) DEFINE(sym, sizeof(type))
+#define DEFINE_OFFSETOF(sym, type, field) DEFINE(sym, offsetof(type, field))
+
+#elif defined VERIFY_HEADER
+
+/**
+ * File being compiled as part of the main build to check the values in
+ * the auto-generated header file (included using a command-line flag).
+ */
+
+#include "hf/static_assert.h"
+
+#define DEFINE_SIZEOF(sym, type) \
+ void gen_header__##sym(void) \
+ { \
+ static_assert(sizeof(type) == sym, \
+ "Generated struct size mismatch"); \
+ }
+
+#define DEFINE_OFFSETOF(sym, type, field) \
+ void gen_header__##sym(void) \
+ { \
+ static_assert(offsetof(type, field) == sym, \
+ "Generated struct offset mismatch"); \
+ }
+
+#else
+#error No action specified
+#endif
diff --git a/inc/hf/panic.h b/inc/hf/panic.h
new file mode 100644
index 0000000..89893bc
--- /dev/null
+++ b/inc/hf/panic.h
@@ -0,0 +1,21 @@
+/*
+ * Copyright 2019 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <stdnoreturn.h>
+
+noreturn void panic(const char *fmt, ...);
diff --git a/inc/hf/plat/boot_flow.h b/inc/hf/plat/boot_flow.h
new file mode 100644
index 0000000..b5d8145
--- /dev/null
+++ b/inc/hf/plat/boot_flow.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2019 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include "hf/addr.h"
+#include "hf/boot_params.h"
+#include "hf/fdt.h"
+#include "hf/manifest.h"
+#include "hf/memiter.h"
+#include "hf/mm.h"
+
+paddr_t plat_boot_flow_get_fdt_addr(void);
+uintreg_t plat_boot_flow_get_kernel_arg(void);
+bool plat_boot_flow_get_initrd_range(const struct fdt_node *fdt_root,
+ paddr_t *begin, paddr_t *end);
+bool plat_boot_flow_update(struct mm_stage1_locked stage1_locked,
+ const struct manifest *manifest,
+ struct boot_params_update *p, struct memiter *cpio,
+ struct mpool *ppool);
diff --git a/inc/hf/plat/console.h b/inc/hf/plat/console.h
new file mode 100644
index 0000000..8fb4969
--- /dev/null
+++ b/inc/hf/plat/console.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright 2018 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include "hf/mm.h"
+#include "hf/mpool.h"
+
+/** Initialises the console hardware. */
+void plat_console_init(void);
+
+/** Initialises any memory mappings that the console driver needs. */
+void plat_console_mm_init(struct mm_stage1_locked stage1_locked,
+ struct mpool *ppool);
+
+/** Puts a single character on the console. */
+void plat_console_putchar(char c);
diff --git a/inc/hf/plat/iommu.h b/inc/hf/plat/iommu.h
new file mode 100644
index 0000000..9ef791d
--- /dev/null
+++ b/inc/hf/plat/iommu.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2019 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include "hf/addr.h"
+#include "hf/fdt.h"
+#include "hf/vm.h"
+
+/**
+ * Initializes the platform IOMMU driver. The root node of the FDT is provided
+ * so that the driver can read from it. This can be used to map IOMMU devices
+ * into the hypervisor's address space so they are accessible by the driver.
+ */
+bool plat_iommu_init(const struct fdt_node *fdt_root,
+ struct mm_stage1_locked stage1_locked,
+ struct mpool *ppool);
+
+/**
+ * Maps the address range with the given mode for the given VM in the IOMMU.
+ *
+ * Assumes the identity map cannot fail. This may not always be true and if it
+ * isn't it will require careful thought on how to safely handle error cases
+ * when intermingled with MMU updates but it gives a starting point for drivers
+ * until those problems are understood.
+ *
+ * The modes are the same as the memory management modes but it is only required
+ * that read and write modes are enforced by the IOMMU driver.
+ */
+void plat_iommu_identity_map(struct vm_locked vm_locked, paddr_t begin,
+ paddr_t end, uint32_t mode);
diff --git a/inc/hf/spci_internal.h b/inc/hf/spci_internal.h
new file mode 100644
index 0000000..1fc226f
--- /dev/null
+++ b/inc/hf/spci_internal.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright 2019 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include "hf/addr.h"
+#include "hf/vm.h"
+
+#include "vmapi/hf/spci.h"
+
+#define SPCI_VERSION_MAJOR 0x0
+#define SPCI_VERSION_MINOR 0x9
+
+#define SPCI_VERSION_MAJOR_OFFSET 16
+
+/**
+ * A permitted memory-mode transition for an SPCI memory sharing operation.
+ * NOTE(review): based on the field names, orig_from_mode/orig_to_mode appear
+ * to be the modes the sender's and receiver's pages must currently have, and
+ * from_mode/to_mode the modes they are changed to — confirm against
+ * spci_msg_handle_architected_message.
+ */
+struct spci_mem_transitions {
+	uint32_t orig_from_mode;
+	uint32_t orig_to_mode;
+	uint32_t from_mode;
+	uint32_t to_mode;
+};
+
+/* TODO: Add device attributes: GRE, cacheability, shareability. */
+/**
+ * Converts SPCI memory access attributes into memory-management (MM_MODE_*)
+ * mode bits.
+ */
+static inline uint32_t spci_memory_attrs_to_mode(uint16_t memory_attributes)
+{
+	uint32_t mode = 0;
+
+	/*
+	 * No default case: an unrecognised access attribute leaves mode at 0
+	 * (no read/write/execute permission).
+	 */
+	switch (spci_get_memory_access_attr(memory_attributes)) {
+	case SPCI_MEMORY_RO_NX:
+		mode = MM_MODE_R;
+		break;
+	case SPCI_MEMORY_RO_X:
+		mode = MM_MODE_R | MM_MODE_X;
+		break;
+	case SPCI_MEMORY_RW_NX:
+		mode = MM_MODE_R | MM_MODE_W;
+		break;
+	case SPCI_MEMORY_RW_X:
+		mode = MM_MODE_R | MM_MODE_W | MM_MODE_X;
+		break;
+	}
+
+	return mode;
+}
+
+static inline struct spci_value spci_error(uint64_t error_code)
+{
+ return (struct spci_value){.func = SPCI_ERROR_32, .arg2 = error_code};
+}
+
+struct spci_value spci_msg_handle_architected_message(
+ struct vm_locked to_locked, struct vm_locked from_locked,
+ struct spci_memory_region *memory_region, uint32_t size,
+ uint32_t attributes, struct mpool *api_page_pool);
diff --git a/inc/hf/spinlock.h b/inc/hf/spinlock.h
new file mode 100644
index 0000000..2689a5c
--- /dev/null
+++ b/inc/hf/spinlock.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright 2018 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+/*
+ * Includes the arch-specific definition of 'struct spinlock' and
+ * implementations of:
+ * - SPINLOCK_INIT
+ * - sl_lock()
+ * - sl_unlock()
+ */
+#include "hf/arch/spinlock.h"
+
+/** Initialises the given spinlock with the arch-provided SPINLOCK_INIT value. */
+static inline void sl_init(struct spinlock *l)
+{
+	*l = SPINLOCK_INIT;
+}
+
+/**
+ * Acquires two spinlocks of the same kind without risking deadlock: the lock
+ * at the lower address is always taken first, which gives a global ordering
+ * for any pair of locks acquired through this function.
+ */
+static inline void sl_lock_both(struct spinlock *a, struct spinlock *b)
+{
+	struct spinlock *first = (a < b) ? a : b;
+	struct spinlock *second = (a < b) ? b : a;
+
+	sl_lock(first);
+	sl_lock(second);
+}
diff --git a/inc/hf/static_assert.h b/inc/hf/static_assert.h
new file mode 100644
index 0000000..da71ac6
--- /dev/null
+++ b/inc/hf/static_assert.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright 2019 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#if !defined(__cplusplus)
+
+#define static_assert _Static_assert
+
+#endif
diff --git a/inc/hf/std.h b/inc/hf/std.h
new file mode 100644
index 0000000..9c6b494
--- /dev/null
+++ b/inc/hf/std.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright 2019 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include "hf/arch/std.h"
+
+typedef size_t rsize_t;
+
+/**
+ * Restrict the maximum range for range checked functions so as to be more
+ * likely to catch errors. This may need to be relaxed if it proves to be overly
+ * restrictive.
+ */
+#define RSIZE_MAX (128 * 1024 * 1024)
+
+/*
+ * Only the safer versions of these functions are exposed to reduce the chance
+ * of misusing the versions without bounds checking or null pointer checks.
+ *
+ * These functions don't return errno_t as per the specification and implicitly
+ * have a constraint handler that panics.
+ */
+void memset_s(void *dest, rsize_t destsz, int ch, rsize_t count);
+void memcpy_s(void *dest, rsize_t destsz, const void *src, rsize_t count);
+void memmove_s(void *dest, rsize_t destsz, const void *src, rsize_t count);
+
+void *memchr(const void *ptr, int ch, size_t count);
+
+size_t strnlen_s(const char *str, size_t strsz);
diff --git a/inc/hf/string.h b/inc/hf/string.h
new file mode 100644
index 0000000..9c64632
--- /dev/null
+++ b/inc/hf/string.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright 2019 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <stdbool.h>
+#include <stddef.h>
+
+/**
+ * Maximum length of a string including the NULL terminator.
+ * This is an arbitrary number and can be adjusted to fit use cases.
+ */
+#define STRING_MAX_SIZE 32
+
+enum string_return_code {
+ STRING_SUCCESS,
+ STRING_ERROR_INVALID_INPUT,
+ STRING_ERROR_TOO_LONG,
+};
+
+/**
+ * Statically-allocated string data structure with input validation to ensure
+ * strings are properly NULL-terminated.
+ *
+ * This is intentionally kept as simple as possible and should not be extended
+ * to perform complex string operations without a good use case.
+ */
+struct string {
+ char data[STRING_MAX_SIZE];
+};
+
+/**
+ * Macro to initialize `struct string` from a string constant.
+ * Triggers a compilation error if the string does not fit into the buffer.
+ */
+#define STRING_INIT(str) ((struct string){.data = str})
+
+enum string_return_code string_init(struct string *str, const char *data,
+ size_t size);
+void string_init_empty(struct string *str);
+bool string_is_empty(const struct string *str);
+const char *string_data(const struct string *str);
diff --git a/inc/hf/vcpu.h b/inc/hf/vcpu.h
new file mode 100644
index 0000000..87d5e8c
--- /dev/null
+++ b/inc/hf/vcpu.h
@@ -0,0 +1,105 @@
+/*
+ * Copyright 2019 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include "hf/addr.h"
+#include "hf/spinlock.h"
+
+#include "vmapi/hf/spci.h"
+
+/** The number of bits in each element of the interrupt bitfields. */
+#define INTERRUPT_REGISTER_BITS 32
+
+enum vcpu_state {
+ /** The vCPU is switched off. */
+ VCPU_STATE_OFF,
+
+ /** The vCPU is ready to be run. */
+ VCPU_STATE_READY,
+
+ /** The vCPU is currently running. */
+ VCPU_STATE_RUNNING,
+
+ /** The vCPU is waiting for a message. */
+ VCPU_STATE_BLOCKED_MAILBOX,
+
+ /** The vCPU is waiting for an interrupt. */
+ VCPU_STATE_BLOCKED_INTERRUPT,
+
+ /** The vCPU has aborted. */
+ VCPU_STATE_ABORTED,
+};
+
+struct interrupts {
+ /** Bitfield keeping track of which interrupts are enabled. */
+ uint32_t interrupt_enabled[HF_NUM_INTIDS / INTERRUPT_REGISTER_BITS];
+ /** Bitfield keeping track of which interrupts are pending. */
+ uint32_t interrupt_pending[HF_NUM_INTIDS / INTERRUPT_REGISTER_BITS];
+ /**
+ * The number of interrupts which are currently both enabled and
+ * pending. i.e. the number of bits set in interrupt_enable &
+ * interrupt_pending.
+ */
+ uint32_t enabled_and_pending_count;
+};
+
+struct vcpu_fault_info {
+ ipaddr_t ipaddr;
+ vaddr_t vaddr;
+ vaddr_t pc;
+ uint32_t mode;
+};
+
+struct vcpu {
+ struct spinlock lock;
+
+ /*
+ * The state is only changed in the context of the vCPU being run. This
+ * ensures the scheduler can easily keep track of the vCPU state as
+ * transitions are indicated by the return code from the run call.
+ */
+ enum vcpu_state state;
+
+ struct cpu *cpu;
+ struct vm *vm;
+ struct arch_regs regs;
+ struct interrupts interrupts;
+
+ /*
+ * Determine whether the 'regs' field is available for use. This is set
+ * to false when a vCPU is about to run on a physical CPU, and is set
+ * back to true when it is descheduled.
+ */
+ bool regs_available;
+};
+
+/** Encapsulates a vCPU whose lock is held. */
+struct vcpu_locked {
+ struct vcpu *vcpu;
+};
+
+struct vcpu_locked vcpu_lock(struct vcpu *vcpu);
+void vcpu_unlock(struct vcpu_locked *locked);
+void vcpu_init(struct vcpu *vcpu, struct vm *vm);
+void vcpu_on(struct vcpu_locked vcpu, ipaddr_t entry, uintreg_t arg);
+spci_vcpu_index_t vcpu_index(const struct vcpu *vcpu);
+bool vcpu_is_off(struct vcpu_locked vcpu);
+bool vcpu_secondary_reset_and_start(struct vcpu *vcpu, ipaddr_t entry,
+ uintreg_t arg);
+
+bool vcpu_handle_page_fault(const struct vcpu *current,
+ struct vcpu_fault_info *f);
diff --git a/inc/hf/vm.h b/inc/hf/vm.h
new file mode 100644
index 0000000..9fff714
--- /dev/null
+++ b/inc/hf/vm.h
@@ -0,0 +1,161 @@
+/*
+ * Copyright 2018 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <stdatomic.h>
+
+#include "hf/arch/types.h"
+
+#include "hf/cpu.h"
+#include "hf/list.h"
+#include "hf/mm.h"
+#include "hf/mpool.h"
+
+#include "vmapi/hf/spci.h"
+
+#define MAX_SMCS 32
+#define LOG_BUFFER_SIZE 256
+
+/**
+ * The state of an RX buffer.
+ *
+ * EMPTY is the initial state. The follow state transitions are possible:
+ * * EMPTY → RECEIVED: message sent to the VM.
+ * * RECEIVED → READ: secondary VM returns from SPCI_MSG_WAIT or
+ * SPCI_MSG_POLL, or primary VM returns from SPCI_RUN with an SPCI_MSG_SEND
+ * where the receiver is itself.
+ * * READ → EMPTY: VM called SPCI_RX_RELEASE.
+ */
+enum mailbox_state {
+ /** There is no message in the mailbox. */
+ MAILBOX_STATE_EMPTY,
+
+ /** There is a message in the mailbox that is waiting for a reader. */
+ MAILBOX_STATE_RECEIVED,
+
+ /** There is a message in the mailbox that has been read. */
+ MAILBOX_STATE_READ,
+};
+
+struct wait_entry {
+ /** The VM that is waiting for a mailbox to become writable. */
+ struct vm *waiting_vm;
+
+ /**
+ * Links used to add entry to a VM's waiter_list. This is protected by
+ * the notifying VM's lock.
+ */
+ struct list_entry wait_links;
+
+ /**
+ * Links used to add entry to a VM's ready_list. This is protected by
+ * the waiting VM's lock.
+ */
+ struct list_entry ready_links;
+};
+
+struct mailbox {
+ enum mailbox_state state;
+ void *recv;
+ const void *send;
+
+ /** The ID of the VM which sent the message currently in `recv`. */
+ spci_vm_id_t recv_sender;
+
+ /** The size of the message currently in `recv`. */
+ uint32_t recv_size;
+
+ /** The attributes of the message currently in `recv`. */
+ uint32_t recv_attributes;
+
+ /**
+ * List of wait_entry structs representing VMs that want to be notified
+ * when the mailbox becomes writable. Once the mailbox does become
+ * writable, the entry is removed from this list and added to the
+ * waiting VM's ready_list.
+ */
+ struct list_entry waiter_list;
+
+ /**
+ * List of wait_entry structs representing VMs whose mailboxes became
+ * writable since the owner of the mailbox registers for notification.
+ */
+ struct list_entry ready_list;
+};
+
+struct smc_whitelist {
+ uint32_t smcs[MAX_SMCS];
+ uint16_t smc_count;
+ bool permissive;
+};
+
+struct vm {
+ spci_vm_id_t id;
+ struct smc_whitelist smc_whitelist;
+
+ /** See api.c for the partial ordering on locks. */
+ struct spinlock lock;
+ spci_vcpu_count_t vcpu_count;
+ struct vcpu vcpus[MAX_CPUS];
+ struct mm_ptable ptable;
+ struct mailbox mailbox;
+ char log_buffer[LOG_BUFFER_SIZE];
+ uint16_t log_buffer_length;
+
+ /**
+ * Wait entries to be used when waiting on other VM mailboxes. See
+ * comments on `struct wait_entry` for the lock discipline of these.
+ */
+ struct wait_entry wait_entries[MAX_VMS];
+
+ atomic_bool aborting;
+
+ /** Arch-specific VM information. */
+ struct arch_vm arch;
+};
+
+/** Encapsulates a VM whose lock is held. */
+struct vm_locked {
+ struct vm *vm;
+};
+
+/** Container for two vm_locked structures */
+struct two_vm_locked {
+ struct vm_locked vm1;
+ struct vm_locked vm2;
+};
+
+bool vm_init(spci_vcpu_count_t vcpu_count, struct mpool *ppool,
+ struct vm **new_vm);
+spci_vm_count_t vm_get_count(void);
+struct vm *vm_find(spci_vm_id_t id);
+struct vm_locked vm_lock(struct vm *vm);
+struct two_vm_locked vm_lock_both(struct vm *vm1, struct vm *vm2);
+void vm_unlock(struct vm_locked *locked);
+struct vcpu *vm_get_vcpu(struct vm *vm, spci_vcpu_index_t vcpu_index);
+struct wait_entry *vm_get_wait_entry(struct vm *vm, spci_vm_id_t for_vm);
+spci_vm_id_t vm_id_for_wait_entry(struct vm *vm, struct wait_entry *entry);
+
+bool vm_identity_map(struct vm_locked vm_locked, paddr_t begin, paddr_t end,
+ uint32_t mode, struct mpool *ppool, ipaddr_t *ipa);
+bool vm_identity_prepare(struct vm_locked vm_locked, paddr_t begin, paddr_t end,
+ uint32_t mode, struct mpool *ppool);
+void vm_identity_commit(struct vm_locked vm_locked, paddr_t begin, paddr_t end,
+ uint32_t mode, struct mpool *ppool, ipaddr_t *ipa);
+bool vm_unmap(struct vm_locked vm_locked, paddr_t begin, paddr_t end,
+ struct mpool *ppool);
+bool vm_unmap_hypervisor(struct vm_locked vm_locked, struct mpool *ppool);
diff --git a/inc/system/sys/cdefs.h b/inc/system/sys/cdefs.h
new file mode 100644
index 0000000..ef2bc8f
--- /dev/null
+++ b/inc/system/sys/cdefs.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright 2019 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+/*
+ * Empty file to make Android Clang stdatomic.h happy. It includes this internal
+ * glibc header which we don't have, but doesn't actually need it.
+ * TODO: Investigate why Android have replaced the upstream Clang version of
+ * stdatomic.h with one that appears to be from FreeBSD, possibly via Bionic, in
+ * their prebuilt version of Clang. If we can just use the upstream Clang we can
+ * probably remove this workaround.
+ */
diff --git a/inc/system/sys/types.h b/inc/system/sys/types.h
new file mode 100644
index 0000000..ef2bc8f
--- /dev/null
+++ b/inc/system/sys/types.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright 2019 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+/*
+ * Empty file to make Android Clang stdatomic.h happy. It includes this internal
+ * glibc header which we don't have, but doesn't actually need it.
+ * TODO: Investigate why Android have replaced the upstream Clang version of
+ * stdatomic.h with one that appears to be from FreeBSD, possibly via Bionic, in
+ * their prebuilt version of Clang. If we can just use the upstream Clang we can
+ * probably remove this workaround.
+ */
diff --git a/inc/vmapi/hf/abi.h b/inc/vmapi/hf/abi.h
new file mode 100644
index 0000000..ed004b7
--- /dev/null
+++ b/inc/vmapi/hf/abi.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright 2018 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include "hf/spci.h"
+#include "hf/types.h"
+
+/* Keep macro alignment */
+/* clang-format off */
+
+/* TODO: Define constants below according to spec. */
+#define HF_VM_GET_COUNT 0xff01
+#define HF_VCPU_GET_COUNT 0xff02
+#define HF_MAILBOX_WRITABLE_GET 0xff03
+#define HF_MAILBOX_WAITER_GET 0xff04
+#define HF_INTERRUPT_ENABLE 0xff05
+#define HF_INTERRUPT_GET 0xff06
+#define HF_INTERRUPT_INJECT 0xff07
+
+/* Custom SPCI-like calls returned from SPCI_RUN. */
+#define HF_SPCI_RUN_WAIT_FOR_INTERRUPT 0xff09
+#define HF_SPCI_RUN_WAKE_UP 0xff0a
+
+/* This matches what Trusty and its ATF module currently use. */
+#define HF_DEBUG_LOG 0xbd000000
+
+/* clang-format on */
diff --git a/inc/vmapi/hf/call.h b/inc/vmapi/hf/call.h
new file mode 100644
index 0000000..f2455e2
--- /dev/null
+++ b/inc/vmapi/hf/call.h
@@ -0,0 +1,289 @@
+/*
+ * Copyright 2018 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include "hf/abi.h"
+#include "hf/spci.h"
+#include "hf/types.h"
+
+/**
+ * This function must be implemented to trigger the architecture-specific
+ * mechanism to call to the hypervisor.
+ */
+int64_t hf_call(uint64_t arg0, uint64_t arg1, uint64_t arg2, uint64_t arg3);
+struct spci_value spci_call(struct spci_value args);
+
+/**
+ * Returns the VM's own ID.
+ */
+static inline struct spci_value spci_id_get(void)
+{
+ return spci_call((struct spci_value){.func = SPCI_ID_GET_32});
+}
+
+/**
+ * Returns the VM's own ID.
+ *
+ * The SPCI_ID_GET result carries the ID in arg2; it is implicitly narrowed
+ * from uint64_t to spci_vm_id_t here. NOTE(review): assumes the hypervisor
+ * only returns IDs that fit in spci_vm_id_t — confirm against the ABI.
+ */
+static inline spci_vm_id_t hf_vm_get_id(void)
+{
+	return spci_id_get().arg2;
+}
+
+/**
+ * Returns the number of secondary VMs.
+ */
+static inline spci_vm_count_t hf_vm_get_count(void)
+{
+ return hf_call(HF_VM_GET_COUNT, 0, 0, 0);
+}
+
+/**
+ * Returns the number of vCPUs configured in the given secondary VM.
+ */
+static inline spci_vcpu_count_t hf_vcpu_get_count(spci_vm_id_t vm_id)
+{
+ return hf_call(HF_VCPU_GET_COUNT, vm_id, 0, 0);
+}
+
+/**
+ * Runs the given vCPU of the given VM.
+ */
+static inline struct spci_value spci_run(spci_vm_id_t vm_id,
+					 spci_vcpu_index_t vcpu_idx)
+{
+	/*
+	 * Use an explicit .arg1 designator: the original positional
+	 * initializer after `.func` silently relied on `arg1` being the
+	 * member immediately following `func` in struct spci_value.
+	 */
+	return spci_call((struct spci_value){
+		.func = SPCI_RUN_32,
+		.arg1 = spci_vm_vcpu(vm_id, vcpu_idx)});
+}
+
+/**
+ * Hints that the vCPU is willing to yield its current use of the physical CPU.
+ * This call always returns SPCI_SUCCESS.
+ */
+static inline struct spci_value spci_yield(void)
+{
+ return spci_call((struct spci_value){.func = SPCI_YIELD_32});
+}
+
+/**
+ * Configures the pages to send/receive data through. The pages must not be
+ * shared.
+ *
+ * Returns:
+ * - SPCI_ERROR SPCI_INVALID_PARAMETERS if the given addresses are not properly
+ * aligned or are the same.
+ * - SPCI_ERROR SPCI_NO_MEMORY if the hypervisor was unable to map the buffers
+ * due to insufficient page table memory.
+ * - SPCI_ERROR SPCI_DENIED if the pages are already mapped or are not owned by
+ * the caller.
+ * - SPCI_SUCCESS on success if no further action is needed.
+ * - SPCI_RX_RELEASE if it was called by the primary VM and the primary VM now
+ * needs to wake up or kick waiters.
+ */
+static inline struct spci_value spci_rxtx_map(hf_ipaddr_t send,
+					      hf_ipaddr_t recv)
+{
+	return spci_call(
+		(struct spci_value){.func = SPCI_RXTX_MAP_32,
+				    .arg1 = send,
+				    .arg2 = recv,
+				    .arg3 = HF_MAILBOX_SIZE / SPCI_PAGE_SIZE});
+}
+
+/**
+ * Copies data from the sender's send buffer to the recipient's receive buffer.
+ *
+ * If the recipient's receive buffer is busy, it can optionally register the
+ * caller to be notified when the recipient's receive buffer becomes available.
+ *
+ * Attributes may include:
+ * - SPCI_MSG_SEND_NOTIFY, to notify the caller when it should try again.
+ * - SPCI_MSG_SEND_LEGACY_MEMORY_*, to send a legacy architected memory sharing
+ * message.
+ *
+ * Returns SPCI_SUCCESS if the message is sent, or an error code otherwise:
+ * - INVALID_PARAMETERS: one or more of the parameters do not conform.
+ * - BUSY: the message could not be delivered either because the mailbox
+ * was full or the target VM is not yet set up.
+ */
+static inline struct spci_value spci_msg_send(spci_vm_id_t sender_vm_id,
+					      spci_vm_id_t target_vm_id,
+					      uint32_t size,
+					      uint32_t attributes)
+{
+	/* arg1 packs the sender ID in the upper half-word, target below. */
+	uint64_t sender_and_target =
+		((uint64_t)sender_vm_id << 16) | target_vm_id;
+
+	return spci_call((struct spci_value){.func = SPCI_MSG_SEND_32,
+					     .arg1 = sender_and_target,
+					     .arg3 = size,
+					     .arg4 = attributes});
+}
+
+/**
+ * Called by secondary VMs to receive a message. This will block until a message
+ * is received.
+ *
+ * The mailbox must be cleared before a new message can be received.
+ *
+ * If no message is immediately available and there are no enabled and pending
+ * interrupts (irrespective of whether interrupts are enabled globally), then
+ * this will block until a message is available or an enabled interrupt becomes
+ * pending. This matches the behaviour of the WFI instruction on AArch64, except
+ * that a message becoming available is also treated like a wake-up event.
+ *
+ * Returns:
+ * - SPCI_MSG_SEND if a message is successfully received.
+ * - SPCI_ERROR SPCI_NOT_SUPPORTED if called from the primary VM.
+ * - SPCI_ERROR SPCI_INTERRUPTED if an interrupt happened during the call.
+ */
+static inline struct spci_value spci_msg_wait(void)
+{
+ return spci_call((struct spci_value){.func = SPCI_MSG_WAIT_32});
+}
+
+/**
+ * Called by secondary VMs to receive a message. The call will return whether or
+ * not a message is available.
+ *
+ * The mailbox must be cleared before a new message can be received.
+ *
+ * Returns:
+ * - SPCI_MSG_SEND if a message is successfully received.
+ * - SPCI_ERROR SPCI_NOT_SUPPORTED if called from the primary VM.
+ * - SPCI_ERROR SPCI_INTERRUPTED if an interrupt happened during the call.
+ * - SPCI_ERROR SPCI_RETRY if there was no pending message.
+ */
+static inline struct spci_value spci_msg_poll(void)
+{
+ return spci_call((struct spci_value){.func = SPCI_MSG_POLL_32});
+}
+
+/**
+ * Releases the caller's mailbox so that a new message can be received. The
+ * caller must have copied out all data they wish to preserve as new messages
+ * will overwrite the old and will arrive asynchronously.
+ *
+ * Returns:
+ * - SPCI_ERROR SPCI_DENIED on failure, if the mailbox hasn't been read.
+ * - SPCI_SUCCESS on success if no further action is needed.
+ * - SPCI_RX_RELEASE if it was called by the primary VM and the primary VM now
+ * needs to wake up or kick waiters. Waiters should be retrieved by calling
+ * hf_mailbox_waiter_get.
+ */
+static inline struct spci_value spci_rx_release(void)
+{
+ return spci_call((struct spci_value){.func = SPCI_RX_RELEASE_32});
+}
+
+/**
+ * Retrieves the next VM whose mailbox became writable. For a VM to be notified
+ * by this function, the caller must have called api_mailbox_send before with
+ * the notify argument set to true, and this call must have failed because the
+ * mailbox was not available.
+ *
+ * It should be called repeatedly to retrieve a list of VMs.
+ *
+ * Returns -1 if no VM became writable, or the id of the VM whose mailbox
+ * became writable.
+ */
+static inline int64_t hf_mailbox_writable_get(void)
+{
+ return hf_call(HF_MAILBOX_WRITABLE_GET, 0, 0, 0);
+}
+
+/**
+ * Retrieves the next VM waiting to be notified that the mailbox of the
+ * specified VM became writable. Only primary VMs are allowed to call this.
+ *
+ * Returns -1 on failure or if there are no waiters; the VM id of the next
+ * waiter otherwise.
+ */
+static inline int64_t hf_mailbox_waiter_get(spci_vm_id_t vm_id)
+{
+ return hf_call(HF_MAILBOX_WAITER_GET, vm_id, 0, 0);
+}
+
+/**
+ * Enables or disables a given interrupt ID.
+ *
+ * Returns 0 on success, or -1 if the intid is invalid.
+ */
+static inline int64_t hf_interrupt_enable(uint32_t intid, bool enable)
+{
+ return hf_call(HF_INTERRUPT_ENABLE, intid, enable, 0);
+}
+
+/**
+ * Gets the ID of the pending interrupt (if any) and acknowledge it.
+ *
+ * Returns HF_INVALID_INTID if there are no pending interrupts.
+ */
+static inline uint32_t hf_interrupt_get(void)
+{
+ return hf_call(HF_INTERRUPT_GET, 0, 0, 0);
+}
+
+/**
+ * Injects a virtual interrupt of the given ID into the given target vCPU.
+ * This doesn't cause the vCPU to actually be run immediately; it will be taken
+ * when the vCPU is next run, which is up to the scheduler.
+ *
+ * Returns:
+ * - -1 on failure because the target VM or vCPU doesn't exist, the interrupt
+ * ID is invalid, or the current VM is not allowed to inject interrupts to
+ * the target VM.
+ * - 0 on success if no further action is needed.
+ * - 1 if it was called by the primary VM and the primary VM now needs to wake
+ * up or kick the target vCPU.
+ */
+static inline int64_t hf_interrupt_inject(spci_vm_id_t target_vm_id,
+ spci_vcpu_index_t target_vcpu_idx,
+ uint32_t intid)
+{
+ return hf_call(HF_INTERRUPT_INJECT, target_vm_id, target_vcpu_idx,
+ intid);
+}
+
+/**
+ * Sends a character to the debug log for the VM.
+ *
+ * Returns 0 on success, or -1 if it failed for some reason.
+ */
+static inline int64_t hf_debug_log(char c)
+{
+ return hf_call(HF_DEBUG_LOG, c, 0, 0);
+}
+
+/** Obtains the Hafnium's version of the implemented SPCI specification. */
+static inline struct spci_value spci_version(void)
+{
+ return spci_call((struct spci_value){.func = SPCI_VERSION_32});
+}
+
+/**
+ * Discovery function returning information about the implementation of optional
+ * SPCI interfaces.
+ *
+ * Returns:
+ * - SPCI_SUCCESS in .func if the optional interface with function_id is
+ * implemented.
+ * - SPCI_ERROR in .func if the optional interface with function_id is not
+ * implemented.
+ */
+static inline struct spci_value spci_features(uint32_t function_id)
+{
+ return spci_call((struct spci_value){.func = SPCI_FEATURES_32,
+ .arg1 = function_id});
+}
diff --git a/inc/vmapi/hf/spci.h b/inc/vmapi/hf/spci.h
new file mode 100644
index 0000000..12600c1
--- /dev/null
+++ b/inc/vmapi/hf/spci.h
@@ -0,0 +1,319 @@
+/*
+ * Copyright 2019 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include "hf/types.h"
+
+/* clang-format off */
+
+#define SPCI_LOW_32_ID 0x84000060
+#define SPCI_HIGH_32_ID 0x8400007F
+#define SPCI_LOW_64_ID 0xC4000060
+#define SPCI_HIGH_64_ID 0xC400007F
+
+/* SPCI function identifiers. */
+#define SPCI_ERROR_32 0x84000060
+#define SPCI_SUCCESS_32 0x84000061
+#define SPCI_INTERRUPT_32 0x84000062
+#define SPCI_VERSION_32 0x84000063
+#define SPCI_FEATURES_32 0x84000064
+#define SPCI_RX_RELEASE_32 0x84000065
+#define SPCI_RXTX_MAP_32 0x84000066
+#define SPCI_RXTX_UNMAP_32 0x84000067
+#define SPCI_PARTITION_INFO_GET_32 0x84000068
+#define SPCI_ID_GET_32 0x84000069
+#define SPCI_MSG_POLL_32 0x8400006A
+#define SPCI_MSG_WAIT_32 0x8400006B
+#define SPCI_YIELD_32 0x8400006C
+#define SPCI_RUN_32 0x8400006D
+#define SPCI_MSG_SEND_32 0x8400006E
+#define SPCI_MSG_SEND_DIRECT_REQ_32 0x8400006F
+#define SPCI_MSG_SEND_DIRECT_RESP_32 0x84000070
+
+/* SPCI error codes. */
+#define SPCI_NOT_SUPPORTED INT32_C(-1)
+#define SPCI_INVALID_PARAMETERS INT32_C(-2)
+#define SPCI_NO_MEMORY INT32_C(-3)
+#define SPCI_BUSY INT32_C(-4)
+#define SPCI_INTERRUPTED INT32_C(-5)
+#define SPCI_DENIED INT32_C(-6)
+#define SPCI_RETRY INT32_C(-7)
+#define SPCI_ABORTED INT32_C(-8)
+
+/* SPCI function specific constants. */
+#define SPCI_MSG_RECV_BLOCK 0x1
+#define SPCI_MSG_RECV_BLOCK_MASK 0x1
+
+#define SPCI_MSG_SEND_NOTIFY 0x1
+#define SPCI_MSG_SEND_NOTIFY_MASK 0x1
+#define SPCI_MSG_SEND_LEGACY_MEMORY_DONATE 0x10
+#define SPCI_MSG_SEND_LEGACY_MEMORY_LEND 0x20
+#define SPCI_MSG_SEND_LEGACY_MEMORY_SHARE 0x30
+#define SPCI_MSG_SEND_LEGACY_MEMORY_RELINQUISH 0x40
+#define SPCI_MSG_SEND_LEGACY_MEMORY_MASK 0x70
+
+#define SPCI_SLEEP_INDEFINITE 0
+
+/**
+ * For use where the SPCI specification refers explicitly to '4K pages'. Not to
+ * be confused with PAGE_SIZE, which is the translation granule Hafnium is
+ * configured to use.
+ */
+#define SPCI_PAGE_SIZE 4096
+
+/* The maximum length possible for a single message. */
+#define SPCI_MSG_PAYLOAD_MAX HF_MAILBOX_SIZE
+
+enum spci_memory_access {
+ SPCI_MEMORY_RO_NX,
+ SPCI_MEMORY_RO_X,
+ SPCI_MEMORY_RW_NX,
+ SPCI_MEMORY_RW_X,
+};
+
+enum spci_memory_type {
+ SPCI_MEMORY_DEVICE_MEM,
+ SPCI_MEMORY_NORMAL_MEM,
+};
+
+enum spci_memory_cacheability {
+ SPCI_MEMORY_CACHE_RESERVED = 0x0,
+ SPCI_MEMORY_CACHE_NON_CACHEABLE = 0x1,
+ SPCI_MEMORY_CACHE_WRITE_THROUGH = 0x2,
+ SPCI_MEMORY_CACHE_WRITE_BACK = 0x4,
+ SPCI_MEMORY_DEV_NGNRNE = 0x0,
+ SPCI_MEMORY_DEV_NGNRE = 0x1,
+ SPCI_MEMORY_DEV_NGRE = 0x2,
+ SPCI_MEMORY_DEV_GRE = 0x3,
+};
+
+enum spci_memory_shareability {
+ SPCI_MEMORY_SHARE_NON_SHAREABLE,
+ SPCI_MEMORY_RESERVED,
+ SPCI_MEMORY_OUTER_SHAREABLE,
+ SPCI_MEMORY_INNER_SHAREABLE,
+};
+
+#define SPCI_MEMORY_ACCESS_OFFSET (0x5U)
+#define SPCI_MEMORY_ACCESS_MASK ((0x3U) << SPCI_MEMORY_ACCESS_OFFSET)
+
+#define SPCI_MEMORY_TYPE_OFFSET (0x4U)
+#define SPCI_MEMORY_TYPE_MASK ((0x1U) << SPCI_MEMORY_TYPE_OFFSET)
+
+#define SPCI_MEMORY_CACHEABILITY_OFFSET (0x2U)
+#define SPCI_MEMORY_CACHEABILITY_MASK ((0x3U) <<\
+ SPCI_MEMORY_CACHEABILITY_OFFSET)
+
+#define SPCI_MEMORY_SHAREABILITY_OFFSET (0x0U)
+#define SPCI_MEMORY_SHAREABILITY_MASK ((0x3U) <<\
+ SPCI_MEMORY_SHAREABILITY_OFFSET)
+
+#define LEND_ATTR_FUNCTION_SET(name, offset, mask) \
+static inline void spci_set_memory_##name##_attr(uint16_t *attr,\
+ const enum spci_memory_##name perm)\
+{\
+ *attr = (*attr & ~(mask)) | ((perm << offset) & mask);\
+}
+
+#define LEND_ATTR_FUNCTION_GET(name, offset, mask) \
+static inline enum spci_memory_##name spci_get_memory_##name##_attr(\
+ uint16_t attr)\
+{\
+ return (enum spci_memory_##name)((attr & mask) >> offset);\
+}
+
+LEND_ATTR_FUNCTION_SET(access, SPCI_MEMORY_ACCESS_OFFSET,
+ SPCI_MEMORY_ACCESS_MASK)
+LEND_ATTR_FUNCTION_GET(access, SPCI_MEMORY_ACCESS_OFFSET,
+ SPCI_MEMORY_ACCESS_MASK)
+
+LEND_ATTR_FUNCTION_SET(type, SPCI_MEMORY_TYPE_OFFSET, SPCI_MEMORY_TYPE_MASK)
+LEND_ATTR_FUNCTION_GET(type, SPCI_MEMORY_TYPE_OFFSET, SPCI_MEMORY_TYPE_MASK)
+
+LEND_ATTR_FUNCTION_SET(cacheability, SPCI_MEMORY_CACHEABILITY_OFFSET,
+ SPCI_MEMORY_CACHEABILITY_MASK)
+
+LEND_ATTR_FUNCTION_GET(cacheability, SPCI_MEMORY_CACHEABILITY_OFFSET,
+ SPCI_MEMORY_CACHEABILITY_MASK)
+
+LEND_ATTR_FUNCTION_SET(shareability, SPCI_MEMORY_SHAREABILITY_OFFSET,
+ SPCI_MEMORY_SHAREABILITY_MASK)
+
+LEND_ATTR_FUNCTION_GET(shareability, SPCI_MEMORY_SHAREABILITY_OFFSET,
+ SPCI_MEMORY_SHAREABILITY_MASK)
+
+/* clang-format on */
+
+/** The ID of a VM. These are assigned sequentially starting with an offset. */
+typedef uint16_t spci_vm_id_t;
+typedef uint32_t spci_memory_handle_t;
+
+/**
+ * A count of VMs. This has the same range as the VM IDs but we give it a
+ * different name to make the different semantics clear.
+ */
+typedef spci_vm_id_t spci_vm_count_t;
+
+/** The index of a vCPU within a particular VM. */
+typedef uint16_t spci_vcpu_index_t;
+
+/**
+ * A count of vCPUs. This has the same range as the vCPU indices but we give it
+ * a different name to make the different semantics clear.
+ */
+typedef spci_vcpu_index_t spci_vcpu_count_t;
+
+/** Parameter and return type of SPCI functions. */
+struct spci_value {
+ uint64_t func;
+ uint64_t arg1;
+ uint64_t arg2;
+ uint64_t arg3;
+ uint64_t arg4;
+ uint64_t arg5;
+ uint64_t arg6;
+ uint64_t arg7;
+};
+
+/** Extracts the sender VM ID from the high 16 bits of arg1 of SPCI_MSG_SEND. */
+static inline spci_vm_id_t spci_msg_send_sender(struct spci_value args)
+{
+	return (args.arg1 >> 16) & 0xffff;
+}
+
+/** Extracts the receiver VM ID from the low 16 bits of arg1 of SPCI_MSG_SEND. */
+static inline spci_vm_id_t spci_msg_send_receiver(struct spci_value args)
+{
+	return args.arg1 & 0xffff;
+}
+
+/** Extracts the message size from arg3 of an SPCI_MSG_SEND invocation. */
+static inline uint32_t spci_msg_send_size(struct spci_value args)
+{
+	return args.arg3;
+}
+
+/** Extracts the message attributes from arg4 of an SPCI_MSG_SEND invocation. */
+static inline uint32_t spci_msg_send_attributes(struct spci_value args)
+{
+	return args.arg4;
+}
+
+/** Extracts the VM ID from the high 16 bits of arg1 (see spci_vm_vcpu). */
+static inline spci_vm_id_t spci_vm_id(struct spci_value args)
+{
+	return (args.arg1 >> 16) & 0xffff;
+}
+
+/** Extracts the vCPU index from the low 16 bits of arg1 (see spci_vm_vcpu). */
+static inline spci_vcpu_index_t spci_vcpu_index(struct spci_value args)
+{
+	return args.arg1 & 0xffff;
+}
+
+/**
+ * Packs a VM ID and vCPU index into a single register value: VM ID in the
+ * high 16 bits, vCPU index in the low 16 bits. Inverse of spci_vm_id() and
+ * spci_vcpu_index().
+ */
+static inline uint64_t spci_vm_vcpu(spci_vm_id_t vm_id,
+				    spci_vcpu_index_t vcpu_index)
+{
+	return ((uint32_t)vm_id << 16) | vcpu_index;
+}
+
+struct spci_memory_region_constituent {
+ /**
+ * The base IPA of the constituent memory region, aligned to 4 kiB page
+ * size granularity.
+ */
+ uint64_t address;
+ /** The number of 4 kiB pages in the constituent memory region. */
+ uint32_t page_count;
+
+ uint32_t reserved;
+};
+
+struct spci_memory_region_attributes {
+ /** The ID of the VM to which the memory is being given or shared. */
+ spci_vm_id_t receiver;
+ /**
+ * The attributes with which the memory region should be mapped in the
+ * receiver's page table.
+ */
+ uint16_t memory_attributes;
+};
+
+/** Flags to control the behaviour of a memory sharing transaction. */
+typedef uint32_t spci_memory_region_flags_t;
+
+/**
+ * Clear memory region contents after unmapping it from the sender and before
+ * mapping it for any receiver.
+ */
+#define SPCI_MEMORY_REGION_FLAG_CLEAR 0x1
+
+struct spci_memory_region {
+ /**
+ * An implementation defined value associated with the receiver and the
+ * memory region.
+ */
+ uint32_t tag;
+ /** Flags to control behaviour of the transaction. */
+ spci_memory_region_flags_t flags;
+ /** Sender VM ID. */
+ spci_vm_id_t sender;
+ /** Reserved field, must be 0. */
+ uint16_t reserved;
+ /**
+ * The total number of 4 kiB pages included in this memory region. This
+ * must be equal to the sum of page counts specified in each
+ * `spci_memory_region_constituent`.
+ */
+ uint32_t page_count;
+ /**
+ * The number of constituents (`spci_memory_region_constituent`)
+ * included in this memory region.
+ */
+ uint32_t constituent_count;
+ /**
+ * The offset in bytes from the base address of this
+ * `spci_memory_region` to the start of the first
+ * `spci_memory_region_constituent`.
+ */
+ uint32_t constituent_offset;
+ /**
+ * The number of `spci_memory_region_attributes` entries included in
+ * this memory region.
+ */
+ uint32_t attribute_count;
+ /**
+ * An array of `attribute_count` memory region attribute descriptors.
+ * Each one specifies an endpoint and the attributes with which this
+ * memory region should be mapped in that endpoint's page table.
+ */
+ struct spci_memory_region_attributes attributes[];
+};
+
+/**
+ * Gets the constituent array for an `spci_memory_region`.
+ */
+static inline struct spci_memory_region_constituent *
+spci_memory_region_get_constituents(struct spci_memory_region *memory_region)
+{
+ return (struct spci_memory_region_constituent
+ *)((uint8_t *)memory_region +
+ memory_region->constituent_offset);
+}
+
+uint32_t spci_memory_region_init(
+ struct spci_memory_region *memory_region, spci_vm_id_t sender,
+ spci_vm_id_t receiver,
+ const struct spci_memory_region_constituent constituents[],
+ uint32_t constituent_count, uint32_t tag,
+ spci_memory_region_flags_t flags, enum spci_memory_access access,
+ enum spci_memory_type type, enum spci_memory_cacheability cacheability,
+ enum spci_memory_shareability shareability);
diff --git a/inc/vmapi/hf/transport.h b/inc/vmapi/hf/transport.h
new file mode 100644
index 0000000..686143c
--- /dev/null
+++ b/inc/vmapi/hf/transport.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright 2019 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+/**
+ * Header for Hafnium messages
+ *
+ * NOTE: This is a work in progress. The final form of a Hafnium message header
+ * is likely to change.
+ */
+struct hf_msg_hdr {
+ uint64_t src_port;
+ uint64_t dst_port;
+};
diff --git a/inc/vmapi/hf/types.h b/inc/vmapi/hf/types.h
new file mode 100644
index 0000000..1f0d5d4
--- /dev/null
+++ b/inc/vmapi/hf/types.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright 2018 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+/* Define the standard types for the platform. */
+#if defined(__linux__) && defined(__KERNEL__)
+
+#include <linux/types.h>
+
+#define INT32_C(c) c
+
+typedef phys_addr_t hf_ipaddr_t;
+
+#else
+
+#include <stdbool.h>
+#include <stddef.h>
+#include <stdint.h>
+
+typedef uintptr_t hf_ipaddr_t;
+
+#endif
+
+/**
+ * An offset to use when assigning VM IDs.
+ * The offset is needed because VM ID 0 is reserved.
+ */
+#define HF_VM_ID_OFFSET 1
+
+/**
+ * The index and ID of the primary VM, which is responsible for scheduling.
+ *
+ * These are not equal because ID 0 is reserved for the hypervisor itself.
+ * Primary VM therefore gets ID 1 and all other VMs come after that.
+ */
+#define HF_PRIMARY_VM_INDEX 0
+#define HF_PRIMARY_VM_ID (HF_VM_ID_OFFSET + HF_PRIMARY_VM_INDEX)
+
+/** Sleep value for an indefinite period of time. */
+#define HF_SLEEP_INDEFINITE 0xffffffffffffffff
+
+/** The amount of data that can be sent to a mailbox. */
+#define HF_MAILBOX_SIZE 4096
+
+/** The number of virtual interrupt IDs which are supported. */
+#define HF_NUM_INTIDS 64
+
+/** Interrupt ID returned when there is no interrupt pending. */
+#define HF_INVALID_INTID 0xffffffff
+
+/** Interrupt ID indicating the mailbox is readable. */
+#define HF_MAILBOX_READABLE_INTID 1
+
+/** Interrupt ID indicating a mailbox is writable. */
+#define HF_MAILBOX_WRITABLE_INTID 2
+
+/** The virtual interrupt ID used for the virtual timer. */
+#define HF_VIRTUAL_TIMER_INTID 3
diff --git a/kokoro/ubuntu/build.sh b/kokoro/ubuntu/build.sh
new file mode 100755
index 0000000..ecedad6
--- /dev/null
+++ b/kokoro/ubuntu/build.sh
@@ -0,0 +1,188 @@
+#!/bin/bash
+#
+# Copyright 2018 The Hafnium Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
+SCRIPT_PATH="${SCRIPT_DIR}/$(basename "${BASH_SOURCE[0]}")"
+ROOT_DIR="$(dirname $(dirname "${SCRIPT_DIR}"))"
+
+REPO="${ROOT_DIR}/prebuilts/generic/repo/repo"
+
+# Fail on any error.
+set -e
+# Fail on any part of a pipeline failing.
+set -o pipefail
+# Treat unset variables as an error.
+set -u
+# Display commands being run.
+set -x
+
+# Returns true if `git status` reports uncommitted changes in the source tree.
+function is_repo_dirty() {
+	local cmd=(git status --porcelain=v1)
+	if [ -d ".repo" ]
+	then
+		# This source tree was checked out using `repo`. Check the
+		# status of all projects.
+		# NOTE(review): ${cmd[@]} is expanded unquoted on purpose so
+		# the command splits into separate words; assumes no paths
+		# with whitespace in the CI checkout — confirm.
+		cmd=(${REPO} forall -c "${cmd[@]}")
+	fi
+	# Dirty iff the (possibly aggregated) status output is non-empty.
+	! (u="$(${cmd[@]})" && test -z "$u")
+	return $?
+}
+
+# Assigns value (second arg) of a variable (first arg) if it is not set already.
+function default_value {
+ local var_name=$1
+ local value=$2
+ export ${var_name}=${!var_name:-${value}}
+}
+
+# Assign default values to variables.
+if [ -v KOKORO_JOB_NAME ]
+then
+ # Default config for Kokoro builds.
+ default_value HAFNIUM_HERMETIC_BUILD true
+ default_value HAFNIUM_SKIP_LONG_RUNNING_TESTS false
+ default_value HAFNIUM_RUN_ALL_QEMU_CPUS true
+else
+ # Default config for local builds.
+ default_value HAFNIUM_HERMETIC_BUILD false
+ default_value HAFNIUM_SKIP_LONG_RUNNING_TESTS true
+ default_value HAFNIUM_RUN_ALL_QEMU_CPUS false
+fi
+
+# If HAFNIUM_HERMETIC_BUILD is "true", relaunch this script inside a container.
+# The 'run_in_container.sh' script will set the variable value to 'inside' to
+# avoid recursion.
+if [ "${HAFNIUM_HERMETIC_BUILD}" == "true" ]
+then
+ exec "${ROOT_DIR}/build/run_in_container.sh" ${SCRIPT_PATH} $@
+fi
+
+USE_FVP=false
+
+while test $# -gt 0
+do
+ case "$1" in
+ --fvp)
+ USE_FVP=true
+ ;;
+ --skip-long-running-tests)
+ HAFNIUM_SKIP_LONG_RUNNING_TESTS=true
+ ;;
+ --run-all-qemu-cpus)
+ HAFNIUM_RUN_ALL_QEMU_CPUS=true
+ ;;
+ *)
+ echo "Unexpected argument $1"
+ exit 1
+ ;;
+ esac
+ shift
+done
+
+CLANG=${PWD}/prebuilts/linux-x64/clang/bin/clang
+
+# Kokoro does something weird that makes all files look dirty to git diff-index;
+# this fixes it so that the Linux build doesn't think it has a dirty tree for
+# building the Hafnium kernel module (and so end up with a version magic string
+# that doesn't match the prebuilt kernel).
+(
+ cd third_party/linux &&
+ git status
+)
+
+#
+# Step 1: make sure it builds.
+#
+
+for proj in $(cd project/ && ls)
+do
+ make PROJECT=${proj}
+done
+
+#
+# Step 2: make sure it works.
+#
+
+TEST_ARGS=()
+if [ $USE_FVP == true ]
+then
+ TEST_ARGS+=(--fvp)
+fi
+if [ "${HAFNIUM_SKIP_LONG_RUNNING_TESTS}" == "true" ]
+then
+ TEST_ARGS+=(--skip-long-running-tests)
+fi
+if [ "${HAFNIUM_RUN_ALL_QEMU_CPUS}" == "true" ]
+then
+ TEST_ARGS+=(--run-all-qemu-cpus)
+fi
+./kokoro/ubuntu/test.sh ${TEST_ARGS[@]}
+
+#
+# Step 3: static analysis.
+#
+
+make check
+if is_repo_dirty
+then
+ echo "Run \`make check\' locally to fix this."
+ exit 1
+fi
+
+#
+# Step 4: make sure the code looks good.
+#
+
+make format
+if is_repo_dirty
+then
+ echo "Run \`make format\' locally to fix this."
+ exit 1
+fi
+
+make checkpatch
+
+#
+# Step 5: make sure there's no lint.
+#
+
+make tidy
+if is_repo_dirty
+then
+ echo "Run \`make tidy\' locally to fix this."
+ exit 1
+fi
+
+#
+# Step 6: make sure all the files have a license.
+#
+
+make license
+if is_repo_dirty
+then
+ echo "Run \`make license\' locally to fix this."
+ exit 1
+fi
+
+# Step 7: make sure the Linux driver maintains style. It's already built as part
+# of the tests.
+(
+export ARCH=arm64 &&
+export CROSS_COMPILE=aarch64-linux-gnu- &&
+cd driver/linux &&
+make checkpatch
+)
diff --git a/kokoro/ubuntu/continuous.cfg b/kokoro/ubuntu/continuous.cfg
new file mode 100644
index 0000000..cacd607
--- /dev/null
+++ b/kokoro/ubuntu/continuous.cfg
@@ -0,0 +1,14 @@
+# Format: //devtools/kokoro/config/proto/build.proto
+
+# Location of the continuous bash script in Git.
+build_file: "hafnium/kokoro/ubuntu/build.sh"
+
+action {
+ define_artifacts {
+ regex: "git/hafnium/out/**/kokoro_log/**/*sponge_log.log"
+ regex: "git/hafnium/out/**/kokoro_log/**/*sponge_log.xml"
+ strip_prefix: "git/hafnium"
+ }
+}
+
+timeout_mins: 10
diff --git a/kokoro/ubuntu/presubmit.cfg b/kokoro/ubuntu/presubmit.cfg
new file mode 100644
index 0000000..7e56cce
--- /dev/null
+++ b/kokoro/ubuntu/presubmit.cfg
@@ -0,0 +1,14 @@
+# Format: //devtools/kokoro/config/proto/build.proto
+
+# Location of the presubmit bash script in Git.
+build_file: "hafnium/kokoro/ubuntu/build.sh"
+
+action {
+ define_artifacts {
+ regex: "git/hafnium/out/**/kokoro_log/**/*sponge_log.log"
+ regex: "git/hafnium/out/**/kokoro_log/**/*sponge_log.xml"
+ strip_prefix: "git/hafnium"
+ }
+}
+
+timeout_mins: 10
diff --git a/kokoro/ubuntu/test.sh b/kokoro/ubuntu/test.sh
new file mode 100755
index 0000000..0dcd187
--- /dev/null
+++ b/kokoro/ubuntu/test.sh
@@ -0,0 +1,102 @@
+#!/bin/bash
+#
+# Copyright 2018 The Hafnium Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Note: this assumes that the images have all been built and the current working
+# directory is the root of the repo.
+
+# Fail on any error.
+set -e
+# Fail on any part of a pipeline failing.
+set -o pipefail
+# Treat unset variables as an error.
+set -u
+# Display commands being run.
+set -x
+
+USE_FVP=false
+SKIP_LONG_RUNNING_TESTS=false
+RUN_ALL_QEMU_CPUS=false
+
+while test $# -gt 0
+do
+ case "$1" in
+ --fvp) USE_FVP=true
+ ;;
+ --skip-long-running-tests) SKIP_LONG_RUNNING_TESTS=true
+ ;;
+ --run-all-qemu-cpus) RUN_ALL_QEMU_CPUS=true
+ ;;
+ *) echo "Unexpected argument $1"
+ exit 1
+ ;;
+ esac
+ shift
+done
+
+TIMEOUT=(timeout --foreground)
+PROJECT="${PROJECT:-reference}"
+OUT="out/${PROJECT}"
+LOG_DIR_BASE="${OUT}/kokoro_log"
+
+# Run the tests with a timeout so they can't loop forever.
+HFTEST=(${TIMEOUT[@]} 300s ./test/hftest/hftest.py)
+if [ $USE_FVP == true ]
+then
+ HFTEST+=(--fvp)
+ HFTEST+=(--out "$OUT/aem_v8a_fvp_clang")
+ HFTEST+=(--out_initrd "$OUT/aem_v8a_fvp_vm_clang")
+else
+ HFTEST+=(--out "$OUT/qemu_aarch64_clang")
+ HFTEST+=(--out_initrd "$OUT/qemu_aarch64_vm_clang")
+fi
+if [ $SKIP_LONG_RUNNING_TESTS == true ]
+then
+ HFTEST+=(--skip-long-running-tests)
+fi
+
+# Add prebuilt libc++ to the path.
+export LD_LIBRARY_PATH="$PWD/prebuilts/linux-x64/clang/lib64"
+
+# Run the host unit tests.
+mkdir -p "${LOG_DIR_BASE}/unit_tests"
+${TIMEOUT[@]} 30s "$OUT/host_fake_clang/unit_tests" \
+ --gtest_output="xml:${LOG_DIR_BASE}/unit_tests/sponge_log.xml" \
+ | tee "${LOG_DIR_BASE}/unit_tests/sponge_log.log"
+
+CPUS=("")
+
+if [ $RUN_ALL_QEMU_CPUS == true ]
+then
+ CPUS=("cortex-a53" "max")
+fi
+
+for CPU in "${CPUS[@]}"
+do
+ HFTEST_CPU=("${HFTEST[@]}")
+ if [ -n "$CPU" ]
+ then
+ # Per-CPU log directory to avoid filename conflicts.
+ HFTEST_CPU+=(--cpu "$CPU" --log "$LOG_DIR_BASE/$CPU")
+ else
+ HFTEST_CPU+=(--log "$LOG_DIR_BASE")
+ fi
+ "${HFTEST_CPU[@]}" arch_test
+ "${HFTEST_CPU[@]}" hafnium --initrd test/vmapi/arch/aarch64/aarch64_test
+ "${HFTEST_CPU[@]}" hafnium --initrd test/vmapi/arch/aarch64/gicv3/gicv3_test
+ "${HFTEST_CPU[@]}" hafnium --initrd test/vmapi/primary_only/primary_only_test
+ "${HFTEST_CPU[@]}" hafnium --initrd test/vmapi/primary_with_secondaries/primary_with_secondaries_test
+ "${HFTEST_CPU[@]}" hafnium --initrd test/linux/linux_test --vm_args "rdinit=/test_binary --"
+done
diff --git a/navbar.md b/navbar.md
new file mode 100644
index 0000000..f0835ab
--- /dev/null
+++ b/navbar.md
@@ -0,0 +1,8 @@
+# Hafnium
+
+* [Getting started](/docs/GettingStarted.md)
+* [Architecture](/docs/Architecture.md)
+* [Coding style](/docs/StyleGuide.md)
+* [Documentation index](/docs/README.md)
+
+[home]: /README.md
diff --git a/prebuilts b/prebuilts
new file mode 160000
index 0000000..6fcd301
--- /dev/null
+++ b/prebuilts
@@ -0,0 +1 @@
+Subproject commit 6fcd30188886b8cdd7cdcd1de8be813914eb9015
diff --git a/project/reference b/project/reference
new file mode 160000
index 0000000..e2a525d
--- /dev/null
+++ b/project/reference
@@ -0,0 +1 @@
+Subproject commit e2a525d24927401f8b2d190f234af6ea24c85664
diff --git a/src/BUILD.gn b/src/BUILD.gn
new file mode 100644
index 0000000..1437908
--- /dev/null
+++ b/src/BUILD.gn
@@ -0,0 +1,175 @@
+# Copyright 2018 The Hafnium Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import("//build/image/image.gni")
+import("//build/toolchain/platform.gni")
+
+# The hypervisor image.
+hypervisor("hafnium") {
+ deps = [
+ ":layout",
+ ":src_not_testable_yet",
+ ]
+}
+
+# Hypervisor specific code that isn't. One day it will be testable and both the
+# src targets will merge!
+source_set("src_not_testable_yet") {
+ sources = [
+ "cpio.c",
+ "init.c",
+ "load.c",
+ "main.c",
+ ]
+ deps = [
+ ":src_testable",
+ "//project/${project}/${plat_name}",
+ plat_boot_flow,
+ plat_console,
+ plat_iommu,
+ ]
+}
+
+# One day, this will contain all the hypervisor's source but only once it can
+# all be built against the fake arch for unit tests. Utilities that are shared
+# e.g. with VM used in the VM tests have their own targets to facilitate
+# sharing.
+source_set("src_testable") {
+ sources = [
+ "abort.c",
+ "api.c",
+ "cpu.c",
+ "manifest.c",
+ "panic.c",
+ "spci_architected_message.c",
+ "string.c",
+ "vcpu.c",
+ "vm.c",
+ ]
+
+ deps = [
+ ":fdt",
+ ":fdt_handler",
+ ":memiter",
+ ":mm",
+ ":std",
+ "//src/arch/${plat_arch}/hypervisor",
+ plat_boot_flow,
+ plat_console,
+ plat_iommu,
+ ]
+
+ if (is_debug) {
+ deps += [ ":dlog" ]
+ }
+}
+
+source_set("layout") {
+ sources = [
+ "layout.c",
+ ]
+}
+
+source_set("mm") {
+ sources = [
+ "mm.c",
+ "mpool.c",
+ ]
+}
+
+# Standard library functions.
+source_set("std") {
+ sources = [
+ "std.c",
+ ]
+
+ deps = [
+ "//src/arch/${plat_arch}:std",
+ ]
+}
+
+# Debug code that is not specific to a certain image so can be shared.
+source_set("dlog") {
+ sources = [
+ "dlog.c",
+ ]
+
+ deps = [
+ ":std",
+ plat_console,
+ ]
+}
+
+source_set("fdt_handler") {
+ sources = [
+ "fdt_handler.c",
+ ]
+ deps = [
+ ":fdt",
+ ]
+
+ if (is_debug) {
+ deps += [ ":dlog" ]
+ }
+}
+
+# Flattened Device Tree (FDT) utilities.
+source_set("fdt") {
+ sources = [
+ "fdt.c",
+ ]
+
+ deps = [
+ ":std",
+ ]
+
+ if (is_debug) {
+ deps += [ ":dlog" ]
+ }
+}
+
+source_set("memiter") {
+ sources = [
+ "memiter.c",
+ ]
+}
+
+source_set("panic") {
+ sources = [
+ "panic.c",
+ ]
+}
+
+executable("unit_tests") {
+ testonly = true
+ sources = [
+ "api_test.cc",
+ "fdt_handler_test.cc",
+ "fdt_test.cc",
+ "manifest_test.cc",
+ "mm_test.cc",
+ "mpool_test.cc",
+ "string_test.cc",
+ "vm_test.cc",
+ ]
+ sources += [ "layout_fake.c" ]
+ cflags_cc = [
+ "-Wno-c99-extensions",
+ "-Wno-nested-anon-types",
+ ]
+ deps = [
+ ":src_testable",
+ "//third_party/googletest:gtest_main",
+ ]
+}
diff --git a/src/abort.c b/src/abort.c
new file mode 100644
index 0000000..8d9d723
--- /dev/null
+++ b/src/abort.c
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2019 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "hf/abort.h"
+
+#include "hf/dlog.h"
+
+/**
+ * Causes execution to halt and prevent progress of the current and less
+ * privileged software components. This should be triggered when a
+ * non-recoverable event is identified which leaves the system in an
+ * inconsistent state.
+ *
+ * TODO: Should this also reset the system?
+ */
+noreturn void abort(void)
+{
+ /* TODO: Block all CPUs. */
+ for (;;) {
+ /* Prevent loop being optimized away. */
+ __asm__ volatile("nop");
+ }
+}
diff --git a/src/api.c b/src/api.c
new file mode 100644
index 0000000..d8ced97
--- /dev/null
+++ b/src/api.c
@@ -0,0 +1,1458 @@
+/*
+ * Copyright 2018 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "hf/api.h"
+
+#include "hf/arch/cpu.h"
+#include "hf/arch/timer.h"
+
+#include "hf/check.h"
+#include "hf/dlog.h"
+#include "hf/mm.h"
+#include "hf/plat/console.h"
+#include "hf/spci_internal.h"
+#include "hf/spinlock.h"
+#include "hf/static_assert.h"
+#include "hf/std.h"
+#include "hf/vm.h"
+
+#include "vmapi/hf/call.h"
+#include "vmapi/hf/spci.h"
+
+/*
+ * To eliminate the risk of deadlocks, we define a partial order for the
+ * acquisition of locks held concurrently by the same physical CPU. Our current
+ * ordering requirements are as follows:
+ *
+ * vm::lock -> vcpu::lock -> mm_stage1_lock -> dlog sl
+ *
+ * Locks of the same kind require the lock of lowest address to be locked first,
+ * see `sl_lock_both()`.
+ */
+
+static_assert(HF_MAILBOX_SIZE == PAGE_SIZE,
+ "Currently, a page is mapped for the send and receive buffers so "
+ "the maximum request is the size of a page.");
+
+/* Page pool backing API operations; mappings below draw memory from it. */
+static struct mpool api_page_pool;
+
+/**
+ * Initialises the API page pool by taking ownership of the contents of the
+ * given page pool.
+ */
+void api_init(struct mpool *ppool)
+{
+ /* Move the contents of `ppool` into the API's own pool. */
+ mpool_init_from(&api_page_pool, ppool);
+}
+
+/**
+ * Switches the physical CPU back to the corresponding vCPU of the primary VM.
+ *
+ * This triggers the scheduling logic to run. Run in the context of secondary VM
+ * to cause SPCI_RUN to return and the primary VM to regain control of the CPU.
+ *
+ * `primary_ret` is the value handed back to the primary's pending call and
+ * `secondary_state` is the state the current (secondary) vCPU is left in.
+ */
+static struct vcpu *api_switch_to_primary(struct vcpu *current,
+ struct spci_value primary_ret,
+ enum vcpu_state secondary_state)
+{
+ struct vm *primary = vm_find(HF_PRIMARY_VM_ID);
+ struct vcpu *next = vm_get_vcpu(primary, cpu_index(current->cpu));
+
+ /*
+ * If the secondary is blocked but has a timer running, sleep until the
+ * timer fires rather than indefinitely.
+ */
+ switch (primary_ret.func) {
+ case HF_SPCI_RUN_WAIT_FOR_INTERRUPT:
+ case SPCI_MSG_WAIT_32: {
+ if (arch_timer_enabled_current()) {
+ uint64_t remaining_ns =
+ arch_timer_remaining_ns_current();
+
+ if (remaining_ns == 0) {
+ /*
+ * Timer is pending, so the current vCPU should
+ * be run again right away.
+ */
+ primary_ret.func = SPCI_INTERRUPT_32;
+ /*
+ * primary_ret.arg1 should already be set to the
+ * current VM ID and vCPU ID.
+ */
+ primary_ret.arg2 = 0;
+ } else {
+ primary_ret.arg2 = remaining_ns;
+ }
+ } else {
+ primary_ret.arg2 = SPCI_SLEEP_INDEFINITE;
+ }
+ break;
+ }
+
+ default:
+ /* Do nothing. */
+ break;
+ }
+
+ /* Set the return value for the primary VM's call to HF_VCPU_RUN. */
+ arch_regs_set_retval(&next->regs, primary_ret);
+
+ /* Mark the current vCPU as waiting. */
+ sl_lock(&current->lock);
+ current->state = secondary_state;
+ sl_unlock(&current->lock);
+
+ return next;
+}
+
+/**
+ * Returns to the primary VM and signals that the vCPU still has work to do.
+ * The current vCPU stays READY so the scheduler may run it again.
+ */
+struct vcpu *api_preempt(struct vcpu *current)
+{
+ struct spci_value ret = {
+ .func = SPCI_INTERRUPT_32,
+ .arg1 = spci_vm_vcpu(current->vm->id, vcpu_index(current)),
+ };
+
+ return api_switch_to_primary(current, ret, VCPU_STATE_READY);
+}
+
+/**
+ * Puts the current vCPU in wait for interrupt mode, and returns to the primary
+ * VM. The vCPU is left in VCPU_STATE_BLOCKED_INTERRUPT.
+ */
+struct vcpu *api_wait_for_interrupt(struct vcpu *current)
+{
+ struct spci_value ret = {
+ .func = HF_SPCI_RUN_WAIT_FOR_INTERRUPT,
+ .arg1 = spci_vm_vcpu(current->vm->id, vcpu_index(current)),
+ };
+
+ return api_switch_to_primary(current, ret,
+ VCPU_STATE_BLOCKED_INTERRUPT);
+}
+
+/**
+ * Puts the current vCPU in off mode, and returns to the primary VM.
+ * Returns the primary vCPU to switch to.
+ */
+struct vcpu *api_vcpu_off(struct vcpu *current)
+{
+ struct spci_value ret = {
+ .func = HF_SPCI_RUN_WAIT_FOR_INTERRUPT,
+ .arg1 = spci_vm_vcpu(current->vm->id, vcpu_index(current)),
+ };
+
+ /*
+ * Disable the timer, so the scheduler doesn't get told to call back
+ * based on it.
+ */
+ arch_timer_disable_current();
+
+ return api_switch_to_primary(current, ret, VCPU_STATE_OFF);
+}
+
+/**
+ * Returns to the primary VM to allow this CPU to be used for other tasks as the
+ * vCPU does not have work to do at this moment. The current vCPU is marked as
+ * ready to be scheduled again.
+ *
+ * `*next` is only written for secondary VMs; for the primary this is a no-op.
+ */
+void api_yield(struct vcpu *current, struct vcpu **next)
+{
+ struct spci_value primary_ret = {
+ .func = SPCI_YIELD_32,
+ .arg1 = spci_vm_vcpu(current->vm->id, vcpu_index(current)),
+ };
+
+ if (current->vm->id == HF_PRIMARY_VM_ID) {
+ /* NOOP on the primary as it makes the scheduling decisions. */
+ return;
+ }
+
+ *next = api_switch_to_primary(current, primary_ret, VCPU_STATE_READY);
+}
+
+/**
+ * Switches to the primary so that it can switch to the target, or kick it if it
+ * is already running on a different physical CPU.
+ *
+ * Returns the primary vCPU to run next; the current vCPU stays READY.
+ */
+struct vcpu *api_wake_up(struct vcpu *current, struct vcpu *target_vcpu)
+{
+ struct spci_value ret = {
+ .func = HF_SPCI_RUN_WAKE_UP,
+ .arg1 = spci_vm_vcpu(target_vcpu->vm->id,
+ vcpu_index(target_vcpu)),
+ };
+ return api_switch_to_primary(current, ret, VCPU_STATE_READY);
+}
+
+/**
+ * Aborts the vCPU and triggers its VM to abort fully: the VM's `aborting`
+ * flag is set and SPCI_ABORTED is reported back to the primary. If the
+ * primary itself aborts, this spins forever (see TODO).
+ */
+struct vcpu *api_abort(struct vcpu *current)
+{
+ struct spci_value ret = spci_error(SPCI_ABORTED);
+
+ dlog("Aborting VM %u vCPU %u\n", current->vm->id, vcpu_index(current));
+
+ if (current->vm->id == HF_PRIMARY_VM_ID) {
+ /* TODO: what to do when the primary aborts? */
+ for (;;) {
+ /* Do nothing. */
+ }
+ }
+
+ atomic_store_explicit(&current->vm->aborting, true,
+ memory_order_relaxed);
+
+ /* TODO: free resources once all vCPUs abort. */
+
+ return api_switch_to_primary(current, ret, VCPU_STATE_ABORTED);
+}
+
+/**
+ * Returns the ID of the calling VM in arg2 of an SPCI_SUCCESS value.
+ */
+struct spci_value api_spci_id_get(const struct vcpu *current)
+{
+ struct spci_value ret = {
+ .func = SPCI_SUCCESS_32,
+ .arg2 = current->vm->id,
+ };
+
+ return ret;
+}
+
+/**
+ * Returns the number of VMs configured to run.
+ * Thin wrapper around vm_get_count().
+ */
+spci_vm_count_t api_vm_get_count(void)
+{
+ return vm_get_count();
+}
+
+/**
+ * Returns the number of vCPUs configured in the given VM, or 0 if there is no
+ * such VM or the caller is not the primary VM.
+ */
+spci_vcpu_count_t api_vcpu_get_count(spci_vm_id_t vm_id,
+ const struct vcpu *current)
+{
+ struct vm *vm;
+
+ /* Only the primary VM needs to know about vCPUs for scheduling. */
+ if (current->vm->id != HF_PRIMARY_VM_ID) {
+ return 0;
+ }
+
+ /* 0 also signals "no such VM". */
+ vm = vm_find(vm_id);
+ if (vm == NULL) {
+ return 0;
+ }
+
+ return vm->vcpu_count;
+}
+
+/**
+ * This function is called by the architecture-specific context switching
+ * function to indicate that register state for the given vCPU has been saved
+ * and can therefore be used by other pCPUs.
+ */
+void api_regs_state_saved(struct vcpu *vcpu)
+{
+ /* The vCPU lock guards the regs_available flag. */
+ sl_lock(&vcpu->lock);
+ vcpu->regs_available = true;
+ sl_unlock(&vcpu->lock);
+}
+
+/**
+ * Retrieves the next waiter and removes it from the wait list if the VM's
+ * mailbox is in a writable state.
+ *
+ * Returns NULL when the mailbox is not writable or no waiters are queued.
+ * The caller must hold the VM lock (it receives a `struct vm_locked`).
+ */
+static struct wait_entry *api_fetch_waiter(struct vm_locked locked_vm)
+{
+ struct wait_entry *entry;
+ struct vm *vm = locked_vm.vm;
+
+ if (vm->mailbox.state != MAILBOX_STATE_EMPTY ||
+ vm->mailbox.recv == NULL || list_empty(&vm->mailbox.waiter_list)) {
+ /* The mailbox is not writable or there are no waiters. */
+ return NULL;
+ }
+
+ /* Remove waiter from the wait list. */
+ entry = CONTAINER_OF(vm->mailbox.waiter_list.next, struct wait_entry,
+ wait_links);
+ list_remove(&entry->wait_links);
+ return entry;
+}
+
+/**
+ * Assuming that the arguments have already been checked by the caller, injects
+ * a virtual interrupt of the given ID into the given target vCPU. This doesn't
+ * cause the vCPU to actually be run immediately; it will be taken when the vCPU
+ * is next run, which is up to the scheduler.
+ *
+ * The pending bit for `intid` is always set, even on the early-out paths.
+ *
+ * Returns:
+ * - 0 on success if no further action is needed.
+ * - 1 if it was called by the primary VM and the primary VM now needs to wake
+ * up or kick the target vCPU.
+ */
+static int64_t internal_interrupt_inject(struct vcpu *target_vcpu,
+ uint32_t intid, struct vcpu *current,
+ struct vcpu **next)
+{
+ uint32_t intid_index = intid / INTERRUPT_REGISTER_BITS;
+ uint32_t intid_mask = 1U << (intid % INTERRUPT_REGISTER_BITS);
+ int64_t ret = 0;
+
+ sl_lock(&target_vcpu->lock);
+
+ /*
+ * We only need to change state and (maybe) trigger a virtual IRQ if it
+ * is enabled and was not previously pending. Otherwise we can skip
+ * everything except setting the pending bit.
+ *
+ * If you change this logic make sure to update the need_vm_lock logic
+ * above to match.
+ */
+ if (!(target_vcpu->interrupts.interrupt_enabled[intid_index] &
+ ~target_vcpu->interrupts.interrupt_pending[intid_index] &
+ intid_mask)) {
+ goto out;
+ }
+
+ /* Increment the count. */
+ target_vcpu->interrupts.enabled_and_pending_count++;
+
+ /*
+ * Only need to update state if there was not already an
+ * interrupt enabled and pending.
+ */
+ if (target_vcpu->interrupts.enabled_and_pending_count != 1) {
+ goto out;
+ }
+
+ if (current->vm->id == HF_PRIMARY_VM_ID) {
+ /*
+ * If the call came from the primary VM, let it know that it
+ * should run or kick the target vCPU.
+ */
+ ret = 1;
+ } else if (current != target_vcpu && next != NULL) {
+ *next = api_wake_up(current, target_vcpu);
+ }
+
+out:
+ /* Either way, make it pending. */
+ target_vcpu->interrupts.interrupt_pending[intid_index] |= intid_mask;
+
+ sl_unlock(&target_vcpu->lock);
+
+ return ret;
+}
+
+/**
+ * Constructs an SPCI_MSG_SEND value to return from a successful SPCI_MSG_POLL
+ * or SPCI_MSG_WAIT call.
+ *
+ * arg1 packs the sender ID in the high 16 bits and the receiver ID below.
+ */
+static struct spci_value spci_msg_recv_return(const struct vm *receiver)
+{
+ return (struct spci_value){
+ .func = SPCI_MSG_SEND_32,
+ .arg1 = (receiver->mailbox.recv_sender << 16) | receiver->id,
+ .arg3 = receiver->mailbox.recv_size,
+ .arg4 = receiver->mailbox.recv_attributes};
+}
+
+/**
+ * Prepares the vCPU to run by updating its state and fetching whether a return
+ * value needs to be forced onto the vCPU.
+ *
+ * Returns true when the vCPU may be run (its state is set to RUNNING and its
+ * registers claimed); returns false otherwise, in which case `*run_ret` may
+ * have been filled in with the value to hand back to the primary.
+ */
+static bool api_vcpu_prepare_run(const struct vcpu *current, struct vcpu *vcpu,
+ struct spci_value *run_ret)
+{
+ bool need_vm_lock;
+ bool ret;
+
+ /*
+ * Check that the registers are available so that the vCPU can be run.
+ *
+ * The VM lock is not needed in the common case so it must only be taken
+ * when it is going to be needed. This ensures there are no inter-vCPU
+ * dependencies in the common run case meaning the sensitive context
+ * switch performance is consistent.
+ */
+ sl_lock(&vcpu->lock);
+
+ /* The VM needs to be locked to deliver mailbox messages. */
+ need_vm_lock = vcpu->state == VCPU_STATE_BLOCKED_MAILBOX;
+ if (need_vm_lock) {
+ /* Drop and retake to respect the vm -> vcpu lock order. */
+ sl_unlock(&vcpu->lock);
+ sl_lock(&vcpu->vm->lock);
+ sl_lock(&vcpu->lock);
+ }
+
+ /*
+ * If the vCPU is already running somewhere then we can't run it here
+ * simultaneously. While it is actually running then the state should be
+ * `VCPU_STATE_RUNNING` and `regs_available` should be false. Once it
+ * stops running but while Hafnium is in the process of switching back
+ * to the primary there will be a brief period while the state has been
+ * updated but `regs_available` is still false (until
+ * `api_regs_state_saved` is called). We can't start running it again
+ * until this has finished, so count this state as still running for the
+ * purposes of this check.
+ */
+ if (vcpu->state == VCPU_STATE_RUNNING || !vcpu->regs_available) {
+ /*
+ * vCPU is running on another pCPU.
+ *
+ * It's okay not to return the sleep duration here because the
+ * other physical CPU that is currently running this vCPU will
+ * return the sleep duration if needed.
+ */
+ *run_ret = spci_error(SPCI_BUSY);
+ ret = false;
+ goto out;
+ }
+
+ if (atomic_load_explicit(&vcpu->vm->aborting, memory_order_relaxed)) {
+ if (vcpu->state != VCPU_STATE_ABORTED) {
+ dlog("Aborting VM %u vCPU %u\n", vcpu->vm->id,
+ vcpu_index(vcpu));
+ vcpu->state = VCPU_STATE_ABORTED;
+ }
+ ret = false;
+ goto out;
+ }
+
+ switch (vcpu->state) {
+ case VCPU_STATE_RUNNING:
+ case VCPU_STATE_OFF:
+ case VCPU_STATE_ABORTED:
+ ret = false;
+ goto out;
+
+ case VCPU_STATE_BLOCKED_MAILBOX:
+ /*
+ * A pending message allows the vCPU to run so the message can
+ * be delivered directly.
+ */
+ if (vcpu->vm->mailbox.state == MAILBOX_STATE_RECEIVED) {
+ arch_regs_set_retval(&vcpu->regs,
+ spci_msg_recv_return(vcpu->vm));
+ vcpu->vm->mailbox.state = MAILBOX_STATE_READ;
+ break;
+ }
+ /* Fall through. */
+ case VCPU_STATE_BLOCKED_INTERRUPT:
+ /* Allow virtual interrupts to be delivered. */
+ if (vcpu->interrupts.enabled_and_pending_count > 0) {
+ break;
+ }
+
+ if (arch_timer_enabled(&vcpu->regs)) {
+ uint64_t timer_remaining_ns =
+ arch_timer_remaining_ns(&vcpu->regs);
+
+ /*
+ * The timer expired so allow the interrupt to be
+ * delivered.
+ */
+ if (timer_remaining_ns == 0) {
+ break;
+ }
+
+ /*
+ * The vCPU is not ready to run, return the appropriate
+ * code to the primary which called vcpu_run.
+ */
+ run_ret->func =
+ vcpu->state == VCPU_STATE_BLOCKED_MAILBOX
+ ? SPCI_MSG_WAIT_32
+ : HF_SPCI_RUN_WAIT_FOR_INTERRUPT;
+ run_ret->arg1 =
+ spci_vm_vcpu(vcpu->vm->id, vcpu_index(vcpu));
+ run_ret->arg2 = timer_remaining_ns;
+ }
+
+ ret = false;
+ goto out;
+
+ case VCPU_STATE_READY:
+ break;
+ }
+
+ /* It has been decided that the vCPU should be run. */
+ vcpu->cpu = current->cpu;
+ vcpu->state = VCPU_STATE_RUNNING;
+
+ /*
+ * Mark the registers as unavailable now that we're about to reflect
+ * them onto the real registers. This will also prevent another physical
+ * CPU from trying to read these registers.
+ */
+ vcpu->regs_available = false;
+
+ ret = true;
+
+out:
+ sl_unlock(&vcpu->lock);
+ if (need_vm_lock) {
+ sl_unlock(&vcpu->vm->lock);
+ }
+
+ return ret;
+}
+
+/**
+ * Runs the given vCPU of a secondary VM on behalf of the primary VM. Only the
+ * primary may call this; on success `*next` is the vCPU to switch to and the
+ * returned value is a placeholder SPCI_INTERRUPT that will be overwritten on
+ * the switch back. Otherwise an SPCI error value is returned.
+ */
+struct spci_value api_spci_run(spci_vm_id_t vm_id, spci_vcpu_index_t vcpu_idx,
+ const struct vcpu *current, struct vcpu **next)
+{
+ struct vm *vm;
+ struct vcpu *vcpu;
+ struct spci_value ret = spci_error(SPCI_INVALID_PARAMETERS);
+
+ /* Only the primary VM can switch vCPUs. */
+ if (current->vm->id != HF_PRIMARY_VM_ID) {
+ ret.arg2 = SPCI_DENIED;
+ goto out;
+ }
+
+ /* Only secondary VM vCPUs can be run. */
+ if (vm_id == HF_PRIMARY_VM_ID) {
+ goto out;
+ }
+
+ /* The requested VM must exist. */
+ vm = vm_find(vm_id);
+ if (vm == NULL) {
+ goto out;
+ }
+
+ /* The requested vCPU must exist. */
+ if (vcpu_idx >= vm->vcpu_count) {
+ goto out;
+ }
+
+ /* Update state if allowed. */
+ vcpu = vm_get_vcpu(vm, vcpu_idx);
+ if (!api_vcpu_prepare_run(current, vcpu, &ret)) {
+ goto out;
+ }
+
+ /*
+ * Inject timer interrupt if timer has expired. It's safe to access
+ * vcpu->regs here because api_vcpu_prepare_run already made sure that
+ * regs_available was true (and then set it to false) before returning
+ * true.
+ */
+ if (arch_timer_pending(&vcpu->regs)) {
+ /* Make virtual timer interrupt pending. */
+ internal_interrupt_inject(vcpu, HF_VIRTUAL_TIMER_INTID, vcpu,
+ NULL);
+
+ /*
+ * Set the mask bit so the hardware interrupt doesn't fire
+ * again. Ideally we wouldn't do this because it affects what
+ * the secondary vCPU sees, but if we don't then we end up with
+ * a loop of the interrupt firing each time we try to return to
+ * the secondary vCPU.
+ */
+ arch_timer_mask(&vcpu->regs);
+ }
+
+ /* Switch to the vCPU. */
+ *next = vcpu;
+
+ /*
+ * Set a placeholder return code to the scheduler. This will be
+ * overwritten when the switch back to the primary occurs.
+ */
+ ret.func = SPCI_INTERRUPT_32;
+ ret.arg1 = spci_vm_vcpu(vm_id, vcpu_idx);
+ ret.arg2 = 0;
+
+out:
+ return ret;
+}
+
+/**
+ * Check that the mode indicates memory that is valid, owned and exclusive:
+ * none of the device, invalid, unowned or shared bits may be set.
+ */
+static bool api_mode_valid_owned_and_exclusive(uint32_t mode)
+{
+ uint32_t disallowed =
+ MM_MODE_D | MM_MODE_INVALID | MM_MODE_UNOWNED | MM_MODE_SHARED;
+
+ return (mode & disallowed) == 0;
+}
+
+/**
+ * Determines the value to be returned by api_vm_configure and spci_rx_release
+ * after they've succeeded. If a secondary VM is running and there are waiters,
+ * it also switches back to the primary VM for it to wake waiters up.
+ *
+ * Always returns SPCI_SUCCESS except when the primary itself must wake the
+ * waiters, in which case SPCI_RX_RELEASE is returned directly to it.
+ */
+static struct spci_value api_waiter_result(struct vm_locked locked_vm,
+ struct vcpu *current,
+ struct vcpu **next)
+{
+ struct vm *vm = locked_vm.vm;
+
+ if (list_empty(&vm->mailbox.waiter_list)) {
+ /* No waiters, nothing else to do. */
+ return (struct spci_value){.func = SPCI_SUCCESS_32};
+ }
+
+ if (vm->id == HF_PRIMARY_VM_ID) {
+ /* The caller is the primary VM. Tell it to wake up waiters. */
+ return (struct spci_value){.func = SPCI_RX_RELEASE_32};
+ }
+
+ /*
+ * Switch back to the primary VM, informing it that there are waiters
+ * that need to be notified.
+ */
+ *next = api_switch_to_primary(
+ current, (struct spci_value){.func = SPCI_RX_RELEASE_32},
+ VCPU_STATE_READY);
+
+ return (struct spci_value){.func = SPCI_SUCCESS_32};
+}
+
+/**
+ * Configures the hypervisor's stage-1 view of the send and receive pages. The
+ * stage-1 page tables must be locked so memory cannot be taken by another core
+ * which could result in this transaction being unable to roll back in the case
+ * of an error.
+ *
+ * On failure all partial stage-1 mappings are undone before returning false.
+ */
+static bool api_vm_configure_stage1(struct vm_locked vm_locked,
+ paddr_t pa_send_begin, paddr_t pa_send_end,
+ paddr_t pa_recv_begin, paddr_t pa_recv_end,
+ struct mpool *local_page_pool)
+{
+ bool ret;
+ struct mm_stage1_locked mm_stage1_locked = mm_lock_stage1();
+
+ /* Map the send page as read-only in the hypervisor address space. */
+ vm_locked.vm->mailbox.send =
+ mm_identity_map(mm_stage1_locked, pa_send_begin, pa_send_end,
+ MM_MODE_R, local_page_pool);
+ if (!vm_locked.vm->mailbox.send) {
+ /* TODO: partial defrag of failed range. */
+ /* Recover any memory consumed in failed mapping. */
+ mm_defrag(mm_stage1_locked, local_page_pool);
+ goto fail;
+ }
+
+ /*
+ * Map the receive page as writable in the hypervisor address space. On
+ * failure, unmap the send page before returning.
+ */
+ vm_locked.vm->mailbox.recv =
+ mm_identity_map(mm_stage1_locked, pa_recv_begin, pa_recv_end,
+ MM_MODE_W, local_page_pool);
+ if (!vm_locked.vm->mailbox.recv) {
+ /* TODO: partial defrag of failed range. */
+ /* Recover any memory consumed in failed mapping. */
+ mm_defrag(mm_stage1_locked, local_page_pool);
+ goto fail_undo_send;
+ }
+
+ ret = true;
+ goto out;
+
+ /*
+ * The following mappings will not require more memory than is available
+ * in the local pool.
+ */
+fail_undo_send:
+ vm_locked.vm->mailbox.send = NULL;
+ CHECK(mm_unmap(mm_stage1_locked, pa_send_begin, pa_send_end,
+ local_page_pool));
+
+fail:
+ ret = false;
+
+out:
+ mm_unlock_stage1(&mm_stage1_locked);
+
+ return ret;
+}
+
+/**
+ * Configures the send and receive pages in the VM stage-2 and hypervisor
+ * stage-1 page tables. Locking of the page tables combined with a local memory
+ * pool ensures there will always be enough memory to recover from any errors
+ * that arise.
+ *
+ * On failure all stage-2 mappings are restored to their original modes.
+ */
+static bool api_vm_configure_pages(struct vm_locked vm_locked,
+ paddr_t pa_send_begin, paddr_t pa_send_end,
+ uint32_t orig_send_mode,
+ paddr_t pa_recv_begin, paddr_t pa_recv_end,
+ uint32_t orig_recv_mode)
+{
+ bool ret;
+ struct mpool local_page_pool;
+
+ /*
+ * Create a local pool so any freed memory can't be used by another
+ * thread. This is to ensure the original mapping can be restored if any
+ * stage of the process fails.
+ */
+ mpool_init_with_fallback(&local_page_pool, &api_page_pool);
+
+ /* Take memory ownership away from the VM and mark as shared. */
+ if (!vm_identity_map(
+ vm_locked, pa_send_begin, pa_send_end,
+ MM_MODE_UNOWNED | MM_MODE_SHARED | MM_MODE_R | MM_MODE_W,
+ &local_page_pool, NULL)) {
+ goto fail;
+ }
+
+ if (!vm_identity_map(vm_locked, pa_recv_begin, pa_recv_end,
+ MM_MODE_UNOWNED | MM_MODE_SHARED | MM_MODE_R,
+ &local_page_pool, NULL)) {
+ /* TODO: partial defrag of failed range. */
+ /* Recover any memory consumed in failed mapping. */
+ mm_vm_defrag(&vm_locked.vm->ptable, &local_page_pool);
+ goto fail_undo_send;
+ }
+
+ if (!api_vm_configure_stage1(vm_locked, pa_send_begin, pa_send_end,
+ pa_recv_begin, pa_recv_end,
+ &local_page_pool)) {
+ goto fail_undo_send_and_recv;
+ }
+
+ ret = true;
+ goto out;
+
+ /*
+ * The following mappings will not require more memory than is available
+ * in the local pool.
+ */
+fail_undo_send_and_recv:
+ CHECK(vm_identity_map(vm_locked, pa_recv_begin, pa_recv_end,
+ orig_recv_mode, &local_page_pool, NULL));
+
+fail_undo_send:
+ CHECK(vm_identity_map(vm_locked, pa_send_begin, pa_send_end,
+ orig_send_mode, &local_page_pool, NULL));
+
+fail:
+ ret = false;
+
+out:
+ mpool_fini(&local_page_pool);
+
+ return ret;
+}
+
+/**
+ * Configures the VM to send/receive data through the specified pages. The pages
+ * must not be shared.
+ *
+ * Returns:
+ * - SPCI_ERROR SPCI_INVALID_PARAMETERS if the given addresses are not properly
+ * aligned or are the same.
+ * - SPCI_ERROR SPCI_NO_MEMORY if the hypervisor was unable to map the buffers
+ * due to insufficient page table memory.
+ * - SPCI_ERROR SPCI_DENIED if the pages are already mapped or are not owned by
+ * the caller.
+ * - SPCI_SUCCESS on success if no further action is needed.
+ * - SPCI_RX_RELEASE if it was called by the primary VM and the primary VM now
+ * needs to wake up or kick waiters.
+ */
+struct spci_value api_spci_rxtx_map(ipaddr_t send, ipaddr_t recv,
+ uint32_t page_count, struct vcpu *current,
+ struct vcpu **next)
+{
+ struct vm *vm = current->vm;
+ struct vm_locked vm_locked;
+ paddr_t pa_send_begin;
+ paddr_t pa_send_end;
+ paddr_t pa_recv_begin;
+ paddr_t pa_recv_end;
+ uint32_t orig_send_mode;
+ uint32_t orig_recv_mode;
+ struct spci_value ret;
+
+ /* Hafnium only supports a fixed size of RX/TX buffers. */
+ if (page_count != HF_MAILBOX_SIZE / SPCI_PAGE_SIZE) {
+ return spci_error(SPCI_INVALID_PARAMETERS);
+ }
+
+ /* Fail if addresses are not page-aligned. */
+ if (!is_aligned(ipa_addr(send), PAGE_SIZE) ||
+ !is_aligned(ipa_addr(recv), PAGE_SIZE)) {
+ return spci_error(SPCI_INVALID_PARAMETERS);
+ }
+
+ /* Convert to physical addresses. */
+ pa_send_begin = pa_from_ipa(send);
+ pa_send_end = pa_add(pa_send_begin, HF_MAILBOX_SIZE);
+
+ pa_recv_begin = pa_from_ipa(recv);
+ pa_recv_end = pa_add(pa_recv_begin, HF_MAILBOX_SIZE);
+
+ /* Fail if the same page is used for the send and receive pages. */
+ if (pa_addr(pa_send_begin) == pa_addr(pa_recv_begin)) {
+ return spci_error(SPCI_INVALID_PARAMETERS);
+ }
+
+ /*
+ * The hypervisor's memory map must be locked for the duration of this
+ * operation to ensure there will be sufficient memory to recover from
+ * any failures.
+ *
+ * TODO: the scope of the lock can be reduced but will require
+ * restructuring to keep a single unlock point.
+ */
+ vm_locked = vm_lock(vm);
+
+ /* We only allow these to be setup once. */
+ if (vm->mailbox.send || vm->mailbox.recv) {
+ ret = spci_error(SPCI_DENIED);
+ goto exit;
+ }
+
+ /*
+ * Ensure the pages are valid, owned and exclusive to the VM and that
+ * the VM has the required access to the memory.
+ */
+ if (!mm_vm_get_mode(&vm->ptable, send, ipa_add(send, PAGE_SIZE),
+ &orig_send_mode) ||
+ !api_mode_valid_owned_and_exclusive(orig_send_mode) ||
+ (orig_send_mode & MM_MODE_R) == 0 ||
+ (orig_send_mode & MM_MODE_W) == 0) {
+ ret = spci_error(SPCI_DENIED);
+ goto exit;
+ }
+
+ if (!mm_vm_get_mode(&vm->ptable, recv, ipa_add(recv, PAGE_SIZE),
+ &orig_recv_mode) ||
+ !api_mode_valid_owned_and_exclusive(orig_recv_mode) ||
+ (orig_recv_mode & MM_MODE_R) == 0) {
+ ret = spci_error(SPCI_DENIED);
+ goto exit;
+ }
+
+ if (!api_vm_configure_pages(vm_locked, pa_send_begin, pa_send_end,
+ orig_send_mode, pa_recv_begin, pa_recv_end,
+ orig_recv_mode)) {
+ ret = spci_error(SPCI_NO_MEMORY);
+ goto exit;
+ }
+
+ /* Tell caller about waiters, if any. */
+ ret = api_waiter_result(vm_locked, current, next);
+
+exit:
+ vm_unlock(&vm_locked);
+
+ return ret;
+}
+
+/**
+ * Checks whether the given `to` VM's mailbox is currently busy, and optionally
+ * registers the `from` VM to be notified when it becomes available.
+ *
+ * Returns true when the receiver is busy, false when it is ready.
+ */
+static bool msg_receiver_busy(struct vm_locked to, struct vm *from, bool notify)
+{
+ if (to.vm->mailbox.state != MAILBOX_STATE_EMPTY ||
+ to.vm->mailbox.recv == NULL) {
+ /*
+ * Fail if the receiver isn't currently ready to receive data,
+ * setting up for notification if requested.
+ */
+ if (notify) {
+ struct wait_entry *entry =
+ vm_get_wait_entry(from, to.vm->id);
+
+ /* Append waiter only if it's not there yet. */
+ if (list_empty(&entry->wait_links)) {
+ list_append(&to.vm->mailbox.waiter_list,
+ &entry->wait_links);
+ }
+ }
+
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * Notifies the `to` VM about the message currently in its mailbox, possibly
+ * with the help of the primary VM.
+ *
+ * `*next` is written whenever a switch to the primary is required.
+ */
+static void deliver_msg(struct vm_locked to, spci_vm_id_t from_id,
+ struct vcpu *current, struct vcpu **next)
+{
+ struct spci_value primary_ret = {
+ .func = SPCI_MSG_SEND_32,
+ .arg1 = ((uint32_t)from_id << 16) | to.vm->id,
+ };
+
+ /* Messages for the primary VM are delivered directly. */
+ if (to.vm->id == HF_PRIMARY_VM_ID) {
+ /*
+ * Only tell the primary VM the size if the message is for it,
+ * to avoid leaking data about messages for other VMs.
+ */
+ primary_ret.arg3 = to.vm->mailbox.recv_size;
+ primary_ret.arg4 = to.vm->mailbox.recv_attributes;
+
+ to.vm->mailbox.state = MAILBOX_STATE_READ;
+ *next = api_switch_to_primary(current, primary_ret,
+ VCPU_STATE_READY);
+ return;
+ }
+
+ to.vm->mailbox.state = MAILBOX_STATE_RECEIVED;
+
+ /* Return to the primary VM directly or with a switch. */
+ if (from_id != HF_PRIMARY_VM_ID) {
+ *next = api_switch_to_primary(current, primary_ret,
+ VCPU_STATE_READY);
+ }
+}
+
+/**
+ * Copies data from the sender's send buffer to the recipient's receive buffer
+ * and notifies the recipient.
+ *
+ * If the recipient's receive buffer is busy, it can optionally register the
+ * caller to be notified when the recipient's receive buffer becomes available.
+ *
+ * Returns SPCI_INVALID_PARAMETERS on bad arguments, SPCI_BUSY when the
+ * receiver's mailbox is unavailable, or the result of delivering the message.
+ */
+struct spci_value api_spci_msg_send(spci_vm_id_t sender_vm_id,
+ spci_vm_id_t receiver_vm_id, uint32_t size,
+ uint32_t attributes, struct vcpu *current,
+ struct vcpu **next)
+{
+ struct vm *from = current->vm;
+ struct vm *to;
+
+ struct two_vm_locked vm_to_from_lock;
+
+ const void *from_msg;
+
+ struct spci_value ret;
+ bool notify = (attributes & SPCI_MSG_SEND_NOTIFY_MASK) ==
+ SPCI_MSG_SEND_NOTIFY;
+
+ /* Ensure sender VM ID corresponds to the current VM. */
+ if (sender_vm_id != from->id) {
+ return spci_error(SPCI_INVALID_PARAMETERS);
+ }
+
+ /* Disallow reflexive requests as this suggests an error in the VM. */
+ if (receiver_vm_id == from->id) {
+ return spci_error(SPCI_INVALID_PARAMETERS);
+ }
+
+ /* Limit the size of transfer. */
+ if (size > SPCI_MSG_PAYLOAD_MAX) {
+ return spci_error(SPCI_INVALID_PARAMETERS);
+ }
+
+ /* Ensure the receiver VM exists. */
+ to = vm_find(receiver_vm_id);
+ if (to == NULL) {
+ return spci_error(SPCI_INVALID_PARAMETERS);
+ }
+
+ /*
+ * Check that the sender has configured its send buffer. If the tx
+ * mailbox at from_msg is configured (i.e. from_msg != NULL) then it can
+ * be safely accessed after releasing the lock since the tx mailbox
+ * address can only be configured once.
+ */
+ sl_lock(&from->lock);
+ from_msg = from->mailbox.send;
+ sl_unlock(&from->lock);
+
+ if (from_msg == NULL) {
+ return spci_error(SPCI_INVALID_PARAMETERS);
+ }
+
+ /*
+ * Hafnium needs to hold the lock on <to> before the mailbox state is
+ * checked. The lock on <to> must be held until the information is
+ * copied to <to> Rx buffer. Since in
+ * spci_msg_handle_architected_message we may call api_spci_share_memory
+ * which must hold the <from> lock, we must hold the <from> lock at this
+ * point to prevent a deadlock scenario.
+ */
+ vm_to_from_lock = vm_lock_both(to, from);
+
+ if (msg_receiver_busy(vm_to_from_lock.vm1, from, notify)) {
+ ret = spci_error(SPCI_BUSY);
+ goto out;
+ }
+
+ /* Handle legacy memory sharing messages. */
+ if ((attributes & SPCI_MSG_SEND_LEGACY_MEMORY_MASK) != 0) {
+ /*
+ * Buffer holding the internal copy of the shared memory
+ * regions.
+ */
+ uint8_t *message_replica = cpu_get_buffer(current->cpu->id);
+ uint32_t message_buffer_size =
+ cpu_get_buffer_size(current->cpu->id);
+
+ if (size > message_buffer_size) {
+ ret = spci_error(SPCI_INVALID_PARAMETERS);
+ goto out;
+ }
+
+ /* Copy the architected message into the internal buffer. */
+ memcpy_s(message_replica, message_buffer_size, from_msg, size);
+
+ /*
+ * Note that architected_message_replica is passed as the third
+ * parameter to spci_msg_handle_architected_message. The
+ * execution flow commencing at
+ * spci_msg_handle_architected_message will make several
+ * accesses to fields in architected_message_replica. The memory
+ * area architected_message_replica must be exclusively owned by
+ * Hafnium so that TOCTOU issues do not arise.
+ */
+ ret = spci_msg_handle_architected_message(
+ vm_to_from_lock.vm1, vm_to_from_lock.vm2,
+ (struct spci_memory_region *)message_replica, size,
+ attributes, &api_page_pool);
+
+ if (ret.func != SPCI_SUCCESS_32) {
+ goto out;
+ }
+ } else {
+ /* Copy data. */
+ memcpy_s(to->mailbox.recv, SPCI_MSG_PAYLOAD_MAX, from_msg,
+ size);
+ to->mailbox.recv_size = size;
+ to->mailbox.recv_sender = sender_vm_id;
+ to->mailbox.recv_attributes = 0;
+ ret = (struct spci_value){.func = SPCI_SUCCESS_32};
+ }
+
+ deliver_msg(vm_to_from_lock.vm1, sender_vm_id, current, next);
+
+out:
+ vm_unlock(&vm_to_from_lock.vm1);
+ vm_unlock(&vm_to_from_lock.vm2);
+
+ return ret;
+}
+
+/**
+ * Checks whether the vCPU's attempt to block for a message has already been
+ * interrupted or whether it is allowed to block.
+ *
+ * Returns true when there is at least one enabled and pending interrupt,
+ * meaning the vCPU must not block.
+ */
+bool api_spci_msg_recv_block_interrupted(struct vcpu *current)
+{
+ bool interrupted;
+
+ sl_lock(&current->lock);
+
+ /*
+ * Don't block if there are enabled and pending interrupts, to match
+ * behaviour of wait_for_interrupt.
+ */
+ interrupted = (current->interrupts.enabled_and_pending_count > 0);
+
+ sl_unlock(&current->lock);
+
+ return interrupted;
+}
+
+/**
+ * Receives a message from the mailbox. If one isn't available, this function
+ * can optionally block the caller until one becomes available.
+ *
+ * No new messages can be received until the mailbox has been cleared.
+ *
+ * Not supported for the primary VM (it receives via run return codes).
+ */
+struct spci_value api_spci_msg_recv(bool block, struct vcpu *current,
+ struct vcpu **next)
+{
+ struct vm *vm = current->vm;
+ struct spci_value return_code;
+
+ /*
+ * The primary VM will receive messages as a status code from running
+ * vCPUs and must not call this function.
+ */
+ if (vm->id == HF_PRIMARY_VM_ID) {
+ return spci_error(SPCI_NOT_SUPPORTED);
+ }
+
+ sl_lock(&vm->lock);
+
+ /* Return pending messages without blocking. */
+ if (vm->mailbox.state == MAILBOX_STATE_RECEIVED) {
+ vm->mailbox.state = MAILBOX_STATE_READ;
+ return_code = spci_msg_recv_return(vm);
+ goto out;
+ }
+
+ /* No pending message so fail if not allowed to block. */
+ if (!block) {
+ return_code = spci_error(SPCI_RETRY);
+ goto out;
+ }
+
+ /*
+ * From this point onward this call can only be interrupted or a message
+ * received. If a message is received the return value will be set at
+ * that time to SPCI_SUCCESS.
+ */
+ return_code = spci_error(SPCI_INTERRUPTED);
+ if (api_spci_msg_recv_block_interrupted(current)) {
+ goto out;
+ }
+
+ /* Switch back to primary VM to block. */
+ {
+ struct spci_value run_return = {
+ .func = SPCI_MSG_WAIT_32,
+ .arg1 = spci_vm_vcpu(vm->id, vcpu_index(current)),
+ };
+
+ *next = api_switch_to_primary(current, run_return,
+ VCPU_STATE_BLOCKED_MAILBOX);
+ }
+out:
+ sl_unlock(&vm->lock);
+
+ return return_code;
+}
+
+/**
+ * Retrieves the next VM whose mailbox became writable. For a VM to be notified
+ * by this function, the caller must have called api_mailbox_send before with
+ * the notify argument set to true, and this call must have failed because the
+ * mailbox was not available.
+ *
+ * It should be called repeatedly to retrieve a list of VMs.
+ *
+ * Returns -1 if no VM became writable, or the id of the VM whose mailbox
+ * became writable.
+ */
+int64_t api_mailbox_writable_get(const struct vcpu *current)
+{
+ struct vm *vm = current->vm;
+ struct wait_entry *entry;
+ int64_t ret;
+
+ sl_lock(&vm->lock);
+ if (list_empty(&vm->mailbox.ready_list)) {
+ ret = -1;
+ goto exit;
+ }
+
+ /* Pop the first ready entry and translate it back to a VM id. */
+ entry = CONTAINER_OF(vm->mailbox.ready_list.next, struct wait_entry,
+ ready_links);
+ list_remove(&entry->ready_links);
+ ret = vm_id_for_wait_entry(vm, entry);
+
+exit:
+ sl_unlock(&vm->lock);
+ return ret;
+}
+
+/**
+ * Retrieves the next VM waiting to be notified that the mailbox of the
+ * specified VM became writable. Only primary VMs are allowed to call this.
+ *
+ * Returns -1 on failure or if there are no waiters; the VM id of the next
+ * waiter otherwise.
+ */
+int64_t api_mailbox_waiter_get(spci_vm_id_t vm_id, const struct vcpu *current)
+{
+ struct vm *vm;
+ struct vm_locked locked;
+ struct wait_entry *entry;
+ struct vm *waiting_vm;
+
+ /* Only primary VMs are allowed to call this function. */
+ if (current->vm->id != HF_PRIMARY_VM_ID) {
+ return -1;
+ }
+
+ vm = vm_find(vm_id);
+ if (vm == NULL) {
+ return -1;
+ }
+
+ /* Check if there are outstanding notifications from given VM. */
+ locked = vm_lock(vm);
+ entry = api_fetch_waiter(locked);
+ vm_unlock(&locked);
+
+ if (entry == NULL) {
+ return -1;
+ }
+
+ /* Enqueue notification to waiting VM. */
+ waiting_vm = entry->waiting_vm;
+
+ sl_lock(&waiting_vm->lock);
+ /*
+ * Only link the entry if it isn't already on the waiting VM's ready
+ * list, so a repeated notification doesn't corrupt the list.
+ */
+ if (list_empty(&entry->ready_links)) {
+ list_append(&waiting_vm->mailbox.ready_list,
+ &entry->ready_links);
+ }
+ sl_unlock(&waiting_vm->lock);
+
+ return waiting_vm->id;
+}
+
+/**
+ * Releases the caller's mailbox so that a new message can be received. The
+ * caller must have copied out all data they wish to preserve as new messages
+ * will overwrite the old and will arrive asynchronously.
+ *
+ * Returns:
+ * - SPCI_ERROR SPCI_DENIED on failure, if the mailbox hasn't been read.
+ * - SPCI_SUCCESS on success if no further action is needed.
+ * - SPCI_RX_RELEASE if it was called by the primary VM and the primary VM now
+ * needs to wake up or kick waiters. Waiters should be retrieved by calling
+ * hf_mailbox_waiter_get.
+ */
+struct spci_value api_spci_rx_release(struct vcpu *current, struct vcpu **next)
+{
+ struct vm *vm = current->vm;
+ struct vm_locked locked;
+ struct spci_value ret;
+
+ locked = vm_lock(vm);
+ /*
+ * No default case: this relies on the mailbox state enum having
+ * exactly these three values so `ret` is assigned on every path.
+ * NOTE(review): an out-of-range state would leave `ret`
+ * uninitialised — confirm -Wswitch coverage is sufficient here.
+ */
+ switch (vm->mailbox.state) {
+ case MAILBOX_STATE_EMPTY:
+ case MAILBOX_STATE_RECEIVED:
+ ret = spci_error(SPCI_DENIED);
+ break;
+
+ case MAILBOX_STATE_READ:
+ ret = api_waiter_result(locked, current, next);
+ vm->mailbox.state = MAILBOX_STATE_EMPTY;
+ break;
+ }
+ vm_unlock(&locked);
+
+ return ret;
+}
+
+/**
+ * Enables or disables a given interrupt ID for the calling vCPU.
+ *
+ * The enabled-and-pending count is kept in sync with the two bitmaps so the
+ * vCPU can tell whether it has deliverable interrupts without rescanning.
+ *
+ * Returns 0 on success, or -1 if the intid is invalid.
+ */
+int64_t api_interrupt_enable(uint32_t intid, bool enable, struct vcpu *current)
+{
+ uint32_t intid_index = intid / INTERRUPT_REGISTER_BITS;
+ uint32_t intid_mask = 1U << (intid % INTERRUPT_REGISTER_BITS);
+
+ if (intid >= HF_NUM_INTIDS) {
+ return -1;
+ }
+
+ sl_lock(&current->lock);
+ if (enable) {
+ /*
+ * If it is pending and was not enabled before, increment the
+ * count.
+ */
+ if (current->interrupts.interrupt_pending[intid_index] &
+ ~current->interrupts.interrupt_enabled[intid_index] &
+ intid_mask) {
+ current->interrupts.enabled_and_pending_count++;
+ }
+ current->interrupts.interrupt_enabled[intid_index] |=
+ intid_mask;
+ } else {
+ /*
+ * If it is pending and was enabled before, decrement the count.
+ */
+ if (current->interrupts.interrupt_pending[intid_index] &
+ current->interrupts.interrupt_enabled[intid_index] &
+ intid_mask) {
+ current->interrupts.enabled_and_pending_count--;
+ }
+ current->interrupts.interrupt_enabled[intid_index] &=
+ ~intid_mask;
+ }
+
+ sl_unlock(&current->lock);
+ return 0;
+}
+
+/**
+ * Returns the ID of the next pending interrupt for the calling vCPU, and
+ * acknowledges it (i.e. marks it as no longer pending). Returns
+ * HF_INVALID_INTID if there are no pending interrupts.
+ */
+uint32_t api_interrupt_get(struct vcpu *current)
+{
+ uint8_t i;
+ uint32_t first_interrupt = HF_INVALID_INTID;
+
+ /*
+ * Find the first enabled and pending interrupt ID, return it, and
+ * deactivate it.
+ */
+ sl_lock(&current->lock);
+ for (i = 0; i < HF_NUM_INTIDS / INTERRUPT_REGISTER_BITS; ++i) {
+ uint32_t enabled_and_pending =
+ current->interrupts.interrupt_enabled[i] &
+ current->interrupts.interrupt_pending[i];
+
+ if (enabled_and_pending != 0) {
+ uint8_t bit_index = ctz(enabled_and_pending);
+ /*
+ * Mark it as no longer pending and decrement the count.
+ */
+ current->interrupts.interrupt_pending[i] &=
+ ~(1U << bit_index);
+ current->interrupts.enabled_and_pending_count--;
+ first_interrupt =
+ i * INTERRUPT_REGISTER_BITS + bit_index;
+ break;
+ }
+ }
+
+ sl_unlock(&current->lock);
+ return first_interrupt;
+}
+
+/**
+ * Returns whether the current vCPU is allowed to inject an interrupt into the
+ * given VM and vCPU.
+ */
+static inline bool is_injection_allowed(uint32_t target_vm_id,
+ struct vcpu *current)
+{
+ /*
+ * NOTE(review): target_vm_id is uint32_t while callers pass
+ * spci_vm_id_t — presumably a widening conversion; confirm the
+ * typedef.
+ */
+ uint32_t current_vm_id = current->vm->id;
+
+ /*
+ * The primary VM is allowed to inject interrupts into any VM. Secondary
+ * VMs are only allowed to inject interrupts into their own vCPUs.
+ */
+ return current_vm_id == HF_PRIMARY_VM_ID ||
+ current_vm_id == target_vm_id;
+}
+
+/**
+ * Injects a virtual interrupt of the given ID into the given target vCPU.
+ * This doesn't cause the vCPU to actually be run immediately; it will be taken
+ * when the vCPU is next run, which is up to the scheduler.
+ *
+ * Returns:
+ * - -1 on failure because the target VM or vCPU doesn't exist, the interrupt
+ * ID is invalid, or the current VM is not allowed to inject interrupts to
+ * the target VM.
+ * - 0 on success if no further action is needed.
+ * - 1 if it was called by the primary VM and the primary VM now needs to wake
+ * up or kick the target vCPU.
+ */
+int64_t api_interrupt_inject(spci_vm_id_t target_vm_id,
+ spci_vcpu_index_t target_vcpu_idx, uint32_t intid,
+ struct vcpu *current, struct vcpu **next)
+{
+ struct vcpu *target_vcpu;
+ struct vm *target_vm = vm_find(target_vm_id);
+
+ /* Validate the interrupt ID, target VM and target vCPU index. */
+ if (intid >= HF_NUM_INTIDS) {
+ return -1;
+ }
+
+ if (target_vm == NULL) {
+ return -1;
+ }
+
+ if (target_vcpu_idx >= target_vm->vcpu_count) {
+ /* The requested vCPU must exist. */
+ return -1;
+ }
+
+ if (!is_injection_allowed(target_vm_id, current)) {
+ return -1;
+ }
+
+ target_vcpu = vm_get_vcpu(target_vm, target_vcpu_idx);
+
+ dlog("Injecting IRQ %d for VM %d vCPU %d from VM %d vCPU %d\n", intid,
+ target_vm_id, target_vcpu_idx, current->vm->id, current->cpu->id);
+ /* Actual delivery and the 0/1 return-code logic live in the helper. */
+ return internal_interrupt_inject(target_vcpu, intid, current, next);
+}
+
+/** Returns the version of the implemented SPCI specification. */
+struct spci_value api_spci_version(void)
+{
+ /*
+ * Ensure that the major revision fits in 15 bits and the minor
+ * revision in 16 bits, as checked by the asserts below. With
+ * SPCI_VERSION_MAJOR_OFFSET presumably 16, this keeps the top bit of
+ * the packed 32-bit version word clear — confirm against the SPCI
+ * specification's version register layout.
+ */
+ static_assert(0x8000 > SPCI_VERSION_MAJOR,
+ "Major revision representation take more than 15 bits.");
+ static_assert(0x10000 > SPCI_VERSION_MINOR,
+ "Minor revision representation take more than 16 bits.");
+
+ struct spci_value ret = {
+ .func = SPCI_SUCCESS_32,
+ .arg2 = (SPCI_VERSION_MAJOR << SPCI_VERSION_MAJOR_OFFSET) |
+ SPCI_VERSION_MINOR};
+ return ret;
+}
+
+/**
+ * Appends a character to the calling VM's debug log buffer, flushing the
+ * buffer to the system log when a terminator ('\n' or '\0') arrives or when
+ * the buffer becomes full. The terminator itself is not stored.
+ *
+ * Always returns 0. The VM lock protects the buffer against concurrent
+ * vCPUs of the same VM logging at once.
+ */
+int64_t api_debug_log(char c, struct vcpu *current)
+{
+ bool flush;
+ struct vm *vm = current->vm;
+ struct vm_locked vm_locked = vm_lock(vm);
+
+ if (c == '\n' || c == '\0') {
+ flush = true;
+ } else {
+ /* Flushing when full keeps the index within the buffer. */
+ vm->log_buffer[vm->log_buffer_length++] = c;
+ flush = (vm->log_buffer_length == sizeof(vm->log_buffer));
+ }
+
+ if (flush) {
+ dlog_flush_vm_buffer(vm->id, vm->log_buffer,
+ vm->log_buffer_length);
+ vm->log_buffer_length = 0;
+ }
+
+ vm_unlock(&vm_locked);
+
+ return 0;
+}
+
+/**
+ * Discovery function returning information about the implementation of
+ * optional SPCI interfaces.
+ *
+ * Returns SPCI_SUCCESS for each function ID implemented by this hypervisor
+ * (listed below) and SPCI_ERROR SPCI_NOT_SUPPORTED for any other ID.
+ */
+struct spci_value api_spci_features(uint32_t function_id)
+{
+ switch (function_id) {
+ case SPCI_ERROR_32:
+ case SPCI_SUCCESS_32:
+ case SPCI_ID_GET_32:
+ case SPCI_YIELD_32:
+ case SPCI_VERSION_32:
+ case SPCI_FEATURES_32:
+ case SPCI_MSG_SEND_32:
+ case SPCI_MSG_POLL_32:
+ case SPCI_MSG_WAIT_32:
+ return (struct spci_value){.func = SPCI_SUCCESS_32};
+ default:
+ return spci_error(SPCI_NOT_SUPPORTED);
+ }
+}
diff --git a/src/api_test.cc b/src/api_test.cc
new file mode 100644
index 0000000..0a80156
--- /dev/null
+++ b/src/api_test.cc
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2018 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * This order of headers works around a libc++ issue which prevents
+ * "atomic" being included before "stdatomic.h".
+ */
+#include <gmock/gmock.h>
+
+extern "C" {
+#include "hf/api.h"
+}
+
+namespace
+{
+using ::testing::Eq;
+
+/*
+ * In the unit-test build no boot image is processed, so no VMs should have
+ * been initialised yet — presumably vm_init has not run; confirm against the
+ * test fixture if this starts failing.
+ */
+TEST(api, vm_get_count)
+{
+ EXPECT_THAT(api_vm_get_count(), Eq(0));
+}
+
+} /* namespace */
diff --git a/src/arch/aarch64/BUILD.gn b/src/arch/aarch64/BUILD.gn
new file mode 100644
index 0000000..93569ac
--- /dev/null
+++ b/src/arch/aarch64/BUILD.gn
@@ -0,0 +1,47 @@
+# Copyright 2018 The Hafnium Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+config("config") {
+ include_dirs = [ "." ]
+}
+
+# Implementation of the arch interface for aarch64.
+source_set("arch") {
+ sources = [
+ "irq.c",
+ "mm.c",
+ "timer.c",
+ ]
+}
+
+source_set("std") {
+ sources = [
+ "stack_protector.c",
+ "std.c",
+ ]
+}
+
+# Entry code to prepare the loaded image to be run.
+source_set("entry") {
+ sources = [
+ "entry.S",
+ ]
+}
+
+# Make a call to the secure monitor.
+source_set("smc") {
+ sources = [
+ "smc.c",
+ ]
+}
diff --git a/src/arch/aarch64/args.gni b/src/arch/aarch64/args.gni
new file mode 100644
index 0000000..1582b34
--- /dev/null
+++ b/src/arch/aarch64/args.gni
@@ -0,0 +1,18 @@
+# Copyright 2019 The Hafnium Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+declare_args() {
+ # SMC hooks to be used for the platform, specified as build target.
+ plat_smc = "//src/arch/aarch64/smc:absent"
+}
diff --git a/src/arch/aarch64/boot_flow/BUILD.gn b/src/arch/aarch64/boot_flow/BUILD.gn
new file mode 100644
index 0000000..d709a9e
--- /dev/null
+++ b/src/arch/aarch64/boot_flow/BUILD.gn
@@ -0,0 +1,25 @@
+# Copyright 2019 The Hafnium Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+source_set("android") {
+ sources = [
+ "android.S",
+ ]
+}
+
+source_set("linux") {
+ sources = [
+ "linux.S",
+ ]
+}
diff --git a/src/arch/aarch64/boot_flow/android.S b/src/arch/aarch64/boot_flow/android.S
new file mode 100644
index 0000000..13f0530
--- /dev/null
+++ b/src/arch/aarch64/boot_flow/android.S
@@ -0,0 +1,21 @@
+/*
+ * Copyright 2019 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * Boot-flow hook for the Android flow: nothing needs to be recorded at boot,
+ * so this is a no-op that returns immediately.
+ */
+.section .init.plat_boot_flow_hook, "ax"
+.global plat_boot_flow_hook
+plat_boot_flow_hook:
+ /* Do nothing. */
+ ret
diff --git a/src/arch/aarch64/boot_flow/linux.S b/src/arch/aarch64/boot_flow/linux.S
new file mode 100644
index 0000000..419f9a4
--- /dev/null
+++ b/src/arch/aarch64/boot_flow/linux.S
@@ -0,0 +1,24 @@
+/*
+ * Copyright 2019 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * Boot-flow hook for the Linux flow: x0 carries the FDT pointer on entry
+ * (presumably per the Linux arm64 boot protocol — confirm against the
+ * caller) and is stashed in plat_boot_flow_fdt_addr for later use.
+ * Clobbers x25, which the surrounding init code treats as scratch.
+ */
+.section .init.plat_boot_flow_hook, "ax"
+.global plat_boot_flow_hook
+plat_boot_flow_hook:
+ /* Save the FDT pointer to a global variable. */
+ adrp x25, plat_boot_flow_fdt_addr
+ add x25, x25, :lo12:plat_boot_flow_fdt_addr
+ str x0, [x25]
+ ret
diff --git a/src/arch/aarch64/entry.S b/src/arch/aarch64/entry.S
new file mode 100644
index 0000000..3d3725e
--- /dev/null
+++ b/src/arch/aarch64/entry.S
@@ -0,0 +1,84 @@
+/*
+ * Copyright 2018 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * This is a generic entry point for an image. It carries out the operations
+ * required to prepare the loaded image to be run. Specifically, it performs
+ * relocations and zeroing of the bss section using registers x25 and above.
+ */
+.section .init.entry, "ax"
+.global entry
+entry:
+ /* Linux aarch64 image header. */
+ b 0f
+ .word 0
+ .quad 0x1000 /* text_offset */
+ .quad image_size /* image_size */
+ .quad 0 /* flags */
+ .quad 0 /* res2 */
+ .quad 0 /* res3 */
+ .quad 0 /* res4 */
+ .word 0x644d5241 /* magic */
+ .word 0
+
+ /*
+ * Calculate the difference between the actual load address and the
+ * preferred one. We'll use this to relocate.
+ */
+0: adrp x25, entry
+ add x25, x25, :lo12:entry
+
+ ldr w29, =ORIGIN_ADDRESS
+
+ sub x25, x25, x29
+
+ /* Find where the relocations begin and end. */
+ adrp x29, rela_begin
+ add x29, x29, :lo12:rela_begin
+
+ adrp x30, rela_end
+ add x30, x30, :lo12:rela_end
+
+ /* Iterate over all relocations. */
+1: cmp x29, x30
+ b.eq 2f
+
+ /* Elf64_Rela entry: r_offset (x26), r_info (x27), r_addend (x28). */
+ ldp x26, x27, [x29], #16
+ ldr x28, [x29], #8
+
+ /*
+ * Spin forever on any relocation type other than R_AARCH64_RELATIVE
+ * so the problem is caught early; the disabled branch below would
+ * silently skip the entry instead.
+ */
+ cmp w27, #1027 /* R_AARCH64_RELATIVE */
+# b.ne 1b
+ b.ne .
+
+ add x28, x28, x25
+ str x28, [x26, x25]
+ b 1b
+
+ /*
+ * Zero out the bss section, 16 bytes at a time — assumes bss_begin
+ * and bss_end are 16-byte aligned (TODO: confirm against the linker
+ * script).
+ */
+2: adrp x29, bss_begin
+ add x29, x29, :lo12:bss_begin
+
+ adrp x30, bss_end
+ add x30, x30, :lo12:bss_end
+
+3: cmp x29, x30
+ b.hs 4f
+
+ stp xzr, xzr, [x29], #16
+ b 3b
+
+ /* Branch to the entry point for the specific image. */
+4: b image_entry
diff --git a/src/arch/aarch64/exception_macros.S b/src/arch/aarch64/exception_macros.S
new file mode 100644
index 0000000..fee454a
--- /dev/null
+++ b/src/arch/aarch64/exception_macros.S
@@ -0,0 +1,148 @@
+/*
+ * Copyright 2018 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * From Linux commit 679db70801da9fda91d26caf13bf5b5ccc74e8e8:
+ * "Some CPUs can speculate past an ERET instruction and potentially perform
+ * speculative accesses to memory before processing the exception return.
+ * Since the register state is often controlled by a lower privilege level
+ * at the point of an ERET, this could potentially be used as part of a
+ * side-channel attack."
+ *
+ * This macro emits a speculation barrier after the ERET to prevent the CPU
+ * from speculating past the exception return.
+ *
+ * ARMv8.5 introduces a dedicated SB speculative barrier instruction.
+ * Use a DSB/ISB pair on older platforms.
+ */
+.macro eret_with_sb
+ eret
+ dsb nsh
+ isb
+.endm
+
+/**
+ * Saves the volatile registers onto the stack. This currently takes 14
+ * instructions, so it can be used in exception handlers with 18 instructions
+ * left, 2 of which in the same cache line (assuming a 16-byte cache line).
+ *
+ * On return, x0 and x1 are initialised to elr_el2 and spsr_el2 respectively,
+ * which can be used as the first and second arguments of a subsequent call.
+ */
+.macro save_volatile_to_stack elx:req
+ /* Reserve stack space and save registers x0-x18, x29 & x30. */
+ stp x0, x1, [sp, #-(8 * 24)]!
+ stp x2, x3, [sp, #8 * 2]
+ stp x4, x5, [sp, #8 * 4]
+ stp x6, x7, [sp, #8 * 6]
+ stp x8, x9, [sp, #8 * 8]
+ stp x10, x11, [sp, #8 * 10]
+ stp x12, x13, [sp, #8 * 12]
+ stp x14, x15, [sp, #8 * 14]
+ stp x16, x17, [sp, #8 * 16]
+ str x18, [sp, #8 * 18]
+ stp x29, x30, [sp, #8 * 20]
+
+ /*
+ * Save elr_elx & spsr_elx. This such that we can take nested exception
+ * and still be able to unwind.
+ */
+ mrs x0, elr_\elx
+ mrs x1, spsr_\elx
+ stp x0, x1, [sp, #8 * 22]
+.endm
+
+/**
+ * Restores the volatile registers from the stack. This currently takes 14
+ * instructions, so it can be used in exception handlers while still leaving 18
+ * instructions left; if paired with save_volatile_to_stack, there are 4
+ * instructions to spare.
+ */
+.macro restore_volatile_from_stack elx:req
+ /* Restore registers x2-x18, x29 & x30. */
+ ldp x2, x3, [sp, #8 * 2]
+ ldp x4, x5, [sp, #8 * 4]
+ ldp x6, x7, [sp, #8 * 6]
+ ldp x8, x9, [sp, #8 * 8]
+ ldp x10, x11, [sp, #8 * 10]
+ ldp x12, x13, [sp, #8 * 12]
+ ldp x14, x15, [sp, #8 * 14]
+ ldp x16, x17, [sp, #8 * 16]
+ ldr x18, [sp, #8 * 18]
+ ldp x29, x30, [sp, #8 * 20]
+
+ /* Restore registers elr_elx & spsr_elx, using x0 & x1 as scratch. */
+ ldp x0, x1, [sp, #8 * 22]
+ msr elr_\elx, x0
+ msr spsr_\elx, x1
+
+ /* Restore x0 & x1, and release stack space. */
+ ldp x0, x1, [sp], #8 * 24
+.endm
+
+/**
+ * This is a generic handler for exceptions taken at the current EL while using
+ * SP0. It behaves similarly to the SPx case by first switching to SPx, doing
+ * the work, then switching back to SP0 before returning.
+ *
+ * Switching to SPx and calling the C handler takes 16 instructions, so it's not
+ * possible to add a branch to a common exit path without going into the next
+ * cache line (assuming 16-byte cache lines). Additionally, to restore and
+ * return we need an additional 16 instructions, so we could implement the whole
+ * handler within the allotted 32 instructions. However, since we want to emit
+ * a speculation barrier after each ERET, we are forced to move the ERET to
+ * a shared exit path.
+ */
+.macro current_exception_sp0 elx:req handler:req eret_label:req
+ msr spsel, #1
+ save_volatile_to_stack \elx
+ bl \handler
+ restore_volatile_from_stack \elx
+ msr spsel, #0
+ b \eret_label
+.endm
+
+/**
+ * Variant of current_exception_sp0 which assumes the handler never returns.
+ */
+.macro noreturn_current_exception_sp0 elx:req handler:req
+ msr spsel, #1
+ save_volatile_to_stack \elx
+ b \handler
+.endm
+
+/**
+ * This is a generic handler for exceptions taken at the current EL while using
+ * SPx. It saves volatile registers, calls the C handler, restores volatile
+ * registers, then returns.
+ *
+ * Saving state and jumping to C handler takes 15 instructions. We add an extra
+ * branch to a common exit path. So each handler takes up one unique cache line
+ * and one shared cache line (assuming 16-byte cache lines).
+ */
+.macro current_exception_spx elx:req handler:req
+ save_volatile_to_stack \elx
+ bl \handler
+ b restore_from_stack_and_return
+.endm
+
+/**
+ * Variant of current_exception_spx which assumes the handler never returns.
+ */
+.macro noreturn_current_exception_spx elx:req handler:req
+ save_volatile_to_stack \elx
+ b \handler
+.endm
diff --git a/src/arch/aarch64/hftest/BUILD.gn b/src/arch/aarch64/hftest/BUILD.gn
new file mode 100644
index 0000000..a801bd1
--- /dev/null
+++ b/src/arch/aarch64/hftest/BUILD.gn
@@ -0,0 +1,92 @@
+# Copyright 2018 The Hafnium Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# These components are only used by VMs for aarch64 specific actions.
+
+# Implements image_entry for a simple VM kernel.
+source_set("entry") {
+ sources = [
+ "entry.S",
+ ]
+}
+
+# Shutdown the system or exit emulation, start/stop CPUs.
+source_set("power_mgmt") {
+ testonly = true
+ public_configs = [ "//src/arch/aarch64:config" ]
+ sources = [
+ "cpu_entry.S",
+ "power_mgmt.c",
+ ]
+
+ deps = [
+ "//src/arch/aarch64:smc",
+ ]
+}
+
+# Exception handlers for interrupts.
+source_set("interrupts") {
+ testonly = true
+ public_configs = [
+ "//src/arch/aarch64:config",
+ "//test/hftest:hftest_config",
+ ]
+ sources = [
+ "events.c",
+ "exceptions.S",
+ "interrupts.c",
+ ]
+}
+
+# GICv3 EL1 driver.
+source_set("interrupts_gicv3") {
+ testonly = true
+ public_configs = [ "//src/arch/aarch64:config" ]
+ sources = [
+ "interrupts_gicv3.c",
+ ]
+}
+
+# Get/set CPU state.
+source_set("state") {
+ testonly = true
+ public_configs = [ "//src/arch/aarch64:config" ]
+ sources = [
+ "state.c",
+ ]
+}
+
+# Interact directly with registers.
+source_set("registers") {
+ testonly = true
+ sources = [
+ "registers.c",
+ ]
+}
+
+source_set("console") {
+ sources = [
+ "console.c",
+ ]
+
+ deps = [
+ "//vmlib/aarch64:call",
+ ]
+}
+
+source_set("mm") {
+ sources = [
+ "mm.c",
+ ]
+}
diff --git a/src/arch/aarch64/hftest/console.c b/src/arch/aarch64/hftest/console.c
new file mode 100644
index 0000000..4e2f4e8
--- /dev/null
+++ b/src/arch/aarch64/hftest/console.c
@@ -0,0 +1,24 @@
+/*
+ * Copyright 2019 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "hf/plat/console.h"
+
+#include "vmapi/hf/call.h"
+
+/*
+ * Implements plat_console_putchar for test VMs by forwarding each character
+ * to the hypervisor's debug log via the hf_debug_log call interface.
+ */
+void plat_console_putchar(char c)
+{
+ hf_debug_log(c);
+}
diff --git a/src/arch/aarch64/hftest/cpu_entry.S b/src/arch/aarch64/hftest/cpu_entry.S
new file mode 100644
index 0000000..726b042
--- /dev/null
+++ b/src/arch/aarch64/hftest/cpu_entry.S
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2018 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * Entry point for a secondary CPU started by a test VM. On entry x0 points
+ * to a start-state block laid out as: [x0] initial stack pointer,
+ * [x0 + 8] entry function, [x0 + 16] argument for that function (offsets
+ * inferred from the loads below — confirm against the cpu_start_state
+ * struct definition in power_mgmt).
+ */
+.global vm_cpu_entry
+vm_cpu_entry:
+ /* Disable trapping floating point access in EL1. */
+ mov x1, #(0x3 << 20)
+ msr cpacr_el1, x1
+ isb
+
+ /* Initialise stack from the cpu_start_state struct. */
+ ldr x1, [x0]
+ mov sp, x1
+
+ /* Load entry function pointer and its argument. */
+ ldr x1, [x0, 8]
+ ldr x0, [x0, 16]
+
+ /* Branch to entry function. */
+ blr x1
+
+ /* Entry function should not return, but if it does, spin. */
+ b .
diff --git a/src/arch/aarch64/hftest/entry.S b/src/arch/aarch64/hftest/entry.S
new file mode 100644
index 0000000..5fbe110
--- /dev/null
+++ b/src/arch/aarch64/hftest/entry.S
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2018 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+.section .init.image_entry, "ax"
+.global image_entry
+image_entry:
+ /*
+ * Prepare the stack: sp starts at the top of kstack, assumed to be at
+ * least 4096 bytes (kstack is defined elsewhere — confirm its size).
+ */
+ adr x30, kstack + 4096
+ mov sp, x30
+
+ /* Disable trapping floating point access in EL1. */
+ mov x30, #(0x3 << 20)
+ msr cpacr_el1, x30
+ isb
+
+ /* Call into C code. */
+ bl kmain
+
+ /* If the VM returns, shutdown the system. */
+ bl arch_power_off
+
+ /* Loop forever waiting for interrupts. */
+0: wfi
+ b 0b
diff --git a/src/arch/aarch64/hftest/events.c b/src/arch/aarch64/hftest/events.c
new file mode 100644
index 0000000..435a74d
--- /dev/null
+++ b/src/arch/aarch64/hftest/events.c
@@ -0,0 +1,27 @@
+/*
+ * Copyright 2019 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "hf/arch/vm/events.h"
+
+/* Waits for an event using the WFE instruction. */
+void event_wait(void)
+{
+ __asm__ volatile("wfe");
+}
+
+/*
+ * Sets the local CPU's event register (SEVL) so the next WFE on this CPU
+ * returns immediately.
+ */
+void event_send_local(void)
+{
+ __asm__ volatile("sevl");
+}
diff --git a/src/arch/aarch64/hftest/exceptions.S b/src/arch/aarch64/hftest/exceptions.S
new file mode 100644
index 0000000..0203110
--- /dev/null
+++ b/src/arch/aarch64/hftest/exceptions.S
@@ -0,0 +1,122 @@
+/*
+ * Copyright 2018 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "exception_macros.S"
+
+.section .text.vector_table_el1, "ax"
+.global vector_table_el1
+.balign 0x800
+vector_table_el1:
+sync_cur_sp0:
+ b .
+
+.balign 0x80
+irq_cur_sp0:
+ current_exception_sp0 el1 irq_current exception_handler_return
+
+.balign 0x80
+fiq_cur_sp0:
+ b .
+
+.balign 0x80
+serr_cur_sp0:
+ b .
+
+.balign 0x80
+sync_cur_spx:
+ current_exception_spx el1 sync_exception_current
+
+.balign 0x80
+irq_cur_spx:
+ current_exception_spx el1 irq_current
+
+.balign 0x80
+fiq_cur_spx:
+ b .
+
+.balign 0x80
+serr_cur_spx:
+ b .
+
+.balign 0x80
+sync_lower_64:
+ b .
+
+.balign 0x80
+irq_lower_64:
+ b .
+
+.balign 0x80
+fiq_lower_64:
+ b .
+
+.balign 0x80
+serr_lower_64:
+ b .
+
+.balign 0x80
+sync_lower_32:
+ b .
+
+.balign 0x80
+irq_lower_32:
+ b .
+
+.balign 0x80
+fiq_lower_32:
+ b .
+
+.balign 0x80
+serr_lower_32:
+ b .
+
+.balign 0x40
+/**
+ * Restores the volatile registers from the stack.
+ *
+ * Register x0: if false restores elr_el1, if true retains the value of elr_el1.
+ * This enables exception handlers to indicate whether they have changed the
+ * value of elr_el1 (e.g., to skip the faulting instruction).
+ */
+restore_from_stack_and_return:
+ /* Restore registers x2-x18, x29 & x30. */
+ ldp x2, x3, [sp, #8 * 2]
+ ldp x4, x5, [sp, #8 * 4]
+ ldp x6, x7, [sp, #8 * 6]
+ ldp x8, x9, [sp, #8 * 8]
+ ldp x10, x11, [sp, #8 * 10]
+ ldp x12, x13, [sp, #8 * 12]
+ ldp x14, x15, [sp, #8 * 14]
+ ldp x16, x17, [sp, #8 * 16]
+ ldr x18, [sp, #8 * 18]
+ ldp x29, x30, [sp, #8 * 20]
+
+ cbnz x0, skip_elr
+
+ /* Restore register elr_el1 using x1 as scratch. */
+ ldr x1, [sp, #8 * 22]
+ msr elr_el1, x1
+
+skip_elr:
+ /* Restore register spsr_el1 using x1 as scratch. */
+ ldr x1, [sp, #8 * 23]
+ msr spsr_el1, x1
+
+ /* Restore x0 & x1, and release stack space. */
+ ldp x0, x1, [sp], #8 * 24
+
+/* Shared exit path: single ERET followed by a speculation barrier. */
+exception_handler_return:
+ eret_with_sb
diff --git a/src/arch/aarch64/hftest/interrupts.c b/src/arch/aarch64/hftest/interrupts.c
new file mode 100644
index 0000000..a7eb401
--- /dev/null
+++ b/src/arch/aarch64/hftest/interrupts.c
@@ -0,0 +1,88 @@
+/*
+ * Copyright 2018 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "hf/arch/vm/interrupts.h"
+
+#include <stdint.h>
+
+#include "hf/dlog.h"
+
+#include "msr.h"
+#include "test/hftest.h"
+
+extern uint8_t vector_table_el1;
+static void (*irq_callback)(void);
+static bool (*exception_callback)(void);
+
+/**
+ * IRQ handler invoked from the exception vectors: dispatches to the callback
+ * registered via exception_setup(). An IRQ with no callback fails the test.
+ */
+void irq_current(void)
+{
+	if (irq_callback == NULL) {
+		FAIL("Got unexpected interrupt.\n");
+	} else {
+		irq_callback();
+	}
+}
+
+/**
+ * Fallback handler for synchronous exceptions when no callback is registered:
+ * logs the faulting PC/ESR and hangs forever.
+ *
+ * Declared with a bool return type so sync_exception_current() can `return`
+ * it, but it never actually returns (noreturn).
+ */
+noreturn static bool default_sync_current_exception(void)
+{
+	uintreg_t esr = read_msr(esr_el1);
+	uintreg_t elr = read_msr(elr_el1);
+
+	/* ESR bits [31:26] are the exception class (EC). */
+	switch (esr >> 26) {
+	case 0x25: /* EC = 100101, Data abort. */
+		dlog("Data abort: pc=%#x, esr=%#x, ec=%#x", elr, esr,
+		     esr >> 26);
+		/* FnV (ISS bit 10) clear means FAR holds the faulting VA. */
+		if (!(esr & (1U << 10))) { /* Check FnV bit. */
+			dlog(", far=%#x", read_msr(far_el1));
+		} else {
+			dlog(", far=invalid");
+		}
+
+		dlog("\n");
+		break;
+
+	default:
+		dlog("Unknown current sync exception pc=%#x, esr=%#x, "
+		     "ec=%#x\n",
+		     elr, esr, esr >> 26);
+	}
+
+	for (;;) {
+		/* do nothing */
+	}
+}
+
+/**
+ * Synchronous exception entry point: prefers the callback registered via
+ * exception_setup(); otherwise logs and hangs in the default handler.
+ */
+bool sync_exception_current(void)
+{
+	if (exception_callback == NULL) {
+		return default_sync_current_exception();
+	}
+	return exception_callback();
+}
+
+/**
+ * Registers the IRQ and synchronous-exception callbacks (either may be NULL)
+ * and installs the test's exception vector table into VBAR_EL1.
+ */
+void exception_setup(void (*irq)(void), bool (*exception)(void))
+{
+	exception_callback = exception;
+	irq_callback = irq;
+
+	/* Set exception vector table. */
+	write_msr(VBAR_EL1, &vector_table_el1);
+}
+
+/**
+ * Suspends the core until an interrupt arrives (wfi).
+ */
+void interrupt_wait(void)
+{
+	__asm__ volatile("wfi");
+}
diff --git a/src/arch/aarch64/hftest/interrupts_gicv3.c b/src/arch/aarch64/hftest/interrupts_gicv3.c
new file mode 100644
index 0000000..9010ce1
--- /dev/null
+++ b/src/arch/aarch64/hftest/interrupts_gicv3.c
@@ -0,0 +1,141 @@
+/*
+ * Copyright 2018 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "hf/arch/vm/interrupts_gicv3.h"
+
+#include <stdbool.h>
+#include <stdint.h>
+
+#include "hf/dlog.h"
+
+#include "msr.h"
+
+/**
+ * Configures the GICv3 for the test: enables affinity routing and non-secure
+ * group 1 interrupts, wakes the redistributor for this CPU, and puts all
+ * SGIs/PPIs into non-secure group 1.
+ */
+void interrupt_gic_setup(void)
+{
+	uint32_t ctlr = 1U << 4 /* Enable affinity routing. */
+			| 1U << 1; /* Enable group 1 non-secure interrupts. */
+
+	write_msr(ICC_CTLR_EL1, 0);
+
+	io_write32(GICD_CTLR, ctlr);
+
+	/*
+	 * Mark CPU as awake: clear ProcessorSleep (bit 1) and wait for the
+	 * redistributor to clear ChildrenAsleep (bit 2).
+	 */
+	io_write32(GICR_WAKER, io_read32(GICR_WAKER) & ~(1U << 1));
+	while ((io_read32(GICR_WAKER) & (1U << 2)) != 0) {
+		dlog("Waiting for ChildrenAsleep==0\n");
+	}
+
+	/* Put interrupts into non-secure group 1. */
+	/*
+	 * Fixed: the format string has a single %x, but a stray extra
+	 * 0xffffffff argument was previously passed first, so the logged
+	 * value was not the register contents.
+	 */
+	dlog("GICR_IGROUPR0 was %x\n", io_read32(GICR_IGROUPR0));
+	io_write32(GICR_IGROUPR0, 0xffffffff);
+	dlog("wrote %x to GICR_IGROUPR0, got back %x\n", 0xffffffff,
+	     io_read32(GICR_IGROUPR0));
+	/* Enable non-secure group 1. */
+	write_msr(ICC_IGRPEN1_EL1, 0x00000001);
+	dlog("wrote %x to ICC_IGRPEN1_EL1, got back %x\n", 0x00000001,
+	     read_msr(ICC_IGRPEN1_EL1));
+}
+
+/**
+ * Enables or disables the given interrupt ID. Set-enable (ISENABLER) and
+ * clear-enable (ICENABLER) are separate write-1-to-act registers; SGIs and
+ * PPIs (INTID < 32) additionally live in the redistributor.
+ */
+void interrupt_enable(uint32_t intid, bool enable)
+{
+	uint32_t index = intid / 32;
+	uint32_t bit = 1U << (intid % 32);
+
+	if (enable) {
+		io_write32_array(GICD_ISENABLER, index, bit);
+		if (intid < 32) {
+			io_write32(GICR_ISENABLER0, bit);
+		}
+		return;
+	}
+
+	io_write32_array(GICD_ICENABLER, index, bit);
+	if (intid < 32) {
+		io_write32(GICR_ICENABLER0, bit);
+	}
+}
+
+/**
+ * Enables or disables all interrupts in the distributor and, for SGIs/PPIs,
+ * the redistributor.
+ */
+void interrupt_enable_all(bool enable)
+{
+	uint32_t i;
+
+	if (enable) {
+		io_write32(GICR_ISENABLER0, 0xffffffff);
+		for (i = 0; i < 32; ++i) {
+			io_write32_array(GICD_ISENABLER, i, 0xffffffff);
+		}
+	} else {
+		/*
+		 * Fixed: ISENABLER is write-1-to-enable and writing 0 has no
+		 * effect (Arm GIC architecture specification), so the previous
+		 * writes of 0 to ISENABLER silently left everything enabled.
+		 * Disabling requires writing 1s to the clear-enable
+		 * (ICENABLER) registers instead.
+		 */
+		io_write32(GICR_ICENABLER0, 0xffffffff);
+		for (i = 0; i < 32; ++i) {
+			io_write32_array(GICD_ICENABLER, i, 0xffffffff);
+		}
+	}
+}
+
+/**
+ * Sets the priority mask (ICC_PMR_EL1): only interrupts with higher priority
+ * (numerically lower) than `min_priority` are signalled to this PE.
+ */
+void interrupt_set_priority_mask(uint8_t min_priority)
+{
+	write_msr(ICC_PMR_EL1, min_priority);
+}
+
+/**
+ * Sets the priority of the given interrupt ID; GICD_IPRIORITYR is
+ * byte-indexed, one byte per INTID.
+ */
+void interrupt_set_priority(uint32_t intid, uint8_t priority)
+{
+	io_write8_array(GICD_IPRIORITYR, intid, priority);
+}
+
+/**
+ * Configures the given interrupt ID as edge-triggered or level-sensitive.
+ * ICFGR holds 2 bits per INTID; the upper bit of each pair selects edge
+ * triggering. SGIs/PPIs (INTID < 32) are configured via the redistributor,
+ * SPIs via the distributor.
+ */
+void interrupt_set_edge_triggered(uint32_t intid, bool edge_triggered)
+{
+	uint32_t index = intid / 16;
+	uint32_t bit = 1U << (((intid % 16) * 2) + 1);
+	uint32_t v;
+
+	if (intid < 32) {
+		v = io_read32_array(GICR_ICFGR, index);
+		v = edge_triggered ? (v | bit) : (v & ~bit);
+		io_write32_array(GICR_ICFGR, index, v);
+	} else {
+		v = io_read32_array(GICD_ICFGR, index);
+		v = edge_triggered ? (v | bit) : (v & ~bit);
+		io_write32_array(GICD_ICFGR, index, v);
+	}
+}
+
+/**
+ * Sends a software-generated interrupt by writing ICC_SGI1R_EL1.
+ * The fields follow the ICC_SGI1R_EL1 layout: TargetList [15:0],
+ * Aff1 [23:16], INTID [27:24], Aff2 [39:32], IRM [40], Aff3 [55:48].
+ */
+void interrupt_send_sgi(uint8_t intid, bool irm, uint8_t affinity3,
+			uint8_t affinity2, uint8_t affinity1,
+			uint16_t target_list)
+{
+	uint64_t sgi = (uint64_t)target_list;
+
+	sgi |= (uint64_t)affinity1 << 16;
+	sgi |= ((uint64_t)intid & 0x0f) << 24;
+	sgi |= (uint64_t)affinity2 << 32;
+	sgi |= (uint64_t)irm << 40;
+	sgi |= (uint64_t)affinity3 << 48;
+
+	write_msr(ICC_SGI1R_EL1, sgi);
+}
+
+/**
+ * Reads and acknowledges the highest-priority pending group 1 interrupt
+ * (ICC_IAR1_EL1). Returns its INTID.
+ */
+uint32_t interrupt_get_and_acknowledge(void)
+{
+	return read_msr(ICC_IAR1_EL1);
+}
+
+/**
+ * Signals end-of-interrupt for the given INTID (ICC_EOIR1_EL1); must match a
+ * previous interrupt_get_and_acknowledge().
+ */
+void interrupt_end(uint32_t intid)
+{
+	write_msr(ICC_EOIR1_EL1, intid);
+}
diff --git a/src/arch/aarch64/hftest/mm.c b/src/arch/aarch64/hftest/mm.c
new file mode 100644
index 0000000..86f27d8
--- /dev/null
+++ b/src/arch/aarch64/hftest/mm.c
@@ -0,0 +1,101 @@
+/*
+ * Copyright 2019 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "hf/mm.h"
+
+#include "hf/arch/barriers.h"
+#include "hf/arch/vm/mm.h"
+
+#include "hf/dlog.h"
+
+#include "../msr.h"
+
+#define STAGE1_DEVICEINDX UINT64_C(0)
+#define STAGE1_NORMALINDX UINT64_C(1)
+
+static uintreg_t mm_mair_el1;
+static uintreg_t mm_tcr_el1;
+static uintreg_t mm_sctlr_el1;
+
+/**
+ * Initialize MMU for a test running in EL1.
+ *
+ * Reads ID_AA64MMFR0_EL1 to validate that 4KB granules and the reported
+ * physical address range are usable, then precomputes the MAIR/TCR/SCTLR
+ * values that arch_vm_mm_enable() will install. Returns false if the
+ * hardware configuration is unsupported.
+ */
+bool arch_vm_mm_init(void)
+{
+	/*
+	 * PARange encodings 0-5; remaining entries are zero-initialized and
+	 * rejected by the !pa_bits check below.
+	 */
+	static const int pa_bits_table[16] = {32, 36, 40, 42, 44, 48};
+	uint64_t features = read_msr(id_aa64mmfr0_el1);
+	int pa_bits = pa_bits_table[features & 0xf];
+
+	/* Check that 4KB granules are supported. */
+	if ((features >> 28) & 0xf) {
+		dlog("4KB granules are not supported\n");
+		return false;
+	}
+
+	/* Check the physical address range. */
+	if (!pa_bits) {
+		dlog("Unsupported value of id_aa64mmfr0_el1.PARange: %x\n",
+		     features & 0xf);
+		return false;
+	}
+
+	/*
+	 * 0    -> Device-nGnRnE memory
+	 * 0xff -> Normal memory, Inner/Outer Write-Back Non-transient,
+	 *         Write-Alloc, Read-Alloc.
+	 */
+	mm_mair_el1 = (0 << (8 * STAGE1_DEVICEINDX)) |
+		      (0xff << (8 * STAGE1_NORMALINDX));
+
+	mm_tcr_el1 = (1 << 20) |		/* TBI, top byte ignored. */
+		     ((features & 0xf) << 16) | /* PS. */
+		     (0 << 14) |		/* TG0, granule size, 4KB. */
+		     (3 << 12) |		/* SH0, inner shareable. */
+		     (1 << 10) | /* ORGN0, normal mem, WB RA WA Cacheable. */
+		     (1 << 8) |	 /* IRGN0, normal mem, WB RA WA Cacheable. */
+		     (25 << 0) | /* T0SZ, input address is 2^39 bytes. */
+		     0;
+
+	mm_sctlr_el1 = (1 << 0) |  /* M, enable stage 1 EL2 MMU. */
+		       (1 << 1) |  /* A, enable alignment check faults. */
+		       (1 << 2) |  /* C, data cache enable. */
+		       (1 << 3) |  /* SA, enable stack alignment check. */
+		       (3 << 4) |  /* RES1 bits. */
+		       (1 << 11) | /* RES1 bit. */
+		       (1 << 12) | /* I, instruction cache enable. */
+		       (1 << 16) | /* RES1 bit. */
+		       (1 << 18) | /* RES1 bit. */
+		       (0 << 19) | /* WXN bit, writable execute never. */
+		       (3 << 22) | /* RES1 bits. */
+		       (3 << 28) | /* RES1 bits. */
+		       0;
+
+	return true;
+}
+
+/**
+ * Enables the EL1 MMU with the given stage-1 root page table, using the
+ * register values precomputed by arch_vm_mm_init().
+ */
+void arch_vm_mm_enable(paddr_t table)
+{
+	/* Configure translation management registers. */
+	write_msr(ttbr0_el1, pa_addr(table));
+	write_msr(mair_el1, mm_mair_el1);
+	write_msr(tcr_el1, mm_tcr_el1);
+
+	/* Configure sctlr_el1 to enable MMU and cache. */
+	/*
+	 * dsb/isb ensure all prior writes (including the translation
+	 * registers above) are complete and visible before the MMU is
+	 * switched on; the trailing isb makes the new SCTLR take effect
+	 * before any subsequent instruction.
+	 */
+	dsb(sy);
+	isb();
+	write_msr(sctlr_el1, mm_sctlr_el1);
+	isb();
+}
diff --git a/src/arch/aarch64/hftest/power_mgmt.c b/src/arch/aarch64/hftest/power_mgmt.c
new file mode 100644
index 0000000..cfa3e08
--- /dev/null
+++ b/src/arch/aarch64/hftest/power_mgmt.c
@@ -0,0 +1,93 @@
+/*
+ * Copyright 2018 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "hf/arch/vm/power_mgmt.h"
+
+#include "hf/static_assert.h"
+
+#include "vmapi/hf/call.h"
+
+#include "psci.h"
+#include "smc.h"
+
+/**
+ * Starts the CPU with the given ID. It will set the stack pointer according to
+ * the provided `state` and jump to the entry point with the given argument
+ * specified in it.
+ *
+ * Note: The caller of this function must guarantee that the contents of `state`
+ * do not change until the new CPU has branched to the given entry point, and
+ * that it was written-back to memory (that it is not waiting in a data cache)
+ * because the new CPU is started with caching disabled.
+ *
+ * Returns true if the PSCI CPU_ON call succeeded.
+ */
+bool arch_cpu_start(uintptr_t id, struct arch_cpu_start_state *state)
+{
+	/* Assembly entry point that unpacks `state` on the new CPU. */
+	void vm_cpu_entry(uintptr_t arg);
+	struct spci_value smc_res;
+
+	/* Try to start the CPU. */
+	smc_res = smc64(PSCI_CPU_ON, id, (uintptr_t)&vm_cpu_entry,
+			(uintptr_t)state, 0, 0, 0, SMCCC_CALLER_HYPERVISOR);
+
+	return smc_res.func == PSCI_RETURN_SUCCESS;
+}
+
+/**
+ * Stops the current CPU.
+ *
+ * PSCI CPU_OFF does not return on success; the loop guards against a
+ * misbehaving implementation.
+ */
+noreturn void arch_cpu_stop(void)
+{
+	smc32(PSCI_CPU_OFF, 0, 0, 0, 0, 0, 0, SMCCC_CALLER_HYPERVISOR);
+	for (;;) {
+		/* This should never be reached. */
+	}
+}
+
+static_assert(POWER_STATUS_ON == PSCI_RETURN_ON,
+ "power_status enum values must match PSCI return values.");
+static_assert(POWER_STATUS_OFF == PSCI_RETURN_OFF,
+ "power_status enum values must match PSCI return values.");
+static_assert(POWER_STATUS_ON_PENDING == PSCI_RETURN_ON_PENDING,
+ "power_status enum values must match PSCI return values.");
+
+/**
+ * Returns the power status of the given CPU.
+ *
+ * Note the implicit conversion of smc_res.func to enum power_status on
+ * return; the static_asserts above guarantee the values line up.
+ */
+enum power_status arch_cpu_status(cpu_id_t cpu_id)
+{
+	uint32_t lowest_affinity_level = 0;
+	struct spci_value smc_res;
+
+	/*
+	 * This works because the power_status enum values happen to be the same
+	 * as the PSCI_RETURN_* values. The static_asserts above validate that
+	 * this is the case.
+	 */
+	smc_res = smc32(PSCI_AFFINITY_INFO, cpu_id, lowest_affinity_level, 0, 0,
+			0, 0, SMCCC_CALLER_HYPERVISOR);
+	return smc_res.func;
+}
+
+/**
+ * Shuts down the system or exits emulation.
+ *
+ * PSCI SYSTEM_OFF does not return on success; the loop guards against a
+ * misbehaving implementation.
+ */
+noreturn void arch_power_off(void)
+{
+	smc32(PSCI_SYSTEM_OFF, 0, 0, 0, 0, 0, 0, SMCCC_CALLER_HYPERVISOR);
+	for (;;) {
+		/* This should never be reached. */
+	}
+}
diff --git a/src/arch/aarch64/hftest/registers.c b/src/arch/aarch64/hftest/registers.c
new file mode 100644
index 0000000..15b3409
--- /dev/null
+++ b/src/arch/aarch64/hftest/registers.c
@@ -0,0 +1,112 @@
+/*
+ * Copyright 2019 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "hf/arch/vm/registers.h"
+
+/*
+ * Reads FP register `name` (d0-d31) into a double via fmov. Each use names
+ * the register statically, hence macros rather than functions.
+ */
+#define read_fp_register(name)                                 \
+	__extension__({                                        \
+		double __v;                                    \
+		__asm__ volatile("fmov %0, " #name : "=r"(__v)); \
+		__v;                                           \
+	})
+
+/* Writes `value` (converted to double) into FP register `name`. */
+#define write_fp_register(name, value)                  \
+	__extension__({                                 \
+		__asm__ volatile("fmov " #name ", %0"   \
+				 :                      \
+				 : "r"((double)(value)) \
+				 : #name);              \
+	})
+
+/* Copies FP register `source` into FP register `dest`. */
+#define move_fp_register(dest, source)                                    \
+	__extension__({                                                   \
+		__asm__ volatile("fmov " #dest ", " #source : : : #dest); \
+	})
+
+/**
+ * Writes `value` into every FP register d0-d31: d0 is loaded from memory,
+ * then copied into d1..d31.
+ */
+void fill_fp_registers(double value)
+{
+	write_fp_register(d0, value);
+	move_fp_register(d1, d0);
+	move_fp_register(d2, d0);
+	move_fp_register(d3, d0);
+	move_fp_register(d4, d0);
+	move_fp_register(d5, d0);
+	move_fp_register(d6, d0);
+	move_fp_register(d7, d0);
+	move_fp_register(d8, d0);
+	move_fp_register(d9, d0);
+	move_fp_register(d10, d0);
+	move_fp_register(d11, d0);
+	move_fp_register(d12, d0);
+	move_fp_register(d13, d0);
+	move_fp_register(d14, d0);
+	move_fp_register(d15, d0);
+	move_fp_register(d16, d0);
+	move_fp_register(d17, d0);
+	move_fp_register(d18, d0);
+	move_fp_register(d19, d0);
+	move_fp_register(d20, d0);
+	move_fp_register(d21, d0);
+	move_fp_register(d22, d0);
+	move_fp_register(d23, d0);
+	move_fp_register(d24, d0);
+	move_fp_register(d25, d0);
+	move_fp_register(d26, d0);
+	move_fp_register(d27, d0);
+	move_fp_register(d28, d0);
+	move_fp_register(d29, d0);
+	move_fp_register(d30, d0);
+	move_fp_register(d31, d0);
+}
+
+/**
+ * Checks that every FP register d0-d31 holds the given value.
+ *
+ * Returns true iff all 32 registers compare equal to `value`.
+ *
+ * Fixed: d21 was previously checked twice and d22 never checked, so a
+ * corrupted d22 went undetected.
+ */
+bool check_fp_register(double value)
+{
+	bool result = true;
+
+	result = result && (read_fp_register(d0) == value);
+	result = result && (read_fp_register(d1) == value);
+	result = result && (read_fp_register(d2) == value);
+	result = result && (read_fp_register(d3) == value);
+	result = result && (read_fp_register(d4) == value);
+	result = result && (read_fp_register(d5) == value);
+	result = result && (read_fp_register(d6) == value);
+	result = result && (read_fp_register(d7) == value);
+	result = result && (read_fp_register(d8) == value);
+	result = result && (read_fp_register(d9) == value);
+	result = result && (read_fp_register(d10) == value);
+	result = result && (read_fp_register(d11) == value);
+	result = result && (read_fp_register(d12) == value);
+	result = result && (read_fp_register(d13) == value);
+	result = result && (read_fp_register(d14) == value);
+	result = result && (read_fp_register(d15) == value);
+	result = result && (read_fp_register(d16) == value);
+	result = result && (read_fp_register(d17) == value);
+	result = result && (read_fp_register(d18) == value);
+	result = result && (read_fp_register(d19) == value);
+	result = result && (read_fp_register(d20) == value);
+	result = result && (read_fp_register(d21) == value);
+	result = result && (read_fp_register(d22) == value);
+	result = result && (read_fp_register(d23) == value);
+	result = result && (read_fp_register(d24) == value);
+	result = result && (read_fp_register(d25) == value);
+	result = result && (read_fp_register(d26) == value);
+	result = result && (read_fp_register(d27) == value);
+	result = result && (read_fp_register(d28) == value);
+	result = result && (read_fp_register(d29) == value);
+	result = result && (read_fp_register(d30) == value);
+	result = result && (read_fp_register(d31) == value);
+	return result;
+}
diff --git a/src/arch/aarch64/hftest/state.c b/src/arch/aarch64/hftest/state.c
new file mode 100644
index 0000000..53471f2
--- /dev/null
+++ b/src/arch/aarch64/hftest/state.c
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2018 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "hf/arch/vm/state.h"
+
+#include "msr.h"
+
+/**
+ * Stores the per-CPU pointer in TPIDR_EL1 (the EL1 software thread-ID
+ * register), where per_cpu_ptr_get() retrieves it.
+ */
+void per_cpu_ptr_set(uintptr_t v)
+{
+	write_msr(tpidr_el1, v);
+}
+
+/**
+ * Returns the per-CPU pointer previously stored with per_cpu_ptr_set().
+ */
+uintptr_t per_cpu_ptr_get(void)
+{
+	return read_msr(tpidr_el1);
+}
diff --git a/src/arch/aarch64/hypervisor/BUILD.gn b/src/arch/aarch64/hypervisor/BUILD.gn
new file mode 100644
index 0000000..8a057e9
--- /dev/null
+++ b/src/arch/aarch64/hypervisor/BUILD.gn
@@ -0,0 +1,52 @@
+# Copyright 2019 The Hafnium Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import("//build/toolchain/offset_size_header.gni")
+import("//src/arch/aarch64/args.gni")
+
+# Generates "hf/arch/offsets.h" from offsets.c so assembly can reference
+# struct member offsets (e.g. VCPU_REGS) that match the C layout.
+offset_size_header("offsets") {
+  sources = [
+    "offsets.c",
+  ]
+  path = "hf/arch/offsets.h"
+}
+
+# Hypervisor specific code.
+source_set("hypervisor") {
+  public_configs = [ "//src/arch/aarch64:config" ]
+
+  # Assembly entry/exception paths.
+  sources = [
+    "exceptions.S",
+    "hypervisor_entry.S",
+    "plat_entry.S",
+  ]
+
+  # C handlers and per-CPU/VM state management.
+  sources += [
+    "cpu.c",
+    "debug_el1.c",
+    "feature_id.c",
+    "handler.c",
+    "perfmon.c",
+    "psci_handler.c",
+    "sysregs.c",
+    "vm.c",
+  ]
+
+  deps = [
+    ":offsets",
+    "//src/arch/aarch64:arch",
+    "//src/arch/aarch64:entry",
+    "//src/arch/aarch64:smc",
+    plat_smc,
+  ]
+}
diff --git a/src/arch/aarch64/hypervisor/cpu.c b/src/arch/aarch64/hypervisor/cpu.c
new file mode 100644
index 0000000..346c6b6
--- /dev/null
+++ b/src/arch/aarch64/hypervisor/cpu.c
@@ -0,0 +1,147 @@
+/*
+ * Copyright 2018 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "hf/arch/cpu.h"
+
+#include <stdbool.h>
+#include <stddef.h>
+#include <stdint.h>
+
+#include "hf/addr.h"
+#include "hf/spci.h"
+#include "hf/std.h"
+#include "hf/vm.h"
+
+#include "feature_id.h"
+#include "msr.h"
+#include "perfmon.h"
+#include "sysregs.h"
+
+/**
+ * The LO field indicates whether LORegions are supported.
+ */
+#define ID_AA64MMFR1_EL1_LO (UINT64_C(1) << 16)
+
+/**
+ * Disables LORegions if the CPU implements them; does nothing otherwise.
+ */
+static void lor_disable(void)
+{
+	/*
+	 * Accesses to LORC_EL1 are undefined if LORegions are not supported.
+	 */
+	if ((read_msr(ID_AA64MMFR1_EL1) & ID_AA64MMFR1_EL1_LO) == 0) {
+		return;
+	}
+
+	write_msr(MSR_LORC_EL1, 0);
+}
+
+/**
+ * Initializes the GIC-related vCPU register state (only compiled in for
+ * GICv3/v4). The primary VM gets direct EL1 access to ICC_SRE_EL1; secondary
+ * VMs have their GIC system register accesses trapped to the hypervisor.
+ */
+static void gic_regs_reset(struct arch_regs *r, bool is_primary)
+{
+#if GIC_VERSION == 3 || GIC_VERSION == 4
+	uint32_t ich_hcr = 0;
+	uint32_t icc_sre_el2 =
+		(1U << 0) | /* SRE, enable ICH_* and ICC_* at EL2. */
+		(0x3 << 1); /* DIB and DFB, disable IRQ/FIQ bypass. */
+
+	if (is_primary) {
+		icc_sre_el2 |= 1U << 3; /* Enable EL1 access to ICC_SRE_EL1. */
+	} else {
+		/* Trap EL1 access to GICv3 system registers. */
+		ich_hcr =
+			(0x1fU << 10); /* TDIR, TSEI, TALL1, TALL0, TC bits. */
+	}
+	r->gic.ich_hcr_el2 = ich_hcr;
+	r->gic.icc_sre_el2 = icc_sre_el2;
+#endif
+}
+
+/**
+ * Resets the architectural register state of a vCPU, preserving only its
+ * entry point (pc) and first argument (r[0]); everything else is zeroed and
+ * then set to the per-VM defaults (stage-2 translation base, traps, timer
+ * and debug configuration).
+ */
+void arch_regs_reset(struct vcpu *vcpu)
+{
+	spci_vm_id_t vm_id = vcpu->vm->id;
+	bool is_primary = vm_id == HF_PRIMARY_VM_ID;
+	cpu_id_t vcpu_id = vcpu_index(vcpu);
+	paddr_t table = vcpu->vm->ptable.root;
+	struct arch_regs *r = &vcpu->regs;
+	/* pc and r[0] are captured before the memset wipes them. */
+	uintreg_t pc = r->pc;
+	uintreg_t arg = r->r[0];
+	uintreg_t cnthctl;
+
+	memset_s(r, sizeof(*r), 0, sizeof(*r));
+
+	r->pc = pc;
+	r->r[0] = arg;
+
+	cnthctl = 0;
+
+	if (is_primary) {
+		/* Only the primary VM gets untrapped physical timer access. */
+		cnthctl |=
+			(1U << 0) | /* EL1PCTEN, don't trap phys cnt access. */
+			(1U << 1); /* EL1PCEN, don't trap phys timer access. */
+	}
+
+	r->lazy.hcr_el2 = get_hcr_el2_value(vm_id);
+	r->lazy.cnthctl_el2 = cnthctl;
+	/* VMID for stage-2 translation is derived from the VM ID. */
+	r->lazy.vttbr_el2 = pa_addr(table) | ((uint64_t)vm_id << 48);
+	r->lazy.vmpidr_el2 = vcpu_id;
+	/* Mask (disable) interrupts and run in EL1h mode. */
+	r->spsr = PSR_D | PSR_A | PSR_I | PSR_F | PSR_PE_MODE_EL1H;
+
+	r->lazy.mdcr_el2 = get_mdcr_el2_value();
+
+	/*
+	 * NOTE: It is important that MDSCR_EL1.MDE (bit 15) is set to 0 for
+	 * secondary VMs as long as Hafnium does not support debug register
+	 * access for secondary VMs. If adding Hafnium support for secondary VM
+	 * debug register accesses, then on context switches Hafnium needs to
+	 * save/restore EL1 debug register state that either might change, or
+	 * that needs to be protected.
+	 */
+	r->lazy.mdscr_el1 = 0x0U & ~(0x1U << 15);
+
+	/* Disable cycle counting on initialization. */
+	r->lazy.pmccfiltr_el0 = perfmon_get_pmccfiltr_el0_init_value(vm_id);
+
+	/* Set feature-specific register values. */
+	feature_set_traps(vcpu->vm, r);
+
+	gic_regs_reset(r, is_primary);
+}
+
+/**
+ * Sets the program counter and first argument (x0) with which the vCPU will
+ * next run.
+ */
+void arch_regs_set_pc_arg(struct arch_regs *r, ipaddr_t pc, uintreg_t arg)
+{
+	r->pc = ipa_addr(pc);
+	r->r[0] = arg;
+}
+
+/**
+ * Writes an SPCI return value into the vCPU's registers x0-x7, following the
+ * SMCCC convention of returning values in the first eight GP registers.
+ */
+void arch_regs_set_retval(struct arch_regs *r, struct spci_value v)
+{
+	r->r[0] = v.func;
+	r->r[1] = v.arg1;
+	r->r[2] = v.arg2;
+	r->r[3] = v.arg3;
+	r->r[4] = v.arg4;
+	r->r[5] = v.arg5;
+	r->r[6] = v.arg6;
+	r->r[7] = v.arg7;
+}
+
+/**
+ * Per-CPU architectural initialization run when a physical CPU comes up.
+ */
+void arch_cpu_init(void)
+{
+	/*
+	 * Linux expects LORegions to be disabled, hence if the current system
+	 * supports them, Hafnium ensures that they are disabled.
+	 */
+	lor_disable();
+
+	write_msr(CPTR_EL2, get_cptr_el2_value());
+}
diff --git a/src/arch/aarch64/hypervisor/debug_el1.c b/src/arch/aarch64/hypervisor/debug_el1.c
new file mode 100644
index 0000000..45e69b0
--- /dev/null
+++ b/src/arch/aarch64/hypervisor/debug_el1.c
@@ -0,0 +1,208 @@
+/*
+ * Copyright 2019 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "debug_el1.h"
+
+#include "hf/check.h"
+#include "hf/dlog.h"
+#include "hf/types.h"
+
+#include "msr.h"
+#include "sysregs.h"
+
+/* clang-format off */
+
+/**
+ * Definitions of read-only debug registers' encodings.
+ * See Arm Architecture Reference Manual Armv8-A, D12.2.
+ * NAME, op0, op1, crn, crm, op2
+ */
+#define EL1_DEBUG_REGISTERS_READ \
+ X(MDRAR_EL1 , 2, 0, 1, 0, 0) \
+ X(OSLSR_EL1 , 2, 0, 1, 1, 4) \
+ X(DBGAUTHSTATUS_EL1 , 2, 0, 7, 14, 6) \
+
+/**
+ * Definitions of write-only debug registers' encodings.
+ * See Arm Architecture Reference Manual Armv8-A, D12.2.
+ * NAME, op0, op1, crn, crm, op2
+ */
+#define EL1_DEBUG_REGISTERS_WRITE \
+ X(OSLAR_EL1 , 2, 0, 1, 0, 4) \
+
+/**
+ * Definitions of readable and writeable debug registers' encodings.
+ * See Arm Architecture Reference Manual Armv8-A, D12.2.
+ * NAME, op0, op1, crn, crm, op2
+ */
+#define EL1_DEBUG_REGISTERS_READ_WRITE \
+ X(OSDTRRX_EL1 , 2, 0, 0, 0, 2) \
+ X(MDCCINT_EL1 , 2, 0, 0, 2, 0) \
+ X(MDSCR_EL1 , 2, 0, 0, 2, 2) \
+ X(OSDTRTX_EL1 , 2, 0, 0, 3, 2) \
+ X(OSECCR_EL1 , 2, 0, 0, 6, 2) \
+ X(DBGBVR0_EL1 , 2, 0, 0, 0, 4) \
+ X(DBGBVR1_EL1 , 2, 0, 0, 1, 4) \
+ X(DBGBVR2_EL1 , 2, 0, 0, 2, 4) \
+ X(DBGBVR3_EL1 , 2, 0, 0, 3, 4) \
+ X(DBGBVR4_EL1 , 2, 0, 0, 4, 4) \
+ X(DBGBVR5_EL1 , 2, 0, 0, 5, 4) \
+ X(DBGBVR6_EL1 , 2, 0, 0, 6, 4) \
+ X(DBGBVR7_EL1 , 2, 0, 0, 7, 4) \
+ X(DBGBVR8_EL1 , 2, 0, 0, 8, 4) \
+ X(DBGBVR9_EL1 , 2, 0, 0, 9, 4) \
+ X(DBGBVR10_EL1 , 2, 0, 0, 10, 4) \
+ X(DBGBVR11_EL1 , 2, 0, 0, 11, 4) \
+ X(DBGBVR12_EL1 , 2, 0, 0, 12, 4) \
+ X(DBGBVR13_EL1 , 2, 0, 0, 13, 4) \
+ X(DBGBVR14_EL1 , 2, 0, 0, 14, 4) \
+ X(DBGBVR15_EL1 , 2, 0, 0, 15, 4) \
+ X(DBGBCR0_EL1 , 2, 0, 0, 0, 5) \
+ X(DBGBCR1_EL1 , 2, 0, 0, 1, 5) \
+ X(DBGBCR2_EL1 , 2, 0, 0, 2, 5) \
+ X(DBGBCR3_EL1 , 2, 0, 0, 3, 5) \
+ X(DBGBCR4_EL1 , 2, 0, 0, 4, 5) \
+ X(DBGBCR5_EL1 , 2, 0, 0, 5, 5) \
+ X(DBGBCR6_EL1 , 2, 0, 0, 6, 5) \
+ X(DBGBCR7_EL1 , 2, 0, 0, 7, 5) \
+ X(DBGBCR8_EL1 , 2, 0, 0, 8, 5) \
+ X(DBGBCR9_EL1 , 2, 0, 0, 9, 5) \
+ X(DBGBCR10_EL1 , 2, 0, 0, 10, 5) \
+ X(DBGBCR11_EL1 , 2, 0, 0, 11, 5) \
+ X(DBGBCR12_EL1 , 2, 0, 0, 12, 5) \
+ X(DBGBCR13_EL1 , 2, 0, 0, 13, 5) \
+ X(DBGBCR14_EL1 , 2, 0, 0, 14, 5) \
+ X(DBGBCR15_EL1 , 2, 0, 0, 15, 5) \
+ X(DBGWVR0_EL1 , 2, 0, 0, 0, 6) \
+ X(DBGWVR1_EL1 , 2, 0, 0, 1, 6) \
+ X(DBGWVR2_EL1 , 2, 0, 0, 2, 6) \
+ X(DBGWVR3_EL1 , 2, 0, 0, 3, 6) \
+ X(DBGWVR4_EL1 , 2, 0, 0, 4, 6) \
+ X(DBGWVR5_EL1 , 2, 0, 0, 5, 6) \
+ X(DBGWVR6_EL1 , 2, 0, 0, 6, 6) \
+ X(DBGWVR7_EL1 , 2, 0, 0, 7, 6) \
+ X(DBGWVR8_EL1 , 2, 0, 0, 8, 6) \
+ X(DBGWVR9_EL1 , 2, 0, 0, 9, 6) \
+ X(DBGWVR10_EL1 , 2, 0, 0, 10, 6) \
+ X(DBGWVR11_EL1 , 2, 0, 0, 11, 6) \
+ X(DBGWVR12_EL1 , 2, 0, 0, 12, 6) \
+ X(DBGWVR13_EL1 , 2, 0, 0, 13, 6) \
+ X(DBGWVR14_EL1 , 2, 0, 0, 14, 6) \
+ X(DBGWVR15_EL1 , 2, 0, 0, 15, 6) \
+ X(DBGWCR0_EL1 , 2, 0, 0, 0, 7) \
+ X(DBGWCR1_EL1 , 2, 0, 0, 1, 7) \
+ X(DBGWCR2_EL1 , 2, 0, 0, 2, 7) \
+ X(DBGWCR3_EL1 , 2, 0, 0, 3, 7) \
+ X(DBGWCR4_EL1 , 2, 0, 0, 4, 7) \
+ X(DBGWCR5_EL1 , 2, 0, 0, 5, 7) \
+ X(DBGWCR6_EL1 , 2, 0, 0, 6, 7) \
+ X(DBGWCR7_EL1 , 2, 0, 0, 7, 7) \
+ X(DBGWCR8_EL1 , 2, 0, 0, 8, 7) \
+ X(DBGWCR9_EL1 , 2, 0, 0, 9, 7) \
+ X(DBGWCR10_EL1 , 2, 0, 0, 10, 7) \
+ X(DBGWCR11_EL1 , 2, 0, 0, 11, 7) \
+ X(DBGWCR12_EL1 , 2, 0, 0, 12, 7) \
+ X(DBGWCR13_EL1 , 2, 0, 0, 13, 7) \
+ X(DBGWCR14_EL1 , 2, 0, 0, 14, 7) \
+ X(DBGWCR15_EL1 , 2, 0, 0, 15, 7) \
+ X(OSDLR_EL1 , 2, 0, 1, 3, 4) \
+ X(DBGPRCR_EL1 , 2, 0, 1, 4, 4) \
+ X(DBGCLAIMSET_EL1 , 2, 0, 7, 8, 6) \
+ X(DBGCLAIMCLR_EL1 , 2, 0, 7, 9, 6)
+
+/* clang-format on */
+
+/**
+ * Returns true if the ESR register shows an access to an EL1 debug register.
+ *
+ * `esr` is the ESR_EL2 value for a trapped msr/mrs; op0/op1 are extracted
+ * from its ISS field.
+ */
+bool debug_el1_is_register_access(uintreg_t esr)
+{
+	/*
+	 * Architecture Reference Manual D12.2: op0 == 2 is for debug and trace
+	 * system registers, op1 == 1 for trace, remaining are debug.
+	 */
+	return GET_ISS_OP0(esr) == 2 && GET_ISS_OP1(esr) != 1;
+}
+
+/**
+ * Processes an access (msr, mrs) to an EL1 debug register.
+ * Returns true if the access was allowed and performed, false otherwise.
+ *
+ * The X-macro tables above expand into switch cases mapping each register's
+ * ISS encoding to the corresponding read_msr/write_msr. Unknown encodings
+ * are logged and treated as RAZ/WI rather than faulting the VM.
+ */
+bool debug_el1_process_access(struct vcpu *vcpu, spci_vm_id_t vm_id,
+			      uintreg_t esr)
+{
+	/*
+	 * For now, debug registers are not supported by secondary VMs.
+	 * Disallow accesses to them.
+	 */
+	if (vm_id != HF_PRIMARY_VM_ID) {
+		return false;
+	}
+
+	uintreg_t sys_register = GET_ISS_SYSREG(esr);
+	uintreg_t rt_register = GET_ISS_RT(esr);
+	uintreg_t value;
+
+	/* +1 because Rt can access register XZR */
+	CHECK(rt_register < NUM_GP_REGS + 1);
+
+	if (ISS_IS_READ(esr)) {
+		switch (sys_register) {
+#define X(reg_name, op0, op1, crn, crm, op2)              \
+	case (GET_ISS_ENCODING(op0, op1, crn, crm, op2)): \
+		value = read_msr(reg_name);               \
+		break;
+			EL1_DEBUG_REGISTERS_READ
+			EL1_DEBUG_REGISTERS_READ_WRITE
+#undef X
+		default:
+			/* Unknown read: reflect the register back (no-op). */
+			value = vcpu->regs.r[rt_register];
+			dlog("Unsupported debug system register read: "
+			     "op0=%d, op1=%d, crn=%d, crm=%d, op2=%d, rt=%d.\n",
+			     GET_ISS_OP0(esr), GET_ISS_OP1(esr),
+			     GET_ISS_CRN(esr), GET_ISS_CRM(esr),
+			     GET_ISS_OP2(esr), GET_ISS_RT(esr));
+			break;
+		}
+		/* Writes to XZR are discarded, as in hardware. */
+		if (rt_register != RT_REG_XZR) {
+			vcpu->regs.r[rt_register] = value;
+		}
+	} else {
+		/* Reading XZR yields zero. */
+		if (rt_register != RT_REG_XZR) {
+			value = vcpu->regs.r[rt_register];
+		} else {
+			value = 0;
+		}
+		switch (sys_register) {
+#define X(reg_name, op0, op1, crn, crm, op2)              \
+	case (GET_ISS_ENCODING(op0, op1, crn, crm, op2)): \
+		write_msr(reg_name, value);               \
+		break;
+			EL1_DEBUG_REGISTERS_WRITE
+			EL1_DEBUG_REGISTERS_READ_WRITE
+#undef X
+		default:
+			dlog("Unsupported debug system register write: "
+			     "op0=%d, op1=%d, crn=%d, crm=%d, op2=%d, rt=%d.\n",
+			     GET_ISS_OP0(esr), GET_ISS_OP1(esr),
+			     GET_ISS_CRN(esr), GET_ISS_CRM(esr),
+			     GET_ISS_OP2(esr), GET_ISS_RT(esr));
+			break;
+		}
+	}
+
+	return true;
+}
diff --git a/src/arch/aarch64/hypervisor/debug_el1.h b/src/arch/aarch64/hypervisor/debug_el1.h
new file mode 100644
index 0000000..9dc1ef6
--- /dev/null
+++ b/src/arch/aarch64/hypervisor/debug_el1.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright 2019 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include "hf/arch/types.h"
+
+#include "hf/cpu.h"
+
+#include "vmapi/hf/spci.h"
+
+bool debug_el1_is_register_access(uintreg_t esr_el2);
+
+bool debug_el1_process_access(struct vcpu *vcpu, spci_vm_id_t vm_id,
+ uintreg_t esr_el2);
diff --git a/src/arch/aarch64/hypervisor/exceptions.S b/src/arch/aarch64/hypervisor/exceptions.S
new file mode 100644
index 0000000..17a5b63
--- /dev/null
+++ b/src/arch/aarch64/hypervisor/exceptions.S
@@ -0,0 +1,529 @@
+/*
+ * Copyright 2018 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "hf/arch/offsets.h"
+#include "exception_macros.S"
+
/**
 * Saves the volatile registers into the register buffer of the current vCPU.
 */
.macro save_volatile_to_vcpu
	/*
	 * Save x18 since we're about to clobber it. We subtract 16 instead of
	 * 8 from the stack pointer to keep it 16-byte aligned.
	 */
	str x18, [sp, #-16]!

	/* Get the current vCPU. */
	mrs x18, tpidr_el2
	stp x0, x1, [x18, #VCPU_REGS + 8 * 0]
	stp x2, x3, [x18, #VCPU_REGS + 8 * 2]
	stp x4, x5, [x18, #VCPU_REGS + 8 * 4]
	stp x6, x7, [x18, #VCPU_REGS + 8 * 6]
	stp x8, x9, [x18, #VCPU_REGS + 8 * 8]
	stp x10, x11, [x18, #VCPU_REGS + 8 * 10]
	stp x12, x13, [x18, #VCPU_REGS + 8 * 12]
	stp x14, x15, [x18, #VCPU_REGS + 8 * 14]
	stp x16, x17, [x18, #VCPU_REGS + 8 * 16]
	/*
	 * x19-x28 are callee-saved and are deliberately not stored here; they
	 * are saved later (in system_register_access or vcpu_switch) only on
	 * the paths that actually clobber them.
	 */
	stp x29, x30, [x18, #VCPU_REGS + 8 * 29]

	/* x18 was saved on the stack, so we move it to vCPU regs buffer. */
	ldr x0, [sp], #16
	str x0, [x18, #VCPU_REGS + 8 * 18]

	/* Save return address & mode. */
	mrs x1, elr_el2
	mrs x2, spsr_el2
	stp x1, x2, [x18, #VCPU_REGS + 8 * 31]
.endm
+
/**
 * This is a generic handler for exceptions taken at a lower EL. It saves the
 * volatile registers to the current vCPU and calls the C handler, which can
 * select one of two paths: (a) restore volatile registers and return, or
 * (b) switch to a different vCPU. In the latter case, the handler needs to save
 * all non-volatile registers (they haven't been saved yet), then restore all
 * registers from the new vCPU.
 */
.macro lower_exception handler:req
	save_volatile_to_vcpu

	/* Call C handler. */
	bl \handler

	/*
	 * Switch vCPU if requested by handler: a non-zero return value in x0
	 * is a pointer to the vCPU to switch to.
	 */
	cbnz x0, vcpu_switch

	/* vCPU is not changing. */
	mrs x0, tpidr_el2
	b vcpu_restore_volatile_and_run
.endm

/**
 * This is the handler for a sync exception taken at a lower EL.
 */
.macro lower_sync_exception
	save_volatile_to_vcpu

	/* Extract the exception class (EC) from exception syndrome register. */
	mrs x18, esr_el2
	lsr x18, x18, #26

	/* Take the system register path for EC 0x18 (trapped msr/mrs). */
	sub x18, x18, #0x18
	cbz x18, system_register_access

	/* Read syndrome register and call C handler. */
	mrs x0, esr_el2
	bl sync_lower_exception

	/* Switch vCPU if requested by handler. */
	cbnz x0, vcpu_switch

	/* vCPU is not changing. */
	mrs x0, tpidr_el2
	b vcpu_restore_volatile_and_run
.endm
+
/**
 * The following is the exception table. A pointer to it will be stored in
 * register vbar_el2. The table is 0x800-aligned with one 0x80-byte slot per
 * vector, as the layout of the slots below shows.
 */
.section .text.vector_table_el2, "ax"
.global vector_table_el2
.balign 0x800
vector_table_el2:
/* Exceptions taken from the current EL, using SP_EL0. */
sync_cur_sp0:
	noreturn_current_exception_sp0 el2 sync_current_exception_noreturn

.balign 0x80
irq_cur_sp0:
	noreturn_current_exception_sp0 el2 irq_current_exception_noreturn

.balign 0x80
fiq_cur_sp0:
	noreturn_current_exception_sp0 el2 fiq_current_exception_noreturn

.balign 0x80
serr_cur_sp0:
	noreturn_current_exception_sp0 el2 serr_current_exception_noreturn

/* Exceptions taken from the current EL, using SP_ELx. */
.balign 0x80
sync_cur_spx:
	noreturn_current_exception_spx el2 sync_current_exception_noreturn

.balign 0x80
irq_cur_spx:
	noreturn_current_exception_spx el2 irq_current_exception_noreturn

.balign 0x80
fiq_cur_spx:
	noreturn_current_exception_spx el2 fiq_current_exception_noreturn

.balign 0x80
serr_cur_spx:
	noreturn_current_exception_spx el2 serr_current_exception_noreturn

/* Exceptions taken from a lower EL running in AArch64 state. */
.balign 0x80
sync_lower_64:
	lower_sync_exception

.balign 0x80
irq_lower_64:
	lower_exception irq_lower

.balign 0x80
fiq_lower_64:
	lower_exception fiq_lower

.balign 0x80
serr_lower_64:
	lower_exception serr_lower

/* Exceptions taken from a lower EL running in AArch32 state. */
.balign 0x80
sync_lower_32:
	lower_sync_exception

.balign 0x80
irq_lower_32:
	lower_exception irq_lower

.balign 0x80
fiq_lower_32:
	lower_exception fiq_lower

.balign 0x80
serr_lower_32:
	lower_exception serr_lower

.balign 0x40

/**
 * Handle accesses to system registers (EC=0x18) and return to original caller.
 * On entry, the volatile registers have already been saved to the current vCPU
 * by lower_sync_exception.
 */
system_register_access:
	/*
	 * Non-volatile registers are (conservatively) saved because the handler
	 * can clobber non-volatile registers that are used by the msr/mrs,
	 * which results in the wrong value being read or written.
	 */
	/* Get the current vCPU. */
	mrs x18, tpidr_el2
	stp x19, x20, [x18, #VCPU_REGS + 8 * 19]
	stp x21, x22, [x18, #VCPU_REGS + 8 * 21]
	stp x23, x24, [x18, #VCPU_REGS + 8 * 23]
	stp x25, x26, [x18, #VCPU_REGS + 8 * 25]
	stp x27, x28, [x18, #VCPU_REGS + 8 * 27]

	/* Read syndrome register and call C handler. */
	mrs x0, esr_el2
	bl handle_system_register_access
	cbnz x0, vcpu_switch

	/* vCPU is not changing. */
	mrs x0, tpidr_el2
	b vcpu_restore_nonvolatile_and_run
+
/**
 * Switch to a new vCPU.
 *
 * All volatile registers from the old vCPU have already been saved. We need
 * to save only non-volatile ones from the old vCPU, and restore all from the
 * new one.
 *
 * x0 is a pointer to the new vCPU; x1 below holds the old (current) vCPU.
 */
vcpu_switch:
	/* Save non-volatile registers. */
	mrs x1, tpidr_el2
	stp x19, x20, [x1, #VCPU_REGS + 8 * 19]
	stp x21, x22, [x1, #VCPU_REGS + 8 * 21]
	stp x23, x24, [x1, #VCPU_REGS + 8 * 23]
	stp x25, x26, [x1, #VCPU_REGS + 8 * 25]
	stp x27, x28, [x1, #VCPU_REGS + 8 * 27]

	/*
	 * Save lazy state (EL1 system registers plus a few EL2 registers kept
	 * per-vCPU). The stores below must mirror, in pair order, the loads
	 * in vcpu_restore_lazy_and_run.
	 */
	/* Use x28 as the base */
	add x28, x1, #VCPU_LAZY

	mrs x24, vmpidr_el2
	mrs x25, csselr_el1
	stp x24, x25, [x28], #16

	mrs x2, sctlr_el1
	mrs x3, actlr_el1
	stp x2, x3, [x28], #16

	mrs x4, cpacr_el1
	mrs x5, ttbr0_el1
	stp x4, x5, [x28], #16

	mrs x6, ttbr1_el1
	mrs x7, tcr_el1
	stp x6, x7, [x28], #16

	mrs x8, esr_el1
	mrs x9, afsr0_el1
	stp x8, x9, [x28], #16

	mrs x10, afsr1_el1
	mrs x11, far_el1
	stp x10, x11, [x28], #16

	mrs x12, mair_el1
	mrs x13, vbar_el1
	stp x12, x13, [x28], #16

	mrs x14, contextidr_el1
	mrs x15, tpidr_el0
	stp x14, x15, [x28], #16

	mrs x16, tpidrro_el0
	mrs x17, tpidr_el1
	stp x16, x17, [x28], #16

	mrs x18, amair_el1
	mrs x19, cntkctl_el1
	stp x18, x19, [x28], #16

	mrs x20, sp_el0
	mrs x21, sp_el1
	stp x20, x21, [x28], #16

	mrs x22, elr_el1
	mrs x23, spsr_el1
	stp x22, x23, [x28], #16

	mrs x24, par_el1
	mrs x25, hcr_el2
	stp x24, x25, [x28], #16

	mrs x26, cnthctl_el2
	mrs x27, vttbr_el2
	stp x26, x27, [x28], #16

	mrs x4, mdcr_el2
	mrs x5, mdscr_el1
	stp x4, x5, [x28], #16

	mrs x6, pmccfiltr_el0
	mrs x7, pmcr_el0
	stp x6, x7, [x28], #16

	mrs x8, pmcntenset_el0
	mrs x9, pmintenset_el1
	stp x8, x9, [x28], #16

	/* Save GIC registers. */
#if GIC_VERSION == 3 || GIC_VERSION == 4
	/* Offset is too large, so start from a new base. */
	add x2, x1, #VCPU_GIC

	mrs x3, ich_hcr_el2
	mrs x4, icc_sre_el2
	stp x3, x4, [x2, #16 * 0]
#endif

	/* Save floating point registers. */
	/* Use x28 as the base. */
	add x28, x1, #VCPU_FREGS
	stp q0, q1, [x28], #32
	stp q2, q3, [x28], #32
	stp q4, q5, [x28], #32
	stp q6, q7, [x28], #32
	stp q8, q9, [x28], #32
	stp q10, q11, [x28], #32
	stp q12, q13, [x28], #32
	stp q14, q15, [x28], #32
	stp q16, q17, [x28], #32
	stp q18, q19, [x28], #32
	stp q20, q21, [x28], #32
	stp q22, q23, [x28], #32
	stp q24, q25, [x28], #32
	stp q26, q27, [x28], #32
	stp q28, q29, [x28], #32
	stp q30, q31, [x28], #32
	mrs x3, fpsr
	mrs x4, fpcr
	/*
	 * Only 16 bytes are stored here; the #32 post-increment is harmless
	 * because x28 is not used again before being restored.
	 */
	stp x3, x4, [x28], #32

	/* Save new vCPU pointer in non-volatile register. */
	mov x19, x0

	/*
	 * Save peripheral registers, and inform the arch-independent sections
	 * that registers have been saved.
	 */
	mov x0, x1
	bl complete_saving_state
	mov x0, x19

	/* Intentional fallthrough. */
.global vcpu_restore_all_and_run
vcpu_restore_all_and_run:
	/*
	 * Update pointer to current vCPU: tpidr_el2 is how the rest of the
	 * hypervisor (e.g. current() in handler.c) finds the running vCPU.
	 */
	msr tpidr_el2, x0

	/* Restore peripheral registers. */
	mov x19, x0
	bl begin_restoring_state
	mov x0, x19

	/*
	 * Restore floating point registers.
	 *
	 * Offset is too large, so start from a new base.
	 */
	add x2, x0, #VCPU_FREGS
	ldp q0, q1, [x2, #32 * 0]
	ldp q2, q3, [x2, #32 * 1]
	ldp q4, q5, [x2, #32 * 2]
	ldp q6, q7, [x2, #32 * 3]
	ldp q8, q9, [x2, #32 * 4]
	ldp q10, q11, [x2, #32 * 5]
	ldp q12, q13, [x2, #32 * 6]
	ldp q14, q15, [x2, #32 * 7]
	ldp q16, q17, [x2, #32 * 8]
	ldp q18, q19, [x2, #32 * 9]
	ldp q20, q21, [x2, #32 * 10]
	ldp q22, q23, [x2, #32 * 11]
	ldp q24, q25, [x2, #32 * 12]
	ldp q26, q27, [x2, #32 * 13]
	ldp q28, q29, [x2, #32 * 14]
	/* Offset becomes too large, so move the base. */
	ldp q30, q31, [x2, #32 * 15]!
	/* fpsr/fpcr live just past the 32 q-registers (offset 32 * 16). */
	ldp x3, x4, [x2, #32 * 1]
	msr fpsr, x3

	/*
	 * Only restore FPCR if changed, to avoid expensive
	 * self-synchronising operation where possible.
	 */
	mrs x5, fpcr
	cmp x5, x4
	b.eq vcpu_restore_lazy_and_run
	msr fpcr, x4
	/* Intentional fallthrough. */
+
vcpu_restore_lazy_and_run:
	/*
	 * Restore lazy registers. The load pairs below must mirror, in order,
	 * the store pairs in vcpu_switch.
	 */
	/* Use x28 as the base. */
	add x28, x0, #VCPU_LAZY

	ldp x24, x25, [x28], #16
	msr vmpidr_el2, x24
	msr csselr_el1, x25

	ldp x2, x3, [x28], #16
	msr sctlr_el1, x2
	msr actlr_el1, x3

	ldp x4, x5, [x28], #16
	msr cpacr_el1, x4
	msr ttbr0_el1, x5

	ldp x6, x7, [x28], #16
	msr ttbr1_el1, x6
	msr tcr_el1, x7

	ldp x8, x9, [x28], #16
	msr esr_el1, x8
	msr afsr0_el1, x9

	ldp x10, x11, [x28], #16
	msr afsr1_el1, x10
	msr far_el1, x11

	ldp x12, x13, [x28], #16
	msr mair_el1, x12
	msr vbar_el1, x13

	ldp x14, x15, [x28], #16
	msr contextidr_el1, x14
	msr tpidr_el0, x15

	ldp x16, x17, [x28], #16
	msr tpidrro_el0, x16
	msr tpidr_el1, x17

	ldp x18, x19, [x28], #16
	msr amair_el1, x18
	msr cntkctl_el1, x19

	ldp x20, x21, [x28], #16
	msr sp_el0, x20
	msr sp_el1, x21

	ldp x22, x23, [x28], #16
	msr elr_el1, x22
	msr spsr_el1, x23

	ldp x24, x25, [x28], #16
	msr par_el1, x24
	msr hcr_el2, x25

	ldp x26, x27, [x28], #16
	msr cnthctl_el2, x26
	msr vttbr_el2, x27

	ldp x4, x5, [x28], #16
	msr mdcr_el2, x4
	msr mdscr_el1, x5

	ldp x6, x7, [x28], #16
	msr pmccfiltr_el0, x6
	msr pmcr_el0, x7

	ldp x8, x9, [x28], #16
	/*
	 * NOTE: Writing 0s to pmcntenset_el0's bits do not alter their values.
	 * To reset them, clear the register by writing to pmcntenclr_el0.
	 */
	mov x27, #0xffffffff
	msr pmcntenclr_el0, x27
	msr pmcntenset_el0, x8

	/*
	 * NOTE: Writing 0s to pmintenset_el1's bits do not alter their values.
	 * To reset them, clear the register by writing to pmintenclr_el1.
	 */
	msr pmintenclr_el1, x27
	msr pmintenset_el1, x9

	/* Restore GIC registers. */
#if GIC_VERSION == 3 || GIC_VERSION == 4
	/* Offset is too large, so start from a new base. */
	add x2, x0, #VCPU_GIC

	ldp x3, x4, [x2, #16 * 0]
	msr ich_hcr_el2, x3
	msr icc_sre_el2, x4
#endif

	/*
	 * If a different vCPU is being run on this physical CPU to the last one
	 * which was run for this VM, invalidate the TLB. This must be called
	 * after vttbr_el2 has been updated, so that we have the page table and
	 * VMID of the vCPU to which we are switching.
	 */
	mov x19, x0
	bl maybe_invalidate_tlb
	mov x0, x19

	/* Intentional fallthrough. */
+
/**
 * Restore the non-volatile (callee-saved) registers of the vCPU pointed to by
 * x0, then fall through to restore the volatile registers and run it.
 */
vcpu_restore_nonvolatile_and_run:
	/* Restore non-volatile registers. */
	ldp x19, x20, [x0, #VCPU_REGS + 8 * 19]
	ldp x21, x22, [x0, #VCPU_REGS + 8 * 21]
	ldp x23, x24, [x0, #VCPU_REGS + 8 * 23]
	ldp x25, x26, [x0, #VCPU_REGS + 8 * 25]
	ldp x27, x28, [x0, #VCPU_REGS + 8 * 27]

	/* Intentional fallthrough. */
/**
 * Restore volatile registers and run the given vCPU.
 *
 * x0 is a pointer to the target vCPU.
 */
vcpu_restore_volatile_and_run:
	ldp x4, x5, [x0, #VCPU_REGS + 8 * 4]
	ldp x6, x7, [x0, #VCPU_REGS + 8 * 6]
	ldp x8, x9, [x0, #VCPU_REGS + 8 * 8]
	ldp x10, x11, [x0, #VCPU_REGS + 8 * 10]
	ldp x12, x13, [x0, #VCPU_REGS + 8 * 12]
	ldp x14, x15, [x0, #VCPU_REGS + 8 * 14]
	ldp x16, x17, [x0, #VCPU_REGS + 8 * 16]
	ldr x18, [x0, #VCPU_REGS + 8 * 18]
	ldp x29, x30, [x0, #VCPU_REGS + 8 * 29]

	/* Restore return address & mode. */
	ldp x1, x2, [x0, #VCPU_REGS + 8 * 31]
	msr elr_el2, x1
	msr spsr_el2, x2

	/* Restore x0..x3, which we have used as scratch before. */
	ldp x2, x3, [x0, #VCPU_REGS + 8 * 2]
	ldp x0, x1, [x0, #VCPU_REGS + 8 * 0]
	eret_with_sb
+
.balign 0x40
/**
 * Restore volatile registers from stack and return to original caller.
 * restore_volatile_from_stack is defined in exception_macros.S (not in view);
 * it is presumably the counterpart of a save done by the current-EL vectors.
 */
restore_from_stack_and_return:
	restore_volatile_from_stack el2
	eret_with_sb
diff --git a/src/arch/aarch64/hypervisor/feature_id.c b/src/arch/aarch64/hypervisor/feature_id.c
new file mode 100644
index 0000000..20e6f55
--- /dev/null
+++ b/src/arch/aarch64/hypervisor/feature_id.c
@@ -0,0 +1,303 @@
+/*
+ * Copyright 2019 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "feature_id.h"
+
+#include "hf/check.h"
+#include "hf/dlog.h"
+#include "hf/types.h"
+#include "hf/vm.h"
+
+#include "msr.h"
+#include "sysregs.h"
+
/* clang-format off */

/**
 * Definitions of read-only feature ID (group 3) registers' encodings.
 * See Arm Architecture Reference Manual Armv8-A, Table D1-52 and D12-2.
 * NAME, op0, op1, crn, crm, op2
 */
#define FEATURE_ID_REGISTERS_READ           \
	X(ID_PFR0_EL1       , 3, 0, 0, 1, 0) \
	X(ID_PFR1_EL1       , 3, 0, 0, 1, 1) \
	X(ID_DFR0_EL1       , 3, 0, 0, 1, 2) \
	X(ID_AFR0_EL1       , 3, 0, 0, 1, 3) \
	X(ID_MMFR0_EL1      , 3, 0, 0, 1, 4) \
	X(ID_MMFR1_EL1      , 3, 0, 0, 1, 5) \
	X(ID_MMFR2_EL1      , 3, 0, 0, 1, 6) \
	X(ID_MMFR3_EL1      , 3, 0, 0, 1, 7) \
	X(ID_ISAR0_EL1      , 3, 0, 0, 2, 0) \
	X(ID_ISAR1_EL1      , 3, 0, 0, 2, 1) \
	X(ID_ISAR2_EL1      , 3, 0, 0, 2, 2) \
	X(ID_ISAR3_EL1      , 3, 0, 0, 2, 3) \
	X(ID_ISAR4_EL1      , 3, 0, 0, 2, 4) \
	X(ID_ISAR5_EL1      , 3, 0, 0, 2, 5) \
	X(ID_MMFR4_EL1      , 3, 0, 0, 2, 6) \
	                                     \
	X(MVFR0_EL1         , 3, 0, 0, 3, 0) \
	X(MVFR1_EL1         , 3, 0, 0, 3, 1) \
	X(MVFR2_EL1         , 3, 0, 0, 3, 2) \
	                                     \
	X(ID_AA64PFR0_EL1   , 3, 0, 0, 4, 0) \
	X(ID_AA64PFR1_EL1   , 3, 0, 0, 4, 1) \
	                                     \
	X(ID_AA64DFR0_EL1   , 3, 0, 0, 5, 0) \
	X(ID_AA64DFR1_EL1   , 3, 0, 0, 5, 1) \
	                                     \
	X(ID_AA64AFR0_EL1   , 3, 0, 0, 5, 4) \
	X(ID_AA64AFR1_EL1   , 3, 0, 0, 5, 5) \
	                                     \
	X(ID_AA64ISAR0_EL1  , 3, 0, 0, 6, 0) \
	X(ID_AA64ISAR1_EL1  , 3, 0, 0, 6, 1) \
	                                     \
	X(ID_AA64MMFR0_EL1  , 3, 0, 0, 7, 0) \
	X(ID_AA64MMFR1_EL1  , 3, 0, 0, 7, 1) \
	X(ID_AA64MMFR2_EL1  , 3, 0, 0, 7, 2)

/* clang-format on */

/*
 * Defines <reg>_ENC constants holding each register's ISS encoding, used to
 * match the trapped encoding from ESR_EL2 against specific registers.
 */
enum {
#define X(reg_name, op0, op1, crn, crm, op2) \
	reg_name##_ENC = GET_ISS_ENCODING(op0, op1, crn, crm, op2),
	FEATURE_ID_REGISTERS_READ
#undef X
};
+
+/**
+ * Returns true if the ESR register shows an access to a feature ID group 3
+ * register.
+ */
+bool feature_id_is_register_access(uintreg_t esr)
+{
+ uintreg_t op0 = GET_ISS_OP0(esr);
+ uintreg_t op1 = GET_ISS_OP1(esr);
+ uintreg_t crn = GET_ISS_CRN(esr);
+ uintreg_t crm = GET_ISS_CRM(esr);
+
+ /* From the Arm Architecture Reference Manual Table D12-2. */
+ return op0 == 3 && op1 == 0 && crn == 0 && crm >= 1 && crm <= 7;
+}
+
/**
 * RAS-related. RES0 when RAS is not implemented.
 * ID_AA64MMFR1_EL1.SpecSEI, bits [27:24].
 */
#define ID_AA64MMFR1_EL1_SPEC_SEI (UINT64_C(0xf) << 24)

/**
 * Indicates support for LORegions.
 * ID_AA64MMFR1_EL1.LO, bits [19:16]. (The previous shift of 24 collided with
 * the SpecSEI field above and masked the wrong bits.)
 */
#define ID_AA64MMFR1_EL1_LO (UINT64_C(0xf) << 16)

/**
 * RAS Extension version. ID_AA64PFR0_EL1.RAS, bits [31:28].
 */
#define ID_AA64PFR0_EL1_RAS (UINT64_C(0xf) << 28)

/**
 * Self-hosted Trace Extension Version.
 * ID_AA64DFR0_EL1.TraceFilt, bits [43:40].
 */
#define ID_AA64DFR0_EL1_TRACE_FILT (UINT64_C(0xf) << 40)

/**
 * OS Double Lock implemented. ID_AA64DFR0_EL1.DoubleLock, bits [39:36].
 */
#define ID_AA64DFR0_EL1_DOUBLE_LOCK (UINT64_C(0xf) << 36)

/**
 * Statistical Profiling Extension version.
 * ID_AA64DFR0_EL1.PMSVer, bits [35:32].
 */
#define ID_AA64DFR0_EL1_PMS_VER (UINT64_C(0xf) << 32)

/**
 * Performance Monitors Extension version.
 * ID_AA64DFR0_EL1.PMUVer, bits [11:8].
 */
#define ID_AA64DFR0_EL1_PMU_VER (UINT64_C(0xf) << 8)

/**
 * Indicates whether System register interface to trace unit is implemented.
 * ID_AA64DFR0_EL1.TraceVer, bits [7:4].
 */
#define ID_AA64DFR0_EL1_TRACE_VER (UINT64_C(0xf) << 4)

/**
 * Debug architecture version. ID_AA64DFR0_EL1.DebugVer, bits [3:0].
 */
#define ID_AA64DFR0_EL1_DEBUG_VER (UINT64_C(0xf))

/**
 * PAuth: whether an implementation defined algorithm for generic code
 * authentication is implemented. ID_AA64ISAR1_EL1.GPI, bits [31:28].
 */
#define ID_AA64ISAR1_EL1_GPI (UINT64_C(0xf) << 28)

/**
 * PAuth: whether QARMA or Architected algorithm for generic code authentication
 * is implemented. ID_AA64ISAR1_EL1.GPA, bits [27:24].
 */
#define ID_AA64ISAR1_EL1_GPA (UINT64_C(0xf) << 24)

/**
 * PAuth: whether an implementation defined algorithm for address authentication
 * is implemented. ID_AA64ISAR1_EL1.API, bits [11:8].
 */
#define ID_AA64ISAR1_EL1_API (UINT64_C(0xf) << 8)

/**
 * PAuth: whether QARMA or Architected algorithm for address authentication is
 * implemented. ID_AA64ISAR1_EL1.APA, bits [7:4]. (The previous shift of 24
 * collided with the GPA field and masked the wrong bits.)
 */
#define ID_AA64ISAR1_EL1_APA (UINT64_C(0xf) << 4)
+
+void feature_set_traps(struct vm *vm, struct arch_regs *regs)
+{
+ arch_features_t features = vm->arch.trapped_features;
+
+ if (features & ~HF_FEATURE_ALL) {
+ panic("features has undefined bits 0x%x", features);
+ }
+
+ /* By default do not mask out any features. */
+ vm->arch.tid3_masks.id_aa64mmfr1_el1 = ~0ULL;
+ vm->arch.tid3_masks.id_aa64pfr0_el1 = ~0ULL;
+ vm->arch.tid3_masks.id_aa64dfr0_el1 = ~0ULL;
+ vm->arch.tid3_masks.id_aa64isar1_el1 = ~0ULL;
+
+ if (features & HF_FEATURE_RAS) {
+ regs->lazy.hcr_el2 |= HCR_EL2_TERR;
+ vm->arch.tid3_masks.id_aa64mmfr1_el1 &=
+ ~ID_AA64MMFR1_EL1_SPEC_SEI;
+ vm->arch.tid3_masks.id_aa64pfr0_el1 &= ~ID_AA64PFR0_EL1_RAS;
+ }
+
+ if (features & HF_FEATURE_SPE) {
+ /*
+ * Trap VM accesses to Statistical Profiling Extension (SPE)
+ * registers.
+ */
+ regs->lazy.mdcr_el2 |= MDCR_EL2_TPMS;
+
+ /*
+ * Set E2PB to 0b00. This ensures that accesses to Profiling
+ * Buffer controls at EL1 are trapped to EL2.
+ */
+ regs->lazy.mdcr_el2 &= ~MDCR_EL2_E2PB;
+
+ vm->arch.tid3_masks.id_aa64dfr0_el1 &= ~ID_AA64DFR0_EL1_PMS_VER;
+ }
+
+ if (features & HF_FEATURE_DEBUG) {
+ regs->lazy.mdcr_el2 |=
+ MDCR_EL2_TDRA | MDCR_EL2_TDOSA | MDCR_EL2_TDA;
+
+ vm->arch.tid3_masks.id_aa64dfr0_el1 &=
+ ~ID_AA64DFR0_EL1_DOUBLE_LOCK;
+ }
+
+ if (features & HF_FEATURE_TRACE) {
+ regs->lazy.mdcr_el2 |= MDCR_EL2_TTRF;
+
+ vm->arch.tid3_masks.id_aa64dfr0_el1 &=
+ ~ID_AA64DFR0_EL1_TRACE_FILT;
+ vm->arch.tid3_masks.id_aa64dfr0_el1 &=
+ ~ID_AA64DFR0_EL1_TRACE_VER;
+ }
+
+ if (features & HF_FEATURE_PERFMON) {
+ regs->lazy.mdcr_el2 |= MDCR_EL2_TPM | MDCR_EL2_TPMCR;
+
+ vm->arch.tid3_masks.id_aa64dfr0_el1 &= ~ID_AA64DFR0_EL1_PMU_VER;
+ }
+
+ if (features & HF_FEATURE_LOR) {
+ regs->lazy.hcr_el2 |= HCR_EL2_TLOR;
+
+ vm->arch.tid3_masks.id_aa64mmfr1_el1 &= ~ID_AA64MMFR1_EL1_LO;
+ }
+
+ if (features & HF_FEATURE_PAUTH) {
+ /* APK and API bits *enable* trapping when cleared. */
+ regs->lazy.hcr_el2 &= ~(HCR_EL2_APK | HCR_EL2_API);
+
+ vm->arch.tid3_masks.id_aa64isar1_el1 &= ~ID_AA64ISAR1_EL1_GPI;
+ vm->arch.tid3_masks.id_aa64isar1_el1 &= ~ID_AA64ISAR1_EL1_GPA;
+ vm->arch.tid3_masks.id_aa64isar1_el1 &= ~ID_AA64ISAR1_EL1_API;
+ vm->arch.tid3_masks.id_aa64isar1_el1 &= ~ID_AA64ISAR1_EL1_APA;
+ }
+}
+
+/**
+ * Processes an access (mrs) to a feature ID register.
+ * Returns true if the access was allowed and performed, false otherwise.
+ */
+bool feature_id_process_access(struct vcpu *vcpu, uintreg_t esr)
+{
+ const struct vm *vm = vcpu->vm;
+ uintreg_t sys_register = GET_ISS_SYSREG(esr);
+ uintreg_t rt_register = GET_ISS_RT(esr);
+ uintreg_t value;
+
+ /* +1 because Rt can access register XZR */
+ CHECK(rt_register < NUM_GP_REGS + 1);
+
+ if (!ISS_IS_READ(esr)) {
+ dlog("Unsupported feature ID register write: "
+ "op0=%d, op1=%d, crn=%d, crm=%d, op2=%d, rt=%d.\n",
+ GET_ISS_OP0(esr), GET_ISS_OP1(esr), GET_ISS_CRN(esr),
+ GET_ISS_CRM(esr), GET_ISS_OP2(esr), GET_ISS_RT(esr));
+ return true;
+ }
+
+ switch (sys_register) {
+#define X(reg_name, op0, op1, crn, crm, op2) \
+ case (GET_ISS_ENCODING(op0, op1, crn, crm, op2)): \
+ value = read_msr(reg_name); \
+ break;
+ FEATURE_ID_REGISTERS_READ
+#undef X
+ default:
+ /* Reserved registers should be read as zero (raz). */
+ value = 0;
+ dlog("Unsupported feature ID register read: "
+ "op0=%d, op1=%d, crn=%d, crm=%d, op2=%d, rt=%d.\n",
+ GET_ISS_OP0(esr), GET_ISS_OP1(esr), GET_ISS_CRN(esr),
+ GET_ISS_CRM(esr), GET_ISS_OP2(esr), GET_ISS_RT(esr));
+ break;
+ }
+
+ /* Mask values for features Hafnium might restrict. */
+ switch (sys_register) {
+ case ID_AA64MMFR1_EL1_ENC:
+ value &= vm->arch.tid3_masks.id_aa64mmfr1_el1;
+ break;
+ case ID_AA64PFR0_EL1_ENC:
+ value &= vm->arch.tid3_masks.id_aa64pfr0_el1;
+ break;
+ case ID_AA64DFR0_EL1_ENC:
+ value &= vm->arch.tid3_masks.id_aa64dfr0_el1;
+ break;
+ case ID_AA64ISAR1_EL1_ENC:
+ value &= vm->arch.tid3_masks.id_aa64isar1_el1;
+ break;
+ default:
+ break;
+ }
+
+ if (rt_register != RT_REG_XZR) {
+ vcpu->regs.r[rt_register] = value;
+ }
+
+ return true;
+}
diff --git a/src/arch/aarch64/hypervisor/feature_id.h b/src/arch/aarch64/hypervisor/feature_id.h
new file mode 100644
index 0000000..86c7c01
--- /dev/null
+++ b/src/arch/aarch64/hypervisor/feature_id.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright 2019 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include "hf/arch/types.h"
+
+#include "hf/cpu.h"
+
+#include "vmapi/hf/spci.h"
+
#define HF_FEATURE_NONE UINT64_C(0)

/* Reliability, Availability, and Serviceability (RAS) Extension Features */
#define HF_FEATURE_RAS UINT64_C(1)

/* Limited Ordering Regions */
#define HF_FEATURE_LOR (UINT64_C(1) << 1)

/* Performance Monitor */
#define HF_FEATURE_PERFMON (UINT64_C(1) << 2)

/* Debug Registers */
#define HF_FEATURE_DEBUG (UINT64_C(1) << 3)

/* Statistical Profiling Extension (SPE) */
#define HF_FEATURE_SPE (UINT64_C(1) << 4)

/* Self-hosted Trace */
#define HF_FEATURE_TRACE (UINT64_C(1) << 5)

/* Pointer Authentication (PAuth) */
#define HF_FEATURE_PAUTH (UINT64_C(1) << 6)

/*
 * Mask of all valid feature bits: every bit up to and including the highest
 * defined feature.
 * NOTE: This should be based on the last (highest value) defined feature.
 * Adjust if adding more features.
 */
#define HF_FEATURE_ALL ((HF_FEATURE_PAUTH << 1) - 1)

/** Returns true if the ESR describes an access to a feature ID register. */
bool feature_id_is_register_access(uintreg_t esr_el2);

/** Emulates a trapped feature ID register access; returns true if handled. */
bool feature_id_process_access(struct vcpu *vcpu, uintreg_t esr_el2);

/**
 * Configures feature trapping and feature ID masks for the VM according to
 * vm->arch.trapped_features.
 */
void feature_set_traps(struct vm *vm, struct arch_regs *regs);
diff --git a/src/arch/aarch64/hypervisor/handler.c b/src/arch/aarch64/hypervisor/handler.c
new file mode 100644
index 0000000..65aef52
--- /dev/null
+++ b/src/arch/aarch64/hypervisor/handler.c
@@ -0,0 +1,767 @@
+/*
+ * Copyright 2018 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <stdnoreturn.h>
+
+#include "hf/arch/barriers.h"
+#include "hf/arch/init.h"
+#include "hf/arch/mm.h"
+#include "hf/arch/plat/smc.h"
+
+#include "hf/api.h"
+#include "hf/check.h"
+#include "hf/cpu.h"
+#include "hf/dlog.h"
+#include "hf/panic.h"
+#include "hf/spci.h"
+#include "hf/vm.h"
+
+#include "vmapi/hf/call.h"
+
+#include "debug_el1.h"
+#include "feature_id.h"
+#include "msr.h"
+#include "perfmon.h"
+#include "psci.h"
+#include "psci_handler.h"
+#include "smc.h"
+#include "sysregs.h"
+
/**
 * Gets the Exception Class from the ESR.
 * EC occupies the bits above bit 26; bits above EC are assumed to be zero —
 * NOTE(review): confirm no higher ESR_EL2 bits (e.g. ISS2) can be set here.
 */
#define GET_ESR_EC(esr) ((esr) >> 26)

/**
 * Gets the Instruction Length bit for the synchronous exception
 * (set for a 4-byte instruction, clear for 2 bytes).
 */
#define GET_ESR_IL(esr) ((esr) & (1 << 25))

/**
 * Gets the value to increment for the next PC.
 * The ESR encodes whether the instruction is 2 bytes or 4 bytes long.
 */
#define GET_NEXT_PC_INC(esr) (GET_ESR_IL(esr) ? 4 : 2)

/**
 * The Client ID field within X7 for an SMC64 call.
 */
#define CLIENT_ID_MASK UINT64_C(0xffff)
+
/**
 * Returns a reference to the currently executing vCPU.
 */
static struct vcpu *current(void)
{
	/*
	 * The exception return path stores the running vCPU's pointer in
	 * tpidr_el2 (see vcpu_restore_all_and_run in exceptions.S).
	 */
	return (struct vcpu *)read_msr(tpidr_el2);
}
+
/**
 * Saves the state of per-vCPU peripherals, such as the virtual timer, and
 * informs the arch-independent sections that registers have been saved.
 */
void complete_saving_state(struct vcpu *vcpu)
{
	vcpu->regs.peripherals.cntv_cval_el0 = read_msr(cntv_cval_el0);
	vcpu->regs.peripherals.cntv_ctl_el0 = read_msr(cntv_ctl_el0);

	api_regs_state_saved(vcpu);

	/*
	 * If switching away from the primary, copy the current EL0 virtual
	 * timer registers to the corresponding EL2 physical timer registers.
	 * This is used to emulate the virtual timer for the primary in case it
	 * should fire while the secondary is running.
	 */
	if (vcpu->vm->id == HF_PRIMARY_VM_ID) {
		/*
		 * Clear timer control register before copying compare value, to
		 * avoid a spurious timer interrupt. This could be a problem if
		 * the interrupt is configured as edge-triggered, as it would
		 * then be latched in.
		 */
		write_msr(cnthp_ctl_el2, 0);
		write_msr(cnthp_cval_el2, read_msr(cntv_cval_el0));
		write_msr(cnthp_ctl_el2, read_msr(cntv_ctl_el0));
	}
}
+
/**
 * Restores the state of per-vCPU peripherals, such as the virtual timer.
 * Counterpart of complete_saving_state above.
 */
void begin_restoring_state(struct vcpu *vcpu)
{
	/*
	 * Clear timer control register before restoring compare value, to avoid
	 * a spurious timer interrupt. This could be a problem if the interrupt
	 * is configured as edge-triggered, as it would then be latched in.
	 */
	write_msr(cntv_ctl_el0, 0);
	write_msr(cntv_cval_el0, vcpu->regs.peripherals.cntv_cval_el0);
	write_msr(cntv_ctl_el0, vcpu->regs.peripherals.cntv_ctl_el0);

	/*
	 * If we are switching (back) to the primary, disable the EL2 physical
	 * timer which was being used to emulate the EL0 virtual timer, as the
	 * virtual timer is now running for the primary again.
	 */
	if (vcpu->vm->id == HF_PRIMARY_VM_ID) {
		write_msr(cnthp_ctl_el2, 0);
		write_msr(cnthp_cval_el2, 0);
	}
}
+
/**
 * Invalidate all stage 1 TLB entries on the current (physical) CPU for the
 * current VMID.
 *
 * The barrier sequence (isb / tlbi / isb / dsb) is deliberate; do not reorder.
 */
static void invalidate_vm_tlb(void)
{
	/*
	 * Ensure that the last VTTBR write has taken effect so we invalidate
	 * the right set of TLB entries.
	 */
	isb();

	__asm__ volatile("tlbi vmalle1");

	/*
	 * Ensure that no instructions are fetched for the VM until after the
	 * TLB invalidation has taken effect.
	 */
	isb();

	/*
	 * Ensure that no data reads or writes for the VM happen until after the
	 * TLB invalidation has taken effect. Non-shareable is enough because
	 * the TLB is local to the CPU.
	 */
	dsb(nsh);
}
+
+/**
+ * Invalidates the TLB if a different vCPU is being run than the last vCPU of
+ * the same VM which was run on the current pCPU.
+ *
+ * This is necessary because VMs may (contrary to the architecture
+ * specification) use inconsistent ASIDs across vCPUs. c.f. KVM's similar
+ * workaround:
+ * https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=94d0e5980d6791b9
+ */
+void maybe_invalidate_tlb(struct vcpu *vcpu)
+{
+ size_t current_cpu_index = cpu_index(vcpu->cpu);
+ spci_vcpu_index_t new_vcpu_index = vcpu_index(vcpu);
+
+ if (vcpu->vm->arch.last_vcpu_on_cpu[current_cpu_index] !=
+ new_vcpu_index) {
+ /*
+ * The vCPU has changed since the last time this VM was run on
+ * this pCPU, so we need to invalidate the TLB.
+ */
+ invalidate_vm_tlb();
+
+ /* Record the fact that this vCPU is now running on this CPU. */
+ vcpu->vm->arch.last_vcpu_on_cpu[current_cpu_index] =
+ new_vcpu_index;
+ }
+}
+
/* An IRQ taken while running EL2's own code is unexpected; abort. */
noreturn void irq_current_exception_noreturn(uintreg_t elr, uintreg_t spsr)
{
	(void)elr;
	(void)spsr;

	panic("IRQ from current");
}

/* An FIQ taken while running EL2's own code is unexpected; abort. */
noreturn void fiq_current_exception_noreturn(uintreg_t elr, uintreg_t spsr)
{
	(void)elr;
	(void)spsr;

	panic("FIQ from current");
}

/* An SError taken while running EL2's own code is unexpected; abort. */
noreturn void serr_current_exception_noreturn(uintreg_t elr, uintreg_t spsr)
{
	(void)elr;
	(void)spsr;

	panic("SERR from current");
}
+
+noreturn void sync_current_exception_noreturn(uintreg_t elr, uintreg_t spsr)
+{
+ uintreg_t esr = read_msr(esr_el2);
+ uintreg_t ec = GET_ESR_EC(esr);
+
+ (void)spsr;
+
+ switch (ec) {
+ case 0x25: /* EC = 100101, Data abort. */
+ dlog("Data abort: pc=%#x, esr=%#x, ec=%#x", elr, esr, ec);
+ if (!(esr & (1U << 10))) { /* Check FnV bit. */
+ dlog(", far=%#x", read_msr(far_el2));
+ } else {
+ dlog(", far=invalid");
+ }
+
+ dlog("\n");
+ break;
+
+ default:
+ dlog("Unknown current sync exception pc=%#x, esr=%#x, "
+ "ec=%#x\n",
+ elr, esr, ec);
+ break;
+ }
+
+ panic("EL2 exception");
+}
+
+/**
+ * Sets or clears the VI bit in the HCR_EL2 register saved in the given
+ * arch_regs.
+ */
+static void set_virtual_interrupt(struct arch_regs *r, bool enable)
+{
+ if (enable) {
+ r->lazy.hcr_el2 |= HCR_EL2_VI;
+ } else {
+ r->lazy.hcr_el2 &= ~HCR_EL2_VI;
+ }
+}
+
+/**
+ * Sets or clears the VI bit in the HCR_EL2 register.
+ */
+static void set_virtual_interrupt_current(bool enable)
+{
+ uintreg_t hcr_el2 = read_msr(hcr_el2);
+
+ if (enable) {
+ hcr_el2 |= HCR_EL2_VI;
+ } else {
+ hcr_el2 &= ~HCR_EL2_VI;
+ }
+ write_msr(hcr_el2, hcr_el2);
+}
+
+/**
+ * Checks whether to block an SMC being forwarded from a VM.
+ */
+static bool smc_is_blocked(const struct vm *vm, uint32_t func)
+{
+ bool block_by_default = !vm->smc_whitelist.permissive;
+
+ for (size_t i = 0; i < vm->smc_whitelist.smc_count; ++i) {
+ if (func == vm->smc_whitelist.smcs[i]) {
+ return false;
+ }
+ }
+
+ dlog("SMC %#010x attempted from VM %d, blocked=%d\n", func, vm->id,
+ block_by_default);
+
+ /* Access is still allowed in permissive mode. */
+ return block_by_default;
+}
+
/**
 * Applies SMC access control according to manifest and forwards the call if
 * access is granted.
 */
static void smc_forwarder(const struct vm *vm, struct spci_value *args)
{
	struct spci_value ret;
	uint32_t client_id = vm->id;
	/* Preserve the caller's x7 so it can be restored after forwarding. */
	uintreg_t arg7 = args->arg7;

	if (smc_is_blocked(vm, args->func)) {
		/* Report "unknown function" to the caller, per SMCCC. */
		args->func = SMCCC_ERROR_UNKNOWN;
		return;
	}

	/*
	 * Set the Client ID but keep the existing Secure OS ID and anything
	 * else (currently unspecified) that the client may have passed in the
	 * upper bits.
	 */
	args->arg7 = client_id | (arg7 & ~CLIENT_ID_MASK);
	ret = smc_forward(args->func, args->arg1, args->arg2, args->arg3,
			  args->arg4, args->arg5, args->arg6, args->arg7);

	/*
	 * Preserve the value passed by the caller, rather than the generated
	 * client_id. Note that this would also overwrite any return value that
	 * may be in x7, but the SMCs that we are forwarding are legacy calls
	 * from before SMCCC 1.2 so won't have more than 4 return values anyway.
	 */
	ret.arg7 = arg7;

	/* Allow the platform to post-process / veto the forwarded result. */
	plat_smc_post_forward(*args, &ret);

	*args = ret;
}
+
+/**
+ * Dispatches SPCI calls to the API layer.
+ *
+ * Returns true if the function ID was an SPCI one and *args was updated with
+ * the result; *next may be set by the API call when a vCPU switch is needed.
+ * Returns false for unrecognised function IDs so the caller can try other
+ * handlers.
+ */
+static bool spci_handler(struct spci_value *args, struct vcpu **next)
+{
+	/*
+	 * NOTE: When adding new methods to this handler update
+	 * api_spci_features accordingly.
+	 */
+	switch (args->func & ~SMCCC_CONVENTION_MASK) {
+	case SPCI_VERSION_32:
+		*args = api_spci_version();
+		return true;
+	case SPCI_ID_GET_32:
+		*args = api_spci_id_get(current());
+		return true;
+	case SPCI_FEATURES_32:
+		*args = api_spci_features(args->arg1);
+		return true;
+	case SPCI_RX_RELEASE_32:
+		*args = api_spci_rx_release(current(), next);
+		return true;
+	case SPCI_RXTX_MAP_32:
+		*args = api_spci_rxtx_map(ipa_init(args->arg1),
+					  ipa_init(args->arg2), args->arg3,
+					  current(), next);
+		return true;
+	case SPCI_YIELD_32:
+		api_yield(current(), next);
+
+		/* SPCI_YIELD always returns SPCI_SUCCESS. */
+		*args = (struct spci_value){.func = SPCI_SUCCESS_32};
+
+		return true;
+	case SPCI_MSG_SEND_32:
+		*args = api_spci_msg_send(spci_msg_send_sender(*args),
+					  spci_msg_send_receiver(*args),
+					  spci_msg_send_size(*args),
+					  spci_msg_send_attributes(*args),
+					  current(), next);
+		return true;
+	case SPCI_MSG_WAIT_32:
+		/* Blocking receive. */
+		*args = api_spci_msg_recv(true, current(), next);
+		return true;
+	case SPCI_MSG_POLL_32:
+		/* Non-blocking receive. */
+		*args = api_spci_msg_recv(false, current(), next);
+		return true;
+	case SPCI_RUN_32:
+		*args = api_spci_run(spci_vm_id(*args), spci_vcpu_index(*args),
+				     current(), next);
+		return true;
+	}
+
+	return false;
+}
+
+/**
+ * Set or clear VI bit according to pending interrupts.
+ */
+static void update_vi(struct vcpu *next)
+{
+	if (next != NULL) {
+		/*
+		 * About to switch vCPUs, set the bit for the vCPU to which we
+		 * are switching in the saved copy of the register.
+		 */
+		sl_lock(&next->lock);
+		set_virtual_interrupt(
+			&next->regs,
+			next->interrupts.enabled_and_pending_count > 0);
+		sl_unlock(&next->lock);
+	} else {
+		/*
+		 * Not switching vCPUs, set the bit for the current vCPU
+		 * directly in the register.
+		 */
+		struct vcpu *vcpu = current();
+
+		sl_lock(&vcpu->lock);
+		set_virtual_interrupt_current(
+			vcpu->interrupts.enabled_and_pending_count > 0);
+		sl_unlock(&vcpu->lock);
+	}
+}
+
+/**
+ * Processes SMC instruction calls trapped from a VM.
+ */
+static struct vcpu *smc_handler(struct vcpu *vcpu)
+{
+	struct vcpu *next = NULL;
+	struct spci_value args = {
+		.func = vcpu->regs.r[0],
+		.arg1 = vcpu->regs.r[1],
+		.arg2 = vcpu->regs.r[2],
+		.arg3 = vcpu->regs.r[3],
+		.arg4 = vcpu->regs.r[4],
+		.arg5 = vcpu->regs.r[5],
+		.arg6 = vcpu->regs.r[6],
+		.arg7 = vcpu->regs.r[7],
+	};
+
+	/* PSCI calls get first pick. */
+	if (psci_handler(vcpu, args.func, args.arg1, args.arg2, args.arg3,
+			 &vcpu->regs.r[0], &next)) {
+		return next;
+	}
+
+	/* Then the SPCI dispatcher. */
+	if (spci_handler(&args, &next)) {
+		arch_regs_set_retval(&vcpu->regs, args);
+		update_vi(next);
+		return next;
+	}
+
+	if ((args.func & ~SMCCC_CONVENTION_MASK) == HF_DEBUG_LOG) {
+		vcpu->regs.r[0] = api_debug_log(args.arg1, vcpu);
+		return NULL;
+	}
+
+	/* Everything else is subject to the VM's SMC forwarding policy. */
+	smc_forwarder(vcpu->vm, &args);
+	arch_regs_set_retval(&vcpu->regs, args);
+	return NULL;
+}
+
+/*
+ * Exception vector offsets.
+ * See Arm Architecture Reference Manual Armv8-A, D1.10.2.
+ * These offsets are added to vbar_el1 when injecting an exception into EL1.
+ */
+
+/**
+ * Offset for synchronous exceptions at current EL with SPx.
+ */
+#define OFFSET_CURRENT_SPX UINT64_C(0x200)
+
+/**
+ * Offset for synchronous exceptions at lower EL using AArch64.
+ */
+#define OFFSET_LOWER_EL_64 UINT64_C(0x400)
+
+/**
+ * Offset for synchronous exceptions at lower EL using AArch32.
+ */
+#define OFFSET_LOWER_EL_32 UINT64_C(0x600)
+
+/**
+ * Returns the address for the exception handler at EL1, derived from vbar_el1
+ * and the vCPU's saved PSTATE (exception level and register width).
+ */
+static uintreg_t get_el1_exception_handler_addr(const struct vcpu *vcpu)
+{
+	uintreg_t pe_mode = vcpu->regs.spsr & PSR_PE_MODE_MASK;
+	bool is_arch32 = vcpu->regs.spsr & PSR_ARCH_MODE_32;
+	uintreg_t offset;
+
+	if (pe_mode != PSR_PE_MODE_EL0T) {
+		/* Exceptions from EL1 itself use the current-EL SPx vector. */
+		CHECK(!is_arch32);
+		offset = OFFSET_CURRENT_SPX;
+	} else if (is_arch32) {
+		offset = OFFSET_LOWER_EL_32;
+	} else {
+		offset = OFFSET_LOWER_EL_64;
+	}
+
+	return read_msr(vbar_el1) + offset;
+}
+
+/**
+ * Injects an exception with an unknown reason (EC=0x0) into the EL1 of the
+ * vCPU's VM.
+ * See Arm Architecture Reference Manual Armv8-A, page D13-2924.
+ *
+ * NOTE: This function assumes that the lazy registers haven't been saved, and
+ * writes to the lazy registers of the CPU directly instead of the vCPU.
+ */
+static struct vcpu *inject_el1_unknown_exception(struct vcpu *vcpu,
+						 uintreg_t esr_el2)
+{
+	uintreg_t esr_el1_value = GET_ESR_IL(esr_el2);
+	uintreg_t handler_address = get_el1_exception_handler_addr(vcpu);
+
+	/* Update the CPU state to inject the exception. */
+	write_msr(esr_el1, esr_el1_value);
+	write_msr(elr_el1, vcpu->regs.pc);
+	write_msr(spsr_el1, vcpu->regs.spsr);
+
+	/*
+	 * Mask (disable) interrupts and run in EL1h mode.
+	 * EL1h mode is used because by default, taking an exception selects the
+	 * stack pointer for the target Exception level. The software can change
+	 * that later in the handler if needed.
+	 * See Arm Architecture Reference Manual Armv8-A, page D13-2924
+	 */
+	vcpu->regs.spsr = PSR_D | PSR_A | PSR_I | PSR_F | PSR_PE_MODE_EL1H;
+
+	/* Transfer control to the exception handler. */
+	vcpu->regs.pc = handler_address;
+
+	/*
+	 * The ISS decode below is only meaningful for system register traps
+	 * (EC=0x18). This function is also reached for other exception
+	 * classes (e.g. the default case of sync_lower_exception), where
+	 * printing the decoded fields would be misleading, so guard the log.
+	 */
+	if (GET_ESR_EC(esr_el2) == 0x18) {
+		char *direction_str = ISS_IS_READ(esr_el2) ? "read" : "write";
+
+		dlog("Trapped access to system register %s: op0=%d, op1=%d, "
+		     "crn=%d, crm=%d, op2=%d, rt=%d.\n",
+		     direction_str, GET_ISS_OP0(esr_el2), GET_ISS_OP1(esr_el2),
+		     GET_ISS_CRN(esr_el2), GET_ISS_CRM(esr_el2),
+		     GET_ISS_OP2(esr_el2), GET_ISS_RT(esr_el2));
+	}
+
+	dlog("Injecting Unknown Reason exception into VM%d.\n", vcpu->vm->id);
+	dlog("Exception handler address 0x%x\n", handler_address);
+
+	/* Schedule the same VM to continue running. */
+	return NULL;
+}
+
+/**
+ * Processes HVC instruction calls from a VM: PSCI, SPCI and Hafnium-specific
+ * hypercalls.
+ */
+struct vcpu *hvc_handler(struct vcpu *vcpu)
+{
+	struct vcpu *next = NULL;
+	struct spci_value args = {
+		.func = vcpu->regs.r[0],
+		.arg1 = vcpu->regs.r[1],
+		.arg2 = vcpu->regs.r[2],
+		.arg3 = vcpu->regs.r[3],
+		.arg4 = vcpu->regs.r[4],
+		.arg5 = vcpu->regs.r[5],
+		.arg6 = vcpu->regs.r[6],
+		.arg7 = vcpu->regs.r[7],
+	};
+
+	if (psci_handler(vcpu, args.func, args.arg1, args.arg2, args.arg3,
+			 &vcpu->regs.r[0], &next)) {
+		return next;
+	}
+
+	if (spci_handler(&args, &next)) {
+		arch_regs_set_retval(&vcpu->regs, args);
+		update_vi(next);
+		return next;
+	}
+
+	/* Hafnium-specific hypercalls. */
+	switch (args.func) {
+	case HF_VM_GET_COUNT:
+		vcpu->regs.r[0] = api_vm_get_count();
+		break;
+	case HF_VCPU_GET_COUNT:
+		vcpu->regs.r[0] = api_vcpu_get_count(args.arg1, vcpu);
+		break;
+	case HF_MAILBOX_WRITABLE_GET:
+		vcpu->regs.r[0] = api_mailbox_writable_get(vcpu);
+		break;
+	case HF_MAILBOX_WAITER_GET:
+		vcpu->regs.r[0] = api_mailbox_waiter_get(args.arg1, vcpu);
+		break;
+	case HF_INTERRUPT_ENABLE:
+		vcpu->regs.r[0] =
+			api_interrupt_enable(args.arg1, args.arg2, vcpu);
+		break;
+	case HF_INTERRUPT_GET:
+		vcpu->regs.r[0] = api_interrupt_get(vcpu);
+		break;
+	case HF_INTERRUPT_INJECT:
+		vcpu->regs.r[0] = api_interrupt_inject(args.arg1, args.arg2,
+						       args.arg3, vcpu, &next);
+		break;
+	case HF_DEBUG_LOG:
+		vcpu->regs.r[0] = api_debug_log(args.arg1, vcpu);
+		break;
+	default:
+		vcpu->regs.r[0] = SMCCC_ERROR_UNKNOWN;
+		break;
+	}
+
+	/* Reflect pending virtual interrupts in HCR_EL2.VI. */
+	update_vi(next);
+
+	return next;
+}
+
+/**
+ * Handles an IRQ taken while a lower EL (a VM) was running.
+ */
+struct vcpu *irq_lower(void)
+{
+	/*
+	 * Switch back to primary VM, interrupts will be handled there.
+	 *
+	 * If the VM has aborted, this vCPU will be aborted when the scheduler
+	 * tries to run it again. This means the interrupt will not be delayed
+	 * by the aborted VM.
+	 *
+	 * TODO: Only switch when the interrupt isn't for the current VM.
+	 */
+	return api_preempt(current());
+}
+
+/**
+ * Handles an FIQ taken while a lower EL (a VM) was running, the same way as
+ * an IRQ.
+ */
+struct vcpu *fiq_lower(void)
+{
+	return irq_lower();
+}
+
+/**
+ * Handles an SError taken while a lower EL (a VM) was running, by aborting
+ * the offending vCPU.
+ */
+struct vcpu *serr_lower(void)
+{
+	dlog("SERR from lower\n");
+	return api_abort(current());
+}
+
+/**
+ * Builds a vcpu_fault_info for a data or instruction abort.
+ *
+ * It assumes that an FnV bit exists at bit offset 10 of the ESR, and that it
+ * is only valid when the bottom 6 bits of the ESR (the fault status code) are
+ * 010000; this is the case for both instruction and data aborts, but not
+ * necessarily for other exception reasons.
+ */
+static struct vcpu_fault_info fault_info_init(uintreg_t esr,
+					      const struct vcpu *vcpu,
+					      uint32_t mode)
+{
+	uint32_t fsc = esr & 0x3f;
+	/* FnV set (and valid) means far_el2 cannot be relied upon. */
+	bool far_invalid = (fsc == 0x10) && (esr & (1U << 10));
+	struct vcpu_fault_info r;
+
+	r.mode = mode;
+	r.pc = va_init(vcpu->regs.pc);
+
+	if (far_invalid) {
+		/* Only the faulting IPA page is known (from hpfar_el2). */
+		r.vaddr = va_init(0);
+		r.ipaddr = ipa_init(read_msr(hpfar_el2) << 8);
+	} else {
+		r.vaddr = va_init(read_msr(far_el2));
+		r.ipaddr = ipa_init((read_msr(hpfar_el2) << 8) |
+				    (read_msr(far_el2) & (PAGE_SIZE - 1)));
+	}
+
+	return r;
+}
+
+/**
+ * Handles synchronous exceptions (WFI/WFE, aborts, HVC, SMC, system register
+ * traps) taken from a lower EL while a VM was running.
+ *
+ * Returns the vCPU to run next, or NULL to keep running the same vCPU.
+ * Unhandled exception classes are injected back into the VM as an Unknown
+ * Reason exception.
+ */
+struct vcpu *sync_lower_exception(uintreg_t esr)
+{
+	struct vcpu *vcpu = current();
+	struct vcpu_fault_info info;
+	struct vcpu *new_vcpu;
+	uintreg_t ec = GET_ESR_EC(esr);
+
+	switch (ec) {
+	case 0x01: /* EC = 000001, WFI or WFE. */
+		/* Skip the instruction. */
+		vcpu->regs.pc += GET_NEXT_PC_INC(esr);
+		/* Check TI bit of ISS, 0 = WFI, 1 = WFE. */
+		if (esr & 1) {
+			/* WFE */
+			/*
+			 * TODO: consider giving the scheduler more context,
+			 * somehow.
+			 */
+			api_yield(vcpu, &new_vcpu);
+			return new_vcpu;
+		}
+		/* WFI */
+		return api_wait_for_interrupt(vcpu);
+
+	case 0x24: /* EC = 100100, Data abort. */
+		info = fault_info_init(
+			esr, vcpu, (esr & (1U << 6)) ? MM_MODE_W : MM_MODE_R);
+		if (vcpu_handle_page_fault(vcpu, &info)) {
+			/* Fault handled; resume the same vCPU. */
+			return NULL;
+		}
+		break;
+
+	case 0x20: /* EC = 100000, Instruction abort. */
+		info = fault_info_init(esr, vcpu, MM_MODE_X);
+		if (vcpu_handle_page_fault(vcpu, &info)) {
+			/* Fault handled; resume the same vCPU. */
+			return NULL;
+		}
+		break;
+
+	case 0x16: /* EC = 010110, HVC instruction */
+		return hvc_handler(vcpu);
+
+	case 0x17: /* EC = 010111, SMC instruction. */ {
+		uintreg_t smc_pc = vcpu->regs.pc;
+		struct vcpu *next = smc_handler(vcpu);
+
+		/* Skip the SMC instruction. */
+		vcpu->regs.pc = smc_pc + GET_NEXT_PC_INC(esr);
+
+		return next;
+	}
+
+	/*
+	 * EC = 011000, MSR, MRS or System instruction execution that is not
+	 * reported using EC 000000, 000001 or 000111.
+	 */
+	case 0x18:
+		/*
+		 * NOTE: This should never be reached because it goes through a
+		 * separate path handled by handle_system_register_access().
+		 */
+		panic("Handled by handle_system_register_access().");
+
+	default:
+		dlog("Unknown lower sync exception pc=%#x, esr=%#x, "
+		     "ec=%#x\n",
+		     vcpu->regs.pc, esr, ec);
+		break;
+	}
+
+	/*
+	 * The exception wasn't handled. Inject to the VM to give it chance to
+	 * handle as an unknown exception.
+	 */
+	return inject_el1_unknown_exception(vcpu, esr);
+}
+
+/**
+ * Handles EC = 011000, MSR, MRS instruction traps.
+ * Returns non-null ONLY if the access failed and the vCPU is changing.
+ */
+struct vcpu *handle_system_register_access(uintreg_t esr_el2)
+{
+	struct vcpu *vcpu = current();
+	spci_vm_id_t vm_id = vcpu->vm->id;
+	uintreg_t ec = GET_ESR_EC(esr_el2);
+
+	/* Only system register traps (EC=0x18) may be routed here. */
+	CHECK(ec == 0x18);
+	/*
+	 * Handle accesses to debug and performance monitor registers.
+	 * Inject an exception for unhandled/unsupported registers.
+	 */
+	if (debug_el1_is_register_access(esr_el2)) {
+		if (!debug_el1_process_access(vcpu, vm_id, esr_el2)) {
+			return inject_el1_unknown_exception(vcpu, esr_el2);
+		}
+	} else if (perfmon_is_register_access(esr_el2)) {
+		if (!perfmon_process_access(vcpu, vm_id, esr_el2)) {
+			return inject_el1_unknown_exception(vcpu, esr_el2);
+		}
+	} else if (feature_id_is_register_access(esr_el2)) {
+		if (!feature_id_process_access(vcpu, esr_el2)) {
+			return inject_el1_unknown_exception(vcpu, esr_el2);
+		}
+	} else {
+		/* Not a register class this hypervisor emulates. */
+		return inject_el1_unknown_exception(vcpu, esr_el2);
+	}
+
+	/* Instruction was fulfilled. Skip it and run the next one. */
+	vcpu->regs.pc += GET_NEXT_PC_INC(esr_el2);
+	return NULL;
+}
diff --git a/src/arch/aarch64/hypervisor/hypervisor_entry.S b/src/arch/aarch64/hypervisor/hypervisor_entry.S
new file mode 100644
index 0000000..c6fcb62
--- /dev/null
+++ b/src/arch/aarch64/hypervisor/hypervisor_entry.S
@@ -0,0 +1,144 @@
+/*
+ * Copyright 2018 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "hf/arch/offsets.h"
+
+/**
+ * Called only on first boot after the image has been relocated and BSS zeroed.
+ *
+ * It is required that caches be clean and invalid.
+ */
+.section .init.image_entry, "ax"
+.global image_entry
+image_entry:
+	/* Interpret the registers passed from the loader. */
+	bl plat_boot_flow_hook
+
+	/*
+	 * Get pointer to first CPU. x28 is callee-saved so the pointer
+	 * survives the calls below.
+	 */
+	adrp x28, cpus
+	add x28, x28, :lo12:cpus
+
+	/* Set the ID of this CPU from the affinity bits of mpidr. */
+	mrs x30, mpidr_el1
+	ubfx x29, x30, 0, 24
+	ubfx x30, x30, 32, 8
+	orr x30, x29, x30
+	str x30, [x28, CPU_ID]
+
+	mov x0, x28
+	bl prepare_for_c
+
+	/*
+	 * Call into C to initialize the memory management configuration with
+	 * MMU and caches disabled. Result will be stored in `arch_mm_config`.
+	 */
+	bl one_time_init_mm
+
+	/* Enable MMU and caches before running the rest of initialization. */
+	bl mm_enable
+	bl one_time_init
+
+	/* Begin steady state operation. */
+	mov x0, x28
+	b cpu_init
+
+/**
+ * Entry point for all cases other than the first boot e.g. secondary CPUs and
+ * resuming from suspend.
+ *
+ * It is required that caches be coherent but not necessarily clean or invalid.
+ *
+ * x0 points to the current CPU.
+ */
+.section .text.entry, "ax"
+.global cpu_entry
+cpu_entry:
+	/* Both helpers preserve the CPU pointer in x0. */
+	bl mm_enable
+	bl prepare_for_c
+
+	/* Intentional fallthrough. */
+
+cpu_init:
+	/* Call into C code, x0 holds the CPU pointer. */
+	bl cpu_main
+
+	/* Run the vCPU returned by cpu_main. */
+	bl vcpu_restore_all_and_run
+
+	/* Loop forever waiting for interrupts. */
+0:	wfi
+	b 0b
+
+/**
+ * Set up CPU environment for executing C code. This is called on first boot
+ * with caches disabled but subsequent calls will have caches enabled.
+ *
+ * x0 points to the current CPU on entry and exit.
+ */
+prepare_for_c:
+	/* Use SPx (instead of SP0). */
+	msr spsel, #1
+
+	/* Prepare the stack from the CPU's stack_bottom field. */
+	ldr x1, [x0, #CPU_STACK_BOTTOM]
+	mov sp, x1
+
+	/* Configure exception handlers: point vbar_el2 at the vector table. */
+	adr x2, vector_table_el2
+	msr vbar_el2, x2
+	ret
+
+/**
+ * Applies the memory management configuration to the CPU, preserving x0 along
+ * the way.
+ */
+mm_enable:
+	/*
+	 * Invalidate any potentially stale local TLB entries for the
+	 * hypervisor's stage-1 and the VM's stage-2 before they start being
+	 * used. The VM's stage-1 is invalidated as a side effect but it wasn't
+	 * using it yet anyway.
+	 */
+	tlbi alle2
+	tlbi vmalls12e1
+
+	/*
+	 * Load and apply the memory management configuration. Order depends on
+	 * the field layout of `struct arch_mm_config`.
+	 */
+	adrp x6, arch_mm_config
+	add x6, x6, :lo12:arch_mm_config
+
+	ldp x1, x2, [x6]
+	ldp x3, x4, [x6, #16]
+	ldr x5, [x6, #32]
+
+	msr ttbr0_el2, x1
+	msr vtcr_el2, x2
+	msr mair_el2, x3
+	msr tcr_el2, x4
+
+	/* Ensure everything before this point has completed. */
+	dsb sy
+	isb
+
+	/*
+	 * Configure sctlr_el2 to enable MMU and cache and don't proceed until
+	 * this has completed.
+	 */
+	msr sctlr_el2, x5
+	isb
+	ret
diff --git a/src/arch/aarch64/hypervisor/offsets.c b/src/arch/aarch64/hypervisor/offsets.c
new file mode 100644
index 0000000..b99273c
--- /dev/null
+++ b/src/arch/aarch64/hypervisor/offsets.c
@@ -0,0 +1,28 @@
+/*
+ * Copyright 2018 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "hf/cpu.h"
+#include "hf/offset_size_header.h"
+
+/* Field offsets consumed by the assembly sources (e.g. hypervisor_entry.S). */
+DEFINE_OFFSETOF(CPU_ID, struct cpu, id)
+DEFINE_OFFSETOF(CPU_STACK_BOTTOM, struct cpu, stack_bottom)
+DEFINE_OFFSETOF(VCPU_REGS, struct vcpu, regs)
+DEFINE_OFFSETOF(VCPU_LAZY, struct vcpu, regs.lazy)
+DEFINE_OFFSETOF(VCPU_FREGS, struct vcpu, regs.fp)
+
+/* The GIC register offset only exists when building with GICv3/v4 support. */
+#if GIC_VERSION == 3 || GIC_VERSION == 4
+DEFINE_OFFSETOF(VCPU_GIC, struct vcpu, regs.gic)
+#endif
diff --git a/src/arch/aarch64/hypervisor/perfmon.c b/src/arch/aarch64/hypervisor/perfmon.c
new file mode 100644
index 0000000..5be27ae
--- /dev/null
+++ b/src/arch/aarch64/hypervisor/perfmon.c
@@ -0,0 +1,237 @@
+/*
+ * Copyright 2019 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "perfmon.h"
+
+#include "hf/check.h"
+#include "hf/dlog.h"
+#include "hf/types.h"
+
+#include "msr.h"
+#include "sysregs.h"
+
+/* clang-format off */
+
+/**
+ * Definitions of read-only performance monitor registers' encodings.
+ * See Arm Architecture Reference Manual Armv8-A, D12.3.1.
+ * NAME, op0, op1, crn, crm, op2
+ */
+#define PERFMON_REGISTERS_READ \
+ X(PMCEID0_EL0 , 3, 3, 9, 12, 6) \
+ X(PMCEID1_EL0 , 3, 3, 9, 12, 7) \
+ X(PMEVCNTR0_EL0 , 3, 3, 14, 8, 0) \
+ X(PMEVCNTR1_EL0 , 3, 3, 14, 8, 1) \
+ X(PMEVCNTR2_EL0 , 3, 3, 14, 8, 2) \
+ X(PMEVCNTR3_EL0 , 3, 3, 14, 8, 3) \
+ X(PMEVCNTR4_EL0 , 3, 3, 14, 8, 4) \
+ X(PMEVCNTR5_EL0 , 3, 3, 14, 8, 5) \
+ X(PMEVCNTR6_EL0 , 3, 3, 14, 8, 6) \
+ X(PMEVCNTR7_EL0 , 3, 3, 14, 8, 7) \
+ X(PMEVCNTR8_EL0 , 3, 3, 14, 9, 0) \
+ X(PMEVCNTR9_EL0 , 3, 3, 14, 9, 1) \
+ X(PMEVCNTR10_EL0 , 3, 3, 14, 9, 2) \
+ X(PMEVCNTR11_EL0 , 3, 3, 14, 9, 3) \
+ X(PMEVCNTR12_EL0 , 3, 3, 14, 9, 4) \
+ X(PMEVCNTR13_EL0 , 3, 3, 14, 9, 5) \
+ X(PMEVCNTR14_EL0 , 3, 3, 14, 9, 6) \
+ X(PMEVCNTR15_EL0 , 3, 3, 14, 9, 7) \
+ X(PMEVCNTR16_EL0 , 3, 3, 14, 10, 0) \
+ X(PMEVCNTR17_EL0 , 3, 3, 14, 10, 1) \
+ X(PMEVCNTR18_EL0 , 3, 3, 14, 10, 2) \
+ X(PMEVCNTR19_EL0 , 3, 3, 14, 10, 3) \
+ X(PMEVCNTR20_EL0 , 3, 3, 14, 10, 4) \
+ X(PMEVCNTR21_EL0 , 3, 3, 14, 10, 5) \
+ X(PMEVCNTR22_EL0 , 3, 3, 14, 10, 6) \
+ X(PMEVCNTR23_EL0 , 3, 3, 14, 10, 7) \
+ X(PMEVCNTR24_EL0 , 3, 3, 14, 11, 0) \
+ X(PMEVCNTR25_EL0 , 3, 3, 14, 11, 1) \
+ X(PMEVCNTR26_EL0 , 3, 3, 14, 11, 2) \
+ X(PMEVCNTR27_EL0 , 3, 3, 14, 11, 3) \
+ X(PMEVCNTR28_EL0 , 3, 3, 14, 11, 4) \
+ X(PMEVCNTR29_EL0 , 3, 3, 14, 11, 5) \
+ X(PMEVCNTR30_EL0 , 3, 3, 14, 11, 6) \
+
+/**
+ * Definitions of write-only performance monitor registers' encodings.
+ * See Arm Architecture Reference Manual Armv8-A, D12.3.1.
+ * NAME, op0, op1, crn, crm, op2
+ */
+#define PERFMON_REGISTERS_WRITE \
+ X(PMSWINC_EL0 , 3, 3, 9, 12, 4) \
+
+/**
+ * Definitions of readable and writeable performance monitor registers' encodings.
+ * See Arm Architecture Reference Manual Armv8-A, D12.3.1.
+ * NAME, op0, op1, crn, crm, op2
+ */
+#define PERFMON_REGISTERS_READ_WRITE \
+ X(PMINTENSET_EL1 , 3, 0, 9, 14, 1) \
+ X(PMINTENCLR_EL1 , 3, 0, 9, 14, 2) \
+ X(PMCR_EL0 , 3, 3, 9, 12, 0) \
+ X(PMCNTENSET_EL0 , 3, 3, 9, 12, 1) \
+ X(PMCNTENCLR_EL0 , 3, 3, 9, 12, 2) \
+ X(PMOVSCLR_EL0 , 3, 3, 9, 12, 3) \
+ X(PMSELR_EL0 , 3, 3, 9, 12, 5) \
+ X(PMCCNTR_EL0 , 3, 3, 9, 13, 0) \
+ X(PMXEVTYPER_EL0 , 3, 3, 9, 13, 1) \
+ X(PMXEVCNTR_EL0 , 3, 3, 9, 13, 2) \
+ X(PMUSERENR_EL0 , 3, 3, 9, 14, 0) \
+ X(PMOVSSET_EL0 , 3, 3, 9, 14, 3) \
+ X(PMEVTYPER0_EL0 , 3, 3, 14, 12, 0) \
+ X(PMEVTYPER1_EL0 , 3, 3, 14, 12, 1) \
+ X(PMEVTYPER2_EL0 , 3, 3, 14, 12, 2) \
+ X(PMEVTYPER3_EL0 , 3, 3, 14, 12, 3) \
+ X(PMEVTYPER4_EL0 , 3, 3, 14, 12, 4) \
+ X(PMEVTYPER5_EL0 , 3, 3, 14, 12, 5) \
+ X(PMEVTYPER6_EL0 , 3, 3, 14, 12, 6) \
+ X(PMEVTYPER7_EL0 , 3, 3, 14, 12, 7) \
+ X(PMEVTYPER8_EL0 , 3, 3, 14, 13, 0) \
+ X(PMEVTYPER9_EL0 , 3, 3, 14, 13, 1) \
+ X(PMEVTYPER10_EL0 , 3, 3, 14, 13, 2) \
+ X(PMEVTYPER11_EL0 , 3, 3, 14, 13, 3) \
+ X(PMEVTYPER12_EL0 , 3, 3, 14, 13, 4) \
+ X(PMEVTYPER13_EL0 , 3, 3, 14, 13, 5) \
+ X(PMEVTYPER14_EL0 , 3, 3, 14, 13, 6) \
+ X(PMEVTYPER15_EL0 , 3, 3, 14, 13, 7) \
+ X(PMEVTYPER16_EL0 , 3, 3, 14, 14, 0) \
+ X(PMEVTYPER17_EL0 , 3, 3, 14, 14, 1) \
+ X(PMEVTYPER18_EL0 , 3, 3, 14, 14, 2) \
+ X(PMEVTYPER19_EL0 , 3, 3, 14, 14, 3) \
+ X(PMEVTYPER20_EL0 , 3, 3, 14, 14, 4) \
+ X(PMEVTYPER21_EL0 , 3, 3, 14, 14, 5) \
+ X(PMEVTYPER22_EL0 , 3, 3, 14, 14, 6) \
+ X(PMEVTYPER23_EL0 , 3, 3, 14, 14, 7) \
+ X(PMEVTYPER24_EL0 , 3, 3, 14, 15, 0) \
+ X(PMEVTYPER25_EL0 , 3, 3, 14, 15, 1) \
+ X(PMEVTYPER26_EL0 , 3, 3, 14, 15, 2) \
+ X(PMEVTYPER27_EL0 , 3, 3, 14, 15, 3) \
+ X(PMEVTYPER28_EL0 , 3, 3, 14, 15, 4) \
+ X(PMEVTYPER29_EL0 , 3, 3, 14, 15, 5) \
+ X(PMEVTYPER30_EL0 , 3, 3, 14, 15, 6) \
+ X(PMCCFILTR_EL0 , 3, 3, 14, 15, 7)
+
+/* clang-format on */
+
+/**
+ * Returns true if the ESR register shows an access to a performance monitor
+ * register.
+ */
+bool perfmon_is_register_access(uintreg_t esr)
+{
+	uintreg_t op0 = GET_ISS_OP0(esr);
+	uintreg_t op1 = GET_ISS_OP1(esr);
+	uintreg_t crn = GET_ISS_CRN(esr);
+	uintreg_t crm = GET_ISS_CRM(esr);
+
+	/* From the Arm Architecture Reference Manual Table D12-2. */
+
+	if (op0 != 3) {
+		return false;
+	}
+
+	/* PMINTENCLR_EL1 and PMINTENSET_EL1. */
+	if (op1 == 0) {
+		return crn == 9 && crm == 14;
+	}
+
+	if (op1 != 3) {
+		return false;
+	}
+
+	/* PMEVCNTRn_EL0, PMEVTYPERn_EL0 and PMCCFILTR_EL0. */
+	if (crn == 14) {
+		return crm >= 8 && crm <= 15;
+	}
+
+	/* All remaining performance monitor registers. */
+	return crn == 9 && crm >= 12 && crm <= 14;
+}
+
+/**
+ * Processes an access (msr, mrs) to a performance monitor register.
+ * Returns true if the access was allowed and performed, false otherwise.
+ */
+bool perfmon_process_access(struct vcpu *vcpu, spci_vm_id_t vm_id,
+			    uintreg_t esr)
+{
+	/*
+	 * For now, performance monitor registers are not supported by
+	 * secondary VMs. Disallow accesses to them.
+	 */
+	if (vm_id != HF_PRIMARY_VM_ID) {
+		return false;
+	}
+
+	uintreg_t sys_register = GET_ISS_SYSREG(esr);
+	uintreg_t rt_register = GET_ISS_RT(esr);
+	uintreg_t value;
+
+	/* +1 because Rt can access register XZR */
+	CHECK(rt_register < NUM_GP_REGS + 1);
+
+	if (ISS_IS_READ(esr)) {
+		switch (sys_register) {
+#define X(reg_name, op0, op1, crn, crm, op2)              \
+	case (GET_ISS_ENCODING(op0, op1, crn, crm, op2)): \
+		value = read_msr(reg_name);               \
+		break;
+			PERFMON_REGISTERS_READ
+			PERFMON_REGISTERS_READ_WRITE
+#undef X
+		default:
+			/*
+			 * Unsupported register: emulate the read as a no-op
+			 * by writing back the destination's current value.
+			 * Never index r[] with the XZR encoding (31), which
+			 * is one past the end of the array; the write-back
+			 * below is skipped for XZR anyway.
+			 */
+			value = (rt_register == RT_REG_XZR)
+					? 0
+					: vcpu->regs.r[rt_register];
+			dlog("Unsupported performance monitor register read: "
+			     "op0=%d, op1=%d, crn=%d, crm=%d, op2=%d, rt=%d.\n",
+			     GET_ISS_OP0(esr), GET_ISS_OP1(esr),
+			     GET_ISS_CRN(esr), GET_ISS_CRM(esr),
+			     GET_ISS_OP2(esr), GET_ISS_RT(esr));
+			break;
+		}
+		if (rt_register != RT_REG_XZR) {
+			vcpu->regs.r[rt_register] = value;
+		}
+	} else {
+		/* Writes of XZR are writes of zero. */
+		if (rt_register != RT_REG_XZR) {
+			value = vcpu->regs.r[rt_register];
+		} else {
+			value = 0;
+		}
+		switch (sys_register) {
+#define X(reg_name, op0, op1, crn, crm, op2)              \
+	case (GET_ISS_ENCODING(op0, op1, crn, crm, op2)): \
+		write_msr(reg_name, value);               \
+		break;
+			PERFMON_REGISTERS_WRITE
+			PERFMON_REGISTERS_READ_WRITE
+#undef X
+		default:
+			dlog("Unsupported performance monitor register write: "
+			     "op0=%d, op1=%d, crn=%d, crm=%d, op2=%d, rt=%d.\n",
+			     GET_ISS_OP0(esr), GET_ISS_OP1(esr),
+			     GET_ISS_CRN(esr), GET_ISS_CRM(esr),
+			     GET_ISS_OP2(esr), GET_ISS_RT(esr));
+			break;
+		}
+	}
+
+	return true;
+}
+
+/**
+ * Returns the value register PMCCFILTR_EL0 should have at initialization.
+ */
+uintreg_t perfmon_get_pmccfiltr_el0_init_value(spci_vm_id_t vm_id)
+{
+	if (vm_id == HF_PRIMARY_VM_ID) {
+		return 0;
+	}
+
+	/* Disable cycle counting for secondary VMs. */
+	return PMCCFILTR_EL0_P | PMCCFILTR_EL0_U;
+}
diff --git a/src/arch/aarch64/hypervisor/perfmon.h b/src/arch/aarch64/hypervisor/perfmon.h
new file mode 100644
index 0000000..afeabd9
--- /dev/null
+++ b/src/arch/aarch64/hypervisor/perfmon.h
@@ -0,0 +1,80 @@
+/*
+ * Copyright 2019 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include "hf/arch/types.h"
+
+#include "hf/cpu.h"
+
+#include "vmapi/hf/spci.h"
+
+/**
+ * Set to disable cycle counting when event counting is prohibited.
+ */
+#define PMCR_EL0_DP 0x10
+
+/**
+ * Set to enable export of events where not prohibited.
+ */
+#define PMCR_EL0_X 0x8
+
+/**
+ * Set to enable event counting.
+ */
+#define PMCR_EL0_E 0x1
+
+/**
+ * Set to disable cycle counting in EL1.
+ */
+#define PMCCFILTR_EL0_P 0x80000000
+
+/**
+ * Set to disable cycle counting in EL0.
+ */
+#define PMCCFILTR_EL0_U 0x40000000
+
+/**
+ * Cycle counting in non-secure EL1 is enabled if NSK == P.
+ */
+#define PMCCFILTR_EL0_NSK 0x20000000
+
+/**
+ * Cycle counting in non-secure EL0 is enabled if NSU == U.
+ */
+#define PMCCFILTR_EL0_NSU 0x10000000
+
+/**
+ * Set to enable cycle counting in EL2.
+ */
+#define PMCCFILTR_EL0_NSH 0x8000000
+
+/**
+ * Cycle counting in EL3 is enabled if M == P.
+ */
+#define PMCCFILTR_EL0_M 0x4000000
+
+/**
+ * Cycle counting in Secure EL2 is enabled if SH != NSH.
+ */
+#define PMCCFILTR_EL0_SH 0x1000000
+
+bool perfmon_is_register_access(uintreg_t esr_el2);
+
+bool perfmon_process_access(struct vcpu *vcpu, spci_vm_id_t vm_id,
+ uintreg_t esr_el2);
+
+uintreg_t perfmon_get_pmccfiltr_el0_init_value(spci_vm_id_t vm_id);
diff --git a/src/arch/aarch64/hypervisor/plat_entry.S b/src/arch/aarch64/hypervisor/plat_entry.S
new file mode 100644
index 0000000..d7ecc87
--- /dev/null
+++ b/src/arch/aarch64/hypervisor/plat_entry.S
@@ -0,0 +1,22 @@
+/*
+ * Copyright 2018 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Default platform entry hook. It is declared weak so that a platform can
+ * provide its own plat_entry to run early, pre-C initialisation; this default
+ * does nothing.
+ */
+.section .init.image_entry, "ax"
+.global plat_entry
+.weak plat_entry
+plat_entry:
+	/* Do nothing. */
+	ret
diff --git a/src/arch/aarch64/hypervisor/psci_handler.c b/src/arch/aarch64/hypervisor/psci_handler.c
new file mode 100644
index 0000000..1d638e4
--- /dev/null
+++ b/src/arch/aarch64/hypervisor/psci_handler.c
@@ -0,0 +1,413 @@
+/*
+ * Copyright 2019 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "psci_handler.h"
+
+#include <stdint.h>
+
+#include "hf/arch/types.h"
+
+#include "hf/api.h"
+#include "hf/cpu.h"
+#include "hf/dlog.h"
+#include "hf/panic.h"
+#include "hf/spci.h"
+#include "hf/vm.h"
+
+#include "psci.h"
+#include "smc.h"
+
+static uint32_t el3_psci_version;
+
+void cpu_entry(struct cpu *c);
+
+/* Performs arch specific boot time initialisation. */
+void arch_one_time_init(void)
+{
+ struct spci_value smc_res =
+ smc32(PSCI_VERSION, 0, 0, 0, 0, 0, 0, SMCCC_CALLER_HYPERVISOR);
+
+ el3_psci_version = smc_res.func;
+
+ /* Check there's nothing unexpected about PSCI. */
+ switch (el3_psci_version) {
+ case PSCI_VERSION_0_2:
+ case PSCI_VERSION_1_0:
+ case PSCI_VERSION_1_1:
+ /* Supported EL3 PSCI version. */
+ dlog("Found PSCI version: %#x\n", el3_psci_version);
+ break;
+
+ default:
+ /* Unsupported EL3 PSCI version. Log a warning but continue. */
+ dlog("Warning: unknown PSCI version: %#x\n", el3_psci_version);
+ el3_psci_version = 0;
+ break;
+ }
+}
+
+/**
+ * Handles PSCI requests received via HVC or SMC instructions from the primary
+ * VM.
+ *
+ * A minimal PSCI 1.1 interface is offered which can make use of the
+ * implementation of PSCI in EL3 by acting as an adapter.
+ *
+ * Returns true if the request was a PSCI one, false otherwise.
+ */
+bool psci_primary_vm_handler(struct vcpu *vcpu, uint32_t func, uintreg_t arg0,
+ uintreg_t arg1, uintreg_t arg2, uintreg_t *ret)
+{
+ struct cpu *c;
+ struct spci_value smc_res;
+
+ /*
+ * If there's a problem with the EL3 PSCI, block standard secure service
+ * calls by marking them as unknown. Other calls will be allowed to pass
+ * through.
+ *
+ * This blocks more calls than just PSCI so it may need to be made more
+ * lenient in future.
+ */
+ if (el3_psci_version == 0) {
+ *ret = SMCCC_ERROR_UNKNOWN;
+ return (func & SMCCC_SERVICE_CALL_MASK) ==
+ SMCCC_STANDARD_SECURE_SERVICE_CALL;
+ }
+
+ switch (func & ~SMCCC_CONVENTION_MASK) {
+ case PSCI_VERSION:
+ *ret = PSCI_VERSION_1_1;
+ break;
+
+ case PSCI_FEATURES:
+ switch (arg0 & ~SMCCC_CONVENTION_MASK) {
+ case PSCI_CPU_SUSPEND:
+ if (el3_psci_version == PSCI_VERSION_0_2) {
+ /*
+ * PSCI 0.2 doesn't support PSCI_FEATURES so
+ * report PSCI 0.2 compatible features.
+ */
+ *ret = 0;
+ } else {
+ /* PSCI 1.x only defines two feature bits. */
+ smc_res = smc32(func, arg0, 0, 0, 0, 0, 0,
+ SMCCC_CALLER_HYPERVISOR);
+ *ret = smc_res.func & 0x3;
+ }
+ break;
+
+ case PSCI_VERSION:
+ case PSCI_FEATURES:
+ case PSCI_SYSTEM_OFF:
+ case PSCI_SYSTEM_RESET:
+ case PSCI_AFFINITY_INFO:
+ case PSCI_CPU_OFF:
+ case PSCI_CPU_ON:
+ /* These are supported without special features. */
+ *ret = 0;
+ break;
+
+ default:
+ /* Everything else is unsupported. */
+ *ret = PSCI_ERROR_NOT_SUPPORTED;
+ break;
+ }
+ break;
+
+ case PSCI_SYSTEM_OFF:
+ smc32(PSCI_SYSTEM_OFF, 0, 0, 0, 0, 0, 0,
+ SMCCC_CALLER_HYPERVISOR);
+ panic("System off failed");
+ break;
+
+ case PSCI_SYSTEM_RESET:
+ smc32(PSCI_SYSTEM_RESET, 0, 0, 0, 0, 0, 0,
+ SMCCC_CALLER_HYPERVISOR);
+ panic("System reset failed");
+ break;
+
+ case PSCI_AFFINITY_INFO:
+ c = cpu_find(arg0);
+ if (!c) {
+ *ret = PSCI_ERROR_INVALID_PARAMETERS;
+ break;
+ }
+
+ if (arg1 != 0) {
+ *ret = PSCI_ERROR_NOT_SUPPORTED;
+ break;
+ }
+
+ sl_lock(&c->lock);
+ if (c->is_on) {
+ *ret = PSCI_RETURN_ON;
+ } else {
+ *ret = PSCI_RETURN_OFF;
+ }
+ sl_unlock(&c->lock);
+ break;
+
+ case PSCI_CPU_SUSPEND: {
+ /*
+ * Update vCPU state to wake from the provided entry point but
+ * if suspend returns, for example because it failed or was a
+ * standby power state, the SMC will return and the updated
+ * vCPU registers will be ignored.
+ */
+ arch_regs_set_pc_arg(&vcpu->regs, ipa_init(arg1), arg2);
+ smc_res = smc64(PSCI_CPU_SUSPEND, arg0, (uintreg_t)&cpu_entry,
+ (uintreg_t)vcpu->cpu, 0, 0, 0,
+ SMCCC_CALLER_HYPERVISOR);
+ *ret = smc_res.func;
+ break;
+ }
+
+ case PSCI_CPU_OFF:
+ cpu_off(vcpu->cpu);
+ smc32(PSCI_CPU_OFF, 0, 0, 0, 0, 0, 0, SMCCC_CALLER_HYPERVISOR);
+ panic("CPU off failed");
+ break;
+
+ case PSCI_CPU_ON:
+ c = cpu_find(arg0);
+ if (!c) {
+ *ret = PSCI_ERROR_INVALID_PARAMETERS;
+ break;
+ }
+
+ if (cpu_on(c, ipa_init(arg1), arg2)) {
+ *ret = PSCI_ERROR_ALREADY_ON;
+ break;
+ }
+
+ /*
+ * There's a race when turning a CPU on when it's in the
+ * process of turning off. We need to loop here while it is
+ * reported that the CPU is on (because it's about to turn
+ * itself off).
+ */
+ do {
+ smc_res = smc64(PSCI_CPU_ON, arg0,
+ (uintreg_t)&cpu_entry, (uintreg_t)c, 0,
+ 0, 0, SMCCC_CALLER_HYPERVISOR);
+ *ret = smc_res.func;
+ } while (*ret == PSCI_ERROR_ALREADY_ON);
+
+ if (*ret != PSCI_RETURN_SUCCESS) {
+ cpu_off(c);
+ }
+ break;
+
+ case PSCI_MIGRATE:
+ case PSCI_MIGRATE_INFO_TYPE:
+ case PSCI_MIGRATE_INFO_UP_CPU:
+ case PSCI_CPU_FREEZE:
+ case PSCI_CPU_DEFAULT_SUSPEND:
+ case PSCI_NODE_HW_STATE:
+ case PSCI_SYSTEM_SUSPEND:
+ case PSCI_SET_SYSPEND_MODE:
+ case PSCI_STAT_RESIDENCY:
+ case PSCI_STAT_COUNT:
+ case PSCI_SYSTEM_RESET2:
+ case PSCI_MEM_PROTECT:
+ case PSCI_MEM_PROTECT_CHECK_RANGE:
+ /* Block all other known PSCI calls. */
+ *ret = PSCI_ERROR_NOT_SUPPORTED;
+ break;
+
+ default:
+ return false;
+ }
+
+ return true;
+}
+
+/**
+ * Convert a PSCI CPU / affinity ID for a secondary VM to the corresponding vCPU
+ * index.
+ */
+spci_vcpu_index_t vcpu_id_to_index(cpu_id_t vcpu_id)
+{
+ /* For now we use indices as IDs for the purposes of PSCI. */
+ return vcpu_id;
+}
+
+/**
+ * Handles PSCI requests received via HVC or SMC instructions from a secondary
+ * VM.
+ *
+ * A minimal PSCI 1.1 interface is offered which can start and stop vCPUs in
+ * collaboration with the scheduler in the primary VM.
+ *
+ * Returns true if the request was a PSCI one, false otherwise.
+ */
+bool psci_secondary_vm_handler(struct vcpu *vcpu, uint32_t func, uintreg_t arg0,
+ uintreg_t arg1, uintreg_t arg2, uintreg_t *ret,
+ struct vcpu **next)
+{
+ switch (func & ~SMCCC_CONVENTION_MASK) {
+ case PSCI_VERSION:
+ *ret = PSCI_VERSION_1_1;
+ break;
+
+ case PSCI_FEATURES:
+ switch (arg0 & ~SMCCC_CONVENTION_MASK) {
+ case PSCI_CPU_SUSPEND:
+ /*
+ * Does not offer OS-initiated mode but does use
+ * extended StateID Format.
+ */
+ *ret = 0x2;
+ break;
+
+ case PSCI_VERSION:
+ case PSCI_FEATURES:
+ case PSCI_AFFINITY_INFO:
+ case PSCI_CPU_OFF:
+ case PSCI_CPU_ON:
+ /* These are supported without special features. */
+ *ret = 0;
+ break;
+
+ default:
+ /* Everything else is unsupported. */
+ *ret = PSCI_ERROR_NOT_SUPPORTED;
+ break;
+ }
+ break;
+
+ case PSCI_AFFINITY_INFO: {
+ cpu_id_t target_affinity = arg0;
+ uint32_t lowest_affinity_level = arg1;
+ struct vm *vm = vcpu->vm;
+ struct vcpu_locked target_vcpu;
+ spci_vcpu_index_t target_vcpu_index =
+ vcpu_id_to_index(target_affinity);
+
+ if (lowest_affinity_level != 0) {
+ /* Affinity levels greater than 0 not supported. */
+ *ret = PSCI_ERROR_INVALID_PARAMETERS;
+ break;
+ }
+
+ if (target_vcpu_index >= vm->vcpu_count) {
+ *ret = PSCI_ERROR_INVALID_PARAMETERS;
+ break;
+ }
+
+ target_vcpu = vcpu_lock(vm_get_vcpu(vm, target_vcpu_index));
+ *ret = vcpu_is_off(target_vcpu) ? PSCI_RETURN_OFF
+ : PSCI_RETURN_ON;
+ vcpu_unlock(&target_vcpu);
+ break;
+ }
+
+ case PSCI_CPU_SUSPEND: {
+ /*
+ * Downgrade suspend request to WFI and return SUCCESS, as
+ * allowed by the specification.
+ */
+ *next = api_wait_for_interrupt(vcpu);
+ *ret = PSCI_RETURN_SUCCESS;
+ break;
+ }
+
+ case PSCI_CPU_OFF:
+ /*
+ * Should never return to the caller, but in case it somehow
+ * does.
+ */
+ *ret = PSCI_ERROR_DENIED;
+ /* Tell the scheduler not to run the vCPU again. */
+ *next = api_vcpu_off(vcpu);
+ break;
+
+ case PSCI_CPU_ON: {
+ /* Parameter names as per PSCI specification. */
+ cpu_id_t target_cpu = arg0;
+ ipaddr_t entry_point_address = ipa_init(arg1);
+ uint64_t context_id = arg2;
+ spci_vcpu_index_t target_vcpu_index =
+ vcpu_id_to_index(target_cpu);
+ struct vm *vm = vcpu->vm;
+ struct vcpu *target_vcpu;
+
+ if (target_vcpu_index >= vm->vcpu_count) {
+ *ret = PSCI_ERROR_INVALID_PARAMETERS;
+ break;
+ }
+
+ target_vcpu = vm_get_vcpu(vm, target_vcpu_index);
+
+ if (vcpu_secondary_reset_and_start(
+ target_vcpu, entry_point_address, context_id)) {
+ /*
+ * Tell the scheduler that it can start running the new
+ * vCPU now.
+ */
+ *next = api_wake_up(vcpu, target_vcpu);
+ *ret = PSCI_RETURN_SUCCESS;
+ } else {
+ *ret = PSCI_ERROR_ALREADY_ON;
+ }
+
+ break;
+ }
+
+ case PSCI_SYSTEM_OFF:
+ case PSCI_SYSTEM_RESET:
+ case PSCI_MIGRATE:
+ case PSCI_MIGRATE_INFO_TYPE:
+ case PSCI_MIGRATE_INFO_UP_CPU:
+ case PSCI_CPU_FREEZE:
+ case PSCI_CPU_DEFAULT_SUSPEND:
+ case PSCI_NODE_HW_STATE:
+ case PSCI_SYSTEM_SUSPEND:
+ case PSCI_SET_SYSPEND_MODE:
+ case PSCI_STAT_RESIDENCY:
+ case PSCI_STAT_COUNT:
+ case PSCI_SYSTEM_RESET2:
+ case PSCI_MEM_PROTECT:
+ case PSCI_MEM_PROTECT_CHECK_RANGE:
+ /* Block all other known PSCI calls. */
+ *ret = PSCI_ERROR_NOT_SUPPORTED;
+ break;
+
+ default:
+ return false;
+ }
+
+ return true;
+}
+
+/**
+ * Handles PSCI requests received via HVC or SMC instructions from a VM.
+ * Requests from primary and secondary VMs are dealt with differently.
+ *
+ * Returns true if the request was a PSCI one, false otherwise.
+ */
+bool psci_handler(struct vcpu *vcpu, uint32_t func, uintreg_t arg0,
+ uintreg_t arg1, uintreg_t arg2, uintreg_t *ret,
+ struct vcpu **next)
+{
+ if (vcpu->vm->id == HF_PRIMARY_VM_ID) {
+ return psci_primary_vm_handler(vcpu, func, arg0, arg1, arg2,
+ ret);
+ }
+ return psci_secondary_vm_handler(vcpu, func, arg0, arg1, arg2, ret,
+ next);
+}
diff --git a/src/arch/aarch64/hypervisor/psci_handler.h b/src/arch/aarch64/hypervisor/psci_handler.h
new file mode 100644
index 0000000..479c1cd
--- /dev/null
+++ b/src/arch/aarch64/hypervisor/psci_handler.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright 2018 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <stdint.h>
+
+#include "hf/arch/types.h"
+
+#include "hf/cpu.h"
+
+bool psci_handler(struct vcpu *vcpu, uint32_t func, uintreg_t arg0,
+ uintreg_t arg1, uintreg_t arg2, uintreg_t *ret,
+ struct vcpu **next);
diff --git a/src/arch/aarch64/hypervisor/sysregs.c b/src/arch/aarch64/hypervisor/sysregs.c
new file mode 100644
index 0000000..06385eb
--- /dev/null
+++ b/src/arch/aarch64/hypervisor/sysregs.c
@@ -0,0 +1,112 @@
+/*
+ * Copyright 2019 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "sysregs.h"
+
+#include "msr.h"
+
+/**
+ * Returns the value for HCR_EL2 for the particular VM.
+ * For now, the primary VM has one value and all secondary VMs share a value.
+ */
+uintreg_t get_hcr_el2_value(spci_vm_id_t vm_id)
+{
+ uintreg_t hcr_el2_value = 0;
+
+ /* Baseline values for all VMs. */
+
+ /*
+ * Trap access to registers in ID group 3. These registers report on
+ * the underlying support for CPU features. Because Hafnium restricts
+ * certain features, e.g., RAS, it should emulate access to these
+ * registers to report the correct set of features supported.
+ */
+ hcr_el2_value |= HCR_EL2_TID3;
+
+ /* Execution state for EL1 is AArch64. */
+ hcr_el2_value |= HCR_EL2_RW;
+
+ /* Trap implementation registers and functionality. */
+ hcr_el2_value |= HCR_EL2_TACR | HCR_EL2_TIDCP;
+
+ /* Trap SMC instructions. */
+ hcr_el2_value |= HCR_EL2_TSC;
+
+ /*
+ * Translation table access made as part of a stage 1 translation
+	 * table walk is subject to a stage 2 translation.
+ */
+ hcr_el2_value |= HCR_EL2_PTW;
+
+	/* Enable stage 2 address translation. */
+ hcr_el2_value |= HCR_EL2_VM;
+
+ /* Trap cache maintenance instructions that operate by Set/Way. */
+ hcr_el2_value |= HCR_EL2_TSW;
+
+ /* Do *not* trap PAuth. APK and API bits *disable* trapping when set. */
+ hcr_el2_value |= HCR_EL2_APK | HCR_EL2_API;
+
+ /* Baseline values for all secondary VMs. */
+ if (vm_id != HF_PRIMARY_VM_ID) {
+ /*
+ * Set the minimum shareability domain to barrier instructions
+ * as inner shareable.
+ */
+ hcr_el2_value |= HCR_EL2_BSU_INNER_SHAREABLE;
+
+ /*
+ * Broadcast instructions related to invalidating the TLB within
+ * the Inner Shareable domain.
+ */
+ hcr_el2_value |= HCR_EL2_FB;
+
+ /* Route physical SError/IRQ/FIQ interrupts to EL2. */
+ hcr_el2_value |= HCR_EL2_AMO | HCR_EL2_IMO | HCR_EL2_FMO;
+
+ /* Trap wait for event/interrupt instructions. */
+ hcr_el2_value |= HCR_EL2_TWE | HCR_EL2_TWI;
+ }
+
+ return hcr_el2_value;
+}
+
+/**
+ * Returns the default value for MDCR_EL2.
+ */
+uintreg_t get_mdcr_el2_value(void)
+{
+ uintreg_t mdcr_el2_value = read_msr(MDCR_EL2);
+ uintreg_t pmcr_el0 = read_msr(PMCR_EL0);
+
+ /* Baseline values for all VMs. */
+
+ /* Disable cycle and event counting at EL2. */
+ mdcr_el2_value |= MDCR_EL2_HCCD | MDCR_EL2_HPMD;
+
+ /* All available event counters accessible from all exception levels. */
+ mdcr_el2_value |= GET_PMCR_EL0_N(pmcr_el0) & MDCR_EL2_HPMN;
+
+ return mdcr_el2_value;
+}
+
+/**
+ * Returns the value for CPTR_EL2 for the CPU.
+ */
+uintreg_t get_cptr_el2_value(void)
+{
+ return CPTR_EL2_TTA;
+}
diff --git a/src/arch/aarch64/hypervisor/sysregs.h b/src/arch/aarch64/hypervisor/sysregs.h
new file mode 100644
index 0000000..250c9bf
--- /dev/null
+++ b/src/arch/aarch64/hypervisor/sysregs.h
@@ -0,0 +1,451 @@
+/*
+ * Copyright 2019 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include "hf/arch/types.h"
+
+#include "hf/cpu.h"
+
+#include "vmapi/hf/spci.h"
+
+/**
+ * RT value that indicates an access to register XZR (always 0).
+ * See Arm Architecture Reference Manual Armv8-A, C1.2.5
+ */
+#define RT_REG_XZR (UINT64_C(31))
+
+/**
+ * Hypervisor (EL2) Cycle Count Disable.
+ */
+#define MDCR_EL2_HCCD (UINT64_C(0x1) << 23)
+
+/**
+ * Controls traps for Trace Filter when Self-hosted Trace is implemented.
+ */
+#define MDCR_EL2_TTRF (UINT64_C(0x1) << 19)
+
+/**
+ * Hypervisor (EL2) Event Count Disable.
+ */
+#define MDCR_EL2_HPMD (UINT64_C(0x1) << 17)
+
+/**
+ * Trap Performance Monitor Sampling.
+ * Traps access to Statistical Profiling control registers from EL1 when
+ * the Statistical Profiling Extension (SPE) is implemented.
+ */
+#define MDCR_EL2_TPMS (UINT64_C(0x1) << 14)
+
+/**
+ * Controls the owning translation regime and access to Profiling Buffer control
+ * registers from EL1. Depends on whether SPE is implemented.
+ */
+#define MDCR_EL2_E2PB (UINT64_C(0x3) << 12)
+
+/**
+ * Controls traps for Debug ROM.
+ */
+#define MDCR_EL2_TDRA (UINT64_C(0x1) << 11)
+
+/**
+ * Controls traps for debug OS-Related Register accesses when DoubleLock is
+ * implemented.
+ */
+#define MDCR_EL2_TDOSA (UINT64_C(0x1) << 10)
+
+/**
+ * Controls traps for remaining Debug Registers not trapped by TDRA and TDOSA.
+ */
+#define MDCR_EL2_TDA (UINT64_C(0x1) << 9)
+
+/**
+ * Controls traps for all debug exceptions (e.g., breakpoints).
+ */
+#define MDCR_EL2_TDE (UINT64_C(0x1) << 8)
+
+/**
+ * Controls traps for all performance monitor register accesses other than
+ * PMCR_EL0.
+ */
+#define MDCR_EL2_TPM (UINT64_C(0x1) << 6)
+
+/**
+ * Controls traps for performance monitor register PMCR_EL0.
+ */
+#define MDCR_EL2_TPMCR (UINT64_C(0x1) << 5)
+
+/**
+ * Defines the number of event counters that are accessible from various
+ * exception levels, if permitted. Dependent on whether PMUv3
+ * is implemented.
+ */
+#define MDCR_EL2_HPMN (UINT64_C(0x1f) << 0)
+
+/**
+ * System register are identified by op0, op2, op1, crn, crm. The ISS encoding
+ * includes also rt and direction. Exclude them, @see D13.2.37 (D13-2977).
+ */
+#define ISS_SYSREG_MASK \
+ (((UINT64_C(0x1) << 22) - UINT64_C(0x1)) & /* Select the ISS bits */ \
+ ~(UINT64_C(0x1f) << 5) & /* exclude rt */ \
+ ~UINT64_C(0x1) /* exclude direction */)
+
+#define GET_ISS_SYSREG(esr) (ISS_SYSREG_MASK & (esr))
+
+/**
+ * Op0 from the ISS encoding in the ESR.
+ */
+#define ISS_OP0_MASK UINT64_C(0x300000)
+#define ISS_OP0_SHIFT 20
+#define GET_ISS_OP0(esr) ((ISS_OP0_MASK & (esr)) >> ISS_OP0_SHIFT)
+
+/**
+ * Op1 from the ISS encoding in the ESR.
+ */
+#define ISS_OP1_MASK UINT64_C(0x1c000)
+#define ISS_OP1_SHIFT 14
+#define GET_ISS_OP1(esr) ((ISS_OP1_MASK & (esr)) >> ISS_OP1_SHIFT)
+
+/**
+ * Op2 from the ISS encoding in the ESR.
+ */
+#define ISS_OP2_MASK UINT64_C(0xe0000)
+#define ISS_OP2_SHIFT 17
+#define GET_ISS_OP2(esr) ((ISS_OP2_MASK & (esr)) >> ISS_OP2_SHIFT)
+
+/**
+ * CRn from the ISS encoding in the ESR.
+ */
+#define ISS_CRN_MASK UINT64_C(0x3c00)
+#define ISS_CRN_SHIFT 10
+#define GET_ISS_CRN(esr) ((ISS_CRN_MASK & (esr)) >> ISS_CRN_SHIFT)
+
+/**
+ * CRm from the ISS encoding in the ESR.
+ */
+#define ISS_CRM_MASK UINT64_C(0x1e)
+#define ISS_CRM_SHIFT 1
+#define GET_ISS_CRM(esr) ((ISS_CRM_MASK & (esr)) >> ISS_CRM_SHIFT)
+
+/**
+ * Rt, which identifies the general purpose register used for the operation.
+ */
+#define ISS_RT_MASK UINT64_C(0x3e0)
+#define ISS_RT_SHIFT 5
+#define GET_ISS_RT(esr) ((ISS_RT_MASK & (esr)) >> ISS_RT_SHIFT)
+
+/**
+ * Direction (i.e., read (1) or write (0)) is the first bit in the ISS/ESR.
+ */
+#define ISS_DIRECTION_MASK UINT64_C(0x1)
+
+/**
+ * Gets the direction of the system register access, read (1) or write (0).
+ */
+#define GET_ISS_DIRECTION(esr) (ISS_DIRECTION_MASK & (esr))
+
+/**
+ * True if the ISS encoded in the esr indicates a read of the system register.
+ */
+#define ISS_IS_READ(esr) (ISS_DIRECTION_MASK & (esr))
+
+/**
+ * Returns the ISS encoding given the various instruction encoding parameters.
+ */
+#define GET_ISS_ENCODING(op0, op1, crn, crm, op2) \
+ ((op0) << ISS_OP0_SHIFT | (op2) << ISS_OP2_SHIFT | \
+ (op1) << ISS_OP1_SHIFT | (crn) << ISS_CRN_SHIFT | \
+ (crm) << ISS_CRM_SHIFT)
+
+#define PMCR_EL0_N_MASK UINT64_C(0xf800)
+#define PMCR_EL0_N_SHIFT 11
+#define GET_PMCR_EL0_N(pmcr) ((PMCR_EL0_N_MASK & (pmcr)) >> PMCR_EL0_N_SHIFT)
+
+/*
+ * Define various configurations bits for the Hypervisor Configuration Register,
+ * HCR_EL2. See Arm Architecture Reference Manual, D13.2.46.
+ */
+
+/**
+ * Trap ID group 5 (Armv8.5-MemTag related).
+ */
+#define HCR_EL2_TID5 (UINT64_C(0x1) << 58)
+
+/**
+ * Trap TLB maintenance instructions that operate on the Outer Shareable domain.
+ */
+#define HCR_EL2_TTLBOS (UINT64_C(0x1) << 55)
+
+/**
+ * Trap TLB maintenance instructions that operate on the Inner Shareable domain.
+ */
+#define HCR_EL2_TTLBIS (UINT64_C(0x1) << 54)
+
+/**
+ * Trap cache maintenance instructions that operate to the Point of Unification.
+ */
+#define HCR_EL2_TOCU (UINT64_C(0x1) << 52)
+
+/**
+ * Trap ICIALLUIS/IC IALLUIS cache maintenance instructions.
+ */
+#define HCR_EL2_TICAB (UINT64_C(0x1) << 50)
+
+/**
+ * Trap ID group 4.
+ */
+#define HCR_EL2_TID4 (UINT64_C(0x1) << 49)
+
+/**
+ * When set *disables* traps on Pointer Authentication related instruction
+ * execution.
+ */
+#define HCR_EL2_API (UINT64_C(0x1) << 41)
+
+/**
+ * When set *disables* traps on access to Pointer Authentication's "key"
+ * registers.
+ */
+#define HCR_EL2_APK (UINT64_C(0x1) << 40)
+
+/**
+ * Trap Error record accesses when RAS is implemented.
+ */
+#define HCR_EL2_TERR (UINT64_C(0x1) << 36)
+
+/**
+ * Trap LOR register accesses when LORegions is implemented.
+ */
+#define HCR_EL2_TLOR (UINT64_C(0x1) << 35)
+
+/**
+ * Stage 2 Instruction access cacheability disable.
+ * When set, forces all stage 2 translations for instruction accesses to normal
+ * memory to be non-cacheable.
+ */
+#define HCR_EL2_ID (UINT64_C(0x1) << 33)
+
+/**
+ * Stage 2 Data access cacheability disable.
+ * When set, forces all stage 2 translations for data accesses to normal memory
+ * to be non-cacheable.
+ */
+#define HCR_EL2_CD (UINT64_C(0x1) << 32)
+
+/**
+ * Execution state control for lower exception levels.
+ * When set, the execution state for EL1 is AArch64.
+ */
+#define HCR_EL2_RW (UINT64_C(0x1) << 31)
+
+/**
+ * Trap reads of Virtual Memory controls.
+ */
+#define HCR_EL2_TRVM (UINT64_C(0x1) << 30)
+
+/**
+ * Trap writes of Virtual Memory controls.
+ */
+#define HCR_EL2_TVM (UINT64_C(0x1) << 26)
+
+/**
+ * Trap TLB maintenance instructions.
+ */
+#define HCR_EL2_TTLB (UINT64_C(0x1) << 25)
+
+/**
+ * Trap cache maintenance instructions.
+ */
+#define HCR_EL2_TPU (UINT64_C(0x1) << 24)
+
+/**
+ * Trap data or unified cache maintenance instructions.
+ */
+#define HCR_EL2_TPCP (UINT64_C(0x1) << 23)
+
+/**
+ * Trap data or unified cache maintenance instructions that operate by Set/Way.
+ */
+#define HCR_EL2_TSW (UINT64_C(0x1) << 22)
+
+/**
+ * Trap Auxiliary Control Registers.
+ * When set, traps ACTLR_EL1 accesses to EL2.
+ */
+#define HCR_EL2_TACR (UINT64_C(0x1) << 21)
+
+/**
+ * Trap implementation defined functionality.
+ * When set, traps EL1 accesses to implementation defined encodings to EL2.
+ */
+#define HCR_EL2_TIDCP (UINT64_C(0x1) << 20)
+
+/**
+ * Trap SMC instructions.
+ * When set, traps EL1 execution of SMC instructions to EL2.
+ */
+#define HCR_EL2_TSC (UINT64_C(0x1) << 19)
+
+/**
+ * Trap ID group 3.
+ */
+#define HCR_EL2_TID3 (UINT64_C(0x1) << 18)
+
+/**
+ * Trap ID group 2.
+ */
+#define HCR_EL2_TID2 (UINT64_C(0x1) << 17)
+
+/**
+ * Trap ID group 1.
+ */
+#define HCR_EL2_TID1 (UINT64_C(0x1) << 16)
+
+/**
+ * Trap ID group 0.
+ */
+#define HCR_EL2_TID0 (UINT64_C(0x1) << 15)
+
+/**
+ * Traps EL0 and EL1 execution of Wait for Event (WFE) instructions to EL2.
+ */
+#define HCR_EL2_TWE (UINT64_C(0x1) << 14)
+
+/**
+ * Trap WFI instructions.
+ * When set, traps EL0 and EL1 execution of WFI instructions to EL2.
+ */
+#define HCR_EL2_TWI (UINT64_C(0x1) << 13)
+
+/**
+ * Barrier Shareability upgrade (2 bits).
+ * When set to 0b01, the minimum shareability domain to barrier instructions
+ * is inner shareable.
+ */
+#define HCR_EL2_BSU_INNER_SHAREABLE (UINT64_C(0x1) << 10)
+
+/**
+ * Force Broadcast.
+ * When set certain instructions related to invalidating the TLB are broadcast
+ * within the Inner Shareable domain.
+ */
+#define HCR_EL2_FB (UINT64_C(0x1) << 9)
+
+/**
+ * Virtual IRQ Interrupt.
+ * When set indicates that there is a virtual IRQ pending.
+ */
+#define HCR_EL2_VI (UINT64_C(0x1) << 7)
+
+/**
+ * Physical SError Routing.
+ * When set, physical SError interrupts are taken to EL2, unless routed to EL3.
+ */
+#define HCR_EL2_AMO (UINT64_C(0x1) << 5)
+
+/**
+ * Physical IRQ Routing.
+ * When set, physical IRQ interrupts are taken to EL2, unless routed to EL3.
+ */
+#define HCR_EL2_IMO (UINT64_C(0x1) << 4)
+
+/**
+ * Physical FIQ Routing.
+ * When set, physical FIQ interrupts are taken to EL2, unless routed to EL3.
+ */
+#define HCR_EL2_FMO (UINT64_C(0x1) << 3)
+
+/**
+ * Protected Table Walk.
+ * When set a translation table access made as part of a stage 1 translation
+ * table walk is subject to a stage 2 translation. The memory access generates
+ * a stage 2 permission fault.
+ */
+#define HCR_EL2_PTW (UINT64_C(0x1) << 2)
+
+/**
+ * Set/Way Invalidation Override.
+ * Causes EL1 execution of the data cache invalidate by set/way instructions to
+ * perform a data cache clean and invalidate by set/way.
+ */
+#define HCR_EL2_SWIO (UINT64_C(0x1) << 1)
+
+/**
+ * Virtualization enable.
+ * When set EL1 and EL0 stage 2 address translation is enabled.
+ */
+#define HCR_EL2_VM (UINT64_C(0x1) << 0)
+
+/**
+ * Trap system register accesses to trace registers.
+ * Traps accesses to ETM registers using the register interface. Does not trap
+ * on accesses through the memory-mapped interface. NOTE(review): bit 28 is the
+ * TTA position for HCR_EL2.E2H==1; with E2H==0 TTA is bit 20 — confirm. */
+#define CPTR_EL2_TTA (UINT64_C(0x1) << 28)
+
+/*
+ * Process State Bit definitions.
+ *
+ * These apply to the PSTATE, as well as registers that contain PSTATE fields,
+ * e.g., SPSR_EL1.
+ */
+
+/**
+ * Debug exception mask bit.
+ */
+#define PSR_D (UINT64_C(1) << 9)
+
+/**
+ * Asynchronous SError interrupt mask bit.
+ */
+#define PSR_A (UINT64_C(1) << 8)
+
+/**
+ * Asynchronous IRQ interrupt mask bit.
+ */
+#define PSR_I (UINT64_C(1) << 7)
+
+/**
+ * Asynchronous FIQ interrupt mask bit.
+ */
+#define PSR_F (UINT64_C(1) << 6)
+
+/**
+ * AArch32 State bit.
+ */
+#define PSR_ARCH_MODE_32 (UINT64_C(1) << 4)
+
+/**
+ * PE Mode bit mask.
+ */
+#define PSR_PE_MODE_MASK UINT64_C(0xf)
+
+/**
+ * PE Mode: EL0t.
+ */
+#define PSR_PE_MODE_EL0T UINT64_C(0x0)
+
+/**
+ * PE Mode: EL1h.
+ */
+#define PSR_PE_MODE_EL1H UINT64_C(0x5)
+
+uintreg_t get_hcr_el2_value(spci_vm_id_t vm_id);
+
+uintreg_t get_mdcr_el2_value(void);
+
+uintreg_t get_cptr_el2_value(void);
diff --git a/src/arch/aarch64/hypervisor/vm.c b/src/arch/aarch64/hypervisor/vm.c
new file mode 100644
index 0000000..619ee65
--- /dev/null
+++ b/src/arch/aarch64/hypervisor/vm.c
@@ -0,0 +1,60 @@
+/*
+ * Copyright 2019 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "hf/arch/vm.h"
+
+#include "hypervisor/feature_id.h"
+
+void arch_vm_features_set(struct vm *vm)
+{
+ /* Features to trap for all VMs. */
+
+ /*
+ * It is not safe to enable this yet, in part, because the feature's
+ * registers are not context switched in Hafnium.
+ */
+ vm->arch.trapped_features |= HF_FEATURE_LOR;
+
+ vm->arch.trapped_features |= HF_FEATURE_SPE;
+
+ vm->arch.trapped_features |= HF_FEATURE_TRACE;
+
+ vm->arch.trapped_features |= HF_FEATURE_DEBUG;
+
+ if (vm->id != HF_PRIMARY_VM_ID) {
+ /* Features to trap only for the secondary VMs. */
+
+ vm->arch.trapped_features |= HF_FEATURE_PERFMON;
+
+ /*
+ * TODO(b/132395845): Access to RAS registers is not trapped at
+ * the moment for the primary VM, only for the secondaries. RAS
+ * register access isn't needed now, but it might be
+ * required for debugging. When Hafnium introduces debug vs
+ * release builds, trap accesses for primary VMs in release
+ * builds, but do not trap them in debug builds.
+ */
+ vm->arch.trapped_features |= HF_FEATURE_RAS;
+
+ /*
+ * The PAuth mechanism holds state in the key registers. Only
+ * the primary VM is allowed to use the PAuth functionality for
+ * now. This prevents Hafnium from having to save/restore the
+ * key register on a VM switch.
+ */
+ vm->arch.trapped_features |= HF_FEATURE_PAUTH;
+ }
+}
diff --git a/src/arch/aarch64/inc/hf/arch/barriers.h b/src/arch/aarch64/inc/hf/arch/barriers.h
new file mode 100644
index 0000000..3d61434
--- /dev/null
+++ b/src/arch/aarch64/inc/hf/arch/barriers.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright 2018 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+/** AArch64-specific API */
+
+/**
+ * Ensures explicit memory accesses before this point are completed before any
+ * later memory accesses are performed. The instruction argument specifies:
+ * - the shareability domain over which the instruction must operate,
+ * - the accesses for which the instruction operates.
+ */
+#define dmb(arg) \
+ do { \
+ __asm__ volatile("dmb " #arg); \
+ } while (0)
+
+/**
+ * Ensures explicit memory access and management instructions have completed
+ * before continuing. The instruction argument specifies:
+ * - the shareability domain over which the instruction must operate,
+ * - the accesses for which the instruction operates.
+ */
+#define dsb(arg) \
+ do { \
+ __asm__ volatile("dsb " #arg); \
+ } while (0)
+
+/**
+ * Flushes the instruction pipeline so that instructions are fetched from
+ * memory.
+ */
+#define isb() \
+ do { \
+ __asm__ volatile("isb"); \
+ } while (0)
+
+/** Platform-agnostic API */
+
+/**
+ * Ensures all explicit memory accesses before this point are completed before
+ * any later memory accesses are performed.
+ */
+#define memory_ordering_barrier() dmb(sy)
+
+/**
+ * Ensures all explicit memory access and management instructions have completed
+ * before continuing.
+ */
+#define data_sync_barrier() dsb(sy)
diff --git a/src/arch/aarch64/inc/hf/arch/spinlock.h b/src/arch/aarch64/inc/hf/arch/spinlock.h
new file mode 100644
index 0000000..3e3d1a0
--- /dev/null
+++ b/src/arch/aarch64/inc/hf/arch/spinlock.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright 2019 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+/**
+ * Spinlock implementation using Armv8.0 LDXR/STXR pair and a WFE pause.
+ *
+ * Implementation using C11 atomics also generates a LDXR/STXR pair but no WFE.
+ * Without it we observe that Cortex A72 can easily livelock and not make
+ * forward progress.
+ *
+ * TODO(b/141087046): Forward progress is still not guaranteed as even with WFE
+ * we see that A72 can livelock for extremely tight loops. We should investigate
+ * the guarantees provided by atomic instructions introduced in Armv8.1 LSE.
+ */
+
+#include <stdint.h>
+
+#include "hf/arch/types.h"
+
+struct spinlock {
+	/*
+	 * Lock word: 0 when the lock is free, 1 while it is held (sl_lock
+	 * stores 1 via STXR; sl_unlock stores zero via STLR).
+	 */
+	volatile uint32_t v;
+};
+
+#define SPINLOCK_INIT ((struct spinlock){.v = 0})
+
+static inline void sl_lock(struct spinlock *l)
+{
+	register uintreg_t tmp1;
+	register uintreg_t tmp2;
+
+	/*
+	 * Acquire the lock with a LDAXR/STXR pair (acquire semantics on the
+	 * load instruction). Pause using WFE if the lock is currently taken.
+	 * This is NOT guaranteed to make progress.
+	 */
+	/*
+	 * NOTE(review): the asm lists only a "cc" clobber — with no "memory"
+	 * clobber the acquire ordering constrains the CPU but not compiler
+	 * caching/reordering of memory across this call; confirm intended.
+	 */
+	__asm__ volatile(
+		"	mov %w2, #1\n"
+		"	sevl\n" /* set event bit */
+		"1:	wfe\n" /* wait for event, clear event bit */
+		"2:	ldaxr %w1, [%0]\n" /* load lock value */
+		"	cbnz %w1, 1b\n" /* if lock taken, goto WFE */
+		"	stxr %w1, %w2, [%0]\n" /* try to take lock */
+		"	cbnz %w1, 2b\n" /* loop if unsuccessful */
+		: "+r"(l), "=&r"(tmp1), "=&r"(tmp2)
+		:
+		: "cc");
+}
+
+static inline void sl_unlock(struct spinlock *l)
+{
+	/*
+	 * Store zero to lock's value with release semantics. This triggers an
+	 * event which wakes up other threads waiting on a lock (no SEV needed).
+	 */
+	/*
+	 * NOTE(review): no "memory" clobber here either, so the compiler is
+	 * not explicitly barred from sinking protected stores below the STLR;
+	 * confirm callers rely only on hardware ordering.
+	 */
+	__asm__ volatile("stlr wzr, [%0]" : "+r"(l)::"cc");
+}
diff --git a/src/arch/aarch64/inc/hf/arch/types.h b/src/arch/aarch64/inc/hf/arch/types.h
new file mode 100644
index 0000000..667fc20
--- /dev/null
+++ b/src/arch/aarch64/inc/hf/arch/types.h
@@ -0,0 +1,152 @@
+/*
+ * Copyright 2018 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <stdalign.h>
+#include <stdint.h>
+
+#include "hf/spci.h"
+#include "hf/static_assert.h"
+
+/* 4 KiB translation granule: 12 offset bits, 9 index bits per table level. */
+#define PAGE_BITS 12
+#define PAGE_LEVEL_BITS 9
+#define STACK_ALIGN 16
+#define FLOAT_REG_BYTES 16
+/* AArch64 has 31 general purpose registers, x0-x30. */
+#define NUM_GP_REGS 31
+
+/** The type of a page table entry (PTE). */
+typedef uint64_t pte_t;
+
+/** Integer type large enough to hold a physical address. */
+typedef uintptr_t uintpaddr_t;
+
+/** Integer type large enough to hold a virtual address. */
+typedef uintptr_t uintvaddr_t;
+
+/** The integer type corresponding to the native register size. */
+typedef uint64_t uintreg_t;
+
+/** The ID of a physical or virtual CPU. */
+typedef uint64_t cpu_id_t;
+
+/** A bitset for AArch64 CPU features. */
+typedef uint64_t arch_features_t;
+
+/**
+ * The struct for storing a floating point register.
+ *
+ * Two 64-bit integers are used to avoid the need for FP support at this
+ * level.
+ */
+struct float_reg {
+	alignas(FLOAT_REG_BYTES) uint64_t low;
+	uint64_t high;
+};
+
+static_assert(sizeof(struct float_reg) == FLOAT_REG_BYTES,
+	      "Ensure float register type is 128 bits.");
+
+/** Arch-specific information about a VM. */
+struct arch_vm {
+	/**
+	 * The index of the last vCPU of this VM which ran on each pCPU. Each
+	 * element of this array should only be read or written by code running
+	 * on that CPU, which avoids contention and so no lock is needed to
+	 * access this field.
+	 */
+	spci_vcpu_index_t last_vcpu_on_cpu[MAX_CPUS];
+	/*
+	 * Bitset of CPU features trapped for this VM — presumably consulted
+	 * when handling trapped feature-register accesses; TODO confirm
+	 * against the hypervisor trap handler.
+	 */
+	arch_features_t trapped_features;
+
+	/*
+	 * Masks for feature registers trappable by HCR_EL2.TID3.
+	 */
+	struct {
+		uintreg_t id_aa64mmfr1_el1;
+		uintreg_t id_aa64pfr0_el1;
+		uintreg_t id_aa64dfr0_el1;
+		uintreg_t id_aa64isar1_el1;
+	} tid3_masks;
+};
+
+/** Type to represent the register state of a vCPU. */
+struct arch_regs {
+	/* General purpose registers x0-x30. */
+	uintreg_t r[NUM_GP_REGS];
+	uintreg_t pc;
+	uintreg_t spsr;
+
+	/*
+	 * System registers.
+	 * NOTE: Ordering is important. If adding to or reordering registers
+	 * below, make sure to update src/arch/aarch64/hypervisor/exceptions.S.
+	 */
+	struct {
+		uintreg_t vmpidr_el2;
+		uintreg_t csselr_el1;
+		uintreg_t sctlr_el1;
+		uintreg_t actlr_el1;
+		uintreg_t cpacr_el1;
+		uintreg_t ttbr0_el1;
+		uintreg_t ttbr1_el1;
+		uintreg_t tcr_el1;
+		uintreg_t esr_el1;
+		uintreg_t afsr0_el1;
+		uintreg_t afsr1_el1;
+		uintreg_t far_el1;
+		uintreg_t mair_el1;
+		uintreg_t vbar_el1;
+		uintreg_t contextidr_el1;
+		uintreg_t tpidr_el0;
+		uintreg_t tpidrro_el0;
+		uintreg_t tpidr_el1;
+		uintreg_t amair_el1;
+		uintreg_t cntkctl_el1;
+		uintreg_t sp_el0;
+		uintreg_t sp_el1;
+		uintreg_t elr_el1;
+		uintreg_t spsr_el1;
+		uintreg_t par_el1;
+		uintreg_t hcr_el2;
+		uintreg_t cnthctl_el2;
+		uintreg_t vttbr_el2;
+		uintreg_t mdcr_el2;
+		uintreg_t mdscr_el1;
+		uintreg_t pmccfiltr_el0;
+		uintreg_t pmcr_el0;
+		uintreg_t pmcntenset_el0;
+		uintreg_t pmintenset_el1;
+	} lazy;
+
+	/* Floating point registers: 32 SIMD/FP registers, 128 bits each. */
+	struct float_reg fp[32];
+	uintreg_t fpsr;
+	uintreg_t fpcr;
+
+#if GIC_VERSION == 3 || GIC_VERSION == 4
+	/* GIC state, only saved/restored when built for GICv3 or GICv4. */
+	struct {
+		uintreg_t ich_hcr_el2;
+		uintreg_t icc_sre_el2;
+	} gic;
+#endif
+
+	/*
+	 * Peripheral registers, handled separately from other system registers.
+	 */
+	struct {
+		uintreg_t cntv_cval_el0;
+		uintreg_t cntv_ctl_el0;
+	} peripherals;
+};
diff --git a/src/arch/aarch64/inc/hf/arch/vm/events.h b/src/arch/aarch64/inc/hf/arch/vm/events.h
new file mode 100644
index 0000000..aff4d77
--- /dev/null
+++ b/src/arch/aarch64/inc/hf/arch/vm/events.h
@@ -0,0 +1,20 @@
+/*
+ * Copyright 2019 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+void event_wait(void);
+void event_send_local(void);
diff --git a/src/arch/aarch64/inc/hf/arch/vm/interrupts.h b/src/arch/aarch64/inc/hf/arch/vm/interrupts.h
new file mode 100644
index 0000000..33087b6
--- /dev/null
+++ b/src/arch/aarch64/inc/hf/arch/vm/interrupts.h
@@ -0,0 +1,22 @@
+/*
+ * Copyright 2019 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <stdbool.h>
+
+void exception_setup(void (*irq)(void), bool (*exception)(void));
+void interrupt_wait(void);
diff --git a/src/arch/aarch64/inc/hf/arch/vm/interrupts_gicv3.h b/src/arch/aarch64/inc/hf/arch/vm/interrupts_gicv3.h
new file mode 100644
index 0000000..b98c304
--- /dev/null
+++ b/src/arch/aarch64/inc/hf/arch/vm/interrupts_gicv3.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright 2018 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <stdbool.h>
+#include <stdint.h>
+
+#include "hf/io.h"
+
+#if GIC_VERSION != 3 && GIC_VERSION != 4
+#error This header should only be included for GICv3 or v4.
+#endif
+
+/* Keep macro alignment */
+/* clang-format off */
+
+#define SGI_BASE (GICR_BASE + 0x10000)
+
+#define GICD_CTLR IO32_C(GICD_BASE + 0x0000)
+#define GICD_ISENABLER IO32_ARRAY_C(GICD_BASE + 0x0100, 32)
+#define GICD_ICENABLER IO32_ARRAY_C(GICD_BASE + 0x0180, 32)
+#define GICD_ISPENDR IO32_ARRAY_C(GICD_BASE + 0x0200, 32)
+#define GICD_ICPENDR IO32_ARRAY_C(GICD_BASE + 0x0280, 32)
+#define GICD_ISACTIVER IO32_ARRAY_C(GICD_BASE + 0x0300, 32)
+#define GICD_ICACTIVER IO32_ARRAY_C(GICD_BASE + 0x0380, 32)
+#define GICD_IPRIORITYR IO8_ARRAY_C(GICD_BASE + 0x0400, 1020)
+#define GICD_ITARGETSR IO8_ARRAY_C(GICD_BASE + 0x0800, 1020)
+#define GICD_ICFGR IO32_ARRAY_C(GICD_BASE + 0x0c00, 64)
+#define GICR_WAKER IO32_C(GICR_BASE + 0x0014)
+#define GICR_IGROUPR0 IO32_C(SGI_BASE + 0x0080)
+#define GICR_ISENABLER0 IO32_C(SGI_BASE + 0x0100)
+#define GICR_ICENABLER0 IO32_C(SGI_BASE + 0x0180)
+#define GICR_ISPENDR0 IO32_C(SGI_BASE + 0x0200)
+#define GICR_ICPENDR0 IO32_C(SGI_BASE + 0x0280)
+#define GICR_ISACTIVER0 IO32_C(SGI_BASE + 0x0300)
+#define GICR_ICFGR IO32_ARRAY_C(SGI_BASE + 0x0c00, 32)
+
+/* clang-format on */
+
+void interrupt_gic_setup(void);
+void interrupt_enable(uint32_t intid, bool enable);
+void interrupt_enable_all(bool enable);
+void interrupt_set_priority_mask(uint8_t min_priority);
+void interrupt_set_priority(uint32_t intid, uint8_t priority);
+void interrupt_set_edge_triggered(uint32_t intid, bool edge_triggered);
+void interrupt_send_sgi(uint8_t intid, bool irm, uint8_t affinity3,
+ uint8_t affinity2, uint8_t affinity1,
+ uint16_t target_list);
+uint32_t interrupt_get_and_acknowledge(void);
+void interrupt_end(uint32_t intid);
diff --git a/src/arch/aarch64/inc/hf/arch/vm/mm.h b/src/arch/aarch64/inc/hf/arch/vm/mm.h
new file mode 100644
index 0000000..d212fa5
--- /dev/null
+++ b/src/arch/aarch64/inc/hf/arch/vm/mm.h
@@ -0,0 +1,22 @@
+/*
+ * Copyright 2019 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include "hf/mm.h"
+
+bool arch_vm_mm_init(void);
+void arch_vm_mm_enable(paddr_t table);
diff --git a/src/arch/aarch64/inc/hf/arch/vm/power_mgmt.h b/src/arch/aarch64/inc/hf/arch/vm/power_mgmt.h
new file mode 100644
index 0000000..41ebe49
--- /dev/null
+++ b/src/arch/aarch64/inc/hf/arch/vm/power_mgmt.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright 2018 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <stdbool.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <stdnoreturn.h>
+
+#include "hf/arch/types.h"
+
+/* Power state of a physical CPU. */
+enum power_status {
+	POWER_STATUS_ON,
+	POWER_STATUS_OFF,
+	POWER_STATUS_ON_PENDING,
+};
+
+/**
+ * Holds temporary state used to set up the environment on which CPUs will
+ * start executing.
+ *
+ * vm_cpu_entry() depends on the layout of this struct.
+ */
+struct arch_cpu_start_state {
+	uintptr_t initial_sp;
+	void (*entry)(uintreg_t arg);
+	uintreg_t arg;
+};
+
+/**
+ * Starts the CPU with the given id so that it enters `s->entry` with argument
+ * `s->arg` on stack `s->initial_sp`; returns whether the start request
+ * succeeded (presumably via PSCI CPU_ON — confirm in the implementation).
+ */
+bool arch_cpu_start(uintptr_t id, struct arch_cpu_start_state *s);
+
+/** Stops the calling CPU; does not return. */
+noreturn void arch_cpu_stop(void);
+/** Returns the power status of the given CPU. */
+enum power_status arch_cpu_status(cpu_id_t cpu_id);
+
+/** Powers the whole system off; does not return. */
+noreturn void arch_power_off(void);
diff --git a/src/arch/aarch64/inc/hf/arch/vm/registers.h b/src/arch/aarch64/inc/hf/arch/vm/registers.h
new file mode 100644
index 0000000..f6305af
--- /dev/null
+++ b/src/arch/aarch64/inc/hf/arch/vm/registers.h
@@ -0,0 +1,22 @@
+/*
+ * Copyright 2019 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <stdbool.h>
+
+void fill_fp_registers(double value);
+bool check_fp_register(double value);
diff --git a/src/arch/aarch64/inc/hf/arch/vm/state.h b/src/arch/aarch64/inc/hf/arch/vm/state.h
new file mode 100644
index 0000000..8f2dbe2
--- /dev/null
+++ b/src/arch/aarch64/inc/hf/arch/vm/state.h
@@ -0,0 +1,22 @@
+/*
+ * Copyright 2018 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <stdint.h>
+
+void per_cpu_ptr_set(uintptr_t v);
+uintptr_t per_cpu_ptr_get(void);
diff --git a/src/arch/aarch64/inc/hf/arch/vm/timer.h b/src/arch/aarch64/inc/hf/arch/vm/timer.h
new file mode 100644
index 0000000..d1c07a3
--- /dev/null
+++ b/src/arch/aarch64/inc/hf/arch/vm/timer.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright 2019 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <stdint.h>
+
+#include "../msr.h"
+
+/** Sets the virtual timer's downcounter (CNTV_TVAL_EL0) to `ticks` ticks. */
+static inline void timer_set(uint32_t ticks)
+{
+	write_msr(CNTV_TVAL_EL0, ticks);
+}
+
+/**
+ * Enables the virtual timer: writes CNTV_CTL_EL0 with only bit 0 (ENABLE)
+ * set, which also leaves IMASK clear so the timer interrupt is unmasked.
+ */
+static inline void timer_start(void)
+{
+	write_msr(CNTV_CTL_EL0, 0x00000001);
+}
diff --git a/src/arch/aarch64/irq.c b/src/arch/aarch64/irq.c
new file mode 100644
index 0000000..4114d76
--- /dev/null
+++ b/src/arch/aarch64/irq.c
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2018 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "hf/arch/irq.h"
+
+#include "msr.h"
+
+/** Masks all interrupt classes (Debug, SError, IRQ, FIQ) via DAIFSet #0xf. */
+void arch_irq_disable(void)
+{
+	__asm__ volatile("msr DAIFSet, #0xf");
+}
+
+/** Unmasks all interrupt classes (Debug, SError, IRQ, FIQ) via DAIFClr. */
+void arch_irq_enable(void)
+{
+	__asm__ volatile("msr DAIFClr, #0xf");
+}
diff --git a/src/arch/aarch64/mm.c b/src/arch/aarch64/mm.c
new file mode 100644
index 0000000..f856ac4
--- /dev/null
+++ b/src/arch/aarch64/mm.c
@@ -0,0 +1,700 @@
+/*
+ * Copyright 2018 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "hf/mm.h"
+
+#include "hf/arch/barriers.h"
+#include "hf/arch/cpu.h"
+
+#include "hf/dlog.h"
+
+#include "msr.h"
+
+/* Keep macro alignment */
+/* clang-format off */
+
+#define NON_SHAREABLE UINT64_C(0)
+#define OUTER_SHAREABLE UINT64_C(2)
+#define INNER_SHAREABLE UINT64_C(3)
+
+#define PTE_VALID (UINT64_C(1) << 0)
+#define PTE_LEVEL0_BLOCK (UINT64_C(1) << 1)
+#define PTE_TABLE (UINT64_C(1) << 1)
+
+#define STAGE1_XN (UINT64_C(1) << 54)
+#define STAGE1_PXN (UINT64_C(1) << 53)
+#define STAGE1_CONTIGUOUS (UINT64_C(1) << 52)
+#define STAGE1_DBM (UINT64_C(1) << 51)
+#define STAGE1_NG (UINT64_C(1) << 11)
+#define STAGE1_AF (UINT64_C(1) << 10)
+#define STAGE1_SH(x) ((x) << 8)
+#define STAGE1_AP2 (UINT64_C(1) << 7)
+#define STAGE1_AP1 (UINT64_C(1) << 6)
+#define STAGE1_AP(x) ((x) << 6)
+#define STAGE1_NS (UINT64_C(1) << 5)
+#define STAGE1_ATTRINDX(x) ((x) << 2)
+
+#define STAGE1_READONLY UINT64_C(2)
+#define STAGE1_READWRITE UINT64_C(0)
+
+#define STAGE1_DEVICEINDX UINT64_C(0)
+#define STAGE1_NORMALINDX UINT64_C(1)
+
+#define STAGE2_XN(x) ((x) << 53)
+#define STAGE2_CONTIGUOUS (UINT64_C(1) << 52)
+#define STAGE2_DBM (UINT64_C(1) << 51)
+#define STAGE2_AF (UINT64_C(1) << 10)
+#define STAGE2_SH(x) ((x) << 8)
+#define STAGE2_S2AP(x) ((x) << 6)
+
+#define STAGE2_EXECUTE_ALL UINT64_C(0)
+#define STAGE2_EXECUTE_EL0 UINT64_C(1)
+#define STAGE2_EXECUTE_NONE UINT64_C(2)
+#define STAGE2_EXECUTE_EL1 UINT64_C(3)
+#define STAGE2_EXECUTE_MASK UINT64_C(3)
+
+/* Table attributes only apply to stage 1 translations. */
+#define TABLE_NSTABLE (UINT64_C(1) << 63)
+#define TABLE_APTABLE1 (UINT64_C(1) << 62)
+#define TABLE_APTABLE0 (UINT64_C(1) << 61)
+#define TABLE_XNTABLE (UINT64_C(1) << 60)
+#define TABLE_PXNTABLE (UINT64_C(1) << 59)
+
+/* The following are stage-2 software defined attributes. */
+#define STAGE2_SW_OWNED (UINT64_C(1) << 55)
+#define STAGE2_SW_EXCLUSIVE (UINT64_C(1) << 56)
+
+/* The following are stage-2 memory attributes for normal memory. */
+#define STAGE2_DEVICE_MEMORY UINT64_C(0)
+#define STAGE2_NONCACHEABLE UINT64_C(1)
+#define STAGE2_WRITETHROUGH UINT64_C(2)
+#define STAGE2_WRITEBACK UINT64_C(3)
+
+/* The following are stage-2 memory attributes for device memory. */
+#define STAGE2_MEMATTR_DEVICE_nGnRnE UINT64_C(0)
+#define STAGE2_MEMATTR_DEVICE_nGnRE UINT64_C(1)
+#define STAGE2_MEMATTR_DEVICE_nGRE UINT64_C(2)
+#define STAGE2_MEMATTR_DEVICE_GRE UINT64_C(3)
+
+/* The following construct and destruct stage-2 memory attributes. */
+#define STAGE2_MEMATTR(outer, inner) ((((outer) << 2) | (inner)) << 2)
+#define STAGE2_MEMATTR_TYPE_MASK UINT64_C(3 << 4)
+
+#define STAGE2_ACCESS_READ UINT64_C(1)
+#define STAGE2_ACCESS_WRITE UINT64_C(2)
+
+#define CACHE_WORD_SIZE 4
+
+/**
+ * Threshold number of pages in TLB to invalidate after which we invalidate all
+ * TLB entries on a given level.
+ * Constant is the number of page table entries per page table, also used by
+ * Linux (PTRS_PER_PTE).
+ */
+#define MAX_TLBI_OPS MM_PTE_PER_PAGE
+
+/* clang-format on */
+
+#define tlbi(op) \
+ do { \
+ __asm__ volatile("tlbi " #op); \
+ } while (0)
+#define tlbi_reg(op, reg) \
+ do { \
+ __asm__ __volatile__("tlbi " #op ", %0" : : "r"(reg)); \
+ } while (0)
+
+/** Mask for the address bits of the pte. */
+#define PTE_ADDR_MASK \
+ (((UINT64_C(1) << 48) - 1) & ~((UINT64_C(1) << PAGE_BITS) - 1))
+
+/** Mask for the attribute bits of the pte. */
+#define PTE_ATTR_MASK (~(PTE_ADDR_MASK | (UINT64_C(1) << 1)))
+
+/**
+ * Configuration information for memory management. Order is important as this
+ * is read from assembly.
+ *
+ * It must only be written to from `arch_mm_init()` to avoid cache and
+ * synchronization problems.
+ */
+struct arch_mm_config {
+	uintreg_t ttbr0_el2;
+	uintreg_t vtcr_el2;
+	uintreg_t mair_el2;
+	uintreg_t tcr_el2;
+	uintreg_t sctlr_el2;
+} arch_mm_config;
+
+/* Highest level index of the stage-2 page table, derived in arch_mm_init(). */
+static uint8_t mm_s2_max_level;
+/* Number of concatenated tables at the stage-2 root, set in arch_mm_init(). */
+static uint8_t mm_s2_root_table_count;
+
+/**
+ * Returns the encoding of a page table entry that isn't present.
+ */
+pte_t arch_mm_absent_pte(uint8_t level)
+{
+	(void)level;
+	return 0;
+}
+
+/**
+ * Converts a physical address to a table PTE.
+ *
+ * The spec says that 'Table descriptors for stage 2 translations do not
+ * include any attribute field', so we don't take any attributes as arguments.
+ */
+pte_t arch_mm_table_pte(uint8_t level, paddr_t pa)
+{
+	/* This is the same for all levels on aarch64. */
+	(void)level;
+	return pa_addr(pa) | PTE_TABLE | PTE_VALID;
+}
+
+/**
+ * Converts a physical address to a block PTE.
+ *
+ * The level must allow block entries.
+ */
+pte_t arch_mm_block_pte(uint8_t level, paddr_t pa, uint64_t attrs)
+{
+	pte_t pte = pa_addr(pa) | attrs;
+
+	if (level == 0) {
+		/* A level 0 'block' is actually a page entry. */
+		pte |= PTE_LEVEL0_BLOCK;
+	}
+	return pte;
+}
+
+/**
+ * Specifies whether block mappings are acceptable at the given level.
+ *
+ * Level 0 must allow block entries.
+ * With PAGE_BITS == 12 and PAGE_LEVEL_BITS == 9, levels 0, 1 and 2 map
+ * 4 KiB, 2 MiB and 1 GiB regions respectively.
+ */
+bool arch_mm_is_block_allowed(uint8_t level)
+{
+	return level <= 2;
+}
+
+/**
+ * Determines if the given pte is present, i.e., if it is valid or it is invalid
+ * but still holds state about the memory so needs to be present in the table.
+ * STAGE2_SW_OWNED is a software-defined bit, so an invalid entry that still
+ * records ownership counts as present.
+ */
+bool arch_mm_pte_is_present(pte_t pte, uint8_t level)
+{
+	return arch_mm_pte_is_valid(pte, level) || (pte & STAGE2_SW_OWNED) != 0;
+}
+
+/**
+ * Determines if the given pte is valid, i.e., if it points to another table,
+ * to a page, or a block of pages that can be accessed.
+ */
+bool arch_mm_pte_is_valid(pte_t pte, uint8_t level)
+{
+	(void)level;
+	return (pte & PTE_VALID) != 0;
+}
+
+/**
+ * Determines if the given pte references a block of pages.
+ */
+bool arch_mm_pte_is_block(pte_t pte, uint8_t level)
+{
+	/* We count pages at level 0 as blocks. */
+	return arch_mm_is_block_allowed(level) &&
+	       (level == 0 ? (pte & PTE_LEVEL0_BLOCK) != 0
+			   : arch_mm_pte_is_present(pte, level) &&
+				     !arch_mm_pte_is_table(pte, level));
+}
+
+/**
+ * Determines if the given pte references another table.
+ * Level 0 cannot reference a table: bit 1 means 'page' there.
+ */
+bool arch_mm_pte_is_table(pte_t pte, uint8_t level)
+{
+	return level != 0 && arch_mm_pte_is_valid(pte, level) &&
+	       (pte & PTE_TABLE) != 0;
+}
+
+/** Extracts the output address, bits [47:12], of a pte (PTE_ADDR_MASK). */
+static uint64_t pte_addr(pte_t pte)
+{
+	return pte & PTE_ADDR_MASK;
+}
+
+/**
+ * Clears the given physical address, i.e., clears the bits of the address that
+ * are not used in the pte.
+ */
+paddr_t arch_mm_clear_pa(paddr_t pa)
+{
+	return pa_init(pte_addr(pa_addr(pa)));
+}
+
+/**
+ * Extracts the physical address of the block referred to by the given page
+ * table entry.
+ */
+paddr_t arch_mm_block_from_pte(pte_t pte, uint8_t level)
+{
+	(void)level;
+	return pa_init(pte_addr(pte));
+}
+
+/**
+ * Extracts the physical address of the page table referred to by the given page
+ * table entry.
+ */
+paddr_t arch_mm_table_from_pte(pte_t pte, uint8_t level)
+{
+	(void)level;
+	return pa_init(pte_addr(pte));
+}
+
+/**
+ * Extracts the architecture-specific attributes applied to the given page table
+ * entry.
+ */
+uint64_t arch_mm_pte_attrs(pte_t pte, uint8_t level)
+{
+	(void)level;
+	return pte & PTE_ATTR_MASK;
+}
+
+/**
+ * Invalidates stage-1 TLB entries referring to the given virtual address range.
+ */
+void arch_mm_invalidate_stage1_range(vaddr_t va_begin, vaddr_t va_end)
+{
+	uintvaddr_t begin = va_addr(va_begin);
+	uintvaddr_t end = va_addr(va_end);
+	uintvaddr_t it;
+
+	/* Sync with page table updates. */
+	dsb(ishst);
+
+	/*
+	 * Revisions prior to Armv8.4 do not support invalidating a range of
+	 * addresses, which means we have to loop over individual pages. If
+	 * there are too many, it is quicker to invalidate all TLB entries.
+	 */
+	if ((end - begin) > (MAX_TLBI_OPS * PAGE_SIZE)) {
+		/*
+		 * VM_TOOLCHAIN presumably selects an EL1 (VM/test) build vs
+		 * the EL2 hypervisor — TODO confirm where it is defined.
+		 */
+		if (VM_TOOLCHAIN == 1) {
+			tlbi(vmalle1is);
+		} else {
+			tlbi(alle2is);
+		}
+	} else {
+		/* TLBI by-VA takes the address in 4 KB units (VA >> 12). */
+		begin >>= 12;
+		end >>= 12;
+		/* Invalidate stage-1 TLB, one page from the range at a time. */
+		for (it = begin; it < end;
+		     it += (UINT64_C(1) << (PAGE_BITS - 12))) {
+			if (VM_TOOLCHAIN == 1) {
+				tlbi_reg(vae1is, it);
+			} else {
+				tlbi_reg(vae2is, it);
+			}
+		}
+	}
+
+	/* Sync data accesses with TLB invalidation completion. */
+	dsb(ish);
+
+	/* Sync instruction fetches with TLB invalidation completion. */
+	isb();
+}
+
+/**
+ * Invalidates stage-2 TLB entries referring to the given intermediate physical
+ * address range.
+ */
+void arch_mm_invalidate_stage2_range(ipaddr_t va_begin, ipaddr_t va_end)
+{
+	uintpaddr_t begin = ipa_addr(va_begin);
+	uintpaddr_t end = ipa_addr(va_end);
+	uintpaddr_t it;
+
+	/* TODO: This only applies to the current VMID. */
+
+	/* Sync with page table updates. */
+	dsb(ishst);
+
+	/*
+	 * Revisions prior to Armv8.4 do not support invalidating a range of
+	 * addresses, which means we have to loop over individual pages. If
+	 * there are too many, it is quicker to invalidate all TLB entries.
+	 */
+	if ((end - begin) > (MAX_TLBI_OPS * PAGE_SIZE)) {
+		/*
+		 * Invalidate all stage-1 and stage-2 entries of the TLB for
+		 * the current VMID.
+		 */
+		tlbi(vmalls12e1is);
+	} else {
+		/* As for stage 1, TLBI takes the address in 4 KB units. */
+		begin >>= 12;
+		end >>= 12;
+
+		/*
+		 * Invalidate stage-2 TLB, one page from the range at a time.
+		 * Note that this has no effect if the CPU has a TLB with
+		 * combined stage-1/stage-2 translation.
+		 */
+		for (it = begin; it < end;
+		     it += (UINT64_C(1) << (PAGE_BITS - 12))) {
+			tlbi_reg(ipas2e1is, it);
+		}
+
+		/*
+		 * Ensure completion of stage-2 invalidation in case a page
+		 * table walk on another CPU refilled the TLB with a complete
+		 * stage-1 + stage-2 walk based on the old stage-2 mapping.
+		 */
+		dsb(ish);
+
+		/*
+		 * Invalidate all stage-1 TLB entries. If the CPU has a combined
+		 * TLB for stage-1 and stage-2, this will invalidate stage-2 as
+		 * well.
+		 */
+		tlbi(vmalle1is);
+	}
+
+	/* Sync data accesses with TLB invalidation completion. */
+	dsb(ish);
+
+	/* Sync instruction fetches with TLB invalidation completion. */
+	isb();
+}
+
+/**
+ * Returns the smallest cache line size of all the caches for this core.
+ *
+ * CTR_EL0 bits [19:16] (DminLine) give log2 of the smallest data cache line
+ * in words; CACHE_WORD_SIZE converts words to bytes.
+ */
+static uint16_t arch_mm_dcache_line_size(void)
+{
+	return CACHE_WORD_SIZE *
+	       (UINT16_C(1) << ((read_msr(CTR_EL0) >> 16) & 0xf));
+}
+
+/** Cleans and invalidates the data cache over [base, base + size). */
+void arch_mm_flush_dcache(void *base, size_t size)
+{
+	/* Clean and invalidate each data cache line in the range. */
+	uint16_t line_size = arch_mm_dcache_line_size();
+	/* Round the start down to a cache-line boundary. */
+	uintptr_t line_begin = (uintptr_t)base & ~(line_size - 1);
+	uintptr_t end = (uintptr_t)base + size;
+
+	while (line_begin < end) {
+		__asm__ volatile("dc civac, %0" : : "r"(line_begin));
+		line_begin += line_size;
+	}
+	dsb(sy);
+}
+
+/** Converts a memory mode (bitset of MM_MODE_*) into stage-1 PTE attributes. */
+uint64_t arch_mm_mode_to_stage1_attrs(uint32_t mode)
+{
+	uint64_t attrs = 0;
+
+	attrs |= STAGE1_AF | STAGE1_SH(OUTER_SHAREABLE);
+
+	/* Define the execute bits. */
+	if (!(mode & MM_MODE_X)) {
+		attrs |= STAGE1_XN;
+	}
+
+	/* Define the read/write bits. */
+	if (mode & MM_MODE_W) {
+		attrs |= STAGE1_AP(STAGE1_READWRITE);
+	} else {
+		attrs |= STAGE1_AP(STAGE1_READONLY);
+	}
+
+	/* Define the memory attribute bits. */
+	if (mode & MM_MODE_D) {
+		attrs |= STAGE1_ATTRINDX(STAGE1_DEVICEINDX);
+	} else {
+		attrs |= STAGE1_ATTRINDX(STAGE1_NORMALINDX);
+	}
+
+	/* Define the valid bit. */
+	if (!(mode & MM_MODE_INVALID)) {
+		attrs |= PTE_VALID;
+	}
+
+	return attrs;
+}
+
+/** Converts a memory mode (bitset of MM_MODE_*) into stage-2 PTE attributes. */
+uint64_t arch_mm_mode_to_stage2_attrs(uint32_t mode)
+{
+	uint64_t attrs = 0;
+	uint64_t access = 0;
+
+	/*
+	 * Non-shareable is the "neutral" share mode, i.e., the shareability
+	 * attribute of stage 1 will determine the actual attribute.
+	 */
+	attrs |= STAGE2_AF | STAGE2_SH(NON_SHAREABLE);
+
+	/* Define the read/write bits. */
+	if (mode & MM_MODE_R) {
+		access |= STAGE2_ACCESS_READ;
+	}
+
+	if (mode & MM_MODE_W) {
+		access |= STAGE2_ACCESS_WRITE;
+	}
+
+	attrs |= STAGE2_S2AP(access);
+
+	/* Define the execute bits. */
+	if (mode & MM_MODE_X) {
+		attrs |= STAGE2_XN(STAGE2_EXECUTE_ALL);
+	} else {
+		attrs |= STAGE2_XN(STAGE2_EXECUTE_NONE);
+	}
+
+	/*
+	 * Define the memory attribute bits, using the "neutral" values which
+	 * give the stage-1 attributes full control of the attributes.
+	 */
+	if (mode & MM_MODE_D) {
+		attrs |= STAGE2_MEMATTR(STAGE2_DEVICE_MEMORY,
+					STAGE2_MEMATTR_DEVICE_GRE);
+	} else {
+		attrs |= STAGE2_MEMATTR(STAGE2_WRITEBACK, STAGE2_WRITEBACK);
+	}
+
+	/* Define the ownership bit. */
+	if (!(mode & MM_MODE_UNOWNED)) {
+		attrs |= STAGE2_SW_OWNED;
+	}
+
+	/* Define the exclusivity bit. */
+	if (!(mode & MM_MODE_SHARED)) {
+		attrs |= STAGE2_SW_EXCLUSIVE;
+	}
+
+	/* Define the valid bit. */
+	if (!(mode & MM_MODE_INVALID)) {
+		attrs |= PTE_VALID;
+	}
+
+	return attrs;
+}
+
+/**
+ * Converts stage-2 PTE attributes back into the memory mode bits they encode;
+ * the inverse of arch_mm_mode_to_stage2_attrs() for the bits it sets.
+ */
+uint32_t arch_mm_stage2_attrs_to_mode(uint64_t attrs)
+{
+	uint32_t mode = 0;
+
+	if (attrs & STAGE2_S2AP(STAGE2_ACCESS_READ)) {
+		mode |= MM_MODE_R;
+	}
+
+	if (attrs & STAGE2_S2AP(STAGE2_ACCESS_WRITE)) {
+		mode |= MM_MODE_W;
+	}
+
+	if ((attrs & STAGE2_XN(STAGE2_EXECUTE_MASK)) ==
+	    STAGE2_XN(STAGE2_EXECUTE_ALL)) {
+		mode |= MM_MODE_X;
+	}
+
+	if ((attrs & STAGE2_MEMATTR_TYPE_MASK) == STAGE2_DEVICE_MEMORY) {
+		mode |= MM_MODE_D;
+	}
+
+	if (!(attrs & STAGE2_SW_OWNED)) {
+		mode |= MM_MODE_UNOWNED;
+	}
+
+	if (!(attrs & STAGE2_SW_EXCLUSIVE)) {
+		mode |= MM_MODE_SHARED;
+	}
+
+	if (!(attrs & PTE_VALID)) {
+		mode |= MM_MODE_INVALID;
+	}
+
+	return mode;
+}
+
+/** Returns the highest level index of the stage-1 page table. */
+uint8_t arch_mm_stage1_max_level(void)
+{
+	/*
+	 * For stage 1 we hard-code this to 2 for now so that we can
+	 * save one page table level at the expense of limiting the
+	 * physical memory to 512GB.
+	 */
+	return 2;
+}
+
+/** Returns the highest level index of the stage-2 page table. */
+uint8_t arch_mm_stage2_max_level(void)
+{
+	return mm_s2_max_level;
+}
+
+/** Returns the number of root tables for stage 1. */
+uint8_t arch_mm_stage1_root_table_count(void)
+{
+	/* Stage 1 doesn't concatenate tables. */
+	return 1;
+}
+
+/** Returns the number of concatenated root tables for stage 2. */
+uint8_t arch_mm_stage2_root_table_count(void)
+{
+	return mm_s2_root_table_count;
+}
+
+/**
+ * Given the attrs from a table at some level and the attrs from all the blocks
+ * in that table, returns equivalent attrs to use for a block which will replace
+ * the entire table.
+ */
+uint64_t arch_mm_combine_table_entry_attrs(uint64_t table_attrs,
+					   uint64_t block_attrs)
+{
+	/*
+	 * Only stage 1 table descriptors have attributes, but the bits are res0
+	 * for stage 2 table descriptors so this code is safe for both.
+	 */
+	if (table_attrs & TABLE_NSTABLE) {
+		block_attrs |= STAGE1_NS;
+	}
+	if (table_attrs & TABLE_APTABLE1) {
+		block_attrs |= STAGE1_AP2;
+	}
+	if (table_attrs & TABLE_APTABLE0) {
+		/* APTABLE[0] forbids EL0 access, so clear AP[1]. */
+		block_attrs &= ~STAGE1_AP1;
+	}
+	if (table_attrs & TABLE_XNTABLE) {
+		block_attrs |= STAGE1_XN;
+	}
+	if (table_attrs & TABLE_PXNTABLE) {
+		block_attrs |= STAGE1_PXN;
+	}
+	return block_attrs;
+}
+
+/**
+ * This is called early in initialization without MMU or caches enabled.
+ *
+ * Reads ID_AA64MMFR0_EL1 to size the stage-2 translation regime and fills in
+ * `arch_mm_config` (consumed from assembly). Returns false if the hardware
+ * configuration is unsupported.
+ */
+bool arch_mm_init(paddr_t table)
+{
+	/* Indexed by ID_AA64MMFR0_EL1.PARange; unlisted encodings stay 0. */
+	static const int pa_bits_table[16] = {32, 36, 40, 42, 44, 48};
+	uint64_t features = read_msr(id_aa64mmfr0_el1);
+	int pa_bits = pa_bits_table[features & 0xf];
+	int extend_bits;
+	int sl0;
+
+	/* Check that 4KB granules are supported. */
+	/*
+	 * NOTE(review): ID_AA64MMFR0_EL1.TGran4 == 0 means supported; any
+	 * non-zero value is rejected here, including later encodings that
+	 * also indicate support — confirm this is intended.
+	 */
+	if ((features >> 28) & 0xf) {
+		dlog("4KB granules are not supported\n");
+		return false;
+	}
+
+	/* Check the physical address range. */
+	if (!pa_bits) {
+		dlog("Unsupported value of id_aa64mmfr0_el1.PARange: %x\n",
+		     features & 0xf);
+		return false;
+	}
+
+	dlog("Supported bits in physical address: %d\n", pa_bits);
+
+	/*
+	 * Determine sl0, starting level of the page table, based on the number
+	 * of bits. The value is chosen to give the shallowest tree by making
+	 * use of concatenated translation tables.
+	 *
+	 * - 0 => start at level 1
+	 * - 1 => start at level 2
+	 * - 2 => start at level 3
+	 */
+	if (pa_bits >= 44) {
+		sl0 = 2;
+		mm_s2_max_level = 3;
+	} else if (pa_bits >= 35) {
+		sl0 = 1;
+		mm_s2_max_level = 2;
+	} else {
+		sl0 = 0;
+		mm_s2_max_level = 1;
+	}
+
+	/*
+	 * Since the shallowest possible tree is used, the maximum number of
+	 * concatenated tables must be used. This means if no more than 4 bits
+	 * are used from the next level, they are instead used to index into the
+	 * concatenated tables.
+	 */
+	extend_bits = ((pa_bits - PAGE_BITS) % PAGE_LEVEL_BITS);
+	if (extend_bits > 4) {
+		extend_bits = 0;
+	}
+	mm_s2_root_table_count = 1 << extend_bits;
+
+	dlog("Stage 2 has %d page table levels with %d pages at the root.\n",
+	     mm_s2_max_level + 1, mm_s2_root_table_count);
+
+	arch_mm_config = (struct arch_mm_config){
+		.ttbr0_el2 = pa_addr(table),
+
+		.vtcr_el2 =
+			(1U << 31) |		   /* RES1. */
+			((features & 0xf) << 16) | /* PS, matching features. */
+			(0 << 14) |		   /* TG0: 4 KB granule. */
+			(3 << 12) |		   /* SH0: inner shareable. */
+			(1 << 10) |	/* ORGN0: normal, cacheable ... */
+			(1 << 8) |	/* IRGN0: normal, cacheable ... */
+			(sl0 << 6) |	/* SL0. */
+			((64 - pa_bits) << 0) | /* T0SZ: dependent on PS. */
+			0,
+
+		/*
+		 * 0    -> Device-nGnRnE memory
+		 * 0xff -> Normal memory, Inner/Outer Write-Back Non-transient,
+		 *         Write-Alloc, Read-Alloc.
+		 */
+		.mair_el2 = (0 << (8 * STAGE1_DEVICEINDX)) |
+			    (0xff << (8 * STAGE1_NORMALINDX)),
+
+		/*
+		 * Configure tcr_el2.
+		 */
+		.tcr_el2 =
+			(1 << 20) |		   /* TBI, top byte ignored. */
+			((features & 0xf) << 16) | /* PS. */
+			(0 << 14) |		   /* TG0, granule size, 4KB. */
+			(3 << 12) |		   /* SH0, inner shareable. */
+			(1 << 10) | /* ORGN0, normal mem, WB RA WA Cacheable. */
+			(1 << 8) |  /* IRGN0, normal mem, WB RA WA Cacheable. */
+			(25 << 0) | /* T0SZ, input address is 2^39 bytes. */
+			0,
+
+		.sctlr_el2 = (1 << 0) |	 /* M, enable stage 1 EL2 MMU. */
+			     (1 << 1) |	 /* A, enable alignment check faults. */
+			     (1 << 2) |	 /* C, data cache enable. */
+			     (1 << 3) |	 /* SA, enable stack alignment check. */
+			     (3 << 4) |	 /* RES1 bits. */
+			     (1 << 11) | /* RES1 bit. */
+			     (1 << 12) | /* I, instruction cache enable. */
+			     (1 << 16) | /* RES1 bit. */
+			     (1 << 18) | /* RES1 bit. */
+			     (1 << 19) | /* WXN bit, writable execute never. */
+			     (3 << 22) | /* RES1 bits. */
+			     (3 << 28) | /* RES1 bits. */
+			     0,
+	};
+
+	return true;
+}
diff --git a/src/arch/aarch64/msr.h b/src/arch/aarch64/msr.h
new file mode 100644
index 0000000..1b96538
--- /dev/null
+++ b/src/arch/aarch64/msr.h
@@ -0,0 +1,70 @@
+/*
+ * Copyright 2018 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <stddef.h>
+
+#include "hf/arch/cpu.h"
+
+/**
+ * Macros to stringify a parameter, and to allow the results of a macro to be
+ * stringified in turn.
+ */
+#define str_(s) #s
+#define str(s) str_(s)
+
+/**
+ * Reads a system register, given a name supported by the current assembler,
+ * and returns the result.
+ */
+#define read_msr(name) \
+ __extension__({ \
+ uintreg_t __v; \
+ __asm__ volatile("mrs %0, " str(name) : "=r"(__v)); \
+ __v; \
+ })
+
+/**
+ * Writes the value to the system register, supported by the current assembler.
+ */
+#define write_msr(name, value) \
+ __extension__({ \
+ __asm__ volatile("msr " str(name) ", %x0" \
+ : \
+ : "rZ"((uintreg_t)(value))); \
+ })
+
+/*
+ * Encodings for registers supported after Armv8.0.
+ * We aim to build one binary that supports a variety of platforms, therefore,
+ * use encodings in Arm Architecture Reference Manual Armv8-a, D13.2 for
+ * registers supported after Armv8.0.
+ */
+
+/*
+ * Registers supported from Armv8.1 onwards.
+ */
+
+/*
+ * Registers for feature Armv8.1-LOR (Limited Ordering Regions).
+ */
+
+/**
+ * Encoding for the LORegion Control register (LORC_EL1).
+ * This register enables and disables LORegions (Armv8.1).
+ */
+#define MSR_LORC_EL1 S3_0_C10_C4_3
diff --git a/src/arch/aarch64/pl011/BUILD.gn b/src/arch/aarch64/pl011/BUILD.gn
new file mode 100644
index 0000000..7d97e33
--- /dev/null
+++ b/src/arch/aarch64/pl011/BUILD.gn
@@ -0,0 +1,29 @@
+# Copyright 2019 The Hafnium Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import("args.gni")
+
+# aarch64 PL011 implementation of putchar for debugging.
+source_set("pl011") {
+ sources = [
+ "pl011.c",
+ ]
+ deps = [
+ "//src/arch/aarch64:arch",
+ ]
+
+ assert(defined(pl011_base_address),
+ "\"pl011_base_address\" must be defined for ${target_name}.")
+ defines = [ "PL011_BASE=${pl011_base_address}" ]
+}
diff --git a/src/arch/aarch64/pl011/args.gni b/src/arch/aarch64/pl011/args.gni
new file mode 100644
index 0000000..f5b1e99
--- /dev/null
+++ b/src/arch/aarch64/pl011/args.gni
@@ -0,0 +1,17 @@
+# Copyright 2019 The Hafnium Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+declare_args() {
+ pl011_base_address = ""
+}
diff --git a/src/arch/aarch64/pl011/pl011.c b/src/arch/aarch64/pl011/pl011.c
new file mode 100644
index 0000000..f98f36e
--- /dev/null
+++ b/src/arch/aarch64/pl011/pl011.c
@@ -0,0 +1,69 @@
+/*
+ * Copyright 2018 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "hf/io.h"
+#include "hf/mm.h"
+#include "hf/mpool.h"
+#include "hf/plat/console.h"
+
+/* UART Data Register. */
+#define UARTDR IO32_C(PL011_BASE + 0x0)
+
+/* UART Flag Register. */
+#define UARTFR IO32_C(PL011_BASE + 0x018)
+
+/* UART Flag Register bit: transmit fifo is full. */
+#define UARTFR_TXFF (1 << 5)
+
+/* UART Flag Register bit: UART is busy. */
+#define UARTFR_BUSY (1 << 3)
+
+void plat_console_init(void)
+{
+ /* No hardware initialisation required. */
+}
+
+void plat_console_mm_init(struct mm_stage1_locked stage1_locked,
+ struct mpool *ppool)
+{
+ /* Map page for UART. */
+ mm_identity_map(stage1_locked, pa_init(PL011_BASE),
+ pa_add(pa_init(PL011_BASE), PAGE_SIZE),
+ MM_MODE_R | MM_MODE_W | MM_MODE_D, ppool);
+}
+
+void plat_console_putchar(char c)
+{
+ /* Print a carriage-return as well. */
+ if (c == '\n') {
+ plat_console_putchar('\r');
+ }
+
+ /* Wait until there is room in the tx buffer. */
+ while (io_read32(UARTFR) & UARTFR_TXFF) {
+ /* do nothing */
+ }
+
+ /* Write the character out, force memory access ordering. */
+ memory_ordering_barrier();
+ io_write32(UARTDR, c);
+ memory_ordering_barrier();
+
+ /* Wait until the UART is no longer busy. */
+ while (io_read32_mb(UARTFR) & UARTFR_BUSY) {
+ /* do nothing */
+ }
+}
diff --git a/src/arch/aarch64/psci.h b/src/arch/aarch64/psci.h
new file mode 100644
index 0000000..355c618
--- /dev/null
+++ b/src/arch/aarch64/psci.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright 2018 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include "smc.h"
+
+/* clang-format off */
+
+/* The following are PSCI version codes. */
+#define PSCI_VERSION_0_2 0x00000002
+#define PSCI_VERSION_1_0 0x00010000
+#define PSCI_VERSION_1_1 0x00010001
+
+/* The following are function identifiers for PSCI. */
+#define PSCI_VERSION 0x84000000
+#define PSCI_CPU_SUSPEND 0x84000001
+#define PSCI_CPU_OFF 0x84000002
+#define PSCI_CPU_ON 0x84000003
+#define PSCI_AFFINITY_INFO 0x84000004
+#define PSCI_MIGRATE 0x84000005
+#define PSCI_MIGRATE_INFO_TYPE 0x84000006
+#define PSCI_MIGRATE_INFO_UP_CPU 0x84000007
+#define PSCI_SYSTEM_OFF 0x84000008
+#define PSCI_SYSTEM_RESET 0x84000009
+#define PSCI_FEATURES 0x8400000a
+#define PSCI_CPU_FREEZE 0x8400000b
+#define PSCI_CPU_DEFAULT_SUSPEND 0x8400000c
+#define PSCI_NODE_HW_STATE 0x8400000d
+#define PSCI_SYSTEM_SUSPEND 0x8400000e
+#define PSCI_SET_SYSPEND_MODE 0x8400000f
+#define PSCI_STAT_RESIDENCY 0x84000010
+#define PSCI_STAT_COUNT 0x84000011
+#define PSCI_SYSTEM_RESET2 0x84000012
+#define PSCI_MEM_PROTECT 0x84000013
+#define PSCI_MEM_PROTECT_CHECK_RANGE 0x84000014
+
+/* The following are return codes for PSCI. */
+#define PSCI_RETURN_ON_PENDING 2
+#define PSCI_RETURN_OFF 1
+#define PSCI_RETURN_ON 0
+#define PSCI_RETURN_SUCCESS 0
+#define PSCI_ERROR_NOT_SUPPORTED SMCCC_ERROR_UNKNOWN
+#define PSCI_ERROR_INVALID_PARAMETERS (-2)
+#define PSCI_ERROR_DENIED (-3)
+#define PSCI_ERROR_ALREADY_ON (-4)
+#define PSCI_ERROR_ON_PENDING (-5)
+#define PSCI_ERROR_INTERNAL_FAILURE (-6)
+#define PSCI_ERROR_NOT_PRESENT (-7)
+#define PSCI_ERROR_DISABLE (-8)
+#define PSCI_ERROR_INVALID_ADDRESS (-9)
+
+/* clang-format on */
diff --git a/src/arch/aarch64/smc.c b/src/arch/aarch64/smc.c
new file mode 100644
index 0000000..e5de0bd
--- /dev/null
+++ b/src/arch/aarch64/smc.c
@@ -0,0 +1,75 @@
+/*
+ * Copyright 2019 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "smc.h"
+
+#include <stdint.h>
+
+#include "vmapi/hf/spci.h"
+
+static struct spci_value smc_internal(uint32_t func, uint64_t arg0,
+ uint64_t arg1, uint64_t arg2,
+ uint64_t arg3, uint64_t arg4,
+ uint64_t arg5, uint32_t caller_id)
+{
+ register uint64_t r0 __asm__("x0") = func;
+ register uint64_t r1 __asm__("x1") = arg0;
+ register uint64_t r2 __asm__("x2") = arg1;
+ register uint64_t r3 __asm__("x3") = arg2;
+ register uint64_t r4 __asm__("x4") = arg3;
+ register uint64_t r5 __asm__("x5") = arg4;
+ register uint64_t r6 __asm__("x6") = arg5;
+ register uint64_t r7 __asm__("x7") = caller_id;
+
+ __asm__ volatile(
+ "smc #0"
+ : /* Output registers, also used as inputs ('+' constraint). */
+ "+r"(r0), "+r"(r1), "+r"(r2), "+r"(r3), "+r"(r4), "+r"(r5),
+ "+r"(r6), "+r"(r7));
+
+ return (struct spci_value){.func = r0,
+ .arg1 = r1,
+ .arg2 = r2,
+ .arg3 = r3,
+ .arg4 = r4,
+ .arg5 = r5,
+ .arg6 = r6,
+ .arg7 = r7};
+}
+
+struct spci_value smc32(uint32_t func, uint32_t arg0, uint32_t arg1,
+ uint32_t arg2, uint32_t arg3, uint32_t arg4,
+ uint32_t arg5, uint32_t caller_id)
+{
+ return smc_internal(func | SMCCC_32_BIT, arg0, arg1, arg2, arg3, arg4,
+ arg5, caller_id);
+}
+
+struct spci_value smc64(uint32_t func, uint64_t arg0, uint64_t arg1,
+ uint64_t arg2, uint64_t arg3, uint64_t arg4,
+ uint64_t arg5, uint32_t caller_id)
+{
+ return smc_internal(func | SMCCC_64_BIT, arg0, arg1, arg2, arg3, arg4,
+ arg5, caller_id);
+}
+
+struct spci_value smc_forward(uint32_t func, uint64_t arg0, uint64_t arg1,
+ uint64_t arg2, uint64_t arg3, uint64_t arg4,
+ uint64_t arg5, uint32_t caller_id)
+{
+ return smc_internal(func, arg0, arg1, arg2, arg3, arg4, arg5,
+ caller_id);
+}
diff --git a/src/arch/aarch64/smc.h b/src/arch/aarch64/smc.h
new file mode 100644
index 0000000..ad8ce5b
--- /dev/null
+++ b/src/arch/aarch64/smc.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright 2018 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <stdint.h>
+
+#include "vmapi/hf/spci.h"
+
+/* clang-format off */
+
+#define SMCCC_CALL_TYPE_MASK 0x80000000
+#define SMCCC_YIELDING_CALL 0x00000000
+#define SMCCC_FAST_CALL 0x80000000
+
+#define SMCCC_CONVENTION_MASK 0x40000000
+#define SMCCC_32_BIT 0x00000000
+#define SMCCC_64_BIT 0x40000000
+
+#define SMCCC_SERVICE_CALL_MASK 0x3f000000
+#define SMCCC_ARM_ARCHITECTURE_CALL 0x00000000
+#define SMCCC_CPU_SERVICE_CALL 0x01000000
+#define SMCCC_SIP_SERVICE_CALL 0x02000000
+#define SMCCC_OEM_SERVICE_CALL 0x03000000
+#define SMCCC_STANDARD_SECURE_SERVICE_CALL 0x04000000
+#define SMCCC_STANDARD_HYPERVISOR_SERVICE_CALL 0x05000000
+#define SMCCC_VENDOR_HYPERVISOR_SERVICE_CALL 0x06000000
+
+#define SMCCC_CALLER_HYPERVISOR 0x0
+/*
+ * TODO: Trusted application call: 0x30000000 - 0x31000000
+ * TODO: Trusted OS call: 0x32000000 - 0x3f000000
+ */
+
+#define SMCCC_ERROR_UNKNOWN (-1)
+
+/* clang-format on */
+
+struct spci_value smc32(uint32_t func, uint32_t arg0, uint32_t arg1,
+ uint32_t arg2, uint32_t arg3, uint32_t arg4,
+ uint32_t arg5, uint32_t caller_id);
+
+struct spci_value smc64(uint32_t func, uint64_t arg0, uint64_t arg1,
+ uint64_t arg2, uint64_t arg3, uint64_t arg4,
+ uint64_t arg5, uint32_t caller_id);
+
+struct spci_value smc_forward(uint32_t func, uint64_t arg0, uint64_t arg1,
+ uint64_t arg2, uint64_t arg3, uint64_t arg4,
+ uint64_t arg5, uint32_t caller_id);
diff --git a/src/arch/aarch64/smc/BUILD.gn b/src/arch/aarch64/smc/BUILD.gn
new file mode 100644
index 0000000..00004f3
--- /dev/null
+++ b/src/arch/aarch64/smc/BUILD.gn
@@ -0,0 +1,19 @@
+# Copyright 2019 The Hafnium Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+source_set("absent") {
+ sources = [
+ "absent.c",
+ ]
+}
diff --git a/src/arch/aarch64/smc/absent.c b/src/arch/aarch64/smc/absent.c
new file mode 100644
index 0000000..6ea9b8d
--- /dev/null
+++ b/src/arch/aarch64/smc/absent.c
@@ -0,0 +1,23 @@
+/*
+ * Copyright 2019 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "hf/arch/plat/smc.h"
+
+void plat_smc_post_forward(struct spci_value args, struct spci_value *ret)
+{
+ (void)args;
+ (void)ret;
+}
diff --git a/src/arch/aarch64/stack_protector.c b/src/arch/aarch64/stack_protector.c
new file mode 100644
index 0000000..489bb50
--- /dev/null
+++ b/src/arch/aarch64/stack_protector.c
@@ -0,0 +1,42 @@
+/*
+ * Copyright 2019 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <stdint.h>
+#include <stdnoreturn.h>
+
+#include "hf/panic.h"
+
+/**
+ * This is the value that is used as the stack canary. It is written to the top
+ * of the stack when entering a function and compared against the stack when
+ * exiting a function. If there is a mismatch, a failure is triggered.
+ *
+ * As the value must be the same at the beginning and end of the function, and
+ * because it is a global variable read by multiple CPUs executing
+ * concurrently, this value cannot change after being initialized.
+ *
+ * TODO: initialize to a random value at boot.
+ */
+uint64_t __attribute__((used)) __stack_chk_guard = 0x72afaf72bad0feed;
+
+/**
+ * Called when the stack canary is invalid. The stack can no longer be trusted
+ * so this function must not return.
+ */
+noreturn void __stack_chk_fail(void)
+{
+ panic("stack corruption");
+}
diff --git a/src/arch/aarch64/std.c b/src/arch/aarch64/std.c
new file mode 100644
index 0000000..c7f109d
--- /dev/null
+++ b/src/arch/aarch64/std.c
@@ -0,0 +1,104 @@
+/*
+ * Copyright 2018 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "hf/arch/std.h"
+
+void *memset(void *s, int c, size_t n)
+{
+ char *p = (char *)s;
+
+ while (n--) {
+ *p++ = c;
+ }
+
+ return s;
+}
+
+void *memcpy(void *dst, const void *src, size_t n)
+{
+ char *x = dst;
+ const char *y = src;
+
+ while (n--) {
+ *x = *y;
+ x++;
+ y++;
+ }
+
+ return dst;
+}
+
+void *memmove(void *dst, const void *src, size_t n)
+{
+ char *x;
+ const char *y;
+
+ if (dst < src) {
+ /*
+ * Clang analyzer doesn't like us calling unsafe memory
+ * functions, so make it ignore this while still knowing that
+ * the function returns.
+ */
+#ifdef __clang_analyzer__
+ return dst;
+#else
+ return memcpy(dst, src, n);
+#endif
+ }
+
+ x = (char *)dst + n - 1;
+ y = (const char *)src + n - 1;
+
+ while (n--) {
+ *x = *y;
+ x--;
+ y--;
+ }
+
+ return dst;
+}
+
+int memcmp(const void *a, const void *b, size_t n)
+{
+ const char *x = a;
+ const char *y = b;
+
+ while (n--) {
+ if (*x != *y) {
+ return *x - *y;
+ }
+ x++;
+ y++;
+ }
+
+ return 0;
+}
+
+int strcmp(const char *a, const char *b)
+{
+ const char *x = a;
+ const char *y = b;
+
+ while (*x != 0 && *y != 0) {
+ if (*x != *y) {
+ return *x - *y;
+ }
+ x++;
+ y++;
+ }
+
+ return *x - *y;
+}
diff --git a/src/arch/aarch64/timer.c b/src/arch/aarch64/timer.c
new file mode 100644
index 0000000..3b76e26
--- /dev/null
+++ b/src/arch/aarch64/timer.c
@@ -0,0 +1,164 @@
+/*
+ * Copyright 2018 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "hf/arch/timer.h"
+
+#include <stdbool.h>
+#include <stddef.h>
+#include <stdint.h>
+
+#include "hf/arch/cpu.h"
+
+#include "hf/addr.h"
+
+#include "msr.h"
+
+#define CNTV_CTL_EL0_ENABLE (1u << 0)
+#define CNTV_CTL_EL0_IMASK (1u << 1)
+#define CNTV_CTL_EL0_ISTATUS (1u << 2)
+
+#define NANOS_PER_UNIT 1000000000
+
+/**
+ * Sets the bit to mask virtual timer interrupts.
+ */
+void arch_timer_mask(struct arch_regs *regs)
+{
+ regs->peripherals.cntv_ctl_el0 |= CNTV_CTL_EL0_IMASK;
+}
+
+/**
+ * Checks whether the virtual timer is enabled and its interrupt not masked.
+ */
+bool arch_timer_enabled(struct arch_regs *regs)
+{
+ uintreg_t cntv_ctl_el0 = regs->peripherals.cntv_ctl_el0;
+
+ return (cntv_ctl_el0 & CNTV_CTL_EL0_ENABLE) &&
+ !(cntv_ctl_el0 & CNTV_CTL_EL0_IMASK);
+}
+
+/**
+ * Converts a number of timer ticks to the equivalent number of nanoseconds.
+ */
+static uint64_t ticks_to_ns(uint64_t ticks)
+{
+ return (ticks * NANOS_PER_UNIT) / read_msr(cntfrq_el0);
+}
+
+/**
+ * Returns the number of ticks remaining on the virtual timer as stored in
+ * the given `arch_regs`, or 0 if it has already expired. This is undefined if
+ * the timer is not enabled.
+ */
+static uint64_t arch_timer_remaining_ticks(struct arch_regs *regs)
+{
+ /*
+ * Calculate the value from the saved CompareValue (cntv_cval_el0) and
+ * the virtual count value.
+ */
+ uintreg_t cntv_cval_el0 = regs->peripherals.cntv_cval_el0;
+ uintreg_t cntvct_el0 = read_msr(cntvct_el0);
+
+ if (cntv_cval_el0 >= cntvct_el0) {
+ return cntv_cval_el0 - cntvct_el0;
+ }
+
+ return 0;
+}
+
+/**
+ * Returns the number of nanoseconds remaining on the virtual timer as stored in
+ * the given `arch_regs`, or 0 if it has already expired. This is undefined if
+ * the timer is not enabled.
+ */
+uint64_t arch_timer_remaining_ns(struct arch_regs *regs)
+{
+ return ticks_to_ns(arch_timer_remaining_ticks(regs));
+}
+
+/**
+ * Returns whether the timer is ready to fire: i.e. it is enabled, not masked,
+ * and the condition is met.
+ */
+bool arch_timer_pending(struct arch_regs *regs)
+{
+ if (!arch_timer_enabled(regs)) {
+ return false;
+ }
+
+ if (regs->peripherals.cntv_ctl_el0 & CNTV_CTL_EL0_ISTATUS) {
+ return true;
+ }
+
+ if (arch_timer_remaining_ticks(regs) == 0) {
+ /*
+ * This can happen even if the (stored) ISTATUS bit is not set,
+ * because time has passed between when the registers were
+ * stored and now.
+ */
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * Checks whether the virtual timer is enabled and its interrupt not masked, for
+ * the currently active vCPU.
+ */
+bool arch_timer_enabled_current(void)
+{
+ uintreg_t cntv_ctl_el0 = read_msr(cntv_ctl_el0);
+
+ return (cntv_ctl_el0 & CNTV_CTL_EL0_ENABLE) &&
+ !(cntv_ctl_el0 & CNTV_CTL_EL0_IMASK);
+}
+
+/**
+ * Disables the virtual timer for the currently active vCPU.
+ */
+void arch_timer_disable_current(void)
+{
+ write_msr(cntv_ctl_el0, 0x0);
+}
+
+/**
+ * Returns the number of ticks remaining on the virtual timer of the currently
+ * active vCPU, or 0 if it has already expired. This is undefined if the timer
+ * is not enabled.
+ */
+static uint64_t arch_timer_remaining_ticks_current(void)
+{
+ uintreg_t cntv_cval_el0 = read_msr(cntv_cval_el0);
+ uintreg_t cntvct_el0 = read_msr(cntvct_el0);
+
+ if (cntv_cval_el0 >= cntvct_el0) {
+ return cntv_cval_el0 - cntvct_el0;
+ }
+
+ return 0;
+}
+
+/**
+ * Returns the number of nanoseconds remaining on the virtual timer of the
+ * currently active vCPU, or 0 if it has already expired. This is undefined if
+ * the timer is not enabled.
+ */
+uint64_t arch_timer_remaining_ns_current(void)
+{
+ return ticks_to_ns(arch_timer_remaining_ticks_current());
+}
diff --git a/src/arch/fake/BUILD.gn b/src/arch/fake/BUILD.gn
new file mode 100644
index 0000000..a0c7219
--- /dev/null
+++ b/src/arch/fake/BUILD.gn
@@ -0,0 +1,37 @@
+# Copyright 2018 The Hafnium Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+source_set("fake") {
+ sources = [
+ "mm.c",
+ "timer.c",
+ ]
+}
+
+# Empty implementation of platform boot flow.
+# Fake arch targets should not depend on the boot flow functions. Will fail to
+# compile if they do.
+source_set("boot_flow") {
+}
+
+# Fake implementation of putchar logs to the console.
+source_set("console") {
+ sources = [
+ "console.c",
+ ]
+}
+
+# Empty, as the functions are provided by libc already.
+source_set("std") {
+}
diff --git a/src/arch/fake/console.c b/src/arch/fake/console.c
new file mode 100644
index 0000000..036cced
--- /dev/null
+++ b/src/arch/fake/console.c
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2018 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "hf/plat/console.h"
+
+#include <stdio.h>
+
+#include "hf/mm.h"
+#include "hf/mpool.h"
+
+void plat_console_init(void)
+{
+}
+
+void plat_console_mm_init(struct mm_stage1_locked stage1_locked,
+ struct mpool *ppool)
+{
+}
+
+void plat_console_putchar(char c)
+{
+ putchar(c);
+}
diff --git a/src/arch/fake/hftest/BUILD.gn b/src/arch/fake/hftest/BUILD.gn
new file mode 100644
index 0000000..c69041f
--- /dev/null
+++ b/src/arch/fake/hftest/BUILD.gn
@@ -0,0 +1,23 @@
+# Copyright 2019 The Hafnium Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# These components are only used by tests running under Linux VMs.
+
+# Shutdown the system.
+source_set("power_mgmt") {
+ testonly = true
+ sources = [
+ "power_mgmt.c",
+ ]
+}
diff --git a/src/arch/fake/hftest/power_mgmt.c b/src/arch/fake/hftest/power_mgmt.c
new file mode 100644
index 0000000..a171036
--- /dev/null
+++ b/src/arch/fake/hftest/power_mgmt.c
@@ -0,0 +1,27 @@
+/*
+ * Copyright 2019 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "hf/arch/vm/power_mgmt.h"
+
+#include <sys/reboot.h>
+
+noreturn void arch_power_off(void)
+{
+ reboot(RB_POWER_OFF);
+ for (;;) {
+ /* This should never be reached. */
+ }
+}
diff --git a/src/arch/fake/hypervisor/BUILD.gn b/src/arch/fake/hypervisor/BUILD.gn
new file mode 100644
index 0000000..b6435ed
--- /dev/null
+++ b/src/arch/fake/hypervisor/BUILD.gn
@@ -0,0 +1,22 @@
+# Copyright 2019 The Hafnium Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+source_set("hypervisor") {
+ sources = [
+ "cpu.c",
+ ]
+ deps = [
+ "//src/arch/fake",
+ ]
+}
diff --git a/src/arch/fake/hypervisor/cpu.c b/src/arch/fake/hypervisor/cpu.c
new file mode 100644
index 0000000..3fc09f9
--- /dev/null
+++ b/src/arch/fake/hypervisor/cpu.c
@@ -0,0 +1,54 @@
+/*
+ * Copyright 2018 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "hf/arch/cpu.h"
+
+#include "hf/cpu.h"
+#include "hf/spci.h"
+
+void arch_irq_disable(void)
+{
+ /* TODO */
+}
+
+void arch_irq_enable(void)
+{
+ /* TODO */
+}
+
+void arch_regs_reset(struct vcpu *vcpu)
+{
+ /* TODO */
+ (void)vcpu;
+}
+
+void arch_regs_set_pc_arg(struct arch_regs *r, ipaddr_t pc, uintreg_t arg)
+{
+ (void)pc;
+ r->arg[0] = arg;
+}
+
+void arch_regs_set_retval(struct arch_regs *r, struct spci_value v)
+{
+ r->arg[0] = v.func;
+ r->arg[1] = v.arg1;
+ r->arg[2] = v.arg2;
+ r->arg[3] = v.arg3;
+ r->arg[4] = v.arg4;
+ r->arg[5] = v.arg5;
+ r->arg[6] = v.arg6;
+ r->arg[7] = v.arg7;
+}
diff --git a/src/arch/fake/inc/hf/arch/barriers.h b/src/arch/fake/inc/hf/arch/barriers.h
new file mode 100644
index 0000000..46f35a9
--- /dev/null
+++ b/src/arch/fake/inc/hf/arch/barriers.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2019 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <stdatomic.h>
+
+/** Platform-agnostic API */
+
+/**
+ * Ensures all explicit memory accesses before this point are completed before
+ * any later memory accesses are performed.
+ */
+#define memory_ordering_barrier() atomic_thread_fence(memory_order_seq_cst)
+
+/**
+ * Ensures all explicit memory access and management instructions have completed
+ * before continuing.
+ *
+ * FIXME: this is just a memory barrier but, without MMIO or registers to modify
+ * operation in the fake architecture, this is likely enough. If there's a way
+ * to have a true synchronization then we should update it.
+ */
+#define data_sync_barrier() atomic_thread_fence(memory_order_seq_cst)
diff --git a/src/arch/fake/inc/hf/arch/spinlock.h b/src/arch/fake/inc/hf/arch/spinlock.h
new file mode 100644
index 0000000..db3b45c
--- /dev/null
+++ b/src/arch/fake/inc/hf/arch/spinlock.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright 2018 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+/**
+ * Generic implementation of a spinlock using C11 atomics.
+ * Does not work very well under contention.
+ */
+
+#include <stdatomic.h>
+
+struct spinlock {
+ atomic_flag v;
+};
+
+#define SPINLOCK_INIT ((struct spinlock){.v = ATOMIC_FLAG_INIT})
+
+static inline void sl_lock(struct spinlock *l)
+{
+ while (atomic_flag_test_and_set_explicit(&l->v, memory_order_acquire)) {
+ /* do nothing */
+ }
+}
+
+static inline void sl_unlock(struct spinlock *l)
+{
+ atomic_flag_clear_explicit(&l->v, memory_order_release);
+}
diff --git a/src/arch/fake/inc/hf/arch/types.h b/src/arch/fake/inc/hf/arch/types.h
new file mode 100644
index 0000000..4cb5be4
--- /dev/null
+++ b/src/arch/fake/inc/hf/arch/types.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright 2018 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <stdbool.h>
+#include <stdint.h>
+
+#define PAGE_BITS 12
+#define PAGE_LEVEL_BITS 9
+#define STACK_ALIGN 64
+
+/** The type of a page table entry (PTE). */
+typedef uint64_t pte_t;
+
+/** Integer type large enough to hold a physical address. */
+typedef uintptr_t uintpaddr_t;
+
+/** Integer type large enough to hold a virtual address. */
+typedef uintptr_t uintvaddr_t;
+
+/** The integer corresponding to the native register size. */
+typedef uint64_t uintreg_t;
+
+/** The ID of a physical or virtual CPU. */
+typedef uint32_t cpu_id_t;
+
+/** Arch-specific information about a VM. */
+struct arch_vm {
+        /* This field is only here because empty structs aren't allowed. */
+        void *dummy;
+};
+
+/** Type to represent the register state of a VM. */
+struct arch_regs {
+        /* Generic argument registers of the fake architecture. */
+        uintreg_t arg[8];
+        /* ID of the vCPU this register state belongs to. */
+        cpu_id_t vcpu_id;
+        /*
+         * NOTE(review): presumably set when a virtual interrupt is to be
+         * injected for this vCPU — confirm against the fake arch's users.
+         */
+        bool virtual_interrupt;
+};
diff --git a/src/arch/fake/inc/hf/arch/vm/power_mgmt.h b/src/arch/fake/inc/hf/arch/vm/power_mgmt.h
new file mode 100644
index 0000000..607705e
--- /dev/null
+++ b/src/arch/fake/inc/hf/arch/vm/power_mgmt.h
@@ -0,0 +1,21 @@
+/*
+ * Copyright 2019 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <stdnoreturn.h>
+
+noreturn void arch_power_off(void);
diff --git a/src/arch/fake/mm.c b/src/arch/fake/mm.c
new file mode 100644
index 0000000..d0066fb
--- /dev/null
+++ b/src/arch/fake/mm.c
@@ -0,0 +1,170 @@
+/*
+ * Copyright 2018 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "hf/arch/mm.h"
+
+#include "hf/mm.h"
+
+/*
+ * The fake architecture uses the mode flags to represent the attributes applied
+ * to memory. The flags are shifted to avoid equality of modes and attributes.
+ */
+#define PTE_ATTR_MODE_SHIFT 48
+#define PTE_ATTR_MODE_MASK \
+ ((uint64_t)(MM_MODE_R | MM_MODE_W | MM_MODE_X | MM_MODE_D | \
+ MM_MODE_INVALID | MM_MODE_UNOWNED | MM_MODE_SHARED) \
+ << PTE_ATTR_MODE_SHIFT)
+
+/* The bit to distinguish a table from a block is the highest of the page bits.
+ */
+#define PTE_TABLE (UINT64_C(1) << (PAGE_BITS - 1))
+
+/* Mask for the address part of an entry. */
+#define PTE_ADDR_MASK (~(PTE_ATTR_MODE_MASK | (UINT64_C(1) << PAGE_BITS) - 1))
+
+/* Offset the bits of each level so they can't be misused. */
+#define PTE_LEVEL_SHIFT(lvl) ((lvl)*2)
+
+/**
+ * An absent entry is encoded as invalid + unowned + shared in the mode bits,
+ * shifted down by the per-level offset so entries at different levels are
+ * distinguishable.
+ */
+pte_t arch_mm_absent_pte(uint8_t level)
+{
+        return ((uint64_t)(MM_MODE_INVALID | MM_MODE_UNOWNED | MM_MODE_SHARED)
+                << PTE_ATTR_MODE_SHIFT) >>
+               PTE_LEVEL_SHIFT(level);
+}
+
+/** Builds a table PTE referring to the next-level table at `pa`. */
+pte_t arch_mm_table_pte(uint8_t level, paddr_t pa)
+{
+        return (pa_addr(pa) | PTE_TABLE) >> PTE_LEVEL_SHIFT(level);
+}
+
+/** Builds a block PTE mapping `pa` with the given attribute bits. */
+pte_t arch_mm_block_pte(uint8_t level, paddr_t pa, uint64_t attrs)
+{
+        return (pa_addr(pa) | attrs) >> PTE_LEVEL_SHIFT(level);
+}
+
+/** Blocks are allowed at every level of the fake page table. */
+bool arch_mm_is_block_allowed(uint8_t level)
+{
+        (void)level;
+        return true;
+}
+
+/**
+ * A PTE is present if it is valid or the memory is still owned (i.e. the
+ * MM_MODE_UNOWNED bit is clear). The level shift undoes the per-level
+ * encoding before the mode bits are inspected.
+ */
+bool arch_mm_pte_is_present(pte_t pte, uint8_t level)
+{
+        return arch_mm_pte_is_valid(pte, level) ||
+               !(((pte << PTE_LEVEL_SHIFT(level)) >> PTE_ATTR_MODE_SHIFT) &
+                 MM_MODE_UNOWNED);
+}
+
+/** A PTE is valid when the MM_MODE_INVALID bit is clear. */
+bool arch_mm_pte_is_valid(pte_t pte, uint8_t level)
+{
+        return !(((pte << PTE_LEVEL_SHIFT(level)) >> PTE_ATTR_MODE_SHIFT) &
+                 MM_MODE_INVALID);
+}
+
+/** A block is any present entry that is not a table. */
+bool arch_mm_pte_is_block(pte_t pte, uint8_t level)
+{
+        return arch_mm_pte_is_present(pte, level) &&
+               !arch_mm_pte_is_table(pte, level);
+}
+
+/** A table entry carries the PTE_TABLE marker bit. */
+bool arch_mm_pte_is_table(pte_t pte, uint8_t level)
+{
+        return (pte << PTE_LEVEL_SHIFT(level)) & PTE_TABLE;
+}
+
+/** Strips attribute and low bits, leaving only the address part. */
+paddr_t arch_mm_clear_pa(paddr_t pa)
+{
+        return pa_init(pa_addr(pa) & PTE_ADDR_MASK);
+}
+
+/** Extracts the physical address mapped by a block PTE. */
+paddr_t arch_mm_block_from_pte(pte_t pte, uint8_t level)
+{
+        return pa_init((pte << PTE_LEVEL_SHIFT(level)) & PTE_ADDR_MASK);
+}
+
+/** Extracts the physical address of the next-level table from a table PTE. */
+paddr_t arch_mm_table_from_pte(pte_t pte, uint8_t level)
+{
+        return pa_init((pte << PTE_LEVEL_SHIFT(level)) & PTE_ADDR_MASK);
+}
+
+/** Returns the attribute bits of an entry, still in the shifted field. */
+uint64_t arch_mm_pte_attrs(pte_t pte, uint8_t level)
+{
+        return (pte << PTE_LEVEL_SHIFT(level)) & PTE_ATTR_MODE_MASK;
+}
+
+/** In the fake architecture combining attributes is a simple union. */
+uint64_t arch_mm_combine_table_entry_attrs(uint64_t table_attrs,
+                                           uint64_t block_attrs)
+{
+        return table_attrs | block_attrs;
+}
+
+/** No-op: the fake architecture does not model a stage-1 TLB. */
+void arch_mm_invalidate_stage1_range(vaddr_t va_begin, vaddr_t va_end)
+{
+        /* There's no modelling of the stage-1 TLB. */
+}
+
+/** No-op: the fake architecture does not model a stage-2 TLB. */
+void arch_mm_invalidate_stage2_range(ipaddr_t va_begin, ipaddr_t va_end)
+{
+        /* There's no modelling of the stage-2 TLB. */
+}
+
+/** No-op: the fake architecture does not model the data cache. */
+void arch_mm_flush_dcache(void *base, size_t size)
+{
+        /* There's no modelling of the cache. */
+}
+
+/** The fake stage-1 page tables have levels 0-2. */
+uint8_t arch_mm_stage1_max_level(void)
+{
+        return 2;
+}
+
+/** The fake stage-2 page tables have levels 0-2. */
+uint8_t arch_mm_stage2_max_level(void)
+{
+        return 2;
+}
+
+/** Stage 1 uses a single root page table. */
+uint8_t arch_mm_stage1_root_table_count(void)
+{
+        return 1;
+}
+
+/** Stage 2 models four concatenated root page tables. */
+uint8_t arch_mm_stage2_root_table_count(void)
+{
+        /* Stage-2 has many concatenated page tables. */
+        return 4;
+}
+
+/** Stage-1 attributes are the mode bits shifted into the PTE field. */
+uint64_t arch_mm_mode_to_stage1_attrs(uint32_t mode)
+{
+        return ((uint64_t)mode << PTE_ATTR_MODE_SHIFT) & PTE_ATTR_MODE_MASK;
+}
+
+/** Stage-2 attributes are the mode bits shifted into the PTE field. */
+uint64_t arch_mm_mode_to_stage2_attrs(uint32_t mode)
+{
+        return ((uint64_t)mode << PTE_ATTR_MODE_SHIFT) & PTE_ATTR_MODE_MASK;
+}
+
+/** Inverse of arch_mm_mode_to_stage2_attrs. */
+uint32_t arch_mm_stage2_attrs_to_mode(uint64_t attrs)
+{
+        return attrs >> PTE_ATTR_MODE_SHIFT;
+}
+
+/** The fake MMU needs no setup; always reports success. */
+bool arch_mm_init(paddr_t table)
+{
+        /* No initialization required. */
+        (void)table;
+        return true;
+}
diff --git a/src/arch/fake/timer.c b/src/arch/fake/timer.c
new file mode 100644
index 0000000..1c0d449
--- /dev/null
+++ b/src/arch/fake/timer.c
@@ -0,0 +1,66 @@
+/*
+ * Copyright 2018 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "hf/arch/timer.h"
+
+#include <stdbool.h>
+#include <stdint.h>
+
+#include "hf/arch/types.h"
+
+/** Not modelled in the fake architecture: reports no pending timer. */
+bool arch_timer_pending(struct arch_regs *regs)
+{
+        /* TODO */
+        (void)regs;
+        return false;
+}
+
+/** Not modelled: masking the timer has no effect. */
+void arch_timer_mask(struct arch_regs *regs)
+{
+        /* TODO */
+        (void)regs;
+}
+
+/** Not modelled: the timer is never reported as enabled. */
+bool arch_timer_enabled(struct arch_regs *regs)
+{
+        /* TODO */
+        (void)regs;
+        return false;
+}
+
+/** Not modelled: always reports 0 ns remaining. */
+uint64_t arch_timer_remaining_ns(struct arch_regs *regs)
+{
+        /* TODO */
+        (void)regs;
+        return 0;
+}
+
+/** Not modelled: the current CPU's timer is never enabled. */
+bool arch_timer_enabled_current(void)
+{
+        /* TODO */
+        return false;
+}
+
+/** Not modelled: disabling the current CPU's timer is a no-op. */
+void arch_timer_disable_current(void)
+{
+        /* TODO */
+}
+
+/** Not modelled: always reports 0 ns remaining on the current CPU. */
+uint64_t arch_timer_remaining_ns_current(void)
+{
+        /* TODO */
+        return 0;
+}
diff --git a/src/boot_flow/BUILD.gn b/src/boot_flow/BUILD.gn
new file mode 100644
index 0000000..9de339d
--- /dev/null
+++ b/src/boot_flow/BUILD.gn
@@ -0,0 +1,41 @@
+# Copyright 2019 The Hafnium Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import("//build/toolchain/platform.gni")
+
+source_set("common") {
+ sources = [
+ "common.c",
+ ]
+}
+
+source_set("android") {
+ sources = [
+ "android.c",
+ ]
+ deps = [
+ ":common",
+ "//src/arch/${plat_arch}/boot_flow:android",
+ ]
+}
+
+source_set("linux") {
+ sources = [
+ "linux.c",
+ ]
+ deps = [
+ ":common",
+ "//src/arch/${plat_arch}/boot_flow:linux",
+ ]
+}
diff --git a/src/boot_flow/android.c b/src/boot_flow/android.c
new file mode 100644
index 0000000..0221674
--- /dev/null
+++ b/src/boot_flow/android.c
@@ -0,0 +1,65 @@
+/*
+ * Copyright 2019 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "hf/layout.h"
+#include "hf/plat/boot_flow.h"
+
+/**
+ * FDT was compiled into Hafnium. Return physical address of the `.plat.fdt`
+ * section of the Hafnium image.
+ */
+paddr_t plat_boot_flow_get_fdt_addr(void)
+{
+        return layout_fdt_begin();
+}
+
+/**
+ * Android boot flow does not use kernel arguments. Pass zero.
+ */
+uintreg_t plat_boot_flow_get_kernel_arg(void)
+{
+        return 0;
+}
+
+/**
+ * Initrd was compiled into Hafnium. Return range of the '.plat.initrd'
+ * section. The FDT root node is not consulted.
+ */
+bool plat_boot_flow_get_initrd_range(const struct fdt_node *fdt_root,
+                                     paddr_t *begin, paddr_t *end)
+{
+        (void)fdt_root;
+
+        *begin = layout_initrd_begin();
+        *end = layout_initrd_end();
+        return true;
+}
+
+/**
+ * Android boot flow does not change based on the updates; all arguments are
+ * ignored and success is reported.
+ */
+bool plat_boot_flow_update(struct mm_stage1_locked stage1_locked,
+                           const struct manifest *manifest,
+                           struct boot_params_update *p, struct memiter *cpio,
+                           struct mpool *ppool)
+{
+        (void)stage1_locked;
+        (void)manifest;
+        (void)p;
+        (void)cpio;
+        (void)ppool;
+
+        return true;
+}
diff --git a/src/boot_flow/common.c b/src/boot_flow/common.c
new file mode 100644
index 0000000..834f746
--- /dev/null
+++ b/src/boot_flow/common.c
@@ -0,0 +1,72 @@
+/*
+ * Copyright 2019 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "hf/boot_flow.h"
+#include "hf/dlog.h"
+#include "hf/fdt_handler.h"
+#include "hf/plat/boot_flow.h"
+
+/**
+ * Extracts the boot parameters from the FDT and the boot-flow driver:
+ * kernel argument, initrd range, CPU IDs and memory ranges. Returns false if
+ * any of the FDT lookups fail.
+ */
+static bool boot_params_init(struct boot_params *p,
+                             const struct fdt_node *fdt_root)
+{
+        p->mem_ranges_count = 0;
+        p->kernel_arg = plat_boot_flow_get_kernel_arg();
+
+        return plat_boot_flow_get_initrd_range(fdt_root, &p->initrd_begin,
+                                               &p->initrd_end) &&
+               fdt_find_cpus(fdt_root, p->cpu_ids, &p->cpu_count) &&
+               fdt_find_memory_ranges(fdt_root, p);
+}
+
+/**
+ * Parses information from the FDT needed to initialize Hafnium: the VM
+ * manifest and the boot parameters. The FDT root node is provided by the
+ * caller. Returns false (after logging the cause) on any parse failure.
+ */
+bool boot_flow_init(const struct fdt_node *fdt_root, struct manifest *manifest,
+                    struct boot_params *boot_params)
+{
+        enum manifest_return_code manifest_ret;
+
+        /* Parse the VM manifest out of the FDT. */
+
+        manifest_ret = manifest_init(manifest, fdt_root);
+        if (manifest_ret != MANIFEST_SUCCESS) {
+                dlog("Could not parse manifest: %s.\n",
+                     manifest_strerror(manifest_ret));
+                return false;
+        }
+
+        if (!boot_params_init(boot_params, fdt_root)) {
+                dlog("Could not parse boot params.\n");
+                return false;
+        }
+
+        return true;
+}
+
+/**
+ * Takes action on any updates that were generated, delegating to the
+ * platform-specific boot-flow driver.
+ */
+bool boot_flow_update(struct mm_stage1_locked stage1_locked,
+                      const struct manifest *manifest,
+                      struct boot_params_update *p, struct memiter *cpio,
+                      struct mpool *ppool)
+{
+        return plat_boot_flow_update(stage1_locked, manifest, p, cpio, ppool);
+}
diff --git a/src/boot_flow/linux.c b/src/boot_flow/linux.c
new file mode 100644
index 0000000..e1e257c
--- /dev/null
+++ b/src/boot_flow/linux.c
@@ -0,0 +1,76 @@
+/*
+ * Copyright 2019 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "hf/check.h"
+#include "hf/cpio.h"
+#include "hf/dlog.h"
+#include "hf/fdt_handler.h"
+#include "hf/plat/boot_flow.h"
+#include "hf/std.h"
+
+/* Set by arch-specific boot-time hook. */
+uintreg_t plat_boot_flow_fdt_addr;
+
+/**
+ * Returns the physical address of the board FDT. This was passed to Hafnium
+ * in the first kernel arg by the boot loader.
+ */
+paddr_t plat_boot_flow_get_fdt_addr(void)
+{
+        return pa_init((uintpaddr_t)plat_boot_flow_fdt_addr);
+}
+
+/**
+ * When handing over to the primary, give it the same FDT address that was
+ * given to Hafnium. The FDT may have been modified during Hafnium init.
+ */
+uintreg_t plat_boot_flow_get_kernel_arg(void)
+{
+        return plat_boot_flow_fdt_addr;
+}
+
+/**
+ * Load the initrd range from the board FDT.
+ */
+bool plat_boot_flow_get_initrd_range(const struct fdt_node *fdt_root,
+                                     paddr_t *begin, paddr_t *end)
+{
+        return fdt_find_initrd(fdt_root, begin, end);
+}
+
+/**
+ * Locates the primary VM's ramdisk in the cpio archive (an empty manifest
+ * filename yields an empty initrd range), records its range in the update
+ * and patches the board FDT accordingly.
+ */
+bool plat_boot_flow_update(struct mm_stage1_locked stage1_locked,
+                           const struct manifest *manifest,
+                           struct boot_params_update *update,
+                           struct memiter *cpio, struct mpool *ppool)
+{
+        struct memiter primary_initrd;
+        const struct string *filename =
+                &manifest->vm[HF_PRIMARY_VM_INDEX].primary.ramdisk_filename;
+
+        if (string_is_empty(filename)) {
+                /* No ramdisk requested: pass an empty range through. */
+                memiter_init(&primary_initrd, NULL, 0);
+        } else if (!cpio_get_file(cpio, filename, &primary_initrd)) {
+                dlog("Unable to find primary initrd \"%s\".\n",
+                     string_data(filename));
+                return false;
+        }
+
+        update->initrd_begin = pa_from_va(va_from_ptr(primary_initrd.next));
+        update->initrd_end = pa_from_va(va_from_ptr(primary_initrd.limit));
+
+        return fdt_patch(stage1_locked, plat_boot_flow_get_fdt_addr(), update,
+                         ppool);
+}
diff --git a/src/cpio.c b/src/cpio.c
new file mode 100644
index 0000000..58626ec
--- /dev/null
+++ b/src/cpio.c
@@ -0,0 +1,104 @@
+/*
+ * Copyright 2018 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "hf/cpio.h"
+
+#include <stdint.h>
+
+#include "hf/std.h"
+
+#pragma pack(push, 1)
+struct cpio_header {
+ uint16_t magic;
+ uint16_t dev;
+ uint16_t ino;
+ uint16_t mode;
+ uint16_t uid;
+ uint16_t gid;
+ uint16_t nlink;
+ uint16_t rdev;
+ uint16_t mtime[2];
+ uint16_t namesize;
+ uint16_t filesize[2];
+};
+#pragma pack(pop)
+
+/**
+ * Retrieves the next file stored in the cpio archive, and advances the
+ * iterator such that another call to this function would return the
+ * following file.
+ *
+ * NOTE(review): the header layout matches the old binary cpio format;
+ * casting iter->next to struct cpio_header assumes suitable alignment of the
+ * archive in memory — confirm for the target platforms.
+ */
+static bool cpio_next(struct memiter *iter, const char **name,
+                      const void **contents, size_t *size)
+{
+        size_t len;
+        struct memiter lit = *iter;
+        const struct cpio_header *h = (const struct cpio_header *)lit.next;
+
+        if (!memiter_advance(&lit, sizeof(struct cpio_header))) {
+                return false;
+        }
+
+        /* The file name immediately follows the header. */
+        *name = lit.next;
+
+        /* TODO: Check magic. */
+
+        /* Name (including NUL) is padded to an even length. */
+        len = (h->namesize + 1) & ~1;
+        if (!memiter_advance(&lit, len)) {
+                return false;
+        }
+
+        *contents = lit.next;
+
+        /* File size is stored as two 16-bit halves, high half first. */
+        len = (size_t)h->filesize[0] << 16 | h->filesize[1];
+        if (!memiter_advance(&lit, (len + 1) & ~1)) {
+                return false;
+        }
+
+        /* TODO: Check that string is null-terminated. */
+
+        /* Stop enumerating files when we hit the end marker. */
+        if (!strcmp(*name, "TRAILER!!!")) {
+                return false;
+        }
+
+        *size = len;
+        *iter = lit;
+
+        return true;
+}
+
+/**
+ * Searches the given cpio archive for a file with the given name. If found,
+ * the file's contents are returned via "it" and true is returned; otherwise
+ * false is returned and "it" is untouched.
+ */
+bool cpio_get_file(const struct memiter *cpio, const struct string *name,
+                   struct memiter *it)
+{
+        struct memiter cursor = *cpio;
+        const char *entry_name;
+        const void *entry_data;
+        size_t entry_size;
+        bool found = false;
+
+        while (!found &&
+               cpio_next(&cursor, &entry_name, &entry_data, &entry_size)) {
+                if (strcmp(entry_name, string_data(name)) == 0) {
+                        memiter_init(it, entry_data, entry_size);
+                        found = true;
+                }
+        }
+
+        return found;
+}
diff --git a/src/cpu.c b/src/cpu.c
new file mode 100644
index 0000000..f8beed6
--- /dev/null
+++ b/src/cpu.c
@@ -0,0 +1,174 @@
+/*
+ * Copyright 2018 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "hf/cpu.h"
+
+#include <stdalign.h>
+
+#include "hf/api.h"
+#include "hf/check.h"
+#include "hf/dlog.h"
+
+#include "vmapi/hf/call.h"
+
+#define STACK_SIZE PAGE_SIZE
+
+/**
+ * The stacks to be used by the CPUs.
+ *
+ * Align to page boundaries to ensure that cache lines are not shared between a
+ * CPU's stack and data that can be accessed from other CPUs. If this did
+ * happen, there may be coherency problems when the stack is being used before
+ * caching is enabled.
+ */
+alignas(PAGE_SIZE) static char callstacks[MAX_CPUS][STACK_SIZE];
+
+/* NOLINTNEXTLINE(misc-redundant-expression) */
+static_assert((STACK_SIZE % PAGE_SIZE) == 0, "Keep each stack page aligned.");
+static_assert((PAGE_SIZE % STACK_ALIGN) == 0,
+ "Page alignment is too weak for the stack.");
+
+/**
+ * Internal buffer used to store SPCI messages from a VM Tx. Its usage
+ * prevents TOCTOU issues while Hafnium performs actions on information that
+ * would otherwise be re-writable by the VM.
+ *
+ * Each buffer is owned by a single CPU. The buffer can only be used for
+ * spci_msg_send. The information stored in the buffer is only valid while
+ * the spci_msg_send request is being performed.
+ */
+alignas(PAGE_SIZE) static uint8_t cpu_message_buffer[MAX_CPUS][PAGE_SIZE];
+
+/** Returns the message buffer owned by the given CPU. */
+uint8_t *cpu_get_buffer(cpu_id_t cpu_id)
+{
+        CHECK(cpu_id < MAX_CPUS);
+
+        return cpu_message_buffer[cpu_id];
+}
+
+/** Returns the size in bytes of a CPU's message buffer (one page). */
+uint32_t cpu_get_buffer_size(cpu_id_t cpu_id)
+{
+        CHECK(cpu_id < MAX_CPUS);
+
+        return sizeof(cpu_message_buffer[cpu_id]);
+}
+
+/* State of all supported CPUs. The stack of the first one is initialized. */
+struct cpu cpus[MAX_CPUS] = {
+        {
+                .is_on = 1,
+                .stack_bottom = &callstacks[0][STACK_SIZE],
+        },
+};
+
+/* Number of CPUs discovered from the configuration; defaults to the boot
+ * CPU only. */
+static uint32_t cpu_count = 1;
+
+/**
+ * Initializes the CPU table from the IDs in the configuration, keeping the
+ * boot CPU (cpus[0], already running on its stack) in slot 0.
+ */
+void cpu_module_init(const cpu_id_t *cpu_ids, size_t count)
+{
+        uint32_t i;
+        uint32_t j;
+        cpu_id_t boot_cpu_id = cpus[0].id;
+        bool found_boot_cpu = false;
+
+        cpu_count = count;
+
+        /*
+         * Initialize CPUs with the IDs from the configuration passed in. The
+         * CPUs after the boot CPU are initialized in reverse order. The boot
+         * CPU is initialized when it is found or in place of the last CPU if
+         * it is not found.
+         */
+        j = cpu_count;
+        for (i = 0; i < cpu_count; ++i) {
+                struct cpu *c;
+                cpu_id_t id = cpu_ids[i];
+
+                if (found_boot_cpu || id != boot_cpu_id) {
+                        /* Fill slots from the end towards slot 1. */
+                        --j;
+                        c = &cpus[j];
+                        c->stack_bottom = &callstacks[j][STACK_SIZE];
+                } else {
+                        /* Boot CPU keeps slot 0 and its existing stack. */
+                        found_boot_cpu = true;
+                        c = &cpus[0];
+                        CHECK(c->stack_bottom == &callstacks[0][STACK_SIZE]);
+                }
+
+                sl_init(&c->lock);
+                c->id = id;
+        }
+
+        if (!found_boot_cpu) {
+                /* Boot CPU was initialized but with wrong ID. */
+                dlog("Boot CPU's ID not found in config.\n");
+                cpus[0].id = boot_cpu_id;
+        }
+}
+
+/** Returns the index of the given CPU within the global cpus array. */
+size_t cpu_index(struct cpu *c)
+{
+        return c - cpus;
+}
+
+/**
+ * Turns CPU on and returns the previous state. If the CPU was previously
+ * off, the primary VM's vCPU for this CPU is prepared to start at the given
+ * entry point with the given argument.
+ */
+bool cpu_on(struct cpu *c, ipaddr_t entry, uintreg_t arg)
+{
+        bool prev;
+
+        sl_lock(&c->lock);
+        prev = c->is_on;
+        c->is_on = true;
+        sl_unlock(&c->lock);
+
+        if (!prev) {
+                struct vm *vm = vm_find(HF_PRIMARY_VM_ID);
+                struct vcpu *vcpu = vm_get_vcpu(vm, cpu_index(c));
+                struct vcpu_locked vcpu_locked;
+
+                vcpu_locked = vcpu_lock(vcpu);
+                vcpu_on(vcpu_locked, entry, arg);
+                vcpu_unlock(&vcpu_locked);
+        }
+
+        return prev;
+}
+
+/**
+ * Prepares the CPU for turning itself off by marking it as no longer on.
+ */
+void cpu_off(struct cpu *c)
+{
+        sl_lock(&c->lock);
+        c->is_on = false;
+        sl_unlock(&c->lock);
+}
+
+/**
+ * Searches for a CPU based on its ID. Returns NULL when no known CPU has
+ * the requested ID.
+ */
+struct cpu *cpu_find(cpu_id_t id)
+{
+        struct cpu *c = cpus;
+        struct cpu *end = cpus + cpu_count;
+
+        for (; c < end; ++c) {
+                if (c->id == id) {
+                        return c;
+                }
+        }
+
+        return NULL;
+}
diff --git a/src/dlog.c b/src/dlog.c
new file mode 100644
index 0000000..1bda568
--- /dev/null
+++ b/src/dlog.c
@@ -0,0 +1,372 @@
+/*
+ * Copyright 2018 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "hf/dlog.h"
+
+#include <stdbool.h>
+#include <stddef.h>
+
+#include "hf/plat/console.h"
+#include "hf/spci.h"
+#include "hf/spinlock.h"
+#include "hf/std.h"
+
+/* Keep macro alignment */
+/* clang-format off */
+
+#define FLAG_SPACE 0x01
+#define FLAG_ZERO 0x02
+#define FLAG_MINUS 0x04
+#define FLAG_PLUS 0x08
+#define FLAG_ALT 0x10
+#define FLAG_UPPER 0x20
+#define FLAG_NEG 0x40
+
+#define DLOG_MAX_STRING_LENGTH 64
+
+/* clang-format on */
+
+static bool dlog_lock_enabled = false;
+static struct spinlock sl = SPINLOCK_INIT;
+
+/*
+ * These global variables for the log buffer are not static because a test needs
+ * to access them directly.
+ */
+size_t dlog_buffer_offset;
+char dlog_buffer[DLOG_BUFFER_SIZE];
+
+/**
+ * Takes the lock, if it is enabled.
+ */
+static void lock(void)
+{
+        if (dlog_lock_enabled) {
+                sl_lock(&sl);
+        }
+}
+
+/**
+ * Releases the lock, if it is enabled.
+ */
+static void unlock(void)
+{
+        if (dlog_lock_enabled) {
+                sl_unlock(&sl);
+        }
+}
+
+/**
+ * Enables the lock protecting the serial device. Called once secondary CPUs
+ * may also be logging; there is no way to disable it again.
+ */
+void dlog_enable_lock(void)
+{
+        dlog_lock_enabled = true;
+}
+
+/**
+ * Appends a character to the circular log buffer and echoes it to the
+ * platform console.
+ */
+static void dlog_putchar(char c)
+{
+        dlog_buffer[dlog_buffer_offset] = c;
+        dlog_buffer_offset = (dlog_buffer_offset + 1) % DLOG_BUFFER_SIZE;
+        plat_console_putchar(c);
+}
+
+/**
+ * Writes a NUL-terminated string to the debug log and returns the number of
+ * characters written.
+ */
+static size_t print_raw_string(const char *str)
+{
+        const char *cursor;
+
+        for (cursor = str; *cursor != '\0'; ++cursor) {
+                dlog_putchar(*cursor);
+        }
+
+        return cursor - str;
+}
+
+/**
+ * Prints a formatted string to the debug log. The format includes a minimum
+ * width, the fill character, and flags (whether to align to left or right).
+ *
+ * str is the full string, while suffix is a pointer within str that
+ * indicates where the suffix begins. This is used when printing
+ * right-aligned numbers with a zero fill; for example, -10 with width 4
+ * should be padded to -010, so suffix would point to index one of the "-10"
+ * string.
+ */
+static void print_string(const char *str, const char *suffix, size_t width,
+                         int flags, char fill)
+{
+        size_t len = suffix - str;
+
+        /* Print the string up to the beginning of the suffix. */
+        while (str != suffix) {
+                dlog_putchar(*str++);
+        }
+
+        if (flags & FLAG_MINUS) {
+                /* Left-aligned. Print suffix, then print padding if needed. */
+                len += print_raw_string(suffix);
+                while (len < width) {
+                        dlog_putchar(' ');
+                        len++;
+                }
+                return;
+        }
+
+        /* Fill until we reach the desired length. */
+        len += strnlen_s(suffix, DLOG_MAX_STRING_LENGTH);
+        while (len < width) {
+                dlog_putchar(fill);
+                len++;
+        }
+
+        /* Now print the rest of the string. */
+        print_raw_string(suffix);
+}
+
+/**
+ * Prints a number to the debug log. The caller specifies the base, its
+ * minimum width and printf-style flags.
+ */
+static void print_num(size_t v, size_t base, size_t width, int flags)
+{
+        static const char *digits_lower = "0123456789abcdefx";
+        static const char *digits_upper = "0123456789ABCDEFX";
+        const char *d = (flags & FLAG_UPPER) ? digits_upper : digits_lower;
+        char buf[DLOG_MAX_STRING_LENGTH];
+        char *ptr = &buf[sizeof(buf) - 1];
+        char *num;
+        *ptr = '\0';
+        /* Build the digits from least to most significant, right to left. */
+        do {
+                --ptr;
+                *ptr = d[v % base];
+                v /= base;
+        } while (v);
+
+        /* Num stores where the actual number begins. */
+        num = ptr;
+
+        /* Add prefix if requested ("0x"/"0X" for hex, "0" for octal). */
+        if (flags & FLAG_ALT) {
+                switch (base) {
+                case 16:
+                        ptr -= 2;
+                        ptr[0] = '0';
+                        ptr[1] = d[16];
+                        break;
+
+                case 8:
+                        ptr--;
+                        *ptr = '0';
+                        break;
+                }
+        }
+
+        /* Add sign if requested. */
+        if (flags & FLAG_NEG) {
+                *--ptr = '-';
+        } else if (flags & FLAG_PLUS) {
+                *--ptr = '+';
+        } else if (flags & FLAG_SPACE) {
+                *--ptr = ' ';
+        }
+        /* Zero fill pads between sign/prefix and digits; space fill before. */
+        if (flags & FLAG_ZERO) {
+                print_string(ptr, num, width, flags, '0');
+        } else {
+                print_string(ptr, ptr, width, flags, ' ');
+        }
+}
+
+/**
+ * Parses the optional flags field of a printf-style format. It returns the
+ * spot on the string where a non-flag character was found.
+ */
+static const char *parse_flags(const char *p, int *flags)
+{
+        for (;;) {
+                switch (*p) {
+                case ' ':
+                        *flags |= FLAG_SPACE;
+                        break;
+
+                case '0':
+                        *flags |= FLAG_ZERO;
+                        break;
+
+                case '-':
+                        *flags |= FLAG_MINUS;
+                        break;
+
+                case '+':
+                        *flags |= FLAG_PLUS;
+                        /*
+                         * Fix: this case previously fell through into '#',
+                         * so e.g. "%+d" incorrectly also set FLAG_ALT.
+                         */
+                        break;
+
+                case '#':
+                        *flags |= FLAG_ALT;
+                        break;
+
+                default:
+                        return p;
+                }
+                p++;
+        }
+}
+
+/**
+ * Send the contents of the given VM's log buffer to the log, preceded by the
+ * VM ID and followed by a newline. The buffer is zeroed as it is drained.
+ */
+void dlog_flush_vm_buffer(spci_vm_id_t id, char buffer[], size_t length)
+{
+        lock();
+
+        print_raw_string("VM ");
+        print_num(id, 10, 0, 0);
+        print_raw_string(": ");
+
+        for (size_t i = 0; i < length; ++i) {
+                dlog_putchar(buffer[i]);
+                /* Clear as we go so the VM buffer starts fresh. */
+                buffer[i] = '\0';
+        }
+        dlog_putchar('\n');
+
+        unlock();
+}
+
+/**
+ * Same as "dlog", except that arguments are passed as a va_list.
+ *
+ * Supports the flags handled by parse_flags, an optional minimum width
+ * (digits or '*'), and the conversions: s d i X p x u o c %.
+ */
+void vdlog(const char *fmt, va_list args)
+{
+        const char *p;
+        size_t w;
+        int flags;
+        char buf[2];
+
+        lock();
+
+        for (p = fmt; *p; p++) {
+                switch (*p) {
+                default:
+                        /* Ordinary character: copy straight through. */
+                        dlog_putchar(*p);
+                        break;
+
+                case '%':
+                        /* Read optional flags. */
+                        flags = 0;
+                        p = parse_flags(p + 1, &flags) - 1;
+
+                        /* Read the minimum width, if one is specified. */
+                        w = 0;
+                        while (p[1] >= '0' && p[1] <= '9') {
+                                w = (w * 10) + (p[1] - '0');
+                                p++;
+                        }
+
+                        /* Read minimum width from arguments. */
+                        if (w == 0 && p[1] == '*') {
+                                int v = va_arg(args, int);
+
+                                /* A negative width means left-aligned. */
+                                if (v >= 0) {
+                                        w = v;
+                                } else {
+                                        w = -v;
+                                        flags |= FLAG_MINUS;
+                                }
+                                p++;
+                        }
+
+                        /* Handle the format specifier. */
+                        switch (p[1]) {
+                        case 's': {
+                                char *str = va_arg(args, char *);
+
+                                print_string(str, str, w, flags, ' ');
+                                p++;
+                        } break;
+
+                        case 'd':
+                        case 'i': {
+                                int v = va_arg(args, int);
+
+                                if (v < 0) {
+                                        flags |= FLAG_NEG;
+                                        v = -v;
+                                }
+
+                                print_num((size_t)v, 10, w, flags);
+                                p++;
+                        } break;
+
+                        case 'X':
+                                flags |= FLAG_UPPER;
+                                print_num(va_arg(args, size_t), 16, w, flags);
+                                p++;
+                                break;
+
+                        case 'p':
+                                /* Pointers are zero-padded to full width. */
+                                print_num(va_arg(args, size_t), 16,
+                                          sizeof(size_t) * 2, FLAG_ZERO);
+                                p++;
+                                break;
+
+                        case 'x':
+                                print_num(va_arg(args, size_t), 16, w, flags);
+                                p++;
+                                break;
+
+                        case 'u':
+                                print_num(va_arg(args, size_t), 10, w, flags);
+                                p++;
+                                break;
+
+                        case 'o':
+                                print_num(va_arg(args, size_t), 8, w, flags);
+                                p++;
+                                break;
+
+                        case 'c':
+                                buf[1] = 0;
+                                buf[0] = va_arg(args, int);
+                                print_string(buf, buf, w, flags, ' ');
+                                p++;
+                                break;
+
+                        case '%':
+                                /*
+                                 * NOTE(review): p is not advanced here, so
+                                 * the second '%' is re-parsed as a directive
+                                 * on the next iteration — confirm "%%"
+                                 * intentionally prints a single '%'.
+                                 */
+                                break;
+
+                        default:
+                                /* Unknown specifier: emit '%' literally. */
+                                dlog_putchar('%');
+                        }
+
+                        break;
+                }
+        }
+
+        unlock();
+}
+
+/**
+ * Prints the given printf-style format string to the debug log. See vdlog
+ * for the supported conversions.
+ */
+void dlog(const char *fmt, ...)
+{
+        va_list args;
+
+        va_start(args, fmt);
+        vdlog(fmt, args);
+        va_end(args);
+}
diff --git a/src/fdt.c b/src/fdt.c
new file mode 100644
index 0000000..d12faf2
--- /dev/null
+++ b/src/fdt.c
@@ -0,0 +1,467 @@
+/*
+ * Copyright 2018 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "hf/fdt.h"
+
+#include <stdalign.h>
+#include <stdint.h>
+
+#include "hf/check.h"
+#include "hf/dlog.h"
+#include "hf/std.h"
+
/**
 * Flattened Device Tree blob header. All fields are stored big-endian in
 * memory; the code below reads them via be32toh().
 */
struct fdt_header {
	uint32_t magic;		   /* Must equal FDT_MAGIC. */
	uint32_t totalsize;	   /* Total size of the blob in bytes. */
	uint32_t off_dt_struct;	   /* Offset of the structure block. */
	uint32_t off_dt_strings;   /* Offset of the strings block. */
	uint32_t off_mem_rsvmap;   /* Offset of the memory reservation map. */
	uint32_t version;	   /* Format version of this blob. */
	uint32_t last_comp_version; /* Oldest compatible version. */
	uint32_t boot_cpuid_phys;
	uint32_t size_dt_strings;  /* Size of the strings block. */
	uint32_t size_dt_struct;   /* Size of the structure block. */
};

/** Entry in the memory reservation map; both fields are big-endian. */
struct fdt_reserve_entry {
	uint64_t address;
	uint64_t size;
};

/** Token values that delimit the FDT structure block. */
enum fdt_token {
	FDT_BEGIN_NODE = 1,
	FDT_END_NODE = 2,
	FDT_PROP = 3,
	FDT_NOP = 4,
	FDT_END = 9,
};

/**
 * Cursor over the structure block: `cur` advances towards `end`; `strs`
 * points at the strings block used to resolve property name offsets.
 */
struct fdt_tokenizer {
	const char *cur;
	const char *end;
	const char *strs;
};

/* FDT format version this parser accepts (see fdt_root_node()). */
#define FDT_VERSION 17
/* Host-order value of the header magic. */
#define FDT_MAGIC 0xd00dfeed

/* Every token in the structure block is 32-bit aligned. */
#define FDT_TOKEN_ALIGNMENT sizeof(uint32_t)
+
+static void fdt_tokenizer_init(struct fdt_tokenizer *t, const char *strs,
+ const char *begin, const char *end)
+{
+ t->strs = strs;
+ t->cur = begin;
+ t->end = end;
+}
+
/** Rounds the cursor up to the next FDT_TOKEN_ALIGNMENT boundary. */
static void fdt_tokenizer_align(struct fdt_tokenizer *t)
{
	t->cur = (char *)align_up(t->cur, FDT_TOKEN_ALIGNMENT);
}
+
+static bool fdt_tokenizer_uint32(struct fdt_tokenizer *t, uint32_t *res)
+{
+ const char *next = t->cur + sizeof(*res);
+
+ if (next > t->end) {
+ return false;
+ }
+
+ *res = be32toh(*(uint32_t *)t->cur);
+ t->cur = next;
+
+ return true;
+}
+
+static bool fdt_tokenizer_token(struct fdt_tokenizer *t, uint32_t *res)
+{
+ uint32_t v;
+
+ while (fdt_tokenizer_uint32(t, &v)) {
+ if (v != FDT_NOP) {
+ *res = v;
+ return true;
+ }
+ }
+ return false;
+}
+
+static bool fdt_tokenizer_bytes(struct fdt_tokenizer *t, const char **res,
+ size_t size)
+{
+ const char *next = t->cur + size;
+
+ if (next > t->end) {
+ return false;
+ }
+
+ *res = t->cur;
+ t->cur = next;
+ fdt_tokenizer_align(t);
+
+ return true;
+}
+
+static bool fdt_tokenizer_str(struct fdt_tokenizer *t, const char **res)
+{
+ const char *p;
+
+ for (p = t->cur; p < t->end; p++) {
+ if (!*p) {
+ /* Found the end of the string. */
+ *res = t->cur;
+ t->cur = p + 1;
+ fdt_tokenizer_align(t);
+ return true;
+ }
+ }
+
+ return false;
+}
+
+bool fdt_root_node(struct fdt_node *node, const struct fdt_header *hdr)
+{
+ uint32_t max_ver;
+ uint32_t min_ver;
+ uint32_t begin = be32toh(hdr->off_dt_struct);
+ uint32_t size = be32toh(hdr->size_dt_struct);
+
+ memset_s(node, sizeof(*node), 0, sizeof(*node));
+
+ /* Check the magic number before anything else. */
+ if (hdr->magic != be32toh(FDT_MAGIC)) {
+ return false;
+ }
+
+ /* Check the version. */
+ max_ver = be32toh(hdr->version);
+ min_ver = be32toh(hdr->last_comp_version);
+ if (FDT_VERSION < min_ver || FDT_VERSION > max_ver) {
+ return false;
+ }
+
+ /* TODO: Verify that it is all within the fdt. */
+ node->begin = (const char *)hdr + begin;
+ node->end = node->begin + size;
+
+ /* TODO: Verify strings as well. */
+ node->strs = (char *)hdr + be32toh(hdr->off_dt_strings);
+
+ return true;
+}
+
/**
 * Reads the next property token, returning the property name (resolved
 * from the strings block), a pointer to its value, and the value size.
 *
 * If the next token is not FDT_PROP the cursor is rewound so the caller
 * sees that token again; if the property is truncated the cursor is moved
 * to the end so no further tokens are produced.
 */
static bool fdt_next_property(struct fdt_tokenizer *t, const char **name,
			      const char **buf, uint32_t *size)
{
	uint32_t token;
	uint32_t nameoff;

	if (!fdt_tokenizer_token(t, &token)) {
		return false;
	}

	if (token != FDT_PROP) {
		/* Rewind so that caller will get the same token. */
		t->cur -= sizeof(uint32_t);
		return false;
	}

	if (!fdt_tokenizer_uint32(t, size) ||
	    !fdt_tokenizer_uint32(t, &nameoff) ||
	    !fdt_tokenizer_bytes(t, buf, *size)) {
		/*
		 * Move cursor to the end so that caller won't get any new
		 * tokens.
		 */
		t->cur = t->end;
		return false;
	}

	/* TODO: Need to verify the strings. */
	*name = t->strs + nameoff;

	return true;
}
+
/**
 * Reads the next FDT_BEGIN_NODE token and the node's name.
 *
 * If the next token is not FDT_BEGIN_NODE the cursor is rewound so the
 * caller sees that token again; if the name is malformed the cursor is
 * moved to the end so no further tokens are produced.
 */
static bool fdt_next_subnode(struct fdt_tokenizer *t, const char **name)
{
	uint32_t token;

	if (!fdt_tokenizer_token(t, &token)) {
		return false;
	}

	if (token != FDT_BEGIN_NODE) {
		/* Rewind so that caller will get the same token. */
		t->cur -= sizeof(uint32_t);
		return false;
	}

	if (!fdt_tokenizer_str(t, name)) {
		/*
		 * Move cursor to the end so that caller won't get any new
		 * tokens.
		 */
		t->cur = t->end;
		return false;
	}

	return true;
}
+
/** Consumes and discards every property at the current position. */
static void fdt_skip_properties(struct fdt_tokenizer *t)
{
	const char *name;
	const char *value;
	uint32_t size;

	while (fdt_next_property(t, &name, &value, &size)) {
		/* Discard. */
	}
}
+
/**
 * Skips the remainder of the node the tokenizer is currently inside —
 * its properties and all nested subnodes — consuming the matching
 * FDT_END_NODE. `pending` counts BEGIN_NODE tokens that have not yet
 * seen their END_NODE.
 *
 * Returns false on malformed input; an unexpected token additionally
 * moves the cursor to the end so no further tokens are produced.
 */
static bool fdt_skip_node(struct fdt_tokenizer *t)
{
	const char *name;
	uint32_t token;
	size_t pending = 1;

	fdt_skip_properties(t);

	do {
		while (fdt_next_subnode(t, &name)) {
			fdt_skip_properties(t);
			pending++;
		}

		if (!fdt_tokenizer_token(t, &token)) {
			return false;
		}

		if (token != FDT_END_NODE) {
			t->cur = t->end;
			return false;
		}

		pending--;
	} while (pending);

	return true;
}
+
+bool fdt_read_property(const struct fdt_node *node, const char *name,
+ const char **buf, uint32_t *size)
+{
+ struct fdt_tokenizer t;
+ const char *prop_name;
+
+ fdt_tokenizer_init(&t, node->strs, node->begin, node->end);
+
+ while (fdt_next_property(&t, &prop_name, buf, size)) {
+ if (!strcmp(prop_name, name)) {
+ return true;
+ }
+ }
+
+ return false;
+}
+
+/**
+ * Helper method for parsing 32/64-bit uints from FDT data.
+ */
+bool fdt_parse_number(const char *data, uint32_t size, uint64_t *value)
+{
+ union {
+ volatile uint64_t v;
+ char a[8];
+ } t;
+
+ /* FDT values should be aligned to 32-bit boundary. */
+ CHECK(is_aligned(data, FDT_TOKEN_ALIGNMENT));
+
+ switch (size) {
+ case sizeof(uint32_t):
+ /*
+ * Assert that `data` is already sufficiently aligned to
+ * dereference as uint32_t. We cannot use static_assert()
+ * because alignof() is not an expression under ISO C11.
+ */
+ CHECK(alignof(uint32_t) <= FDT_TOKEN_ALIGNMENT);
+ *value = be32toh(*(uint32_t *)data);
+ return true;
+ case sizeof(uint64_t):
+ /*
+ * Armv8 requires `data` to be realigned to 64-bit boundary
+ * to dereference as uint64_t. May not be needed on other
+ * architectures.
+ */
+ memcpy_s(t.a, sizeof(t.a), data, sizeof(uint64_t));
+ *value = be64toh(t.v);
+ return true;
+ default:
+ return false;
+ }
+}
+
+bool fdt_first_child(struct fdt_node *node, const char **child_name)
+{
+ struct fdt_tokenizer t;
+
+ fdt_tokenizer_init(&t, node->strs, node->begin, node->end);
+
+ fdt_skip_properties(&t);
+
+ if (!fdt_next_subnode(&t, child_name)) {
+ return false;
+ }
+
+ node->begin = t.cur;
+
+ return true;
+}
+
+bool fdt_next_sibling(struct fdt_node *node, const char **sibling_name)
+{
+ struct fdt_tokenizer t;
+
+ fdt_tokenizer_init(&t, node->strs, node->begin, node->end);
+
+ if (!fdt_skip_node(&t)) {
+ return false;
+ }
+
+ if (!fdt_next_subnode(&t, sibling_name)) {
+ return false;
+ }
+
+ node->begin = t.cur;
+
+ return true;
+}
+
+bool fdt_find_child(struct fdt_node *node, const char *child)
+{
+ struct fdt_tokenizer t;
+ const char *name;
+
+ fdt_tokenizer_init(&t, node->strs, node->begin, node->end);
+
+ fdt_skip_properties(&t);
+
+ while (fdt_next_subnode(&t, &name)) {
+ if (!strcmp(name, child)) {
+ node->begin = t.cur;
+ return true;
+ }
+
+ fdt_skip_node(&t);
+ }
+
+ return false;
+}
+
+void fdt_dump(const struct fdt_header *hdr)
+{
+ uint32_t token;
+ size_t depth = 0;
+ const char *name;
+ struct fdt_tokenizer t;
+ struct fdt_node node;
+
+ /* Traverse the whole thing. */
+ if (!fdt_root_node(&node, hdr)) {
+ dlog("FDT failed validation.\n");
+ return;
+ }
+
+ fdt_tokenizer_init(&t, node.strs, node.begin, node.end);
+
+ do {
+ while (fdt_next_subnode(&t, &name)) {
+ const char *buf;
+ uint32_t size;
+
+ dlog("%*sNew node: \"%s\"\n", 2 * depth, "", name);
+ depth++;
+ while (fdt_next_property(&t, &name, &buf, &size)) {
+ uint32_t i;
+
+ dlog("%*sproperty: \"%s\" (", 2 * depth, "",
+ name);
+ for (i = 0; i < size; i++) {
+ dlog("%s%02x", i == 0 ? "" : " ",
+ buf[i]);
+ }
+ dlog(")\n");
+ }
+ }
+
+ if (!fdt_tokenizer_token(&t, &token)) {
+ return;
+ }
+
+ if (token != FDT_END_NODE) {
+ return;
+ }
+
+ depth--;
+ } while (depth);
+
+ dlog("fdt: off_mem_rsvmap=%u\n", be32toh(hdr->off_mem_rsvmap));
+ {
+ struct fdt_reserve_entry *e =
+ (struct fdt_reserve_entry
+ *)((uintptr_t)hdr +
+ be32toh(hdr->off_mem_rsvmap));
+ while (e->address || e->size) {
+ dlog("Entry: %p (%#x bytes)\n", be64toh(e->address),
+ be64toh(e->size));
+ e++;
+ }
+ }
+}
+
/**
 * Prepends an entry for the range [addr, addr + len) to the FDT's memory
 * reservation map, shifting the structure and strings blocks up by one
 * entry and growing totalsize to match.
 *
 * The caller must have mapped the FDT writable with at least
 * sizeof(struct fdt_reserve_entry) bytes of slack past totalsize (see
 * fdt_patch(), which maps an extra page).
 */
void fdt_add_mem_reservation(struct fdt_header *hdr, uint64_t addr,
			     uint64_t len)
{
	/* TODO: Clean this up. */
	uint8_t *begin = (uint8_t *)hdr + be32toh(hdr->off_mem_rsvmap);
	struct fdt_reserve_entry *e = (struct fdt_reserve_entry *)begin;
	/* Bytes from the start of the reservation map to the end of blob. */
	size_t old_size =
		be32toh(hdr->totalsize) - be32toh(hdr->off_mem_rsvmap);

	/* Grow the blob and shift the offsets of the blocks being moved. */
	hdr->totalsize = htobe32(be32toh(hdr->totalsize) +
				 sizeof(struct fdt_reserve_entry));
	hdr->off_dt_struct = htobe32(be32toh(hdr->off_dt_struct) +
				     sizeof(struct fdt_reserve_entry));
	hdr->off_dt_strings = htobe32(be32toh(hdr->off_dt_strings) +
				      sizeof(struct fdt_reserve_entry));
	/* Move everything after the map start to make room for the entry. */
	memmove_s(begin + sizeof(struct fdt_reserve_entry), old_size, begin,
		  old_size);
	e->address = htobe64(addr);
	e->size = htobe64(len);
}
+
/** Returns the size in bytes of an FDT header. */
size_t fdt_header_size(void)
{
	return sizeof(struct fdt_header);
}
+
/** Returns the total size in bytes of the given FDT blob (host order). */
uint32_t fdt_total_size(struct fdt_header *hdr)
{
	return be32toh(hdr->totalsize);
}
diff --git a/src/fdt_handler.c b/src/fdt_handler.c
new file mode 100644
index 0000000..057a2ca
--- /dev/null
+++ b/src/fdt_handler.c
@@ -0,0 +1,380 @@
+/*
+ * Copyright 2018 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "hf/fdt_handler.h"
+
+#include "hf/boot_params.h"
+#include "hf/check.h"
+#include "hf/cpu.h"
+#include "hf/dlog.h"
+#include "hf/fdt.h"
+#include "hf/layout.h"
+#include "hf/mm.h"
+#include "hf/std.h"
+
/**
 * Reads the named property as a 32- or 64-bit big-endian number,
 * returning its host-order value via `value`. Returns false when the
 * property is absent or has any other size.
 */
static bool fdt_read_number(const struct fdt_node *node, const char *name,
			    uint64_t *value)
{
	const char *data;
	uint32_t size;

	if (!fdt_read_property(node, name, &data, &size)) {
		return false;
	}

	if (size != sizeof(uint32_t) && size != sizeof(uint64_t)) {
		return false;
	}

	/* Cannot fail: the size was checked above. */
	CHECK(fdt_parse_number(data, size, value));

	return true;
}
+
/**
 * Overwrites the value of the named property with `value`, stored in the
 * big-endian representation the FDT uses. The property must already exist
 * with a 32- or 64-bit value; its size is not changed. Returns false when
 * the property is absent or has any other size.
 */
static bool fdt_write_number(struct fdt_node *node, const char *name,
			     uint64_t value)
{
	const char *data;
	uint32_t size;
	union {
		volatile uint64_t v;
		char a[8];
	} t;

	if (!fdt_read_property(node, name, &data, &size)) {
		return false;
	}

	switch (size) {
	case sizeof(uint32_t):
		/*
		 * Host-to-big-endian conversion: htobe32, not be32toh,
		 * which only happened to work because both are the same
		 * byte swap.
		 */
		*(uint32_t *)data = htobe32(value);
		break;

	case sizeof(uint64_t):
		/* Write via memcpy_s as `data` may not be 64-bit aligned. */
		t.v = htobe64(value);
		memcpy_s((void *)data, size, t.a, sizeof(uint64_t));
		break;

	default:
		return false;
	}

	return true;
}
+
+/**
+ * Finds the memory region where initrd is stored.
+ */
+bool fdt_find_initrd(const struct fdt_node *root, paddr_t *begin, paddr_t *end)
+{
+ struct fdt_node n = *root;
+ uint64_t initrd_begin;
+ uint64_t initrd_end;
+
+ if (!fdt_find_child(&n, "chosen")) {
+ dlog("Unable to find 'chosen'\n");
+ return false;
+ }
+
+ if (!fdt_read_number(&n, "linux,initrd-start", &initrd_begin)) {
+ dlog("Unable to read linux,initrd-start\n");
+ return false;
+ }
+
+ if (!fdt_read_number(&n, "linux,initrd-end", &initrd_end)) {
+ dlog("Unable to read linux,initrd-end\n");
+ return false;
+ }
+
+ *begin = pa_init(initrd_begin);
+ *end = pa_init(initrd_end);
+
+ return true;
+}
+
/**
 * Collects the ids ("reg" entries) of every node under /cpus whose
 * device_type is "cpu" into `cpu_ids`, setting `*cpu_count`.
 *
 * Returns false when /cpus is missing or empty, when more than MAX_CPUS
 * ids are found, or when an id cannot be parsed.
 */
bool fdt_find_cpus(const struct fdt_node *root, cpu_id_t *cpu_ids,
		   size_t *cpu_count)
{
	struct fdt_node n = *root;
	const char *name;
	uint64_t address_size;

	*cpu_count = 0;

	if (!fdt_find_child(&n, "cpus")) {
		dlog("Unable to find 'cpus'\n");
		return false;
	}

	/* #address-cells (default 1) gives each "reg" entry's size in cells. */
	if (fdt_read_number(&n, "#address-cells", &address_size)) {
		address_size *= sizeof(uint32_t);
	} else {
		address_size = sizeof(uint32_t);
	}

	if (!fdt_first_child(&n, &name)) {
		return false;
	}

	do {
		const char *data;
		uint32_t size;

		/* Skip nodes that aren't CPUs or that have no "reg". */
		if (!fdt_read_property(&n, "device_type", &data, &size) ||
		    size != sizeof("cpu") ||
		    memcmp(data, "cpu", sizeof("cpu")) != 0 ||
		    !fdt_read_property(&n, "reg", &data, &size)) {
			continue;
		}

		/* Get all entries for this CPU. */
		while (size >= address_size) {
			uint64_t value;

			if (*cpu_count >= MAX_CPUS) {
				dlog("Found more than %d CPUs\n", MAX_CPUS);
				return false;
			}

			if (!fdt_parse_number(data, address_size, &value)) {
				dlog("Could not parse CPU id\n");
				return false;
			}
			cpu_ids[(*cpu_count)++] = value;

			size -= address_size;
			data += address_size;
		}
	} while (fdt_next_sibling(&n, &name));

	return true;
}
+
+bool fdt_find_memory_ranges(const struct fdt_node *root, struct boot_params *p)
+{
+ struct fdt_node n = *root;
+ const char *name;
+ uint64_t address_size;
+ uint64_t size_size;
+ uint64_t entry_size;
+ size_t mem_range_index = 0;
+
+ /* Get the sizes of memory range addresses and sizes. */
+ if (fdt_read_number(&n, "#address-cells", &address_size)) {
+ address_size *= sizeof(uint32_t);
+ } else {
+ address_size = sizeof(uint32_t);
+ }
+
+ if (fdt_read_number(&n, "#size-cells", &size_size)) {
+ size_size *= sizeof(uint32_t);
+ } else {
+ size_size = sizeof(uint32_t);
+ }
+
+ entry_size = address_size + size_size;
+
+ /* Look for nodes with the device_type set to "memory". */
+ if (!fdt_first_child(&n, &name)) {
+ return false;
+ }
+
+ do {
+ const char *data;
+ uint32_t size;
+
+ if (!fdt_read_property(&n, "device_type", &data, &size) ||
+ size != sizeof("memory") ||
+ memcmp(data, "memory", sizeof("memory")) != 0 ||
+ !fdt_read_property(&n, "reg", &data, &size)) {
+ continue;
+ }
+
+ /* Traverse all memory ranges within this node. */
+ while (size >= entry_size) {
+ uintpaddr_t addr;
+ size_t len;
+
+ CHECK(fdt_parse_number(data, address_size, &addr));
+ CHECK(fdt_parse_number(data + address_size, size_size,
+ &len));
+
+ if (mem_range_index < MAX_MEM_RANGES) {
+ p->mem_ranges[mem_range_index].begin =
+ pa_init(addr);
+ p->mem_ranges[mem_range_index].end =
+ pa_init(addr + len);
+ ++mem_range_index;
+ } else {
+ dlog("Found memory range %u in FDT but only "
+ "%u supported, ignoring additional range "
+ "of size %u.\n",
+ mem_range_index, MAX_MEM_RANGES, len);
+ }
+
+ size -= entry_size;
+ data += entry_size;
+ }
+ } while (fdt_next_sibling(&n, &name));
+ p->mem_ranges_count = mem_range_index;
+
+ /* TODO: Check for "reserved-memory" nodes. */
+
+ return true;
+}
+
/**
 * Maps the FDT at `fdt_addr` into stage-1 read-only and validates it,
 * initialising `n` to its root structure block. The header is mapped
 * first to learn the total size, then the whole blob is mapped.
 *
 * Returns NULL on failure; in that case the header mapping is undone.
 */
struct fdt_header *fdt_map(struct mm_stage1_locked stage1_locked,
			   paddr_t fdt_addr, struct fdt_node *n,
			   struct mpool *ppool)
{
	struct fdt_header *fdt;

	/* Map the fdt header in. */
	fdt = mm_identity_map(stage1_locked, fdt_addr,
			      pa_add(fdt_addr, fdt_header_size()), MM_MODE_R,
			      ppool);
	if (!fdt) {
		dlog("Unable to map FDT header.\n");
		return NULL;
	}

	if (!fdt_root_node(n, fdt)) {
		dlog("FDT failed validation.\n");
		goto fail;
	}

	/* Map the rest of the fdt in. */
	fdt = mm_identity_map(stage1_locked, fdt_addr,
			      pa_add(fdt_addr, fdt_total_size(fdt)), MM_MODE_R,
			      ppool);
	if (!fdt) {
		dlog("Unable to map full FDT.\n");
		goto fail;
	}

	return fdt;

fail:
	mm_unmap(stage1_locked, fdt_addr, pa_add(fdt_addr, fdt_header_size()),
		 ppool);
	return NULL;
}
+
+bool fdt_unmap(struct mm_stage1_locked stage1_locked, struct fdt_header *fdt,
+ struct mpool *ppool)
+{
+ paddr_t fdt_addr = pa_from_va(va_from_ptr(fdt));
+
+ return mm_unmap(stage1_locked, fdt_addr,
+ pa_add(fdt_addr, fdt_total_size(fdt)), ppool);
+}
+
/**
 * Patches the FDT at `fdt_addr` for handover to the primary VM: points
 * linux,initrd-start/end at the new ramdisk, and adds memory reservations
 * for the hypervisor's own image and for each reserved secondary-VM range.
 *
 * The FDT is temporarily remapped read/write with one extra page of
 * slack, since fdt_add_mem_reservation() grows the blob. Returns false on
 * any failure, including failure to unmap afterwards.
 */
bool fdt_patch(struct mm_stage1_locked stage1_locked, paddr_t fdt_addr,
	       struct boot_params_update *p, struct mpool *ppool)
{
	struct fdt_header *fdt;
	struct fdt_node n;
	bool ret = false;
	size_t i;

	/* Map the fdt header in. */
	fdt = mm_identity_map(stage1_locked, fdt_addr,
			      pa_add(fdt_addr, fdt_header_size()), MM_MODE_R,
			      ppool);
	if (!fdt) {
		dlog("Unable to map FDT header.\n");
		return false;
	}

	if (!fdt_root_node(&n, fdt)) {
		dlog("FDT failed validation.\n");
		goto err_unmap_fdt_header;
	}

	/* Map the fdt (+ a page) in r/w mode in preparation for updating it. */
	fdt = mm_identity_map(stage1_locked, fdt_addr,
			      pa_add(fdt_addr, fdt_total_size(fdt) + PAGE_SIZE),
			      MM_MODE_R | MM_MODE_W, ppool);
	if (!fdt) {
		dlog("Unable to map FDT in r/w mode.\n");
		goto err_unmap_fdt_header;
	}

	if (!fdt_find_child(&n, "")) {
		dlog("Unable to find FDT root node.\n");
		goto out_unmap_fdt;
	}

	if (!fdt_find_child(&n, "chosen")) {
		dlog("Unable to find 'chosen'\n");
		goto out_unmap_fdt;
	}

	/* Patch FDT to point to new ramdisk. */
	if (!fdt_write_number(&n, "linux,initrd-start",
			      pa_addr(p->initrd_begin))) {
		dlog("Unable to write linux,initrd-start\n");
		goto out_unmap_fdt;
	}

	if (!fdt_write_number(&n, "linux,initrd-end", pa_addr(p->initrd_end))) {
		dlog("Unable to write linux,initrd-end\n");
		goto out_unmap_fdt;
	}

	/*
	 * Patch FDT to reserve hypervisor memory so the primary VM doesn't try
	 * to use it.
	 */
	fdt_add_mem_reservation(
		fdt, pa_addr(layout_text_begin()),
		pa_difference(layout_text_begin(), layout_text_end()));
	fdt_add_mem_reservation(
		fdt, pa_addr(layout_rodata_begin()),
		pa_difference(layout_rodata_begin(), layout_rodata_end()));
	fdt_add_mem_reservation(
		fdt, pa_addr(layout_data_begin()),
		pa_difference(layout_data_begin(), layout_data_end()));

	/* Patch FDT to reserve memory for secondary VMs. */
	for (i = 0; i < p->reserved_ranges_count; ++i) {
		fdt_add_mem_reservation(
			fdt, pa_addr(p->reserved_ranges[i].begin),
			pa_addr(p->reserved_ranges[i].end) -
				pa_addr(p->reserved_ranges[i].begin));
	}

	ret = true;

out_unmap_fdt:
	/* Unmap FDT. */
	if (!mm_unmap(stage1_locked, fdt_addr,
		      pa_add(fdt_addr, fdt_total_size(fdt) + PAGE_SIZE),
		      ppool)) {
		dlog("Unable to unmap writable FDT.\n");
		return false;
	}
	return ret;

err_unmap_fdt_header:
	mm_unmap(stage1_locked, fdt_addr, pa_add(fdt_addr, fdt_header_size()),
		 ppool);
	return false;
}
diff --git a/src/fdt_handler_test.cc b/src/fdt_handler_test.cc
new file mode 100644
index 0000000..acd355e
--- /dev/null
+++ b/src/fdt_handler_test.cc
@@ -0,0 +1,126 @@
+/*
+ * Copyright 2018 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <gmock/gmock.h>
+
+extern "C" {
+#include "hf/boot_params.h"
+#include "hf/fdt_handler.h"
+#include "hf/mpool.h"
+}
+
+#include <memory>
+
+namespace
+{
+using ::testing::Eq;
+using ::testing::NotNull;
+
+constexpr size_t TEST_HEAP_SIZE = PAGE_SIZE * 10;
+
+/*
+ * /dts-v1/;
+ *
+ * / {
+ * #address-cells = <2>;
+ * #size-cells = <2>;
+ *
+ * memory@0 {
+ * device_type = "memory";
+ * reg = <0x00000000 0x00000000 0x00000000 0x20000000
+ * 0x00000000 0x30000000 0x00000000 0x00010000>;
+ * };
+ * memory@1 {
+ * device_type = "memory";
+ * reg = <0x00000000 0x30020000 0x00000000 0x00010000>;
+ * };
+ *
+ * chosen {
+ * linux,initrd-start = <0x00000000>;
+ * linux,initrd-end = <0x00000000>;
+ * };
+ * };
+ *
+ * $ dtc --boot-cpu 0 --in-format dts --out-format dtb --out-version 17 test.dts
+ * | xxd -i
+ */
+
+constexpr uint8_t test_dtb[] = {
+ 0xd0, 0x0d, 0xfe, 0xed, 0x00, 0x00, 0x01, 0x7f, 0x00, 0x00, 0x00, 0x38,
+ 0x00, 0x00, 0x01, 0x30, 0x00, 0x00, 0x00, 0x28, 0x00, 0x00, 0x00, 0x11,
+ 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x4f,
+ 0x00, 0x00, 0x00, 0xf8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x04,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x03,
+ 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x0f, 0x00, 0x00, 0x00, 0x02,
+ 0x00, 0x00, 0x00, 0x01, 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x40, 0x30,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x07,
+ 0x00, 0x00, 0x00, 0x1b, 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x27,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x30, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02,
+ 0x00, 0x00, 0x00, 0x01, 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x40, 0x31,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x07,
+ 0x00, 0x00, 0x00, 0x1b, 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x27,
+ 0x00, 0x00, 0x00, 0x00, 0x30, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x01,
+ 0x63, 0x68, 0x6f, 0x73, 0x65, 0x6e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03,
+ 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x2b, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x3e,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x02,
+ 0x00, 0x00, 0x00, 0x09, 0x23, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73,
+ 0x2d, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x00, 0x23, 0x73, 0x69, 0x7a, 0x65,
+ 0x2d, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x00, 0x64, 0x65, 0x76, 0x69, 0x63,
+ 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x00, 0x72, 0x65, 0x67, 0x00, 0x6c,
+ 0x69, 0x6e, 0x75, 0x78, 0x2c, 0x69, 0x6e, 0x69, 0x74, 0x72, 0x64, 0x2d,
+ 0x73, 0x74, 0x61, 0x72, 0x74, 0x00, 0x6c, 0x69, 0x6e, 0x75, 0x78, 0x2c,
+ 0x69, 0x6e, 0x69, 0x74, 0x72, 0x64, 0x2d, 0x65, 0x6e, 0x64, 0x00};
+
+TEST(fdt, find_memory_ranges)
+{
+ struct mpool ppool;
+ std::unique_ptr<uint8_t[]> test_heap(new uint8_t[TEST_HEAP_SIZE]);
+
+ mpool_init(&ppool, sizeof(struct mm_page_table));
+ mpool_add_chunk(&ppool, test_heap.get(), TEST_HEAP_SIZE);
+ mm_init(&ppool);
+
+ struct fdt_header *fdt;
+ struct fdt_node n;
+ struct boot_params params = {};
+
+ struct mm_stage1_locked mm_stage1_locked = mm_lock_stage1();
+ fdt = fdt_map(mm_stage1_locked, pa_init((uintpaddr_t)&test_dtb), &n,
+ &ppool);
+ ASSERT_THAT(fdt, NotNull());
+ ASSERT_TRUE(fdt_find_child(&n, ""));
+ fdt_find_memory_ranges(&n, ¶ms);
+ ASSERT_TRUE(fdt_unmap(mm_stage1_locked, fdt, &ppool));
+ mm_unlock_stage1(&mm_stage1_locked);
+
+ EXPECT_THAT(params.mem_ranges_count, Eq(3));
+ EXPECT_THAT(pa_addr(params.mem_ranges[0].begin), Eq(0x00000000));
+ EXPECT_THAT(pa_addr(params.mem_ranges[0].end), Eq(0x20000000));
+ EXPECT_THAT(pa_addr(params.mem_ranges[1].begin), Eq(0x30000000));
+ EXPECT_THAT(pa_addr(params.mem_ranges[1].end), Eq(0x30010000));
+ EXPECT_THAT(pa_addr(params.mem_ranges[2].begin), Eq(0x30020000));
+ EXPECT_THAT(pa_addr(params.mem_ranges[2].end), Eq(0x30030000));
+}
+
+} /* namespace */
diff --git a/src/fdt_test.cc b/src/fdt_test.cc
new file mode 100644
index 0000000..0ee534d
--- /dev/null
+++ b/src/fdt_test.cc
@@ -0,0 +1,88 @@
+/*
+ * Copyright 2018 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+extern "C" {
+
+#include "hf/fdt.h"
+}
+
+#include <gmock/gmock.h>
+
+namespace
+{
+using ::testing::Eq;
+
+/*
+ * /dts-v1/;
+ *
+ * / {
+ * model = "SomeModel";
+ * compatible = "Nothing";
+ * #address-cells = <2>;
+ * #size-cells = <2>;
+ *
+ * memory@0 {
+ * device_type = "memory";
+ * reg = <0x00000000 0x00000000 0x00000000 0x20000000>;
+ * };
+ *
+ * cpus {
+ * #address-cells = <1>;
+ * #size-cells = <0>;
+ * };
+ *
+ * };
+ *
+ * $ dtc --boot-cpu 0 --in-format dts --out-format dtb --out-version 17 test.dts
+ * | xxd -i
+ */
+
+const uint8_t test_dtb[] = {
+ 0xd0, 0x0d, 0xfe, 0xed, 0x00, 0x00, 0x01, 0x44, 0x00, 0x00, 0x00, 0x38,
+ 0x00, 0x00, 0x01, 0x08, 0x00, 0x00, 0x00, 0x28, 0x00, 0x00, 0x00, 0x11,
+ 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3c,
+ 0x00, 0x00, 0x00, 0xd0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x0a,
+ 0x00, 0x00, 0x00, 0x00, 0x53, 0x6f, 0x6d, 0x65, 0x4d, 0x6f, 0x64, 0x65,
+ 0x6c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x08,
+ 0x00, 0x00, 0x00, 0x06, 0x4e, 0x6f, 0x74, 0x68, 0x69, 0x6e, 0x67, 0x00,
+ 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x11,
+ 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x04,
+ 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x01,
+ 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x40, 0x30, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x07, 0x00, 0x00, 0x00, 0x2c,
+ 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03,
+ 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x38, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x01, 0x63, 0x70, 0x75, 0x73,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x04,
+ 0x00, 0x00, 0x00, 0x11, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x03,
+ 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x09,
+ 0x6d, 0x6f, 0x64, 0x65, 0x6c, 0x00, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x74,
+ 0x69, 0x62, 0x6c, 0x65, 0x00, 0x23, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73,
+ 0x73, 0x2d, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x00, 0x23, 0x73, 0x69, 0x7a,
+ 0x65, 0x2d, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x00, 0x64, 0x65, 0x76, 0x69,
+ 0x63, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x00, 0x72, 0x65, 0x67, 0x00};
+
/* The header's totalsize field must match the byte length of the blob. */
TEST(fdt, total_size)
{
	EXPECT_THAT(fdt_total_size((struct fdt_header *)&test_dtb[0]),
		    Eq(sizeof(test_dtb)));
}
+
+} /* namespace */
diff --git a/src/init.c b/src/init.c
new file mode 100644
index 0000000..897039f
--- /dev/null
+++ b/src/init.c
@@ -0,0 +1,157 @@
+/*
+ * Copyright 2018 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "hf/arch/init.h"
+
+#include <stdalign.h>
+#include <stddef.h>
+
+#include "hf/api.h"
+#include "hf/boot_flow.h"
+#include "hf/boot_params.h"
+#include "hf/cpio.h"
+#include "hf/cpu.h"
+#include "hf/dlog.h"
+#include "hf/fdt_handler.h"
+#include "hf/load.h"
+#include "hf/mm.h"
+#include "hf/mpool.h"
+#include "hf/panic.h"
+#include "hf/plat/boot_flow.h"
+#include "hf/plat/console.h"
+#include "hf/plat/iommu.h"
+#include "hf/std.h"
+#include "hf/vm.h"
+
+#include "vmapi/hf/call.h"
+
/*
 * Statically allocated backing store for the hypervisor's page tables:
 * room for HEAP_PAGES page tables, aligned as struct mm_page_table
 * requires.
 */
alignas(alignof(
	struct mm_page_table)) char ptable_buf[sizeof(struct mm_page_table) *
					       HEAP_PAGES];

/* Page pool seeded from ptable_buf in one_time_init_mm(); consumed by mm
 * and, eventually, handed to the API page pool in one_time_init(). */
static struct mpool ppool;
+
/**
 * Performs one-time initialisation of memory management for the hypervisor.
 *
 * This is the only C code entry point called with MMU and caching disabled. The
 * page table returned is used to set up the MMU and caches for all subsequent
 * code.
 */
void one_time_init_mm(void)
{
	/* Make sure the console is initialised before calling dlog. */
	plat_console_init();

	dlog("Initialising hafnium\n");

	/* Seed the page pool from the statically allocated buffer. */
	mpool_init(&ppool, sizeof(struct mm_page_table));
	mpool_add_chunk(&ppool, ptable_buf, sizeof(ptable_buf));

	if (!mm_init(&ppool)) {
		panic("mm_init failed");
	}
}
+
+/**
+ * Performs one-time initialisation of the hypervisor.
+ */
+void one_time_init(void)
+{
+ struct fdt_header *fdt;
+ struct fdt_node fdt_root;
+ struct manifest manifest;
+ struct boot_params params;
+ struct boot_params_update update;
+ struct memiter cpio;
+ void *initrd;
+ size_t i;
+ struct mm_stage1_locked mm_stage1_locked;
+
+ arch_one_time_init();
+
+ /* Enable locks now that mm is initialised. */
+ dlog_enable_lock();
+ mpool_enable_locks();
+
+ mm_stage1_locked = mm_lock_stage1();
+
+ fdt = fdt_map(mm_stage1_locked, plat_boot_flow_get_fdt_addr(),
+ &fdt_root, &ppool);
+ if (fdt == NULL) {
+ panic("Unable to map FDT.\n");
+ }
+
+ if (!fdt_find_child(&fdt_root, "")) {
+ panic("Unable to find FDT root node.\n");
+ }
+
+ if (!boot_flow_init(&fdt_root, &manifest, ¶ms)) {
+ panic("Could not parse data from FDT.");
+ }
+
+ if (!plat_iommu_init(&fdt_root, mm_stage1_locked, &ppool)) {
+ panic("Could not initialize IOMMUs.");
+ }
+
+ if (!fdt_unmap(mm_stage1_locked, fdt, &ppool)) {
+ panic("Unable to unmap FDT.\n");
+ }
+
+ cpu_module_init(params.cpu_ids, params.cpu_count);
+
+ for (i = 0; i < params.mem_ranges_count; ++i) {
+ dlog("Memory range: %#x - %#x\n",
+ pa_addr(params.mem_ranges[i].begin),
+ pa_addr(params.mem_ranges[i].end) - 1);
+ }
+
+ dlog("Ramdisk range: %#x - %#x\n", pa_addr(params.initrd_begin),
+ pa_addr(params.initrd_end) - 1);
+
+ /* Map initrd in, and initialise cpio parser. */
+ initrd = mm_identity_map(mm_stage1_locked, params.initrd_begin,
+ params.initrd_end, MM_MODE_R, &ppool);
+ if (!initrd) {
+ panic("Unable to map initrd.");
+ }
+
+ memiter_init(&cpio, initrd,
+ pa_difference(params.initrd_begin, params.initrd_end));
+
+ /* Load all VMs. */
+ update.reserved_ranges_count = 0;
+ if (!load_vms(mm_stage1_locked, &manifest, &cpio, ¶ms, &update,
+ &ppool)) {
+ panic("Unable to load VMs.");
+ }
+
+ if (!boot_flow_update(mm_stage1_locked, &manifest, &update, &cpio,
+ &ppool)) {
+ panic("Unable to update boot flow.");
+ }
+
+ mm_defrag(mm_stage1_locked, &ppool);
+ mm_unlock_stage1(&mm_stage1_locked);
+
+ /* Initialise the API page pool. ppool will be empty from now on. */
+ api_init(&ppool);
+
+ /* Enable TLB invalidation for VM page table updates. */
+ mm_vm_enable_invalidation();
+
+ dlog("Hafnium initialisation completed\n");
+}
diff --git a/src/iommu/BUILD.gn b/src/iommu/BUILD.gn
new file mode 100644
index 0000000..00004f3
--- /dev/null
+++ b/src/iommu/BUILD.gn
@@ -0,0 +1,19 @@
+# Copyright 2019 The Hafnium Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+source_set("absent") {
+ sources = [
+ "absent.c",
+ ]
+}
diff --git a/src/iommu/absent.c b/src/iommu/absent.c
new file mode 100644
index 0000000..c89e9bb
--- /dev/null
+++ b/src/iommu/absent.c
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2019 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "hf/plat/iommu.h"
+
+/**
+ * Stub IOMMU initialisation for platforms with no IOMMU support.
+ * Ignores all arguments and always reports success so boot can proceed.
+ */
+bool plat_iommu_init(const struct fdt_node *fdt_root,
+		     struct mm_stage1_locked stage1_locked, struct mpool *ppool)
+{
+	(void)fdt_root;
+	(void)stage1_locked;
+	(void)ppool;
+
+	return true;
+}
+
+/**
+ * Stub identity-map hook for the absent IOMMU implementation; intentionally
+ * a no-op since there is no IOMMU to program.
+ */
+void plat_iommu_identity_map(struct vm_locked vm_locked, paddr_t begin,
+			     paddr_t end, uint32_t mode)
+{
+	(void)vm_locked;
+	(void)begin;
+	(void)end;
+	(void)mode;
+}
diff --git a/src/layout.c b/src/layout.c
new file mode 100644
index 0000000..edb583d
--- /dev/null
+++ b/src/layout.c
@@ -0,0 +1,148 @@
+/*
+ * Copyright 2018 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "hf/layout.h"
+
+#include "hf/std.h"
+
+/**
+ * Get the address the .text section begins at.
+ *
+ * NOTE: the `*_begin`/`*_end` symbols referenced throughout this file are
+ * expected to be provided by the linker script; only their addresses are
+ * meaningful, never their contents.
+ */
+paddr_t layout_text_begin(void)
+{
+	extern uint8_t text_begin[];
+
+	return pa_init((uintpaddr_t)text_begin);
+}
+
+/**
+ * Get the address the .text section ends at.
+ */
+paddr_t layout_text_end(void)
+{
+	extern uint8_t text_end[];
+
+	return pa_init((uintpaddr_t)text_end);
+}
+
+/**
+ * Get the address the .rodata section begins at.
+ */
+paddr_t layout_rodata_begin(void)
+{
+	extern uint8_t rodata_begin[];
+
+	return pa_init((uintpaddr_t)rodata_begin);
+}
+
+/**
+ * Get the address the .rodata section ends at.
+ */
+paddr_t layout_rodata_end(void)
+{
+	extern uint8_t rodata_end[];
+
+	return pa_init((uintpaddr_t)rodata_end);
+}
+
+/**
+ * Get the address the .data section begins at.
+ */
+paddr_t layout_data_begin(void)
+{
+	extern uint8_t data_begin[];
+
+	return pa_init((uintpaddr_t)data_begin);
+}
+
+/**
+ * Get the address the .data section ends at.
+ */
+paddr_t layout_data_end(void)
+{
+	extern uint8_t data_end[];
+
+	return pa_init((uintpaddr_t)data_end);
+}
+
+/**
+ * Get the address the .initrd section begins at.
+ */
+paddr_t layout_initrd_begin(void)
+{
+	extern uint8_t initrd_begin[];
+
+	return pa_init((uintpaddr_t)initrd_begin);
+}
+
+/**
+ * Get the address the .initrd section ends at.
+ */
+paddr_t layout_initrd_end(void)
+{
+	extern uint8_t initrd_end[];
+
+	return pa_init((uintpaddr_t)initrd_end);
+}
+
+/**
+ * Get the address the .fdt section begins at.
+ */
+paddr_t layout_fdt_begin(void)
+{
+	extern uint8_t fdt_begin[];
+
+	return pa_init((uintpaddr_t)fdt_begin);
+}
+
+/**
+ * Get the address the .fdt section ends at.
+ */
+paddr_t layout_fdt_end(void)
+{
+	extern uint8_t fdt_end[];
+
+	return pa_init((uintpaddr_t)fdt_end);
+}
+
+/**
+ * Get the address the loaded image ends at.
+ */
+paddr_t layout_image_end(void)
+{
+	extern uint8_t image_end[];
+
+	return pa_init((uintpaddr_t)image_end);
+}
+
+/**
+ * Get the address to load the primary VM at.
+ *
+ * This is placed just after the image.
+ */
+paddr_t layout_primary_begin(void)
+{
+	paddr_t image_end = layout_image_end();
+
+	/*
+	 * Linux usually expects to be loaded at offset 0x80000 into a 2MB
+	 * aligned address.
+	 * TODO: This is a hack, and isn't always correct. We should really read
+	 * the alignment from the header of the binary, or have a bootloader
+	 * within the VM do so.
+	 */
+	return pa_init(align_up(pa_addr(image_end), 0x200000) + 0x80000);
+}
diff --git a/src/layout_fake.c b/src/layout_fake.c
new file mode 100644
index 0000000..542f92a
--- /dev/null
+++ b/src/layout_fake.c
@@ -0,0 +1,57 @@
+/*
+ * Copyright 2018 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "hf/layout.h"
+
+/*
+ * Fake implementations of the layout accessors, returning fixed dummy
+ * addresses in place of linker-script symbols. Presumably selected for
+ * host-side unit tests — confirm against the build rules that use this file.
+ */
+paddr_t layout_text_begin(void)
+{
+	return pa_init(0);
+}
+
+paddr_t layout_text_end(void)
+{
+	return pa_init(100);
+}
+
+paddr_t layout_rodata_begin(void)
+{
+	return pa_init(200);
+}
+
+paddr_t layout_rodata_end(void)
+{
+	return pa_init(300);
+}
+
+paddr_t layout_data_begin(void)
+{
+	return pa_init(400);
+}
+
+paddr_t layout_data_end(void)
+{
+	return pa_init(500);
+}
+
+paddr_t layout_image_end(void)
+{
+	return pa_init(600);
+}
+
+/* Matches the 0x80000 kernel offset convention used by the real layout.c. */
+paddr_t layout_primary_begin(void)
+{
+	return pa_init(0x80000);
+}
diff --git a/src/load.c b/src/load.c
new file mode 100644
index 0000000..d69b36d
--- /dev/null
+++ b/src/load.c
@@ -0,0 +1,420 @@
+/*
+ * Copyright 2018 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "hf/load.h"
+
+#include <stdbool.h>
+
+#include "hf/arch/vm.h"
+
+#include "hf/api.h"
+#include "hf/boot_params.h"
+#include "hf/check.h"
+#include "hf/dlog.h"
+#include "hf/layout.h"
+#include "hf/memiter.h"
+#include "hf/mm.h"
+#include "hf/plat/console.h"
+#include "hf/static_assert.h"
+#include "hf/std.h"
+#include "hf/vm.h"
+
+#include "vmapi/hf/call.h"
+
+/**
+ * Copies data to an unmapped location by mapping it for write, copying the
+ * data, then unmapping it.
+ *
+ * The data is written so that it is available to all cores with the cache
+ * disabled. When switching to the partitions, the caching is initially disabled
+ * so the data must be available without the cache.
+ */
+static bool copy_to_unmapped(struct mm_stage1_locked stage1_locked, paddr_t to,
+			     struct memiter *from_it, struct mpool *ppool)
+{
+	const void *from = memiter_base(from_it);
+	size_t size = memiter_size(from_it);
+	paddr_t to_end = pa_add(to, size);
+	void *ptr;
+
+	ptr = mm_identity_map(stage1_locked, to, to_end, MM_MODE_W, ppool);
+	if (!ptr) {
+		return false;
+	}
+
+	memcpy_s(ptr, size, from, size);
+	/* Flush so the copy is visible even with caching disabled (see above). */
+	arch_mm_flush_dcache(ptr, size);
+
+	CHECK(mm_unmap(stage1_locked, to, to_end, ppool));
+
+	return true;
+}
+
+/**
+ * Looks up the VM's kernel file in the cpio archive and copies it into the
+ * range [begin, end).
+ *
+ * An empty `kernel_filename` signals the kernel was preloaded; nothing is
+ * copied and the call succeeds. Fails if the file is missing, too large for
+ * the range, or cannot be copied.
+ */
+static bool load_kernel(struct mm_stage1_locked stage1_locked, paddr_t begin,
+			paddr_t end, const struct manifest_vm *manifest_vm,
+			const struct memiter *cpio, struct mpool *ppool)
+{
+	struct memiter kernel;
+
+	if (string_is_empty(&manifest_vm->kernel_filename)) {
+		/* This signals the kernel has been preloaded. */
+		return true;
+	}
+
+	if (!cpio_get_file(cpio, &manifest_vm->kernel_filename, &kernel)) {
+		dlog("Could not find kernel file \"%s\".\n",
+		     string_data(&manifest_vm->kernel_filename));
+		return false;
+	}
+
+	if (pa_difference(begin, end) < memiter_size(&kernel)) {
+		dlog("Kernel is larger than available memory.\n");
+		return false;
+	}
+
+	if (!copy_to_unmapped(stage1_locked, begin, &kernel, ppool)) {
+		dlog("Unable to copy kernel.\n");
+		return false;
+	}
+
+	return true;
+}
+
+/**
+ * Performs VM loading activities that are common between the primary and
+ * secondaries. Currently this cannot fail and always returns true.
+ */
+static bool load_common(const struct manifest_vm *manifest_vm, struct vm *vm)
+{
+	vm->smc_whitelist = manifest_vm->smc_whitelist;
+
+	/* Initialize architecture-specific features. */
+	arch_vm_features_set(vm);
+
+	return true;
+}
+
+/**
+ * Loads the primary VM: copies its kernel into memory, creates the VM with
+ * one vCPU per physical CPU, maps the device and normal-memory address space
+ * into it, unmaps the hypervisor from it, and turns on its boot vCPU.
+ *
+ * Returns true on success; on failure the partially-initialised VM is left
+ * behind but false is returned so the caller can panic.
+ */
+static bool load_primary(struct mm_stage1_locked stage1_locked,
+			 const struct manifest_vm *manifest_vm,
+			 const struct memiter *cpio,
+			 const struct boot_params *params, struct mpool *ppool)
+{
+	paddr_t primary_begin = layout_primary_begin();
+	struct vm *vm;
+	struct vm_locked vm_locked;
+	struct vcpu_locked vcpu_locked;
+	size_t i;
+	bool ret;
+
+	/*
+	 * TODO: This bound is currently meaningless but will be addressed when
+	 * the manifest specifies the load address.
+	 */
+	paddr_t primary_end = pa_add(primary_begin, 0x8000000);
+
+	if (!load_kernel(stage1_locked, primary_begin, primary_end, manifest_vm,
+			 cpio, ppool)) {
+		/* Fixed: message was missing the trailing newline every other
+		 * dlog call in this file has. */
+		dlog("Unable to load primary kernel.\n");
+		return false;
+	}
+
+	if (!vm_init(MAX_CPUS, ppool, &vm)) {
+		dlog("Unable to initialise primary vm\n");
+		return false;
+	}
+
+	if (vm->id != HF_PRIMARY_VM_ID) {
+		dlog("Primary vm was not given correct id\n");
+		return false;
+	}
+
+	vm_locked = vm_lock(vm);
+
+	if (!load_common(manifest_vm, vm)) {
+		ret = false;
+		goto out;
+	}
+
+	/*
+	 * Map 1TB of address space as device memory to, most likely, make all
+	 * devices available to the primary VM.
+	 *
+	 * TODO: We should do a whitelist rather than a blacklist.
+	 */
+	if (!vm_identity_map(vm_locked, pa_init(0),
+			     pa_init(UINT64_C(1024) * 1024 * 1024 * 1024),
+			     MM_MODE_R | MM_MODE_W | MM_MODE_D, ppool, NULL)) {
+		dlog("Unable to initialise address space for primary vm\n");
+		ret = false;
+		goto out;
+	}
+
+	/* Map normal memory as such to permit caching, execution, etc. */
+	for (i = 0; i < params->mem_ranges_count; ++i) {
+		if (!vm_identity_map(vm_locked, params->mem_ranges[i].begin,
+				     params->mem_ranges[i].end,
+				     MM_MODE_R | MM_MODE_W | MM_MODE_X, ppool,
+				     NULL)) {
+			dlog("Unable to initialise memory for primary vm\n");
+			ret = false;
+			goto out;
+		}
+	}
+
+	if (!vm_unmap_hypervisor(vm_locked, ppool)) {
+		dlog("Unable to unmap hypervisor from primary vm\n");
+		ret = false;
+		goto out;
+	}
+
+	/* Start the boot vCPU at the kernel entry with the boot argument. */
+	vcpu_locked = vcpu_lock(vm_get_vcpu(vm, 0));
+	vcpu_on(vcpu_locked, ipa_from_pa(primary_begin), params->kernel_arg);
+	vcpu_unlock(&vcpu_locked);
+	ret = true;
+
+out:
+	vm_unlock(&vm_locked);
+
+	return ret;
+}
+
+/**
+ * Loads a secondary VM into the memory range [mem_begin, mem_end), grants it
+ * access to that memory and starts its first vCPU at the kernel entry point.
+ */
+static bool load_secondary(struct mm_stage1_locked stage1_locked,
+			   paddr_t mem_begin, paddr_t mem_end,
+			   const struct manifest_vm *manifest_vm,
+			   const struct memiter *cpio, struct mpool *ppool)
+{
+	struct vm *vm;
+	struct vm_locked vm_locked;
+	struct vcpu *vcpu;
+	ipaddr_t secondary_entry;
+	bool ret;
+
+	if (!load_kernel(stage1_locked, mem_begin, mem_end, manifest_vm, cpio,
+			 ppool)) {
+		dlog("Unable to load kernel.\n");
+		return false;
+	}
+
+	if (!vm_init(manifest_vm->secondary.vcpu_count, ppool, &vm)) {
+		dlog("Unable to initialise VM.\n");
+		return false;
+	}
+
+	if (!load_common(manifest_vm, vm)) {
+		return false;
+	}
+
+	vm_locked = vm_lock(vm);
+
+	/* Grant the VM access to the memory. */
+	if (!vm_identity_map(vm_locked, mem_begin, mem_end,
+			     MM_MODE_R | MM_MODE_W | MM_MODE_X, ppool,
+			     &secondary_entry)) {
+		dlog("Unable to initialise memory.\n");
+		ret = false;
+		goto out;
+	}
+
+	dlog("Loaded with %u vCPUs, entry at %#x.\n",
+	     manifest_vm->secondary.vcpu_count, pa_addr(mem_begin));
+
+	/* The VM's view of its memory size is the size of its carved-out
+	 * range. */
+	vcpu = vm_get_vcpu(vm, 0);
+	vcpu_secondary_reset_and_start(vcpu, secondary_entry,
+				       pa_difference(mem_begin, mem_end));
+	ret = true;
+
+out:
+	vm_unlock(&vm_locked);
+
+	return ret;
+}
+
+/**
+ * Try to find a memory range of the given size within the given ranges, and
+ * remove it from them. Return true on success, or false if no large enough
+ * contiguous range is found.
+ *
+ * Memory is always taken from the END of a range, shrinking that range in
+ * place; `update_reserved_ranges` relies on this to compute the reserved
+ * areas afterwards.
+ */
+static bool carve_out_mem_range(struct mem_range *mem_ranges,
+				size_t mem_ranges_count, uint64_t size_to_find,
+				paddr_t *found_begin, paddr_t *found_end)
+{
+	size_t i;
+
+	/*
+	 * TODO(b/116191358): Consider being cleverer about how we pack VMs
+	 * together, with a non-greedy algorithm.
+	 */
+	for (i = 0; i < mem_ranges_count; ++i) {
+		if (size_to_find <=
+		    pa_difference(mem_ranges[i].begin, mem_ranges[i].end)) {
+			/*
+			 * This range is big enough, take some of it from the
+			 * end and reduce its size accordingly.
+			 */
+			*found_end = mem_ranges[i].end;
+			*found_begin = pa_init(pa_addr(mem_ranges[i].end) -
+					       size_to_find);
+			mem_ranges[i].end = *found_begin;
+			return true;
+		}
+	}
+	return false;
+}
+
+/**
+ * Given arrays of memory ranges before and after memory was removed for
+ * secondary VMs, add the difference to the reserved ranges of the given update.
+ * Return true on success, or false if there would be more than MAX_MEM_RANGES
+ * reserved ranges after adding the new ones.
+ * `before` and `after` must be arrays of exactly `mem_ranges_count` elements.
+ */
+static bool update_reserved_ranges(struct boot_params_update *update,
+				   const struct mem_range *before,
+				   const struct mem_range *after,
+				   size_t mem_ranges_count)
+{
+	size_t i;
+
+	for (i = 0; i < mem_ranges_count; ++i) {
+		/* Memory was removed from the start: reserve [before, after). */
+		if (pa_addr(after[i].begin) > pa_addr(before[i].begin)) {
+			if (update->reserved_ranges_count >= MAX_MEM_RANGES) {
+				dlog("Too many reserved ranges after loading "
+				     "secondary VMs.\n");
+				return false;
+			}
+			update->reserved_ranges[update->reserved_ranges_count]
+				.begin = before[i].begin;
+			update->reserved_ranges[update->reserved_ranges_count]
+				.end = after[i].begin;
+			update->reserved_ranges_count++;
+		}
+		/* Memory was removed from the end: reserve [after, before). */
+		if (pa_addr(after[i].end) < pa_addr(before[i].end)) {
+			if (update->reserved_ranges_count >= MAX_MEM_RANGES) {
+				dlog("Too many reserved ranges after loading "
+				     "secondary VMs.\n");
+				return false;
+			}
+			update->reserved_ranges[update->reserved_ranges_count]
+				.begin = after[i].end;
+			update->reserved_ranges[update->reserved_ranges_count]
+				.end = before[i].end;
+			update->reserved_ranges_count++;
+		}
+	}
+
+	return true;
+}
+
+/**
+ * Loads all VMs from the manifest: the primary first, then each secondary
+ * into memory carved out of the available ranges, which is then unmapped from
+ * the primary and reported back as reserved in `update`.
+ */
+bool load_vms(struct mm_stage1_locked stage1_locked,
+	      const struct manifest *manifest, const struct memiter *cpio,
+	      const struct boot_params *params,
+	      struct boot_params_update *update, struct mpool *ppool)
+{
+	struct vm *primary;
+	struct mem_range mem_ranges_available[MAX_MEM_RANGES];
+	struct vm_locked primary_vm_locked;
+	size_t i;
+	bool success = true;
+
+	if (!load_primary(stage1_locked, &manifest->vm[HF_PRIMARY_VM_INDEX],
+			  cpio, params, ppool)) {
+		dlog("Unable to load primary VM.\n");
+		return false;
+	}
+
+	static_assert(
+		sizeof(mem_ranges_available) == sizeof(params->mem_ranges),
+		"mem_range arrays must be the same size for memcpy.");
+	static_assert(sizeof(mem_ranges_available) < 500,
+		      "This will use too much stack, either make "
+		      "MAX_MEM_RANGES smaller or change this.");
+	memcpy_s(mem_ranges_available, sizeof(mem_ranges_available),
+		 params->mem_ranges, sizeof(params->mem_ranges));
+
+	/* Round the last addresses down to the page size. */
+	for (i = 0; i < params->mem_ranges_count; ++i) {
+		mem_ranges_available[i].end = pa_init(align_down(
+			pa_addr(mem_ranges_available[i].end), PAGE_SIZE));
+	}
+
+	primary = vm_find(HF_PRIMARY_VM_ID);
+	primary_vm_locked = vm_lock(primary);
+
+	for (i = 0; i < manifest->vm_count; ++i) {
+		const struct manifest_vm *manifest_vm = &manifest->vm[i];
+		spci_vm_id_t vm_id = HF_VM_ID_OFFSET + i;
+		uint64_t mem_size;
+		paddr_t secondary_mem_begin;
+		paddr_t secondary_mem_end;
+
+		if (vm_id == HF_PRIMARY_VM_ID) {
+			continue;
+		}
+
+		dlog("Loading VM%d: %s.\n", (int)vm_id,
+		     manifest_vm->debug_name);
+
+		mem_size = align_up(manifest_vm->secondary.mem_size, PAGE_SIZE);
+		if (!carve_out_mem_range(mem_ranges_available,
+					 params->mem_ranges_count, mem_size,
+					 &secondary_mem_begin,
+					 &secondary_mem_end)) {
+			/*
+			 * NOTE(review): mem_size is uint64_t but printed with
+			 * %u — confirm dlog's %u consumes a 64-bit argument.
+			 */
+			dlog("Not enough memory (%u bytes).\n", mem_size);
+			continue;
+		}
+
+		if (!load_secondary(stage1_locked, secondary_mem_begin,
+				    secondary_mem_end, manifest_vm, cpio,
+				    ppool)) {
+			dlog("Unable to load VM.\n");
+			continue;
+		}
+
+		/* Deny the primary VM access to this memory. */
+		if (!vm_unmap(primary_vm_locked, secondary_mem_begin,
+			      secondary_mem_end, ppool)) {
+			dlog("Unable to unmap secondary VM from primary VM.\n");
+			success = false;
+			break;
+		}
+	}
+
+	vm_unlock(&primary_vm_locked);
+
+	if (!success) {
+		return false;
+	}
+
+	/*
+	 * Add newly reserved areas to update params by looking at the
+	 * difference between the available ranges from the original params and
+	 * the updated mem_ranges_available. We assume that the number and order
+	 * of available ranges is the same, i.e. we don't remove any ranges
+	 * above, we only make them smaller.
+	 */
+	return update_reserved_ranges(update, params->mem_ranges,
+				      mem_ranges_available,
+				      params->mem_ranges_count);
+}
diff --git a/src/main.c b/src/main.c
new file mode 100644
index 0000000..b12032e
--- /dev/null
+++ b/src/main.c
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2018 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "hf/cpu.h"
+#include "hf/vm.h"
+
+/**
+ * The entry point of CPUs when they are turned on. It is supposed to initialise
+ * all state and return the first vCPU to run.
+ */
+struct vcpu *cpu_main(struct cpu *c)
+{
+	struct vcpu *vcpu;
+
+	/*
+	 * The primary VM's vCPUs are indexed by physical CPU index (the
+	 * primary is created with MAX_CPUS vCPUs), so pick the one matching
+	 * this CPU and bind it to us.
+	 */
+	vcpu = vm_get_vcpu(vm_find(HF_PRIMARY_VM_ID), cpu_index(c));
+	vcpu->cpu = c;
+
+	arch_cpu_init();
+
+	/* Reset the registers to give a clean start for the primary's vCPU. */
+	arch_regs_reset(vcpu);
+
+	return vcpu;
+}
diff --git a/src/manifest.c b/src/manifest.c
new file mode 100644
index 0000000..c3b5566
--- /dev/null
+++ b/src/manifest.c
@@ -0,0 +1,405 @@
+/*
+ * Copyright 2019 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "hf/manifest.h"
+
+#include "hf/addr.h"
+#include "hf/check.h"
+#include "hf/dlog.h"
+#include "hf/fdt.h"
+#include "hf/static_assert.h"
+#include "hf/std.h"
+
+/**
+ * Evaluates `expr` and, if it is not MANIFEST_SUCCESS, returns that error
+ * code from the enclosing function. Used to chain manifest parse steps.
+ */
+#define TRY(expr)                                                 \
+	do {                                                      \
+		enum manifest_return_code ret_code = (expr);      \
+		if (ret_code != MANIFEST_SUCCESS) {               \
+			return ret_code;                          \
+		}                                                 \
+	} while (0)
+
+#define VM_NAME_BUF_SIZE (2 + 5 + 1) /* "vm" + number + null terminator */
+static_assert(MAX_VMS <= 99999, "Insufficient VM_NAME_BUF_SIZE");
+
+/**
+ * Generates a string with the two letters "vm" followed by an integer.
+ * Assumes `buf` is of size VM_NAME_BUF_SIZE.
+ */
+static const char *generate_vm_node_name(char *buf, spci_vm_id_t vm_id)
+{
+	static const char *digits = "0123456789";
+	char *ptr = buf + VM_NAME_BUF_SIZE;
+
+	/* Write digits backwards from the end of the buffer, then "vm". */
+	*(--ptr) = '\0';
+	do {
+		*(--ptr) = digits[vm_id % 10];
+		vm_id /= 10;
+	} while (vm_id);
+	*(--ptr) = 'm';
+	*(--ptr) = 'v';
+
+	/* The name may not start at buf[0]; return the actual start. */
+	return ptr;
+}
+
+/**
+ * Read a boolean property: true if present; false if not. If present, the value
+ * of the property must be empty else it is considered malformed.
+ */
+static enum manifest_return_code read_bool(const struct fdt_node *node,
+					   const char *property, bool *out)
+{
+	const char *data;
+	uint32_t size;
+	bool present = fdt_read_property(node, property, &data, &size);
+
+	if (present && size != 0) {
+		return MANIFEST_ERROR_MALFORMED_BOOLEAN;
+	}
+
+	*out = present;
+	return MANIFEST_SUCCESS;
+}
+
+/**
+ * Reads a required string property into `out`, mapping string_init errors to
+ * manifest error codes.
+ *
+ * NOTE(review): the switch covers every current string_return_code but has no
+ * default, so control would fall off the end of this non-void function if the
+ * enum ever gains a value — consider adding a default case.
+ */
+static enum manifest_return_code read_string(const struct fdt_node *node,
+					     const char *property,
+					     struct string *out)
+{
+	const char *data;
+	uint32_t size;
+
+	if (!fdt_read_property(node, property, &data, &size)) {
+		return MANIFEST_ERROR_PROPERTY_NOT_FOUND;
+	}
+
+	switch (string_init(out, data, size)) {
+	case STRING_SUCCESS:
+		return MANIFEST_SUCCESS;
+	case STRING_ERROR_INVALID_INPUT:
+		return MANIFEST_ERROR_MALFORMED_STRING;
+	case STRING_ERROR_TOO_LONG:
+		return MANIFEST_ERROR_STRING_TOO_LONG;
+	}
+}
+
+/**
+ * Like read_string but a missing property is not an error: `out` is set to
+ * the empty string instead.
+ */
+static enum manifest_return_code read_optional_string(
+	const struct fdt_node *node, const char *property, struct string *out)
+{
+	enum manifest_return_code ret;
+
+	ret = read_string(node, property, out);
+	if (ret == MANIFEST_ERROR_PROPERTY_NOT_FOUND) {
+		string_init_empty(out);
+		ret = MANIFEST_SUCCESS;
+	}
+	return ret;
+}
+
+/**
+ * Reads a required integer property (FDT big-endian cell encoding) into
+ * `out`.
+ */
+static enum manifest_return_code read_uint64(const struct fdt_node *node,
+					     const char *property,
+					     uint64_t *out)
+{
+	const char *data;
+	uint32_t size;
+
+	if (!fdt_read_property(node, property, &data, &size)) {
+		return MANIFEST_ERROR_PROPERTY_NOT_FOUND;
+	}
+
+	if (!fdt_parse_number(data, size, out)) {
+		return MANIFEST_ERROR_MALFORMED_INTEGER;
+	}
+
+	return MANIFEST_SUCCESS;
+}
+
+/**
+ * Reads a required integer property and range-checks it into a uint16_t.
+ */
+static enum manifest_return_code read_uint16(const struct fdt_node *node,
+					     const char *property,
+					     uint16_t *out)
+{
+	uint64_t value;
+
+	TRY(read_uint64(node, property, &value));
+
+	if (value > UINT16_MAX) {
+		return MANIFEST_ERROR_INTEGER_OVERFLOW;
+	}
+
+	*out = (uint16_t)value;
+	return MANIFEST_SUCCESS;
+}
+
+/** Iterator over the raw bytes of a `<u32 ...>` list property value. */
+struct uint32list_iter {
+	struct memiter mem_it;
+};
+
+/**
+ * Reads an optional list-of-u32 property. A missing property yields an empty
+ * iterator; a present one must have a size that is a multiple of 4 bytes.
+ */
+static enum manifest_return_code read_optional_uint32list(
+	const struct fdt_node *node, const char *property,
+	struct uint32list_iter *out)
+{
+	const char *data;
+	uint32_t size;
+
+	if (!fdt_read_property(node, property, &data, &size)) {
+		memiter_init(&out->mem_it, NULL, 0);
+		return MANIFEST_SUCCESS;
+	}
+
+	if ((size % sizeof(uint32_t)) != 0) {
+		return MANIFEST_ERROR_MALFORMED_INTEGER_LIST;
+	}
+
+	memiter_init(&out->mem_it, data, size);
+	return MANIFEST_SUCCESS;
+}
+
+/**
+ * Represents the value of property whose type is a list of strings. These are
+ * encoded as one contiguous byte buffer with NULL-separated entries.
+ */
+struct stringlist_iter {
+	struct memiter mem_it;
+};
+
+/**
+ * Reads a required string-list property. The value must be NULL-terminated;
+ * the terminator itself is excluded from the iterator's range.
+ */
+static enum manifest_return_code read_stringlist(const struct fdt_node *node,
+						 const char *property,
+						 struct stringlist_iter *out)
+{
+	const char *data;
+	uint32_t size;
+
+	if (!fdt_read_property(node, property, &data, &size)) {
+		return MANIFEST_ERROR_PROPERTY_NOT_FOUND;
+	}
+
+	/*
+	 * Require that the value ends with a NULL terminator. Other NULL
+	 * characters separate the string list entries.
+	 */
+	if (data[size - 1] != '\0') {
+		return MANIFEST_ERROR_MALFORMED_STRING_LIST;
+	}
+
+	memiter_init(&out->mem_it, data, size - 1);
+	return MANIFEST_SUCCESS;
+}
+
+/** Returns true while the iterator has unread uint32_t entries. */
+static bool uint32list_has_next(const struct uint32list_iter *list)
+{
+	return memiter_size(&list->mem_it) > 0;
+}
+
+/**
+ * Returns the next entry of the list and advances the iterator. The caller
+ * must ensure an entry is available (see uint32list_has_next).
+ */
+static uint32_t uint32list_get_next(struct uint32list_iter *list)
+{
+	const char *mem_base = memiter_base(&list->mem_it);
+	uint64_t num;
+
+	CHECK(uint32list_has_next(list));
+
+	/*
+	 * The list size was validated to be a multiple of sizeof(uint32_t)
+	 * when the iterator was created, so a whole entry is available here.
+	 * The previous code returned MANIFEST_ERROR_MALFORMED_INTEGER cast to
+	 * uint32_t on parse failure, silently conflating an error code with
+	 * list data and leaving the iterator unadvanced; treat it as an
+	 * invariant violation instead.
+	 */
+	CHECK(fdt_parse_number(mem_base, sizeof(uint32_t), &num));
+
+	memiter_advance(&list->mem_it, sizeof(uint32_t));
+	return num;
+}
+
+/** Returns true while the iterator has unread string entries. */
+static bool stringlist_has_next(const struct stringlist_iter *list)
+{
+	return memiter_size(&list->mem_it) > 0;
+}
+
+/**
+ * Sets `out` to the next string entry (without its NULL terminator) and
+ * advances the list iterator past it. The caller must ensure an entry is
+ * available (see stringlist_has_next).
+ */
+static void stringlist_get_next(struct stringlist_iter *list,
+				struct memiter *out)
+{
+	const char *mem_base = memiter_base(&list->mem_it);
+	size_t mem_size = memiter_size(&list->mem_it);
+	const char *null_term;
+
+	CHECK(stringlist_has_next(list));
+
+	null_term = memchr(mem_base, '\0', mem_size);
+	if (null_term == NULL) {
+		/*
+		 * NULL terminator not found, this is the last entry.
+		 * Set entry memiter to the entire byte range and advance list
+		 * memiter to the end of the byte range.
+		 */
+		memiter_init(out, mem_base, mem_size);
+		memiter_advance(&list->mem_it, mem_size);
+	} else {
+		/*
+		 * Found NULL terminator. Set entry memiter to byte range
+		 * [base, null) and move list memiter past the terminator.
+		 */
+		size_t entry_size = null_term - mem_base;
+
+		memiter_init(out, mem_base, entry_size);
+		memiter_advance(&list->mem_it, entry_size + 1);
+	}
+}
+
+/**
+ * Returns true if the string list contains an entry equal to `str`.
+ * Iterates over a copy so the caller's iterator is not consumed.
+ */
+static bool stringlist_contains(const struct stringlist_iter *list,
+				const char *str)
+{
+	struct stringlist_iter it = *list;
+	struct memiter entry;
+
+	while (stringlist_has_next(&it)) {
+		stringlist_get_next(&it, &entry);
+		if (memiter_iseq(&entry, str)) {
+			return true;
+		}
+	}
+	return false;
+}
+
+/**
+ * Parses the manifest node of a single VM into `vm`.
+ *
+ * Primary VM entries take an optional ramdisk filename; secondary entries
+ * require `mem_size` and `vcpu_count`. SMC whitelist entries beyond MAX_SMCS
+ * are logged and dropped rather than treated as an error.
+ */
+static enum manifest_return_code parse_vm(struct fdt_node *node,
+					  struct manifest_vm *vm,
+					  spci_vm_id_t vm_id)
+{
+	struct uint32list_iter smcs;
+
+	TRY(read_string(node, "debug_name", &vm->debug_name));
+	TRY(read_optional_string(node, "kernel_filename",
+				 &vm->kernel_filename));
+
+	TRY(read_optional_uint32list(node, "smc_whitelist", &smcs));
+	while (uint32list_has_next(&smcs) &&
+	       vm->smc_whitelist.smc_count < MAX_SMCS) {
+		vm->smc_whitelist.smcs[vm->smc_whitelist.smc_count++] =
+			uint32list_get_next(&smcs);
+	}
+
+	if (uint32list_has_next(&smcs)) {
+		dlog("%s SMC whitelist too long.\n", vm->debug_name);
+	}
+
+	TRY(read_bool(node, "smc_whitelist_permissive",
+		      &vm->smc_whitelist.permissive));
+
+	if (vm_id == HF_PRIMARY_VM_ID) {
+		TRY(read_optional_string(node, "ramdisk_filename",
+					 &vm->primary.ramdisk_filename));
+	} else {
+		TRY(read_uint64(node, "mem_size", &vm->secondary.mem_size));
+		TRY(read_uint16(node, "vcpu_count", &vm->secondary.vcpu_count));
+	}
+	return MANIFEST_SUCCESS;
+}
+
+/**
+ * Parse manifest from FDT.
+ *
+ * Validates the "hypervisor" node and its "compatible" property, rejects VM
+ * nodes using reserved IDs, then parses consecutively numbered "vmN" nodes
+ * until the first missing one. Requires a primary VM entry.
+ */
+enum manifest_return_code manifest_init(struct manifest *manifest,
+					const struct fdt_node *fdt_root)
+{
+	char vm_name_buf[VM_NAME_BUF_SIZE];
+	struct fdt_node hyp_node;
+	struct stringlist_iter compatible_list;
+	size_t i = 0;
+	bool found_primary_vm = false;
+
+	memset_s(manifest, sizeof(*manifest), 0, sizeof(*manifest));
+
+	/* Find hypervisor node. */
+	hyp_node = *fdt_root;
+	if (!fdt_find_child(&hyp_node, "hypervisor")) {
+		return MANIFEST_ERROR_NO_HYPERVISOR_FDT_NODE;
+	}
+
+	/* Check "compatible" property. */
+	TRY(read_stringlist(&hyp_node, "compatible", &compatible_list));
+	if (!stringlist_contains(&compatible_list, "hafnium,hafnium")) {
+		return MANIFEST_ERROR_NOT_COMPATIBLE;
+	}
+
+	/* Iterate over reserved VM IDs and check no such nodes exist. */
+	for (i = 0; i < HF_VM_ID_OFFSET; i++) {
+		spci_vm_id_t vm_id = (spci_vm_id_t)i;
+		struct fdt_node vm_node = hyp_node;
+		const char *vm_name = generate_vm_node_name(vm_name_buf, vm_id);
+
+		if (fdt_find_child(&vm_node, vm_name)) {
+			return MANIFEST_ERROR_RESERVED_VM_ID;
+		}
+	}
+
+	/*
+	 * Iterate over VM nodes until we find one that does not exist.
+	 * Deliberately runs MAX_VMS + 1 times so a manifest with too many VM
+	 * nodes is detected rather than silently truncated.
+	 */
+	for (i = 0; i <= MAX_VMS; ++i) {
+		spci_vm_id_t vm_id = HF_VM_ID_OFFSET + i;
+		struct fdt_node vm_node = hyp_node;
+		const char *vm_name = generate_vm_node_name(vm_name_buf, vm_id);
+
+		if (!fdt_find_child(&vm_node, vm_name)) {
+			break;
+		}
+
+		if (i == MAX_VMS) {
+			return MANIFEST_ERROR_TOO_MANY_VMS;
+		}
+
+		if (vm_id == HF_PRIMARY_VM_ID) {
+			CHECK(found_primary_vm == false); /* sanity check */
+			found_primary_vm = true;
+		}
+
+		manifest->vm_count = i + 1;
+		TRY(parse_vm(&vm_node, &manifest->vm[i], vm_id));
+	}
+
+	if (!found_primary_vm) {
+		return MANIFEST_ERROR_NO_PRIMARY_VM;
+	}
+
+	return MANIFEST_SUCCESS;
+}
+
+/**
+ * Returns a human-readable message for a manifest return code. Panics on a
+ * value outside the enum, which keeps the error strings exhaustive.
+ */
+const char *manifest_strerror(enum manifest_return_code ret_code)
+{
+	switch (ret_code) {
+	case MANIFEST_SUCCESS:
+		return "Success";
+	case MANIFEST_ERROR_NO_HYPERVISOR_FDT_NODE:
+		return "Could not find \"hypervisor\" node in manifest";
+	case MANIFEST_ERROR_NOT_COMPATIBLE:
+		return "Hypervisor manifest entry not compatible with Hafnium";
+	case MANIFEST_ERROR_RESERVED_VM_ID:
+		return "Manifest defines a VM with a reserved ID";
+	case MANIFEST_ERROR_NO_PRIMARY_VM:
+		return "Manifest does not contain a primary VM entry";
+	case MANIFEST_ERROR_TOO_MANY_VMS:
+		return "Manifest specifies more VMs than Hafnium has "
+		       "statically allocated space for";
+	case MANIFEST_ERROR_PROPERTY_NOT_FOUND:
+		return "Property not found";
+	case MANIFEST_ERROR_MALFORMED_STRING:
+		return "Malformed string property";
+	case MANIFEST_ERROR_STRING_TOO_LONG:
+		return "String too long";
+	case MANIFEST_ERROR_MALFORMED_STRING_LIST:
+		return "Malformed string list property";
+	case MANIFEST_ERROR_MALFORMED_INTEGER:
+		return "Malformed integer property";
+	case MANIFEST_ERROR_INTEGER_OVERFLOW:
+		return "Integer overflow";
+	case MANIFEST_ERROR_MALFORMED_INTEGER_LIST:
+		return "Malformed integer list property";
+	case MANIFEST_ERROR_MALFORMED_BOOLEAN:
+		return "Malformed boolean property";
+	}
+
+	panic("Unexpected manifest return code.");
+}
diff --git a/src/manifest_test.cc b/src/manifest_test.cc
new file mode 100644
index 0000000..fdfd7f5
--- /dev/null
+++ b/src/manifest_test.cc
@@ -0,0 +1,589 @@
+/*
+ * Copyright 2019 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <array>
+#include <cstdio>
+#include <span>
+#include <sstream>
+
+#include <gmock/gmock.h>
+
+extern "C" {
+#include "hf/manifest.h"
+}
+
+namespace
+{
+using ::testing::ElementsAre;
+using ::testing::Eq;
+using ::testing::IsEmpty;
+using ::testing::NotNull;
+
+/**
+ * Forks a child process which runs `program` with `args`, feeds `stdin` to
+ * the child's standard input and collects its standard output into `stdout`.
+ *
+ * This is a test-only helper: unrecoverable errors (pipe/fork failure, I/O
+ * errors, exec failure) terminate the process rather than being reported to
+ * the caller.
+ */
+template <typename T>
+void exec(const char *program, const char *args[], const T &stdin,
+	  std::vector<char> *stdout)
+{
+	/* Create two pipes, one for stdin and one for stdout. */
+	int pipes[2][2];
+	if (pipe(pipes[0]) != 0 || pipe(pipes[1]) != 0) {
+		std::cerr << "Failed to create pipes" << std::endl;
+		exit(1);
+	}
+
+	/* Assign FDs for reading/writing by the parent/child. */
+	int parent_read_fd = pipes[1][0];  /* stdout pipe, read FD */
+	int parent_write_fd = pipes[0][1]; /* stdin pipe, write FD */
+	int child_read_fd = pipes[0][0];   /* stdin pipe, read FD */
+	int child_write_fd = pipes[1][1];  /* stdout pipe, write FD */
+
+	pid_t pid = fork();
+	if (pid < 0) {
+		/* fork() failed; previously -1 was mistaken for "parent". */
+		std::cerr << "Failed to fork" << std::endl;
+		exit(1);
+	}
+
+	if (pid) {
+		/* Parent process. */
+		std::array<char, 128> buf;
+		ssize_t res;
+
+		/* Close child FDs which won't be used. */
+		close(child_read_fd);
+		close(child_write_fd);
+
+		/* Write to stdin. */
+		for (size_t count = 0; count < stdin.size();) {
+			res = write(parent_write_fd, stdin.data() + count,
+				    stdin.size() - count);
+			if (res < 0) {
+				std::cerr << "IO error" << std::endl;
+				exit(1);
+			}
+			count += res;
+		}
+		close(parent_write_fd);
+
+		/* Read from stdout. */
+		while (true) {
+			res = read(parent_read_fd, buf.data(), buf.size());
+			if (res == 0) {
+				/* EOF */
+				break;
+			} else if (res < 0) {
+				std::cerr << "IO error" << std::endl;
+				exit(1);
+			}
+			stdout->insert(stdout->end(), buf.begin(),
+				       buf.begin() + res);
+		}
+		close(parent_read_fd);
+	} else {
+		/* Child process. */
+
+		/* Redirect stdin/stdout to read/write FDs. */
+		dup2(child_read_fd, STDIN_FILENO);
+		dup2(child_write_fd, STDOUT_FILENO);
+
+		/* Close all FDs which are now unused. */
+		close(child_read_fd);
+		close(child_write_fd);
+		close(parent_read_fd);
+		close(parent_write_fd);
+
+		/* Execute the given program. */
+		execv(program, const_cast<char *const *>(args));
+
+		/*
+		 * execv only returns on failure. Exit immediately so the
+		 * child cannot fall through and run the test framework a
+		 * second time.
+		 */
+		std::cerr << "Failed to exec " << program << std::endl;
+		_exit(1);
+	}
+}
+
+/**
+ * Class for programatically building a Device Tree.
+ *
+ * Usage:
+ * std::vector<char> dtb = ManifestDtBuilder()
+ * .Command1()
+ * .Command2()
+ * ...
+ * .CommandN()
+ * .Build();
+ */
+class ManifestDtBuilder
+{
+       public:
+	ManifestDtBuilder()
+	{
+		dts_ << "/dts-v1/;" << std::endl;
+		dts_ << std::endl;
+
+		/* Start root node. */
+		StartChild("/");
+	}
+
+	/*
+	 * Finishes the source and compiles it to a DTB by piping it through
+	 * the dtc.py wrapper script. If `dump` is true the generated DTS is
+	 * printed to stderr first (handy when debugging a failing test).
+	 */
+	std::vector<char> Build(bool dump = false)
+	{
+		const char *program = "./build/image/dtc.py";
+		const char *dtc_args[] = {program, "compile", NULL};
+		std::vector<char> dtc_stdout;
+
+		/* Finish root node. */
+		EndChild();
+
+		if (dump) {
+			Dump();
+		}
+
+		exec(program, dtc_args, dts_.str(), &dtc_stdout);
+		return dtc_stdout;
+	}
+
+	/* Prints the DTS generated so far to stderr. */
+	void Dump()
+	{
+		std::cerr << dts_.str() << std::endl;
+	}
+
+	/* Opens a node `name { ... };`. Must be paired with EndChild(). */
+	ManifestDtBuilder &StartChild(const std::string_view &name)
+	{
+		dts_ << name << " {" << std::endl;
+		return *this;
+	}
+
+	/* Closes the node opened by the matching StartChild(). */
+	ManifestDtBuilder &EndChild()
+	{
+		dts_ << "};" << std::endl;
+		return *this;
+	}
+
+	/* "compatible" string list; defaults to the value Hafnium accepts. */
+	ManifestDtBuilder &Compatible(const std::vector<std::string_view>
+					      &value = {"hafnium,hafnium"})
+	{
+		return StringListProperty("compatible", value);
+	}
+
+	ManifestDtBuilder &DebugName(const std::string_view &value)
+	{
+		return StringProperty("debug_name", value);
+	}
+
+	ManifestDtBuilder &KernelFilename(const std::string_view &value)
+	{
+		return StringProperty("kernel_filename", value);
+	}
+
+	ManifestDtBuilder &RamdiskFilename(const std::string_view &value)
+	{
+		return StringProperty("ramdisk_filename", value);
+	}
+
+	ManifestDtBuilder &VcpuCount(uint32_t value)
+	{
+		return IntegerProperty("vcpu_count", value);
+	}
+
+	ManifestDtBuilder &MemSize(uint32_t value)
+	{
+		return IntegerProperty("mem_size", value);
+	}
+
+	ManifestDtBuilder &SmcWhitelist(const std::vector<uint32_t> &value)
+	{
+		return IntegerListProperty("smc_whitelist", value);
+	}
+
+	ManifestDtBuilder &SmcWhitelistPermissive()
+	{
+		return BooleanProperty("smc_whitelist_permissive");
+	}
+
+	/*
+	 * Emits `name = value;` with `value` copied verbatim, i.e. without
+	 * quoting or bracketing. Lets tests write deliberately malformed
+	 * properties.
+	 */
+	ManifestDtBuilder &Property(const std::string_view &name,
+				    const std::string_view &value)
+	{
+		dts_ << name << " = " << value << ";" << std::endl;
+		return *this;
+	}
+
+       private:
+	/* Emits `name = "value";`. */
+	ManifestDtBuilder &StringProperty(const std::string_view &name,
+					  const std::string_view &value)
+	{
+		dts_ << name << " = \"" << value << "\";" << std::endl;
+		return *this;
+	}
+
+	/* Emits `name = "a", "b", ...;`. */
+	ManifestDtBuilder &StringListProperty(
+		const std::string_view &name,
+		const std::vector<std::string_view> &value)
+	{
+		bool is_first = true;
+
+		dts_ << name << " = ";
+		for (const std::string_view &entry : value) {
+			if (is_first) {
+				is_first = false;
+			} else {
+				dts_ << ", ";
+			}
+			dts_ << "\"" << entry << "\"";
+		}
+		dts_ << ";" << std::endl;
+		return *this;
+	}
+
+	/* Emits `name = <value>;`. */
+	ManifestDtBuilder &IntegerProperty(const std::string_view &name,
+					   uint32_t value)
+	{
+		dts_ << name << " = <" << value << ">;" << std::endl;
+		return *this;
+	}
+
+	/* Emits `name = < a b ... >;`. */
+	ManifestDtBuilder &IntegerListProperty(
+		const std::string_view &name,
+		const std::vector<uint32_t> &value)
+	{
+		dts_ << name << " = < ";
+		for (const uint32_t entry : value) {
+			dts_ << entry << " ";
+		}
+		dts_ << ">;" << std::endl;
+		return *this;
+	}
+
+	/* Emits a valueless property `name;`. */
+	ManifestDtBuilder &BooleanProperty(const std::string_view &name)
+	{
+		dts_ << name << ";" << std::endl;
+		return *this;
+	}
+
+	/* Accumulated DTS source, consumed by Build(). */
+	std::stringstream dts_;
+};
+
+/*
+ * Interprets the raw DTB bytes as an FDT and descends into the (unnamed)
+ * root node, leaving `fdt_root` pointing at it.
+ */
+static bool get_fdt_root(const std::vector<char> &dtb,
+			 struct fdt_node *fdt_root)
+{
+	const auto *header =
+		reinterpret_cast<const struct fdt_header *>(dtb.data());
+
+	if (!fdt_root_node(fdt_root, header)) {
+		return false;
+	}
+	return fdt_find_child(fdt_root, "");
+}
+
+TEST(manifest, no_hypervisor_node)
+{
+	/* A device tree with no "hypervisor" node must be rejected. */
+	struct manifest m;
+	struct fdt_node fdt_root;
+	std::vector<char> dtb = ManifestDtBuilder().Build();
+
+	ASSERT_TRUE(get_fdt_root(dtb, &fdt_root));
+	ASSERT_EQ(manifest_init(&m, &fdt_root),
+		  MANIFEST_ERROR_NO_HYPERVISOR_FDT_NODE);
+}
+
+TEST(manifest, no_compatible_property)
+{
+	/* The "hypervisor" node must carry a "compatible" property. */
+	struct manifest m;
+	struct fdt_node fdt_root;
+
+	/* clang-format off */
+	std::vector<char> dtb = ManifestDtBuilder()
+		.StartChild("hypervisor")
+		.EndChild()
+		.Build();
+	/* clang-format on */
+
+	ASSERT_TRUE(get_fdt_root(dtb, &fdt_root));
+	ASSERT_EQ(manifest_init(&m, &fdt_root),
+		  MANIFEST_ERROR_PROPERTY_NOT_FOUND);
+}
+
+TEST(manifest, not_compatible)
+{
+	/* A "compatible" list without "hafnium,hafnium" is rejected. */
+	struct manifest m;
+	struct fdt_node fdt_root;
+
+	/* clang-format off */
+	std::vector<char> dtb = ManifestDtBuilder()
+		.StartChild("hypervisor")
+			.Compatible({ "foo,bar" })
+		.EndChild()
+		.Build();
+	/* clang-format on */
+
+	ASSERT_TRUE(get_fdt_root(dtb, &fdt_root));
+	ASSERT_EQ(manifest_init(&m, &fdt_root), MANIFEST_ERROR_NOT_COMPATIBLE);
+}
+
+TEST(manifest, compatible_one_of_many)
+{
+	/* "hafnium,hafnium" may appear anywhere in the "compatible" list. */
+	struct manifest m;
+	struct fdt_node fdt_root;
+
+	/* clang-format off */
+	std::vector<char> dtb = ManifestDtBuilder()
+		.StartChild("hypervisor")
+			.Compatible({ "foo,bar", "hafnium,hafnium" })
+			.StartChild("vm1")
+				.DebugName("primary")
+			.EndChild()
+		.EndChild()
+		.Build();
+	/* clang-format on */
+
+	ASSERT_TRUE(get_fdt_root(dtb, &fdt_root));
+	ASSERT_EQ(manifest_init(&m, &fdt_root), MANIFEST_SUCCESS);
+}
+
+TEST(manifest, no_vm_nodes)
+{
+	/* A manifest with no VM entries at all lacks a primary VM. */
+	struct manifest m;
+	struct fdt_node fdt_root;
+
+	/* clang-format off */
+	std::vector<char> dtb = ManifestDtBuilder()
+		.StartChild("hypervisor")
+			.Compatible()
+		.EndChild()
+		.Build();
+	/* clang-format on */
+
+	ASSERT_TRUE(get_fdt_root(dtb, &fdt_root));
+	ASSERT_EQ(manifest_init(&m, &fdt_root), MANIFEST_ERROR_NO_PRIMARY_VM);
+}
+
+/*
+ * Builds a manifest whose "vm1" debug_name is exactly at (valid == true) or
+ * one byte over (valid == false) the STRING_MAX_SIZE limit, including the
+ * NUL terminator.
+ */
+static std::vector<char> gen_long_string_dtb(bool valid)
+{
+	const char last_valid[] = "1234567890123456789012345678901";
+	const char first_invalid[] = "12345678901234567890123456789012";
+	static_assert(sizeof(last_valid) == STRING_MAX_SIZE);
+	static_assert(sizeof(first_invalid) == STRING_MAX_SIZE + 1);
+
+	/* clang-format off */
+	return ManifestDtBuilder()
+		.StartChild("hypervisor")
+			.Compatible()
+			.StartChild("vm1")
+				.DebugName(valid ? last_valid : first_invalid)
+			.EndChild()
+		.EndChild()
+		.Build();
+	/* clang-format on */
+}
+
+TEST(manifest, long_string)
+{
+	/* Boundary test of the STRING_MAX_SIZE limit on string properties. */
+	struct manifest m;
+	struct fdt_node fdt_root;
+
+	std::vector<char> dtb_last_valid = gen_long_string_dtb(true);
+	std::vector<char> dtb_first_invalid = gen_long_string_dtb(false);
+
+	ASSERT_TRUE(get_fdt_root(dtb_last_valid, &fdt_root));
+	ASSERT_EQ(manifest_init(&m, &fdt_root), MANIFEST_SUCCESS);
+
+	ASSERT_TRUE(get_fdt_root(dtb_first_invalid, &fdt_root));
+	ASSERT_EQ(manifest_init(&m, &fdt_root), MANIFEST_ERROR_STRING_TOO_LONG);
+}
+
+TEST(manifest, reserved_vm_id)
+{
+	/* A "vm0" node would use a reserved VM ID and must be rejected. */
+	struct manifest m;
+	struct fdt_node fdt_root;
+
+	/* clang-format off */
+	std::vector<char> dtb = ManifestDtBuilder()
+		.StartChild("hypervisor")
+			.Compatible()
+			.StartChild("vm1")
+				.DebugName("primary_vm")
+			.EndChild()
+			.StartChild("vm0")
+				.DebugName("reserved_vm")
+				.VcpuCount(1)
+				.MemSize(0x1000)
+				.KernelFilename("kernel")
+			.EndChild()
+		.EndChild()
+		.Build();
+	/* clang-format on */
+
+	ASSERT_TRUE(get_fdt_root(dtb, &fdt_root));
+	ASSERT_EQ(manifest_init(&m, &fdt_root), MANIFEST_ERROR_RESERVED_VM_ID);
+}
+
+/*
+ * Builds a manifest whose secondary VM requests `vcpu_count` vCPUs, used to
+ * probe the 16-bit limit on the parsed value.
+ */
+static std::vector<char> gen_vcpu_count_limit_dtb(uint32_t vcpu_count)
+{
+	/* clang-format off */
+	return ManifestDtBuilder()
+		.StartChild("hypervisor")
+			.Compatible()
+			.StartChild("vm1")
+				.DebugName("primary_vm")
+			.EndChild()
+			.StartChild("vm2")
+				.DebugName("secondary_vm")
+				.VcpuCount(vcpu_count)
+				.MemSize(0x1000)
+				.KernelFilename("kernel")
+			.EndChild()
+		.EndChild()
+		.Build();
+	/* clang-format on */
+}
+
+TEST(manifest, vcpu_count_limit)
+{
+	/* vcpu_count must fit in 16 bits: UINT16_MAX ok, one more fails. */
+	struct manifest m;
+	struct fdt_node fdt_root;
+	std::vector<char> dtb_last_valid = gen_vcpu_count_limit_dtb(UINT16_MAX);
+	std::vector<char> dtb_first_invalid =
+		gen_vcpu_count_limit_dtb(UINT16_MAX + 1);
+
+	ASSERT_TRUE(get_fdt_root(dtb_last_valid, &fdt_root));
+	ASSERT_EQ(manifest_init(&m, &fdt_root), MANIFEST_SUCCESS);
+	ASSERT_EQ(m.vm_count, 2);
+	ASSERT_EQ(m.vm[1].secondary.vcpu_count, UINT16_MAX);
+
+	ASSERT_TRUE(get_fdt_root(dtb_first_invalid, &fdt_root));
+	ASSERT_EQ(manifest_init(&m, &fdt_root),
+		  MANIFEST_ERROR_INTEGER_OVERFLOW);
+}
+
+TEST(manifest, no_ramdisk_primary)
+{
+	/* ramdisk_filename is optional; it defaults to the empty string. */
+	struct manifest m;
+	struct fdt_node fdt_root;
+
+	/* clang-format off */
+	std::vector<char> dtb = ManifestDtBuilder()
+		.StartChild("hypervisor")
+			.Compatible()
+			.StartChild("vm1")
+				.DebugName("primary_vm")
+			.EndChild()
+		.EndChild()
+		.Build();
+	/* clang-format on */
+
+	ASSERT_TRUE(get_fdt_root(dtb, &fdt_root));
+	ASSERT_EQ(manifest_init(&m, &fdt_root), MANIFEST_SUCCESS);
+	ASSERT_EQ(m.vm_count, 1);
+	ASSERT_STREQ(string_data(&m.vm[0].debug_name), "primary_vm");
+	ASSERT_STREQ(string_data(&m.vm[0].primary.ramdisk_filename), "");
+}
+
+/*
+ * Builds a manifest where the boolean "smc_whitelist_permissive" property is
+ * given a raw `value` instead of being valueless, so it parses as malformed.
+ */
+static std::vector<char> gen_malformed_boolean_dtb(
+	const std::string_view &value)
+{
+	/* clang-format off */
+	return ManifestDtBuilder()
+		.StartChild("hypervisor")
+			.Compatible()
+			.StartChild("vm1")
+				.DebugName("primary_vm")
+				.Property("smc_whitelist_permissive", value)
+			.EndChild()
+		.EndChild()
+		.Build();
+	/* clang-format on */
+}
+
+TEST(manifest, malformed_booleans)
+{
+	/* Booleans must be valueless; any explicit value is malformed. */
+	struct manifest m;
+	struct fdt_node fdt_root;
+
+	std::vector<char> dtb_false = gen_malformed_boolean_dtb("\"false\"");
+	std::vector<char> dtb_true = gen_malformed_boolean_dtb("\"true\"");
+	std::vector<char> dtb_0 = gen_malformed_boolean_dtb("\"<0>\"");
+	std::vector<char> dtb_1 = gen_malformed_boolean_dtb("\"<1>\"");
+
+	ASSERT_TRUE(get_fdt_root(dtb_false, &fdt_root));
+	ASSERT_EQ(manifest_init(&m, &fdt_root),
+		  MANIFEST_ERROR_MALFORMED_BOOLEAN);
+
+	ASSERT_TRUE(get_fdt_root(dtb_true, &fdt_root));
+	ASSERT_EQ(manifest_init(&m, &fdt_root),
+		  MANIFEST_ERROR_MALFORMED_BOOLEAN);
+
+	ASSERT_TRUE(get_fdt_root(dtb_0, &fdt_root));
+	ASSERT_EQ(manifest_init(&m, &fdt_root),
+		  MANIFEST_ERROR_MALFORMED_BOOLEAN);
+
+	ASSERT_TRUE(get_fdt_root(dtb_1, &fdt_root));
+	ASSERT_EQ(manifest_init(&m, &fdt_root),
+		  MANIFEST_ERROR_MALFORMED_BOOLEAN);
+}
+
+TEST(manifest, valid)
+{
+	/*
+	 * A fully populated manifest parses successfully. Note the VM nodes
+	 * are deliberately listed out of order (vm1, vm3, vm2) but the
+	 * parsed entries are indexed by VM ID (vm1, vm2, vm3).
+	 */
+	struct manifest m;
+	struct manifest_vm *vm;
+	struct fdt_node fdt_root;
+
+	/* clang-format off */
+	std::vector<char> dtb = ManifestDtBuilder()
+		.StartChild("hypervisor")
+			.Compatible()
+			.StartChild("vm1")
+				.DebugName("primary_vm")
+				.KernelFilename("primary_kernel")
+				.RamdiskFilename("primary_ramdisk")
+				.SmcWhitelist({0x32000000, 0x33001111})
+			.EndChild()
+			.StartChild("vm3")
+				.DebugName("second_secondary_vm")
+				.VcpuCount(43)
+				.MemSize(0x12345)
+				.KernelFilename("second_secondary_kernel")
+			.EndChild()
+			.StartChild("vm2")
+				.DebugName("first_secondary_vm")
+				.VcpuCount(42)
+				.MemSize(12345)
+				.SmcWhitelist({0x04000000, 0x30002222, 0x31445566})
+				.SmcWhitelistPermissive()
+			.EndChild()
+		.EndChild()
+		.Build();
+	/* clang-format on */
+
+	ASSERT_TRUE(get_fdt_root(dtb, &fdt_root));
+
+	ASSERT_EQ(manifest_init(&m, &fdt_root), MANIFEST_SUCCESS);
+	ASSERT_EQ(m.vm_count, 3);
+
+	vm = &m.vm[0];
+	ASSERT_STREQ(string_data(&vm->debug_name), "primary_vm");
+	ASSERT_STREQ(string_data(&vm->kernel_filename), "primary_kernel");
+	ASSERT_STREQ(string_data(&vm->primary.ramdisk_filename),
+		     "primary_ramdisk");
+	ASSERT_THAT(
+		std::span(vm->smc_whitelist.smcs, vm->smc_whitelist.smc_count),
+		ElementsAre(0x32000000, 0x33001111));
+	ASSERT_FALSE(vm->smc_whitelist.permissive);
+
+	vm = &m.vm[1];
+	ASSERT_STREQ(string_data(&vm->debug_name), "first_secondary_vm");
+	ASSERT_STREQ(string_data(&vm->kernel_filename), "");
+	ASSERT_EQ(vm->secondary.vcpu_count, 42);
+	ASSERT_EQ(vm->secondary.mem_size, 12345);
+	ASSERT_THAT(
+		std::span(vm->smc_whitelist.smcs, vm->smc_whitelist.smc_count),
+		ElementsAre(0x04000000, 0x30002222, 0x31445566));
+	ASSERT_TRUE(vm->smc_whitelist.permissive);
+
+	vm = &m.vm[2];
+	ASSERT_STREQ(string_data(&vm->debug_name), "second_secondary_vm");
+	ASSERT_STREQ(string_data(&vm->kernel_filename),
+		     "second_secondary_kernel");
+	ASSERT_EQ(vm->secondary.vcpu_count, 43);
+	ASSERT_EQ(vm->secondary.mem_size, 0x12345);
+	ASSERT_THAT(
+		std::span(vm->smc_whitelist.smcs, vm->smc_whitelist.smc_count),
+		IsEmpty());
+	ASSERT_FALSE(vm->smc_whitelist.permissive);
+}
+
+} /* namespace */
diff --git a/src/memiter.c b/src/memiter.c
new file mode 100644
index 0000000..d72250c
--- /dev/null
+++ b/src/memiter.c
@@ -0,0 +1,154 @@
+/*
+ * Copyright 2018 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "hf/memiter.h"
+
+#include "hf/dlog.h"
+#include "hf/std.h"
+
+/**
+ * Initialises the given memory iterator.
+ */
+void memiter_init(struct memiter *it, const void *data, size_t size)
+{
+	const char *start = data;
+
+	it->next = start;
+	it->limit = start + size;
+}
+
+/**
+ * Returns true if the character at the current position is one of the four
+ * ASCII whitespace characters (space, tab, newline, carriage return).
+ * The caller must ensure the iterator is not at its limit.
+ */
+static bool memiter_isspace(struct memiter *it)
+{
+	const char c = *it->next;
+
+	return c == ' ' || c == '\t' || c == '\n' || c == '\r';
+}
+
+/**
+ * Advances the iterator past any run of whitespace characters, stopping at
+ * the first non-whitespace character or at the end of the buffer.
+ */
+static void memiter_skip_space(struct memiter *it)
+{
+	while (it->next < it->limit) {
+		if (!memiter_isspace(it)) {
+			break;
+		}
+		++it->next;
+	}
+}
+
+/**
+ * Compares the remaining contents of the iterator against a null-terminated
+ * string; true only on an exact, full-length match.
+ */
+bool memiter_iseq(const struct memiter *it, const char *str)
+{
+	size_t remaining = (size_t)(it->limit - it->next);
+
+	/*
+	 * The bound of remaining + 1 lets us distinguish a string that is
+	 * exactly `remaining` bytes long from a longer one.
+	 */
+	return strnlen_s(str, remaining + 1) == remaining &&
+	       memcmp(it->next, str, remaining) == 0;
+}
+
+/**
+ * Retrieves the next whitespace-delimited token and describes it via the
+ * sub-iterator "str". Returns false if only whitespace (or nothing) remains.
+ */
+bool memiter_parse_str(struct memiter *it, struct memiter *str)
+{
+	memiter_skip_space(it);
+	if (it->next >= it->limit) {
+		/* Nothing but whitespace was left. */
+		return false;
+	}
+
+	/* The token spans from here to the next whitespace or the limit. */
+	str->next = it->next;
+	while (it->next < it->limit && !memiter_isspace(it)) {
+		++it->next;
+	}
+	str->limit = it->next;
+
+	return true;
+}
+
+/**
+ * Parses the next whitespace-delimited run of decimal digits as a 64-bit
+ * unsigned integer. Returns false if no digit follows the whitespace.
+ *
+ * NOTE: values exceeding UINT64_MAX wrap silently (unsigned arithmetic);
+ * there is no overflow detection.
+ */
+bool memiter_parse_uint(struct memiter *it, uint64_t *value)
+{
+	uint64_t result = 0;
+
+	memiter_skip_space(it);
+
+	/* Require at least one digit; the bounds check guards the read. */
+	if (it->next >= it->limit || *it->next < '0' || *it->next > '9') {
+		return false;
+	}
+
+	while (it->next < it->limit && *it->next >= '0' && *it->next <= '9') {
+		result = (result * 10) + (uint64_t)(*it->next - '0');
+		++it->next;
+	}
+
+	*value = result;
+
+	return true;
+}
+
+/**
+ * Advances the iterator by the given number of bytes. Returns true if the
+ * iterator was advanced without going over its limit; returns false and
+ * leaves the iterator unmodified otherwise.
+ */
+bool memiter_advance(struct memiter *it, size_t v)
+{
+	const char *target = it->next + v;
+
+	/* Reject pointer wrap-around as well as moves past the limit. */
+	if (target < it->next || target > it->limit) {
+		return false;
+	}
+
+	it->next = target;
+
+	return true;
+}
+
+/**
+ * Returns a pointer to the iterator's current position.
+ */
+const void *memiter_base(const struct memiter *it)
+{
+	return it->next;
+}
+
+/**
+ * Returns the number of bytes in interval [it.next, it.limit).
+ */
+size_t memiter_size(const struct memiter *it)
+{
+	return (size_t)(it->limit - it->next);
+}
diff --git a/src/mm.c b/src/mm.c
new file mode 100644
index 0000000..83bf9bd
--- /dev/null
+++ b/src/mm.c
@@ -0,0 +1,1053 @@
+/*
+ * Copyright 2018 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "hf/mm.h"
+
+#include <stdatomic.h>
+#include <stdint.h>
+
+#include "hf/check.h"
+#include "hf/dlog.h"
+#include "hf/layout.h"
+#include "hf/plat/console.h"
+#include "hf/static_assert.h"
+
+/**
+ * This file has functions for managing the level 1 and 2 page tables used by
+ * Hafnium. There is a level 1 mapping used by Hafnium itself to access memory,
+ * and then a level 2 mapping per VM. The design assumes that all page tables
+ * contain only 1-1 mappings, aligned on the block boundaries.
+ */
+
+/*
+ * For stage 2, the input is an intermediate physical addresses rather than a
+ * virtual address so:
+ */
+static_assert(
+ sizeof(ptable_addr_t) == sizeof(uintpaddr_t),
+ "Currently, the same code manages the stage 1 and stage 2 page tables "
+ "which only works if the virtual and intermediate physical addresses "
+ "are the same size. It looks like that assumption might not be holding "
+ "so we need to check that everything is going to be ok.");
+
+static struct mm_ptable ptable;
+static struct spinlock ptable_lock;
+
+static bool mm_stage2_invalidate = false;
+
+/**
+ * After calling this function, modifications to stage-2 page tables will use
+ * break-before-make and invalidate the TLB for the affected range.
+ *
+ * This is a one-way switch: there is no corresponding call to disable it
+ * again.
+ */
+void mm_vm_enable_invalidation(void)
+{
+	mm_stage2_invalidate = true;
+}
+
+/**
+ * Get the page table from the physical address.
+ *
+ * Relies on the 1-1 mapping assumption described at the top of this file:
+ * the physical address is converted to a virtual address and then to a
+ * directly usable pointer.
+ */
+static struct mm_page_table *mm_page_table_from_pa(paddr_t pa)
+{
+	return ptr_from_va(va_from_pa(pa));
+}
+
+/**
+ * Rounds an address down to a page boundary.
+ *
+ * Works because PAGE_SIZE is a power of two: the mask clears the low bits.
+ */
+static ptable_addr_t mm_round_down_to_page(ptable_addr_t addr)
+{
+	return addr & ~((ptable_addr_t)(PAGE_SIZE - 1));
+}
+
+/**
+ * Rounds an address up to a page boundary.
+ *
+ * Adding PAGE_SIZE - 1 before rounding down turns the round-down into a
+ * round-up; an already aligned address is returned unchanged.
+ */
+static ptable_addr_t mm_round_up_to_page(ptable_addr_t addr)
+{
+	return mm_round_down_to_page(addr + PAGE_SIZE - 1);
+}
+
+/**
+ * Calculates the size of the address space represented by a page table entry
+ * at the given level.
+ *
+ * Level 0 gives PAGE_SIZE; each further level multiplies by
+ * 2^PAGE_LEVEL_BITS.
+ */
+static size_t mm_entry_size(uint8_t level)
+{
+	return UINT64_C(1) << (PAGE_BITS + level * PAGE_LEVEL_BITS);
+}
+
+/**
+ * Gets the address of the start of the next block of the given size. The size
+ * must be a power of two.
+ *
+ * Equivalently: rounds `addr` up to the next multiple of `block_size`,
+ * always moving forward by at least one byte.
+ */
+static ptable_addr_t mm_start_of_next_block(ptable_addr_t addr,
+					    size_t block_size)
+{
+	return (addr + block_size) & ~(block_size - 1);
+}
+
+/**
+ * Gets the physical address of the start of the next block of the given size.
+ * The size must be a power of two.
+ *
+ * Same computation as mm_start_of_next_block() but operating on paddr_t.
+ */
+static paddr_t mm_pa_start_of_next_block(paddr_t pa, size_t block_size)
+{
+	return pa_init((pa_addr(pa) + block_size) & ~(block_size - 1));
+}
+
+/**
+ * For a given address, calculates the maximum (plus one) address that can be
+ * represented by the same table at the given level.
+ *
+ * I.e. rounds `addr` up to the end of the level-(level+1) entry containing
+ * it.
+ */
+static ptable_addr_t mm_level_end(ptable_addr_t addr, uint8_t level)
+{
+	size_t offset = PAGE_BITS + (level + 1) * PAGE_LEVEL_BITS;
+
+	return ((addr >> offset) + 1) << offset;
+}
+
+/**
+ * For a given address, calculates the index at which its entry is stored in a
+ * table at the given level.
+ *
+ * Extracts the PAGE_LEVEL_BITS-wide field of the address corresponding to
+ * the given level.
+ */
+static size_t mm_index(ptable_addr_t addr, uint8_t level)
+{
+	ptable_addr_t v = addr >> (PAGE_BITS + level * PAGE_LEVEL_BITS);
+
+	return v & ((UINT64_C(1) << PAGE_LEVEL_BITS) - 1);
+}
+
+/**
+ * Allocates a new page table.
+ *
+ * A single table comes straight from the pool; multiple tables are taken
+ * from the pool's contiguous allocator.
+ */
+static struct mm_page_table *mm_alloc_page_tables(size_t count,
+						  struct mpool *ppool)
+{
+	if (count == 1) {
+		return mpool_alloc(ppool);
+	}
+
+	return mpool_alloc_contiguous(ppool, count, count);
+}
+
+/**
+ * Returns the maximum level in the page table given the flags.
+ */
+static uint8_t mm_max_level(int flags)
+{
+	if (flags & MM_FLAG_STAGE1) {
+		return arch_mm_stage1_max_level();
+	}
+	return arch_mm_stage2_max_level();
+}
+
+/**
+ * Returns the number of root-level tables given the flags.
+ */
+static uint8_t mm_root_table_count(int flags)
+{
+	if (flags & MM_FLAG_STAGE1) {
+		return arch_mm_stage1_root_table_count();
+	}
+	return arch_mm_stage2_root_table_count();
+}
+
+/**
+ * Invalidates the TLB for the given address range, dispatching to the
+ * stage-1 or stage-2 architecture primitive as indicated by the flags.
+ */
+static void mm_invalidate_tlb(ptable_addr_t begin, ptable_addr_t end, int flags)
+{
+	if (flags & MM_FLAG_STAGE1) {
+		arch_mm_invalidate_stage1_range(va_init(begin), va_init(end));
+		return;
+	}
+
+	arch_mm_invalidate_stage2_range(ipa_init(begin), ipa_init(end));
+}
+
+/**
+ * Frees all page-table-related memory associated with the given pte at the
+ * given level, including any subtables recursively.
+ *
+ * Recursion depth is bounded by the number of page table levels.
+ */
+static void mm_free_page_pte(pte_t pte, uint8_t level, struct mpool *ppool)
+{
+	struct mm_page_table *subtable;
+	uint64_t idx;
+
+	/* Absent and block entries own no memory. */
+	if (!arch_mm_pte_is_table(pte, level)) {
+		return;
+	}
+
+	/* Recursively free any subtables, then the table itself. */
+	subtable = mm_page_table_from_pa(arch_mm_table_from_pte(pte, level));
+	for (idx = 0; idx < MM_PTE_PER_PAGE; idx++) {
+		mm_free_page_pte(subtable->entries[idx], level - 1, ppool);
+	}
+
+	mpool_free(ppool, subtable);
+}
+
+/**
+ * Returns the first address which cannot be encoded in page tables given by
+ * `flags`. It is the exclusive end of the address space created by the tables.
+ *
+ * The root "level" is mm_max_level() + 1, so each root table covers
+ * mm_entry_size(max_level + 1) bytes.
+ */
+ptable_addr_t mm_ptable_addr_space_end(int flags)
+{
+	return mm_root_table_count(flags) *
+	       mm_entry_size(mm_max_level(flags) + 1);
+}
+
+/**
+ * Initialises the given page table: allocates the root tables and fills
+ * every root entry with an absent pte. Returns false if the allocation
+ * fails.
+ */
+bool mm_ptable_init(struct mm_ptable *t, int flags, struct mpool *ppool)
+{
+	struct mm_page_table *tables;
+	uint8_t root_table_count = mm_root_table_count(flags);
+	uint8_t table_idx;
+	size_t entry_idx;
+
+	tables = mm_alloc_page_tables(root_table_count, ppool);
+	if (tables == NULL) {
+		return false;
+	}
+
+	for (table_idx = 0; table_idx < root_table_count; table_idx++) {
+		for (entry_idx = 0; entry_idx < MM_PTE_PER_PAGE; entry_idx++) {
+			tables[table_idx].entries[entry_idx] =
+				arch_mm_absent_pte(mm_max_level(flags));
+		}
+	}
+
+	/*
+	 * TODO: halloc could return a virtual or physical address if mm not
+	 * enabled?
+	 */
+	t->root = pa_init((uintpaddr_t)tables);
+
+	return true;
+}
+
+/**
+ * Frees all memory associated with the given page table: every subtable
+ * reachable from the root entries, then the root tables themselves, which
+ * are returned to the pool as one chunk.
+ */
+static void mm_ptable_fini(struct mm_ptable *t, int flags, struct mpool *ppool)
+{
+	struct mm_page_table *tables = mm_page_table_from_pa(t->root);
+	uint8_t max_level = mm_max_level(flags);
+	uint8_t root_table_count = mm_root_table_count(flags);
+	uint8_t table_idx;
+	uint64_t entry_idx;
+
+	for (table_idx = 0; table_idx < root_table_count; table_idx++) {
+		for (entry_idx = 0; entry_idx < MM_PTE_PER_PAGE; entry_idx++) {
+			mm_free_page_pte(tables[table_idx].entries[entry_idx],
+					 max_level, ppool);
+		}
+	}
+
+	mpool_add_chunk(ppool, tables,
+			sizeof(struct mm_page_table) * root_table_count);
+}
+
+/**
+ * Replaces a page table entry with the given value. If both old and new values
+ * are valid, it performs a break-before-make sequence where it first writes an
+ * invalid value to the PTE, flushes the TLB, then writes the actual new value.
+ * This is to prevent cases where CPUs have different 'valid' values in their
+ * TLBs, which may result in issues for example in cache coherency.
+ *
+ * Stage-1 entries always get the sequence; stage-2 entries only after
+ * mm_vm_enable_invalidation() has been called.
+ */
+static void mm_replace_entry(ptable_addr_t begin, pte_t *pte, pte_t new_pte,
+			     uint8_t level, int flags, struct mpool *ppool)
+{
+	pte_t v = *pte;
+
+	/*
+	 * We need to do the break-before-make sequence if both values are
+	 * present and the TLB is being invalidated.
+	 */
+	if (((flags & MM_FLAG_STAGE1) || mm_stage2_invalidate) &&
+	    arch_mm_pte_is_valid(v, level) &&
+	    arch_mm_pte_is_valid(new_pte, level)) {
+		*pte = arch_mm_absent_pte(level);
+		mm_invalidate_tlb(begin, begin + mm_entry_size(level), flags);
+	}
+
+	/* Assign the new pte. */
+	*pte = new_pte;
+
+	/* Free pages that aren't in use anymore. */
+	mm_free_page_pte(v, level, ppool);
+}
+
+/**
+ * Populates the provided page table entry with a reference to another table if
+ * needed, that is, if it does not yet point to another table.
+ *
+ * If the entry was a block, the new subtable is filled with smaller blocks
+ * that together cover the same range with the same attributes, so the
+ * mapping is unchanged.
+ *
+ * Returns a pointer to the table the entry now points to, or NULL if a table
+ * could not be allocated.
+ */
+static struct mm_page_table *mm_populate_table_pte(ptable_addr_t begin,
+						   pte_t *pte, uint8_t level,
+						   int flags,
+						   struct mpool *ppool)
+{
+	struct mm_page_table *ntable;
+	pte_t v = *pte;
+	pte_t new_pte;
+	size_t i;
+	size_t inc;
+	uint8_t level_below = level - 1;
+
+	/* Just return pointer to table if it's already populated. */
+	if (arch_mm_pte_is_table(v, level)) {
+		return mm_page_table_from_pa(arch_mm_table_from_pte(v, level));
+	}
+
+	/* Allocate a new table. */
+	ntable = mm_alloc_page_tables(1, ppool);
+	if (ntable == NULL) {
+		dlog("Failed to allocate memory for page table\n");
+		return NULL;
+	}
+
+	/* Determine template for new pte and its increment. */
+	if (arch_mm_pte_is_block(v, level)) {
+		inc = mm_entry_size(level_below);
+		new_pte = arch_mm_block_pte(level_below,
+					    arch_mm_block_from_pte(v, level),
+					    arch_mm_pte_attrs(v, level));
+	} else {
+		inc = 0;
+		new_pte = arch_mm_absent_pte(level_below);
+	}
+
+	/* Initialise entries in the new table. */
+	for (i = 0; i < MM_PTE_PER_PAGE; i++) {
+		ntable->entries[i] = new_pte;
+		new_pte += inc;
+	}
+
+	/*
+	 * Ensure initialisation is visible before updating the pte: the
+	 * fence must come before the entry is published below.
+	 */
+	atomic_thread_fence(memory_order_release);
+
+	/* Replace the pte entry, doing a break-before-make if needed. */
+	mm_replace_entry(begin, pte,
+			 arch_mm_table_pte(level, pa_init((uintpaddr_t)ntable)),
+			 level, flags, ppool);
+
+	return ntable;
+}
+
+/**
+ * Updates the page table at the given level to map the given address range to a
+ * physical range using the provided (architecture-specific) attributes. Or if
+ * MM_FLAG_UNMAP is set, unmap the given range instead.
+ *
+ * This function calls itself recursively if it needs to update additional
+ * levels, but the recursion is bound by the maximum number of levels in a page
+ * table.
+ *
+ * Without MM_FLAG_COMMIT this is a dry run: entries are not replaced, but
+ * any subtables needed are still allocated and populated, which is what
+ * makes a later committing pass able to succeed without allocating.
+ */
+static bool mm_map_level(ptable_addr_t begin, ptable_addr_t end, paddr_t pa,
+			 uint64_t attrs, struct mm_page_table *table,
+			 uint8_t level, int flags, struct mpool *ppool)
+{
+	pte_t *pte = &table->entries[mm_index(begin, level)];
+	ptable_addr_t level_end = mm_level_end(begin, level);
+	size_t entry_size = mm_entry_size(level);
+	bool commit = flags & MM_FLAG_COMMIT;
+	bool unmap = flags & MM_FLAG_UNMAP;
+
+	/* Cap end so that we don't go over the current level max. */
+	if (end > level_end) {
+		end = level_end;
+	}
+
+	/* Fill each entry in the table. */
+	while (begin < end) {
+		if (unmap ? !arch_mm_pte_is_present(*pte, level)
+			  : arch_mm_pte_is_block(*pte, level) &&
+				    arch_mm_pte_attrs(*pte, level) == attrs) {
+			/*
+			 * If the entry is already mapped with the right
+			 * attributes, or already absent in the case of
+			 * unmapping, no need to do anything; carry on to the
+			 * next entry.
+			 */
+		} else if ((end - begin) >= entry_size &&
+			   (unmap || arch_mm_is_block_allowed(level)) &&
+			   (begin & (entry_size - 1)) == 0) {
+			/*
+			 * If the entire entry is within the region we want to
+			 * map, map/unmap the whole entry.
+			 */
+			if (commit) {
+				pte_t new_pte =
+					unmap ? arch_mm_absent_pte(level)
+					      : arch_mm_block_pte(level, pa,
+								  attrs);
+				mm_replace_entry(begin, pte, new_pte, level,
+						 flags, ppool);
+			}
+		} else {
+			/*
+			 * If the entry is already a subtable get it; otherwise
+			 * replace it with an equivalent subtable and get that.
+			 */
+			struct mm_page_table *nt = mm_populate_table_pte(
+				begin, pte, level, flags, ppool);
+			if (nt == NULL) {
+				return false;
+			}
+
+			/*
+			 * Recurse to map/unmap the appropriate entries within
+			 * the subtable.
+			 */
+			if (!mm_map_level(begin, end, pa, attrs, nt, level - 1,
+					  flags, ppool)) {
+				return false;
+			}
+		}
+
+		begin = mm_start_of_next_block(begin, entry_size);
+		pa = mm_pa_start_of_next_block(pa, entry_size);
+		pte++;
+	}
+
+	return true;
+}
+
+/**
+ * Updates the page table from the root to map the given address range to a
+ * physical range using the provided (architecture-specific) attributes. Or if
+ * MM_FLAG_UNMAP is set, unmap the given range instead.
+ *
+ * Walks one root table entry at a time, delegating to mm_map_level() for the
+ * portion of the range each root table covers.
+ */
+static bool mm_map_root(struct mm_ptable *t, ptable_addr_t begin,
+			ptable_addr_t end, uint64_t attrs, uint8_t root_level,
+			int flags, struct mpool *ppool)
+{
+	size_t root_table_size = mm_entry_size(root_level);
+	struct mm_page_table *table =
+		&mm_page_table_from_pa(t->root)[mm_index(begin, root_level)];
+
+	for (; begin < end;
+	     begin = mm_start_of_next_block(begin, root_table_size), table++) {
+		if (!mm_map_level(begin, end, pa_init(begin), attrs, table,
+				  root_level - 1, flags, ppool)) {
+			return false;
+		}
+	}
+
+	return true;
+}
+
+/**
+ * Updates the given table such that the given physical address range is mapped
+ * or not mapped into the address space with the architecture-agnostic mode
+ * provided. Only commits the change if MM_FLAG_COMMIT is set.
+ */
+static bool mm_ptable_identity_map(struct mm_ptable *t, paddr_t pa_begin,
+				   paddr_t pa_end, uint64_t attrs, int flags,
+				   struct mpool *ppool)
+{
+	uint8_t root_level = mm_max_level(flags) + 1;
+	ptable_addr_t ptable_end = mm_ptable_addr_space_end(flags);
+	ptable_addr_t end = mm_round_up_to_page(pa_addr(pa_end));
+	ptable_addr_t begin = pa_addr(arch_mm_clear_pa(pa_begin));
+
+	/*
+	 * Assert condition to communicate the API constraint of mm_max_level(),
+	 * that isn't encoded in the types, to the static analyzer.
+	 */
+	CHECK(root_level >= 2);
+
+	/* Cap end to stay within the bounds of the page table. */
+	if (end > ptable_end) {
+		end = ptable_end;
+	}
+
+	if (!mm_map_root(t, begin, end, attrs, root_level, flags, ppool)) {
+		return false;
+	}
+
+	/*
+	 * Invalidate the TLB, but only once the table updates above are
+	 * complete, and only when the change was actually committed.
+	 */
+	if ((flags & MM_FLAG_COMMIT) &&
+	    ((flags & MM_FLAG_STAGE1) || mm_stage2_invalidate)) {
+		mm_invalidate_tlb(begin, end, flags);
+	}
+
+	return true;
+}
+
+/*
+ * Prepares the given page table for the given address mapping such that it
+ * will be able to commit the change without failure. It does so by ensuring
+ * the smallest granularity needed is available. This remains valid provided
+ * subsequent operations do not decrease the granularity.
+ *
+ * In particular, multiple calls to this function will result in the
+ * corresponding calls to commit the changes to succeed.
+ */
+static bool mm_ptable_identity_prepare(struct mm_ptable *t, paddr_t pa_begin,
+				       paddr_t pa_end, uint64_t attrs,
+				       int flags, struct mpool *ppool)
+{
+	/* Run the same walk as a commit, but without writing the mapping. */
+	flags &= ~MM_FLAG_COMMIT;
+	return mm_ptable_identity_map(t, pa_begin, pa_end, attrs, flags, ppool);
+}
+
+/**
+ * Commits the given address mapping to the page table assuming the operation
+ * cannot fail. `mm_ptable_identity_prepare` must be used correctly before this
+ * to ensure this condition.
+ *
+ * Without the table being properly prepared, the commit may only partially
+ * complete if it runs out of memory resulting in an inconsistent state that
+ * isn't handled.
+ *
+ * Since the non-failure assumption is used in the reasoning about the atomicity
+ * of higher level memory operations, any detected violations result in a panic.
+ *
+ * TODO: remove ppool argument to be sure no changes are made.
+ */
+static void mm_ptable_identity_commit(struct mm_ptable *t, paddr_t pa_begin,
+				      paddr_t pa_end, uint64_t attrs, int flags,
+				      struct mpool *ppool)
+{
+	/* A failure here violates the prepare/commit contract: panic. */
+	CHECK(mm_ptable_identity_map(t, pa_begin, pa_end, attrs,
+				     flags | MM_FLAG_COMMIT, ppool));
+}
+
+/**
+ * Updates the given table such that the given physical address range is mapped
+ * or not mapped into the address space with the architecture-agnostic mode
+ * provided.
+ *
+ * The page table is updated using the separate prepare and commit stages so
+ * that, on failure, a partial update of the address space cannot happen. The
+ * table may be left with extra internal tables but the address space is
+ * unchanged.
+ */
+static bool mm_ptable_identity_update(struct mm_ptable *t, paddr_t pa_begin,
+				      paddr_t pa_end, uint64_t attrs, int flags,
+				      struct mpool *ppool)
+{
+	/* Stage 1: allocate any tables needed; no visible change on failure. */
+	if (!mm_ptable_identity_prepare(t, pa_begin, pa_end, attrs, flags,
+					ppool)) {
+		return false;
+	}
+
+	/* Stage 2: apply the mapping; cannot fail after a prepare. */
+	mm_ptable_identity_commit(t, pa_begin, pa_end, attrs, flags, ppool);
+
+	return true;
+}
+
+/**
+ * Writes the given table to the debug log, calling itself recursively to
+ * write sub-tables.
+ */
+static void mm_dump_table_recursive(struct mm_page_table *table, uint8_t level,
+				    int max_level)
+{
+	uint64_t i;
+
+	for (i = 0; i < MM_PTE_PER_PAGE; i++) {
+		/* Skip absent entries to keep the dump readable. */
+		if (!arch_mm_pte_is_present(table->entries[i], level)) {
+			continue;
+		}
+
+		/*
+		 * Indent proportionally to depth below the root.
+		 * NOTE(review): entries are 64-bit; assumes dlog's %x
+		 * accepts 64-bit arguments -- confirm against dlog.
+		 */
+		dlog("%*s%x: %x\n", 4 * (max_level - level), "", i,
+		     table->entries[i]);
+
+		/* Recurse into sub-tables. */
+		if (arch_mm_pte_is_table(table->entries[i], level)) {
+			mm_dump_table_recursive(
+				mm_page_table_from_pa(arch_mm_table_from_pte(
+					table->entries[i], level)),
+				level - 1, max_level);
+		}
+	}
+}
+
+/**
+ * Writes the given table to the debug log.
+ *
+ * Dumps every (possibly concatenated) root table in turn.
+ */
+static void mm_ptable_dump(struct mm_ptable *t, int flags)
+{
+	struct mm_page_table *tables = mm_page_table_from_pa(t->root);
+	uint8_t max_level = mm_max_level(flags);
+	uint8_t root_table_count = mm_root_table_count(flags);
+	uint8_t i;
+
+	for (i = 0; i < root_table_count; ++i) {
+		mm_dump_table_recursive(&tables[i], max_level, max_level);
+	}
+}
+
+/**
+ * Given the table PTE entries all have identical attributes, returns the single
+ * entry with which it can be replaced. Note that the table PTE will no longer
+ * be valid after calling this function as the table may have been freed.
+ *
+ * If the table is freed, the memory is freed directly rather than calling
+ * `mm_free_page_pte()` as it is known to not have subtables.
+ */
+static pte_t mm_merge_table_pte(pte_t table_pte, uint8_t level,
+				struct mpool *ppool)
+{
+	struct mm_page_table *table;
+	uint64_t block_attrs;
+	uint64_t table_attrs;
+	uint64_t combined_attrs;
+	paddr_t block_address;
+
+	table = mm_page_table_from_pa(arch_mm_table_from_pte(table_pte, level));
+
+	/*
+	 * Entry 0 stands in for all entries since the caller guarantees they
+	 * are identical; if it is absent, the whole table is absent.
+	 */
+	if (!arch_mm_pte_is_present(table->entries[0], level - 1)) {
+		/* Free the table and return an absent entry. */
+		mpool_free(ppool, table);
+		return arch_mm_absent_pte(level);
+	}
+
+	/* Might not be possible to merge the table into a single block. */
+	if (!arch_mm_is_block_allowed(level)) {
+		return table_pte;
+	}
+
+	/* Replace table with a single block, with equivalent attributes. */
+	block_attrs = arch_mm_pte_attrs(table->entries[0], level - 1);
+	table_attrs = arch_mm_pte_attrs(table_pte, level);
+	combined_attrs =
+		arch_mm_combine_table_entry_attrs(table_attrs, block_attrs);
+	block_address = arch_mm_block_from_pte(table->entries[0], level - 1);
+
+	/* Free the table and return a block. */
+	mpool_free(ppool, table);
+	return arch_mm_block_pte(level, block_address, combined_attrs);
+}
+
+/**
+ * Defragments the given PTE by recursively replacing any tables with blocks or
+ * absent entries where possible.
+ */
+static pte_t mm_ptable_defrag_entry(pte_t entry, uint8_t level,
+				    struct mpool *ppool)
+{
+	struct mm_page_table *table;
+	uint64_t i;
+	bool mergeable;
+	bool base_present;
+	uint64_t base_attrs;
+
+	/* Only table entries can be collapsed; everything else is returned
+	 * unchanged. */
+	if (!arch_mm_pte_is_table(entry, level)) {
+		return entry;
+	}
+
+	table = mm_page_table_from_pa(arch_mm_table_from_pte(entry, level));
+
+	/* Defrag the first entry in the table and use it as the base entry. */
+	static_assert(MM_PTE_PER_PAGE >= 1, "There must be at least one PTE.");
+	table->entries[0] =
+		mm_ptable_defrag_entry(table->entries[0], level - 1, ppool);
+	base_present = arch_mm_pte_is_present(table->entries[0], level - 1);
+	base_attrs = arch_mm_pte_attrs(table->entries[0], level - 1);
+
+	/*
+	 * Defrag the remaining entries in the table and check whether they are
+	 * compatible with the base entry meaning the table can be merged into a
+	 * block entry. It assumes addresses are contiguous due to identity
+	 * mapping.
+	 */
+	mergeable = true;
+	for (i = 1; i < MM_PTE_PER_PAGE; ++i) {
+		bool present;
+
+		table->entries[i] = mm_ptable_defrag_entry(table->entries[i],
+							   level - 1, ppool);
+		present = arch_mm_pte_is_present(table->entries[i], level - 1);
+
+		/* Presence must match the base to be mergeable. */
+		if (present != base_present) {
+			mergeable = false;
+			continue;
+		}
+
+		/* Two absent entries are trivially compatible. */
+		if (!present) {
+			continue;
+		}
+
+		/* Remaining sub-tables prevent a merge at this level. */
+		if (!arch_mm_pte_is_block(table->entries[i], level - 1)) {
+			mergeable = false;
+			continue;
+		}
+
+		/* Attributes must match the base exactly. */
+		if (arch_mm_pte_attrs(table->entries[i], level - 1) !=
+		    base_attrs) {
+			mergeable = false;
+			continue;
+		}
+	}
+
+	if (mergeable) {
+		return mm_merge_table_pte(entry, level, ppool);
+	}
+
+	return entry;
+}
+
+/**
+ * Defragments the given page table by converting page table references to
+ * blocks whenever possible.
+ */
+static void mm_ptable_defrag(struct mm_ptable *t, int flags,
+			     struct mpool *ppool)
+{
+	struct mm_page_table *tables = mm_page_table_from_pa(t->root);
+	uint8_t level = mm_max_level(flags);
+	uint8_t root_table_count = mm_root_table_count(flags);
+	uint8_t i;
+	uint64_t j;
+
+	/*
+	 * Loop through each entry in the table. If it points to another table,
+	 * check if that table can be replaced by a block or an absent entry.
+	 */
+	for (i = 0; i < root_table_count; ++i) {
+		for (j = 0; j < MM_PTE_PER_PAGE; ++j) {
+			tables[i].entries[j] = mm_ptable_defrag_entry(
+				tables[i].entries[j], level, ppool);
+		}
+	}
+}
+
+/**
+ * Gets the attributes applied to the given range of stage-2 addresses at the
+ * given level.
+ *
+ * The `got_attrs` argument is initially passed as false until `attrs` contains
+ * attributes of the memory region at which point it is passed as true.
+ *
+ * The value returned in `attrs` is only valid if the function returns true.
+ *
+ * Returns true if the whole range has the same attributes and false otherwise.
+ */
+static bool mm_ptable_get_attrs_level(struct mm_page_table *table,
+				      ptable_addr_t begin, ptable_addr_t end,
+				      uint8_t level, bool got_attrs,
+				      uint64_t *attrs)
+{
+	pte_t *pte = &table->entries[mm_index(begin, level)];
+	ptable_addr_t level_end = mm_level_end(begin, level);
+	size_t entry_size = mm_entry_size(level);
+
+	/* Cap end so that we don't go over the current level max. */
+	if (end > level_end) {
+		end = level_end;
+	}
+
+	/* Check that each entry in the range carries the same attributes. */
+	while (begin < end) {
+		if (arch_mm_pte_is_table(*pte, level)) {
+			/* Descend into the sub-table for this entry. */
+			if (!mm_ptable_get_attrs_level(
+				    mm_page_table_from_pa(
+					    arch_mm_table_from_pte(*pte,
+								   level)),
+				    begin, end, level - 1, got_attrs, attrs)) {
+				return false;
+			}
+			got_attrs = true;
+		} else {
+			if (!got_attrs) {
+				/* First entry seen: record its attributes. */
+				*attrs = arch_mm_pte_attrs(*pte, level);
+				got_attrs = true;
+			} else if (arch_mm_pte_attrs(*pte, level) != *attrs) {
+				return false;
+			}
+		}
+
+		begin = mm_start_of_next_block(begin, entry_size);
+		pte++;
+	}
+
+	/* The whole range was consistent (if any attributes were found). */
+	return got_attrs;
+}
+
+/**
+ * Gets the attributes applied to the given range of addresses in the stage-2
+ * table.
+ *
+ * The value returned in `attrs` is only valid if the function returns true.
+ *
+ * Returns true if the whole range has the same attributes and false otherwise.
+ */
+static bool mm_vm_get_attrs(struct mm_ptable *t, ptable_addr_t begin,
+			    ptable_addr_t end, uint64_t *attrs)
+{
+	/* Stage-2 queries use no flags. */
+	int flags = 0;
+	uint8_t max_level = mm_max_level(flags);
+	uint8_t root_level = max_level + 1;
+	size_t root_table_size = mm_entry_size(root_level);
+	ptable_addr_t ptable_end =
+		mm_root_table_count(flags) * mm_entry_size(root_level);
+	struct mm_page_table *table;
+	bool got_attrs = false;
+
+	begin = mm_round_down_to_page(begin);
+	end = mm_round_up_to_page(end);
+
+	/* Fail if the addresses are out of range. */
+	if (end > ptable_end) {
+		return false;
+	}
+
+	/* Walk each root table overlapping the range. */
+	table = &mm_page_table_from_pa(t->root)[mm_index(begin, root_level)];
+	while (begin < end) {
+		if (!mm_ptable_get_attrs_level(table, begin, end, max_level,
+					       got_attrs, attrs)) {
+			return false;
+		}
+
+		got_attrs = true;
+		begin = mm_start_of_next_block(begin, root_table_size);
+		table++;
+	}
+
+	return got_attrs;
+}
+
+/**
+ * Initialises the given stage-2 (VM) page table. Returns false if the root
+ * tables could not be allocated from `ppool`.
+ */
+bool mm_vm_init(struct mm_ptable *t, struct mpool *ppool)
+{
+	/* Flags of 0 selects the stage-2 configuration. */
+	return mm_ptable_init(t, 0, ppool);
+}
+
+/**
+ * Frees all memory associated with the given stage-2 (VM) page table,
+ * returning it to `ppool`.
+ */
+void mm_vm_fini(struct mm_ptable *t, struct mpool *ppool)
+{
+	mm_ptable_fini(t, 0, ppool);
+}
+
+/**
+ * Selects flags to pass to the page table manipulation operation based on the
+ * mapping mode.
+ */
+static int mm_mode_to_flags(uint32_t mode)
+{
+	/* A fully unmapped mode requests an unmap rather than a map. */
+	if ((mode & MM_MODE_UNMAPPED_MASK) == MM_MODE_UNMAPPED_MASK) {
+		return MM_FLAG_UNMAP;
+	}
+
+	return 0;
+}
+
+/**
+ * See `mm_ptable_identity_prepare`.
+ *
+ * This must be called before `mm_vm_identity_commit` for the same mapping.
+ *
+ * Returns true on success, or false if the update would fail.
+ */
+bool mm_vm_identity_prepare(struct mm_ptable *t, paddr_t begin, paddr_t end,
+			    uint32_t mode, struct mpool *ppool)
+{
+	int flags = mm_mode_to_flags(mode);
+
+	/* Translate the architecture-agnostic mode into stage-2 attributes. */
+	return mm_ptable_identity_prepare(t, begin, end,
+					  arch_mm_mode_to_stage2_attrs(mode),
+					  flags, ppool);
+}
+
+/**
+ * See `mm_ptable_identity_commit`.
+ *
+ * `mm_vm_identity_prepare` must be called before this for the same mapping.
+ *
+ * If `ipa` is non-NULL, it receives the IPA at which the range was mapped
+ * (identical to `begin` under identity mapping).
+ */
+void mm_vm_identity_commit(struct mm_ptable *t, paddr_t begin, paddr_t end,
+			   uint32_t mode, struct mpool *ppool, ipaddr_t *ipa)
+{
+	int flags = mm_mode_to_flags(mode);
+
+	mm_ptable_identity_commit(t, begin, end,
+				  arch_mm_mode_to_stage2_attrs(mode), flags,
+				  ppool);
+
+	if (ipa != NULL) {
+		*ipa = ipa_from_pa(begin);
+	}
+}
+
+/**
+ * Updates a VM's page table such that the given physical address range is
+ * mapped in the address space at the corresponding address range in the
+ * architecture-agnostic mode provided.
+ *
+ * mm_vm_defrag should always be called after a series of page table updates,
+ * whether they succeed or fail. This is because on failure extra page table
+ * entries may have been allocated and then not used, while on success it may be
+ * possible to compact the page table by merging several entries into a block.
+ *
+ * Returns true on success, or false if the update failed and no changes were
+ * made.
+ */
+bool mm_vm_identity_map(struct mm_ptable *t, paddr_t begin, paddr_t end,
+			uint32_t mode, struct mpool *ppool, ipaddr_t *ipa)
+{
+	int flags = mm_mode_to_flags(mode);
+	bool success = mm_ptable_identity_update(
+		t, begin, end, arch_mm_mode_to_stage2_attrs(mode), flags,
+		ppool);
+
+	/* Only report the resulting IPA when the update succeeded. */
+	if (success && ipa != NULL) {
+		*ipa = ipa_from_pa(begin);
+	}
+
+	return success;
+}
+
+/**
+ * Updates the VM's table such that the given physical address range has no
+ * connection to the VM.
+ */
+bool mm_vm_unmap(struct mm_ptable *t, paddr_t begin, paddr_t end,
+		 struct mpool *ppool)
+{
+	/* Unmapping is mapping with the fully-unmapped mode. */
+	uint32_t mode = MM_MODE_UNMAPPED_MASK;
+
+	return mm_vm_identity_map(t, begin, end, mode, ppool, NULL);
+}
+
+/**
+ * Write the given page table of a VM to the debug log.
+ */
+void mm_vm_dump(struct mm_ptable *t)
+{
+	mm_ptable_dump(t, 0);
+}
+
+/**
+ * Defragments the VM page table.
+ */
+void mm_vm_defrag(struct mm_ptable *t, struct mpool *ppool)
+{
+	mm_ptable_defrag(t, 0, ppool);
+}
+
+/**
+ * Gets the mode of the given range of intermediate physical addresses if they
+ * are mapped with the same mode.
+ *
+ * Returns true if the range is mapped with the same mode and false otherwise.
+ * `*mode` is only written on success.
+ */
+bool mm_vm_get_mode(struct mm_ptable *t, ipaddr_t begin, ipaddr_t end,
+		    uint32_t *mode)
+{
+	uint64_t attrs;
+	bool ret;
+
+	ret = mm_vm_get_attrs(t, ipa_addr(begin), ipa_addr(end), &attrs);
+	if (ret) {
+		/* Translate stage-2 attributes back to an agnostic mode. */
+		*mode = arch_mm_stage2_attrs_to_mode(attrs);
+	}
+
+	return ret;
+}
+
+/**
+ * Returns a stage-1 handle without taking `ptable_lock`; only safe when
+ * locking is not yet possible (e.g. during `mm_init`).
+ */
+static struct mm_stage1_locked mm_stage1_lock_unsafe(void)
+{
+	return (struct mm_stage1_locked){.ptable = &ptable};
+}
+
+/**
+ * Acquires the lock protecting the hypervisor (stage-1) page table and
+ * returns a handle through which it may be safely manipulated.
+ */
+struct mm_stage1_locked mm_lock_stage1(void)
+{
+	sl_lock(&ptable_lock);
+	return mm_stage1_lock_unsafe();
+}
+
+/**
+ * Releases the stage-1 lock and invalidates the handle so it cannot be used
+ * again without re-locking.
+ */
+void mm_unlock_stage1(struct mm_stage1_locked *lock)
+{
+	CHECK(lock->ptable == &ptable);
+	sl_unlock(&ptable_lock);
+	lock->ptable = NULL;
+}
+
+/**
+ * Updates the hypervisor page table such that the given physical address range
+ * is mapped into the address space at the corresponding address range in the
+ * architecture-agnostic mode provided.
+ *
+ * Returns a pointer to the start of the mapped range on success, or NULL if
+ * the update failed.
+ */
+void *mm_identity_map(struct mm_stage1_locked stage1_locked, paddr_t begin,
+		      paddr_t end, uint32_t mode, struct mpool *ppool)
+{
+	int flags = MM_FLAG_STAGE1 | mm_mode_to_flags(mode);
+
+	if (mm_ptable_identity_update(stage1_locked.ptable, begin, end,
+				      arch_mm_mode_to_stage1_attrs(mode), flags,
+				      ppool)) {
+		/* Identity mapping: VA equals PA. */
+		return ptr_from_va(va_from_pa(begin));
+	}
+
+	return NULL;
+}
+
+/**
+ * Updates the hypervisor table such that the given physical address range is
+ * not mapped in the address space.
+ */
+bool mm_unmap(struct mm_stage1_locked stage1_locked, paddr_t begin, paddr_t end,
+	      struct mpool *ppool)
+{
+	/* Unmapping is mapping with the fully-unmapped mode. */
+	uint32_t mode = MM_MODE_UNMAPPED_MASK;
+
+	return mm_identity_map(stage1_locked, begin, end, mode, ppool);
+}
+
+/**
+ * Defragments the hypervisor page table.
+ */
+void mm_defrag(struct mm_stage1_locked stage1_locked, struct mpool *ppool)
+{
+	mm_ptable_defrag(stage1_locked.ptable, MM_FLAG_STAGE1, ppool);
+}
+
+/**
+ * Initialises memory management for the hypervisor itself.
+ *
+ * Maps the hypervisor's own text, rodata and data sections with appropriate
+ * permissions and hands control of stage-1 to the architecture layer.
+ */
+bool mm_init(struct mpool *ppool)
+{
+	/* Locking is not enabled yet so fake it. */
+	struct mm_stage1_locked stage1_locked = mm_stage1_lock_unsafe();
+
+	dlog("text: %#x - %#x\n", pa_addr(layout_text_begin()),
+	     pa_addr(layout_text_end()));
+	dlog("rodata: %#x - %#x\n", pa_addr(layout_rodata_begin()),
+	     pa_addr(layout_rodata_end()));
+	dlog("data: %#x - %#x\n", pa_addr(layout_data_begin()),
+	     pa_addr(layout_data_end()));
+
+	if (!mm_ptable_init(&ptable, MM_FLAG_STAGE1, ppool)) {
+		dlog("Unable to allocate memory for page table.\n");
+		return false;
+	}
+
+	/* Let console driver map pages for itself. */
+	plat_console_mm_init(stage1_locked, ppool);
+
+	/*
+	 * Map each section. NOTE(review): return values are ignored here;
+	 * presumably boot-time mapping cannot fail once the root table is
+	 * allocated -- confirm.
+	 */
+	mm_identity_map(stage1_locked, layout_text_begin(), layout_text_end(),
+			MM_MODE_X, ppool);
+
+	mm_identity_map(stage1_locked, layout_rodata_begin(),
+			layout_rodata_end(), MM_MODE_R, ppool);
+
+	mm_identity_map(stage1_locked, layout_data_begin(), layout_data_end(),
+			MM_MODE_R | MM_MODE_W, ppool);
+
+	return arch_mm_init(ptable.root);
+}
diff --git a/src/mm_test.cc b/src/mm_test.cc
new file mode 100644
index 0000000..887d16c
--- /dev/null
+++ b/src/mm_test.cc
@@ -0,0 +1,1195 @@
+/*
+ * Copyright 2018 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <gmock/gmock.h>
+
+extern "C" {
+#include "hf/arch/mm.h"
+
+#include "hf/mm.h"
+#include "hf/mpool.h"
+}
+
+#include <limits>
+#include <memory>
+#include <span>
+#include <vector>
+
+#include "mm_test.hh"
+
+namespace
+{
+using namespace ::std::placeholders;
+
+using ::testing::AllOf;
+using ::testing::Contains;
+using ::testing::Each;
+using ::testing::Eq;
+using ::testing::Not;
+using ::testing::SizeIs;
+using ::testing::Truly;
+
+using ::mm_test::get_ptable;
+
+constexpr size_t TEST_HEAP_SIZE = PAGE_SIZE * 16;
+const int TOP_LEVEL = arch_mm_stage2_max_level();
+const paddr_t VM_MEM_END = pa_init(0x200'0000'0000);
+
+/**
+ * Calculates the size of the address space represented by a page table entry at
+ * the given level.
+ */
+size_t mm_entry_size(int level)
+{
+	/* Each level multiplies the span per entry by 2^PAGE_LEVEL_BITS. */
+	return UINT64_C(1) << (PAGE_BITS + level * PAGE_LEVEL_BITS);
+}
+
+/**
+ * Checks whether the address is mapped in the address space.
+ */
+bool mm_vm_is_mapped(struct mm_ptable *t, ipaddr_t ipa)
+{
+	uint32_t mode;
+	/* Mapped means the one-byte range has a mode without MM_MODE_INVALID. */
+	return mm_vm_get_mode(t, ipa, ipa_add(ipa, 1), &mode) &&
+	       (mode & MM_MODE_INVALID) == 0;
+}
+
+/**
+ * Get an STL representation of the page table.
+ */
+std::span<pte_t, MM_PTE_PER_PAGE> get_table(paddr_t pa)
+{
+	/* View the table through its hypervisor virtual address. */
+	auto table = reinterpret_cast<struct mm_page_table *>(
+		ptr_from_va(va_from_pa(pa)));
+	return std::span<pte_t>(table->entries, std::end(table->entries));
+}
+
+/** Test fixture providing a fresh memory pool for each test case. */
+class mm : public ::testing::Test
+{
+	void SetUp() override
+	{
+		/*
+		 * TODO: replace with direct use of stdlib allocator so
+		 * sanitizers are more effective.
+		 */
+		test_heap = std::make_unique<uint8_t[]>(TEST_HEAP_SIZE);
+		mpool_init(&ppool, sizeof(struct mm_page_table));
+		mpool_add_chunk(&ppool, test_heap.get(), TEST_HEAP_SIZE);
+	}
+
+	/* Backing storage for the page-table memory pool. */
+	std::unique_ptr<uint8_t[]> test_heap;
+
+       protected:
+	/* Pool from which tests allocate page tables. */
+	struct mpool ppool;
+};
+
+/**
+ * A new table is initially empty.
+ */
+TEST_F(mm, ptable_init_empty)
+{
+	struct mm_ptable ptable;
+	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
+	/* Four concatenated root tables are expected, all entries absent. */
+	EXPECT_THAT(
+		get_ptable(ptable),
+		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
+	mm_vm_fini(&ptable, &ppool);
+}
+
+/**
+ * Each new concatenated table is initially empty.
+ */
+TEST_F(mm, ptable_init_concatenated_empty)
+{
+	struct mm_ptable ptable;
+	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
+	EXPECT_THAT(
+		get_ptable(ptable),
+		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
+	mm_vm_fini(&ptable, &ppool);
+}
+
+/**
+ * Only the first page is mapped with all others left absent.
+ */
+TEST_F(mm, map_first_page)
+{
+	constexpr uint32_t mode = 0;
+	const paddr_t page_begin = pa_init(0);
+	const paddr_t page_end = pa_add(page_begin, PAGE_SIZE);
+	struct mm_ptable ptable;
+	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
+	ASSERT_TRUE(mm_vm_identity_map(&ptable, page_begin, page_end, mode,
+				       &ppool, nullptr));
+
+	auto tables = get_ptable(ptable);
+	EXPECT_THAT(tables, SizeIs(4));
+	ASSERT_THAT(TOP_LEVEL, Eq(2));
+
+	/* Check that the first page is mapped and nothing else. */
+	EXPECT_THAT(std::span(tables).last(3),
+		    Each(Each(arch_mm_absent_pte(TOP_LEVEL))));
+
+	/* Walk down the chain of tables to the level-0 block. */
+	auto table_l2 = tables.front();
+	EXPECT_THAT(table_l2.subspan(1), Each(arch_mm_absent_pte(TOP_LEVEL)));
+	ASSERT_TRUE(arch_mm_pte_is_table(table_l2[0], TOP_LEVEL));
+
+	auto table_l1 =
+		get_table(arch_mm_table_from_pte(table_l2[0], TOP_LEVEL));
+	EXPECT_THAT(table_l1.subspan(1),
+		    Each(arch_mm_absent_pte(TOP_LEVEL - 1)));
+	ASSERT_TRUE(arch_mm_pte_is_table(table_l1[0], TOP_LEVEL - 1));
+
+	auto table_l0 =
+		get_table(arch_mm_table_from_pte(table_l1[0], TOP_LEVEL - 1));
+	EXPECT_THAT(table_l0.subspan(1),
+		    Each(arch_mm_absent_pte(TOP_LEVEL - 2)));
+	ASSERT_TRUE(arch_mm_pte_is_block(table_l0[0], TOP_LEVEL - 2));
+	EXPECT_THAT(pa_addr(arch_mm_block_from_pte(table_l0[0], TOP_LEVEL - 2)),
+		    Eq(pa_addr(page_begin)));
+
+	mm_vm_fini(&ptable, &ppool);
+}
+
+/**
+ * The start address is rounded down and the end address is rounded up to page
+ * boundaries.
+ */
+TEST_F(mm, map_round_to_page)
+{
+	constexpr uint32_t mode = 0;
+	/* An unaligned range entirely within the address space's last page. */
+	const paddr_t map_begin = pa_init(0x200'0000'0000 - PAGE_SIZE + 23);
+	const paddr_t map_end = pa_add(map_begin, 268);
+	ipaddr_t ipa = ipa_init(-1);
+	struct mm_ptable ptable;
+	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
+	ASSERT_TRUE(mm_vm_identity_map(&ptable, map_begin, map_end, mode,
+				       &ppool, &ipa));
+	EXPECT_THAT(ipa_addr(ipa), Eq(pa_addr(map_begin)));
+
+	auto tables = get_ptable(ptable);
+	EXPECT_THAT(tables, SizeIs(4));
+	ASSERT_THAT(TOP_LEVEL, Eq(2));
+
+	/* Check that the last page is mapped, and nothing else. */
+	EXPECT_THAT(std::span(tables).first(3),
+		    Each(Each(arch_mm_absent_pte(TOP_LEVEL))));
+
+	auto table_l2 = tables.back();
+	EXPECT_THAT(table_l2.first(table_l2.size() - 1),
+		    Each(arch_mm_absent_pte(TOP_LEVEL)));
+	ASSERT_TRUE(arch_mm_pte_is_table(table_l2.last(1)[0], TOP_LEVEL));
+
+	auto table_l1 = get_table(
+		arch_mm_table_from_pte(table_l2.last(1)[0], TOP_LEVEL));
+	EXPECT_THAT(table_l1.first(table_l1.size() - 1),
+		    Each(arch_mm_absent_pte(TOP_LEVEL - 1)));
+	ASSERT_TRUE(arch_mm_pte_is_table(table_l1.last(1)[0], TOP_LEVEL - 1));
+
+	auto table_l0 = get_table(
+		arch_mm_table_from_pte(table_l1.last(1)[0], TOP_LEVEL - 1));
+	EXPECT_THAT(table_l0.first(table_l0.size() - 1),
+		    Each(arch_mm_absent_pte(TOP_LEVEL - 2)));
+	ASSERT_TRUE(arch_mm_pte_is_block(table_l0.last(1)[0], TOP_LEVEL - 2));
+	EXPECT_THAT(pa_addr(arch_mm_block_from_pte(table_l0.last(1)[0],
+						   TOP_LEVEL - 2)),
+		    Eq(0x200'0000'0000 - PAGE_SIZE));
+
+	mm_vm_fini(&ptable, &ppool);
+}
+
+/**
+ * Map a two page range over the boundary of two tables.
+ */
+TEST_F(mm, map_across_tables)
+{
+	constexpr uint32_t mode = 0;
+	/* The boundary between root tables 0 and 1 lies at 0x80'0000'0000. */
+	const paddr_t map_begin = pa_init(0x80'0000'0000 - PAGE_SIZE);
+	const paddr_t map_end = pa_add(map_begin, 2 * PAGE_SIZE);
+	struct mm_ptable ptable;
+	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
+	ASSERT_TRUE(mm_vm_identity_map(&ptable, map_begin, map_end, mode,
+				       &ppool, nullptr));
+
+	auto tables = get_ptable(ptable);
+	EXPECT_THAT(tables, SizeIs(4));
+	EXPECT_THAT(std::span(tables).last(2),
+		    Each(Each(arch_mm_absent_pte(TOP_LEVEL))));
+	ASSERT_THAT(TOP_LEVEL, Eq(2));
+
+	/* Check only the last page of the first table is mapped. */
+	auto table0_l2 = tables.front();
+	EXPECT_THAT(table0_l2.first(table0_l2.size() - 1),
+		    Each(arch_mm_absent_pte(TOP_LEVEL)));
+	ASSERT_TRUE(arch_mm_pte_is_table(table0_l2.last(1)[0], TOP_LEVEL));
+
+	auto table0_l1 = get_table(
+		arch_mm_table_from_pte(table0_l2.last(1)[0], TOP_LEVEL));
+	EXPECT_THAT(table0_l1.first(table0_l1.size() - 1),
+		    Each(arch_mm_absent_pte(TOP_LEVEL - 1)));
+	ASSERT_TRUE(arch_mm_pte_is_table(table0_l1.last(1)[0], TOP_LEVEL - 1));
+
+	auto table0_l0 = get_table(
+		arch_mm_table_from_pte(table0_l1.last(1)[0], TOP_LEVEL - 1));
+	EXPECT_THAT(table0_l0.first(table0_l0.size() - 1),
+		    Each(arch_mm_absent_pte(TOP_LEVEL - 2)));
+	ASSERT_TRUE(arch_mm_pte_is_block(table0_l0.last(1)[0], TOP_LEVEL - 2));
+	EXPECT_THAT(pa_addr(arch_mm_block_from_pte(table0_l0.last(1)[0],
+						   TOP_LEVEL - 2)),
+		    Eq(pa_addr(map_begin)));
+
+	/* Check only the first page of the second table is mapped. */
+	auto table1_l2 = tables[1];
+	EXPECT_THAT(table1_l2.subspan(1), Each(arch_mm_absent_pte(TOP_LEVEL)));
+	ASSERT_TRUE(arch_mm_pte_is_table(table1_l2[0], TOP_LEVEL));
+
+	auto table1_l1 =
+		get_table(arch_mm_table_from_pte(table1_l2[0], TOP_LEVEL));
+	EXPECT_THAT(table1_l1.subspan(1),
+		    Each(arch_mm_absent_pte(TOP_LEVEL - 1)));
+	ASSERT_TRUE(arch_mm_pte_is_table(table1_l1[0], TOP_LEVEL - 1));
+
+	auto table1_l0 =
+		get_table(arch_mm_table_from_pte(table1_l1[0], TOP_LEVEL - 1));
+	EXPECT_THAT(table1_l0.subspan(1),
+		    Each(arch_mm_absent_pte(TOP_LEVEL - 2)));
+	ASSERT_TRUE(arch_mm_pte_is_block(table1_l0[0], TOP_LEVEL - 2));
+	EXPECT_THAT(
+		pa_addr(arch_mm_block_from_pte(table1_l0[0], TOP_LEVEL - 2)),
+		Eq(pa_addr(pa_add(map_begin, PAGE_SIZE))));
+
+	mm_vm_fini(&ptable, &ppool);
+}
+
+/**
+ * Mapping all of memory creates blocks at the highest level.
+ */
+TEST_F(mm, map_all_at_top_level)
+{
+	constexpr uint32_t mode = 0;
+	struct mm_ptable ptable;
+	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
+	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
+				       &ppool, nullptr));
+	auto tables = get_ptable(ptable);
+	EXPECT_THAT(
+		tables,
+		AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
+							   _1, TOP_LEVEL))))));
+	/* Each block's address must match its identity-mapped position. */
+	for (uint64_t i = 0; i < tables.size(); ++i) {
+		for (uint64_t j = 0; j < MM_PTE_PER_PAGE; ++j) {
+			EXPECT_THAT(pa_addr(arch_mm_block_from_pte(tables[i][j],
+								   TOP_LEVEL)),
+				    Eq((i * mm_entry_size(TOP_LEVEL + 1)) +
+				       (j * mm_entry_size(TOP_LEVEL))))
+				<< "i=" << i << " j=" << j;
+		}
+	}
+	mm_vm_fini(&ptable, &ppool);
+}
+
+/**
+ * Map all memory then trying to map a page again doesn't introduce a special
+ * mapping for that particular page.
+ */
+TEST_F(mm, map_already_mapped)
+{
+	constexpr uint32_t mode = 0;
+	ipaddr_t ipa = ipa_init(-1);
+	struct mm_ptable ptable;
+	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
+	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
+				       &ppool, nullptr));
+	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), pa_init(PAGE_SIZE),
+				       mode, &ppool, &ipa));
+	EXPECT_THAT(ipa_addr(ipa), Eq(0));
+	/* The table must still consist solely of top-level blocks. */
+	EXPECT_THAT(
+		get_ptable(ptable),
+		AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
+							   _1, TOP_LEVEL))))));
+	mm_vm_fini(&ptable, &ppool);
+}
+
+/**
+ * Mapping a reverse range, i.e. the end comes before the start, is treated as
+ * an empty range so no mappings are made.
+ */
+TEST_F(mm, map_reverse_range)
+{
+	constexpr uint32_t mode = 0;
+	ipaddr_t ipa = ipa_init(-1);
+	struct mm_ptable ptable;
+	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
+	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0x1234'5678),
+				       pa_init(0x5000), mode, &ppool, &ipa));
+	EXPECT_THAT(ipa_addr(ipa), Eq(0x1234'5678));
+	/* No entries should have been created. */
+	EXPECT_THAT(
+		get_ptable(ptable),
+		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
+	mm_vm_fini(&ptable, &ppool);
+}
+
+/**
+ * Mapping a reverse range in the same page will map the page because the start
+ * of the range is rounded down and the end is rounded up.
+ *
+ * This serves as a form of documentation of behaviour rather than a
+ * requirement. Check whether any code relies on this before changing it.
+ */
+TEST_F(mm, map_reverse_range_quirk)
+{
+	constexpr uint32_t mode = 0;
+	ipaddr_t ipa = ipa_init(-1);
+	struct mm_ptable ptable;
+	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
+	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(20), pa_init(10), mode,
+				       &ppool, &ipa));
+	EXPECT_THAT(ipa_addr(ipa), Eq(20));
+	EXPECT_TRUE(mm_vm_is_mapped(&ptable, ipa));
+	mm_vm_fini(&ptable, &ppool);
+}
+
+/**
+ * Mapping a range up to the maximum address causes the range end to wrap to
+ * zero as it is rounded up to a page boundary meaning no memory is mapped.
+ *
+ * This serves as a form of documentation of behaviour rather than a
+ * requirement. Check whether any code relies on this before changing it.
+ */
+TEST_F(mm, map_last_address_quirk)
+{
+	constexpr uint32_t mode = 0;
+	ipaddr_t ipa = ipa_init(-1);
+	struct mm_ptable ptable;
+	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
+	ASSERT_TRUE(mm_vm_identity_map(
+		&ptable, pa_init(0),
+		pa_init(std::numeric_limits<uintpaddr_t>::max()), mode, &ppool,
+		&ipa));
+	EXPECT_THAT(ipa_addr(ipa), Eq(0));
+	/* Rounding wraps the end to zero so nothing gets mapped. */
+	EXPECT_THAT(
+		get_ptable(ptable),
+		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
+	mm_vm_fini(&ptable, &ppool);
+}
+
+/**
+ * Mapping a range that goes beyond the available memory clamps to the available
+ * range.
+ */
+TEST_F(mm, map_clamp_to_range)
+{
+	constexpr uint32_t mode = 0;
+	struct mm_ptable ptable;
+	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
+	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0),
+				       pa_init(0xf32'0000'0000'0000), mode,
+				       &ppool, nullptr));
+	/* Everything up to the clamp is mapped as top-level blocks. */
+	EXPECT_THAT(
+		get_ptable(ptable),
+		AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
+							   _1, TOP_LEVEL))))));
+	mm_vm_fini(&ptable, &ppool);
+}
+
+/**
+ * Mapping a range outside of the available memory is ignored and doesn't alter
+ * the page tables.
+ */
+TEST_F(mm, map_ignore_out_of_range)
+{
+	constexpr uint32_t mode = 0;
+	ipaddr_t ipa = ipa_init(-1);
+	struct mm_ptable ptable;
+	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
+	ASSERT_TRUE(mm_vm_identity_map(&ptable, VM_MEM_END,
+				       pa_init(0xf0'0000'0000'0000), mode,
+				       &ppool, &ipa));
+	EXPECT_THAT(ipa_addr(ipa), Eq(pa_addr(VM_MEM_END)));
+	/* No entries should have been created. */
+	EXPECT_THAT(
+		get_ptable(ptable),
+		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
+	mm_vm_fini(&ptable, &ppool);
+}
+
+/**
+ * Map a single page and then map all of memory which replaces the single page
+ * mapping with a higher level block mapping.
+ */
+TEST_F(mm, map_block_replaces_table)
+{
+	constexpr uint32_t mode = 0;
+	const paddr_t page_begin = pa_init(34567 * PAGE_SIZE);
+	const paddr_t page_end = pa_add(page_begin, PAGE_SIZE);
+	struct mm_ptable ptable;
+	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
+	ASSERT_TRUE(mm_vm_identity_map(&ptable, page_begin, page_end, mode,
+				       &ppool, nullptr));
+	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
+				       &ppool, nullptr));
+	/* The fine-grained table has been superseded by top-level blocks. */
+	EXPECT_THAT(
+		get_ptable(ptable),
+		AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
+							   _1, TOP_LEVEL))))));
+	mm_vm_fini(&ptable, &ppool);
+}
+
+/**
+ * Map all memory at the top level, unmapping a page and remapping at a lower
+ * level does not result in all memory being mapped at the top level again.
+ */
+TEST_F(mm, map_does_not_defrag)
+{
+	constexpr uint32_t mode = 0;
+	const paddr_t page_begin = pa_init(12000 * PAGE_SIZE);
+	const paddr_t page_end = pa_add(page_begin, PAGE_SIZE);
+	struct mm_ptable ptable;
+	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
+	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
+				       &ppool, nullptr));
+	ASSERT_TRUE(mm_vm_unmap(&ptable, page_begin, page_end, &ppool));
+	ASSERT_TRUE(mm_vm_identity_map(&ptable, page_begin, page_end, mode,
+				       &ppool, nullptr));
+	/* Everything is present but a mixture of blocks and tables remains. */
+	EXPECT_THAT(get_ptable(ptable),
+		    AllOf(SizeIs(4),
+			  Each(Each(Truly(std::bind(arch_mm_pte_is_present, _1,
+						    TOP_LEVEL)))),
+			  Contains(Contains(Truly(std::bind(
+				  arch_mm_pte_is_block, _1, TOP_LEVEL)))),
+			  Contains(Contains(Truly(std::bind(
+				  arch_mm_pte_is_table, _1, TOP_LEVEL))))));
+	mm_vm_fini(&ptable, &ppool);
+}
+
+/**
+ * Mapping with a mode that indicates unmapping results in the addresses being
+ * unmapped with absent entries.
+ */
+TEST_F(mm, map_to_unmap)
+{
+	constexpr uint32_t mode = 0;
+	const paddr_t l0_begin = pa_init(uintpaddr_t(524421) * PAGE_SIZE);
+	const paddr_t l0_end = pa_add(l0_begin, 17 * PAGE_SIZE);
+	const paddr_t l1_begin = pa_init(3 * mm_entry_size(1));
+	const paddr_t l1_end = pa_add(l1_begin, 5 * mm_entry_size(1));
+	struct mm_ptable ptable;
+	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
+	ASSERT_TRUE(mm_vm_identity_map(&ptable, l0_begin, l0_end, mode, &ppool,
+				       nullptr));
+	ASSERT_TRUE(mm_vm_identity_map(&ptable, l1_begin, l1_end, mode, &ppool,
+				       nullptr));
+	EXPECT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END,
+				       MM_MODE_UNMAPPED_MASK, &ppool, nullptr));
+	EXPECT_THAT(
+		get_ptable(ptable),
+		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
+	mm_vm_fini(&ptable, &ppool);
+}
+
+/*
+ * Preparing and committing an address range works the same as mapping it.
+ */
+TEST_F(mm, prepare_and_commit_first_page)
+{
+ constexpr uint32_t mode = 0;
+ const paddr_t page_begin = pa_init(0);
+ const paddr_t page_end = pa_add(page_begin, PAGE_SIZE);
+ struct mm_ptable ptable;
+ ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
+ ASSERT_TRUE(mm_vm_identity_prepare(&ptable, page_begin, page_end, mode,
+ &ppool));
+ mm_vm_identity_commit(&ptable, page_begin, page_end, mode, &ppool,
+ nullptr);
+
+ auto tables = get_ptable(ptable);
+ EXPECT_THAT(tables, SizeIs(4));
+ ASSERT_THAT(TOP_LEVEL, Eq(2));
+
+ /* Check that the first page is mapped and nothing else. */
+ EXPECT_THAT(std::span(tables).last(3),
+ Each(Each(arch_mm_absent_pte(TOP_LEVEL))));
+
+ auto table_l2 = tables.front();
+ EXPECT_THAT(table_l2.subspan(1), Each(arch_mm_absent_pte(TOP_LEVEL)));
+ ASSERT_TRUE(arch_mm_pte_is_table(table_l2[0], TOP_LEVEL));
+
+ auto table_l1 =
+ get_table(arch_mm_table_from_pte(table_l2[0], TOP_LEVEL));
+ EXPECT_THAT(table_l1.subspan(1),
+ Each(arch_mm_absent_pte(TOP_LEVEL - 1)));
+ ASSERT_TRUE(arch_mm_pte_is_table(table_l1[0], TOP_LEVEL - 1));
+
+ auto table_l0 =
+ get_table(arch_mm_table_from_pte(table_l1[0], TOP_LEVEL - 1));
+ EXPECT_THAT(table_l0.subspan(1),
+ Each(arch_mm_absent_pte(TOP_LEVEL - 2)));
+ ASSERT_TRUE(arch_mm_pte_is_block(table_l0[0], TOP_LEVEL - 2));
+ EXPECT_THAT(pa_addr(arch_mm_block_from_pte(table_l0[0], TOP_LEVEL - 2)),
+ Eq(pa_addr(page_begin)));
+
+ mm_vm_fini(&ptable, &ppool);
+}
+
+/**
+ * Disjoint address ranges can be prepared and committed together.
+ */
+TEST_F(mm, prepare_and_commit_disjoint_regions)
+{
+ constexpr uint32_t mode = 0;
+ const paddr_t first_begin = pa_init(0);
+ const paddr_t first_end = pa_add(first_begin, PAGE_SIZE);
+ const paddr_t last_begin = pa_init(pa_addr(VM_MEM_END) - PAGE_SIZE);
+ const paddr_t last_end = VM_MEM_END;
+ struct mm_ptable ptable;
+ ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
+ ASSERT_TRUE(mm_vm_identity_prepare(&ptable, first_begin, first_end,
+ mode, &ppool));
+ ASSERT_TRUE(mm_vm_identity_prepare(&ptable, last_begin, last_end, mode,
+ &ppool));
+ mm_vm_identity_commit(&ptable, first_begin, first_end, mode, &ppool,
+ nullptr);
+ mm_vm_identity_commit(&ptable, last_begin, last_end, mode, &ppool,
+ nullptr);
+
+ auto tables = get_ptable(ptable);
+ EXPECT_THAT(tables, SizeIs(4));
+ ASSERT_THAT(TOP_LEVEL, Eq(2));
+
+ /* Check that the first and last pages are mapped and nothing else. */
+ EXPECT_THAT(std::span(tables).subspan(1, 2),
+ Each(Each(arch_mm_absent_pte(TOP_LEVEL))));
+
+ /* Check the first page. */
+ auto table0_l2 = tables.front();
+ EXPECT_THAT(table0_l2.subspan(1), Each(arch_mm_absent_pte(TOP_LEVEL)));
+ ASSERT_TRUE(arch_mm_pte_is_table(table0_l2[0], TOP_LEVEL));
+
+ auto table0_l1 =
+ get_table(arch_mm_table_from_pte(table0_l2[0], TOP_LEVEL));
+ EXPECT_THAT(table0_l1.subspan(1),
+ Each(arch_mm_absent_pte(TOP_LEVEL - 1)));
+ ASSERT_TRUE(arch_mm_pte_is_table(table0_l1[0], TOP_LEVEL - 1));
+
+ auto table0_l0 =
+ get_table(arch_mm_table_from_pte(table0_l1[0], TOP_LEVEL - 1));
+ EXPECT_THAT(table0_l0.subspan(1),
+ Each(arch_mm_absent_pte(TOP_LEVEL - 2)));
+ ASSERT_TRUE(arch_mm_pte_is_block(table0_l0[0], TOP_LEVEL - 2));
+ EXPECT_THAT(
+ pa_addr(arch_mm_block_from_pte(table0_l0[0], TOP_LEVEL - 2)),
+ Eq(pa_addr(first_begin)));
+
+ /* Check the last page. */
+ auto table3_l2 = tables.back();
+ EXPECT_THAT(table3_l2.first(table3_l2.size() - 1),
+ Each(arch_mm_absent_pte(TOP_LEVEL)));
+ ASSERT_TRUE(arch_mm_pte_is_table(table3_l2.last(1)[0], TOP_LEVEL));
+
+ auto table3_l1 = get_table(
+ arch_mm_table_from_pte(table3_l2.last(1)[0], TOP_LEVEL));
+ EXPECT_THAT(table3_l1.first(table3_l1.size() - 1),
+ Each(arch_mm_absent_pte(TOP_LEVEL - 1)));
+ ASSERT_TRUE(arch_mm_pte_is_table(table3_l1.last(1)[0], TOP_LEVEL - 1));
+
+ auto table3_l0 = get_table(
+ arch_mm_table_from_pte(table3_l1.last(1)[0], TOP_LEVEL - 1));
+ EXPECT_THAT(table3_l0.first(table3_l0.size() - 1),
+ Each(arch_mm_absent_pte(TOP_LEVEL - 2)));
+ ASSERT_TRUE(arch_mm_pte_is_block(table3_l0.last(1)[0], TOP_LEVEL - 2));
+ EXPECT_THAT(pa_addr(arch_mm_block_from_pte(table3_l0.last(1)[0],
+ TOP_LEVEL - 2)),
+ Eq(pa_addr(last_begin)));
+
+ mm_vm_fini(&ptable, &ppool);
+}
+
+/**
+ * Overlapping address ranges can be prepared and committed together.
+ */
+TEST_F(mm, prepare_and_commit_overlapping_regions)
+{
+ constexpr uint32_t mode = 0;
+ const paddr_t low_begin = pa_init(0x80'0000'0000 - PAGE_SIZE);
+ const paddr_t high_begin = pa_add(low_begin, PAGE_SIZE);
+ const paddr_t map_end = pa_add(high_begin, PAGE_SIZE);
+ struct mm_ptable ptable;
+ ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
+ ASSERT_TRUE(mm_vm_identity_prepare(&ptable, high_begin, map_end, mode,
+ &ppool));
+ ASSERT_TRUE(mm_vm_identity_prepare(&ptable, low_begin, map_end, mode,
+ &ppool));
+ mm_vm_identity_commit(&ptable, high_begin, map_end, mode, &ppool,
+ nullptr);
+ mm_vm_identity_commit(&ptable, low_begin, map_end, mode, &ppool,
+ nullptr);
+
+ auto tables = get_ptable(ptable);
+ EXPECT_THAT(tables, SizeIs(4));
+ EXPECT_THAT(std::span(tables).last(2),
+ Each(Each(arch_mm_absent_pte(TOP_LEVEL))));
+ ASSERT_THAT(TOP_LEVEL, Eq(2));
+
+ /* Check only the last page of the first table is mapped. */
+ auto table0_l2 = tables.front();
+ EXPECT_THAT(table0_l2.first(table0_l2.size() - 1),
+ Each(arch_mm_absent_pte(TOP_LEVEL)));
+ ASSERT_TRUE(arch_mm_pte_is_table(table0_l2.last(1)[0], TOP_LEVEL));
+
+ auto table0_l1 = get_table(
+ arch_mm_table_from_pte(table0_l2.last(1)[0], TOP_LEVEL));
+ EXPECT_THAT(table0_l1.first(table0_l1.size() - 1),
+ Each(arch_mm_absent_pte(TOP_LEVEL - 1)));
+ ASSERT_TRUE(arch_mm_pte_is_table(table0_l1.last(1)[0], TOP_LEVEL - 1));
+
+ auto table0_l0 = get_table(
+ arch_mm_table_from_pte(table0_l1.last(1)[0], TOP_LEVEL - 1));
+ EXPECT_THAT(table0_l0.first(table0_l0.size() - 1),
+ Each(arch_mm_absent_pte(TOP_LEVEL - 2)));
+ ASSERT_TRUE(arch_mm_pte_is_block(table0_l0.last(1)[0], TOP_LEVEL - 2));
+ EXPECT_THAT(pa_addr(arch_mm_block_from_pte(table0_l0.last(1)[0],
+ TOP_LEVEL - 2)),
+ Eq(pa_addr(low_begin)));
+
+ /* Check only the first page of the second table is mapped. */
+ auto table1_l2 = tables[1];
+ EXPECT_THAT(table1_l2.subspan(1), Each(arch_mm_absent_pte(TOP_LEVEL)));
+ ASSERT_TRUE(arch_mm_pte_is_table(table1_l2[0], TOP_LEVEL));
+
+ auto table1_l1 =
+ get_table(arch_mm_table_from_pte(table1_l2[0], TOP_LEVEL));
+ EXPECT_THAT(table1_l1.subspan(1),
+ Each(arch_mm_absent_pte(TOP_LEVEL - 1)));
+ ASSERT_TRUE(arch_mm_pte_is_table(table1_l1[0], TOP_LEVEL - 1));
+
+ auto table1_l0 =
+ get_table(arch_mm_table_from_pte(table1_l1[0], TOP_LEVEL - 1));
+ EXPECT_THAT(table1_l0.subspan(1),
+ Each(arch_mm_absent_pte(TOP_LEVEL - 2)));
+ ASSERT_TRUE(arch_mm_pte_is_block(table1_l0[0], TOP_LEVEL - 2));
+ EXPECT_THAT(
+ pa_addr(arch_mm_block_from_pte(table1_l0[0], TOP_LEVEL - 2)),
+ Eq(pa_addr(high_begin)));
+
+ mm_vm_fini(&ptable, &ppool);
+}
+
+/**
+ * If the range is not mapped, unmapping has no effect.
+ */
+TEST_F(mm, unmap_not_mapped)
+{
+ struct mm_ptable ptable;
+ ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
+ EXPECT_TRUE(
+ mm_vm_unmap(&ptable, pa_init(12345), pa_init(987652), &ppool));
+ EXPECT_THAT(
+ get_ptable(ptable),
+ AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
+ mm_vm_fini(&ptable, &ppool);
+}
+
+/**
+ * Unmapping everything should result in an empty page table with no subtables.
+ */
+TEST_F(mm, unmap_all)
+{
+ constexpr uint32_t mode = 0;
+ const paddr_t l0_begin = pa_init(uintpaddr_t(524421) * PAGE_SIZE);
+ const paddr_t l0_end = pa_add(l0_begin, 17 * PAGE_SIZE);
+ const paddr_t l1_begin = pa_init(3 * mm_entry_size(1));
+ const paddr_t l1_end = pa_add(l1_begin, 5 * mm_entry_size(1));
+ struct mm_ptable ptable;
+ ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
+ ASSERT_TRUE(mm_vm_identity_map(&ptable, l0_begin, l0_end, mode, &ppool,
+ nullptr));
+ ASSERT_TRUE(mm_vm_identity_map(&ptable, l1_begin, l1_end, mode, &ppool,
+ nullptr));
+ EXPECT_TRUE(mm_vm_unmap(&ptable, pa_init(0), VM_MEM_END, &ppool));
+ EXPECT_THAT(
+ get_ptable(ptable),
+ AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
+ mm_vm_fini(&ptable, &ppool);
+}
+
+/**
+ * Unmap range is rounded to the containing pages.
+ */
+TEST_F(mm, unmap_round_to_page)
+{
+ constexpr uint32_t mode = 0;
+ const paddr_t map_begin = pa_init(0x160'0000'0000 + PAGE_SIZE);
+ const paddr_t map_end = pa_add(map_begin, PAGE_SIZE);
+ struct mm_ptable ptable;
+
+ ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
+ ASSERT_TRUE(mm_vm_identity_map(&ptable, map_begin, map_end, mode,
+ &ppool, nullptr));
+ ASSERT_TRUE(mm_vm_unmap(&ptable, pa_add(map_begin, 93),
+ pa_add(map_begin, 99), &ppool));
+
+ auto tables = get_ptable(ptable);
+ constexpr auto l3_index = 2;
+
+ /* Check all other top level entries are empty... */
+ EXPECT_THAT(std::span(tables).first(l3_index),
+ Each(Each(arch_mm_absent_pte(TOP_LEVEL))));
+ EXPECT_THAT(std::span(tables).subspan(l3_index + 1),
+ Each(Each(arch_mm_absent_pte(TOP_LEVEL))));
+
+ /* Except the mapped page which is absent. */
+ auto table_l2 = tables[l3_index];
+ constexpr auto l2_index = 384;
+ EXPECT_THAT(table_l2.first(l2_index),
+ Each(arch_mm_absent_pte(TOP_LEVEL)));
+ ASSERT_TRUE(arch_mm_pte_is_table(table_l2[l2_index], TOP_LEVEL));
+ EXPECT_THAT(table_l2.subspan(l2_index + 1),
+ Each(arch_mm_absent_pte(TOP_LEVEL)));
+
+ auto table_l1 = get_table(
+ arch_mm_table_from_pte(table_l2[l2_index], TOP_LEVEL));
+ ASSERT_TRUE(arch_mm_pte_is_table(table_l1.first(1)[0], TOP_LEVEL - 1));
+ EXPECT_THAT(table_l1.subspan(1),
+ Each(arch_mm_absent_pte(TOP_LEVEL - 1)));
+
+ auto table_l0 = get_table(
+ arch_mm_table_from_pte(table_l1.first(1)[0], TOP_LEVEL - 1));
+ EXPECT_THAT(table_l0, Each(arch_mm_absent_pte(TOP_LEVEL - 2)));
+
+ mm_vm_fini(&ptable, &ppool);
+}
+
+/**
+ * Unmap a range of page mappings that spans multiple concatenated tables.
+ */
+TEST_F(mm, unmap_across_tables)
+{
+ constexpr uint32_t mode = 0;
+ const paddr_t map_begin = pa_init(0x180'0000'0000 - PAGE_SIZE);
+ const paddr_t map_end = pa_add(map_begin, 2 * PAGE_SIZE);
+ struct mm_ptable ptable;
+
+ ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
+ ASSERT_TRUE(mm_vm_identity_map(&ptable, map_begin, map_end, mode,
+ &ppool, nullptr));
+ ASSERT_TRUE(mm_vm_unmap(&ptable, map_begin, map_end, &ppool));
+
+ auto tables = get_ptable(ptable);
+
+ /* Check the untouched tables are empty. */
+ EXPECT_THAT(std::span(tables).first(2),
+ Each(Each(arch_mm_absent_pte(TOP_LEVEL))));
+
+	/* Check the first page is explicitly marked as absent. */
+ auto table2_l2 = tables[2];
+ EXPECT_THAT(table2_l2.first(table2_l2.size() - 1),
+ Each(arch_mm_absent_pte(TOP_LEVEL)));
+ ASSERT_TRUE(arch_mm_pte_is_table(table2_l2.last(1)[0], TOP_LEVEL));
+
+ auto table2_l1 = get_table(
+ arch_mm_table_from_pte(table2_l2.last(1)[0], TOP_LEVEL));
+ EXPECT_THAT(table2_l1.first(table2_l1.size() - 1),
+ Each(arch_mm_absent_pte(TOP_LEVEL - 1)));
+ ASSERT_TRUE(arch_mm_pte_is_table(table2_l1.last(1)[0], TOP_LEVEL - 1));
+
+ auto table2_l0 = get_table(
+ arch_mm_table_from_pte(table2_l1.last(1)[0], TOP_LEVEL - 1));
+ EXPECT_THAT(table2_l0, Each(arch_mm_absent_pte(TOP_LEVEL - 2)));
+
+	/* Check the last page is explicitly marked as absent. */
+ auto table3_l2 = tables[3];
+ ASSERT_TRUE(arch_mm_pte_is_table(table3_l2.first(1)[0], TOP_LEVEL));
+ EXPECT_THAT(table3_l2.subspan(1), Each(arch_mm_absent_pte(TOP_LEVEL)));
+
+ auto table3_l1 = get_table(
+ arch_mm_table_from_pte(table3_l2.first(1)[0], TOP_LEVEL));
+ ASSERT_TRUE(arch_mm_pte_is_table(table3_l1.first(1)[0], TOP_LEVEL - 1));
+ EXPECT_THAT(table3_l1.subspan(1),
+ Each(arch_mm_absent_pte(TOP_LEVEL - 1)));
+
+ auto table3_l0 = get_table(
+ arch_mm_table_from_pte(table3_l1.first(1)[0], TOP_LEVEL - 1));
+ EXPECT_THAT(table3_l0, Each(arch_mm_absent_pte(TOP_LEVEL - 2)));
+
+ mm_vm_fini(&ptable, &ppool);
+}
+
+/**
+ * Unmapping outside the range of memory has no effect.
+ */
+TEST_F(mm, unmap_out_of_range)
+{
+ constexpr uint32_t mode = 0;
+ struct mm_ptable ptable;
+ ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
+ ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
+ &ppool, nullptr));
+ ASSERT_TRUE(mm_vm_unmap(&ptable, VM_MEM_END, pa_init(0x4000'0000'0000),
+ &ppool));
+ EXPECT_THAT(
+ get_ptable(ptable),
+ AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
+ _1, TOP_LEVEL))))));
+ mm_vm_fini(&ptable, &ppool);
+}
+
+/**
+ * Unmapping a reverse range, i.e. the end comes before the start, is treated as
+ * an empty range so no change is made.
+ */
+TEST_F(mm, unmap_reverse_range)
+{
+ constexpr uint32_t mode = 0;
+ struct mm_ptable ptable;
+ ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
+ ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
+ &ppool, nullptr));
+ ASSERT_TRUE(mm_vm_unmap(&ptable, pa_init(0x80'a000'0000), pa_init(27),
+ &ppool));
+ EXPECT_THAT(
+ get_ptable(ptable),
+ AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
+ _1, TOP_LEVEL))))));
+ mm_vm_fini(&ptable, &ppool);
+}
+
+/**
+ * Unmapping a reverse range in the same page will unmap the page because the
+ * start of the range is rounded down and the end is rounded up.
+ *
+ * This serves as a form of documentation of behaviour rather than a
+ * requirement. Check whether any code relies on this before changing it.
+ */
+TEST_F(mm, unmap_reverse_range_quirk)
+{
+ constexpr uint32_t mode = 0;
+ const paddr_t page_begin = pa_init(0x180'0000'0000);
+ const paddr_t page_end = pa_add(page_begin, PAGE_SIZE);
+ struct mm_ptable ptable;
+ ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
+ ASSERT_TRUE(mm_vm_identity_map(&ptable, page_begin, page_end, mode,
+ &ppool, nullptr));
+ ASSERT_TRUE(mm_vm_unmap(&ptable, pa_add(page_begin, 100),
+ pa_add(page_begin, 50), &ppool));
+
+ auto tables = get_ptable(ptable);
+ constexpr auto l3_index = 3;
+
+ /* Check all other top level entries are empty... */
+ EXPECT_THAT(std::span(tables).first(l3_index),
+ Each(Each(arch_mm_absent_pte(TOP_LEVEL))));
+
+ /* Except the mapped page which is absent. */
+ auto table_l2 = tables[l3_index];
+ ASSERT_TRUE(arch_mm_pte_is_table(table_l2.first(1)[0], TOP_LEVEL));
+ EXPECT_THAT(table_l2.subspan(1), Each(arch_mm_absent_pte(TOP_LEVEL)));
+
+ auto table_l1 = get_table(
+ arch_mm_table_from_pte(table_l2.first(1)[0], TOP_LEVEL));
+ ASSERT_TRUE(arch_mm_pte_is_table(table_l1.first(1)[0], TOP_LEVEL - 1));
+ EXPECT_THAT(table_l1.subspan(1),
+ Each(arch_mm_absent_pte(TOP_LEVEL - 1)));
+
+ auto table_l0 = get_table(
+ arch_mm_table_from_pte(table_l1.first(1)[0], TOP_LEVEL - 1));
+ EXPECT_THAT(table_l0, Each(arch_mm_absent_pte(TOP_LEVEL - 2)));
+
+ mm_vm_fini(&ptable, &ppool);
+}
+
+/**
+ * Unmapping a range up to the maximum address causes the range end to wrap to
+ * zero as it is rounded up to a page boundary meaning no change is made.
+ *
+ * This serves as a form of documentation of behaviour rather than a
+ * requirement. Check whether any code relies on this before changing it.
+ */
+TEST_F(mm, unmap_last_address_quirk)
+{
+ constexpr uint32_t mode = 0;
+ struct mm_ptable ptable;
+ ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
+ ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
+ &ppool, nullptr));
+ ASSERT_TRUE(mm_vm_unmap(
+ &ptable, pa_init(0),
+ pa_init(std::numeric_limits<uintpaddr_t>::max()), &ppool));
+ EXPECT_THAT(
+ get_ptable(ptable),
+ AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
+ _1, TOP_LEVEL))))));
+ mm_vm_fini(&ptable, &ppool);
+}
+
+/**
+ * Mapping then unmapping a page does not defrag the table.
+ */
+TEST_F(mm, unmap_does_not_defrag)
+{
+ constexpr uint32_t mode = 0;
+ const paddr_t l0_begin = pa_init(5555 * PAGE_SIZE);
+ const paddr_t l0_end = pa_add(l0_begin, 13 * PAGE_SIZE);
+ const paddr_t l1_begin = pa_init(666 * mm_entry_size(1));
+ const paddr_t l1_end = pa_add(l1_begin, 5 * mm_entry_size(1));
+ struct mm_ptable ptable;
+ ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
+ ASSERT_TRUE(mm_vm_identity_map(&ptable, l0_begin, l0_end, mode, &ppool,
+ nullptr));
+ ASSERT_TRUE(mm_vm_identity_map(&ptable, l1_begin, l1_end, mode, &ppool,
+ nullptr));
+ ASSERT_TRUE(mm_vm_unmap(&ptable, l0_begin, l0_end, &ppool));
+ ASSERT_TRUE(mm_vm_unmap(&ptable, l1_begin, l1_end, &ppool));
+ EXPECT_THAT(get_ptable(ptable),
+ AllOf(SizeIs(4),
+ Not(Each(Each(arch_mm_absent_pte(TOP_LEVEL))))));
+ mm_vm_fini(&ptable, &ppool);
+}
+
+/**
+ * Nothing is mapped in an empty table.
+ */
+TEST_F(mm, is_mapped_empty)
+{
+ struct mm_ptable ptable;
+ ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
+ EXPECT_FALSE(mm_vm_is_mapped(&ptable, ipa_init(0)));
+ EXPECT_FALSE(mm_vm_is_mapped(&ptable, ipa_init(0x8123'2344)));
+ EXPECT_FALSE(mm_vm_is_mapped(&ptable, ipa_init(0x1e0'0000'0073)));
+ mm_vm_fini(&ptable, &ppool);
+}
+
+/**
+ * Everything is mapped in a full table.
+ */
+TEST_F(mm, is_mapped_all)
+{
+ constexpr uint32_t mode = 0;
+ struct mm_ptable ptable;
+ ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
+ ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
+ &ppool, nullptr));
+ EXPECT_TRUE(mm_vm_is_mapped(&ptable, ipa_init(0)));
+ EXPECT_TRUE(mm_vm_is_mapped(&ptable, ipa_init(0xf247'a7b3)));
+ EXPECT_TRUE(mm_vm_is_mapped(&ptable, ipa_init(0x1ff'7bfa'983b)));
+ mm_vm_fini(&ptable, &ppool);
+}
+
+/**
+ * A page is mapped for the range [begin, end).
+ */
+TEST_F(mm, is_mapped_page)
+{
+ constexpr uint32_t mode = 0;
+ const paddr_t page_begin = pa_init(0x100'0000'0000);
+ const paddr_t page_end = pa_add(page_begin, PAGE_SIZE);
+ struct mm_ptable ptable;
+ ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
+ ASSERT_TRUE(mm_vm_identity_map(&ptable, page_begin, page_end, mode,
+ &ppool, nullptr));
+ EXPECT_TRUE(mm_vm_is_mapped(&ptable, ipa_from_pa(page_begin)));
+ EXPECT_TRUE(
+ mm_vm_is_mapped(&ptable, ipa_from_pa(pa_add(page_begin, 127))));
+ EXPECT_FALSE(mm_vm_is_mapped(&ptable, ipa_from_pa(page_end)));
+ mm_vm_fini(&ptable, &ppool);
+}
+
+/**
+ * Everything out of range is not mapped.
+ */
+TEST_F(mm, is_mapped_out_of_range)
+{
+ constexpr uint32_t mode = 0;
+ struct mm_ptable ptable;
+ ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
+ ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
+ &ppool, nullptr));
+ EXPECT_FALSE(mm_vm_is_mapped(&ptable, ipa_from_pa(VM_MEM_END)));
+ EXPECT_FALSE(mm_vm_is_mapped(&ptable, ipa_init(0x1000'adb7'8123)));
+ EXPECT_FALSE(mm_vm_is_mapped(
+ &ptable, ipa_init(std::numeric_limits<uintpaddr_t>::max())));
+ mm_vm_fini(&ptable, &ppool);
+}
+
+/**
+ * The mode of unmapped addresses can be retrieved and is set to invalid,
+ * unowned and shared.
+ */
+TEST_F(mm, get_mode_empty)
+{
+ constexpr int default_mode =
+ MM_MODE_INVALID | MM_MODE_UNOWNED | MM_MODE_SHARED;
+ struct mm_ptable ptable;
+ uint32_t read_mode;
+ ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
+
+ read_mode = 0;
+ EXPECT_TRUE(
+ mm_vm_get_mode(&ptable, ipa_init(0), ipa_init(20), &read_mode));
+ EXPECT_THAT(read_mode, Eq(default_mode));
+
+ read_mode = 0;
+ EXPECT_TRUE(mm_vm_get_mode(&ptable, ipa_init(0x3c97'654d),
+ ipa_init(0x3c97'e000), &read_mode));
+ EXPECT_THAT(read_mode, Eq(default_mode));
+
+ read_mode = 0;
+ EXPECT_TRUE(mm_vm_get_mode(&ptable, ipa_init(0x5f'ffff'ffff),
+ ipa_init(0x1ff'ffff'ffff), &read_mode));
+ EXPECT_THAT(read_mode, Eq(default_mode));
+
+ mm_vm_fini(&ptable, &ppool);
+}
+
+/**
+ * Get the mode of a range comprised of individual pages which are either side
+ * of a root table boundary.
+ */
+TEST_F(mm, get_mode_pages_across_tables)
+{
+ constexpr uint32_t mode = MM_MODE_INVALID | MM_MODE_SHARED;
+ const paddr_t map_begin = pa_init(0x180'0000'0000 - PAGE_SIZE);
+ const paddr_t map_end = pa_add(map_begin, 2 * PAGE_SIZE);
+ struct mm_ptable ptable;
+ uint32_t read_mode;
+ ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
+ ASSERT_TRUE(mm_vm_identity_map(&ptable, map_begin, map_end, mode,
+ &ppool, nullptr));
+
+ read_mode = 0;
+ EXPECT_TRUE(mm_vm_get_mode(&ptable, ipa_from_pa(map_begin),
+ ipa_from_pa(pa_add(map_begin, PAGE_SIZE)),
+ &read_mode));
+ EXPECT_THAT(read_mode, Eq(mode));
+
+ EXPECT_FALSE(mm_vm_get_mode(&ptable, ipa_init(0),
+ ipa_from_pa(pa_add(map_begin, PAGE_SIZE)),
+ &read_mode));
+
+ read_mode = 0;
+ EXPECT_TRUE(mm_vm_get_mode(&ptable, ipa_from_pa(map_begin),
+ ipa_from_pa(map_end), &read_mode));
+ EXPECT_THAT(read_mode, Eq(mode));
+ mm_vm_fini(&ptable, &ppool);
+}
+
+/**
+ * Anything out of range fails to retrieve the mode.
+ */
+TEST_F(mm, get_mode_out_of_range)
+{
+ constexpr uint32_t mode = MM_MODE_UNOWNED;
+ struct mm_ptable ptable;
+ uint32_t read_mode;
+ ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
+ ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
+ &ppool, nullptr));
+ EXPECT_FALSE(mm_vm_get_mode(&ptable, ipa_init(0),
+ ipa_from_pa(pa_add(VM_MEM_END, 1)),
+ &read_mode));
+ EXPECT_FALSE(mm_vm_get_mode(&ptable, ipa_from_pa(VM_MEM_END),
+ ipa_from_pa(pa_add(VM_MEM_END, 1)),
+ &read_mode));
+ EXPECT_FALSE(mm_vm_get_mode(&ptable, ipa_init(0x1'1234'1234'1234),
+ ipa_init(2'0000'0000'0000), &read_mode));
+ mm_vm_fini(&ptable, &ppool);
+}
+
+/**
+ * Defragging an entirely empty table has no effect.
+ */
+TEST_F(mm, defrag_empty)
+{
+ struct mm_ptable ptable;
+ ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
+ mm_vm_defrag(&ptable, &ppool);
+ EXPECT_THAT(
+ get_ptable(ptable),
+ AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
+ mm_vm_fini(&ptable, &ppool);
+}
+
+/**
+ * Defragging a table with some empty subtables (even nested) results in
+ * an empty table.
+ */
+TEST_F(mm, defrag_empty_subtables)
+{
+ constexpr uint32_t mode = 0;
+ const paddr_t l0_begin = pa_init(120000 * PAGE_SIZE);
+ const paddr_t l0_end = pa_add(l0_begin, PAGE_SIZE);
+ const paddr_t l1_begin = pa_init(3 * mm_entry_size(1));
+ const paddr_t l1_end = pa_add(l1_begin, 5 * mm_entry_size(1));
+ struct mm_ptable ptable;
+ ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
+ ASSERT_TRUE(mm_vm_identity_map(&ptable, l0_begin, l0_end, mode, &ppool,
+ nullptr));
+ ASSERT_TRUE(mm_vm_identity_map(&ptable, l1_begin, l1_end, mode, &ppool,
+ nullptr));
+ ASSERT_TRUE(mm_vm_unmap(&ptable, l0_begin, l0_end, &ppool));
+ ASSERT_TRUE(mm_vm_unmap(&ptable, l1_begin, l1_end, &ppool));
+ mm_vm_defrag(&ptable, &ppool);
+ EXPECT_THAT(
+ get_ptable(ptable),
+ AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
+ mm_vm_fini(&ptable, &ppool);
+}
+
+/**
+ * Any subtable with all blocks with the same attributes should be replaced
+ * with a single block.
+ */
+TEST_F(mm, defrag_block_subtables)
+{
+ constexpr uint32_t mode = 0;
+ const paddr_t begin = pa_init(39456 * mm_entry_size(1));
+ const paddr_t middle = pa_add(begin, 67 * PAGE_SIZE);
+ const paddr_t end = pa_add(begin, 4 * mm_entry_size(1));
+ struct mm_ptable ptable;
+ ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
+ ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
+ &ppool, nullptr));
+ ASSERT_TRUE(mm_vm_unmap(&ptable, begin, end, &ppool));
+ ASSERT_TRUE(mm_vm_identity_map(&ptable, begin, middle, mode, &ppool,
+ nullptr));
+ ASSERT_TRUE(mm_vm_identity_map(&ptable, middle, end, mode, &ppool,
+ nullptr));
+ mm_vm_defrag(&ptable, &ppool);
+ EXPECT_THAT(
+ get_ptable(ptable),
+ AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
+ _1, TOP_LEVEL))))));
+ mm_vm_fini(&ptable, &ppool);
+}
+
+} /* namespace */
+
+namespace mm_test
+{
+/**
+ * Get an STL representation of the ptable.
+ */
+std::vector<std::span<pte_t, MM_PTE_PER_PAGE>> get_ptable(
+ const struct mm_ptable &ptable)
+{
+ std::vector<std::span<pte_t, MM_PTE_PER_PAGE>> all;
+ const uint8_t root_table_count = arch_mm_stage2_root_table_count();
+ for (uint8_t i = 0; i < root_table_count; ++i) {
+ all.push_back(get_table(
+ pa_add(ptable.root, i * sizeof(struct mm_page_table))));
+ }
+ return all;
+}
+
+} /* namespace mm_test */
diff --git a/src/mm_test.hh b/src/mm_test.hh
new file mode 100644
index 0000000..2e906aa
--- /dev/null
+++ b/src/mm_test.hh
@@ -0,0 +1,27 @@
+/*
+ * Copyright 2019 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <span>
+#include <vector>
+
+namespace mm_test
+{
+std::vector<std::span<pte_t, MM_PTE_PER_PAGE>> get_ptable(
+ const struct mm_ptable &ptable);
+
+} /* namespace mm_test */
diff --git a/src/mpool.c b/src/mpool.c
new file mode 100644
index 0000000..df954e8
--- /dev/null
+++ b/src/mpool.c
@@ -0,0 +1,355 @@
+/*
+ * Copyright 2018 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "hf/mpool.h"
+
+#include <stdbool.h>
+
+struct mpool_chunk {
+ struct mpool_chunk *next_chunk;
+ struct mpool_chunk *limit;
+};
+
+struct mpool_entry {
+ struct mpool_entry *next;
+};
+
+static bool mpool_locks_enabled = false;
+
+/**
+ * Enables the locks protecting memory pools. Before this function is called,
+ * the locks are disabled, that is, acquiring/releasing them is a no-op.
+ */
+void mpool_enable_locks(void)
+{
+ mpool_locks_enabled = true;
+}
+
+/**
+ * Acquires the lock protecting the given memory pool, if locks are enabled.
+ */
+static void mpool_lock(struct mpool *p)
+{
+ if (mpool_locks_enabled) {
+ sl_lock(&p->lock);
+ }
+}
+
+/**
+ * Releases the lock protecting the given memory pool, if locks are enabled.
+ */
+static void mpool_unlock(struct mpool *p)
+{
+ if (mpool_locks_enabled) {
+ sl_unlock(&p->lock);
+ }
+}
+
+/**
+ * Initialises the given memory pool with the given entry size, which must be
+ * at least the size of two pointers.
+ *
+ * All entries stored in the memory pool will be aligned to at least the entry
+ * size.
+ */
+void mpool_init(struct mpool *p, size_t entry_size)
+{
+ p->entry_size = entry_size;
+ p->chunk_list = NULL;
+ p->entry_list = NULL;
+ p->fallback = NULL;
+ sl_init(&p->lock);
+}
+
+/**
+ * Initialises the given memory pool by replicating the properties of `from`. It
+ * also pulls the chunk and free lists from `from`, consuming all its resources
+ * and making them available via the new memory pool.
+ */
+void mpool_init_from(struct mpool *p, struct mpool *from)
+{
+ mpool_init(p, from->entry_size);
+
+ mpool_lock(from);
+ p->chunk_list = from->chunk_list;
+ p->entry_list = from->entry_list;
+ p->fallback = from->fallback;
+
+ from->chunk_list = NULL;
+ from->entry_list = NULL;
+ from->fallback = NULL;
+ mpool_unlock(from);
+}
+
+/**
+ * Initialises the given memory pool with a fallback memory pool if this pool
+ * runs out of memory.
+ */
+void mpool_init_with_fallback(struct mpool *p, struct mpool *fallback)
+{
+ mpool_init(p, fallback->entry_size);
+ p->fallback = fallback;
+}
+
+/**
+ * Finishes the given memory pool, giving all free memory to the fallback pool
+ * if there is one.
+ */
+void mpool_fini(struct mpool *p)
+{
+ struct mpool_entry *entry;
+ struct mpool_chunk *chunk;
+
+ if (!p->fallback) {
+ return;
+ }
+
+ mpool_lock(p);
+
+ /* Merge the freelist into the fallback. */
+ entry = p->entry_list;
+ while (entry != NULL) {
+ void *ptr = entry;
+
+ entry = entry->next;
+ mpool_free(p->fallback, ptr);
+ }
+
+ /* Merge the chunk list into the fallback. */
+ chunk = p->chunk_list;
+ while (chunk != NULL) {
+ void *ptr = chunk;
+ size_t size = (uintptr_t)chunk->limit - (uintptr_t)chunk;
+
+ chunk = chunk->next_chunk;
+ mpool_add_chunk(p->fallback, ptr, size);
+ }
+
+ p->chunk_list = NULL;
+ p->entry_list = NULL;
+ p->fallback = NULL;
+
+ mpool_unlock(p);
+}
+
+/**
+ * Adds a contiguous chunk of memory to the given memory pool. The chunk will
+ * eventually be broken up into entries of the size held by the memory pool.
+ *
+ * Only the portions aligned to the entry size will be added to the pool.
+ *
+ * Returns true if at least a portion of the chunk was added to pool, or false
+ * if none of the buffer was usable in the pool.
+ */
+bool mpool_add_chunk(struct mpool *p, void *begin, size_t size)
+{
+ struct mpool_chunk *chunk;
+ uintptr_t new_begin;
+ uintptr_t new_end;
+
+ /* Round begin address up, and end address down. */
+ new_begin = ((uintptr_t)begin + p->entry_size - 1) / p->entry_size *
+ p->entry_size;
+ new_end = ((uintptr_t)begin + size) / p->entry_size * p->entry_size;
+
+ /* Nothing to do if there isn't enough room for an entry. */
+ if (new_begin >= new_end || new_end - new_begin < p->entry_size) {
+ return false;
+ }
+
+ chunk = (struct mpool_chunk *)new_begin;
+ chunk->limit = (struct mpool_chunk *)new_end;
+
+ mpool_lock(p);
+ chunk->next_chunk = p->chunk_list;
+ p->chunk_list = chunk;
+ mpool_unlock(p);
+
+ return true;
+}
+
+/**
+ * Allocates an entry from the given memory pool, if one is available. The
+ * fallback will not be used even if there is one.
+ */
+static void *mpool_alloc_no_fallback(struct mpool *p)
+{
+ void *ret;
+ struct mpool_chunk *chunk;
+ struct mpool_chunk *new_chunk;
+
+ /* Fetch an entry from the free list if one is available. */
+ mpool_lock(p);
+ if (p->entry_list != NULL) {
+ struct mpool_entry *entry = p->entry_list;
+
+ p->entry_list = entry->next;
+ ret = entry;
+ goto exit;
+ }
+
+ /* There was no free list available. Try a chunk instead. */
+ chunk = p->chunk_list;
+ if (chunk == NULL) {
+ /* The chunk list is also empty, we're out of entries. */
+ ret = NULL;
+ goto exit;
+ }
+
+ new_chunk = (struct mpool_chunk *)((uintptr_t)chunk + p->entry_size);
+ if (new_chunk >= chunk->limit) {
+ p->chunk_list = chunk->next_chunk;
+ } else {
+ *new_chunk = *chunk;
+ p->chunk_list = new_chunk;
+ }
+
+ ret = chunk;
+
+exit:
+ mpool_unlock(p);
+
+ return ret;
+}
+
+/**
+ * Allocates an entry from the given memory pool, if one is available. If there
+ * isn't one available, try and allocate from the fallback if there is one.
+ */
+void *mpool_alloc(struct mpool *p)
+{
+ do {
+ void *ret = mpool_alloc_no_fallback(p);
+
+ if (ret != NULL) {
+ return ret;
+ }
+
+ p = p->fallback;
+ } while (p != NULL);
+
+ return NULL;
+}
+
+/**
+ * Frees an entry back into the memory pool, making it available for reuse.
+ *
+ * This is meant to be used for freeing single entries. To free multiple
+ * entries, one must call mpool_add_chunk instead.
+ */
+void mpool_free(struct mpool *p, void *ptr)
+{
+ struct mpool_entry *e = ptr;
+
+ /* Store the newly freed entry in the front of the free list. */
+ mpool_lock(p);
+ e->next = p->entry_list;
+ p->entry_list = e;
+ mpool_unlock(p);
+}
+
+/**
+ * Allocates a number of contiguous and aligned entries. If a suitable
+ * allocation could not be found, the fallback will not be used even if there is
+ * one.
+ */
+void *mpool_alloc_contiguous_no_fallback(struct mpool *p, size_t count,
+ size_t align)
+{
+ struct mpool_chunk **prev;
+ void *ret = NULL;
+
+ align *= p->entry_size;
+
+ mpool_lock(p);
+
+ /*
+ * Go through the chunk list in search of one with enough room for the
+	 * requested allocation.
+ */
+ prev = &p->chunk_list;
+ while (*prev != NULL) {
+ uintptr_t start;
+ struct mpool_chunk *new_chunk;
+ struct mpool_chunk *chunk = *prev;
+
+ /* Round start address up to the required alignment. */
+ start = (((uintptr_t)chunk + align - 1) / align) * align;
+
+ /*
+ * Calculate where the new chunk would be if we consume the
+ * requested number of entries. Then check if this chunk is big
+ * enough to satisfy the request.
+ */
+ new_chunk =
+ (struct mpool_chunk *)(start + (count * p->entry_size));
+ if (new_chunk <= chunk->limit) {
+ /* Remove the consumed area. */
+ if (new_chunk == chunk->limit) {
+ *prev = chunk->next_chunk;
+ } else {
+ *new_chunk = *chunk;
+ *prev = new_chunk;
+ }
+
+ /*
+ * Add back the space consumed by the alignment
+ * requirement, if it's big enough to fit an entry.
+ */
+ if (start - (uintptr_t)chunk >= p->entry_size) {
+ chunk->next_chunk = *prev;
+ *prev = chunk;
+ chunk->limit = (struct mpool_chunk *)start;
+ }
+
+ ret = (void *)start;
+ break;
+ }
+
+ prev = &chunk->next_chunk;
+ }
+
+ mpool_unlock(p);
+
+ return ret;
+}
+
+/**
+ * Allocates a number of contiguous and aligned entries. This is a best-effort
+ * operation and only succeeds if such entries can be found in the chunks list
+ * or the chunks of the fallbacks (i.e., the entry list is never used to satisfy
+ * these allocations).
+ *
+ * The alignment is specified as the number of entries, that is, if `align` is
+ * 4, the alignment in bytes will be 4 * entry_size.
+ *
+ * The caller can eventually free the returned entries by calling
+ * mpool_add_chunk.
+ */
+void *mpool_alloc_contiguous(struct mpool *p, size_t count, size_t align)
+{
+ do {
+ void *ret = mpool_alloc_contiguous_no_fallback(p, count, align);
+
+ if (ret != NULL) {
+ return ret;
+ }
+
+ p = p->fallback;
+ } while (p != NULL);
+
+ return NULL;
+}
diff --git a/src/mpool_test.cc b/src/mpool_test.cc
new file mode 100644
index 0000000..53a45ed
--- /dev/null
+++ b/src/mpool_test.cc
@@ -0,0 +1,360 @@
+/*
+ * Copyright 2018 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <stdalign.h>
+
+#include <gmock/gmock.h>
+
+extern "C" {
+#include "hf/mpool.h"
+}
+
+namespace
+{
+using ::testing::Eq;
+using ::testing::IsNull;
+using ::testing::NotNull;
+
+/**
+ * Checks that the allocations exactly cover the chunks; sorts both in place.
+ */
+bool check_allocs(std::vector<std::unique_ptr<char[]>>& chunks,
+		  std::vector<uintptr_t>& allocs, size_t entries_per_chunk,
+		  size_t entry_size)
+{
+	size_t i, j;
+
+	if (allocs.size() != chunks.size() * entries_per_chunk) {
+		return false;
+	}
+
+	sort(allocs.begin(), allocs.end());
+	sort(chunks.begin(), chunks.end(),
+	     [](const std::unique_ptr<char[]>& a,
+		const std::unique_ptr<char[]>& b) {
+		     return a.get() < b.get();
+	     });
+
+	for (i = 0; i < chunks.size(); i++) {
+		if ((uintptr_t)chunks[i].get() !=
+		    allocs[i * entries_per_chunk]) {
+			return false;
+		}
+
+		for (j = 1; j < entries_per_chunk; j++) {
+			size_t k = i * entries_per_chunk + j;
+			if (allocs[k] != allocs[k - 1] + entry_size) {
+				return false;
+			}
+		}
+	}
+
+	return true;
+}
+
+/**
+ * Allocates `count` chunks of `size` bytes, adding each to the pool and vector.
+ */
+static void add_chunks(std::vector<std::unique_ptr<char[]>>& chunks,
+		       struct mpool* p, size_t count, size_t size)
+{
+	size_t i;
+
+	for (i = 0; i < count; i++) {
+		chunks.emplace_back(std::make_unique<char[]>(size));
+		mpool_add_chunk(p, chunks.back().get(), size);
+	}
+}
+
+/**
+ * Validates allocation of every entry from a memory pool until exhaustion.
+ */
+TEST(mpool, allocation)
+{
+	struct mpool p;
+	constexpr size_t entry_size = 16;
+	constexpr size_t entries_per_chunk = 10;
+	constexpr size_t chunk_count = 10;
+	std::vector<std::unique_ptr<char[]>> chunks;
+	std::vector<uintptr_t> allocs;
+	void* ret;
+
+	mpool_init(&p, entry_size);
+
+	/* Allocate from an empty pool. */
+	EXPECT_THAT(mpool_alloc(&p), IsNull());
+
+	/*
+	 * Add a chunk that is too small, it should be ignored, and allocation
+	 * should return NULL.
+	 */
+	mpool_add_chunk(&p, NULL, entry_size - 1);
+	EXPECT_THAT(mpool_alloc(&p), IsNull());
+
+	/* Allocate a number of chunks and add them to the pool. */
+	add_chunks(chunks, &p, chunk_count, entries_per_chunk * entry_size);
+
+	/* Allocate from the pool until we run out of memory. */
+	while ((ret = mpool_alloc(&p))) {
+		allocs.push_back((uintptr_t)ret);
+	}
+
+	/* Check that returned entries are within chunks that were added. */
+	ASSERT_THAT(check_allocs(chunks, allocs, entries_per_chunk, entry_size),
+		    true);
+}
+
+/**
+ * Validates that freed entries are reused before entries from the chunk list.
+ */
+TEST(mpool, freeing)
+{
+	struct mpool p;
+	constexpr size_t entry_size = 16;
+	constexpr size_t entries_per_chunk = 12;
+	constexpr size_t chunk_count = 10;
+	std::vector<std::unique_ptr<char[]>> chunks;
+	std::vector<uintptr_t> allocs;
+	size_t i;
+	alignas(entry_size) char entry[entry_size];
+	void* ret;
+
+	mpool_init(&p, entry_size);
+
+	/* Allocate from an empty pool. */
+	EXPECT_THAT(mpool_alloc(&p), IsNull());
+
+	/* Free an entry into the pool, then allocate it back. */
+	mpool_free(&p, &entry[0]);
+	EXPECT_THAT(mpool_alloc(&p), (void*)&entry[0]);
+	EXPECT_THAT(mpool_alloc(&p), IsNull());
+
+	/* Allocate a number of chunks and add them to the pool. */
+	add_chunks(chunks, &p, chunk_count, entries_per_chunk * entry_size);
+
+	/*
+	 * Free again into the pool. Ensure that we get entry back on next
+	 * allocation instead of something from the chunks.
+	 */
+	mpool_free(&p, &entry[0]);
+	EXPECT_THAT(mpool_alloc(&p), (void*)&entry[0]);
+
+	/* Allocate from the pool until we run out of memory. */
+	while ((ret = mpool_alloc(&p))) {
+		allocs.push_back((uintptr_t)ret);
+	}
+
+	/*
+	 * Free again into the pool. Ensure that we get entry back on next
+	 * allocation instead of something from the chunks.
+	 */
+	mpool_free(&p, &entry[0]);
+	EXPECT_THAT(mpool_alloc(&p), (void*)&entry[0]);
+
+	/* Add entries back to the pool by freeing them. */
+	for (i = 0; i < allocs.size(); i++) {
+		mpool_free(&p, (void*)allocs[i]);
+	}
+	allocs.clear();
+
+	/* Allocate from the pool until we run out of memory. */
+	while ((ret = mpool_alloc(&p))) {
+		allocs.push_back((uintptr_t)ret);
+	}
+
+	/* Check that returned entries are within chunks that were added. */
+	ASSERT_THAT(check_allocs(chunks, allocs, entries_per_chunk, entry_size),
+		    true);
+}
+
+/**
+ * Validates that initialising a pool from an existing one moves all memory.
+ */
+TEST(mpool, init_from)
+{
+	struct mpool p, q;
+	constexpr size_t entry_size = 16;
+	constexpr size_t entries_per_chunk = 10;
+	constexpr size_t chunk_count = 10;
+	std::vector<std::unique_ptr<char[]>> chunks;
+	std::vector<uintptr_t> allocs;
+	size_t i;
+	void* ret;
+
+	mpool_init(&p, entry_size);
+
+	/* Allocate a number of chunks and add them to the pool. */
+	add_chunks(chunks, &p, chunk_count, entries_per_chunk * entry_size);
+
+	/* Allocate half of the elements. */
+	for (i = 0; i < entries_per_chunk * chunk_count / 2; i++) {
+		void* ret = mpool_alloc(&p);
+		ASSERT_THAT(ret, NotNull());
+		allocs.push_back((uintptr_t)ret);
+	}
+
+	/* Add entries back to the pool by freeing them. */
+	for (i = 0; i < allocs.size(); i++) {
+		mpool_free(&p, (void*)allocs[i]);
+	}
+	allocs.clear();
+
+	/* Initialise q from p. */
+	mpool_init_from(&q, &p);
+
+	/* Allocation from p must now fail. */
+	EXPECT_THAT(mpool_alloc(&p), IsNull());
+
+	/* Allocate from q until we run out of memory. */
+	while ((ret = mpool_alloc(&q))) {
+		allocs.push_back((uintptr_t)ret);
+	}
+
+	/* Check that returned entries are within chunks that were added. */
+	ASSERT_THAT(check_allocs(chunks, allocs, entries_per_chunk, entry_size),
+		    true);
+}
+
+/**
+ * Validates allocation of contiguous, aligned runs of entries.
+ */
+TEST(mpool, alloc_contiguous)
+{
+	struct mpool p;
+	constexpr size_t entry_size = 16;
+	constexpr size_t entries_per_chunk = 12;
+	constexpr size_t chunk_count = 10;
+	std::vector<std::unique_ptr<char[]>> chunks;
+	std::vector<uintptr_t> allocs;
+	size_t i;
+	void* ret;
+	uintptr_t next;
+
+	mpool_init(&p, entry_size);
+
+	/* Allocate a number of chunks and add them to the pool. */
+	add_chunks(chunks, &p, chunk_count, entries_per_chunk * entry_size);
+
+	/*
+	 * Allocate entries until the remaining chunk is aligned to 2 entries,
+	 * but not aligned to 4 entries.
+	 */
+	do {
+		ret = mpool_alloc(&p);
+		ASSERT_THAT(ret, NotNull());
+		allocs.push_back((uintptr_t)ret);
+		next = ((uintptr_t)ret / entry_size) + 1;
+	} while ((next % 4) != 2);
+
+	/* Allocate 5 entries with an alignment of 4. So two must be skipped. */
+	ret = mpool_alloc_contiguous(&p, 5, 4);
+	ASSERT_THAT(ret, NotNull());
+	ASSERT_THAT((uintptr_t)ret, (next + 2) * entry_size);
+	for (i = 0; i < 5; i++) {
+		allocs.push_back((uintptr_t)ret + i * entry_size);
+	}
+
+	/* Allocate a whole chunk. */
+	ret = mpool_alloc_contiguous(&p, entries_per_chunk, 1);
+	ASSERT_THAT(ret, NotNull());
+	for (i = 0; i < entries_per_chunk; i++) {
+		allocs.push_back((uintptr_t)ret + i * entry_size);
+	}
+
+	/* Allocate 2 entries that are already aligned. */
+	ret = mpool_alloc_contiguous(&p, 2, 1);
+	ASSERT_THAT(ret, NotNull());
+	allocs.push_back((uintptr_t)ret);
+	allocs.push_back((uintptr_t)ret + entry_size);
+
+	/* Allocate from p until we run out of memory. */
+	while ((ret = mpool_alloc(&p))) {
+		allocs.push_back((uintptr_t)ret);
+	}
+
+	/* Check that returned entries are within chunks that were added. */
+	ASSERT_THAT(check_allocs(chunks, allocs, entries_per_chunk, entry_size),
+		    true);
+}
+
+/**
+ * Validates that allocations fall through to the fallback pool when the local
+ * pool is empty.
+ */
+TEST(mpool, allocation_with_fallback)
+{
+	struct mpool fallback;
+	struct mpool p;
+	constexpr size_t entry_size = 16;
+	constexpr size_t entries_per_chunk = 10;
+	constexpr size_t chunk_count = 10;
+	std::vector<std::unique_ptr<char[]>> chunks;
+	std::vector<uintptr_t> allocs;
+	void* ret;
+
+	mpool_init(&fallback, entry_size);
+	mpool_init_with_fallback(&p, &fallback);
+
+	/* Allocate from an empty pool. */
+	EXPECT_THAT(mpool_alloc(&p), IsNull());
+
+	/* Allocate a number of chunks and add them to the fallback pool. */
+	add_chunks(chunks, &fallback, chunk_count,
+		   entries_per_chunk * entry_size);
+
+	/* Allocate from the pool until we run out of memory. */
+	while ((ret = mpool_alloc(&p))) {
+		allocs.push_back((uintptr_t)ret);
+	}
+
+	/* Check that returned entries are within chunks that were added. */
+	ASSERT_THAT(check_allocs(chunks, allocs, entries_per_chunk, entry_size),
+		    true);
+}
+
+/**
+ * Validates that freed entries stay in the local pool and are only returned to
+ * the fallback when the local pool is finalised.
+ */
+TEST(mpool, free_with_fallback)
+{
+	struct mpool fallback;
+	struct mpool p;
+	constexpr size_t entry_size = 16;
+	constexpr size_t entries_per_chunk = 1;
+	constexpr size_t chunk_count = 1;
+	std::vector<std::unique_ptr<char[]>> chunks;
+	std::vector<uintptr_t> allocs;
+	void* ret;
+
+	mpool_init(&fallback, entry_size);
+	mpool_init_with_fallback(&p, &fallback);
+
+	/* Allocate a number of chunks and add them to the fallback pool. */
+	add_chunks(chunks, &fallback, chunk_count,
+		   entries_per_chunk * entry_size);
+
+	/* Allocate, making use of the fallback and free again. */
+	ret = mpool_alloc(&p);
+	mpool_free(&p, ret);
+
+	/* The entry is not available in the fallback. */
+	EXPECT_THAT(mpool_alloc(&fallback), IsNull());
+
+	/* The entry will be allocated by the local pool. */
+	EXPECT_THAT(mpool_alloc(&p), Eq(ret));
+
+	/* Return the memory to the local pool and then to the fallback. */
+	mpool_free(&p, ret);
+	mpool_fini(&p);
+
+	/* The fallback can now allocate the entry. */
+	EXPECT_THAT(mpool_alloc(&fallback), Eq(ret));
+}
+
+} /* namespace */
diff --git a/src/panic.c b/src/panic.c
new file mode 100644
index 0000000..9c0a4f6
--- /dev/null
+++ b/src/panic.c
@@ -0,0 +1,42 @@
+/*
+ * Copyright 2019 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "hf/panic.h"
+
+#include <stdarg.h>
+
+#include "hf/abort.h"
+#include "hf/dlog.h"
+
+/**
+ * Logs the reason for the panic before calling abort; never returns.
+ *
+ * TODO: Determine if we want to omit strings on non-debug builds.
+ */
+noreturn void panic(const char *fmt, ...)
+{
+	va_list args;
+
+	dlog("Panic: ");
+
+	va_start(args, fmt);
+	vdlog(fmt, args);
+	va_end(args);
+
+	dlog("\n");
+
+	abort();
+}
diff --git a/src/spci_architected_message.c b/src/spci_architected_message.c
new file mode 100644
index 0000000..06106d5
--- /dev/null
+++ b/src/spci_architected_message.c
@@ -0,0 +1,659 @@
+/*
+ * Copyright 2019 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "hf/api.h"
+#include "hf/check.h"
+#include "hf/dlog.h"
+#include "hf/spci_internal.h"
+#include "hf/std.h"
+#include "hf/vm.h"
+
+/**
+ * Looks up the current (from, to) ownership/access state pair in the given
+ * transition table to obtain the next mode to apply to the two VMs.
+ * Returns true iff a state transition was found.
+ */
+static bool spci_msg_get_next_state(
+	const struct spci_mem_transitions *transitions,
+	uint32_t transition_count, uint32_t memory_to_attributes,
+	uint32_t orig_from_mode, uint32_t orig_to_mode, uint32_t *from_mode,
+	uint32_t *to_mode)
+{
+	const uint32_t state_mask =
+		MM_MODE_INVALID | MM_MODE_UNOWNED | MM_MODE_SHARED;
+	const uint32_t orig_from_state = orig_from_mode & state_mask;
+
+	for (uint32_t index = 0; index < transition_count; index++) {
+		uint32_t table_orig_from_mode =
+			transitions[index].orig_from_mode;
+		uint32_t table_orig_to_mode = transitions[index].orig_to_mode;
+
+		if (((orig_from_state) == table_orig_from_mode) &&
+		    ((orig_to_mode & state_mask) == table_orig_to_mode)) {
+			*to_mode = transitions[index].to_mode |
+				   memory_to_attributes;
+
+			*from_mode = transitions[index].from_mode |
+				     (~state_mask & orig_from_mode);
+
+			return true;
+		}
+	}
+	return false;
+}
+
+/**
+ * Verify that all pages have the same mode, that the starting mode
+ * constitutes a valid state and obtain the next mode to apply
+ * to the two VMs.
+ *
+ * Returns:
+ *  The error code false indicates that:
+ *   1) a state transition was not found;
+ *   2) the pages being shared do not have the same mode within the <to>
+ *     or <from> VMs;
+ *   3) The beginning and end IPAs are not page aligned;
+ *   4) The requested share type was not handled.
+ *  Success is indicated by true.
+ *
+ */
+static bool spci_msg_check_transition(struct vm *to, struct vm *from,
+				      uint32_t share_type,
+				      uint32_t *orig_from_mode,
+				      struct spci_memory_region *memory_region,
+				      uint32_t memory_to_attributes,
+				      uint32_t *from_mode, uint32_t *to_mode)
+{
+	uint32_t orig_to_mode;
+	const struct spci_mem_transitions *mem_transition_table;
+	uint32_t transition_table_size;
+	uint32_t i;
+
+	/*
+	 * TODO: Transition table does not currently consider the multiple
+	 * shared case.
+	 */
+	static const struct spci_mem_transitions donate_transitions[] = {
+		{
+			/* 1) {O-EA, !O-NA} -> {!O-NA, O-EA} */
+			.orig_from_mode = 0,
+			.orig_to_mode = MM_MODE_INVALID | MM_MODE_UNOWNED,
+			.from_mode = MM_MODE_INVALID | MM_MODE_UNOWNED,
+			.to_mode = 0,
+		},
+		{
+			/* 2) {O-NA, !O-EA} -> {!O-NA, O-EA} */
+			.orig_from_mode = MM_MODE_INVALID,
+			.orig_to_mode = MM_MODE_UNOWNED,
+			.from_mode = MM_MODE_INVALID | MM_MODE_UNOWNED,
+			.to_mode = 0,
+		},
+		{
+			/* 3) {O-SA, !O-SA} -> {!O-NA, O-EA} */
+			.orig_from_mode = MM_MODE_SHARED,
+			.orig_to_mode = MM_MODE_UNOWNED | MM_MODE_SHARED,
+			.from_mode = MM_MODE_INVALID | MM_MODE_UNOWNED,
+			.to_mode = 0,
+		},
+		{
+			/*
+			 * Duplicate of 1) in order to cater for an alternative
+			 * representation of !O-NA:
+			 * (INVALID | UNOWNED | SHARED) and (INVALID | UNOWNED)
+			 * are both alternate representations of !O-NA.
+			 */
+			/* 4) {O-EA, !O-NA} -> {!O-NA, O-EA} */
+			.orig_from_mode = 0,
+			.orig_to_mode = MM_MODE_INVALID | MM_MODE_UNOWNED |
+					MM_MODE_SHARED,
+			.from_mode = MM_MODE_INVALID | MM_MODE_UNOWNED |
+				     MM_MODE_SHARED,
+			.to_mode = 0,
+		},
+	};
+
+	static const uint32_t size_donate_transitions =
+		ARRAY_SIZE(donate_transitions);
+
+	/*
+	 * This data structure holds the allowed state transitions for the
+	 * "lend" state machine. In this state machine the owner keeps ownership
+	 * but loses access to the lent pages.
+	 */
+	static const struct spci_mem_transitions lend_transitions[] = {
+		{
+			/* 1) {O-EA, !O-NA} -> {O-NA, !O-EA} */
+			.orig_from_mode = 0,
+			.orig_to_mode = MM_MODE_INVALID | MM_MODE_UNOWNED |
+					MM_MODE_SHARED,
+			.from_mode = MM_MODE_INVALID,
+			.to_mode = MM_MODE_UNOWNED,
+		},
+		{
+			/*
+			 * Duplicate of 1) in order to cater for an alternative
+			 * representation of !O-NA:
+			 * (INVALID | UNOWNED | SHARED) and (INVALID | UNOWNED)
+			 * are both alternate representations of !O-NA.
+			 */
+			/* 2) {O-EA, !O-NA} -> {O-NA, !O-EA} */
+			.orig_from_mode = 0,
+			.orig_to_mode = MM_MODE_INVALID | MM_MODE_UNOWNED,
+			.from_mode = MM_MODE_INVALID,
+			.to_mode = MM_MODE_UNOWNED,
+		},
+	};
+
+	static const uint32_t size_lend_transitions =
+		ARRAY_SIZE(lend_transitions);
+
+	/*
+	 * This data structure holds the allowed state transitions for the
+	 * "share" state machine. In this state machine the owner keeps the
+	 * shared pages mapped on its stage2 table and keeps access as well.
+	 */
+	static const struct spci_mem_transitions share_transitions[] = {
+		{
+			/* 1) {O-EA, !O-NA} -> {O-SA, !O-SA} */
+			.orig_from_mode = 0,
+			.orig_to_mode = MM_MODE_INVALID | MM_MODE_UNOWNED |
+					MM_MODE_SHARED,
+			.from_mode = MM_MODE_SHARED,
+			.to_mode = MM_MODE_UNOWNED | MM_MODE_SHARED,
+		},
+		{
+			/*
+			 * Duplicate of 1) in order to cater for an alternative
+			 * representation of !O-NA:
+			 * (INVALID | UNOWNED | SHARED) and (INVALID | UNOWNED)
+			 * are both alternate representations of !O-NA.
+			 */
+			/* 2) {O-EA, !O-NA} -> {O-SA, !O-SA} */
+			.orig_from_mode = 0,
+			.orig_to_mode = MM_MODE_INVALID | MM_MODE_UNOWNED,
+			.from_mode = MM_MODE_SHARED,
+			.to_mode = MM_MODE_UNOWNED | MM_MODE_SHARED,
+		},
+	};
+
+	static const uint32_t size_share_transitions =
+		ARRAY_SIZE(share_transitions);
+
+	static const struct spci_mem_transitions relinquish_transitions[] = {
+		{
+			/* 1) {!O-EA, O-NA} -> {!O-NA, O-EA} */
+			.orig_from_mode = MM_MODE_UNOWNED,
+			.orig_to_mode = MM_MODE_INVALID,
+			.from_mode = MM_MODE_INVALID | MM_MODE_UNOWNED |
+				     MM_MODE_SHARED,
+			.to_mode = 0,
+		},
+		{
+			/* 2) {!O-SA, O-SA} -> {!O-NA, O-EA} */
+			.orig_from_mode = MM_MODE_UNOWNED | MM_MODE_SHARED,
+			.orig_to_mode = MM_MODE_SHARED,
+			.from_mode = MM_MODE_INVALID | MM_MODE_UNOWNED |
+				     MM_MODE_SHARED,
+			.to_mode = 0,
+		},
+	};
+
+	static const uint32_t size_relinquish_transitions =
+		ARRAY_SIZE(relinquish_transitions);
+
+	struct spci_memory_region_constituent *constituents =
+		spci_memory_region_get_constituents(memory_region);
+
+	if (memory_region->constituent_count == 0) {
+		/*
+		 * Fail if there are no constituents. Otherwise
+		 * spci_msg_get_next_state would get an uninitialised
+		 * *orig_from_mode and orig_to_mode.
+		 */
+		return false;
+	}
+
+	for (i = 0; i < memory_region->constituent_count; ++i) {
+		ipaddr_t begin = ipa_init(constituents[i].address);
+		size_t size = constituents[i].page_count * PAGE_SIZE;
+		ipaddr_t end = ipa_add(begin, size);
+		uint32_t current_from_mode;
+		uint32_t current_to_mode;
+
+		/* Fail if addresses are not page-aligned. */
+		if (!is_aligned(ipa_addr(begin), PAGE_SIZE) ||
+		    !is_aligned(ipa_addr(end), PAGE_SIZE)) {
+			return false;
+		}
+
+		/*
+		 * Ensure that this constituent memory range is all mapped with
+		 * the same mode.
+		 */
+		if (!mm_vm_get_mode(&from->ptable, begin, end,
+				    &current_from_mode) ||
+		    !mm_vm_get_mode(&to->ptable, begin, end,
+				    &current_to_mode)) {
+			return false;
+		}
+
+		/*
+		 * Ensure that all constituents are mapped with the same mode.
+		 */
+		if (i == 0) {
+			*orig_from_mode = current_from_mode;
+			orig_to_mode = current_to_mode;
+		} else if (current_from_mode != *orig_from_mode ||
+			   current_to_mode != orig_to_mode) {
+			return false;
+		}
+	}
+
+	/* Ensure the address range is normal memory and not a device. */
+	if (*orig_from_mode & MM_MODE_D) {
+		return false;
+	}
+
+	switch (share_type) {
+	case SPCI_MSG_SEND_LEGACY_MEMORY_DONATE:
+		mem_transition_table = donate_transitions;
+		transition_table_size = size_donate_transitions;
+		break;
+
+	case SPCI_MSG_SEND_LEGACY_MEMORY_LEND:
+		mem_transition_table = lend_transitions;
+		transition_table_size = size_lend_transitions;
+		break;
+
+	case SPCI_MSG_SEND_LEGACY_MEMORY_SHARE:
+		mem_transition_table = share_transitions;
+		transition_table_size = size_share_transitions;
+		break;
+
+	case SPCI_MSG_SEND_LEGACY_MEMORY_RELINQUISH:
+		mem_transition_table = relinquish_transitions;
+		transition_table_size = size_relinquish_transitions;
+		break;
+
+	default:
+		return false;
+	}
+
+	return spci_msg_get_next_state(mem_transition_table,
+				       transition_table_size,
+				       memory_to_attributes, *orig_from_mode,
+				       orig_to_mode, from_mode, to_mode);
+}
+
+/**
+ * Updates a VM's page table such that the given set of physical address ranges
+ * are mapped in the address space at the corresponding address ranges, in the
+ * mode provided. Constituent addresses are identity-mapped, i.e. each IPA is
+ * treated as the corresponding PA (see pa_from_ipa below).
+ *
+ * If commit is false, the page tables will be allocated from the mpool but no
+ * mappings will actually be updated. This function must always be called first
+ * with commit false to check that it will succeed before calling with commit
+ * true, to avoid leaving the page table in a half-updated state. To make a
+ * series of changes atomically you can call them all with commit false before
+ * calling them all with commit true.
+ *
+ * mm_vm_defrag should always be called after a series of page table updates,
+ * whether they succeed or fail.
+ *
+ * Returns true on success, or false if the update failed and no changes were
+ * made to memory mappings.
+ */
+static bool spci_region_group_identity_map(
+	struct vm_locked vm_locked, struct spci_memory_region *memory_region,
+	int mode, struct mpool *ppool, bool commit)
+{
+	struct spci_memory_region_constituent *constituents =
+		spci_memory_region_get_constituents(memory_region);
+	uint32_t memory_constituent_count = memory_region->constituent_count;
+
+	/* Iterate over the memory region constituents. */
+	for (uint32_t index = 0; index < memory_constituent_count; index++) {
+		size_t size = constituents[index].page_count * PAGE_SIZE;
+		paddr_t pa_begin =
+			pa_from_ipa(ipa_init(constituents[index].address));
+		paddr_t pa_end = pa_add(pa_begin, size);
+
+		if (commit) {
+			vm_identity_commit(vm_locked, pa_begin, pa_end, mode,
+					   ppool, NULL);
+		} else if (!vm_identity_prepare(vm_locked, pa_begin, pa_end,
+					       mode, ppool)) {
+			return false;
+		}
+	}
+
+	return true;
+}
+
+/**
+ * Clears a single region of physical memory by overwriting it with zeros. The
+ * data is flushed from the cache so the memory has been cleared across the
+ * system.
+ */
+static bool clear_memory(paddr_t begin, paddr_t end, struct mpool *ppool)
+{
+	/*
+	 * TODO: change this to a CPU local single page window rather than a
+	 *       global mapping of the whole range. Such an approach will limit
+	 *       the changes to stage-1 tables and will allow only local
+	 *       invalidation.
+	 */
+	bool ret;
+	struct mm_stage1_locked stage1_locked = mm_lock_stage1();
+	void *ptr =
+		mm_identity_map(stage1_locked, begin, end, MM_MODE_W, ppool);
+	size_t size = pa_difference(begin, end);
+
+	if (!ptr) {
+		/* TODO: partial defrag of failed range. */
+		/* Recover any memory consumed in failed mapping. */
+		mm_defrag(stage1_locked, ppool);
+		goto fail;
+	}
+
+	memset_s(ptr, size, 0, size);
+	arch_mm_flush_dcache(ptr, size);
+	mm_unmap(stage1_locked, begin, end, ppool);
+
+	ret = true;
+	goto out;
+
+fail:
+	ret = false;
+
+out:
+	mm_unlock_stage1(&stage1_locked);
+
+	return ret;
+}
+
+/**
+ * Clears every constituent of the given memory region, using a local page pool
+ * to guarantee that pages mapped for clearing can be unmapped again afterwards.
+ */
+static bool spci_clear_memory_region(struct spci_memory_region *memory_region,
+				     struct mpool *api_page_pool)
+{
+	struct mpool local_page_pool;
+	struct spci_memory_region_constituent *constituents =
+		spci_memory_region_get_constituents(memory_region);
+	uint32_t memory_constituent_count = memory_region->constituent_count;
+	struct mm_stage1_locked stage1_locked;
+	bool ret = false;
+
+	/*
+	 * Create a local pool so any freed memory can't be used by another
+	 * thread. This is to ensure each constituent that is mapped can be
+	 * unmapped again afterwards.
+	 */
+	mpool_init_with_fallback(&local_page_pool, api_page_pool);
+
+	/* Iterate over the memory region constituents. */
+	for (uint32_t i = 0; i < memory_constituent_count; ++i) {
+		size_t size = constituents[i].page_count * PAGE_SIZE;
+		paddr_t begin = pa_from_ipa(ipa_init(constituents[i].address));
+		paddr_t end = pa_add(begin, size);
+
+		if (!clear_memory(begin, end, &local_page_pool)) {
+			/*
+			 * clear_memory will defrag on failure, so no need
+			 * to do it here.
+			 */
+			goto out;
+		}
+	}
+
+	/*
+	 * Need to defrag after clearing, as it may have added extra mappings to
+	 * the stage 1 page table.
+	 */
+	stage1_locked = mm_lock_stage1();
+	mm_defrag(stage1_locked, &local_page_pool);
+	mm_unlock_stage1(&stage1_locked);
+
+	ret = true;
+
+out:
+	mpool_fini(&local_page_pool);
+	return ret;
+}
+
+/**
+ * Shares memory from the calling VM with another, in one of the donate, lend,
+ * share or relinquish modes selected by `share_type`.
+ *
+ * This function requires the calling context to hold the <to> and <from> locks.
+ *
+ * Returns:
+ *  In case of error one of the following values is returned:
+ *   1) SPCI_INVALID_PARAMETERS - The endpoint provided parameters were
+ *     erroneous;
+ *   2) SPCI_NO_MEMORY - Hafnium did not have sufficient memory to complete
+ *     the request.
+ *  Success is indicated by SPCI_SUCCESS.
+ */
+static struct spci_value spci_share_memory(
+	struct vm_locked to_locked, struct vm_locked from_locked,
+	struct spci_memory_region *memory_region, uint32_t memory_to_attributes,
+	uint32_t share_type, struct mpool *api_page_pool)
+{
+	struct vm *to = to_locked.vm;
+	struct vm *from = from_locked.vm;
+	uint32_t orig_from_mode;
+	uint32_t from_mode;
+	uint32_t to_mode;
+	struct mpool local_page_pool;
+	struct spci_value ret;
+	struct spci_memory_region_constituent *constituents =
+		spci_memory_region_get_constituents(memory_region);
+
+	/*
+	 * Make sure constituents are properly aligned to a 64-bit boundary. If
+	 * not we would get alignment faults trying to read (64-bit) page
+	 * addresses.
+	 */
+	if (!is_aligned(constituents, 8)) {
+		return spci_error(SPCI_INVALID_PARAMETERS);
+	}
+
+	/* Disallow reflexive shares as this suggests an error in the VM. */
+	if (to == from) {
+		return spci_error(SPCI_INVALID_PARAMETERS);
+	}
+
+	/*
+	 * Check if the state transition is lawful for both VMs involved
+	 * in the memory exchange, ensure that all constituents of a memory
+	 * region being shared are at the same state.
+	 */
+	if (!spci_msg_check_transition(to, from, share_type, &orig_from_mode,
+				       memory_region, memory_to_attributes,
+				       &from_mode, &to_mode)) {
+		return spci_error(SPCI_INVALID_PARAMETERS);
+	}
+
+	/*
+	 * Create a local pool so any freed memory can't be used by another
+	 * thread. This is to ensure the original mapping can be restored if the
+	 * clear fails.
+	 */
+	mpool_init_with_fallback(&local_page_pool, api_page_pool);
+
+	/*
+	 * First reserve all required memory for the new page table entries in
+	 * both sender and recipient page tables without committing, to make
+	 * sure the entire operation will succeed without exhausting the page
+	 * pool.
+	 */
+	if (!spci_region_group_identity_map(from_locked, memory_region,
+					    from_mode, api_page_pool, false) ||
+	    !spci_region_group_identity_map(to_locked, memory_region, to_mode,
+					    api_page_pool, false)) {
+		/* TODO: partial defrag of failed range. */
+		ret = spci_error(SPCI_NO_MEMORY);
+		goto out;
+	}
+
+	/*
+	 * First update the mapping for the sender so there is no overlap with
+	 * the recipient. This won't allocate because the transaction was
+	 * already prepared above, but may free pages in the case that a whole
+	 * block is being unmapped that was previously partially mapped.
+	 */
+	CHECK(spci_region_group_identity_map(
+		from_locked, memory_region, from_mode, &local_page_pool, true));
+
+	/* Clear the memory so no VM or device can see the previous contents. */
+	if ((memory_region->flags & SPCI_MEMORY_REGION_FLAG_CLEAR) &&
+	    !spci_clear_memory_region(memory_region, api_page_pool)) {
+		/*
+		 * On failure, roll back by returning memory to the sender. This
+		 * may allocate pages which were previously freed into
+		 * `local_page_pool` by the call above, but will never allocate
+		 * more pages than that so can never fail.
+		 */
+		CHECK(spci_region_group_identity_map(from_locked, memory_region,
+						     orig_from_mode,
+						     &local_page_pool, true));
+
+		ret = spci_error(SPCI_NO_MEMORY);
+		goto out;
+	}
+
+	/*
+	 * Complete the transfer by mapping the memory into the recipient. This
+	 * won't allocate because the transaction was already prepared above, so
+	 * it doesn't need to use the `local_page_pool`.
+	 */
+	CHECK(spci_region_group_identity_map(to_locked, memory_region, to_mode,
+					     api_page_pool, true));
+
+	ret = (struct spci_value){.func = SPCI_SUCCESS_32};
+
+out:
+	mpool_fini(&local_page_pool);
+
+	/*
+	 * Tidy up the page tables by reclaiming failed mappings (if there was
+	 * an error) or merging entries into blocks where possible (on success).
+	 */
+	mm_vm_defrag(&to->ptable, api_page_pool);
+	mm_vm_defrag(&from->ptable, api_page_pool);
+
+	return ret;
+}
+
+/**
+ * Checks that the message length matches the declared number of memory region
+ * constituents and attributes, then calls the memory sharing routine.
+ */
+static struct spci_value spci_validate_call_share_memory(
+	struct vm_locked to_locked, struct vm_locked from_locked,
+	struct spci_memory_region *memory_region, uint32_t memory_share_size,
+	uint32_t share_type, struct mpool *api_page_pool)
+{
+	uint32_t memory_to_attributes;
+	uint32_t attributes_size;
+	uint32_t constituents_size;
+
+	/*
+	 * Ensure the number of constituents are within the memory
+	 * bounds.
+	 */
+	attributes_size = sizeof(struct spci_memory_region_attributes) *
+			  memory_region->attribute_count;
+	constituents_size = sizeof(struct spci_memory_region_constituent) *
+			    memory_region->constituent_count;
+	if (memory_region->constituent_offset <
+		    sizeof(struct spci_memory_region) + attributes_size ||
+	    memory_share_size !=
+		    memory_region->constituent_offset + constituents_size) {
+		return spci_error(SPCI_INVALID_PARAMETERS);
+	}
+
+	/* The sender must match the message sender. */
+	if (memory_region->sender != from_locked.vm->id) {
+		return spci_error(SPCI_INVALID_PARAMETERS);
+	}
+
+	/* We only support a single recipient. */
+	if (memory_region->attribute_count != 1) {
+		return spci_error(SPCI_INVALID_PARAMETERS);
+	}
+
+	/* The recipient must match the message recipient. */
+	if (memory_region->attributes[0].receiver != to_locked.vm->id) {
+		return spci_error(SPCI_INVALID_PARAMETERS);
+	}
+
+	switch (share_type) {
+	case SPCI_MSG_SEND_LEGACY_MEMORY_DONATE:
+	case SPCI_MSG_SEND_LEGACY_MEMORY_LEND:
+	case SPCI_MSG_SEND_LEGACY_MEMORY_SHARE:
+		memory_to_attributes = spci_memory_attrs_to_mode(
+			memory_region->attributes[0].memory_attributes);
+		break;
+	case SPCI_MSG_SEND_LEGACY_MEMORY_RELINQUISH:
+		memory_to_attributes = MM_MODE_R | MM_MODE_W | MM_MODE_X;
+		break;
+	default:
+		dlog("Invalid memory sharing message.\n");
+		return spci_error(SPCI_INVALID_PARAMETERS);
+	}
+
+	return spci_share_memory(to_locked, from_locked, memory_region,
+				 memory_to_attributes, share_type,
+				 api_page_pool);
+}
+
+/**
+ * Performs initial architected message information parsing. Calls the
+ * corresponding API functions implementing the functionality requested
+ * in the architected message.
+ */
+struct spci_value spci_msg_handle_architected_message(
+	struct vm_locked to_locked, struct vm_locked from_locked,
+	struct spci_memory_region *memory_region, uint32_t size,
+	uint32_t attributes, struct mpool *api_page_pool)
+{
+	uint32_t share_type = attributes & SPCI_MSG_SEND_LEGACY_MEMORY_MASK;
+	struct spci_value ret = spci_validate_call_share_memory(
+		to_locked, from_locked, memory_region, size, share_type,
+		api_page_pool);
+
+	/* Copy data to the destination Rx. */
+	/*
+	 * TODO: Translate the <from> IPA addresses to <to> IPA addresses.
+	 * Currently we assume identity mapping of the stage 2 translation.
+	 * Removing this assumption relies on a mechanism to handle scenarios
+	 * where the memory region fits in the source Tx buffer but cannot fit
+	 * in the destination Rx buffer. This mechanism will be defined at the
+	 * spec level.
+	 */
+	if (ret.func == SPCI_SUCCESS_32) {
+		memcpy_s(to_locked.vm->mailbox.recv, SPCI_MSG_PAYLOAD_MAX,
+			 memory_region, size);
+		to_locked.vm->mailbox.recv_size = size;
+		to_locked.vm->mailbox.recv_sender = from_locked.vm->id;
+		to_locked.vm->mailbox.recv_attributes = share_type;
+	}
+
+	return ret;
+}
diff --git a/src/std.c b/src/std.c
new file mode 100644
index 0000000..4576118
--- /dev/null
+++ b/src/std.c
@@ -0,0 +1,146 @@
+/*
+ * Copyright 2019 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "hf/std.h"
+
+#include "hf/check.h"
+
+/* Declare unsafe functions locally so they are not available globally. */
+void *memset(void *s, int c, size_t n);
+void *memcpy(void *dst, const void *src, size_t n);
+void *memmove(void *dst, const void *src, size_t n);
+
+/*
+ * As per the C11 specification, mem*_s() operations fill the destination buffer
+ * if runtime constraint validation fails, assuming that `dest` and `destsz`
+ * are both valid.
+ *
+ * The guard on `dest` and `destsz` below also prevents the fill itself from
+ * recursing into a failing memset_s(). panic() is expected not to return.
+ */
+#define CHECK_OR_FILL(cond, dest, destsz, ch)                              \
+	do {                                                               \
+		if (!(cond)) {                                             \
+			if ((dest) != NULL && (destsz) <= RSIZE_MAX) {     \
+				memset_s((dest), (destsz), (ch), (destsz)); \
+			}                                                  \
+			panic("%s failed: " #cond, __func__);              \
+		}                                                          \
+	} while (0)
+
+/* Convenience wrapper: fill with NUL bytes on constraint violation. */
+#define CHECK_OR_ZERO_FILL(cond, dest, destsz) \
+	CHECK_OR_FILL(cond, dest, destsz, '\0')
+
+/**
+ * Fills the first `count` bytes of `dest` with `ch`, with C11 Annex K style
+ * runtime constraint checking: `dest` non-NULL and count <= destsz <=
+ * RSIZE_MAX. A violation fills `dest` (when safely possible) and panics.
+ */
+void memset_s(void *dest, rsize_t destsz, int ch, rsize_t count)
+{
+	CHECK_OR_FILL(dest != NULL, dest, destsz, ch);
+
+	/* Check count <= destsz <= RSIZE_MAX. */
+	CHECK_OR_FILL(destsz <= RSIZE_MAX, dest, destsz, ch);
+	CHECK_OR_FILL(count <= destsz, dest, destsz, ch);
+
+	/*
+	 * Clang analyzer doesn't like us calling unsafe memory functions, so
+	 * make it ignore this call.
+	 */
+#ifndef __clang_analyzer__
+	memset(dest, ch, count);
+#endif
+}
+
+/**
+ * Copies `count` bytes from `src` to `dest` with runtime constraint checks:
+ * both pointers non-NULL, count <= destsz <= RSIZE_MAX, and the two ranges
+ * must not overlap. A violation zero-fills `dest` (when safe) and panics.
+ */
+void memcpy_s(void *dest, rsize_t destsz, const void *src, rsize_t count)
+{
+	uintptr_t d = (uintptr_t)dest;
+	uintptr_t s = (uintptr_t)src;
+
+	CHECK_OR_ZERO_FILL(dest != NULL, dest, destsz);
+	CHECK_OR_ZERO_FILL(src != NULL, dest, destsz);
+
+	/* Check count <= destsz <= RSIZE_MAX. */
+	CHECK_OR_ZERO_FILL(destsz <= RSIZE_MAX, dest, destsz);
+	CHECK_OR_ZERO_FILL(count <= destsz, dest, destsz);
+
+	/*
+	 * Buffer overlap test.
+	 * case a) `d < s` implies `s >= d+count`
+	 * case b) `d > s` implies `d >= s+count`
+	 */
+	CHECK_OR_ZERO_FILL(d != s, dest, destsz);
+	CHECK_OR_ZERO_FILL(d < s || d >= (s + count), dest, destsz);
+	CHECK_OR_ZERO_FILL(d > s || s >= (d + count), dest, destsz);
+
+	/* See memset_s() for why the analyzer is silenced here. */
+#ifndef __clang_analyzer__
+	memcpy(dest, src, count);
+#endif
+}
+
+/**
+ * Moves `count` bytes from `src` to `dest`; unlike memcpy_s() the ranges may
+ * overlap. Runtime constraint violations zero-fill `dest` (when safe) and
+ * panic.
+ */
+void memmove_s(void *dest, rsize_t destsz, const void *src, rsize_t count)
+{
+	CHECK_OR_ZERO_FILL(dest != NULL, dest, destsz);
+	CHECK_OR_ZERO_FILL(src != NULL, dest, destsz);
+
+	/* Check count <= destsz <= RSIZE_MAX. */
+	CHECK_OR_ZERO_FILL(destsz <= RSIZE_MAX, dest, destsz);
+	CHECK_OR_ZERO_FILL(count <= destsz, dest, destsz);
+
+	/* See memset_s() for why the analyzer is silenced here. */
+#ifndef __clang_analyzer__
+	memmove(dest, src, count);
+#endif
+}
+
+/**
+ * Finds the first occurrence of character `ch` in the first `count` bytes of
+ * memory pointed to by `ptr`.
+ *
+ * Returns NULL if `ch` is not found.
+ * Panics if `ptr` is NULL (undefined behaviour).
+ */
+void *memchr(const void *ptr, int ch, size_t count)
+{
+	size_t i;
+	const unsigned char *p = (const unsigned char *)ptr;
+
+	CHECK(ptr != NULL);
+
+	/* Iterate over at most `count` bytes of `ptr`. */
+	for (i = 0; i < count; ++i) {
+		if (p[i] == (unsigned char)ch) {
+			return (void *)(&p[i]);
+		}
+	}
+
+	return NULL;
+}
+
+/**
+ * Returns the length of the null-terminated byte string `str`, examining at
+ * most `strsz` bytes.
+ *
+ * If `str` is a NULL pointer, it returns zero.
+ * If a NULL character is not found, it returns `strsz`.
+ */
+size_t strnlen_s(const char *str, size_t strsz)
+{
+	size_t len = 0;
+
+	if (str == NULL) {
+		return 0;
+	}
+
+	/* Stop at the first NULL character or after `strsz` bytes. */
+	while (len < strsz && str[len] != '\0') {
+		++len;
+	}
+
+	return len;
+}
diff --git a/src/string.c b/src/string.c
new file mode 100644
index 0000000..4876a3f
--- /dev/null
+++ b/src/string.c
@@ -0,0 +1,60 @@
+/*
+ * Copyright 2019 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "hf/string.h"
+
+#include "hf/static_assert.h"
+#include "hf/std.h"
+
+/**
+ * Initialises `str` to the empty string (a single NUL terminator).
+ */
+void string_init_empty(struct string *str)
+{
+	static_assert(sizeof(str->data) >= 1, "String buffer too small");
+	str->data[0] = '\0';
+}
+
+/**
+ * Caller must guarantee that `data` points to a NULL-terminated string.
+ * The constructor checks that it fits into the internal buffer and copies
+ * the string there.
+ *
+ * `size` includes the terminator, so it must be at least 1 and the only NUL
+ * byte in `data[0..size)` must be the last byte.
+ */
+enum string_return_code string_init(struct string *str, const char *data,
+				    size_t size)
+{
+	/*
+	 * Require that the value contains exactly one NULL character and that
+	 * it is the last byte.
+	 */
+	if (size < 1 || memchr(data, '\0', size) != &data[size - 1]) {
+		return STRING_ERROR_INVALID_INPUT;
+	}
+
+	if (size > sizeof(str->data)) {
+		return STRING_ERROR_TOO_LONG;
+	}
+
+	memcpy_s(str->data, sizeof(str->data), data, size);
+	return STRING_SUCCESS;
+}
+
+/**
+ * Returns true if the string holds no characters besides the terminator.
+ */
+bool string_is_empty(const struct string *str)
+{
+	return *str->data == '\0';
+}
+
+/**
+ * Returns a pointer to the NUL-terminated contents of the string.
+ */
+const char *string_data(const struct string *str)
+{
+	return &str->data[0];
+}
diff --git a/src/string_test.cc b/src/string_test.cc
new file mode 100644
index 0000000..f814636
--- /dev/null
+++ b/src/string_test.cc
@@ -0,0 +1,66 @@
+/*
+ * Copyright 2019 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <gmock/gmock.h>
+
+extern "C" {
+#include "hf/string.h"
+}
+
+namespace
+{
+/* An empty-initialised string is empty; a valid copy-init succeeds. */
+TEST(string, valid)
+{
+	struct string str;
+	constexpr const char data[] = "test";
+
+	string_init_empty(&str);
+	ASSERT_TRUE(string_is_empty(&str));
+	ASSERT_STREQ(string_data(&str), "");
+
+	/* sizeof(data) includes the NUL terminator, as string_init expects. */
+	ASSERT_EQ(string_init(&str, data, sizeof(data)), STRING_SUCCESS);
+	ASSERT_FALSE(string_is_empty(&str));
+	ASSERT_STRNE(string_data(&str), "");
+	ASSERT_STREQ(string_data(&str), "test");
+}
+
+/* A zero-size buffer cannot contain a terminator, so init must fail. */
+TEST(string, data_zero_size)
+{
+	struct string str;
+	constexpr const char data[] = "test";
+
+	ASSERT_EQ(string_init(&str, data, 0), STRING_ERROR_INVALID_INPUT);
+}
+
+/* Input without a trailing NUL must be rejected. */
+TEST(string, data_no_null_terminator)
+{
+	struct string str;
+	constexpr const char data[] = {'t', 'e', 's', 't'};
+
+	ASSERT_EQ(string_init(&str, data, sizeof(data)),
+		  STRING_ERROR_INVALID_INPUT);
+}
+
+/* Input with an embedded NUL before the terminator must be rejected. */
+TEST(string, data_two_null_terminators)
+{
+	struct string str;
+	constexpr const char data[] = {'\0', 't', 'e', 's', 't', '\0'};
+
+	ASSERT_EQ(string_init(&str, data, sizeof(data)),
+		  STRING_ERROR_INVALID_INPUT);
+}
+
+} /* namespace */
diff --git a/src/vcpu.c b/src/vcpu.c
new file mode 100644
index 0000000..2f1b5fc
--- /dev/null
+++ b/src/vcpu.c
@@ -0,0 +1,174 @@
+/*
+ * Copyright 2019 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "hf/vcpu.h"
+
+#include "hf/check.h"
+#include "hf/dlog.h"
+#include "hf/std.h"
+#include "hf/vm.h"
+
+/**
+ * Locks the given vCPU and returns a handle representing the locked vCPU.
+ */
+struct vcpu_locked vcpu_lock(struct vcpu *vcpu)
+{
+	struct vcpu_locked locked;
+
+	sl_lock(&vcpu->lock);
+	locked.vcpu = vcpu;
+
+	return locked;
+}
+
+/**
+ * Unlocks a vCPU previously locked with vcpu_lock, and updates `locked` to
+ * reflect the fact that the vCPU is no longer locked.
+ */
+void vcpu_unlock(struct vcpu_locked *locked)
+{
+	sl_unlock(&locked->vcpu->lock);
+	/* Clear the pointer so any stale use of the handle is caught. */
+	locked->vcpu = NULL;
+}
+
+/**
+ * Zeroes the vCPU and performs its basic initialisation: lock, owning VM and
+ * initial VCPU_STATE_OFF state. Registers are set up later by vcpu_on().
+ */
+void vcpu_init(struct vcpu *vcpu, struct vm *vm)
+{
+	memset_s(vcpu, sizeof(*vcpu), 0, sizeof(*vcpu));
+	sl_init(&vcpu->lock);
+	vcpu->regs_available = true;
+	vcpu->vm = vm;
+	vcpu->state = VCPU_STATE_OFF;
+}
+
+/**
+ * Initialise the registers for the given vCPU and set the state to
+ * VCPU_STATE_READY. The caller must hold the vCPU lock while calling this.
+ *
+ * `entry` is the initial program counter and `arg` the first argument
+ * register value.
+ */
+void vcpu_on(struct vcpu_locked vcpu, ipaddr_t entry, uintreg_t arg)
+{
+	arch_regs_set_pc_arg(&vcpu.vcpu->regs, entry, arg);
+	vcpu.vcpu->state = VCPU_STATE_READY;
+}
+
+/**
+ * Returns the index of the vCPU within its VM, derived from its position in
+ * the VM's vcpus array. The CHECK guards the narrowing to
+ * spci_vcpu_index_t.
+ */
+spci_vcpu_index_t vcpu_index(const struct vcpu *vcpu)
+{
+	size_t index = vcpu - vcpu->vm->vcpus;
+
+	CHECK(index < UINT16_MAX);
+	return index;
+}
+
+/**
+ * Check whether the given vcpu_state is an off state, for the purpose of
+ * turning vCPUs on and off. Note that aborted still counts as on in this
+ * context.
+ */
+bool vcpu_is_off(struct vcpu_locked vcpu)
+{
+	switch (vcpu.vcpu->state) {
+	case VCPU_STATE_OFF:
+		return true;
+	case VCPU_STATE_READY:
+	case VCPU_STATE_RUNNING:
+	case VCPU_STATE_BLOCKED_MAILBOX:
+	case VCPU_STATE_BLOCKED_INTERRUPT:
+	case VCPU_STATE_ABORTED:
+		/*
+		 * Aborted still counts as ON for the purposes of PSCI,
+		 * because according to the PSCI specification (section
+		 * 5.7.1) a core is only considered to be off if it has
+		 * been turned off with a CPU_OFF call or hasn't yet
+		 * been turned on with a CPU_ON call.
+		 */
+		return false;
+	}
+
+	/*
+	 * Every enumerator is handled above, but C does not guarantee that
+	 * the field holds a valid enumerator; without this return, control
+	 * could reach the end of a non-void function, which is undefined
+	 * behaviour. Treat any unexpected value as on.
+	 */
+	return false;
+}
+
+/**
+ * Starts a vCPU of a secondary VM.
+ *
+ * Returns true if the secondary was reset and started, or false if it was
+ * already on and so nothing was done.
+ */
+bool vcpu_secondary_reset_and_start(struct vcpu *vcpu, ipaddr_t entry,
+				    uintreg_t arg)
+{
+	struct vcpu_locked vcpu_locked;
+	struct vm *vm = vcpu->vm;
+	bool vcpu_was_off;
+
+	/* This function must never be used on the primary VM's vCPUs. */
+	CHECK(vm->id != HF_PRIMARY_VM_ID);
+
+	vcpu_locked = vcpu_lock(vcpu);
+	vcpu_was_off = vcpu_is_off(vcpu_locked);
+	if (vcpu_was_off) {
+		/*
+		 * Set vCPU registers to a clean state ready for boot. As this
+		 * is a secondary which can migrate between pCPUs, the ID of the
+		 * vCPU is defined as the index and does not match the ID of the
+		 * pCPU it is running on.
+		 */
+		arch_regs_reset(vcpu);
+		vcpu_on(vcpu_locked, entry, arg);
+	}
+	vcpu_unlock(&vcpu_locked);
+
+	return vcpu_was_off;
+}
+
+/**
+ * Handles a page fault. It does so by determining if it's a legitimate or
+ * spurious fault, and recovering from the latter.
+ *
+ * Returns true if the caller should resume the current vCPU, or false if its VM
+ * should be aborted.
+ *
+ * `f` describes the fault: faulting pc, virtual/intermediate-physical
+ * addresses and the access mode that was attempted.
+ */
+bool vcpu_handle_page_fault(const struct vcpu *current,
+			    struct vcpu_fault_info *f)
+{
+	struct vm *vm = current->vm;
+	uint32_t mode;
+	uint32_t mask = f->mode | MM_MODE_INVALID;
+	bool resume;
+
+	sl_lock(&vm->lock);
+
+	/*
+	 * Check if this is a legitimate fault, i.e., if the page table doesn't
+	 * allow the access attempted by the VM.
+	 *
+	 * Otherwise, this is a spurious fault, likely because another CPU is
+	 * updating the page table. It is responsible for issuing global TLB
+	 * invalidations while holding the VM lock, so we don't need to do
+	 * anything else to recover from it. (Acquiring/releasing the lock
+	 * ensured that the invalidations have completed.)
+	 */
+	resume = mm_vm_get_mode(&vm->ptable, f->ipaddr, ipa_add(f->ipaddr, 1),
+				&mode) &&
+		 (mode & mask) == f->mode;
+
+	sl_unlock(&vm->lock);
+
+	if (!resume) {
+		dlog("Stage-2 page fault: pc=%#x, vmid=%u, vcpu=%u, "
+		     "vaddr=%#x, ipaddr=%#x, mode=%#x\n",
+		     f->pc, vm->id, vcpu_index(current), f->vaddr, f->ipaddr,
+		     f->mode);
+	}
+
+	return resume;
+}
diff --git a/src/vm.c b/src/vm.c
new file mode 100644
index 0000000..0f36e21
--- /dev/null
+++ b/src/vm.c
@@ -0,0 +1,255 @@
+/*
+ * Copyright 2018 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "hf/vm.h"
+
+#include "hf/api.h"
+#include "hf/check.h"
+#include "hf/cpu.h"
+#include "hf/layout.h"
+#include "hf/plat/iommu.h"
+#include "hf/spci.h"
+#include "hf/std.h"
+
+#include "vmapi/hf/call.h"
+
+static struct vm vms[MAX_VMS];
+static spci_vm_count_t vm_count;
+
+/**
+ * Initialises the next free slot in the static VM table and returns it via
+ * `new_vm`.
+ *
+ * Returns false (without incrementing the VM count) if the table is full or
+ * the VM's page table cannot be initialised from `ppool`.
+ */
+bool vm_init(spci_vcpu_count_t vcpu_count, struct mpool *ppool,
+	     struct vm **new_vm)
+{
+	uint32_t i;
+	struct vm *vm;
+
+	if (vm_count >= MAX_VMS) {
+		return false;
+	}
+
+	vm = &vms[vm_count];
+
+	memset_s(vm, sizeof(*vm), 0, sizeof(*vm));
+
+	list_init(&vm->mailbox.waiter_list);
+	list_init(&vm->mailbox.ready_list);
+	sl_init(&vm->lock);
+
+	/* Generate IDs based on an offset, as low IDs e.g., 0, are reserved. */
+	vm->id = vm_count + HF_VM_ID_OFFSET;
+	vm->vcpu_count = vcpu_count;
+	vm->mailbox.state = MAILBOX_STATE_EMPTY;
+	atomic_init(&vm->aborting, false);
+
+	if (!mm_vm_init(&vm->ptable, ppool)) {
+		return false;
+	}
+
+	/* Initialise waiter entries. */
+	for (i = 0; i < MAX_VMS; i++) {
+		vm->wait_entries[i].waiting_vm = vm;
+		list_init(&vm->wait_entries[i].wait_links);
+		list_init(&vm->wait_entries[i].ready_links);
+	}
+
+	/* Do basic initialization of vCPUs. */
+	for (i = 0; i < vcpu_count; i++) {
+		vcpu_init(vm_get_vcpu(vm, i), vm);
+	}
+
+	/* Only publish the VM once it is fully initialised. */
+	++vm_count;
+	*new_vm = vm;
+
+	return true;
+}
+
+/**
+ * Returns the number of VMs initialised so far.
+ */
+spci_vm_count_t vm_get_count(void)
+{
+	return vm_count;
+}
+
+/**
+ * Returns the VM with the given ID, or NULL if the ID is reserved or no such
+ * VM has been initialised.
+ */
+struct vm *vm_find(spci_vm_id_t id)
+{
+	uint16_t index;
+
+	/* IDs below the offset are reserved and never assigned to a VM. */
+	if (id < HF_VM_ID_OFFSET) {
+		return NULL;
+	}
+
+	index = id - HF_VM_ID_OFFSET;
+
+	/* Only VMs that have already been initialised can be looked up. */
+	return (index < vm_count) ? &vms[index] : NULL;
+}
+
+/**
+ * Locks the given VM and returns a handle representing the locked VM.
+ */
+struct vm_locked vm_lock(struct vm *vm)
+{
+	struct vm_locked locked;
+
+	sl_lock(&vm->lock);
+	locked.vm = vm;
+
+	return locked;
+}
+
+/**
+ * Locks two VMs ensuring that the locking order is according to the locks'
+ * addresses.
+ *
+ * The consistent global order (enforced by sl_lock_both) prevents deadlock
+ * when two CPUs lock the same pair of VMs concurrently.
+ */
+struct two_vm_locked vm_lock_both(struct vm *vm1, struct vm *vm2)
+{
+	struct two_vm_locked dual_lock;
+
+	sl_lock_both(&vm1->lock, &vm2->lock);
+	dual_lock.vm1.vm = vm1;
+	dual_lock.vm2.vm = vm2;
+
+	return dual_lock;
+}
+
+/**
+ * Unlocks a VM previously locked with vm_lock, and updates `locked` to reflect
+ * the fact that the VM is no longer locked.
+ */
+void vm_unlock(struct vm_locked *locked)
+{
+	sl_unlock(&locked->vm->lock);
+	/* Clear the pointer so any stale use of the handle is caught. */
+	locked->vm = NULL;
+}
+
+/**
+ * Get the vCPU with the given index from the given VM.
+ * This assumes the index is valid, i.e. less than vm->vcpu_count;
+ * an out-of-range index triggers a CHECK failure.
+ */
+struct vcpu *vm_get_vcpu(struct vm *vm, spci_vcpu_index_t vcpu_index)
+{
+	CHECK(vcpu_index < vm->vcpu_count);
+	return &vm->vcpus[vcpu_index];
+}
+
+/**
+ * Gets `vm`'s wait entry for waiting on the `for_vm`.
+ *
+ * Wait entries are indexed by the target VM's ID minus HF_VM_ID_OFFSET; see
+ * vm_id_for_wait_entry() for the inverse mapping.
+ */
+struct wait_entry *vm_get_wait_entry(struct vm *vm, spci_vm_id_t for_vm)
+{
+	uint16_t index;
+
+	CHECK(for_vm >= HF_VM_ID_OFFSET);
+	index = for_vm - HF_VM_ID_OFFSET;
+	CHECK(index < MAX_VMS);
+
+	return &vm->wait_entries[index];
+}
+
+/**
+ * Gets the ID of the VM which the given VM's wait entry is for.
+ *
+ * Inverse of vm_get_wait_entry(): recovers the target VM's ID from the
+ * entry's position in the wait_entries array.
+ */
+spci_vm_id_t vm_id_for_wait_entry(struct vm *vm, struct wait_entry *entry)
+{
+	uint16_t index = entry - vm->wait_entries;
+
+	return index + HF_VM_ID_OFFSET;
+}
+
+/**
+ * Map a range of addresses to the VM in both the MMU and the IOMMU.
+ *
+ * mm_vm_defrag should always be called after a series of page table updates,
+ * whether they succeed or fail. This is because on failure extra page table
+ * entries may have been allocated and then not used, while on success it may be
+ * possible to compact the page table by merging several entries into a block.
+ *
+ * Returns true on success, or false if the update failed and no changes were
+ * made.
+ *
+ * If `ipa` is non-NULL it receives the IPA at which the range was mapped.
+ */
+bool vm_identity_map(struct vm_locked vm_locked, paddr_t begin, paddr_t end,
+		     uint32_t mode, struct mpool *ppool, ipaddr_t *ipa)
+{
+	/* Prepare first so the commit below cannot fail. */
+	if (!vm_identity_prepare(vm_locked, begin, end, mode, ppool)) {
+		return false;
+	}
+
+	vm_identity_commit(vm_locked, begin, end, mode, ppool, ipa);
+
+	return true;
+}
+
+/**
+ * Prepares the given VM for the given address mapping such that it will be able
+ * to commit the change without failure.
+ *
+ * In particular, multiple calls to this function will result in the
+ * corresponding calls to commit the changes to succeed.
+ *
+ * Returns true on success, or false if the update failed and no changes were
+ * made.
+ */
+bool vm_identity_prepare(struct vm_locked vm_locked, paddr_t begin, paddr_t end,
+			 uint32_t mode, struct mpool *ppool)
+{
+	return mm_vm_identity_prepare(&vm_locked.vm->ptable, begin, end, mode,
+				      ppool);
+}
+
+/**
+ * Commits the given address mapping to the VM assuming the operation cannot
+ * fail. `vm_identity_prepare` must be used correctly before this to ensure
+ * this condition.
+ *
+ * Also mirrors the mapping into the IOMMU so devices assigned to the VM see
+ * the same address space.
+ */
+void vm_identity_commit(struct vm_locked vm_locked, paddr_t begin, paddr_t end,
+			uint32_t mode, struct mpool *ppool, ipaddr_t *ipa)
+{
+	mm_vm_identity_commit(&vm_locked.vm->ptable, begin, end, mode, ppool,
+			      ipa);
+	plat_iommu_identity_map(vm_locked, begin, end, mode);
+}
+
+/**
+ * Unmap a range of addresses from the VM.
+ *
+ * Implemented as an identity map with the unmapped mode mask.
+ *
+ * Returns true on success, or false if the update failed and no changes were
+ * made.
+ */
+bool vm_unmap(struct vm_locked vm_locked, paddr_t begin, paddr_t end,
+	      struct mpool *ppool)
+{
+	uint32_t mode = MM_MODE_UNMAPPED_MASK;
+
+	return vm_identity_map(vm_locked, begin, end, mode, ppool, NULL);
+}
+
+/**
+ * Unmaps the hypervisor pages from the given page table.
+ *
+ * Removes the hypervisor's text, rodata and data sections so the VM cannot
+ * access them. Returns true only if all three ranges were unmapped.
+ */
+bool vm_unmap_hypervisor(struct vm_locked vm_locked, struct mpool *ppool)
+{
+	/* TODO: If we add pages dynamically, they must be included here too. */
+	return vm_unmap(vm_locked, layout_text_begin(), layout_text_end(),
+			ppool) &&
+	       vm_unmap(vm_locked, layout_rodata_begin(), layout_rodata_end(),
+			ppool) &&
+	       vm_unmap(vm_locked, layout_data_begin(), layout_data_end(),
+			ppool);
+}
diff --git a/src/vm_test.cc b/src/vm_test.cc
new file mode 100644
index 0000000..fb33ec4
--- /dev/null
+++ b/src/vm_test.cc
@@ -0,0 +1,81 @@
+/*
+ * Copyright 2019 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <gmock/gmock.h>
+
+extern "C" {
+#include "hf/mpool.h"
+#include "hf/vm.h"
+}
+
+#include <memory>
+#include <span>
+#include <vector>
+
+#include "mm_test.hh"
+
+namespace
+{
+using namespace ::std::placeholders;
+
+using ::testing::AllOf;
+using ::testing::Each;
+using ::testing::SizeIs;
+
+using struct_vm = struct vm;
+
+constexpr size_t TEST_HEAP_SIZE = PAGE_SIZE * 16;
+const int TOP_LEVEL = arch_mm_stage2_max_level();
+
+/*
+ * Test fixture giving each test a fresh page pool backed by a heap-allocated
+ * chunk of TEST_HEAP_SIZE bytes.
+ */
+class vm : public ::testing::Test
+{
+	void SetUp() override
+	{
+		/*
+		 * TODO: replace with direct use of stdlib allocator so
+		 * sanitizers are more effective.
+		 */
+		test_heap = std::make_unique<uint8_t[]>(TEST_HEAP_SIZE);
+		mpool_init(&ppool, sizeof(struct mm_page_table));
+		mpool_add_chunk(&ppool, test_heap.get(), TEST_HEAP_SIZE);
+	}
+
+	std::unique_ptr<uint8_t[]> test_heap;
+
+       protected:
+	struct mpool ppool;
+};
+
+/**
+ * If nothing is mapped, unmapping the hypervisor has no effect.
+ */
+TEST_F(vm, vm_unmap_hypervisor_not_mapped)
+{
+	struct_vm *vm;
+	struct vm_locked vm_locked;
+
+	/*
+	 * vm_init() already initialises the VM's page table via mm_vm_init(),
+	 * so check its result rather than re-initialising the ptable here
+	 * (which would leak the root tables vm_init() allocated).
+	 */
+	ASSERT_TRUE(vm_init(1, &ppool, &vm));
+	vm_locked = vm_lock(vm);
+	EXPECT_TRUE(vm_unmap_hypervisor(vm_locked, &ppool));
+	EXPECT_THAT(
+		mm_test::get_ptable(vm->ptable),
+		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
+	mm_vm_fini(&vm->ptable, &ppool);
+	vm_unlock(&vm_locked);
+}
+
+} /* namespace */
diff --git a/test/arch/BUILD.gn b/test/arch/BUILD.gn
new file mode 100644
index 0000000..c770dee
--- /dev/null
+++ b/test/arch/BUILD.gn
@@ -0,0 +1,38 @@
+# Copyright 2018 The Hafnium Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import("//build/image/image.gni")
+import("//build/toolchain/platform.gni")
+
+# Umbrella target aggregating the architecture-level tests.
+group("arch") {
+  testonly = true
+
+  deps = [
+    ":arch_test",
+  ]
+}
+
+# Hypervisor image running the dlog and mm unit tests on the target arch.
+hypervisor("arch_test") {
+  testonly = true
+
+  sources = [
+    "dlog_test.c",
+    "mm_test.c",
+  ]
+
+  deps = [
+    "//src/arch/${plat_arch}:arch",
+    "//test/hftest:hftest_hypervisor",
+  ]
+}
diff --git a/test/arch/dlog_test.c b/test/arch/dlog_test.c
new file mode 100644
index 0000000..f31ba26
--- /dev/null
+++ b/test/arch/dlog_test.c
@@ -0,0 +1,35 @@
+/*
+ * Copyright 2019 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "hf/dlog.h"
+
+#include "test/hftest.h"
+
+/**
+ * Test that logs are written to the buffer, and the rest is empty.
+ */
+TEST(dlog, log_buffer)
+{
+	const char test_string[] = "Test string\n";
+
+	/*
+	 * Log via an explicit "%s" format rather than passing the data as
+	 * the format string: dlog() interprets '%' sequences in its first
+	 * argument, so non-literal format strings are a hazard.
+	 */
+	dlog("%s", test_string);
+	ASSERT_EQ(strcmp(test_string, dlog_buffer), 0);
+	/* The \0 at the end shouldn't be counted. */
+	ASSERT_EQ(dlog_buffer_offset, sizeof(test_string) - 1);
+	for (int i = sizeof(test_string) - 1; i < DLOG_BUFFER_SIZE; ++i) {
+		EXPECT_EQ(dlog_buffer[i], '\0');
+	}
+}
diff --git a/test/arch/mm_test.c b/test/arch/mm_test.c
new file mode 100644
index 0000000..bf0c186
--- /dev/null
+++ b/test/arch/mm_test.c
@@ -0,0 +1,198 @@
+/*
+ * Copyright 2018 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "hf/mm.h"
+
+#include "hf/arch/mm.h"
+
+#include "test/hftest.h"
+
+/** There must be at least two levels in the page table. */
+#define MAX_LEVEL_LOWER_BOUND 1
+
+/**
+ * This is the number of levels that are tested and is constrained as it
+ * controls the depth of recursion in the memory management code.
+ */
+#define MAX_LEVEL_UPPER_BOUND 3
+
+/** X macro to expand tests for all levels. */
+#define EXPAND_LEVEL_TESTS \
+ LEVEL_TEST(0) \
+ LEVEL_TEST(1) \
+ LEVEL_TEST(2) \
+ LEVEL_TEST(3)
+
+/* TODO: work out how to run these tests against the host fake arch. */
+
+/**
+ * A block must be allowed at level 0 as this is the level which represents
+ * pages.
+ */
+TEST(arch_mm, block_allowed_at_level0)
+{
+	ASSERT_TRUE(arch_mm_is_block_allowed(0));
+}
+
+/**
+ * The maximum level must be within acceptable bounds.
+ *
+ * The upper bound limits recursion depth in the memory management code.
+ */
+TEST(arch_mm, max_level_stage1)
+{
+	uint8_t max_level = arch_mm_stage1_max_level();
+	EXPECT_GE(max_level, MAX_LEVEL_LOWER_BOUND);
+	EXPECT_LE(max_level, MAX_LEVEL_UPPER_BOUND);
+}
+
+/* TODO: initialize arch_mm and check max level of stage-2. */
+
+/**
+ * An absent entry is not present, valid, a block nor a table.
+ *
+ * Expanded by EXPAND_LEVEL_TESTS into one test per level 0-3.
+ */
+#define LEVEL_TEST(lvl)                                                  \
+	TEST(arch_mm, absent_properties_level##lvl)                      \
+	{                                                                \
+		uint8_t level = lvl;                                     \
+		pte_t absent_pte;                                        \
+		                                                         \
+		absent_pte = arch_mm_absent_pte(level);                  \
+		                                                         \
+		EXPECT_FALSE(arch_mm_pte_is_present(absent_pte, level)); \
+		EXPECT_FALSE(arch_mm_pte_is_valid(absent_pte, level));   \
+		EXPECT_FALSE(arch_mm_pte_is_block(absent_pte, level));   \
+		EXPECT_FALSE(arch_mm_pte_is_table(absent_pte, level));   \
+	}
+EXPAND_LEVEL_TESTS
+#undef LEVEL_TEST
+
+/**
+ * An invalid block is present and mutually exclusive from a table.
+ *
+ * Expanded by EXPAND_LEVEL_TESTS into one test per level 0-3.
+ */
+#define LEVEL_TEST(lvl)                                                       \
+	TEST(arch_mm, invalid_block_properties_level##lvl)                    \
+	{                                                                     \
+		uint8_t level = lvl;                                          \
+		uint64_t attrs =                                              \
+			arch_mm_mode_to_stage2_attrs(MM_MODE_INVALID);        \
+		pte_t block_pte;                                              \
+		                                                              \
+		/* Test doesn't apply if a block is not allowed. */           \
+		if (!arch_mm_is_block_allowed(level)) {                       \
+			return;                                               \
+		}                                                             \
+		                                                              \
+		block_pte = arch_mm_block_pte(level, pa_init(PAGE_SIZE * 19), \
+					      attrs);                         \
+		                                                              \
+		EXPECT_TRUE(arch_mm_pte_is_present(block_pte, level));        \
+		EXPECT_FALSE(arch_mm_pte_is_valid(block_pte, level));         \
+		EXPECT_TRUE(arch_mm_pte_is_block(block_pte, level));          \
+		EXPECT_FALSE(arch_mm_pte_is_table(block_pte, level));         \
+	}
+EXPAND_LEVEL_TESTS
+#undef LEVEL_TEST
+
+/**
+ * A valid block is present and mutually exclusive from a table.
+ *
+ * Expanded by EXPAND_LEVEL_TESTS into one test per level 0-3.
+ */
+#define LEVEL_TEST(lvl)                                                \
+	TEST(arch_mm, valid_block_properties_level##lvl)               \
+	{                                                              \
+		uint8_t level = lvl;                                   \
+		uint64_t attrs = arch_mm_mode_to_stage2_attrs(0);      \
+		pte_t block_pte;                                       \
+		                                                       \
+		/* Test doesn't apply if a block is not allowed. */    \
+		if (!arch_mm_is_block_allowed(level)) {                \
+			return;                                        \
+		}                                                      \
+		                                                       \
+		block_pte = arch_mm_block_pte(                         \
+			level, pa_init(PAGE_SIZE * 12345678), attrs);  \
+		                                                       \
+		EXPECT_TRUE(arch_mm_pte_is_present(block_pte, level)); \
+		EXPECT_TRUE(arch_mm_pte_is_valid(block_pte, level));   \
+		EXPECT_TRUE(arch_mm_pte_is_block(block_pte, level));   \
+		EXPECT_FALSE(arch_mm_pte_is_table(block_pte, level));  \
+	}
+EXPAND_LEVEL_TESTS
+#undef LEVEL_TEST
+
+/**
+ * A table is present, valid and mutually exclusive from a block.
+ *
+ * Expanded by EXPAND_LEVEL_TESTS into one test per level 0-3.
+ */
+#define LEVEL_TEST(lvl)                                                       \
+	TEST(arch_mm, table_properties_level##lvl)                            \
+	{                                                                     \
+		uint8_t level = lvl;                                          \
+		pte_t table_pte;                                              \
+		                                                              \
+		/* Test doesn't apply to level 0 as there can't be a table. */ \
+		if (level == 0) {                                             \
+			return;                                               \
+		}                                                             \
+		                                                              \
+		table_pte = arch_mm_table_pte(level,                          \
+					      pa_init(PAGE_SIZE * 999999999)); \
+		                                                              \
+		EXPECT_TRUE(arch_mm_pte_is_present(table_pte, level));        \
+		EXPECT_TRUE(arch_mm_pte_is_valid(table_pte, level));          \
+		EXPECT_FALSE(arch_mm_pte_is_block(table_pte, level));         \
+		EXPECT_TRUE(arch_mm_pte_is_table(table_pte, level));          \
+	}
+EXPAND_LEVEL_TESTS
+#undef LEVEL_TEST
+
+/**
+ * The address and attributes of a block must be preserved when encoding and
+ * decoding.
+ *
+ * Round-trips three representative (address, attrs) pairs through
+ * arch_mm_block_pte(). Expanded by EXPAND_LEVEL_TESTS for levels 0-3.
+ */
+#define LEVEL_TEST(lvl)                                                      \
+	TEST(arch_mm, block_addr_and_attrs_preserved_level##lvl)             \
+	{                                                                    \
+		uint8_t level = lvl;                                         \
+		paddr_t addr;                                                \
+		uint64_t attrs;                                              \
+		pte_t block_pte;                                             \
+		                                                             \
+		/* Test doesn't apply if a block is not allowed. */          \
+		if (!arch_mm_is_block_allowed(level)) {                      \
+			return;                                              \
+		}                                                            \
+		                                                             \
+		addr = pa_init(0);                                           \
+		attrs = arch_mm_mode_to_stage2_attrs(0);                     \
+		block_pte = arch_mm_block_pte(level, addr, attrs);           \
+		EXPECT_EQ(arch_mm_pte_attrs(block_pte, level), attrs);       \
+		EXPECT_EQ(pa_addr(arch_mm_block_from_pte(block_pte, level)), \
+			  pa_addr(addr));                                    \
+		                                                             \
+		addr = pa_init(PAGE_SIZE * 17);                              \
+		attrs = arch_mm_mode_to_stage2_attrs(MM_MODE_INVALID);       \
+		block_pte = arch_mm_block_pte(level, addr, attrs);           \
+		EXPECT_EQ(arch_mm_pte_attrs(block_pte, level), attrs);       \
+		EXPECT_EQ(pa_addr(arch_mm_block_from_pte(block_pte, level)), \
+			  pa_addr(addr));                                    \
+		                                                             \
+		addr = pa_init(PAGE_SIZE * 500);                             \
+		attrs = arch_mm_mode_to_stage2_attrs(MM_MODE_R | MM_MODE_W); \
+		block_pte = arch_mm_block_pte(level, addr, attrs);           \
+		EXPECT_EQ(arch_mm_pte_attrs(block_pte, level), attrs);       \
+		EXPECT_EQ(pa_addr(arch_mm_block_from_pte(block_pte, level)), \
+			  pa_addr(addr));                                    \
+	}
+EXPAND_LEVEL_TESTS
+#undef LEVEL_TEST
diff --git a/test/hftest/BUILD.gn b/test/hftest/BUILD.gn
new file mode 100644
index 0000000..d65397b
--- /dev/null
+++ b/test/hftest/BUILD.gn
@@ -0,0 +1,155 @@
+# Copyright 2018 The Hafnium Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import("//build/toolchain/platform.gni")
+
+# Shared include path for all hftest framework flavours.
+config("hftest_config") {
+  include_dirs = [ "//test/inc" ]
+}
+
+# Testing framework for a primary VM.
+source_set("hftest_primary_vm") {
+  testonly = true
+
+  public_configs = [ ":hftest_config" ]
+
+  deps = [
+    ":hftest_standalone",
+    "//vmlib/${plat_arch}:call",
+  ]
+}
+
+# Testing framework for a secondary VM. It's currently just a slave VM and
+# can't affect the tests directly.
+source_set("hftest_secondary_vm") {
+  testonly = true
+
+  public_configs = [ ":hftest_config" ]
+
+  sources = [
+    "service.c",
+  ]
+
+  deps = [
+    ":mm",
+    ":power_mgmt",
+    "//src:dlog",
+    "//src:memiter",
+    "//src:panic",
+    "//src:std",
+    "//src/arch/${plat_arch}:entry",
+    "//src/arch/${plat_arch}/hftest:entry",
+    "//src/arch/${plat_arch}/hftest:power_mgmt",
+    "//vmlib/${plat_arch}:call",
+  ]
+}
+
+# Testing framework for a hypervisor.
+source_set("hftest_hypervisor") {
+  testonly = true
+  public_configs = [ ":hftest_config" ]
+  deps = [
+    ":hftest_standalone",
+  ]
+}
+
+# Testing framework for tests running under Linux in the primary VM.
+source_set("hftest_linux") {
+  testonly = true
+  public_configs = [ ":hftest_config" ]
+
+  sources = [
+    "linux_main.c",
+  ]
+
+  deps = [
+    ":common",
+    "//src:dlog",
+    "//src:memiter",
+    "//src/arch/${plat_arch}/hftest:power_mgmt",
+  ]
+}
+
+# Bare-metal entry point and drivers shared by the hypervisor and primary-VM
+# flavours; internal to this BUILD file.
+source_set("hftest_standalone") {
+  visibility = [ ":*" ]
+  testonly = true
+
+  public_configs = [ ":hftest_config" ]
+
+  sources = [
+    "standalone_main.c",
+  ]
+
+  deps = [
+    ":common",
+    ":mm",
+    ":power_mgmt",
+    "//src:dlog",
+    "//src:fdt",
+    "//src:memiter",
+    "//src/arch/${plat_arch}:entry",
+    "//src/arch/${plat_arch}/hftest:entry",
+    "//src/arch/${plat_arch}/hftest:interrupts",
+    "//src/arch/${plat_arch}/hftest:power_mgmt",
+  ]
+}
+
+# Common code for hftest, whether it is running under Linux, under Hafnium in
+# the primary VM, or directly on the hardware.
+source_set("common") {
+  visibility = [ ":*" ]
+  testonly = true
+  public_configs = [ ":hftest_config" ]
+  sources = [
+    "common.c",
+  ]
+  deps = [
+    "//src:fdt_handler",
+    "//src:memiter",
+    "//src:panic",
+    "//src:std",
+  ]
+}
+
+# Memory-management helpers for tests.
+source_set("mm") {
+  testonly = true
+
+  public_configs = [ ":hftest_config" ]
+
+  sources = [
+    "mm.c",
+  ]
+
+  deps = [
+    "//src:layout",
+    "//src:mm",
+    "//src/arch/${plat_arch}:arch",
+    "//src/arch/${plat_arch}/hftest:mm",
+  ]
+}
+
+# CPU power management helpers for tests.
+source_set("power_mgmt") {
+  testonly = true
+
+  public_configs = [ ":hftest_config" ]
+
+  sources = [
+    "power_mgmt.c",
+  ]
+
+  deps = [
+    ":mm",
+    "//src/arch/${plat_arch}/hftest:power_mgmt",
+  ]
+}
diff --git a/test/hftest/common.c b/test/hftest/common.c
new file mode 100644
index 0000000..877f1db
--- /dev/null
+++ b/test/hftest/common.c
@@ -0,0 +1,271 @@
+/*
+ * Copyright 2018 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "hf/arch/vm/power_mgmt.h"
+
+#include "hf/boot_params.h"
+#include "hf/fdt_handler.h"
+#include "hf/memiter.h"
+#include "hf/std.h"
+
+#include "hftest_common.h"
+#include "test/hftest.h"
+
+HFTEST_ENABLE();
+
+/* Storage for tests added via hftest_register(). */
+static struct hftest_test hftest_constructed[HFTEST_MAX_TESTS];
+/* Number of entries in the active test list. */
+static size_t hftest_count;
+/* The active test list: either `hftest_constructed` or a caller's list. */
+static struct hftest_test *hftest_list;
+
+/* Single context shared by all tests in this image. */
+static struct hftest_context global_context;
+
+/**
+ * Returns the single context shared by all tests in this image.
+ */
+struct hftest_context *hftest_get_context(void)
+{
+	return &global_context;
+}
+
+/**
+ * Adds the given test information to the global list, to be used by
+ * `hftest_use_registered_list`.
+ */
+void hftest_register(struct hftest_test test)
+{
+	if (hftest_count >= HFTEST_MAX_TESTS) {
+		HFTEST_FAIL(true, "Too many tests");
+		return;
+	}
+
+	hftest_constructed[hftest_count++] = test;
+}
+
+/**
+ * Uses the list of tests registered by `hftest_register(...)` as the ones to
+ * run.
+ */
+void hftest_use_registered_list(void)
+{
+	/* `hftest_count` already tracks the registered tests. */
+	hftest_list = hftest_constructed;
+}
+
+/**
+ * Uses the given list of tests as the ones to run. The list is not copied,
+ * so it must outlive the test run.
+ */
+void hftest_use_list(struct hftest_test list[], size_t count)
+{
+	hftest_list = list;
+	hftest_count = count;
+}
+
+/**
+ * Writes out a JSON structure describing the available tests, in the format
+ * consumed by hftest.py. Entries are assumed to be grouped by suite, with
+ * each suite's set up/tear down entries preceding its tests.
+ */
+void hftest_json(void)
+{
+	const char *suite = NULL;
+	size_t i;
+	size_t tests_in_suite = 0;
+
+	HFTEST_LOG("{");
+	HFTEST_LOG(" \"suites\": [");
+	for (i = 0; i < hftest_count; ++i) {
+		struct hftest_test *test = &hftest_list[i];
+		/*
+		 * NOTE(review): this compares string pointers rather than
+		 * contents, so it relies on all entries of a suite sharing the
+		 * same string literal. Identical literals are not guaranteed
+		 * to be merged by the C standard -- confirm, or compare the
+		 * contents instead.
+		 */
+		if (test->suite != suite) {
+			/* Close out previously open suite. */
+			if (tests_in_suite) {
+				HFTEST_LOG(" ]");
+				HFTEST_LOG(" },");
+			}
+			/* Move onto new suite. */
+			suite = test->suite;
+			tests_in_suite = 0;
+			HFTEST_LOG(" {");
+			HFTEST_LOG(" \"name\": \"%s\",", test->suite);
+		}
+		if (test->kind == HFTEST_KIND_SET_UP) {
+			HFTEST_LOG(" \"setup\": true,");
+		}
+		if (test->kind == HFTEST_KIND_TEAR_DOWN) {
+			HFTEST_LOG(" \"teardown\": true,");
+		}
+		if (test->kind == HFTEST_KIND_TEST) {
+			if (!tests_in_suite) {
+				HFTEST_LOG(" \"tests\": [");
+			}
+			/*
+			 * It's easier to put the comma at the start of the line
+			 * than the end even though the JSON looks a bit funky.
+			 */
+			HFTEST_LOG(" %c{", tests_in_suite ? ',' : ' ');
+			HFTEST_LOG(" \"name\": \"%s\",", test->name);
+			HFTEST_LOG(" \"is_long_running\": %s",
+				   test->is_long_running ? "true" : "false");
+			HFTEST_LOG(" }");
+			++tests_in_suite;
+		}
+	}
+	if (tests_in_suite) {
+		HFTEST_LOG(" ]");
+		HFTEST_LOG(" }");
+	}
+	HFTEST_LOG(" ]");
+	HFTEST_LOG("}");
+}
+
+/**
+ * Logs a failure message and shuts down.
+ */
+noreturn void abort(void)
+{
+	HFTEST_LOG("FAIL");
+	arch_power_off();
+	/*
+	 * `abort` is declared `noreturn`, so if `arch_power_off` were ever to
+	 * return (e.g. power-off not supported), spin rather than fall off
+	 * the end of the function, which would be undefined behaviour.
+	 */
+	for (;;) {
+		/* Hang. */
+	}
+}
+
+/**
+ * Runs a single test: resets the shared context, runs the suite's set up
+ * function (if any), then the test body, then the tear down function (if
+ * any), aborting as soon as any step records a failure.
+ */
+static void run_test(hftest_test_fn set_up, hftest_test_fn test,
+		     hftest_test_fn tear_down, const struct fdt_header *fdt)
+{
+	/* Prepare the context. */
+	struct hftest_context *ctx = hftest_get_context();
+	memset_s(ctx, sizeof(*ctx), 0, sizeof(*ctx));
+	ctx->abort = abort;
+	ctx->fdt = fdt;
+
+	/* Run any set up functions. */
+	if (set_up) {
+		set_up();
+		if (ctx->failures) {
+			abort();
+		}
+	}
+
+	/* Run the test. */
+	test();
+	if (ctx->failures) {
+		abort();
+	}
+
+	/* Run any tear down functions. */
+	if (tear_down) {
+		tear_down();
+		if (ctx->failures) {
+			abort();
+		}
+	}
+
+	/* This marker is what hftest.py looks for to declare the test passed. */
+	HFTEST_LOG("FINISHED");
+}
+
+/**
+ * Runs the given test case: scans the active list for entries of the
+ * requested suite, remembering the suite's set up and tear down functions,
+ * and runs the first test whose name matches. Logs a message if no matching
+ * test is found.
+ */
+void hftest_run(struct memiter suite_name, struct memiter test_name,
+		const struct fdt_header *fdt)
+{
+	size_t i;
+	hftest_test_fn suite_set_up = NULL;
+	hftest_test_fn suite_tear_down = NULL;
+
+	for (i = 0; i < hftest_count; ++i) {
+		struct hftest_test *test = &hftest_list[i];
+
+		/* Check if this test is part of the suite we want. */
+		if (memiter_iseq(&suite_name, test->suite)) {
+			switch (test->kind) {
+			/*
+			 * The first entries in the suite are the set up and
+			 * tear down functions.
+			 */
+			case HFTEST_KIND_SET_UP:
+				suite_set_up = test->fn;
+				break;
+			case HFTEST_KIND_TEAR_DOWN:
+				suite_tear_down = test->fn;
+				break;
+			/* Find the test. */
+			case HFTEST_KIND_TEST:
+				if (memiter_iseq(&test_name, test->name)) {
+					run_test(suite_set_up, test->fn,
+						 suite_tear_down, fdt);
+					return;
+				}
+				break;
+			default:
+				/* Ignore other kinds. */
+				break;
+			}
+		}
+	}
+
+	/* Either the suite or the test was not found. */
+	HFTEST_LOG("Unable to find requested tests.");
+}
+
+/**
+ * Writes out usage information for the supported commands.
+ */
+void hftest_help(void)
+{
+	HFTEST_LOG("usage:");
+	HFTEST_LOG("");
+	HFTEST_LOG("  help");
+	HFTEST_LOG("");
+	HFTEST_LOG("    Show this help.");
+	HFTEST_LOG("");
+	HFTEST_LOG("  json");
+	HFTEST_LOG("");
+	/* The literal is split only to satisfy the line-length limit. */
+	HFTEST_LOG(
+		"    Print a directory of test suites and tests in "
+		"JSON "
+		"format.");
+	HFTEST_LOG("");
+	HFTEST_LOG("  run <suite> <test>");
+	HFTEST_LOG("");
+	HFTEST_LOG("    Run the named test from the named test suite.");
+}
+
+/** Maps a vCPU index to its vCPU ID. */
+static uintptr_t vcpu_index_to_id(size_t index)
+{
+	/* For now we use indices as IDs for vCPUs. */
+	return index;
+}
+
+/**
+ * Gets the ID of the CPU with the given index: the physical CPU ID read from
+ * the FDT when one is available, or the Hafnium vCPU ID mapping otherwise.
+ */
+uintptr_t hftest_get_cpu_id(size_t index)
+{
+	struct boot_params params;
+	struct fdt_node n;
+	const struct fdt_header *fdt = hftest_get_context()->fdt;
+
+	if (fdt == NULL) {
+		/*
+		 * We must be in a service VM, so apply the mapping that Hafnium
+		 * uses for vCPU IDs.
+		 */
+		return vcpu_index_to_id(index);
+	}
+
+	/* Find physical CPU ID from FDT. */
+	if (!fdt_root_node(&n, fdt)) {
+		FAIL("FDT failed validation.");
+	}
+	if (!fdt_find_child(&n, "")) {
+		FAIL("Unable to find FDT root node.");
+	}
+	fdt_find_cpus(&n, params.cpu_ids, &params.cpu_count);
+
+	/* Guard against indexing past the CPUs described by the FDT. */
+	if (index >= params.cpu_count) {
+		FAIL("CPU index out of range.");
+	}
+
+	return params.cpu_ids[index];
+}
diff --git a/test/hftest/hftest.py b/test/hftest/hftest.py
new file mode 100755
index 0000000..1c88953
--- /dev/null
+++ b/test/hftest/hftest.py
@@ -0,0 +1,593 @@
+#!/usr/bin/env python
+#
+# Copyright 2018 The Hafnium Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Script which drives invocation of tests and parsing their output to produce
+a results report.
+"""
+
+from __future__ import print_function
+
+import xml.etree.ElementTree as ET
+
+import argparse
+import collections
+import datetime
+import json
+import os
+import re
+import subprocess
+import sys
+
+HFTEST_LOG_PREFIX = "[hftest] "
+HFTEST_LOG_FAILURE_PREFIX = "Failure:"
+HFTEST_LOG_FINISHED = "FINISHED"
+
+# Root of the Hafnium checkout: three directory levels above this script
+# (test/hftest/hftest.py).
+HF_ROOT = os.path.dirname(os.path.dirname(os.path.dirname(
+    os.path.abspath(__file__))))
+DTC_SCRIPT = os.path.join(HF_ROOT, "build", "image", "dtc.py")
+# The FVP binary is looked up in a sibling `fvp` directory next to the
+# Hafnium checkout -- NOTE(review): confirm this layout is documented.
+FVP_BINARY = os.path.join(
+    os.path.dirname(HF_ROOT), "fvp", "Base_RevC_AEMv8A_pkg", "models",
+    "Linux64_GCC-4.9", "FVP_Base_RevC-2xAEMv8A")
+FVP_PREBUILTS_ROOT = os.path.join(
+    HF_ROOT, "prebuilts", "linux-aarch64", "arm-trusted-firmware", "fvp")
+FVP_PREBUILT_DTS = os.path.join(
+    FVP_PREBUILTS_ROOT, "fvp-base-gicv3-psci-1t.dts")
+FVP_PREBUILT_BL31 = os.path.join(FVP_PREBUILTS_ROOT, "bl31.bin")
+
+def read_file(path):
+    """Return the entire contents of the file at `path` as a string."""
+    with open(path, "r") as f:
+        return f.read()
+
+def write_file(path, to_write, append=False):
+    """Write (or append, if `append` is True) `to_write` to `path`."""
+    with open(path, "a" if append else "w") as f:
+        f.write(to_write)
+
+def append_file(path, to_write):
+    """Append `to_write` to the file at `path`."""
+    write_file(path, to_write, append=True)
+
+def join_if_not_None(*args):
+ return " ".join(filter(lambda x: x, args))
+
+class ArtifactsManager:
+    """Class which manages the folder with test artifacts: per-run logs plus
+    the aggregated files expected by the Sponge result parser."""
+
+    def __init__(self, log_dir):
+        # Every file created through this instance, used to assert uniqueness
+        # in create_file() and provenance in get_file().
+        self.created_files = []
+        self.log_dir = log_dir
+
+        # Create directory.
+        try:
+            os.makedirs(self.log_dir)
+        except OSError:
+            # A pre-existing directory is fine; re-raise anything else.
+            if not os.path.isdir(self.log_dir):
+                raise
+        print("Logs saved under", log_dir)
+
+        # Create files expected by the Sponge test result parser.
+        self.sponge_log_path = self.create_file("sponge_log", ".log")
+        self.sponge_xml_path = self.create_file("sponge_log", ".xml")
+
+    def gen_file_path(self, basename, extension):
+        """Generate path to a file in the log directory."""
+        return os.path.join(self.log_dir, basename + extension)
+
+    def create_file(self, basename, extension):
+        """Create and touch a new file in the log folder. Ensure that no other
+        file of the same name was created by this instance of ArtifactsManager.
+        """
+        # Determine the path of the file.
+        path = self.gen_file_path(basename, extension)
+
+        # Check that the path is unique.
+        assert(path not in self.created_files)
+        self.created_files += [ path ]
+
+        # Touch file.
+        with open(path, "w") as f:
+            pass
+
+        return path
+
+    def get_file(self, basename, extension):
+        """Return path to a file in the log folder. Assert that it was created
+        by this instance of ArtifactsManager."""
+        path = self.gen_file_path(basename, extension)
+        assert(path in self.created_files)
+        return path
+
+
+# Tuple holding the arguments common to all driver constructors.
+# This is to avoid having to pass arguments from subclasses to superclasses.
+DriverArgs = collections.namedtuple("DriverArgs", [
+    "artifacts",  # ArtifactsManager handling this run's logs.
+    "kernel",  # Path to the image under test.
+    "initrd",  # Path to the initial ramdisk, or None.
+    "manifest",  # Path to the manifest DTBO to overlay, or None.
+    "vm_args",  # Extra kernel command-line arguments, or None.
+    "cpu"  # CPU configuration to test against, or None for the default.
+    ])
+
+
+# State shared between the common Driver class and its subclasses during
+# a single invocation of the target platform.
+class DriverRunState:
+    def __init__(self, log_path):
+        # Path of the main log file for this run.
+        self.log_path = log_path
+        # Return code of the last subprocess; non-zero once one has failed.
+        self.ret_code = 0
+
+    def set_ret_code(self, ret_code):
+        """Record the return code of a subprocess run by the driver."""
+        self.ret_code = ret_code
+
+class DriverRunException(Exception):
+    """Exception thrown if a subprocess invoked by a driver returned a
+    non-zero status code. Used to fast-exit from a driver command sequence;
+    the failure itself is recorded in the DriverRunState."""
+    pass
+
+
+class Driver:
+    """Parent class of drivers for all testable platforms."""
+
+    def __init__(self, args):
+        # `args` is a DriverArgs namedtuple.
+        self.args = args
+
+    def get_run_log(self, run_name):
+        """Return path to the main log of a given test run."""
+        return self.args.artifacts.get_file(run_name, ".log")
+
+    def start_run(self, run_name):
+        """Hook called by Driver subclasses before they invoke the target
+        platform."""
+        return DriverRunState(self.args.artifacts.create_file(run_name, ".log"))
+
+    def exec_logged(self, run_state, exec_args):
+        """Run a subprocess on behalf of a Driver subclass and append its
+        stdout and stderr to the main log. Raises DriverRunException on a
+        non-zero return code, after recording it in `run_state`."""
+        assert(run_state.ret_code == 0)
+        with open(run_state.log_path, "a") as f:
+            f.write("$ {}\r\n".format(" ".join(exec_args)))
+            f.flush()
+            ret_code = subprocess.call(exec_args, stdout=f, stderr=f)
+            if ret_code != 0:
+                run_state.set_ret_code(ret_code)
+                raise DriverRunException()
+
+    def finish_run(self, run_state):
+        """Hook called by Driver subclasses after they finished running the
+        target platform. The return code of the main command is read from
+        `run_state` and a corresponding message is appended to the log.
+        Returns the content of the run's log."""
+        # Decode return code and add a message to the log.
+        with open(run_state.log_path, "a") as f:
+            # The `timeout` utility exits with 124 when the command times out.
+            if run_state.ret_code == 124:
+                f.write("\r\n{}{} timed out\r\n".format(
+                    HFTEST_LOG_PREFIX, HFTEST_LOG_FAILURE_PREFIX))
+            elif run_state.ret_code != 0:
+                f.write("\r\n{}{} process return code {}\r\n".format(
+                    HFTEST_LOG_PREFIX, HFTEST_LOG_FAILURE_PREFIX,
+                    run_state.ret_code))
+
+        # Append log of this run to full test log.
+        log_content = read_file(run_state.log_path)
+        append_file(
+            self.args.artifacts.sponge_log_path,
+            log_content + "\r\n\r\n")
+        return log_content
+
+    def overlay_dtb(self, run_state, base_dtb, overlay_dtb, out_dtb):
+        """Overlay `overlay_dtb` over `base_dtb` into `out_dtb`."""
+        dtc_args = [
+            DTC_SCRIPT, "overlay",
+            out_dtb, base_dtb, overlay_dtb,
+        ]
+        self.exec_logged(run_state, dtc_args)
+
+
+class QemuDriver(Driver):
+    """Driver which runs tests in QEMU."""
+
+    def __init__(self, args):
+        Driver.__init__(self, args)
+
+    def gen_exec_args(self, test_args, is_long_running, dtb_path=None,
+            dumpdtb_path=None):
+        """Generate command line arguments for QEMU."""
+        time_limit = "120s" if is_long_running else "10s"
+        # If no CPU configuration is selected, then test against the maximum
+        # configuration, "max", supported by QEMU.
+        cpu = self.args.cpu or "max"
+        exec_args = [
+            "timeout", "--foreground", time_limit,
+            "./prebuilts/linux-x64/qemu/qemu-system-aarch64",
+            "-machine", "virt,virtualization=on,gic_version=3",
+            "-cpu", cpu, "-smp", "4", "-m", "64M",
+            "-nographic", "-nodefaults", "-serial", "stdio",
+            "-d", "unimp", "-kernel", self.args.kernel,
+        ]
+
+        if dtb_path:
+            exec_args += ["-dtb", dtb_path]
+
+        if dumpdtb_path:
+            # Ask QEMU to write out its autogenerated DTB and exit.
+            exec_args += ["-machine", "dumpdtb=" + dumpdtb_path]
+
+        if self.args.initrd:
+            exec_args += ["-initrd", self.args.initrd]
+
+        vm_args = join_if_not_None(self.args.vm_args, test_args)
+        if vm_args:
+            exec_args += ["-append", vm_args]
+
+        return exec_args
+
+    def dump_dtb(self, run_state, test_args, path):
+        """Invoke QEMU to dump its autogenerated DTB to `path`."""
+        dumpdtb_args = self.gen_exec_args(test_args, False, dumpdtb_path=path)
+        self.exec_logged(run_state, dumpdtb_args)
+
+    def run(self, run_name, test_args, is_long_running):
+        """Run test given by `test_args` in QEMU."""
+        run_state = self.start_run(run_name)
+
+        try:
+            dtb_path = None
+
+            # If manifest DTBO specified, dump DTB from QEMU and overlay them.
+            if self.args.manifest:
+                base_dtb_path = self.args.artifacts.create_file(
+                    run_name, ".base.dtb")
+                dtb_path = self.args.artifacts.create_file(run_name, ".dtb")
+                self.dump_dtb(run_state, test_args, base_dtb_path)
+                self.overlay_dtb(
+                    run_state, base_dtb_path, self.args.manifest, dtb_path)
+
+            # Execute test in QEMU.
+            exec_args = self.gen_exec_args(test_args, is_long_running,
+                dtb_path=dtb_path)
+            self.exec_logged(run_state, exec_args)
+        except DriverRunException:
+            # Failure already recorded in run_state; fall through to report.
+            pass
+
+        return self.finish_run(run_state)
+
+
+class FvpDriver(Driver):
+    """Driver which runs tests in Arm FVP emulator."""
+
+    def __init__(self, args):
+        if args.cpu:
+            raise ValueError("FVP emulator does not support the --cpu option.")
+        Driver.__init__(self, args)
+
+    def gen_dts(self, dts_path, test_args, initrd_start, initrd_end):
+        """Create a DeviceTree source which will be compiled into a DTB and
+        passed to FVP for a test run."""
+        vm_args = join_if_not_None(self.args.vm_args, test_args)
+        write_file(dts_path, read_file(FVP_PREBUILT_DTS))
+        # Add the test's command line and initrd location to the `chosen`
+        # node of the prebuilt DTS.
+        append_file(dts_path, """
+                / {{
+                    chosen {{
+                        bootargs = "{}";
+                        stdout-path = "serial0:115200n8";
+                        linux,initrd-start = <{}>;
+                        linux,initrd-end = <{}>;
+                    }};
+                }};
+            """.format(vm_args, initrd_start, initrd_end))
+
+    def gen_fvp_args(
+            self, is_long_running, initrd_start, uart0_log_path, uart1_log_path,
+            dtb_path):
+        """Generate command line arguments for FVP."""
+        time_limit = "80s" if is_long_running else "40s"
+        fvp_args = [
+            "timeout", "--foreground", time_limit,
+            FVP_BINARY,
+            "-C", "pctl.startup=0.0.0.0",
+            "-C", "bp.secure_memory=0",
+            "-C", "cluster0.NUM_CORES=4",
+            "-C", "cluster1.NUM_CORES=4",
+            "-C", "cache_state_modelled=0",
+            "-C", "bp.vis.disable_visualisation=true",
+            "-C", "bp.vis.rate_limit-enable=false",
+            "-C", "bp.terminal_0.start_telnet=false",
+            "-C", "bp.terminal_1.start_telnet=false",
+            "-C", "bp.terminal_2.start_telnet=false",
+            "-C", "bp.terminal_3.start_telnet=false",
+            "-C", "bp.pl011_uart0.untimed_fifos=1",
+            "-C", "bp.pl011_uart0.unbuffered_output=1",
+            "-C", "bp.pl011_uart0.out_file=" + uart0_log_path,
+            "-C", "bp.pl011_uart1.out_file=" + uart1_log_path,
+            # NOTE(review): 0x04020000 is presumably the BL31 entry point on
+            # the FVP base platform -- it must match the bl31.bin load
+            # address in the --data options below; confirm before changing.
+            "-C", "cluster0.cpu0.RVBAR=0x04020000",
+            "-C", "cluster0.cpu1.RVBAR=0x04020000",
+            "-C", "cluster0.cpu2.RVBAR=0x04020000",
+            "-C", "cluster0.cpu3.RVBAR=0x04020000",
+            "-C", "cluster1.cpu0.RVBAR=0x04020000",
+            "-C", "cluster1.cpu1.RVBAR=0x04020000",
+            "-C", "cluster1.cpu2.RVBAR=0x04020000",
+            "-C", "cluster1.cpu3.RVBAR=0x04020000",
+            "--data", "cluster0.cpu0=" + FVP_PREBUILT_BL31 + "@0x04020000",
+            "--data", "cluster0.cpu0=" + dtb_path + "@0x82000000",
+            "--data", "cluster0.cpu0=" + self.args.kernel + "@0x80000000",
+            "-C", "bp.ve_sysregs.mmbSiteDefault=0",
+            "-C", "bp.ve_sysregs.exit_on_shutdown=1",
+        ]
+
+        if self.args.initrd:
+            fvp_args += [
+                "--data",
+                "cluster0.cpu0={}@{}".format(
+                    self.args.initrd, hex(initrd_start))
+            ]
+
+        return fvp_args
+
+    def run(self, run_name, test_args, is_long_running):
+        """Run the test given by `test_args` in FVP and return the log."""
+        run_state = self.start_run(run_name)
+
+        base_dts_path = self.args.artifacts.create_file(run_name, ".base.dts")
+        base_dtb_path = self.args.artifacts.create_file(run_name, ".base.dtb")
+        dtb_path = self.args.artifacts.create_file(run_name, ".dtb")
+        uart0_log_path = self.args.artifacts.create_file(run_name, ".uart0.log")
+        uart1_log_path = self.args.artifacts.create_file(run_name, ".uart1.log")
+
+        # NOTE(review): this load address must agree with the --data options
+        # in gen_fvp_args -- confirm against the FVP memory map.
+        initrd_start = 0x84000000
+        if self.args.initrd:
+            initrd_end = initrd_start + os.path.getsize(self.args.initrd)
+        else:
+            initrd_end = 0x85000000 # Default value
+
+        try:
+            # Create a DT to pass to FVP.
+            self.gen_dts(base_dts_path, test_args, initrd_start, initrd_end)
+
+            # Compile DTS to DTB.
+            dtc_args = [
+                DTC_SCRIPT, "compile", "-i", base_dts_path, "-o", base_dtb_path,
+            ]
+            self.exec_logged(run_state, dtc_args)
+
+            # If manifest DTBO specified, overlay it.
+            if self.args.manifest:
+                self.overlay_dtb(
+                    run_state, base_dtb_path, self.args.manifest, dtb_path)
+            else:
+                dtb_path = base_dtb_path
+
+            # Run FVP.
+            fvp_args = self.gen_fvp_args(
+                is_long_running, initrd_start, uart0_log_path, uart1_log_path,
+                dtb_path)
+            self.exec_logged(run_state, fvp_args)
+        except DriverRunException:
+            # Failure already recorded in run_state; fall through to report.
+            pass
+
+        # Append UART0 output to main log.
+        append_file(run_state.log_path, read_file(uart0_log_path))
+        return self.finish_run(run_state)
+
+
+# Tuple used to return information about the results of running a set of tests.
+TestRunnerResult = collections.namedtuple("TestRunnerResult", [
+    "tests_run",  # Total number of tests executed.
+    "tests_failed",  # Number of executed tests which failed.
+    ])
+
+
+class TestRunner:
+ """Class which communicates with a test platform to obtain a list of
+ available tests and driving their execution."""
+
+ def __init__(self, artifacts, driver, image_name, suite_regex, test_regex,
+ skip_long_running_tests):
+ self.artifacts = artifacts
+ self.driver = driver
+ self.image_name = image_name
+ self.skip_long_running_tests = skip_long_running_tests
+
+ self.suite_re = re.compile(suite_regex or ".*")
+ self.test_re = re.compile(test_regex or ".*")
+
+ def extract_hftest_lines(self, raw):
+ """Extract hftest-specific lines from a raw output from an invocation
+ of the test platform."""
+ lines = []
+ for line in raw.splitlines():
+ if line.startswith("VM "):
+ line = line[len("VM 0: "):]
+ if line.startswith(HFTEST_LOG_PREFIX):
+ lines.append(line[len(HFTEST_LOG_PREFIX):])
+ return lines
+
+ def get_test_json(self):
+ """Invoke the test platform and request a JSON of available test and
+ test suites."""
+ out = self.driver.run("json", "json", False)
+ hf_out = "\n".join(self.extract_hftest_lines(out))
+ try:
+ return json.loads(hf_out)
+ except ValueError as e:
+ print(out)
+ raise e
+
+ def collect_results(self, fn, it, xml_node):
+ """Run `fn` on every entry in `it` and collect their TestRunnerResults.
+ Insert "tests" and "failures" nodes to `xml_node`."""
+ tests_run = 0
+ tests_failed = 0
+ for i in it:
+ sub_result = fn(i)
+ assert(sub_result.tests_run >= sub_result.tests_failed)
+ tests_run += sub_result.tests_run
+ tests_failed += sub_result.tests_failed
+
+ xml_node.set("tests", str(tests_run))
+ xml_node.set("failures", str(tests_failed))
+ return TestRunnerResult(tests_run, tests_failed)
+
+ def is_passed_test(self, test_out):
+ """Parse the output of a test and return True if it passed."""
+ return \
+ len(test_out) > 0 and \
+ test_out[-1] == HFTEST_LOG_FINISHED and \
+ not any(l.startswith(HFTEST_LOG_FAILURE_PREFIX) for l in test_out)
+
+ def get_log_name(self, suite, test):
+ """Returns a string with a generated log name for the test."""
+ log_name = ""
+
+ cpu = self.driver.args.cpu
+ if cpu:
+ log_name += cpu + "."
+
+ log_name += suite["name"] + "." + test["name"]
+
+ return log_name
+
+ def run_test(self, suite, test, suite_xml):
+ """Invoke the test platform and request to run a given `test` in given
+ `suite`. Create a new XML node with results under `suite_xml`.
+ Test only invoked if it matches the regex given to constructor."""
+ if not self.test_re.match(test["name"]):
+ return TestRunnerResult(tests_run=0, tests_failed=0)
+
+ if self.skip_long_running_tests and test["is_long_running"]:
+ print(" SKIP", test["name"])
+ return TestRunnerResult(tests_run=0, tests_failed=0)
+
+ print(" RUN", test["name"])
+ log_name = self.get_log_name(suite, test)
+
+ test_xml = ET.SubElement(suite_xml, "testcase")
+ test_xml.set("name", test["name"])
+ test_xml.set("classname", suite["name"])
+ test_xml.set("status", "run")
+
+ out = self.extract_hftest_lines(self.driver.run(
+ log_name, "run {} {}".format(suite["name"], test["name"]),
+ test["is_long_running"]))
+
+ if self.is_passed_test(out):
+ print(" PASS")
+ return TestRunnerResult(tests_run=1, tests_failed=0)
+ else:
+ print("[x] FAIL --", self.driver.get_run_log(log_name))
+ failure_xml = ET.SubElement(test_xml, "failure")
+ # TODO: set a meaningful message and put log in CDATA
+ failure_xml.set("message", "Test failed")
+ return TestRunnerResult(tests_run=1, tests_failed=1)
+
+ def run_suite(self, suite, xml):
+ """Invoke the test platform and request to run all matching tests in
+ `suite`. Create new XML nodes with results under `xml`.
+ Suite skipped if it does not match the regex given to constructor."""
+ if not self.suite_re.match(suite["name"]):
+ return TestRunnerResult(tests_run=0, tests_failed=0)
+
+ print(" SUITE", suite["name"])
+ suite_xml = ET.SubElement(xml, "testsuite")
+ suite_xml.set("name", suite["name"])
+
+ return self.collect_results(
+ lambda test: self.run_test(suite, test, suite_xml),
+ suite["tests"],
+ suite_xml)
+
+ def run_tests(self):
+ """Run all suites and tests matching regexes given to constructor.
+ Write results to sponge log XML. Return the number of run and failed
+ tests."""
+
+ test_spec = self.get_test_json()
+ timestamp = datetime.datetime.now().replace(microsecond=0).isoformat()
+
+ xml = ET.Element("testsuites")
+ xml.set("name", self.image_name)
+ xml.set("timestamp", timestamp)
+
+ result = self.collect_results(
+ lambda suite: self.run_suite(suite, xml),
+ test_spec["suites"],
+ xml)
+
+ # Write XML to file.
+ with open(self.artifacts.sponge_xml_path, "w") as f:
+ ET.ElementTree(xml).write(f, encoding='utf-8', xml_declaration=True)
+
+ if result.tests_failed > 0:
+ print("[x] FAIL:", result.tests_failed, "of", result.tests_run,
+ "tests failed")
+ elif result.tests_run > 0:
+ print(" PASS: all", result.tests_run, "tests passed")
+
+ return result
+
+
+def Main():
+    """Command-line entry point: parse arguments, set up the artifacts
+    folder and the platform driver, run the matching tests and translate the
+    outcome into an exit code (0 = pass, 1 = failures, 10 = no tests
+    matched)."""
+    parser = argparse.ArgumentParser()
+    parser.add_argument("image")
+    parser.add_argument("--out", required=True)
+    parser.add_argument("--log", required=True)
+    parser.add_argument("--out_initrd")
+    parser.add_argument("--initrd")
+    parser.add_argument("--suite")
+    parser.add_argument("--test")
+    parser.add_argument("--vm_args")
+    parser.add_argument("--fvp", action="store_true")
+    parser.add_argument("--skip-long-running-tests", action="store_true")
+    parser.add_argument("--cpu",
+        help="Selects the CPU configuration for the run environment.")
+    args = parser.parse_args()
+
+    # Resolve some paths.
+    image = os.path.join(args.out, args.image + ".bin")
+    initrd = None
+    manifest = None
+    image_name = args.image
+    if args.initrd:
+        initrd_dir = os.path.join(args.out_initrd, "obj", args.initrd)
+        initrd = os.path.join(initrd_dir, "initrd.img")
+        manifest = os.path.join(initrd_dir, "manifest.dtbo")
+        image_name += "_" + args.initrd
+    vm_args = args.vm_args or ""
+
+    # Create class which will manage all test artifacts.
+    artifacts = ArtifactsManager(os.path.join(args.log, image_name))
+
+    # Create a driver for the platform we want to test on.
+    driver_args = DriverArgs(artifacts, image, initrd, manifest, vm_args,
+        args.cpu)
+    if args.fvp:
+        driver = FvpDriver(driver_args)
+    else:
+        driver = QemuDriver(driver_args)
+
+    # Create class which will drive test execution.
+    runner = TestRunner(artifacts, driver, image_name, args.suite, args.test,
+        args.skip_long_running_tests)
+
+    # Run tests.
+    runner_result = runner.run_tests()
+
+    # Print error message if no tests were run as this is probably unexpected.
+    # Return suitable error code.
+    if runner_result.tests_run == 0:
+        print("Error: no tests match")
+        return 10
+    elif runner_result.tests_failed > 0:
+        return 1
+    else:
+        return 0
+
+# Use Main()'s return value as the process exit code so callers (e.g. the
+# build system) can detect failures.
+if __name__ == "__main__":
+    sys.exit(Main())
diff --git a/test/hftest/hftest_common.h b/test/hftest/hftest_common.h
new file mode 100644
index 0000000..fba4a54
--- /dev/null
+++ b/test/hftest/hftest_common.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright 2019 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include "hf/fdt.h"
+#include "hf/memiter.h"
+
+#include "test/hftest_impl.h"
+
+/* Selects the tests registered with `hftest_register` as the ones to run. */
+void hftest_use_registered_list(void);
+/* Selects an explicit list of tests as the ones to run; `list` must outlive
+ * the test run. */
+void hftest_use_list(struct hftest_test list[], size_t count);
+
+/* Writes out a JSON structure describing the available tests. */
+void hftest_json(void);
+/* Runs the named test from the named suite. `fdt` may be NULL. */
+void hftest_run(struct memiter suite_name, struct memiter test_name,
+		const struct fdt_header *fdt);
+/* Writes out usage information. */
+void hftest_help(void);
diff --git a/test/hftest/linux_main.c b/test/hftest/linux_main.c
new file mode 100644
index 0000000..2ebe91b
--- /dev/null
+++ b/test/hftest/linux_main.c
@@ -0,0 +1,70 @@
+/*
+ * Copyright 2019 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <stdalign.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <string.h>
+#include <unistd.h>
+
+#include "hf/memiter.h"
+
+#include "hftest_common.h"
+#include "test/hftest.h"
+#include <sys/reboot.h>
+
+/*
+ * Parses the command line and dispatches to the requested hftest action:
+ * "json" lists the available tests, "run <suite> <test>" runs a single test
+ * and anything else prints usage information.
+ */
+void test_main(int argc, const char *argv[])
+{
+	const char *command;
+
+	if (argc < 2) {
+		HFTEST_LOG("Unable to parse command.");
+		return;
+	}
+	command = argv[1];
+
+	hftest_use_registered_list();
+
+	if (strcmp(command, "json") == 0) {
+		hftest_json();
+	} else if (strcmp(command, "run") == 0) {
+		struct memiter suite_name;
+		struct memiter test_name;
+
+		if (argc != 4) {
+			HFTEST_LOG("Unable to parse test.");
+			return;
+		}
+
+		memiter_init(&suite_name, argv[2], strnlen_s(argv[2], 64));
+		memiter_init(&test_name, argv[3], strnlen_s(argv[3], 64));
+		hftest_run(suite_name, test_name, NULL);
+	} else {
+		hftest_help();
+	}
+}
+
+int main(int argc, const char *argv[])
+{
+	test_main(argc, argv);
+	/*
+	 * Power the machine off so the test harness knows the run is over.
+	 * NOTE(review): reboot(2) requires CAP_SYS_BOOT and only returns on
+	 * failure -- confirm the test initrd runs this as init/root.
+	 */
+	reboot(RB_POWER_OFF);
+	return 0;
+}
diff --git a/test/hftest/mm.c b/test/hftest/mm.c
new file mode 100644
index 0000000..7c78ab9
--- /dev/null
+++ b/test/hftest/mm.c
@@ -0,0 +1,77 @@
+/*
+ * Copyright 2019 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "hf/arch/vm/mm.h"
+
+#include "test/hftest.h"
+
+/* Number of pages reserved for page tables. Increase if necessary. */
+#define PTABLE_PAGES 3
+
+/* Backing storage for the test image's stage-1 page tables. */
+alignas(alignof(struct mm_page_table)) static char ptable_buf
+	[sizeof(struct mm_page_table) * PTABLE_PAGES];
+
+/* Pool that page-table allocations are served from, backed by ptable_buf. */
+static struct mpool ppool;
+/* The test image's stage-1 page table. */
+static struct mm_ptable ptable;
+
+/*
+ * Wraps the test's page table in the `mm_stage1_locked` form the mm API
+ * takes. Note that no lock is actually acquired here.
+ */
+static struct mm_stage1_locked get_stage1_locked(void)
+{
+	return (struct mm_stage1_locked){.ptable = &ptable};
+}
+
+/*
+ * Initialises the test image's memory management: identity-maps the whole
+ * stage-1 address space with RWX permissions and enables the MMU on the
+ * current CPU. Returns false on failure.
+ */
+bool hftest_mm_init(void)
+{
+	struct mm_stage1_locked stage1_locked;
+
+	mpool_init(&ppool, sizeof(struct mm_page_table));
+	if (!mpool_add_chunk(&ppool, ptable_buf, sizeof(ptable_buf))) {
+		HFTEST_FAIL(true, "Failed to add buffer to page-table pool.");
+	}
+
+	if (!mm_ptable_init(&ptable, MM_FLAG_STAGE1, &ppool)) {
+		HFTEST_FAIL(true, "Unable to allocate memory for page table.");
+	}
+
+	stage1_locked = get_stage1_locked();
+	/*
+	 * NOTE(review): the result of mm_identity_map is not checked here. A
+	 * NULL check would be ambiguous because this mapping starts at
+	 * address 0, but as written a failure would go unnoticed.
+	 */
+	mm_identity_map(stage1_locked, pa_init(0),
+			pa_init(mm_ptable_addr_space_end(MM_FLAG_STAGE1)),
+			MM_MODE_R | MM_MODE_W | MM_MODE_X, &ppool);
+
+	if (!arch_vm_mm_init()) {
+		return false;
+	}
+
+	arch_vm_mm_enable(ptable.root);
+
+	return true;
+}
+
+/*
+ * Adds a stage-1 identity mapping for [base, base + size) with the given
+ * mode. Fails the test if the mapping cannot be created, which usually means
+ * PTABLE_PAGES needs to be increased.
+ */
+void hftest_mm_identity_map(const void *base, size_t size, uint32_t mode)
+{
+	struct mm_stage1_locked stage1_locked = get_stage1_locked();
+	paddr_t start = pa_from_va(va_from_ptr(base));
+	paddr_t end = pa_add(start, size);
+
+	/*
+	 * mm_identity_map returns the start of the mapped range on success,
+	 * so anything other than `base` indicates failure.
+	 */
+	if (mm_identity_map(stage1_locked, start, end, mode, &ppool) != base) {
+		FAIL("Could not add new page table mapping. Try increasing "
+		     "size of the page table buffer.");
+	}
+}
+
+/*
+ * Enables the MMU with the already-initialised page tables on the current
+ * (secondary) CPU. hftest_mm_init must have run first.
+ */
+void hftest_mm_vcpu_init(void)
+{
+	arch_vm_mm_enable(ptable.root);
+}
diff --git a/test/hftest/power_mgmt.c b/test/hftest/power_mgmt.c
new file mode 100644
index 0000000..4c651a6
--- /dev/null
+++ b/test/hftest/power_mgmt.c
@@ -0,0 +1,109 @@
+/*
+ * Copyright 2019 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "hf/arch/vm/power_mgmt.h"
+
+#include "hf/arch/mm.h"
+
+#include "hf/spinlock.h"
+
+#include "test/hftest.h"
+
+struct cpu_start_state {
+ void (*entry)(uintptr_t arg);
+ uintreg_t arg;
+ struct spinlock lock;
+};
+
+/**
+ * First code run on a newly started CPU. Brings up the MMU, copies the start
+ * state out of the starter's stack-allocated struct, releases the starter's
+ * lock, and calls the requested entry function. Never returns: if the entry
+ * function returns, the CPU is stopped.
+ */
+static noreturn void cpu_entry(uintptr_t arg)
+{
+	struct cpu_start_state *s = (struct cpu_start_state *)arg;
+	struct cpu_start_state s_copy;
+
+	/*
+	 * Initialize memory and enable caching. Must be the first thing we do.
+	 */
+	hftest_mm_vcpu_init();
+
+	/* Make a copy of the cpu_start_state struct. */
+	s_copy = *s;
+
+	/* Inform cpu_start() that the state struct memory can now be freed. */
+	sl_unlock(&s->lock);
+
+	/* Call the given entry function with the given argument. */
+	s_copy.entry(s_copy.arg);
+
+	/* If the entry function returns, turn off the CPU. */
+	arch_cpu_stop();
+}
+
+/**
+ * Starts the CPU with the given ID, arranging for cpu_entry() to run the
+ * provided entry function with the provided argument on the given stack.
+ * Blocks until the new CPU no longer needs the state held on this function's
+ * stack. Returns false if the CPU could not be started; a misaligned stack
+ * pointer is fatal.
+ */
+bool hftest_cpu_start(uintptr_t id, void *stack, size_t stack_size,
+		      void (*entry)(uintptr_t arg), uintptr_t arg)
+{
+	struct cpu_start_state s;
+	struct arch_cpu_start_state s_arch;
+
+	/*
+	 * Config for arch_cpu_start() which will start a new CPU and
+	 * immediately jump to cpu_entry(). This function must guarantee that
+	 * the state struct is not freed until cpu_entry() is called.
+	 */
+	s_arch.initial_sp = (uintptr_t)stack + stack_size;
+	s_arch.entry = cpu_entry;
+	s_arch.arg = (uintptr_t)&s;
+
+	/*
+	 * Flush the `cpu_start_state` struct because the new CPU will be
+	 * started without caching enabled and will need the data early on.
+	 * Write back is all that is really needed so flushing will definitely
+	 * get the job done.
+	 */
+	arch_mm_flush_dcache(&s_arch, sizeof(s_arch));
+
+	if ((s_arch.initial_sp % STACK_ALIGN) != 0) {
+		HFTEST_FAIL(true,
+			    "Stack pointer of new vCPU not properly aligned.");
+	}
+
+	/*
+	 * Config for cpu_entry(). Its job is to initialize memory and call the
+	 * provided entry point with the provided argument.
+	 */
+	s.entry = entry;
+	s.arg = arg;
+	sl_init(&s.lock);
+
+	/*
+	 * Lock the cpu_start_state struct which will be unlocked once
+	 * cpu_entry() does not need its content anymore. This simultaneously
+	 * protects the arch_cpu_start_state struct which must not be freed
+	 * before cpu_entry() is called.
+	 */
+	sl_lock(&s.lock);
+
+	/* Try to start the given CPU. */
+	if (!arch_cpu_start(id, &s_arch)) {
+		return false;
+	}
+
+	/*
+	 * Wait until cpu_entry() unlocks the cpu_start_state lock before
+	 * freeing stack memory.
+	 */
+	sl_lock(&s.lock);
+	return true;
+}
diff --git a/test/hftest/service.c b/test/hftest/service.c
new file mode 100644
index 0000000..afbf6f6
--- /dev/null
+++ b/test/hftest/service.c
@@ -0,0 +1,142 @@
+/*
+ * Copyright 2018 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <stdalign.h>
+#include <stdint.h>
+
+#include "hf/memiter.h"
+#include "hf/mm.h"
+#include "hf/spci.h"
+#include "hf/std.h"
+
+#include "vmapi/hf/call.h"
+
+#include "test/hftest.h"
+
+alignas(4096) uint8_t kstack[4096];
+
+HFTEST_ENABLE();
+
+extern struct hftest_test hftest_begin[];
+extern struct hftest_test hftest_end[];
+
+static alignas(HF_MAILBOX_SIZE) uint8_t send[HF_MAILBOX_SIZE];
+static alignas(HF_MAILBOX_SIZE) uint8_t recv[HF_MAILBOX_SIZE];
+
+static hf_ipaddr_t send_addr = (hf_ipaddr_t)send;
+static hf_ipaddr_t recv_addr = (hf_ipaddr_t)recv;
+
+static struct hftest_context global_context;
+
+/** Returns the single, file-global test context shared by this image. */
+struct hftest_context *hftest_get_context(void)
+{
+	return &global_context;
+}
+
+/**
+ * Finds the service with the name passed in the arguments. Returns NULL if
+ * no name can be parsed from `args` or no registered service matches it.
+ */
+static hftest_test_fn find_service(struct memiter *args)
+{
+	struct memiter service_name;
+	struct hftest_test *test;
+
+	if (!memiter_parse_str(args, &service_name)) {
+		return NULL;
+	}
+
+	/* Scan the linker-collected test records for a matching service. */
+	for (test = hftest_begin; test < hftest_end; ++test) {
+		if (test->kind == HFTEST_KIND_SERVICE &&
+		    memiter_iseq(&service_name, test->name)) {
+			return test->fn;
+		}
+	}
+
+	return NULL;
+}
+
+/**
+ * Aborts the service by deliberately faulting, since a secondary VM can't
+ * power down the machine.
+ */
+noreturn void abort(void)
+{
+	HFTEST_LOG("Service contained failures.");
+	/* Cause a fault, as a secondary can't power down the machine. */
+	*((volatile uint8_t *)1) = 1;
+
+	/* This should never be reached, but to make the compiler happy... */
+	for (;;) {
+	}
+}
+
+/**
+ * Entry point for a service VM: initializes the MMU, sets up the mailbox,
+ * waits for a message naming the service to run, then yields once and runs
+ * the service. Never returns; hangs or aborts when the service finishes.
+ */
+noreturn void kmain(size_t memory_size)
+{
+	struct memiter args;
+	hftest_test_fn service;
+	struct hftest_context *ctx;
+	struct spci_value ret;
+
+	/*
+	 * Initialize the stage-1 MMU and identity-map the entire address space.
+	 */
+	if (!hftest_mm_init()) {
+		HFTEST_LOG_FAILURE();
+		HFTEST_LOG(HFTEST_LOG_INDENT "Memory initialization failed");
+		for (;;) {
+			/* Hang if memory init failed. */
+		}
+	}
+
+	/* Find out which service we have been asked to run. */
+
+	/* Set up the mailbox. */
+	spci_rxtx_map(send_addr, recv_addr);
+
+	/* Receive the name of the service to run. */
+	ret = spci_msg_wait();
+	ASSERT_EQ(ret.func, SPCI_MSG_SEND_32);
+	memiter_init(&args, recv, spci_msg_send_size(ret));
+	service = find_service(&args);
+	EXPECT_EQ(spci_rx_release().func, SPCI_SUCCESS_32);
+
+	/* Check the service was found. */
+	if (service == NULL) {
+		HFTEST_LOG_FAILURE();
+		HFTEST_LOG(HFTEST_LOG_INDENT
+			   "Unable to find requested service");
+		for (;;) {
+			/* Hang if the service was unknown. */
+		}
+	}
+
+	/* Clean the context. */
+	ctx = hftest_get_context();
+	memset_s(ctx, sizeof(*ctx), 0, sizeof(*ctx));
+	ctx->abort = abort;
+	ctx->send = send;
+	ctx->recv = recv;
+	ctx->memory_size = memory_size;
+
+	/* Pause so the next time cycles are given the service will be run. */
+	spci_yield();
+
+	/* Let the service run. */
+	service();
+
+	/* Cleanly handle it if the service returns. */
+	if (ctx->failures) {
+		abort();
+	}
+
+	for (;;) {
+		/* Hang if the service returns. */
+	}
+}
diff --git a/test/hftest/standalone_main.c b/test/hftest/standalone_main.c
new file mode 100644
index 0000000..586b3b3
--- /dev/null
+++ b/test/hftest/standalone_main.c
@@ -0,0 +1,109 @@
+/*
+ * Copyright 2019 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <stdalign.h>
+#include <stdint.h>
+
+#include "hf/arch/vm/interrupts.h"
+
+#include "hf/fdt.h"
+#include "hf/memiter.h"
+#include "hf/mm.h"
+
+#include "hftest_common.h"
+#include "test/hftest.h"
+
+alignas(4096) uint8_t kstack[4096];
+
+extern struct hftest_test hftest_begin[];
+extern struct hftest_test hftest_end[];
+
+/**
+ * Entry point when running as a standalone test image. Parses the command
+ * from the FDT's /chosen/bootargs and either lists the tests ("json"), runs
+ * one test ("run <suite> <test>"), or prints the usage help. Returning from
+ * this function on an error path simply stops the image.
+ */
+void kmain(const struct fdt_header *fdt)
+{
+	struct fdt_node n;
+	const char *bootargs;
+	uint32_t bootargs_size;
+	struct memiter bootargs_iter;
+	struct memiter command;
+
+	/*
+	 * Initialize the stage-1 MMU and identity-map the entire address space.
+	 */
+	if ((VM_TOOLCHAIN == 1) && !hftest_mm_init()) {
+		HFTEST_LOG("Memory initialization failed.");
+		return;
+	}
+
+	/*
+	 * Install the exception handler with no IRQ callback for now, so that
+	 * exceptions are logged.
+	 */
+	exception_setup(NULL, NULL);
+
+	/* Register the tests collected by the linker into .hftest. */
+	hftest_use_list(hftest_begin, hftest_end - hftest_begin);
+
+	if (!fdt_root_node(&n, fdt)) {
+		HFTEST_LOG("FDT failed validation.");
+		return;
+	}
+
+	if (!fdt_find_child(&n, "")) {
+		HFTEST_LOG("Unable to find root node in FDT.");
+		return;
+	}
+
+	if (!fdt_find_child(&n, "chosen")) {
+		HFTEST_LOG("Unable to find 'chosen' node in FDT.");
+		return;
+	}
+
+	if (!fdt_read_property(&n, "bootargs", &bootargs, &bootargs_size)) {
+		HFTEST_LOG("Unable to read bootargs.");
+		return;
+	}
+
+	/* Remove null terminator. */
+	memiter_init(&bootargs_iter, bootargs, bootargs_size - 1);
+
+	if (!memiter_parse_str(&bootargs_iter, &command)) {
+		HFTEST_LOG("Unable to parse command.");
+		return;
+	}
+
+	if (memiter_iseq(&command, "json")) {
+		hftest_json();
+		return;
+	}
+
+	if (memiter_iseq(&command, "run")) {
+		struct memiter suite_name;
+		struct memiter test_name;
+
+		if (!memiter_parse_str(&bootargs_iter, &suite_name)) {
+			HFTEST_LOG("Unable to parse test suite.");
+			return;
+		}
+
+		if (!memiter_parse_str(&bootargs_iter, &test_name)) {
+			HFTEST_LOG("Unable to parse test.");
+			return;
+		}
+		hftest_run(suite_name, test_name, fdt);
+		return;
+	}
+
+	/* Unknown command: print usage. */
+	hftest_help();
+}
diff --git a/test/inc/test/hftest.h b/test/inc/test/hftest.h
new file mode 100644
index 0000000..bcd22d8
--- /dev/null
+++ b/test/inc/test/hftest.h
@@ -0,0 +1,117 @@
+/*
+ * Copyright 2018 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <stdbool.h>
+#include <stddef.h>
+#include <stdint.h>
+
+#include "hf/dlog.h"
+
+/*
+ * Define a set up function to be run before every test in a test suite.
+ */
+#define SET_UP(suite) HFTEST_SET_UP(suite)
+
+/*
+ * Define a tear down function to be run after every test in a test suite.
+ */
+#define TEAR_DOWN(suite) HFTEST_TEAR_DOWN(suite)
+
+/*
+ * Define a test as part of a test suite.
+ */
+#define TEST(suite, test) HFTEST_TEST(suite, test, false)
+
+/*
+ * Define a test as part of a test suite and mark it long-running.
+ */
+#define TEST_LONG_RUNNING(suite, test) HFTEST_TEST(suite, test, true)
+
+/*
+ * Define a test service.
+ */
+#define TEST_SERVICE(service) HFTEST_TEST_SERVICE(service)
+
+/* Assertions. */
+#define ASSERT_EQ(x, y) HFTEST_ASSERT_OP(x, y, ==, true)
+#define ASSERT_NE(x, y) HFTEST_ASSERT_OP(x, y, !=, true)
+#define ASSERT_LE(x, y) HFTEST_ASSERT_OP(x, y, <=, true)
+#define ASSERT_LT(x, y) HFTEST_ASSERT_OP(x, y, <, true)
+#define ASSERT_GE(x, y) HFTEST_ASSERT_OP(x, y, >=, true)
+#define ASSERT_GT(x, y) HFTEST_ASSERT_OP(x, y, >, true)
+
+#define ASSERT_TRUE(x) ASSERT_EQ(x, true)
+#define ASSERT_FALSE(x) ASSERT_EQ(x, false)
+
+#define EXPECT_EQ(x, y) HFTEST_ASSERT_OP(x, y, ==, false)
+#define EXPECT_NE(x, y) HFTEST_ASSERT_OP(x, y, !=, false)
+#define EXPECT_LE(x, y) HFTEST_ASSERT_OP(x, y, <=, false)
+#define EXPECT_LT(x, y) HFTEST_ASSERT_OP(x, y, <, false)
+#define EXPECT_GE(x, y) HFTEST_ASSERT_OP(x, y, >=, false)
+#define EXPECT_GT(x, y) HFTEST_ASSERT_OP(x, y, >, false)
+
+#define EXPECT_TRUE(x) EXPECT_EQ(x, true)
+#define EXPECT_FALSE(x) EXPECT_EQ(x, false)
+
+#define FAIL(...) HFTEST_FAIL(true, __VA_ARGS__)
+
+/* Service utilities. */
+#define SERVICE_NAME_MAX_LENGTH 64
+#define SERVICE_SELECT(vm_id, service, send_buffer) \
+ HFTEST_SERVICE_SELECT(vm_id, service, send_buffer)
+
+#define SERVICE_SEND_BUFFER() HFTEST_SERVICE_SEND_BUFFER()
+#define SERVICE_RECV_BUFFER() HFTEST_SERVICE_RECV_BUFFER()
+#define SERVICE_MEMORY_SIZE() HFTEST_SERVICE_MEMORY_SIZE()
+
+/*
+ * This must be used exactly once in a test image to signal to the linker that
+ * the .hftest section is allowed to be included in the generated image.
+ */
+#define HFTEST_ENABLE() int hftest_enable
+
+/*
+ * Prefixed to log lines from tests for easy filtering in the console.
+ */
+#define HFTEST_LOG_PREFIX "[hftest] "
+
+/*
+ * Indentation used e.g. to give the reason for an assertion failure.
+ */
+#define HFTEST_LOG_INDENT " "
+
+/** Initializes stage-1 MMU for tests running in a VM. */
+bool hftest_mm_init(void);
+
+/** Adds stage-1 identity mapping for pages covering bytes [base, base+size). */
+void hftest_mm_identity_map(const void *base, size_t size, uint32_t mode);
+
+void hftest_mm_vcpu_init(void);
+
+/**
+ * Starts the CPU with the given ID. It will start at the provided entry point
+ * with the provided argument. It is a wrapper around the generic cpu_start()
+ * and takes care of MMU initialization.
+ */
+bool hftest_cpu_start(uintptr_t id, void *stack, size_t stack_size,
+ void (*entry)(uintptr_t arg), uintptr_t arg);
+
+uintptr_t hftest_get_cpu_id(size_t index);
+
+/* Above this point is the public API. Now include the implementation. */
+#include "hftest_impl.h"
diff --git a/test/inc/test/hftest_impl.h b/test/inc/test/hftest_impl.h
new file mode 100644
index 0000000..ed2fa91
--- /dev/null
+++ b/test/inc/test/hftest_impl.h
@@ -0,0 +1,310 @@
+/*
+ * Copyright 2018 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <stdnoreturn.h>
+
+#include "hf/fdt.h"
+#include "hf/spci.h"
+#include "hf/std.h"
+
+#include "vmapi/hf/spci.h"
+
+#define HFTEST_MAX_TESTS 50
+
+/*
+ * Log with the HFTEST_LOG_PREFIX and a new line. The zero is added so there is
+ * always at least one variadic argument.
+ */
+#define HFTEST_LOG(...) HFTEST_LOG_IMPL(__VA_ARGS__, 0)
+#define HFTEST_LOG_IMPL(format, ...) \
+ dlog("%s" format "\n", HFTEST_LOG_PREFIX, __VA_ARGS__)
+
+/* Helper to wrap the argument in quotes. */
+#define HFTEST_STR(str) #str
+
+/*
+ * Sections are named such that when the linker sorts them, all entries for the
+ * same test suite are contiguous and the set up and tear down entries come
+ * before the tests. This order simplifies test discovery in the running image.
+ */
+#define HFTEST_SET_UP_SECTION(suite_name) \
+ HFTEST_STR(.hftest.suite.suite_name .1set_up)
+#define HFTEST_TEAR_DOWN_SECTION(suite_name) \
+ HFTEST_STR(.hftest.suite.suite_name .1tear_down)
+#define HFTEST_TEST_SECTION(suite_name, test_name) \
+ HFTEST_STR(.hftest.suite.suite_name .2test.test_name)
+#define HFTEST_SERVICE_SECTION(service_name) \
+ HFTEST_STR(.hftest.service.service_name)
+
+/* Helpers to construct unique identifiers. */
+#define HFTEST_SET_UP_STRUCT(suite_name) hftest_set_up_##suite_name
+#define HFTEST_TEAR_DOWN_STRUCT(suite_name) hftest_tear_down_##suite_name
+#define HFTEST_TEST_STRUCT(suite_name, test_name) \
+ hftest_test_##suite_name##_##test_name
+#define HFTEST_SERVICE_STRUCT(service_name) hftest_service_##service_name
+
+#define HFTEST_SET_UP_FN(suite_name) hftest_set_up_fn_##suite_name
+#define HFTEST_TEAR_DOWN_FN(suite_name) hftest_tear_down_fn_##suite_name
+#define HFTEST_TEST_FN(suite_name, test_name) \
+ hftest_test_fn_##suite_name##_##test_name
+#define HFTEST_SERVICE_FN(service_name) hftest_service_fn_##service_name
+
+#define HFTEST_SET_UP_CONSTRUCTOR(suite_name) hftest_set_up_ctor_##suite_name
+#define HFTEST_TEAR_DOWN_CONSTRUCTOR(suite_name) \
+ hftest_tear_down_ctor_##suite_name
+#define HFTEST_TEST_CONSTRUCTOR(suite_name, test_name) \
+ hftest_test_ctor_##suite_name##_##test_name
+
+/* Register test functions. */
+#define HFTEST_SET_UP(suite_name) \
+ static void HFTEST_SET_UP_FN(suite_name)(void); \
+ const struct hftest_test __attribute__((used)) \
+ __attribute__((section(HFTEST_SET_UP_SECTION(suite_name)))) \
+ HFTEST_SET_UP_STRUCT(suite_name) = { \
+ .suite = #suite_name, \
+ .kind = HFTEST_KIND_SET_UP, \
+ .fn = HFTEST_SET_UP_FN(suite_name), \
+ }; \
+ static void __attribute__((constructor)) \
+ HFTEST_SET_UP_CONSTRUCTOR(suite_name)(void) \
+ { \
+ hftest_register(HFTEST_SET_UP_STRUCT(suite_name)); \
+ } \
+ static void HFTEST_SET_UP_FN(suite_name)(void)
+
+#define HFTEST_TEAR_DOWN(suite_name) \
+ static void HFTEST_TEAR_DOWN_FN(suite_name)(void); \
+ const struct hftest_test __attribute__((used)) \
+ __attribute__((section(HFTEST_TEAR_DOWN_SECTION(suite_name)))) \
+ HFTEST_TEAR_DOWN_STRUCT(suite_name) = { \
+ .suite = #suite_name, \
+ .kind = HFTEST_KIND_TEAR_DOWN, \
+ .fn = HFTEST_TEAR_DOWN_FN(suite_name), \
+ }; \
+ static void __attribute__((constructor)) \
+ HFTEST_TEAR_DOWN_CONSTRUCTOR(suite_name)(void) \
+ { \
+ hftest_register(HFTEST_TEAR_DOWN_STRUCT(suite_name)); \
+ } \
+ static void HFTEST_TEAR_DOWN_FN(suite_name)(void)
+
+#define HFTEST_TEST(suite_name, test_name, long_running) \
+ static void HFTEST_TEST_FN(suite_name, test_name)(void); \
+ const struct hftest_test __attribute__((used)) __attribute__( \
+ (section(HFTEST_TEST_SECTION(suite_name, test_name)))) \
+ HFTEST_TEST_STRUCT(suite_name, test_name) = { \
+ .suite = #suite_name, \
+ .kind = HFTEST_KIND_TEST, \
+ .name = #test_name, \
+ .is_long_running = long_running, \
+ .fn = HFTEST_TEST_FN(suite_name, test_name), \
+ }; \
+ static void __attribute__((constructor)) \
+ HFTEST_TEST_CONSTRUCTOR(suite_name, test_name)(void) \
+ { \
+ hftest_register(HFTEST_TEST_STRUCT(suite_name, test_name)); \
+ } \
+ static void HFTEST_TEST_FN(suite_name, test_name)(void)
+
+#define HFTEST_TEST_SERVICE(service_name) \
+ static void HFTEST_SERVICE_FN(service_name)(void); \
+ const struct hftest_test __attribute__((used)) \
+ __attribute__((section(HFTEST_SERVICE_SECTION(service_name)))) \
+ HFTEST_SERVICE_STRUCT(service_name) = { \
+ .kind = HFTEST_KIND_SERVICE, \
+ .name = #service_name, \
+ .fn = HFTEST_SERVICE_FN(service_name), \
+ }; \
+ static void HFTEST_SERVICE_FN(service_name)(void)
+
+/* Context for tests. */
+struct hftest_context {
+ uint32_t failures;
+ noreturn void (*abort)(void);
+
+ /* These are used in primary VMs. */
+ const struct fdt_header *fdt;
+
+ /* These are used in services. */
+ void *send;
+ void *recv;
+ size_t memory_size;
+};
+
+struct hftest_context *hftest_get_context(void);
+
+/* A test case. */
+typedef void (*hftest_test_fn)(void);
+
+enum hftest_kind {
+ HFTEST_KIND_SET_UP = 0,
+ HFTEST_KIND_TEST = 1,
+ HFTEST_KIND_TEAR_DOWN = 2,
+ HFTEST_KIND_SERVICE = 3,
+};
+
+/**
+ * The .hftest section contains an array of this struct which describes the test
+ * functions contained in the image allowing the image to inspect the tests it
+ * contains.
+ */
+struct hftest_test {
+ const char *suite;
+ enum hftest_kind kind;
+ const char *name;
+ bool is_long_running;
+ hftest_test_fn fn;
+};
+
+/*
+ * This union can store any of the primitive types supported by the assertion
+ * macros.
+ *
+ * It does not include pointers as comparison of pointers is not often needed
+ * and could be a mistake for string comparison. If pointer comparison is
+ * needed, an explicit assertion such as ASSERT_PTR_EQ() would be more
+ * appropriate.
+ */
+union hftest_any {
+ bool b;
+ char c;
+ signed char sc;
+ unsigned char uc;
+ signed short ss;
+ unsigned short us;
+ signed int si;
+ unsigned int ui;
+ signed long int sli;
+ unsigned long int uli;
+ signed long long int slli;
+ unsigned long long int ulli;
+};
+
+/* _Generic formatting doesn't seem to be supported so doing this manually. */
+/* clang-format off */
+
+/* Select the union member to match the type of the expression. */
+#define hftest_any_get(any, x) \
+ _Generic((x), \
+ bool: (any).b, \
+ char: (any).c, \
+ signed char: (any).sc, \
+ unsigned char: (any).uc, \
+ signed short: (any).ss, \
+ unsigned short: (any).us, \
+ signed int: (any).si, \
+ unsigned int: (any).ui, \
+ signed long int: (any).sli, \
+ unsigned long int: (any).uli, \
+ signed long long int: (any).slli, \
+ unsigned long long int: (any).ulli)
+
+/*
+ * dlog format specifier for types. Note, these aren't the standard specifiers
+ * for the types.
+ */
+#define hftest_dlog_format(x) \
+ _Generic((x), \
+ bool: "%u", \
+ char: "%c", \
+ signed char: "%d", \
+ unsigned char: "%u", \
+ signed short: "%d", \
+ unsigned short: "%u", \
+ signed int: "%d", \
+ unsigned int: "%u", \
+ signed long int: "%d", \
+ unsigned long int: "%u", \
+ signed long long int: "%d", \
+ unsigned long long int: "%u")
+
+/* clang-format on */
+
+#define HFTEST_LOG_FAILURE() \
+ dlog(HFTEST_LOG_PREFIX "Failure: %s:%u\n", __FILE__, __LINE__);
+
+#define HFTEST_ASSERT_OP(lhs, rhs, op, fatal) \
+ do { \
+ union hftest_any lhs_value; \
+ union hftest_any rhs_value; \
+ hftest_any_get(lhs_value, lhs) = (lhs); \
+ hftest_any_get(rhs_value, rhs) = (rhs); \
+ if (!(hftest_any_get(lhs_value, lhs) \
+ op hftest_any_get(rhs_value, rhs))) { \
+ struct hftest_context *ctx = hftest_get_context(); \
+ ++ctx->failures; \
+ HFTEST_LOG_FAILURE(); \
+ dlog(HFTEST_LOG_PREFIX HFTEST_LOG_INDENT \
+ "%s %s %s (%s=", \
+ #lhs, #op, #rhs, #lhs); \
+ dlog(hftest_dlog_format(lhs), \
+ hftest_any_get(lhs_value, lhs)); \
+ dlog(", %s=", #rhs); \
+ dlog(hftest_dlog_format(rhs), \
+ hftest_any_get(rhs_value, rhs)); \
+ dlog(")\n"); \
+ if (fatal) { \
+ ctx->abort(); \
+ } \
+ } \
+ } while (0)
+
+#define HFTEST_FAIL(fatal, ...) \
+ do { \
+ struct hftest_context *ctx = hftest_get_context(); \
+ ++ctx->failures; \
+ HFTEST_LOG_FAILURE(); \
+ dlog(HFTEST_LOG_PREFIX HFTEST_LOG_INDENT __VA_ARGS__); \
+ dlog("\n"); \
+ if (fatal) { \
+ ctx->abort(); \
+ } \
+ } while (0)
+
+/**
+ * Select the service to run in a service VM.
+ */
+#define HFTEST_SERVICE_SELECT(vm_id, service, send_buffer) \
+ do { \
+ struct spci_value run_res; \
+ uint32_t msg_length = \
+ strnlen_s(service, SERVICE_NAME_MAX_LENGTH); \
+ \
+ /* \
+ * Let the service configure its mailbox and wait for a \
+ * message. \
+ */ \
+ run_res = spci_run(vm_id, 0); \
+ ASSERT_EQ(run_res.func, SPCI_MSG_WAIT_32); \
+ ASSERT_EQ(run_res.arg2, SPCI_SLEEP_INDEFINITE); \
+ \
+ /* Send the selected service to run and let it be handled. */ \
+ memcpy_s(send_buffer, SPCI_MSG_PAYLOAD_MAX, service, \
+ msg_length); \
+ \
+ ASSERT_EQ(spci_msg_send(hf_vm_get_id(), vm_id, msg_length, 0) \
+ .func, \
+ SPCI_SUCCESS_32); \
+ run_res = spci_run(vm_id, 0); \
+ ASSERT_EQ(run_res.func, SPCI_YIELD_32); \
+ } while (0)
+
+#define HFTEST_SERVICE_SEND_BUFFER() hftest_get_context()->send
+#define HFTEST_SERVICE_RECV_BUFFER() hftest_get_context()->recv
+#define HFTEST_SERVICE_MEMORY_SIZE() hftest_get_context()->memory_size
+
+void hftest_register(struct hftest_test test);
diff --git a/test/inc/test/vmapi/exception_handler.h b/test/inc/test/vmapi/exception_handler.h
new file mode 100644
index 0000000..07ef312
--- /dev/null
+++ b/test/inc/test/vmapi/exception_handler.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2019 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include "vmapi/hf/spci.h"
+
+bool exception_handler_skip_instruction(void);
+
+bool exception_handler_yield(void);
+
+int exception_handler_get_num(void);
+
+void exception_handler_reset(void);
+
+void exception_handler_send_exception_count(void);
+
+int exception_handler_receive_exception_count(
+ const struct spci_value *send_res,
+ const struct spci_memory_region *recv_buf);
diff --git a/test/inc/test/vmapi/spci.h b/test/inc/test/vmapi/spci.h
new file mode 100644
index 0000000..3ecebf6
--- /dev/null
+++ b/test/inc/test/vmapi/spci.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2018 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include "vmapi/hf/spci.h"
+
+#define EXPECT_SPCI_ERROR(value, spci_error) \
+ do { \
+ EXPECT_EQ(value.func, SPCI_ERROR_32); \
+ EXPECT_EQ(value.arg2, spci_error); \
+ } while (0)
+
+struct mailbox_buffers {
+ void *send;
+ void *recv;
+};
+
+struct mailbox_buffers set_up_mailbox(void);
diff --git a/test/linux/BUILD.gn b/test/linux/BUILD.gn
new file mode 100644
index 0000000..0be5630
--- /dev/null
+++ b/test/linux/BUILD.gn
@@ -0,0 +1,96 @@
+# Copyright 2019 The Hafnium Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import("//build/image/image.gni")
+import("//build/toolchain/platform.gni")
+
+executable("test_binary") {
+ include_dirs = [
+ "//driver/linux/inc/uapi",
+ "//third_party/linux/include/uapi",
+ ]
+
+ testonly = true
+ sources = [
+ "linux.c",
+ ]
+ deps = [
+ "//test/hftest:hftest_linux",
+ ]
+ output_name = "test_binary"
+}
+
+vm_kernel("socket_vm1") {
+ testonly = true
+
+ deps = [
+ ":hftest_secondary_vm_socket",
+ ]
+}
+
+linux_initrd("linux_test_initrd") {
+ testonly = true
+
+ # Always use the aarch64_linux_clang toolchain to build test_binary
+ test_binary_target = ":test_binary(//build/toolchain:aarch64_linux_clang)"
+ sources = [
+ get_label_info(test_binary_target, "root_out_dir") + "/test_binary",
+ get_label_info("//driver/linux", "target_out_dir") + "/hafnium.ko",
+ ]
+ deps = [
+ "//driver/linux",
+ test_binary_target,
+ ]
+}
+
+initrd("linux_test") {
+ testonly = true
+
+ manifest = "manifest.dts"
+ primary_name = "vmlinuz"
+ primary_vm = "//third_party/linux:linux__prebuilt"
+ primary_initrd = ":linux_test_initrd"
+ secondary_vms = [ [
+ "socket0",
+ ":socket_vm1",
+ ] ]
+}
+
+group("linux") {
+ testonly = true
+
+ deps = [
+ ":linux_test",
+ ]
+}
+
+# Testing framework for a secondary VM with socket.
+source_set("hftest_secondary_vm_socket") {
+ testonly = true
+
+ configs += [ "//test/hftest:hftest_config" ]
+
+ sources = [
+ "hftest_socket.c",
+ ]
+
+ deps = [
+ "//src:dlog",
+ "//src:panic",
+ "//src:std",
+ "//src/arch/${plat_arch}:entry",
+ "//src/arch/${plat_arch}/hftest:entry",
+ "//src/arch/${plat_arch}/hftest:power_mgmt",
+ ]
+}
diff --git a/test/linux/hftest_socket.c b/test/linux/hftest_socket.c
new file mode 100644
index 0000000..b25691f
--- /dev/null
+++ b/test/linux/hftest_socket.c
@@ -0,0 +1,108 @@
+/*
+ * Copyright 2019 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <stdalign.h>
+#include <stdint.h>
+
+#include "hf/memiter.h"
+#include "hf/spci.h"
+#include "hf/std.h"
+
+#include "vmapi/hf/call.h"
+#include "vmapi/hf/transport.h"
+
+#include "test/hftest.h"
+#include "test/vmapi/spci.h"
+
+alignas(4096) uint8_t kstack[4096];
+
+static alignas(HF_MAILBOX_SIZE) uint8_t send[HF_MAILBOX_SIZE];
+static alignas(HF_MAILBOX_SIZE) uint8_t recv[HF_MAILBOX_SIZE];
+
+static hf_ipaddr_t send_addr = (hf_ipaddr_t)send;
+static hf_ipaddr_t recv_addr = (hf_ipaddr_t)recv;
+
+static struct hftest_context global_context;
+
+/** Returns the single, file-global test context for this service image. */
+struct hftest_context *hftest_get_context(void)
+{
+	return &global_context;
+}
+
+/**
+ * Aborts the service by deliberately faulting, since a secondary VM can't
+ * power down the machine.
+ */
+noreturn void abort(void)
+{
+	HFTEST_LOG("Service contained failures.");
+	/* Cause a fault, as a secondary can't power down the machine. */
+	*((volatile uint8_t *)1) = 1;
+
+	/* This should never be reached, but to make the compiler happy... */
+	for (;;) {
+	}
+}
+
+/** Exchanges the two 64-bit values pointed at by `a` and `b`. */
+static void swap(uint64_t *a, uint64_t *b)
+{
+	uint64_t t = *a;
+	*a = *b;
+	*b = t;
+}
+
+/**
+ * Echo server used by the Linux socket tests. Maps the mailbox, then loops
+ * forever: each received message is copied into the send buffer, the socket
+ * header's source/destination ports are swapped, and the message is sent back
+ * to its sender.
+ */
+noreturn void kmain(size_t memory_size)
+{
+	struct hftest_context *ctx;
+
+	/* Prepare the context. */
+
+	/* Set up the mailbox. */
+	spci_rxtx_map(send_addr, recv_addr);
+
+	/* Releasing an already-empty RX buffer must be denied. */
+	EXPECT_SPCI_ERROR(spci_rx_release(), SPCI_DENIED);
+
+	/* Clean the context. */
+	ctx = hftest_get_context();
+	memset_s(ctx, sizeof(*ctx), 0, sizeof(*ctx));
+	ctx->abort = abort;
+	ctx->send = send;
+	ctx->recv = recv;
+	ctx->memory_size = memory_size;
+
+	for (;;) {
+		struct spci_value ret;
+
+		/* Receive the packet. */
+		ret = spci_msg_wait();
+		EXPECT_EQ(ret.func, SPCI_MSG_SEND_32);
+		EXPECT_LE(spci_msg_send_size(ret), SPCI_MSG_PAYLOAD_MAX);
+
+		/* Echo the message back to the sender. */
+		memcpy_s(send, SPCI_MSG_PAYLOAD_MAX, recv,
+			 spci_msg_send_size(ret));
+
+		/* Swap the socket's source and destination ports. */
+		struct hf_msg_hdr *hdr = (struct hf_msg_hdr *)send;
+		swap(&(hdr->src_port), &(hdr->dst_port));
+
+		/* Swap the destination and source ids. */
+		spci_vm_id_t dst_id = spci_msg_send_sender(ret);
+		spci_vm_id_t src_id = spci_msg_send_receiver(ret);
+
+		EXPECT_EQ(spci_rx_release().func, SPCI_SUCCESS_32);
+		EXPECT_EQ(spci_msg_send(src_id, dst_id, spci_msg_send_size(ret),
+					0)
+				  .func,
+			  SPCI_SUCCESS_32);
+	}
+}
diff --git a/test/linux/linux.c b/test/linux/linux.c
new file mode 100644
index 0000000..d98d412
--- /dev/null
+++ b/test/linux/linux.c
@@ -0,0 +1,132 @@
+/*
+ * Copyright 2019 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <errno.h>
+#include <fcntl.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+#include "hf/dlog.h"
+#include "hf/socket.h"
+
+#include "test/hftest.h"
+#include <sys/socket.h>
+#include <sys/syscall.h>
+#include <sys/types.h>
+
+#define MAX_BUF_SIZE 256
+
+/** Thin wrapper around the finit_module(2) system call. */
+static int finit_module(int fd, const char *param_values, int flags)
+{
+	long rc = syscall(SYS_finit_module, fd, param_values, flags);
+
+	return (int)rc;
+}
+
+/** Thin wrapper around the delete_module(2) system call. */
+static int delete_module(const char *name, int flags)
+{
+	long rc = syscall(SYS_delete_module, name, flags);
+
+	return (int)rc;
+}
+
+/** Loads the Hafnium kernel module from the root of the filesystem. */
+static void insmod_hafnium(void)
+{
+	int fd;
+
+	fd = open("/hafnium.ko", O_RDONLY);
+	if (fd < 0) {
+		FAIL("Failed to load Hafnium kernel module from /hafnium.ko");
+		return;
+	}
+
+	EXPECT_EQ(finit_module(fd, "", 0), 0);
+	close(fd);
+}
+
+/** Unloads the Hafnium kernel module. */
+static void rmmod_hafnium(void)
+{
+	int rc = delete_module("hafnium", 0);
+
+	EXPECT_EQ(rc, 0);
+}
+
+/**
+ * Loads and unloads the Hafnium kernel module.
+ *
+ * A clean load immediately followed by a clean unload must both succeed;
+ * failures are reported by the insmod/rmmod helpers themselves.
+ */
+TEST(linux, load_hafnium)
+{
+	insmod_hafnium();
+	rmmod_hafnium();
+}
+
+/**
+ * Uses the kernel module to send a socket message from the primary VM to a
+ * secondary VM and echoes it back to the primary.
+ *
+ * Loads the module, opens and connects a PF_HF datagram socket to the
+ * "socket0" secondary, sends a test string and expects the identical bytes
+ * back. The socket is closed on every exit path so a failing step does not
+ * leak the descriptor.
+ */
+TEST(linux, socket_echo_hafnium)
+{
+	spci_vm_id_t vm_id = HF_VM_ID_OFFSET + 1;
+	int port = 10;
+	int socket_id;
+	struct hf_sockaddr addr;
+	const char send_buf[] = "The quick brown fox jumps over the lazy dogs.";
+	size_t send_len = strlen(send_buf);
+	char resp_buf[MAX_BUF_SIZE];
+	ssize_t recv_len;
+
+	ASSERT_LT(send_len, MAX_BUF_SIZE);
+
+	insmod_hafnium();
+
+	/* Create Hafnium socket. */
+	socket_id = socket(PF_HF, SOCK_DGRAM, 0);
+	if (socket_id == -1) {
+		FAIL("Socket creation failed: %s", strerror(errno));
+		return;
+	}
+	HFTEST_LOG("Socket created successfully.");
+
+	/* Connect to requested VM & port. */
+	addr.family = PF_HF;
+	addr.vm_id = vm_id;
+	addr.port = port;
+	if (connect(socket_id, (struct sockaddr *)&addr, sizeof(addr)) == -1) {
+		FAIL("Socket connection failed: %s", strerror(errno));
+		/* Don't leak the descriptor on failure. */
+		close(socket_id);
+		return;
+	}
+	HFTEST_LOG("Socket to secondary VM %d connected on port %d.", vm_id,
+		   port);
+
+	/*
+	 * Send a message to the secondary VM.
+	 * Enable the confirm flag to try again in case port is busy.
+	 */
+	if (send(socket_id, send_buf, send_len, MSG_CONFIRM) < 0) {
+		FAIL("Socket send() failed: %s", strerror(errno));
+		close(socket_id);
+		return;
+	}
+	/* send_len is a size_t; cast so it matches the %d conversion. */
+	HFTEST_LOG("Packet with length %d sent.", (int)send_len);
+
+	/* Receive a response, which should be an echo of the sent packet. */
+	recv_len = recv(socket_id, resp_buf, sizeof(resp_buf) - 1, 0);
+
+	if (recv_len == -1) {
+		FAIL("Socket recv() failed: %s", strerror(errno));
+		close(socket_id);
+		return;
+	}
+	/* recv_len is a ssize_t; cast so it matches the %d conversion. */
+	HFTEST_LOG("Packet with length %d received.", (int)recv_len);
+
+	EXPECT_EQ(recv_len, send_len);
+	EXPECT_EQ(memcmp(send_buf, resp_buf, send_len), 0);
+
+	EXPECT_EQ(close(socket_id), 0);
+	rmmod_hafnium();
+}
diff --git a/test/linux/manifest.dts b/test/linux/manifest.dts
new file mode 100644
index 0000000..29e2075
--- /dev/null
+++ b/test/linux/manifest.dts
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2019 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/dts-v1/;
+/plugin/;
+
+&{/} {
+	hypervisor {
+		compatible = "hafnium,hafnium";
+
+		/* Primary VM: a Linux kernel plus its initial ramdisk. */
+		vm1 {
+			debug_name = "linux_test";
+			kernel_filename = "vmlinuz";
+			ramdisk_filename = "initrd.img";
+		};
+
+		/* Secondary VM used by the socket tests. */
+		vm2 {
+			debug_name = "socket0";
+			vcpu_count = <1>;
+			mem_size = <0x100000>;
+			kernel_filename = "socket0";
+		};
+	};
+};
diff --git a/test/vmapi/BUILD.gn b/test/vmapi/BUILD.gn
new file mode 100644
index 0000000..c981e76
--- /dev/null
+++ b/test/vmapi/BUILD.gn
@@ -0,0 +1,27 @@
+# Copyright 2018 The Hafnium Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import("//build/image/image.gni")
+import("//build/toolchain/platform.gni")
+
+# Umbrella target pulling in every VM API test image.
+group("vmapi") {
+  testonly = true
+
+  deps = [
+    "arch/${plat_arch}:arch",
+    "common:common",
+    "primary_only:primary_only_test",
+    "primary_with_secondaries:primary_with_secondaries_test",
+  ]
+}
diff --git a/test/vmapi/arch/aarch64/BUILD.gn b/test/vmapi/arch/aarch64/BUILD.gn
new file mode 100644
index 0000000..8441677
--- /dev/null
+++ b/test/vmapi/arch/aarch64/BUILD.gn
@@ -0,0 +1,48 @@
+# Copyright 2019 The Hafnium Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import("//build/image/image.gni")
+
+# All aarch64-specific VM API test images.
+group("arch") {
+  testonly = true
+
+  deps = [
+    ":aarch64_test",
+  ]
+}
+
+# Tests specific to aarch64.
+vm_kernel("aarch64_test_vm") {
+  testonly = true
+  public_configs = [ "//src/arch/aarch64:config" ]
+
+  sources = [
+    "arch_features.c",
+    "smc_whitelist.c",
+    "smccc.c",
+  ]
+
+  deps = [
+    "//src/arch/aarch64:arch",
+    "//test/hftest:hftest_primary_vm",
+  ]
+}
+
+# Bundle the test kernel into a bootable initial ramdisk image.
+initrd("aarch64_test") {
+  testonly = true
+
+  manifest = "manifest.dts"
+  primary_name = "aarch64_test"
+  primary_vm = ":aarch64_test_vm"
+}
diff --git a/test/vmapi/arch/aarch64/arch_features.c b/test/vmapi/arch/aarch64/arch_features.c
new file mode 100644
index 0000000..66f29db
--- /dev/null
+++ b/test/vmapi/arch/aarch64/arch_features.c
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2019 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "vmapi/hf/call.h"
+
+#include "../msr.h"
+#include "test/hftest.h"
+
+/**
+ * Test that encoding a system register using the implementation defined syntax
+ * maps to the same register defined by name.
+ *
+ * S3_3_C9_C13_0 is the op0/op1/CRn/CRm/op2 encoding of PMCCNTR_EL0, so a
+ * value written through one name must read back through both.
+ */
+TEST(arch_features, read_write_msr_impdef)
+{
+	/*
+	 * NOTE(review): assumes PMCCNTR_EL0 is writable in this context
+	 * (i.e. PMU enables/traps permit it) — TODO confirm.
+	 */
+	uintreg_t value = 0xa;
+	write_msr(S3_3_C9_C13_0, value);
+	EXPECT_EQ(read_msr(S3_3_C9_C13_0), value);
+	EXPECT_EQ(read_msr(PMCCNTR_EL0), value);
+}
diff --git a/test/vmapi/arch/aarch64/gicv3/BUILD.gn b/test/vmapi/arch/aarch64/gicv3/BUILD.gn
new file mode 100644
index 0000000..57ceb99
--- /dev/null
+++ b/test/vmapi/arch/aarch64/gicv3/BUILD.gn
@@ -0,0 +1,52 @@
+# Copyright 2018 The Hafnium Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import("//build/image/image.gni")
+import("//build/toolchain/platform.gni")
+
+# Expose this suite's private headers (inc/gicv3.h) to its sources.
+config("config") {
+  include_dirs = [ "inc" ]
+}
+
+# Tests specific to GICv3.
+vm_kernel("gicv3_test_vm") {
+  testonly = true
+  public_configs = [ ":config" ]
+
+  sources = [
+    "busy_secondary.c",
+    "gicv3.c",
+    "interrupts.c",
+    "timer_secondary.c",
+  ]
+
+  deps = [
+    "//src/arch/aarch64:arch",
+    "//src/arch/aarch64/hftest:interrupts",
+    "//src/arch/aarch64/hftest:interrupts_gicv3",
+    "//test/hftest:hftest_primary_vm",
+  ]
+}
+
+# Pack the primary test kernel and the service VM into an initrd image.
+initrd("gicv3_test") {
+  testonly = true
+
+  manifest = "manifest.dts"
+  primary_name = "gicv3_test"
+  primary_vm = ":gicv3_test_vm"
+  secondary_vms = [ [
+        "services1",
+        "services:gicv3_service_vm1",
+      ] ]
+}
diff --git a/test/vmapi/arch/aarch64/gicv3/busy_secondary.c b/test/vmapi/arch/aarch64/gicv3/busy_secondary.c
new file mode 100644
index 0000000..8e39a2e
--- /dev/null
+++ b/test/vmapi/arch/aarch64/gicv3/busy_secondary.c
@@ -0,0 +1,168 @@
+/*
+ * Copyright 2019 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "hf/arch/irq.h"
+#include "hf/arch/vm/interrupts_gicv3.h"
+
+#include "hf/dlog.h"
+#include "hf/spci.h"
+#include "hf/std.h"
+
+#include "vmapi/hf/call.h"
+
+#include "gicv3.h"
+#include "msr.h"
+#include "test/hftest.h"
+
+/**
+ * Converts a number of nanoseconds to the equivalent number of timer ticks.
+ *
+ * The timer frequency is read from cntfrq_el0. Multiplying before dividing
+ * keeps precision; for the short delays used here (~1ms) the intermediate
+ * product stays well within 64 bits.
+ */
+static uint64_t ns_to_ticks(uint64_t ns)
+{
+	return ns * read_msr(cntfrq_el0) / NANOS_PER_UNIT;
+}
+
+/** Maps the GIC and mailbox, then starts the "busy" service in VM1. */
+SET_UP(busy_secondary)
+{
+	system_setup();
+	EXPECT_EQ(spci_rxtx_map(send_page_addr, recv_page_addr).func,
+		  SPCI_SUCCESS_32);
+	SERVICE_SELECT(SERVICE_VM1, "busy", send_buffer);
+}
+
+/**
+ * Arms the virtual timer, tells the "busy" service to loop forever and then
+ * checks that the timer interrupt pre-empts the secondary and is delivered
+ * to the primary VM.
+ */
+TEST(busy_secondary, virtual_timer)
+{
+	const char message[] = "loop";
+	struct spci_value run_res;
+
+	interrupt_enable(VIRTUAL_TIMER_IRQ, true);
+	interrupt_set_priority(VIRTUAL_TIMER_IRQ, 0x80);
+	interrupt_set_edge_triggered(VIRTUAL_TIMER_IRQ, true);
+	/*
+	 * Hypervisor timer IRQ is needed for Hafnium to return control to the
+	 * primary if the (emulated) virtual timer fires while the secondary is
+	 * running.
+	 */
+	interrupt_enable(HYPERVISOR_TIMER_IRQ, true);
+	interrupt_set_priority(HYPERVISOR_TIMER_IRQ, 0x80);
+	interrupt_set_edge_triggered(HYPERVISOR_TIMER_IRQ, true);
+	interrupt_set_priority_mask(0xff);
+	arch_irq_enable();
+
+	/* Let the secondary get started and wait for our message. */
+	run_res = spci_run(SERVICE_VM1, 0);
+	EXPECT_EQ(run_res.func, SPCI_MSG_WAIT_32);
+	EXPECT_EQ(run_res.arg2, SPCI_SLEEP_INDEFINITE);
+
+	/* Check that no interrupts are active or pending to start with. */
+	EXPECT_EQ(io_read32_array(GICD_ISPENDR, 0), 0);
+	EXPECT_EQ(io_read32(GICR_ISPENDR0), 0);
+	EXPECT_EQ(io_read32_array(GICD_ISACTIVER, 0), 0);
+	EXPECT_EQ(io_read32(GICR_ISACTIVER0), 0);
+
+	dlog("Starting timer\n");
+	/* Set virtual timer for 1 mS and enable. */
+	write_msr(CNTV_TVAL_EL0, ns_to_ticks(1000000));
+	write_msr(CNTV_CTL_EL0, 0x00000001);
+
+	/* Let secondary start looping. */
+	dlog("Telling secondary to loop.\n");
+	memcpy_s(send_buffer, SPCI_MSG_PAYLOAD_MAX, message, sizeof(message));
+	EXPECT_EQ(
+		spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM1, sizeof(message), 0)
+			.func,
+		SPCI_SUCCESS_32);
+	run_res = spci_run(SERVICE_VM1, 0);
+	/* The timer fires while the secondary loops, pre-empting it. */
+	EXPECT_EQ(run_res.func, SPCI_INTERRUPT_32);
+
+	dlog("Waiting for interrupt\n");
+	while (last_interrupt_id == 0) {
+		EXPECT_EQ(io_read32_array(GICD_ISPENDR, 0), 0);
+		EXPECT_EQ(io_read32(GICR_ISPENDR0), 0);
+		EXPECT_EQ(io_read32_array(GICD_ISACTIVER, 0), 0);
+		EXPECT_EQ(io_read32(GICR_ISACTIVER0), 0);
+	}
+
+	/* Check that we got the interrupt. */
+	dlog("Checking for interrupt\n");
+	EXPECT_EQ(last_interrupt_id, VIRTUAL_TIMER_IRQ);
+	/* Check timer status: 0x5 = ISTATUS | ENABLE, i.e. it has fired. */
+	EXPECT_EQ(read_msr(CNTV_CTL_EL0), 0x00000005);
+
+	/* There should again be no pending or active interrupts. */
+	EXPECT_EQ(io_read32_array(GICD_ISPENDR, 0), 0);
+	EXPECT_EQ(io_read32(GICR_ISPENDR0), 0);
+	EXPECT_EQ(io_read32_array(GICD_ISACTIVER, 0), 0);
+	EXPECT_EQ(io_read32(GICR_ISACTIVER0), 0);
+}
+
+/**
+ * Arms the physical timer, tells the "busy" service to loop forever and then
+ * checks that the timer interrupt pre-empts the secondary and is delivered
+ * to the primary VM.
+ */
+TEST(busy_secondary, physical_timer)
+{
+	const char message[] = "loop";
+	struct spci_value run_res;
+
+	interrupt_enable(PHYSICAL_TIMER_IRQ, true);
+	interrupt_set_priority(PHYSICAL_TIMER_IRQ, 0x80);
+	interrupt_set_edge_triggered(PHYSICAL_TIMER_IRQ, true);
+	interrupt_set_priority_mask(0xff);
+	arch_irq_enable();
+
+	/* Let the secondary get started and wait for our message. */
+	run_res = spci_run(SERVICE_VM1, 0);
+	EXPECT_EQ(run_res.func, SPCI_MSG_WAIT_32);
+	EXPECT_EQ(run_res.arg2, SPCI_SLEEP_INDEFINITE);
+
+	/* Check that no interrupts are active or pending to start with. */
+	EXPECT_EQ(io_read32_array(GICD_ISPENDR, 0), 0);
+	EXPECT_EQ(io_read32(GICR_ISPENDR0), 0);
+	EXPECT_EQ(io_read32_array(GICD_ISACTIVER, 0), 0);
+	EXPECT_EQ(io_read32(GICR_ISACTIVER0), 0);
+
+	dlog("Starting timer\n");
+	/* Set physical timer for 1 ms and enable. */
+	write_msr(CNTP_TVAL_EL0, ns_to_ticks(1000000));
+	write_msr(CNTP_CTL_EL0, 0x00000001);
+
+	/* Let secondary start looping. */
+	dlog("Telling secondary to loop.\n");
+	memcpy_s(send_buffer, SPCI_MSG_PAYLOAD_MAX, message, sizeof(message));
+	EXPECT_EQ(
+		spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM1, sizeof(message), 0)
+			.func,
+		SPCI_SUCCESS_32);
+	run_res = spci_run(SERVICE_VM1, 0);
+	/* The timer fires while the secondary loops, pre-empting it. */
+	EXPECT_EQ(run_res.func, SPCI_INTERRUPT_32);
+
+	dlog("Waiting for interrupt\n");
+	while (last_interrupt_id == 0) {
+		EXPECT_EQ(io_read32_array(GICD_ISPENDR, 0), 0);
+		EXPECT_EQ(io_read32(GICR_ISPENDR0), 0);
+		EXPECT_EQ(io_read32_array(GICD_ISACTIVER, 0), 0);
+		EXPECT_EQ(io_read32(GICR_ISACTIVER0), 0);
+	}
+
+	/* Check that we got the interrupt. */
+	dlog("Checking for interrupt\n");
+	EXPECT_EQ(last_interrupt_id, PHYSICAL_TIMER_IRQ);
+	/* Check timer status: 0x5 = ISTATUS | ENABLE, i.e. it has fired. */
+	EXPECT_EQ(read_msr(CNTP_CTL_EL0), 0x00000005);
+
+	/* There should again be no pending or active interrupts. */
+	EXPECT_EQ(io_read32_array(GICD_ISPENDR, 0), 0);
+	EXPECT_EQ(io_read32(GICR_ISPENDR0), 0);
+	EXPECT_EQ(io_read32_array(GICD_ISACTIVER, 0), 0);
+	EXPECT_EQ(io_read32(GICR_ISACTIVER0), 0);
+}
diff --git a/test/vmapi/arch/aarch64/gicv3/gicv3.c b/test/vmapi/arch/aarch64/gicv3/gicv3.c
new file mode 100644
index 0000000..13c4565
--- /dev/null
+++ b/test/vmapi/arch/aarch64/gicv3/gicv3.c
@@ -0,0 +1,114 @@
+/*
+ * Copyright 2018 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "gicv3.h"
+
+#include "hf/arch/irq.h"
+#include "hf/arch/vm/interrupts.h"
+#include "hf/arch/vm/interrupts_gicv3.h"
+
+#include "hf/dlog.h"
+#include "hf/mm.h"
+#include "hf/std.h"
+
+#include "vmapi/hf/call.h"
+
+#include "../msr.h"
+#include "test/hftest.h"
+
+/* Page-aligned pages used as the primary VM's send/receive mailbox. */
+alignas(PAGE_SIZE) uint8_t send_page[PAGE_SIZE];
+alignas(PAGE_SIZE) uint8_t recv_page[PAGE_SIZE];
+
+/* IPAs of the mailbox pages, as passed to spci_rxtx_map(). */
+hf_ipaddr_t send_page_addr = (hf_ipaddr_t)send_page;
+hf_ipaddr_t recv_page_addr = (hf_ipaddr_t)recv_page;
+
+void *send_buffer = send_page;
+void *recv_buffer = recv_page;
+
+/* ID of the last interrupt handled by irq(); written from IRQ context. */
+volatile uint32_t last_interrupt_id = 0;
+
+/**
+ * Primary VM IRQ handler: acknowledges the interrupt, records its ID in
+ * last_interrupt_id for the test bodies to observe, then signals
+ * end-of-interrupt.
+ */
+static void irq(void)
+{
+	uint32_t interrupt_id = interrupt_get_and_acknowledge();
+	dlog("primary IRQ %d from current\n", interrupt_id);
+	last_interrupt_id = interrupt_id;
+	interrupt_end(interrupt_id);
+	dlog("primary IRQ %d ended\n", interrupt_id);
+}
+
+/**
+ * Maps the GIC distributor, redistributor and SGI register pages as device
+ * memory and installs the test IRQ handler. Must run before any test that
+ * touches the GIC.
+ */
+void system_setup(void)
+{
+	const uint32_t mode = MM_MODE_R | MM_MODE_W | MM_MODE_D;
+
+	hftest_mm_identity_map((void *)GICD_BASE, PAGE_SIZE, mode);
+	hftest_mm_identity_map((void *)GICR_BASE, PAGE_SIZE, mode);
+	hftest_mm_identity_map((void *)SGI_BASE, PAGE_SIZE, mode);
+
+	exception_setup(irq, NULL);
+	interrupt_gic_setup();
+}
+
+/* Check that system registers are configured as we expect on startup. */
+TEST(system, system_registers_enabled)
+{
+	/*
+	 * ICC_SRE_EL1: bit 0 enables the system register interface to GICv3,
+	 * bits 1 and 2 disable FIQ and IRQ bypass respectively.
+	 */
+	uint32_t sre_enable = 1U << 0;
+	uint32_t fiq_bypass_disable = 1U << 1;
+	uint32_t irq_bypass_disable = 1U << 2;
+
+	EXPECT_EQ(read_msr(ICC_SRE_EL1),
+		  irq_bypass_disable | fiq_bypass_disable | sre_enable);
+}
+
+TEST(system, system_setup)
+{
+	system_setup();
+
+	/*
+	 * Should have affinity routing enabled, group 1 interrupts enabled,
+	 * group 0 disabled.
+	 */
+	uint32_t gicd_ctlr_mask = 0x13;
+	uint32_t gicd_ctlr_expected = 0x12;
+
+	EXPECT_EQ(io_read32(GICD_CTLR) & gicd_ctlr_mask, gicd_ctlr_expected);
+	EXPECT_EQ(read_msr(ICC_CTLR_EL1) & 0xff, 0);
+}
+
+/*
+ * Check that an attempt by a secondary VM to access a GICv3 system register is
+ * trapped.
+ *
+ * The "access_systemreg_ctlr" service is expected to yield once its register
+ * accesses have been handled, so reaching SPCI_YIELD_32 means the secondary
+ * survived the access rather than crashing.
+ */
+TEST(system, icc_ctlr_access_trapped_secondary)
+{
+	struct spci_value run_res;
+
+	EXPECT_EQ(spci_rxtx_map(send_page_addr, recv_page_addr).func,
+		  SPCI_SUCCESS_32);
+	SERVICE_SELECT(SERVICE_VM1, "access_systemreg_ctlr", send_buffer);
+
+	run_res = spci_run(SERVICE_VM1, 0);
+	EXPECT_EQ(run_res.func, SPCI_YIELD_32);
+}
+
+/*
+ * Check that an attempt by a secondary VM to write ICC_SRE_EL1 is trapped or
+ * ignored.
+ *
+ * The "write_systemreg_sre" service is expected to yield after its write
+ * attempts, so reaching SPCI_YIELD_32 means the secondary survived them.
+ */
+TEST(system, icc_sre_write_trapped_secondary)
+{
+	struct spci_value run_res;
+
+	EXPECT_EQ(spci_rxtx_map(send_page_addr, recv_page_addr).func,
+		  SPCI_SUCCESS_32);
+	SERVICE_SELECT(SERVICE_VM1, "write_systemreg_sre", send_buffer);
+
+	run_res = spci_run(SERVICE_VM1, 0);
+	EXPECT_EQ(run_res.func, SPCI_YIELD_32);
+}
diff --git a/test/vmapi/arch/aarch64/gicv3/inc/gicv3.h b/test/vmapi/arch/aarch64/gicv3/inc/gicv3.h
new file mode 100644
index 0000000..5fe9a52
--- /dev/null
+++ b/test/vmapi/arch/aarch64/gicv3/inc/gicv3.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright 2019 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include "hf/addr.h"
+#include "hf/mm.h"
+#include "hf/types.h"
+
+/* First private peripheral interrupt (PPI) ID. */
+#define PPI_IRQ_BASE 16
+#define PHYSICAL_TIMER_IRQ (PPI_IRQ_BASE + 14)
+#define VIRTUAL_TIMER_IRQ (PPI_IRQ_BASE + 11)
+#define HYPERVISOR_TIMER_IRQ (PPI_IRQ_BASE + 10)
+
+#define NANOS_PER_UNIT 1000000000
+
+/* ID of the secondary service VM used by these tests. */
+#define SERVICE_VM1 (HF_VM_ID_OFFSET + 1)
+
+/* Mailbox pages shared between the test cases (defined in gicv3.c). */
+extern alignas(PAGE_SIZE) uint8_t send_page[PAGE_SIZE];
+extern alignas(PAGE_SIZE) uint8_t recv_page[PAGE_SIZE];
+
+extern hf_ipaddr_t send_page_addr;
+extern hf_ipaddr_t recv_page_addr;
+
+extern void *send_buffer;
+extern void *recv_buffer;
+
+/* ID of the last interrupt taken by the primary's IRQ handler. */
+extern volatile uint32_t last_interrupt_id;
+
+/* Maps the GIC register pages and installs the test IRQ handler. */
+void system_setup(void);
diff --git a/test/vmapi/arch/aarch64/gicv3/interrupts.c b/test/vmapi/arch/aarch64/gicv3/interrupts.c
new file mode 100644
index 0000000..3472663
--- /dev/null
+++ b/test/vmapi/arch/aarch64/gicv3/interrupts.c
@@ -0,0 +1,171 @@
+/*
+ * Copyright 2018 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "hf/arch/irq.h"
+#include "hf/arch/vm/interrupts_gicv3.h"
+
+#include "hf/dlog.h"
+#include "hf/std.h"
+
+#include "vmapi/hf/call.h"
+
+#include "../msr.h"
+#include "gicv3.h"
+#include "test/hftest.h"
+
+/** Every test in this suite needs the GIC mapped and the IRQ handler set. */
+SET_UP(interrupts)
+{
+	system_setup();
+}
+
+/**
+ * Sends an SGI to this CPU with the interrupt enabled and checks that it is
+ * handled immediately, leaving nothing pending or active afterwards.
+ */
+TEST(interrupts, enable_sgi)
+{
+	/* Interrupt IDs 0 to 15 are SGIs. */
+	uint8_t intid = 3;
+	interrupt_set_priority_mask(0xff);
+	interrupt_set_priority(intid, 0x80);
+	arch_irq_enable();
+	interrupt_enable_all(true);
+	EXPECT_EQ(io_read32_array(GICD_ISPENDR, 0), 0);
+	EXPECT_EQ(io_read32(GICR_ISPENDR0), 0);
+	EXPECT_EQ(io_read32_array(GICD_ISACTIVER, 0), 0);
+	EXPECT_EQ(io_read32(GICR_ISACTIVER0), 0);
+
+	/* Send ourselves the SGI. */
+	/* Use a sentinel value so we can tell whether the handler ran. */
+	last_interrupt_id = 0xffffffff;
+	dlog("sending SGI\n");
+	interrupt_send_sgi(intid, false, 0, 0, 0, 1);
+	dlog("sent SGI\n");
+
+	/* Check that we got it, and we are back to not active or pending. */
+	EXPECT_EQ(last_interrupt_id, intid);
+	EXPECT_EQ(io_read32_array(GICD_ISPENDR, 0), 0);
+	EXPECT_EQ(io_read32_array(GICD_ISACTIVER, 0), 0);
+	EXPECT_EQ(io_read32(GICR_ISACTIVER0), 0);
+}
+
+/**
+ * Sends an SGI to this CPU with that interrupt disabled and checks that it
+ * is not delivered to the handler but remains pending in the redistributor.
+ */
+TEST(interrupts, disable_sgi)
+{
+	/* Interrupt IDs 0 to 15 are SGIs. */
+	uint8_t intid = 3;
+	interrupt_enable_all(true);
+	interrupt_enable(intid, false);
+	interrupt_set_priority_mask(0xff);
+	interrupt_set_priority(intid, 0x80);
+	arch_irq_enable();
+	EXPECT_EQ(io_read32_array(GICD_ISPENDR, 0), 0);
+	EXPECT_EQ(io_read32(GICR_ISPENDR0), 0);
+	EXPECT_EQ(io_read32_array(GICD_ISACTIVER, 0), 0);
+	EXPECT_EQ(io_read32(GICR_ISACTIVER0), 0);
+
+	/* Send ourselves the SGI. */
+	/* Use a sentinel value so we can tell whether the handler ran. */
+	last_interrupt_id = 0xffffffff;
+	dlog("sending SGI\n");
+	interrupt_send_sgi(intid, false, 0, 0, 0, 1);
+	dlog("sent SGI\n");
+
+	/* Check that we didn't get it, but it is pending (and not active). */
+	EXPECT_EQ(last_interrupt_id, 0xffffffff);
+	EXPECT_EQ(io_read32_array(GICD_ISPENDR, 0), 0);
+	EXPECT_EQ(io_read32(GICR_ISPENDR0), 0x1 << intid);
+	EXPECT_EQ(io_read32_array(GICD_ISACTIVER, 0), 0);
+	EXPECT_EQ(io_read32(GICR_ISACTIVER0), 0);
+}
+
+/**
+ * Arms the physical timer for a single tick and checks that its interrupt is
+ * taken by the primary's handler, with clean pending/active state before and
+ * after.
+ */
+TEST(interrupts, physical_timer)
+{
+	interrupt_enable(PHYSICAL_TIMER_IRQ, true);
+	interrupt_set_priority(PHYSICAL_TIMER_IRQ, 0x80);
+	interrupt_set_edge_triggered(PHYSICAL_TIMER_IRQ, true);
+	interrupt_set_priority_mask(0xff);
+	arch_irq_enable();
+
+	/*
+	 * Check that no (SGI or PPI) interrupts are active or pending to start
+	 * with.
+	 */
+	EXPECT_EQ(io_read32_array(GICD_ISPENDR, 0), 0);
+	EXPECT_EQ(io_read32(GICR_ISPENDR0), 0);
+	EXPECT_EQ(io_read32_array(GICD_ISACTIVER, 0), 0);
+	EXPECT_EQ(io_read32(GICR_ISACTIVER0), 0);
+
+	dlog("Starting timer\n");
+	/* Set physical timer for 1 tick. */
+	write_msr(CNTP_TVAL_EL0, 1);
+	/* Enable it. */
+	write_msr(CNTP_CTL_EL0, 0x00000001);
+
+	dlog("waiting for interrupt\n");
+	while (last_interrupt_id == 0) {
+		EXPECT_EQ(io_read32_array(GICD_ISPENDR, 0), 0);
+		EXPECT_EQ(io_read32(GICR_ISPENDR0), 0);
+		EXPECT_EQ(io_read32_array(GICD_ISACTIVER, 0), 0);
+		EXPECT_EQ(io_read32(GICR_ISACTIVER0), 0);
+	}
+
+	/* Check that we got the interrupt. */
+	dlog("Checking for interrupt\n");
+	EXPECT_EQ(last_interrupt_id, PHYSICAL_TIMER_IRQ);
+	/* Check timer status: 0x5 = ISTATUS | ENABLE, i.e. it has fired. */
+	EXPECT_EQ(read_msr(CNTP_CTL_EL0), 0x00000005);
+
+	/* There should again be no pending or active interrupts. */
+	EXPECT_EQ(io_read32_array(GICD_ISPENDR, 0), 0);
+	EXPECT_EQ(io_read32(GICR_ISPENDR0), 0);
+	EXPECT_EQ(io_read32_array(GICD_ISACTIVER, 0), 0);
+	EXPECT_EQ(io_read32(GICR_ISACTIVER0), 0);
+}
+
+/**
+ * Arms the virtual timer for a single tick and checks that its interrupt is
+ * taken by the primary's handler, with clean pending/active state before and
+ * after.
+ */
+TEST(interrupts, virtual_timer)
+{
+	interrupt_enable(VIRTUAL_TIMER_IRQ, true);
+	interrupt_set_priority(VIRTUAL_TIMER_IRQ, 0x80);
+	interrupt_set_edge_triggered(VIRTUAL_TIMER_IRQ, true);
+	interrupt_set_priority_mask(0xff);
+	arch_irq_enable();
+
+	/* Check that no interrupts are active or pending to start with. */
+	EXPECT_EQ(io_read32_array(GICD_ISPENDR, 0), 0);
+	EXPECT_EQ(io_read32(GICR_ISPENDR0), 0);
+	EXPECT_EQ(io_read32_array(GICD_ISACTIVER, 0), 0);
+	EXPECT_EQ(io_read32(GICR_ISACTIVER0), 0);
+
+	dlog("Starting timer\n");
+	/* Set virtual timer for 1 tick. */
+	write_msr(CNTV_TVAL_EL0, 1);
+	/* Enable it. */
+	write_msr(CNTV_CTL_EL0, 0x00000001);
+
+	dlog("Waiting for interrupt\n");
+	while (last_interrupt_id == 0) {
+		EXPECT_EQ(io_read32_array(GICD_ISPENDR, 0), 0);
+		EXPECT_EQ(io_read32(GICR_ISPENDR0), 0);
+		EXPECT_EQ(io_read32_array(GICD_ISACTIVER, 0), 0);
+		EXPECT_EQ(io_read32(GICR_ISACTIVER0), 0);
+	}
+
+	/* Check that we got the interrupt. */
+	dlog("Checking for interrupt\n");
+	EXPECT_EQ(last_interrupt_id, VIRTUAL_TIMER_IRQ);
+	/* Check timer status: 0x5 = ISTATUS | ENABLE, i.e. it has fired. */
+	EXPECT_EQ(read_msr(CNTV_CTL_EL0), 0x00000005);
+
+	/* There should again be no pending or active interrupts. */
+	EXPECT_EQ(io_read32_array(GICD_ISPENDR, 0), 0);
+	EXPECT_EQ(io_read32(GICR_ISPENDR0), 0);
+	EXPECT_EQ(io_read32_array(GICD_ISACTIVER, 0), 0);
+	EXPECT_EQ(io_read32(GICR_ISACTIVER0), 0);
+}
diff --git a/test/vmapi/arch/aarch64/gicv3/manifest.dts b/test/vmapi/arch/aarch64/gicv3/manifest.dts
new file mode 100644
index 0000000..ad500e3
--- /dev/null
+++ b/test/vmapi/arch/aarch64/gicv3/manifest.dts
@@ -0,0 +1,35 @@
+/*
+ * Copyright 2019 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/dts-v1/;
+/plugin/;
+
+&{/} {
+	hypervisor {
+		compatible = "hafnium,hafnium";
+
+		/* Primary VM running the gicv3 test kernel. */
+		vm1 {
+			debug_name = "gicv3_test";
+			kernel_filename = "gicv3_test";
+		};
+
+		/* Secondary VM providing the test services. */
+		vm2 {
+			debug_name = "services1";
+			vcpu_count = <1>;
+			mem_size = <0x100000>;
+			kernel_filename = "services1";
+		};
+	};
+};
diff --git a/test/vmapi/arch/aarch64/gicv3/services/BUILD.gn b/test/vmapi/arch/aarch64/gicv3/services/BUILD.gn
new file mode 100644
index 0000000..6c2f019
--- /dev/null
+++ b/test/vmapi/arch/aarch64/gicv3/services/BUILD.gn
@@ -0,0 +1,87 @@
+# Copyright 2019 The Hafnium Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import("//build/image/image.gni")
+
+# Helpers shared between the GICv3 service VMs.
+source_set("common") {
+  testonly = true
+  public_configs = [ "//test/hftest:hftest_config" ]
+  sources = [
+    "common.c",
+  ]
+}
+
+# Service which loops forever.
+source_set("busy") {
+  testonly = true
+  public_configs = [ "//test/hftest:hftest_config" ]
+
+  sources = [
+    "busy.c",
+  ]
+
+  deps = [
+    ":common",
+  ]
+}
+
+# Service which uses timers.
+source_set("timer") {
+  testonly = true
+  public_configs = [ "//test/hftest:hftest_config" ]
+
+  sources = [
+    "timer.c",
+  ]
+
+  deps = [
+    ":common",
+    "//src/arch/aarch64:arch",
+    "//src/arch/aarch64/hftest:interrupts",
+  ]
+}
+
+# Service which tries to access GICv3 system registers.
+source_set("systemreg") {
+  testonly = true
+  public_configs = [ "//test/hftest:hftest_config" ]
+
+  sources = [
+    "systemreg.c",
+  ]
+
+  deps = [
+    ":common",
+    "//src/arch/aarch64:arch",
+  ]
+
+  include_dirs = [
+    "//test/vmapi/common",
+    "//inc/vmapi/hf",
+  ]
+}
+
+# Group services together into VMs.
+
+# Secondary VM image bundling all of the above services.
+vm_kernel("gicv3_service_vm1") {
+  testonly = true
+
+  deps = [
+    ":busy",
+    ":systemreg",
+    ":timer",
+    "//test/hftest:hftest_secondary_vm",
+    "//test/vmapi/common:common",
+  ]
+}
diff --git a/test/vmapi/arch/aarch64/gicv3/services/busy.c b/test/vmapi/arch/aarch64/gicv3/services/busy.c
new file mode 100644
index 0000000..fece9f6
--- /dev/null
+++ b/test/vmapi/arch/aarch64/gicv3/services/busy.c
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2019 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "hf/arch/vm/interrupts_gicv3.h"
+
+#include "hf/dlog.h"
+
+#include "vmapi/hf/call.h"
+
+#include "common.h"
+#include "test/hftest.h"
+
+/*
+ * Secondary VM that loops forever after receiving a message.
+ */
+
+TEST_SERVICE(busy)
+{
+	dlog("Secondary waiting for message...\n");
+	mailbox_receive_retry();
+	EXPECT_EQ(spci_rx_release().func, SPCI_SUCCESS_32);
+	dlog("Secondary received message, looping forever.\n");
+
+	/* Spin forever. */
+	while (1) {
+	}
+}
diff --git a/test/vmapi/arch/aarch64/gicv3/services/common.c b/test/vmapi/arch/aarch64/gicv3/services/common.c
new file mode 100644
index 0000000..ba18d39
--- /dev/null
+++ b/test/vmapi/arch/aarch64/gicv3/services/common.c
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2019 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "common.h"
+
+#include "vmapi/hf/call.h"
+
+#include "test/hftest.h"
+
+/**
+ * Try to receive a message from the mailbox, blocking if necessary, and
+ * retrying if interrupted.
+ */
+struct spci_value mailbox_receive_retry(void)
+{
+	for (;;) {
+		struct spci_value ret = spci_msg_wait();
+
+		/* Retry only when the wait was reported as interrupted. */
+		if (ret.func == SPCI_ERROR_32 &&
+		    ret.arg2 == SPCI_INTERRUPTED) {
+			continue;
+		}
+
+		return ret;
+	}
+}
diff --git a/test/vmapi/arch/aarch64/gicv3/services/common.h b/test/vmapi/arch/aarch64/gicv3/services/common.h
new file mode 100644
index 0000000..ced8baf
--- /dev/null
+++ b/test/vmapi/arch/aarch64/gicv3/services/common.h
@@ -0,0 +1,19 @@
+/*
+ * Copyright 2019 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "vmapi/hf/spci.h"
+
+struct spci_value mailbox_receive_retry(void);
diff --git a/test/vmapi/arch/aarch64/gicv3/services/systemreg.c b/test/vmapi/arch/aarch64/gicv3/services/systemreg.c
new file mode 100644
index 0000000..78ddc0f
--- /dev/null
+++ b/test/vmapi/arch/aarch64/gicv3/services/systemreg.c
@@ -0,0 +1,60 @@
+/*
+ * Copyright 2019 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "hf/arch/vm/events.h"
+#include "hf/arch/vm/interrupts.h"
+#include "hf/arch/vm/interrupts_gicv3.h"
+#include "hf/arch/vm/timer.h"
+
+#include "hf/dlog.h"
+#include "hf/std.h"
+
+#include "vmapi/hf/call.h"
+
+#include "common.h"
+#include "test/hftest.h"
+#include "test/vmapi/exception_handler.h"
+
+/*
+ * Secondary VM that tries to access GICv3 system registers.
+ */
+
+TEST_SERVICE(access_systemreg_ctlr)
+{
+ exception_setup(NULL, exception_handler_skip_instruction);
+
+ /* Reading ICC_CTLR_EL1 should trap the VM. */
+ read_msr(ICC_CTLR_EL1);
+
+ /* Writing ICC_CTLR_EL1 should trap the VM. */
+ write_msr(ICC_CTLR_EL1, 0);
+
+ EXPECT_EQ(exception_handler_get_num(), 2);
+
+ /* Yield after catching the exceptions. */
+ spci_yield();
+}
+
+TEST_SERVICE(write_systemreg_sre)
+{
+ ASSERT_EQ(read_msr(ICC_SRE_EL1), 0x7);
+ /* Writing ICC_SRE_EL1 should trap the VM or be ignored. */
+ write_msr(ICC_SRE_EL1, 0x0);
+ ASSERT_EQ(read_msr(ICC_SRE_EL1), 0x7);
+ write_msr(ICC_SRE_EL1, 0xffffffff);
+ ASSERT_EQ(read_msr(ICC_SRE_EL1), 0x7);
+ spci_yield();
+}
diff --git a/test/vmapi/arch/aarch64/gicv3/services/timer.c b/test/vmapi/arch/aarch64/gicv3/services/timer.c
new file mode 100644
index 0000000..f62aa80
--- /dev/null
+++ b/test/vmapi/arch/aarch64/gicv3/services/timer.c
@@ -0,0 +1,130 @@
+/*
+ * Copyright 2019 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "hf/arch/vm/timer.h"
+
+#include "hf/arch/irq.h"
+#include "hf/arch/vm/events.h"
+#include "hf/arch/vm/interrupts.h"
+#include "hf/arch/vm/interrupts_gicv3.h"
+
+#include "hf/dlog.h"
+#include "hf/std.h"
+
+#include "vmapi/hf/call.h"
+
+#include "common.h"
+#include "test/hftest.h"
+#include "test/vmapi/spci.h"
+
+/*
+ * Secondary VM that sets timers in response to messages, and sends messages
+ * back when they fire.
+ */
+
+static volatile bool timer_fired = false;
+
+static void irq_current(void)
+{
+ uint32_t interrupt_id = hf_interrupt_get();
+ char buffer[] = "Got IRQ xx.";
+ int size = sizeof(buffer);
+ dlog("secondary IRQ %d from current\n", interrupt_id);
+ if (interrupt_id == HF_VIRTUAL_TIMER_INTID) {
+ timer_fired = true;
+ }
+ buffer[8] = '0' + interrupt_id / 10;
+ buffer[9] = '0' + interrupt_id % 10;
+ memcpy_s(SERVICE_SEND_BUFFER(), SPCI_MSG_PAYLOAD_MAX, buffer, size);
+ spci_msg_send(hf_vm_get_id(), HF_PRIMARY_VM_ID, size, 0);
+ dlog("secondary IRQ %d ended\n", interrupt_id);
+ event_send_local();
+}
+
+TEST_SERVICE(timer)
+{
+ exception_setup(irq_current, NULL);
+ hf_interrupt_enable(HF_VIRTUAL_TIMER_INTID, true);
+ arch_irq_enable();
+
+ for (;;) {
+ uint8_t *message = (uint8_t *)SERVICE_RECV_BUFFER();
+ bool wfi;
+ bool wfe;
+ bool receive;
+ bool disable_interrupts;
+ uint32_t ticks;
+ struct spci_value ret = mailbox_receive_retry();
+
+ if (spci_msg_send_sender(ret) != HF_PRIMARY_VM_ID ||
+ spci_msg_send_size(ret) != sizeof("**** xxxxxxx")) {
+ FAIL("Got unexpected message from VM %d, size %d.\n",
+ spci_msg_send_sender(ret),
+ spci_msg_send_size(ret));
+ }
+
+ /*
+ * Start a timer to send the message back: enable it and
+ * set it for the requested number of ticks.
+ */
+ wfi = memcmp(message, "WFI ", 4) == 0;
+ wfe = memcmp(message, "WFE ", 4) == 0;
+ receive = memcmp(message, "RECV", 4) == 0;
+ disable_interrupts = wfi || receive;
+ ticks = (message[5] - '0') * 1000000 +
+ (message[6] - '0') * 100000 +
+ (message[7] - '0') * 10000 + (message[8] - '0') * 1000 +
+ (message[9] - '0') * 100 + (message[10] - '0') * 10 +
+ (message[11] - '0');
+
+ EXPECT_EQ(spci_rx_release().func, SPCI_SUCCESS_32);
+
+ dlog("Starting timer for %d ticks.\n", ticks);
+
+ if (disable_interrupts) {
+ arch_irq_disable();
+ }
+
+ timer_set(ticks);
+ timer_start();
+ dlog("Waiting for timer...\n");
+
+ /* Wait for the timer interrupt. */
+ if (wfi) {
+ interrupt_wait();
+ } else if (wfe) {
+ while (!timer_fired) {
+ event_wait();
+ }
+ } else if (receive) {
+ struct spci_value res = spci_msg_wait();
+
+ EXPECT_SPCI_ERROR(res, SPCI_INTERRUPTED);
+ } else {
+ /* Busy wait until the timer fires. */
+ while (!timer_fired) {
+ }
+ }
+
+ if (disable_interrupts) {
+ arch_irq_enable();
+ }
+
+ EXPECT_TRUE(timer_fired);
+ timer_fired = false;
+ dlog("Done waiting.\n");
+ }
+}
diff --git a/test/vmapi/arch/aarch64/gicv3/timer_secondary.c b/test/vmapi/arch/aarch64/gicv3/timer_secondary.c
new file mode 100644
index 0000000..1e5c107
--- /dev/null
+++ b/test/vmapi/arch/aarch64/gicv3/timer_secondary.c
@@ -0,0 +1,286 @@
+/*
+ * Copyright 2019 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "hf/arch/irq.h"
+#include "hf/arch/vm/interrupts_gicv3.h"
+
+#include "hf/abi.h"
+#include "hf/call.h"
+#include "hf/spci.h"
+
+#include "gicv3.h"
+#include "test/hftest.h"
+#include "test/vmapi/spci.h"
+
+SET_UP(timer_secondary)
+{
+ system_setup();
+
+ EXPECT_EQ(spci_rxtx_map(send_page_addr, recv_page_addr).func,
+ SPCI_SUCCESS_32);
+ SERVICE_SELECT(SERVICE_VM1, "timer", send_buffer);
+
+ interrupt_enable(VIRTUAL_TIMER_IRQ, true);
+ interrupt_set_edge_triggered(VIRTUAL_TIMER_IRQ, true);
+ interrupt_set_priority_mask(0xff);
+ arch_irq_enable();
+}
+
+TEAR_DOWN(timer_secondary)
+{
+ EXPECT_SPCI_ERROR(spci_rx_release(), SPCI_DENIED);
+}
+
+static void timer_busywait_secondary()
+{
+ const char message[] = "loop 0099999";
+ const char expected_response[] = "Got IRQ 03.";
+ struct spci_value run_res;
+
+ /* Let the secondary get started and wait for our message. */
+ run_res = spci_run(SERVICE_VM1, 0);
+ EXPECT_EQ(run_res.func, SPCI_MSG_WAIT_32);
+ EXPECT_EQ(run_res.arg2, SPCI_SLEEP_INDEFINITE);
+
+ /* Send the message for the secondary to set a timer. */
+ memcpy_s(send_buffer, SPCI_MSG_PAYLOAD_MAX, message, sizeof(message));
+ EXPECT_EQ(
+ spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM1, sizeof(message), 0)
+ .func,
+ SPCI_SUCCESS_32);
+
+ /*
+ * Let the secondary handle the message and set the timer. It will loop
+ * until the hardware interrupt fires, at which point we'll get and
+	 * ignore the interrupt, and see a SPCI_INTERRUPT return code.
+ */
+ dlog("running secondary after sending timer message.\n");
+ last_interrupt_id = 0;
+ run_res = spci_run(SERVICE_VM1, 0);
+ EXPECT_EQ(run_res.func, SPCI_INTERRUPT_32);
+ dlog("secondary yielded after receiving timer message\n");
+ EXPECT_EQ(last_interrupt_id, VIRTUAL_TIMER_IRQ);
+
+ /*
+ * Now that the timer has expired, when we call spci_run again Hafnium
+ * should inject a virtual timer interrupt into the secondary, which
+ * should get it and respond.
+ */
+ run_res = spci_run(SERVICE_VM1, 0);
+ EXPECT_EQ(run_res.func, SPCI_MSG_SEND_32);
+ EXPECT_EQ(spci_msg_send_size(run_res), sizeof(expected_response));
+ EXPECT_EQ(memcmp(recv_buffer, expected_response,
+ sizeof(expected_response)),
+ 0);
+ EXPECT_EQ(spci_rx_release().func, SPCI_SUCCESS_32);
+}
+
+/**
+ * Send a message to the interruptible VM, which will start a timer to interrupt
+ * itself to send a response back.
+ */
+TEST(timer_secondary, busywait)
+{
+ /*
+ * Run the test twice in a row, to check that the state doesn't get
+ * messed up.
+ */
+ timer_busywait_secondary();
+ timer_busywait_secondary();
+}
+
+static void timer_secondary(const char message[], uint64_t expected_code)
+{
+ const char expected_response[] = "Got IRQ 03.";
+ size_t message_length = strnlen_s(message, 64) + 1;
+ struct spci_value run_res;
+
+ /* Let the secondary get started and wait for our message. */
+ run_res = spci_run(SERVICE_VM1, 0);
+ EXPECT_EQ(run_res.func, SPCI_MSG_WAIT_32);
+ EXPECT_EQ(run_res.arg2, SPCI_SLEEP_INDEFINITE);
+
+ /* Send the message for the secondary to set a timer. */
+ memcpy_s(send_buffer, SPCI_MSG_PAYLOAD_MAX, message, message_length);
+ EXPECT_EQ(
+ spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM1, message_length, 0)
+ .func,
+ SPCI_SUCCESS_32);
+
+ /* Let the secondary handle the message and set the timer. */
+ last_interrupt_id = 0;
+ run_res = spci_run(SERVICE_VM1, 0);
+
+ /*
+ * There's a race for whether the secondary manages to block and switch
+ * to the primary before the hardware timer fires, so we need to handle
+ * three cases:
+ * 1. The (hardware) timer fires immediately, we get SPCI_INTERRUPT.
+ * 2. The secondary blocks and switches back, we get expected_code until
+ * the timer fires.
+ * 2a. The timer then expires while we are in the primary, so Hafnium
+ * can inject the timer interrupt the next time we call spci_run.
+ * 2b. The timer fires while the secondary is running, so we get
+ * SPCI_INTERRUPT as in case 1.
+ */
+
+ if (run_res.func != expected_code &&
+ run_res.func != SPCI_INTERRUPT_32) {
+ FAIL("Expected run to return SPCI_INTERRUPT or %#x, but "
+ "got %#x",
+ expected_code, run_res.func);
+ }
+
+ /* Loop until the timer fires. */
+ while (run_res.func == expected_code) {
+ /*
+ * This case happens if the secondary manages to block and
+ * switch to the primary before the timer fires.
+ */
+ dlog("Primary looping until timer fires\n");
+ if (expected_code == HF_SPCI_RUN_WAIT_FOR_INTERRUPT ||
+ expected_code == SPCI_MSG_WAIT_32) {
+ EXPECT_NE(run_res.arg2, SPCI_SLEEP_INDEFINITE);
+ dlog("%d ns remaining\n", run_res.arg2);
+ }
+ run_res = spci_run(SERVICE_VM1, 0);
+ }
+ dlog("Primary done looping\n");
+
+ if (run_res.func == SPCI_INTERRUPT_32) {
+ /*
+ * This case happens if the (hardware) timer fires before the
+ * secondary blocks and switches to the primary, either
+ * immediately after setting the timer or during the loop above.
+ * Then we get the interrupt to the primary, ignore it, and see
+ * a SPCI_INTERRUPT code from the spci_run call, so we should
+ * call it again for the timer interrupt to be injected
+ * automatically by Hafnium.
+ */
+ EXPECT_EQ(last_interrupt_id, VIRTUAL_TIMER_IRQ);
+ dlog("Preempted by timer interrupt, running again\n");
+ run_res = spci_run(SERVICE_VM1, 0);
+ }
+
+ /* Once we wake it up it should get the timer interrupt and respond. */
+ EXPECT_EQ(run_res.func, SPCI_MSG_SEND_32);
+ EXPECT_EQ(spci_msg_send_size(run_res), sizeof(expected_response));
+ EXPECT_EQ(memcmp(recv_buffer, expected_response,
+ sizeof(expected_response)),
+ 0);
+ EXPECT_EQ(spci_rx_release().func, SPCI_SUCCESS_32);
+}
+
+/**
+ * Send a message to the interruptible VM, which will start a timer to interrupt
+ * itself to send a response back. This test is run with both long and short
+ * timer lengths, to try to cover both cases of the race for whether the timer
+ * fires before or after the secondary VM blocks and switches back to the
+ * primary.
+ */
+TEST(timer_secondary, wfi_short)
+{
+ /*
+ * Run the test twice in a row, to check that the state doesn't get
+ * messed up.
+ */
+ timer_secondary("WFI 0000001", HF_SPCI_RUN_WAIT_FOR_INTERRUPT);
+ timer_secondary("WFI 0000001", HF_SPCI_RUN_WAIT_FOR_INTERRUPT);
+}
+
+TEST(timer_secondary, wfi_long)
+{
+ /*
+ * Run the test twice in a row, to check that the state doesn't get
+ * messed up.
+ */
+ timer_secondary("WFI 0099999", HF_SPCI_RUN_WAIT_FOR_INTERRUPT);
+ timer_secondary("WFI 0099999", HF_SPCI_RUN_WAIT_FOR_INTERRUPT);
+}
+
+TEST(timer_secondary, wfe_short)
+{
+ /*
+ * Run the test twice in a row, to check that the state doesn't get
+ * messed up.
+ */
+ timer_secondary("WFE 0000001", SPCI_YIELD_32);
+ timer_secondary("WFE 0000001", SPCI_YIELD_32);
+}
+
+TEST(timer_secondary, wfe_long)
+{
+ /*
+ * Run the test twice in a row, to check that the state doesn't get
+ * messed up.
+ */
+ timer_secondary("WFE 0099999", SPCI_YIELD_32);
+ timer_secondary("WFE 0099999", SPCI_YIELD_32);
+}
+
+TEST(timer_secondary, receive_short)
+{
+ /*
+ * Run the test twice in a row, to check that the state doesn't get
+ * messed up.
+ */
+ timer_secondary("RECV 0000001", SPCI_MSG_WAIT_32);
+ timer_secondary("RECV 0000001", SPCI_MSG_WAIT_32);
+}
+
+TEST(timer_secondary, receive_long)
+{
+ /*
+ * Run the test twice in a row, to check that the state doesn't get
+ * messed up.
+ */
+ timer_secondary("RECV 0099999", SPCI_MSG_WAIT_32);
+ timer_secondary("RECV 0099999", SPCI_MSG_WAIT_32);
+}
+
+/**
+ * Set the timer for a very long time, and expect that it doesn't fire.
+ */
+TEST(timer_secondary, wfi_very_long)
+{
+ const char message[] = "WFI 9999999";
+ size_t message_length = strnlen_s(message, 64) + 1;
+ struct spci_value run_res;
+
+ /* Let the secondary get started and wait for our message. */
+ run_res = spci_run(SERVICE_VM1, 0);
+ EXPECT_EQ(run_res.func, SPCI_MSG_WAIT_32);
+ EXPECT_EQ(run_res.arg2, SPCI_SLEEP_INDEFINITE);
+
+ /* Send the message for the secondary to set a timer. */
+ memcpy_s(send_buffer, SPCI_MSG_PAYLOAD_MAX, message, message_length);
+ EXPECT_EQ(
+ spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM1, message_length, 0)
+ .func,
+ SPCI_SUCCESS_32);
+
+ /*
+ * Let the secondary handle the message and set the timer.
+ */
+ last_interrupt_id = 0;
+ for (int i = 0; i < 20; ++i) {
+ run_res = spci_run(SERVICE_VM1, 0);
+ EXPECT_EQ(run_res.func, HF_SPCI_RUN_WAIT_FOR_INTERRUPT);
+ dlog("Primary looping until timer fires; %d ns "
+ "remaining\n",
+ run_res.arg2);
+ }
+}
diff --git a/test/vmapi/arch/aarch64/manifest.dts b/test/vmapi/arch/aarch64/manifest.dts
new file mode 100644
index 0000000..2d56429
--- /dev/null
+++ b/test/vmapi/arch/aarch64/manifest.dts
@@ -0,0 +1,35 @@
+/*
+ * Copyright 2019 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/dts-v1/;
+/plugin/;
+
+&{/} {
+ hypervisor {
+ compatible = "hafnium,hafnium";
+ vm1 {
+ debug_name = "aarch64_test";
+ kernel_filename = "aarch64_test";
+
+ smc_whitelist = <
+ 0x30000001
+ 0x30000002
+ 0x30000003
+ 0x30000004
+ >;
+ };
+ };
+};
diff --git a/test/vmapi/arch/aarch64/smc_whitelist.c b/test/vmapi/arch/aarch64/smc_whitelist.c
new file mode 100644
index 0000000..fe0c664
--- /dev/null
+++ b/test/vmapi/arch/aarch64/smc_whitelist.c
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2019 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "vmapi/hf/call.h"
+
+#include "smc.h"
+#include "test/hftest.h"
+
+TEST(smc_whitelist, not_whitelisted_unknown)
+{
+ const uint32_t non_whitelisted_ta_call = 0x3000f00d;
+ struct spci_value smc_res = smc_forward(
+ non_whitelisted_ta_call, 0x1111111111111111, 0x2222222222222222,
+ 0x3333333333333333, 0x4444444444444444, 0x5555555555555555,
+ 0x6666666666666666, 0x77777777);
+
+ EXPECT_EQ(smc_res.func, SMCCC_ERROR_UNKNOWN);
+ EXPECT_EQ(smc_res.arg1, UINT64_C(0x1111111111111111));
+ EXPECT_EQ(smc_res.arg2, UINT64_C(0x2222222222222222));
+ EXPECT_EQ(smc_res.arg3, UINT64_C(0x3333333333333333));
+ EXPECT_EQ(smc_res.arg4, UINT64_C(0x4444444444444444));
+ EXPECT_EQ(smc_res.arg5, UINT64_C(0x5555555555555555));
+ EXPECT_EQ(smc_res.arg6, UINT64_C(0x6666666666666666));
+ EXPECT_EQ(smc_res.arg7, UINT64_C(0x77777777));
+}
diff --git a/test/vmapi/arch/aarch64/smccc.c b/test/vmapi/arch/aarch64/smccc.c
new file mode 100644
index 0000000..d42cc16
--- /dev/null
+++ b/test/vmapi/arch/aarch64/smccc.c
@@ -0,0 +1,105 @@
+/*
+ * Copyright 2019 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <stdint.h>
+
+#include "vmapi/hf/call.h"
+#include "vmapi/hf/spci.h"
+
+#include "smc.h"
+#include "test/hftest.h"
+
+static struct spci_value hvc(uint32_t func, uint64_t arg0, uint64_t arg1,
+ uint64_t arg2, uint64_t arg3, uint64_t arg4,
+ uint64_t arg5, uint32_t caller_id)
+{
+ register uint64_t r0 __asm__("x0") = func;
+ register uint64_t r1 __asm__("x1") = arg0;
+ register uint64_t r2 __asm__("x2") = arg1;
+ register uint64_t r3 __asm__("x3") = arg2;
+ register uint64_t r4 __asm__("x4") = arg3;
+ register uint64_t r5 __asm__("x5") = arg4;
+ register uint64_t r6 __asm__("x6") = arg5;
+ register uint64_t r7 __asm__("x7") = caller_id;
+
+ __asm__ volatile(
+ "hvc #0"
+ : /* Output registers, also used as inputs ('+' constraint). */
+ "+r"(r0), "+r"(r1), "+r"(r2), "+r"(r3), "+r"(r4), "+r"(r5),
+ "+r"(r6), "+r"(r7));
+
+ return (struct spci_value){.func = r0,
+ .arg1 = r1,
+ .arg2 = r2,
+ .arg3 = r3,
+ .arg4 = r4,
+ .arg5 = r5,
+ .arg6 = r6,
+ .arg7 = r7};
+}
+
+TEST(smccc, hf_debug_log_smc_zero_or_unchanged)
+{
+ struct spci_value smc_res =
+ smc_forward(HF_DEBUG_LOG, '\n', 0x2222222222222222,
+ 0x3333333333333333, 0x4444444444444444,
+ 0x5555555555555555, 0x6666666666666666, 0x77777777);
+
+ EXPECT_EQ(smc_res.func, 0);
+ EXPECT_EQ(smc_res.arg1, '\n');
+ EXPECT_EQ(smc_res.arg2, UINT64_C(0x2222222222222222));
+ EXPECT_EQ(smc_res.arg3, UINT64_C(0x3333333333333333));
+ EXPECT_EQ(smc_res.arg4, UINT64_C(0x4444444444444444));
+ EXPECT_EQ(smc_res.arg5, UINT64_C(0x5555555555555555));
+ EXPECT_EQ(smc_res.arg6, UINT64_C(0x6666666666666666));
+ EXPECT_EQ(smc_res.arg7, UINT64_C(0x77777777));
+}
+
+TEST(smccc, hf_debug_log_hvc_zero_or_unchanged)
+{
+ struct spci_value smc_res =
+ hvc(HF_DEBUG_LOG, '\n', 0x2222222222222222, 0x3333333333333333,
+ 0x4444444444444444, 0x5555555555555555, 0x6666666666666666,
+ 0x77777777);
+
+ EXPECT_EQ(smc_res.func, 0);
+ EXPECT_EQ(smc_res.arg1, '\n');
+ EXPECT_EQ(smc_res.arg2, UINT64_C(0x2222222222222222));
+ EXPECT_EQ(smc_res.arg3, UINT64_C(0x3333333333333333));
+ EXPECT_EQ(smc_res.arg4, UINT64_C(0x4444444444444444));
+ EXPECT_EQ(smc_res.arg5, UINT64_C(0x5555555555555555));
+ EXPECT_EQ(smc_res.arg6, UINT64_C(0x6666666666666666));
+ EXPECT_EQ(smc_res.arg7, UINT64_C(0x77777777));
+}
+
+/**
+ * Checks that calling SPCI_FEATURES via an SMC works as expected.
+ * The spci_features helper function uses an HVC, but an SMC should also work.
+ */
+TEST(smccc, spci_features_smc)
+{
+ struct spci_value ret;
+
+ ret = smc32(SPCI_FEATURES_32, SPCI_VERSION_32, 0, 0, 0, 0, 0, 0);
+ EXPECT_EQ(ret.func, SPCI_SUCCESS_32);
+ EXPECT_EQ(ret.arg1, 0);
+ EXPECT_EQ(ret.arg2, 0);
+ EXPECT_EQ(ret.arg3, 0);
+ EXPECT_EQ(ret.arg4, 0);
+ EXPECT_EQ(ret.arg5, 0);
+ EXPECT_EQ(ret.arg6, 0);
+ EXPECT_EQ(ret.arg7, 0);
+}
diff --git a/test/vmapi/common/BUILD.gn b/test/vmapi/common/BUILD.gn
new file mode 100644
index 0000000..48be642
--- /dev/null
+++ b/test/vmapi/common/BUILD.gn
@@ -0,0 +1,24 @@
+# Copyright 2019 The Hafnium Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import("//build/image/image.gni")
+
+source_set("common") {
+ testonly = true
+ public_configs = [ "//test/hftest:hftest_config" ]
+ sources = [
+ "exception_handler.c",
+ "spci.c",
+ ]
+}
diff --git a/test/vmapi/common/exception_handler.c b/test/vmapi/common/exception_handler.c
new file mode 100644
index 0000000..c58a5b8
--- /dev/null
+++ b/test/vmapi/common/exception_handler.c
@@ -0,0 +1,109 @@
+/*
+ * Copyright 2019 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "hf/dlog.h"
+
+#include "vmapi/hf/call.h"
+
+#include "../msr.h"
+#include "test/hftest.h"
+
+/**
+ * Tracks the number of times the exception handler has been invoked.
+ */
+static int exception_handler_exception_count = 0;
+
+/**
+ * Sends the number of exceptions handled to the Primary VM.
+ */
+void exception_handler_send_exception_count(void)
+{
+ void *send_buf = SERVICE_SEND_BUFFER();
+
+ dlog("Sending exception_count %d to primary VM\n",
+ exception_handler_exception_count);
+ memcpy_s(send_buf, SPCI_MSG_PAYLOAD_MAX,
+ (const void *)&exception_handler_exception_count,
+ sizeof(exception_handler_exception_count));
+ EXPECT_EQ(spci_msg_send(hf_vm_get_id(), HF_PRIMARY_VM_ID,
+ sizeof(exception_handler_exception_count), 0)
+ .func,
+ SPCI_SUCCESS_32);
+}
+
+/**
+ * Receives the number of exceptions handled.
+ */
+int exception_handler_receive_exception_count(
+ const struct spci_value *send_res,
+ const struct spci_memory_region *recv_buf)
+{
+ int exception_count = *((const int *)recv_buf);
+
+ EXPECT_EQ(send_res->func, SPCI_MSG_SEND_32);
+ EXPECT_EQ(spci_msg_send_size(*send_res), sizeof(exception_count));
+ EXPECT_EQ(spci_rx_release().func, SPCI_SUCCESS_32);
+ return exception_count;
+}
+
+/**
+ * EL1 exception handler to use in unit test VMs.
+ * Skips the instruction that triggered the exception.
+ */
+bool exception_handler_skip_instruction(void)
+{
+ dlog("%s function is triggered!\n", __func__);
+ ++exception_handler_exception_count;
+
+ /* Skip instruction that triggered the exception. */
+ uint64_t next_pc = read_msr(elr_el1);
+ next_pc += 4UL;
+ write_msr(elr_el1, next_pc);
+
+ /* Indicate that elr_el1 should not be restored. */
+ return true;
+}
+
+/**
+ * EL1 exception handler to use in unit test VMs.
+ * Yields control back to the hypervisor and sends the number of exceptions.
+ */
+bool exception_handler_yield(void)
+{
+ dlog("%s function is triggered!\n", __func__);
+ ++exception_handler_exception_count;
+
+ exception_handler_send_exception_count();
+
+ /* Indicate that elr_el1 should not be restored. */
+ return true;
+}
+
+/**
+ * Returns the number of times the instruction handler was invoked.
+ */
+int exception_handler_get_num(void)
+{
+ return exception_handler_exception_count;
+}
+
+/**
+ * Resets the exception counter.
+ */
+void exception_handler_reset(void)
+{
+ exception_handler_exception_count = 0;
+}
diff --git a/test/vmapi/common/spci.c b/test/vmapi/common/spci.c
new file mode 100644
index 0000000..a175cea
--- /dev/null
+++ b/test/vmapi/common/spci.c
@@ -0,0 +1,43 @@
+/*
+ * Copyright 2018 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "hf/spci.h"
+
+#include "hf/mm.h"
+#include "hf/static_assert.h"
+
+#include "vmapi/hf/call.h"
+
+#include "test/hftest.h"
+#include "test/vmapi/spci.h"
+
+static alignas(PAGE_SIZE) uint8_t send_page[PAGE_SIZE];
+static alignas(PAGE_SIZE) uint8_t recv_page[PAGE_SIZE];
+static_assert(sizeof(send_page) == PAGE_SIZE, "Send page is not a page.");
+static_assert(sizeof(recv_page) == PAGE_SIZE, "Recv page is not a page.");
+
+static hf_ipaddr_t send_page_addr = (hf_ipaddr_t)send_page;
+static hf_ipaddr_t recv_page_addr = (hf_ipaddr_t)recv_page;
+
+struct mailbox_buffers set_up_mailbox(void)
+{
+ ASSERT_EQ(spci_rxtx_map(send_page_addr, recv_page_addr).func,
+ SPCI_SUCCESS_32);
+ return (struct mailbox_buffers){
+ .send = send_page,
+ .recv = recv_page,
+ };
+}
diff --git a/test/vmapi/primary_only/BUILD.gn b/test/vmapi/primary_only/BUILD.gn
new file mode 100644
index 0000000..cdb35b9
--- /dev/null
+++ b/test/vmapi/primary_only/BUILD.gn
@@ -0,0 +1,36 @@
+# Copyright 2018 The Hafnium Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import("//build/image/image.gni")
+
+# Tests with no secondary VMs.
+vm_kernel("primary_only_test_vm") {
+ testonly = true
+
+ sources = [
+ "faults.c",
+ "primary_only.c",
+ ]
+
+ deps = [
+ "//test/hftest:hftest_primary_vm",
+ ]
+}
+
+initrd("primary_only_test") {
+ testonly = true
+ manifest = "manifest.dts"
+ primary_name = "primary_only_test"
+ primary_vm = ":primary_only_test_vm"
+}
diff --git a/test/vmapi/primary_only/faults.c b/test/vmapi/primary_only/faults.c
new file mode 100644
index 0000000..34b7e15
--- /dev/null
+++ b/test/vmapi/primary_only/faults.c
@@ -0,0 +1,79 @@
+/*
+ * Copyright 2019 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <stdalign.h>
+
+#include "hf/arch/vm/power_mgmt.h"
+
+#include "hf/mm.h"
+#include "hf/spinlock.h"
+
+#include "vmapi/hf/call.h"
+
+#include "test/hftest.h"
+
+alignas(PAGE_SIZE) static char tx[PAGE_SIZE];
+alignas(PAGE_SIZE) static char rx[PAGE_SIZE];
+
+struct state {
+ volatile bool done;
+ struct spinlock lock;
+};
+
+/**
+ * Releases the lock passed in, then spins reading the rx buffer.
+ */
+static void rx_reader(uintptr_t arg)
+{
+ struct state *s = (struct state *)arg;
+ sl_unlock(&s->lock);
+
+ while (!s->done) {
+ *(volatile char *)(&rx[0]);
+ }
+
+ sl_unlock(&s->lock);
+}
+
+/**
+ * Forces a spurious fault and checks that Hafnium recovers from it.
+ */
+TEST(faults, spurious_due_to_configure)
+{
+ struct state s;
+ alignas(4096) static uint8_t other_stack[4096];
+
+ sl_init(&s.lock);
+ s.done = false;
+
+ /* Start secondary CPU while holding lock. */
+ sl_lock(&s.lock);
+ EXPECT_EQ(
+ hftest_cpu_start(hftest_get_cpu_id(1), other_stack,
+ sizeof(other_stack), rx_reader, (uintptr_t)&s),
+ true);
+
+ /* Wait for CPU to release the lock. */
+ sl_lock(&s.lock);
+
+ /* Configure the VM's buffers. */
+ EXPECT_EQ(spci_rxtx_map((hf_ipaddr_t)&tx[0], (hf_ipaddr_t)&rx[0]).func,
+ SPCI_SUCCESS_32);
+
+ /* Tell other CPU to stop and wait for it. */
+ s.done = true;
+ sl_lock(&s.lock);
+}
diff --git a/test/vmapi/primary_only/manifest.dts b/test/vmapi/primary_only/manifest.dts
new file mode 100644
index 0000000..f83e057
--- /dev/null
+++ b/test/vmapi/primary_only/manifest.dts
@@ -0,0 +1,28 @@
+/*
+ * Copyright 2019 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/dts-v1/;
+/plugin/;
+
+&{/} {
+ hypervisor {
+ compatible = "hafnium,hafnium";
+ vm1 {
+ debug_name = "primary_only_test";
+ kernel_filename = "primary_only_test";
+ };
+ };
+};
diff --git a/test/vmapi/primary_only/primary_only.c b/test/vmapi/primary_only/primary_only.c
new file mode 100644
index 0000000..ab26f1d
--- /dev/null
+++ b/test/vmapi/primary_only/primary_only.c
@@ -0,0 +1,257 @@
+/*
+ * Copyright 2018 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <stdalign.h>
+
+#include "hf/arch/vm/power_mgmt.h"
+
+#include "hf/spinlock.h"
+
+#include "vmapi/hf/call.h"
+
+#include "test/hftest.h"
+#include "test/vmapi/spci.h"
+
+/*
+ * TODO: Some of these tests are duplicated between 'primary_only' and
+ * 'primary_with_secondaries'. Move them to a common place and consider
+ * running them inside secondary VMs too.
+ */
+
+/**
+ * Confirms the primary VM has the primary ID.
+ */
+TEST(hf_vm_get_id, primary_has_primary_id)
+{
+ EXPECT_EQ(hf_vm_get_id(), HF_PRIMARY_VM_ID);
+}
+
+/**
+ * Confirm there is only the primary VM.
+ */
+TEST(hf_vm_get_count, no_secondary_vms)
+{
+ /* This test image boots without any secondary VMs in its manifest. */
+ EXPECT_EQ(hf_vm_get_count(), 1);
+}
+
+/**
+ * Confirm the primary has at least one vCPU.
+ */
+TEST(hf_vcpu_get_count, primary_has_at_least_one)
+{
+ /*
+ * Was EXPECT_GE(..., 0), which is vacuously true for an unsigned
+ * count; compare against 1 so the check matches the stated intent.
+ */
+ EXPECT_GE(hf_vcpu_get_count(HF_PRIMARY_VM_ID), 1);
+}
+
+/**
+ * Confirm an error is returned when getting the vCPU count of a non-existent
+ * VM.
+ */
+TEST(hf_vcpu_get_count, no_secondary_vms)
+{
+ /* The first secondary VM ID is unallocated in this test image. */
+ EXPECT_EQ(hf_vcpu_get_count(HF_VM_ID_OFFSET + 1), 0);
+}
+
+/**
+ * Confirm an error is returned when getting the vCPU count for a reserved ID.
+ */
+TEST(hf_vcpu_get_count, reserved_vm_id)
+{
+ spci_vm_id_t id;
+
+ /* Every ID below HF_VM_ID_OFFSET is reserved and has no vCPUs. */
+ for (id = 0; id < HF_VM_ID_OFFSET; ++id) {
+ EXPECT_EQ(hf_vcpu_get_count(id), 0);
+ }
+}
+
+/**
+ * Confirm an error is returned when getting the vCPU count of a VM with an ID
+ * that is likely to be far outside the resource limit.
+ */
+TEST(hf_vcpu_get_count, large_invalid_vm_id)
+{
+ /* 0xffff is the maximum value of the 16-bit VM ID space. */
+ EXPECT_EQ(hf_vcpu_get_count(0xffff), 0);
+}
+
+/**
+ * Confirm it is an error when running a vCPU from the primary VM.
+ */
+TEST(spci_run, cannot_run_primary)
+{
+ /* Only secondary VMs may be scheduled via SPCI_RUN. */
+ struct spci_value res = spci_run(HF_PRIMARY_VM_ID, 0);
+ EXPECT_SPCI_ERROR(res, SPCI_INVALID_PARAMETERS);
+}
+
+/**
+ * Confirm it is an error when running a vCPU from a non-existent secondary VM.
+ */
+TEST(spci_run, cannot_run_absent_secondary)
+{
+ /* VM ID 1 is below HF_VM_ID_OFFSET and never a valid secondary. */
+ struct spci_value res = spci_run(1, 0);
+ EXPECT_SPCI_ERROR(res, SPCI_INVALID_PARAMETERS);
+}
+
+/**
+ * Yielding from the primary is a noop.
+ */
+TEST(spci_yield, yield_is_noop_for_primary)
+{
+ EXPECT_EQ(spci_yield().func, SPCI_SUCCESS_32);
+}
+
+/**
+ * Entry point for the secondary CPU: releases the lock passed in, which the
+ * primary is blocked on, to signal that this CPU has run.
+ */
+static void vm_cpu_entry(uintptr_t arg)
+{
+ struct spinlock *lock = (struct spinlock *)arg;
+
+ dlog("Second CPU started.\n");
+ sl_unlock(lock);
+}
+
+/**
+ * Confirm a new CPU can be started to execute in parallel.
+ */
+TEST(cpus, start)
+{
+ struct spinlock lock = SPINLOCK_INIT;
+ /* Static so the stack handed to the other CPU outlives this frame. */
+ alignas(4096) static uint8_t other_stack[4096];
+
+ /* Start secondary while holding lock. */
+ sl_lock(&lock);
+ EXPECT_EQ(hftest_cpu_start(hftest_get_cpu_id(1), other_stack,
+ sizeof(other_stack), vm_cpu_entry,
+ (uintptr_t)&lock),
+ true);
+
+ /* Wait for CPU to release the lock. */
+ sl_lock(&lock);
+}
+
+/** Ensures that the Hafnium SPCI version is reported as expected. */
+TEST(spci, spci_version)
+{
+ /* Version encoding: major revision in bits [31:16], minor in [15:0]. */
+ const int32_t major_revision = 0;
+ const int32_t major_revision_offset = 16;
+ const int32_t minor_revision = 9;
+ const int32_t current_version =
+ (major_revision << major_revision_offset) | minor_revision;
+
+ struct spci_value ret = spci_version();
+ EXPECT_EQ(ret.func, SPCI_SUCCESS_32);
+ EXPECT_EQ(ret.arg2, current_version);
+}
+
+/** Ensures that SPCI_FEATURES is reporting the expected interfaces. */
+TEST(spci, spci_features)
+{
+ struct spci_value ret;
+
+ ret = spci_features(SPCI_ERROR_32);
+ EXPECT_EQ(ret.func, SPCI_SUCCESS_32);
+
+ ret = spci_features(SPCI_SUCCESS_32);
+ EXPECT_EQ(ret.func, SPCI_SUCCESS_32);
+
+ ret = spci_features(SPCI_VERSION_32);
+ EXPECT_EQ(ret.func, SPCI_SUCCESS_32);
+
+ ret = spci_features(SPCI_FEATURES_32);
+ EXPECT_EQ(ret.func, SPCI_SUCCESS_32);
+
+ ret = spci_features(SPCI_ID_GET_32);
+ EXPECT_EQ(ret.func, SPCI_SUCCESS_32);
+
+ ret = spci_features(SPCI_YIELD_32);
+ EXPECT_EQ(ret.func, SPCI_SUCCESS_32);
+
+ ret = spci_features(SPCI_MSG_SEND_32);
+ EXPECT_EQ(ret.func, SPCI_SUCCESS_32);
+
+ ret = spci_features(SPCI_MSG_POLL_32);
+ EXPECT_EQ(ret.func, SPCI_SUCCESS_32);
+
+ /*
+ * The original re-queried SPCI_YIELD_32 after this; the duplicate
+ * check added no coverage and was removed.
+ */
+ ret = spci_features(SPCI_MSG_WAIT_32);
+ EXPECT_EQ(ret.func, SPCI_SUCCESS_32);
+}
+
+/**
+ * Ensures that SPCI_FEATURES returns not supported for a bogus FID or
+ * currently non-implemented interfaces.
+ */
+TEST(spci, spci_features_not_supported)
+{
+ struct spci_value ret;
+
+ /* A function ID of zero is never allocated. */
+ ret = spci_features(0);
+ EXPECT_SPCI_ERROR(ret, SPCI_NOT_SUPPORTED);
+
+ /* An arbitrary function ID that is not implemented. */
+ ret = spci_features(0x84000000);
+ EXPECT_SPCI_ERROR(ret, SPCI_NOT_SUPPORTED);
+
+ ret = spci_features(SPCI_INTERRUPT_32);
+ EXPECT_SPCI_ERROR(ret, SPCI_NOT_SUPPORTED);
+
+ ret = spci_features(SPCI_RX_RELEASE_32);
+ EXPECT_SPCI_ERROR(ret, SPCI_NOT_SUPPORTED);
+
+ ret = spci_features(SPCI_RXTX_MAP_32);
+ EXPECT_SPCI_ERROR(ret, SPCI_NOT_SUPPORTED);
+
+ ret = spci_features(SPCI_RXTX_UNMAP_32);
+ EXPECT_SPCI_ERROR(ret, SPCI_NOT_SUPPORTED);
+
+ ret = spci_features(SPCI_PARTITION_INFO_GET_32);
+ EXPECT_SPCI_ERROR(ret, SPCI_NOT_SUPPORTED);
+
+ ret = spci_features(SPCI_RUN_32);
+ EXPECT_SPCI_ERROR(ret, SPCI_NOT_SUPPORTED);
+
+ /*
+ * The original queried each direct-message FID twice; the duplicate
+ * checks added no coverage and were removed.
+ */
+ ret = spci_features(SPCI_MSG_SEND_DIRECT_RESP_32);
+ EXPECT_SPCI_ERROR(ret, SPCI_NOT_SUPPORTED);
+
+ ret = spci_features(SPCI_MSG_SEND_DIRECT_REQ_32);
+ EXPECT_SPCI_ERROR(ret, SPCI_NOT_SUPPORTED);
+}
+
+/**
+ * Test that floating-point operations work in the primary VM.
+ */
+TEST(fp, fp)
+{
+ /*
+ * Get some numbers that the compiler can't tell are constants, so it
+ * can't optimise them away.
+ */
+ double a = hf_vm_get_count();
+ double b = hf_vcpu_get_count(HF_PRIMARY_VM_ID);
+ double result = a * b;
+ dlog("VM count: %d\n", hf_vm_get_count());
+ dlog("vCPU count: %d\n", hf_vcpu_get_count(HF_PRIMARY_VM_ID));
+ dlog("result: %d\n", (int)result);
+ /* NOTE(review): hard-codes 1 VM x 8 vCPUs - assumes the test platform
+ * always boots with 8 CPUs; confirm against the test configuration. */
+ EXPECT_TRUE(a == 1.0);
+ EXPECT_TRUE(b == 8.0);
+ EXPECT_TRUE(result == 8.0);
+}
diff --git a/test/vmapi/primary_with_secondaries/BUILD.gn b/test/vmapi/primary_with_secondaries/BUILD.gn
new file mode 100644
index 0000000..ee3ab92
--- /dev/null
+++ b/test/vmapi/primary_with_secondaries/BUILD.gn
@@ -0,0 +1,74 @@
+# Copyright 2018 The Hafnium Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import("//build/image/image.gni")
+
+config("config") {
+ # Expose the test-local headers (e.g. primary_with_secondary.h).
+ include_dirs = [ "inc" ]
+}
+
+# Tests with secondary VMs.
+vm_kernel("primary_with_secondaries_test_vm") {
+ testonly = true
+ public_configs = [
+ ":config",
+ "//src/arch/aarch64:config",
+ ]
+
+ # One source file per test suite.
+ sources = [
+ "boot.c",
+ "debug_el1.c",
+ "floating_point.c",
+ "interrupts.c",
+ "mailbox.c",
+ "memory_sharing.c",
+ "no_services.c",
+ "perfmon.c",
+ "run_race.c",
+ "smp.c",
+ "spci.c",
+ "sysregs.c",
+ "unmapped.c",
+ ]
+
+ deps = [
+ "//src/arch/aarch64/hftest:registers",
+ "//test/hftest:hftest_primary_vm",
+ "//test/vmapi/common",
+ "//vmlib",
+ ]
+}
+
+# Bundle the primary test kernel with the three service VMs it drives.
+initrd("primary_with_secondaries_test") {
+ testonly = true
+
+ manifest = "manifest.dts"
+
+ primary_name = "primary_with_secondaries_test"
+ primary_vm = ":primary_with_secondaries_test_vm"
+ # Each entry is [ <name referenced by the manifest>, <build target> ].
+ secondary_vms = [
+ [
+ "services1",
+ "services:service_vm1",
+ ],
+ [
+ "services2",
+ "services:service_vm2",
+ ],
+ [
+ "services3",
+ "services:service_vm3",
+ ],
+ ]
+}
diff --git a/test/vmapi/primary_with_secondaries/boot.c b/test/vmapi/primary_with_secondaries/boot.c
new file mode 100644
index 0000000..e098538
--- /dev/null
+++ b/test/vmapi/primary_with_secondaries/boot.c
@@ -0,0 +1,68 @@
+/*
+ * Copyright 2019 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "hf/dlog.h"
+
+#include "vmapi/hf/call.h"
+
+#include "primary_with_secondary.h"
+#include "test/hftest.h"
+#include "test/vmapi/exception_handler.h"
+#include "test/vmapi/spci.h"
+
+/**
+ * The VM gets its memory size on boot, and can access it all.
+ */
+TEST(boot, memory_size)
+{
+ struct spci_value run_res;
+ struct mailbox_buffers mb = set_up_mailbox();
+
+ SERVICE_SELECT(SERVICE_VM1, "boot_memory", mb.send);
+
+ /* The service yields only after touching all of its memory. */
+ run_res = spci_run(SERVICE_VM1, 0);
+ EXPECT_EQ(run_res.func, SPCI_YIELD_32);
+}
+
+/**
+ * Accessing memory outside the given range traps the VM and yields.
+ */
+TEST(boot, beyond_memory_size)
+{
+ struct spci_value run_res;
+ struct mailbox_buffers mb = set_up_mailbox();
+
+ SERVICE_SELECT(SERVICE_VM1, "boot_memory_overrun", mb.send);
+
+ /* Expect exactly one exception reported by the service's handler. */
+ run_res = spci_run(SERVICE_VM1, 0);
+ EXPECT_EQ(exception_handler_receive_exception_count(&run_res, mb.recv),
+ 1);
+}
+
+/**
+ * Accessing memory before the start of the image traps the VM and yields.
+ */
+TEST(boot, memory_before_image)
+{
+ struct spci_value run_res;
+ struct mailbox_buffers mb = set_up_mailbox();
+
+ SERVICE_SELECT(SERVICE_VM1, "boot_memory_underrun", mb.send);
+
+ /* Expect exactly one exception reported by the service's handler. */
+ run_res = spci_run(SERVICE_VM1, 0);
+ EXPECT_EQ(exception_handler_receive_exception_count(&run_res, mb.recv),
+ 1);
+}
diff --git a/test/vmapi/primary_with_secondaries/debug_el1.c b/test/vmapi/primary_with_secondaries/debug_el1.c
new file mode 100644
index 0000000..d7d3df1
--- /dev/null
+++ b/test/vmapi/primary_with_secondaries/debug_el1.c
@@ -0,0 +1,96 @@
+/*
+ * Copyright 2019 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "primary_with_secondary.h"
+#include "sysregs.h"
+#include "test/vmapi/spci.h"
+
+/**
+ * QEMU does not properly handle the trapping of certain system register
+ * accesses. This was fixed in a custom local build that we could use. If not
+ * using that build, limit testing to the subset QEMU handles correctly.
+ * Set to 1 when running against the patched QEMU build.
+ */
+#define CUSTOM_QEMU_BUILD() 0
+
+/* Run the debug-register access checks inside a secondary VM. */
+TEST(debug_el1, secondary_basic)
+{
+ struct spci_value run_res;
+ struct mailbox_buffers mb = set_up_mailbox();
+
+ SERVICE_SELECT(SERVICE_VM1, "debug_el1_secondary_basic", mb.send);
+
+ run_res = spci_run(SERVICE_VM1, 0);
+ EXPECT_EQ(run_res.func, SPCI_YIELD_32);
+}
+
+/**
+ * Attempts to access debug registers for read, without validating their value.
+ * Each TRY_READ only checks the access does not fault.
+ */
+TEST(debug_el1, primary_basic)
+{
+ EXPECT_EQ(hf_vm_get_id(), HF_PRIMARY_VM_ID);
+
+ /* These registers trap in ways stock QEMU mishandles; see above. */
+ if (CUSTOM_QEMU_BUILD()) {
+ TRY_READ(DBGAUTHSTATUS_EL1);
+ TRY_READ(DBGCLAIMCLR_EL1);
+ TRY_READ(DBGCLAIMSET_EL1);
+ TRY_READ(DBGPRCR_EL1);
+ TRY_READ(OSDTRRX_EL1);
+ TRY_READ(OSDTRTX_EL1);
+ TRY_READ(OSECCR_EL1);
+
+ TRY_READ(DBGBCR2_EL1);
+ TRY_READ(DBGBCR3_EL1);
+ TRY_READ(DBGBCR4_EL1);
+ TRY_READ(DBGBCR5_EL1);
+ TRY_READ(DBGBVR2_EL1);
+ TRY_READ(DBGBVR3_EL1);
+ TRY_READ(DBGBVR4_EL1);
+ TRY_READ(DBGBVR5_EL1);
+ TRY_READ(DBGWCR2_EL1);
+ TRY_READ(DBGWCR3_EL1);
+ TRY_READ(DBGWVR2_EL1);
+ TRY_READ(DBGWVR3_EL1);
+ }
+
+ /* The following is the subset currently supported by QEMU. */
+ TRY_READ(MDCCINT_EL1);
+ TRY_READ(MDRAR_EL1);
+ TRY_READ(MDSCR_EL1);
+ TRY_READ(OSDLR_EL1);
+ TRY_READ(OSLSR_EL1);
+
+ TRY_READ(DBGBCR0_EL1);
+ TRY_READ(DBGBCR1_EL1);
+ TRY_READ(DBGBVR0_EL1);
+ TRY_READ(DBGBVR1_EL1);
+ TRY_READ(DBGWCR0_EL1);
+ TRY_READ(DBGWCR1_EL1);
+ TRY_READ(DBGWVR0_EL1);
+ TRY_READ(DBGWVR1_EL1);
+}
+
+/**
+ * Tests a few debug registers for read and write, and checks that the expected
+ * value is written/read.
+ */
+TEST(debug_el1, primary_read_write)
+{
+ EXPECT_EQ(hf_vm_get_id(), HF_PRIMARY_VM_ID);
+
+ /* Arbitrary writable bit patterns for breakpoint control/value regs. */
+ TRY_WRITE_READ(DBGBCR0_EL1, 0x2);
+ TRY_WRITE_READ(DBGBVR0_EL1, 0xf0);
+}
diff --git a/test/vmapi/primary_with_secondaries/floating_point.c b/test/vmapi/primary_with_secondaries/floating_point.c
new file mode 100644
index 0000000..1904d68
--- /dev/null
+++ b/test/vmapi/primary_with_secondaries/floating_point.c
@@ -0,0 +1,73 @@
+/*
+ * Copyright 2019 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "hf/arch/std.h"
+#include "hf/arch/vm/registers.h"
+
+#include "hf/spci.h"
+
+#include "vmapi/hf/call.h"
+
+#include "../msr.h"
+#include "primary_with_secondary.h"
+#include "test/hftest.h"
+#include "test/vmapi/spci.h"
+
+/**
+ * Test that floating point registers are saved and restored by
+ * filling them with one value here and a different value in the
+ * service.
+ */
+TEST(floating_point, fp_fill)
+{
+ const double first = 1.2;
+ const double second = -2.3;
+ struct spci_value run_res;
+ struct mailbox_buffers mb = set_up_mailbox();
+
+ /* Fill, switch to the service (which fills its own), and check the
+ * primary's registers survived the context switch. */
+ fill_fp_registers(first);
+ SERVICE_SELECT(SERVICE_VM1, "fp_fill", mb.send);
+ run_res = spci_run(SERVICE_VM1, 0);
+ EXPECT_EQ(run_res.func, SPCI_YIELD_32);
+ EXPECT_EQ(check_fp_register(first), true);
+
+ /* Repeat with a different value to rule out stale state. */
+ fill_fp_registers(second);
+ run_res = spci_run(SERVICE_VM1, 0);
+ EXPECT_EQ(run_res.func, SPCI_YIELD_32);
+ EXPECT_EQ(check_fp_register(second), true);
+}
+
+/**
+ * Test that the floating point control register is restored correctly
+ * on full context switch when needed by changing it in the service.
+ */
+TEST(floating_point, fp_fpcr)
+{
+ /* The primary's FPCR is expected to be 0 throughout the test;
+ * NOTE(review): assumes 0 is the boot-time FPCR value - confirm. */
+ uintreg_t value = 0;
+ struct spci_value run_res;
+ struct mailbox_buffers mb = set_up_mailbox();
+
+ EXPECT_EQ(read_msr(fpcr), value);
+
+ SERVICE_SELECT(SERVICE_VM1, "fp_fpcr", mb.send);
+ run_res = spci_run(SERVICE_VM1, 0);
+ EXPECT_EQ(run_res.func, SPCI_YIELD_32);
+ EXPECT_EQ(read_msr(fpcr), value);
+
+ /* Run the service again after it modified its own FPCR. */
+ run_res = spci_run(SERVICE_VM1, 0);
+ EXPECT_EQ(run_res.func, SPCI_YIELD_32);
+ EXPECT_EQ(read_msr(fpcr), value);
+}
diff --git a/test/vmapi/primary_with_secondaries/inc/primary_with_secondary.h b/test/vmapi/primary_with_secondaries/inc/primary_with_secondary.h
new file mode 100644
index 0000000..ad8dcc8
--- /dev/null
+++ b/test/vmapi/primary_with_secondaries/inc/primary_with_secondary.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright 2018 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+/* IDs of the secondary service VMs launched by the test initrd. */
+#define SERVICE_VM1 (HF_VM_ID_OFFSET + 1)
+#define SERVICE_VM2 (HF_VM_ID_OFFSET + 2)
+#define SERVICE_VM3 (HF_VM_ID_OFFSET + 3)
+
+/* Interrupt IDs; must match the IRQ numbers the services echo back
+ * (e.g. "Got IRQ 07." for EXTERNAL_INTERRUPT_ID_A). */
+#define SELF_INTERRUPT_ID 5
+#define EXTERNAL_INTERRUPT_ID_A 7
+#define EXTERNAL_INTERRUPT_ID_B 8
+#define EXTERNAL_INTERRUPT_ID_C 9
diff --git a/test/vmapi/primary_with_secondaries/interrupts.c b/test/vmapi/primary_with_secondaries/interrupts.c
new file mode 100644
index 0000000..dce89ab
--- /dev/null
+++ b/test/vmapi/primary_with_secondaries/interrupts.c
@@ -0,0 +1,301 @@
+/*
+ * Copyright 2019 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <stdint.h>
+
+#include "hf/std.h"
+
+#include "vmapi/hf/call.h"
+
+#include "primary_with_secondary.h"
+#include "test/hftest.h"
+#include "test/vmapi/spci.h"
+
+/* After each test the mailbox must be empty: releasing an RX buffer with
+ * nothing pending is denied. */
+TEAR_DOWN(interrupts)
+{
+ EXPECT_SPCI_ERROR(spci_rx_release(), SPCI_DENIED);
+}
+
+/**
+ * Send a message to the interruptible VM, which will interrupt itself to send a
+ * response back. "05" matches SELF_INTERRUPT_ID.
+ */
+TEST(interrupts, interrupt_self)
+{
+ const char message[] = "Ping";
+ const char expected_response[] = "Got IRQ 05.";
+ struct spci_value run_res;
+ struct mailbox_buffers mb = set_up_mailbox();
+
+ SERVICE_SELECT(SERVICE_VM1, "interruptible", mb.send);
+
+ run_res = spci_run(SERVICE_VM1, 0);
+ EXPECT_EQ(run_res.func, SPCI_MSG_WAIT_32);
+ EXPECT_EQ(run_res.arg2, SPCI_SLEEP_INDEFINITE);
+
+ /* Set the message, echo it and wait for a response. */
+ memcpy_s(mb.send, SPCI_MSG_PAYLOAD_MAX, message, sizeof(message));
+ EXPECT_EQ(
+ spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM1, sizeof(message), 0)
+ .func,
+ SPCI_SUCCESS_32);
+ run_res = spci_run(SERVICE_VM1, 0);
+ EXPECT_EQ(run_res.func, SPCI_MSG_SEND_32);
+ EXPECT_EQ(spci_msg_send_size(run_res), sizeof(expected_response));
+ EXPECT_EQ(memcmp(mb.recv, expected_response, sizeof(expected_response)),
+ 0);
+ EXPECT_EQ(spci_rx_release().func, SPCI_SUCCESS_32);
+}
+
+/**
+ * Inject an interrupt to the interrupt VM, which will send a message back.
+ * Repeat this twice to make sure it doesn't get into a bad state after the
+ * first one. "07" matches EXTERNAL_INTERRUPT_ID_A.
+ */
+TEST(interrupts, inject_interrupt_twice)
+{
+ const char expected_response[] = "Got IRQ 07.";
+ struct spci_value run_res;
+ struct mailbox_buffers mb = set_up_mailbox();
+
+ SERVICE_SELECT(SERVICE_VM1, "interruptible", mb.send);
+
+ run_res = spci_run(SERVICE_VM1, 0);
+ EXPECT_EQ(run_res.func, SPCI_MSG_WAIT_32);
+ EXPECT_EQ(run_res.arg2, SPCI_SLEEP_INDEFINITE);
+
+ /* Inject the interrupt and wait for a message. */
+ hf_interrupt_inject(SERVICE_VM1, 0, EXTERNAL_INTERRUPT_ID_A);
+ run_res = spci_run(SERVICE_VM1, 0);
+ EXPECT_EQ(run_res.func, SPCI_MSG_SEND_32);
+ EXPECT_EQ(spci_msg_send_size(run_res), sizeof(expected_response));
+ EXPECT_EQ(memcmp(mb.recv, expected_response, sizeof(expected_response)),
+ 0);
+ EXPECT_EQ(spci_rx_release().func, SPCI_SUCCESS_32);
+
+ /* Inject the interrupt again, and wait for the same message. */
+ hf_interrupt_inject(SERVICE_VM1, 0, EXTERNAL_INTERRUPT_ID_A);
+ run_res = spci_run(SERVICE_VM1, 0);
+ EXPECT_EQ(run_res.func, SPCI_MSG_SEND_32);
+ EXPECT_EQ(spci_msg_send_size(run_res), sizeof(expected_response));
+ EXPECT_EQ(memcmp(mb.recv, expected_response, sizeof(expected_response)),
+ 0);
+ EXPECT_EQ(spci_rx_release().func, SPCI_SUCCESS_32);
+}
+
+/**
+ * Inject two different interrupts to the interrupt VM, which will send a
+ * message back each time. "07"/"08" match EXTERNAL_INTERRUPT_ID_A/B.
+ */
+TEST(interrupts, inject_two_interrupts)
+{
+ const char expected_response[] = "Got IRQ 07.";
+ const char expected_response_2[] = "Got IRQ 08.";
+ struct spci_value run_res;
+ struct mailbox_buffers mb = set_up_mailbox();
+
+ SERVICE_SELECT(SERVICE_VM1, "interruptible", mb.send);
+
+ run_res = spci_run(SERVICE_VM1, 0);
+ EXPECT_EQ(run_res.func, SPCI_MSG_WAIT_32);
+ EXPECT_EQ(run_res.arg2, SPCI_SLEEP_INDEFINITE);
+
+ /* Inject the interrupt and wait for a message. */
+ hf_interrupt_inject(SERVICE_VM1, 0, EXTERNAL_INTERRUPT_ID_A);
+ run_res = spci_run(SERVICE_VM1, 0);
+ EXPECT_EQ(run_res.func, SPCI_MSG_SEND_32);
+ EXPECT_EQ(spci_msg_send_size(run_res), sizeof(expected_response));
+ EXPECT_EQ(memcmp(mb.recv, expected_response, sizeof(expected_response)),
+ 0);
+ EXPECT_EQ(spci_rx_release().func, SPCI_SUCCESS_32);
+
+ /* Inject a different interrupt and wait for a different message. */
+ hf_interrupt_inject(SERVICE_VM1, 0, EXTERNAL_INTERRUPT_ID_B);
+ run_res = spci_run(SERVICE_VM1, 0);
+ EXPECT_EQ(run_res.func, SPCI_MSG_SEND_32);
+ EXPECT_EQ(spci_msg_send_size(run_res), sizeof(expected_response_2));
+ EXPECT_EQ(memcmp(mb.recv, expected_response_2,
+ sizeof(expected_response_2)),
+ 0);
+ EXPECT_EQ(spci_rx_release().func, SPCI_SUCCESS_32);
+}
+
+/**
+ * Inject an interrupt then send a message to the interrupt VM, which will send
+ * a message back each time. This is to test that interrupt injection doesn't
+ * interfere with message reception.
+ */
+TEST(interrupts, inject_interrupt_message)
+{
+ const char expected_response[] = "Got IRQ 07.";
+ const char message[] = "Ping";
+ const char expected_response_2[] = "Got IRQ 05.";
+ struct spci_value run_res;
+ struct mailbox_buffers mb = set_up_mailbox();
+
+ SERVICE_SELECT(SERVICE_VM1, "interruptible", mb.send);
+
+ run_res = spci_run(SERVICE_VM1, 0);
+ EXPECT_EQ(run_res.func, SPCI_MSG_WAIT_32);
+ EXPECT_EQ(run_res.arg2, SPCI_SLEEP_INDEFINITE);
+
+ /* Inject the interrupt and wait for a message. */
+ hf_interrupt_inject(SERVICE_VM1, 0, EXTERNAL_INTERRUPT_ID_A);
+ run_res = spci_run(SERVICE_VM1, 0);
+ EXPECT_EQ(run_res.func, SPCI_MSG_SEND_32);
+ EXPECT_EQ(spci_msg_send_size(run_res), sizeof(expected_response));
+ EXPECT_EQ(memcmp(mb.recv, expected_response, sizeof(expected_response)),
+ 0);
+ EXPECT_EQ(spci_rx_release().func, SPCI_SUCCESS_32);
+
+ /* Let the service go back to waiting before sending the message. */
+ run_res = spci_run(SERVICE_VM1, 0);
+ EXPECT_EQ(run_res.func, SPCI_MSG_WAIT_32);
+ EXPECT_EQ(run_res.arg2, SPCI_SLEEP_INDEFINITE);
+
+ /* Now send a message to the secondary. */
+ memcpy_s(mb.send, SPCI_MSG_PAYLOAD_MAX, message, sizeof(message));
+ EXPECT_EQ(
+ spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM1, sizeof(message), 0)
+ .func,
+ SPCI_SUCCESS_32);
+ run_res = spci_run(SERVICE_VM1, 0);
+ EXPECT_EQ(run_res.func, SPCI_MSG_SEND_32);
+ EXPECT_EQ(spci_msg_send_size(run_res), sizeof(expected_response_2));
+ EXPECT_EQ(memcmp(mb.recv, expected_response_2,
+ sizeof(expected_response_2)),
+ 0);
+ EXPECT_EQ(spci_rx_release().func, SPCI_SUCCESS_32);
+}
+
+/**
+ * Inject an interrupt which the target VM has not enabled, and then send a
+ * message telling it to enable that interrupt ID. It should then (and only
+ * then) send a message back.
+ */
+TEST(interrupts, inject_interrupt_disabled)
+{
+ const char expected_response[] = "Got IRQ 09.";
+ const char message[] = "Enable interrupt C";
+ struct spci_value run_res;
+ struct mailbox_buffers mb = set_up_mailbox();
+
+ SERVICE_SELECT(SERVICE_VM1, "interruptible", mb.send);
+
+ /* Inject the interrupt and expect not to get a message. */
+ hf_interrupt_inject(SERVICE_VM1, 0, EXTERNAL_INTERRUPT_ID_C);
+ run_res = spci_run(SERVICE_VM1, 0);
+ EXPECT_EQ(run_res.func, SPCI_MSG_WAIT_32);
+ EXPECT_EQ(run_res.arg2, SPCI_SLEEP_INDEFINITE);
+
+ /*
+ * Now send a message to the secondary to enable the interrupt ID, and
+ * expect the response from the interrupt we sent before.
+ */
+ memcpy_s(mb.send, SPCI_MSG_PAYLOAD_MAX, message, sizeof(message));
+ EXPECT_EQ(
+ spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM1, sizeof(message), 0)
+ .func,
+ SPCI_SUCCESS_32);
+ run_res = spci_run(SERVICE_VM1, 0);
+ EXPECT_EQ(run_res.func, SPCI_MSG_SEND_32);
+ EXPECT_EQ(spci_msg_send_size(run_res), sizeof(expected_response));
+ EXPECT_EQ(memcmp(mb.recv, expected_response, sizeof(expected_response)),
+ 0);
+ EXPECT_EQ(spci_rx_release().func, SPCI_SUCCESS_32);
+}
+
+/**
+ * If a secondary VM has an enabled and pending interrupt, even if interrupts
+ * are disabled globally via PSTATE, then hf_mailbox_receive should not block
+ * even if `block` is true.
+ */
+TEST(interrupts, pending_interrupt_no_blocking_receive)
+{
+ const char expected_response[] = "Done waiting";
+ struct spci_value run_res;
+ struct mailbox_buffers mb = set_up_mailbox();
+
+ SERVICE_SELECT(SERVICE_VM1, "receive_block", mb.send);
+
+ /*
+ * Inject the interrupt and run the VM. It should disable interrupts
+ * globally, enable the specific interrupt, and then send us a message
+ * back after failing to receive a message a few times.
+ */
+ hf_interrupt_inject(SERVICE_VM1, 0, EXTERNAL_INTERRUPT_ID_A);
+ run_res = spci_run(SERVICE_VM1, 0);
+ EXPECT_EQ(run_res.func, SPCI_MSG_SEND_32);
+ EXPECT_EQ(spci_msg_send_size(run_res), sizeof(expected_response));
+ EXPECT_EQ(memcmp(mb.recv, expected_response, sizeof(expected_response)),
+ 0);
+ EXPECT_EQ(spci_rx_release().func, SPCI_SUCCESS_32);
+}
+
+/**
+ * If a secondary VM has an enabled and pending interrupt, even if interrupts
+ * are disabled globally via PSTATE, then WFI should be treated as a no-op and
+ * not return to the primary.
+ */
+TEST(interrupts, pending_interrupt_wfi_not_trapped)
+{
+ const char expected_response[] = "Done waiting";
+ struct spci_value run_res;
+ struct mailbox_buffers mb = set_up_mailbox();
+
+ SERVICE_SELECT(SERVICE_VM1, "wfi", mb.send);
+
+ /*
+ * Inject the interrupt and run the VM. It should disable interrupts
+ * globally, enable the specific interrupt, and then send us a message
+ * back after running WFI a few times.
+ */
+ hf_interrupt_inject(SERVICE_VM1, 0, EXTERNAL_INTERRUPT_ID_A);
+ run_res = spci_run(SERVICE_VM1, 0);
+ EXPECT_EQ(run_res.func, SPCI_MSG_SEND_32);
+ EXPECT_EQ(spci_msg_send_size(run_res), sizeof(expected_response));
+ EXPECT_EQ(memcmp(mb.recv, expected_response, sizeof(expected_response)),
+ 0);
+ EXPECT_EQ(spci_rx_release().func, SPCI_SUCCESS_32);
+}
+
+/*
+ * Deliver an interrupt and a message to the same vCPU and check that both are
+ * delivered the next time the vCPU is run.
+ */
+TEST(interrupts, deliver_interrupt_and_message)
+{
+ const char message[] = "I\'ll see you again.";
+ struct spci_value run_res;
+ struct mailbox_buffers mb = set_up_mailbox();
+
+ SERVICE_SELECT(SERVICE_VM1, "interruptible_echo", mb.send);
+
+ run_res = spci_run(SERVICE_VM1, 0);
+ EXPECT_EQ(run_res.func, SPCI_MSG_WAIT_32);
+ EXPECT_EQ(run_res.arg2, SPCI_SLEEP_INDEFINITE);
+
+ /* Queue both the message and the interrupt before running the vCPU. */
+ memcpy_s(mb.send, SPCI_MSG_PAYLOAD_MAX, message, sizeof(message));
+ EXPECT_EQ(
+ spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM1, sizeof(message), 0)
+ .func,
+ SPCI_SUCCESS_32);
+ hf_interrupt_inject(SERVICE_VM1, 0, EXTERNAL_INTERRUPT_ID_A);
+ run_res = spci_run(SERVICE_VM1, 0);
+ EXPECT_EQ(run_res.func, SPCI_MSG_SEND_32);
+ EXPECT_EQ(spci_msg_send_size(run_res), sizeof(message));
+ EXPECT_EQ(memcmp(mb.recv, message, sizeof(message)), 0);
+ EXPECT_EQ(spci_rx_release().func, SPCI_SUCCESS_32);
+}
diff --git a/test/vmapi/primary_with_secondaries/mailbox.c b/test/vmapi/primary_with_secondaries/mailbox.c
new file mode 100644
index 0000000..b96c595
--- /dev/null
+++ b/test/vmapi/primary_with_secondaries/mailbox.c
@@ -0,0 +1,365 @@
+/*
+ * Copyright 2019 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <stdint.h>
+
+#include "hf/spci.h"
+#include "hf/std.h"
+
+#include "vmapi/hf/call.h"
+
+#include "primary_with_secondary.h"
+#include "test/hftest.h"
+#include "test/vmapi/spci.h"
+
/**
 * Reverses the given array in place by swapping elements inwards from both
 * ends.
 */
static void reverse(char *s, size_t len)
{
	size_t left = 0;
	size_t right;

	if (len == 0) {
		return;
	}

	right = len - 1;
	while (left < right) {
		char tmp = s[left];

		s[left] = s[right];
		s[right] = tmp;
		++left;
		--right;
	}
}
+
/**
 * Advances the array to its next lexicographic permutation in place. A
 * non-increasing array (already the last permutation) is left unchanged.
 */
static void next_permutation(char *s, size_t len)
{
	size_t pivot;

	/*
	 * Scan right-to-left for the first ascent, i.e. the rightmost index
	 * with s[pivot] < s[pivot + 1]. The unsigned wrap of pivot ends the
	 * loop once the whole array has been scanned.
	 */
	for (pivot = len - 2; pivot < len; pivot--) {
		const char pivot_value = s[pivot];
		size_t succ;
		size_t lo;
		size_t hi;

		if (pivot_value >= s[pivot + 1]) {
			continue;
		}

		/* Rightmost element strictly greater than the pivot. */
		for (succ = len - 1; pivot_value >= s[succ]; succ--) {
		}

		s[pivot] = s[succ];
		s[succ] = pivot_value;

		/* Reverse the (descending) suffix so it becomes ascending. */
		for (lo = pivot + 1, hi = len - 1; lo < hi; lo++, hi--) {
			char tmp = s[lo];

			s[lo] = s[hi];
			s[hi] = tmp;
		}
		return;
	}
}
+
/*
 * Every mailbox test must leave the primary's RX buffer released, so a
 * further release here must be denied. Catches tests that forget to release.
 */
TEAR_DOWN(mailbox)
{
	EXPECT_SPCI_ERROR(spci_rx_release(), SPCI_DENIED);
}
+
+/**
+ * Clearing an empty mailbox is an error.
+ */
+TEST(mailbox, clear_empty)
+{
+ EXPECT_SPCI_ERROR(spci_rx_release(), SPCI_DENIED);
+ EXPECT_SPCI_ERROR(spci_rx_release(), SPCI_DENIED);
+ EXPECT_SPCI_ERROR(spci_rx_release(), SPCI_DENIED);
+}
+
+/**
+ * Send and receive the same message from the echo VM.
+ */
+TEST(mailbox, echo)
+{
+ const char message[] = "Echo this back to me!";
+ struct spci_value run_res;
+ struct mailbox_buffers mb = set_up_mailbox();
+
+ SERVICE_SELECT(SERVICE_VM1, "echo", mb.send);
+
+ run_res = spci_run(SERVICE_VM1, 0);
+ EXPECT_EQ(run_res.func, SPCI_MSG_WAIT_32);
+ EXPECT_EQ(run_res.arg2, SPCI_SLEEP_INDEFINITE);
+
+ /* Set the message, echo it and check it didn't change. */
+ memcpy_s(mb.send, SPCI_MSG_PAYLOAD_MAX, message, sizeof(message));
+ EXPECT_EQ(
+ spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM1, sizeof(message), 0)
+ .func,
+ SPCI_SUCCESS_32);
+ run_res = spci_run(SERVICE_VM1, 0);
+ EXPECT_EQ(run_res.func, SPCI_MSG_SEND_32);
+ EXPECT_EQ(spci_msg_send_size(run_res), sizeof(message));
+ EXPECT_EQ(memcmp(mb.recv, message, sizeof(message)), 0);
+ EXPECT_EQ(spci_rx_release().func, SPCI_SUCCESS_32);
+}
+
+/**
+ * Repeatedly send a message and receive it back from the echo VM.
+ */
+TEST(mailbox, repeated_echo)
+{
+ char message[] = "Echo this back to me!";
+ struct spci_value run_res;
+ uint8_t i;
+ struct mailbox_buffers mb = set_up_mailbox();
+
+ SERVICE_SELECT(SERVICE_VM1, "echo", mb.send);
+
+ for (i = 0; i < 100; i++) {
+ /* Run secondary until it reaches the wait for messages. */
+ run_res = spci_run(SERVICE_VM1, 0);
+ EXPECT_EQ(run_res.func, SPCI_MSG_WAIT_32);
+ EXPECT_EQ(run_res.arg2, SPCI_SLEEP_INDEFINITE);
+
+ /* Set the message, echo it and check it didn't change. */
+ next_permutation(message, sizeof(message) - 1);
+ memcpy_s(mb.send, SPCI_MSG_PAYLOAD_MAX, message,
+ sizeof(message));
+ EXPECT_EQ(spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM1,
+ sizeof(message), 0)
+ .func,
+ SPCI_SUCCESS_32);
+ run_res = spci_run(SERVICE_VM1, 0);
+ EXPECT_EQ(run_res.func, SPCI_MSG_SEND_32);
+ EXPECT_EQ(spci_msg_send_size(run_res), sizeof(message));
+ EXPECT_EQ(memcmp(mb.recv, message, sizeof(message)), 0);
+ EXPECT_EQ(spci_rx_release().func, SPCI_SUCCESS_32);
+ }
+}
+
/**
 * Sends a message through a chain of relay services: the primary sends to
 * SERVICE_VM1, which forwards to SERVICE_VM2, which finally sends it back
 * here. The payload must arrive intact.
 */
TEST(mailbox, relay)
{
	const char message[] = "Send this round the relay!";
	struct spci_value run_res;
	struct mailbox_buffers mb = set_up_mailbox();

	/* Both secondaries run the same forwarding service. */
	SERVICE_SELECT(SERVICE_VM1, "relay", mb.send);
	SERVICE_SELECT(SERVICE_VM2, "relay", mb.send);

	/* Run each service until it blocks waiting for a message. */
	run_res = spci_run(SERVICE_VM1, 0);
	EXPECT_EQ(run_res.func, SPCI_MSG_WAIT_32);
	EXPECT_EQ(run_res.arg2, SPCI_SLEEP_INDEFINITE);
	run_res = spci_run(SERVICE_VM2, 0);
	EXPECT_EQ(run_res.func, SPCI_MSG_WAIT_32);
	EXPECT_EQ(run_res.arg2, SPCI_SLEEP_INDEFINITE);

	/*
	 * Build the message chain so the message is sent from here to
	 * SERVICE_VM1, then to SERVICE_VM2 and finally back to here. The
	 * payload is prefixed with the list of next-hop VM IDs.
	 */
	{
		spci_vm_id_t *chain = (spci_vm_id_t *)mb.send;
		/*
		 * NOTE(review): chain entries are spci_vm_id_t but the values
		 * are converted with htole32 — if the ID type is narrower
		 * than 32 bits this relies on little-endian truncation being
		 * harmless; confirm against the width of spci_vm_id_t.
		 */
		*chain++ = htole32(SERVICE_VM2);
		*chain++ = htole32(HF_PRIMARY_VM_ID);
		memcpy_s(chain,
			 SPCI_MSG_PAYLOAD_MAX - (2 * sizeof(spci_vm_id_t)),
			 message, sizeof(message));

		/* Total size covers the two chain entries plus the payload. */
		EXPECT_EQ(
			spci_msg_send(
				HF_PRIMARY_VM_ID, SERVICE_VM1,
				sizeof(message) + (2 * sizeof(spci_vm_id_t)), 0)
				.func,
			SPCI_SUCCESS_32);
	}

	/* Let SERVICE_VM1 forward the message. */
	run_res = spci_run(SERVICE_VM1, 0);
	EXPECT_EQ(run_res.func, SPCI_MSG_SEND_32);
	EXPECT_EQ(spci_msg_send_receiver(run_res), SERVICE_VM2);
	EXPECT_EQ(spci_msg_send_size(run_res), 0);

	/* Let SERVICE_VM2 forward the message. */
	run_res = spci_run(SERVICE_VM2, 0);
	EXPECT_EQ(run_res.func, SPCI_MSG_SEND_32);

	/* Ensure the message is intact. */
	EXPECT_EQ(spci_msg_send_receiver(run_res), HF_PRIMARY_VM_ID);
	EXPECT_EQ(spci_msg_send_size(run_res), sizeof(message));
	EXPECT_EQ(memcmp(mb.recv, message, sizeof(message)), 0);
	EXPECT_EQ(spci_rx_release().func, SPCI_SUCCESS_32);
}
+
+/**
+ * Send a message before the secondary VM is configured, but do not register
+ * for notification. Ensure we're not notified.
+ */
+TEST(mailbox, no_primary_to_secondary_notification_on_configure)
+{
+ struct spci_value run_res;
+
+ set_up_mailbox();
+
+ EXPECT_SPCI_ERROR(spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM1, 0, 0),
+ SPCI_BUSY);
+
+ run_res = spci_run(SERVICE_VM1, 0);
+ EXPECT_EQ(run_res.func, SPCI_MSG_WAIT_32);
+ EXPECT_EQ(run_res.arg2, SPCI_SLEEP_INDEFINITE);
+
+ EXPECT_EQ(spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM1, 0, 0).func,
+ SPCI_SUCCESS_32);
+}
+
+/**
+ * Send a message before the secondary VM is configured, and receive a
+ * notification when it configures.
+ */
+TEST(mailbox, secondary_to_primary_notification_on_configure)
+{
+ struct spci_value run_res;
+
+ set_up_mailbox();
+
+ EXPECT_SPCI_ERROR(spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM1, 0,
+ SPCI_MSG_SEND_NOTIFY),
+ SPCI_BUSY);
+
+ /*
+ * Run first VM for it to configure itself. It should result in
+ * notifications having to be issued.
+ */
+ run_res = spci_run(SERVICE_VM1, 0);
+ EXPECT_EQ(run_res.func, SPCI_RX_RELEASE_32);
+
+ /* A single waiter is returned. */
+ EXPECT_EQ(hf_mailbox_waiter_get(SERVICE_VM1), HF_PRIMARY_VM_ID);
+ EXPECT_EQ(hf_mailbox_waiter_get(SERVICE_VM1), -1);
+
+ /* Send should now succeed. */
+ EXPECT_EQ(spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM1, 0, 0).func,
+ SPCI_SUCCESS_32);
+}
+
/**
 * Echoes two messages off a secondary VM. The reply to the second message
 * finds the primary's RX buffer still occupied, so the service must block
 * until the buffer is released and a mailbox-writable interrupt is injected.
 */
TEST(mailbox, primary_to_secondary)
{
	char message[] = "not ready echo";
	struct spci_value run_res;
	struct mailbox_buffers mb = set_up_mailbox();

	SERVICE_SELECT(SERVICE_VM1, "echo_with_notification", mb.send);

	/* Let the service block waiting for its first message. */
	run_res = spci_run(SERVICE_VM1, 0);
	EXPECT_EQ(run_res.func, SPCI_MSG_WAIT_32);
	EXPECT_EQ(run_res.arg2, SPCI_SLEEP_INDEFINITE);

	/* Send a message to echo service, and get response back. */
	memcpy_s(mb.send, SPCI_MSG_PAYLOAD_MAX, message, sizeof(message));
	EXPECT_EQ(
		spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM1, sizeof(message), 0)
			.func,
		SPCI_SUCCESS_32);
	run_res = spci_run(SERVICE_VM1, 0);
	EXPECT_EQ(run_res.func, SPCI_MSG_SEND_32);
	EXPECT_EQ(spci_msg_send_size(run_res), sizeof(message));
	EXPECT_EQ(memcmp(mb.recv, message, sizeof(message)), 0);
	/* Deliberately do NOT release our RX buffer here. */

	/* Let secondary VM continue running so that it will wait again. */
	run_res = spci_run(SERVICE_VM1, 0);
	EXPECT_EQ(run_res.func, SPCI_MSG_WAIT_32);
	EXPECT_EQ(run_res.arg2, SPCI_SLEEP_INDEFINITE);

	/* Without clearing our mailbox, send message again. */
	reverse(message, strnlen_s(message, sizeof(message)));
	memcpy_s(mb.send, SPCI_MSG_PAYLOAD_MAX, message, sizeof(message));

	/*
	 * The send itself succeeds, but the service's echo reply cannot be
	 * delivered while our RX buffer is still full, so running the service
	 * leaves it blocked waiting for our mailbox to become writable.
	 */
	EXPECT_EQ(
		spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM1, sizeof(message), 0)
			.func,
		SPCI_SUCCESS_32);
	run_res = spci_run(SERVICE_VM1, 0);
	EXPECT_EQ(run_res.func, HF_SPCI_RUN_WAIT_FOR_INTERRUPT);
	EXPECT_EQ(run_res.arg2, SPCI_SLEEP_INDEFINITE);

	/* Clear the mailbox. We expect to be told there are pending waiters. */
	EXPECT_EQ(spci_rx_release().func, SPCI_RX_RELEASE_32);

	/* Retrieve a single waiter. */
	EXPECT_EQ(hf_mailbox_waiter_get(HF_PRIMARY_VM_ID), SERVICE_VM1);
	EXPECT_EQ(hf_mailbox_waiter_get(HF_PRIMARY_VM_ID), -1);

	/*
	 * Inject interrupt into VM and let it run again. We should receive
	 * the echoed message.
	 */
	EXPECT_EQ(
		hf_interrupt_inject(SERVICE_VM1, 0, HF_MAILBOX_WRITABLE_INTID),
		1);
	run_res = spci_run(SERVICE_VM1, 0);
	EXPECT_EQ(run_res.func, SPCI_MSG_SEND_32);
	EXPECT_EQ(spci_msg_send_size(run_res), sizeof(message));
	EXPECT_EQ(memcmp(mb.recv, message, sizeof(message)), 0);
	EXPECT_EQ(spci_rx_release().func, SPCI_SUCCESS_32);
}
+
+/**
+ * Sends two messages to secondary VM without letting it run, so second message
+ * won't go through. Ensure that a notification is delivered when secondary VM
+ * clears the mailbox.
+ */
+TEST(mailbox, secondary_to_primary_notification)
+{
+ const char message[] = "not ready echo";
+ struct spci_value run_res;
+ struct mailbox_buffers mb = set_up_mailbox();
+
+ SERVICE_SELECT(SERVICE_VM1, "echo_with_notification", mb.send);
+
+ run_res = spci_run(SERVICE_VM1, 0);
+ EXPECT_EQ(run_res.func, SPCI_MSG_WAIT_32);
+ EXPECT_EQ(run_res.arg2, SPCI_SLEEP_INDEFINITE);
+
+ /* Send a message to echo service twice. The second should fail. */
+ memcpy_s(mb.send, SPCI_MSG_PAYLOAD_MAX, message, sizeof(message));
+ EXPECT_EQ(
+ spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM1, sizeof(message), 0)
+ .func,
+ SPCI_SUCCESS_32);
+ EXPECT_SPCI_ERROR(spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM1,
+ sizeof(message), SPCI_MSG_SEND_NOTIFY),
+ SPCI_BUSY);
+
+ /* Receive a reply for the first message. */
+ run_res = spci_run(SERVICE_VM1, 0);
+ EXPECT_EQ(run_res.func, SPCI_MSG_SEND_32);
+ EXPECT_EQ(spci_msg_send_size(run_res), sizeof(message));
+ EXPECT_EQ(memcmp(mb.recv, message, sizeof(message)), 0);
+ EXPECT_EQ(spci_rx_release().func, SPCI_SUCCESS_32);
+
+ /* Run VM again so that it clears its mailbox. */
+ run_res = spci_run(SERVICE_VM1, 0);
+ EXPECT_EQ(run_res.func, SPCI_RX_RELEASE_32);
+
+ /* Retrieve a single waiter. */
+ EXPECT_EQ(hf_mailbox_waiter_get(SERVICE_VM1), HF_PRIMARY_VM_ID);
+ EXPECT_EQ(hf_mailbox_waiter_get(SERVICE_VM1), -1);
+
+ /* Send should now succeed. */
+ EXPECT_EQ(
+ spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM1, sizeof(message), 0)
+ .func,
+ SPCI_SUCCESS_32);
+}
diff --git a/test/vmapi/primary_with_secondaries/manifest.dts b/test/vmapi/primary_with_secondaries/manifest.dts
new file mode 100644
index 0000000..3d9a73a
--- /dev/null
+++ b/test/vmapi/primary_with_secondaries/manifest.dts
@@ -0,0 +1,49 @@
+/*
+ * Copyright 2019 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
/dts-v1/;
/plugin/;

/* Test manifest: one primary VM plus three secondary service VMs. */
&{/} {
	hypervisor {
		compatible = "hafnium,hafnium";

		/* The primary VM runs the test framework itself. */
		vm1 {
			debug_name = "primary_with_secondaries_test";
			kernel_filename = "primary_with_secondaries_test";
		};

		/* Secondary service VMs, 1 MiB of memory each. */
		vm2 {
			debug_name = "services1";
			vcpu_count = <1>;
			mem_size = <0x100000>;
			kernel_filename = "services1";
		};

		vm3 {
			debug_name = "services2";
			vcpu_count = <1>;
			mem_size = <0x100000>;
			kernel_filename = "services2";
		};

		/* Unlike the others, services3 runs with two vCPUs. */
		vm4 {
			debug_name = "services3";
			vcpu_count = <2>;
			mem_size = <0x100000>;
			kernel_filename = "services3";
		};
	};
};
diff --git a/test/vmapi/primary_with_secondaries/memory_sharing.c b/test/vmapi/primary_with_secondaries/memory_sharing.c
new file mode 100644
index 0000000..fd742f0
--- /dev/null
+++ b/test/vmapi/primary_with_secondaries/memory_sharing.c
@@ -0,0 +1,1938 @@
+/*
+ * Copyright 2018 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <stdint.h>
+
+#include "hf/mm.h"
+#include "hf/std.h"
+
+#include "vmapi/hf/call.h"
+
+#include "primary_with_secondary.h"
+#include "test/hftest.h"
+#include "test/vmapi/exception_handler.h"
+#include "test/vmapi/spci.h"
+
/*
 * Scratch memory used as the source of every shared/lent/donated region in
 * these tests; page-aligned so whole pages can be handed over.
 */
alignas(PAGE_SIZE) static uint8_t pages[4 * PAGE_SIZE];
+
/**
 * Helper function to test sending memory in the different configurations.
 *
 * For every candidate receiver (except avoid_vm, for which the send would
 * legitimately succeed), tries every access permission crossed with every
 * shareability value and every normal-memory cacheability and device-memory
 * attribute, and expects each send to fail with SPCI_INVALID_PARAMETERS.
 */
static void check_cannot_send_memory(
	struct mailbox_buffers mb, uint32_t mode,
	struct spci_memory_region_constituent constituents[],
	int constituent_count, int32_t avoid_vm)

{
	enum spci_memory_access access[] = {SPCI_MEMORY_RO_NX, SPCI_MEMORY_RO_X,
					    SPCI_MEMORY_RW_NX,
					    SPCI_MEMORY_RW_X};
	enum spci_memory_cacheability cacheability[] = {
		SPCI_MEMORY_CACHE_NON_CACHEABLE,
		SPCI_MEMORY_CACHE_WRITE_THROUGH, SPCI_MEMORY_CACHE_WRITE_BACK};
	/*
	 * NOTE(review): the device attributes are declared with the
	 * cacheability enum type; presumably the SPCI_MEMORY_DEV_* constants
	 * live in that same enum — confirm against spci.h.
	 */
	enum spci_memory_cacheability device[] = {
		SPCI_MEMORY_DEV_NGNRNE, SPCI_MEMORY_DEV_NGNRE,
		SPCI_MEMORY_DEV_NGRE, SPCI_MEMORY_DEV_GRE};
	/* Includes the reserved encoding, which must also be rejected. */
	enum spci_memory_shareability shareability[] = {
		SPCI_MEMORY_SHARE_NON_SHAREABLE, SPCI_MEMORY_RESERVED,
		SPCI_MEMORY_OUTER_SHAREABLE, SPCI_MEMORY_INNER_SHAREABLE};
	uint32_t vms[] = {HF_PRIMARY_VM_ID, SERVICE_VM1, SERVICE_VM2};

	size_t i = 0;
	size_t j = 0;
	size_t k = 0;
	size_t l = 0;

	for (i = 0; i < ARRAY_SIZE(vms); ++i) {
		/* Optionally skip one VM as the send would succeed. */
		if (vms[i] == avoid_vm) {
			continue;
		}
		for (j = 0; j < ARRAY_SIZE(access); ++j) {
			for (k = 0; k < ARRAY_SIZE(shareability); ++k) {
				/* Normal memory with each cacheability. */
				for (l = 0; l < ARRAY_SIZE(cacheability); ++l) {
					uint32_t msg_size =
						spci_memory_region_init(
							mb.send,
							HF_PRIMARY_VM_ID,
							vms[i], constituents,
							constituent_count, 0, 0,
							access[j],
							SPCI_MEMORY_NORMAL_MEM,
							cacheability[l],
							shareability[k]);
					EXPECT_SPCI_ERROR(
						spci_msg_send(HF_PRIMARY_VM_ID,
							      vms[i], msg_size,
							      mode),
						SPCI_INVALID_PARAMETERS);
				}
				/* Device memory with each attribute. */
				for (l = 0; l < ARRAY_SIZE(device); ++l) {
					uint32_t msg_size =
						spci_memory_region_init(
							mb.send,
							HF_PRIMARY_VM_ID,
							vms[i], constituents,
							constituent_count, 0, 0,
							access[j],
							SPCI_MEMORY_DEVICE_MEM,
							device[l],
							shareability[k]);
					EXPECT_SPCI_ERROR(
						spci_msg_send(HF_PRIMARY_VM_ID,
							      vms[i], msg_size,
							      mode),
						SPCI_INVALID_PARAMETERS);
				}
			}
		}
	}
}
+
/**
 * Helper function to test lending memory in the different configurations.
 * Thin wrapper around check_cannot_send_memory() using the legacy LEND mode.
 */
static void check_cannot_lend_memory(
	struct mailbox_buffers mb,
	struct spci_memory_region_constituent constituents[],
	int constituent_count, int32_t avoid_vm)

{
	check_cannot_send_memory(mb, SPCI_MSG_SEND_LEGACY_MEMORY_LEND,
				 constituents, constituent_count, avoid_vm);
}
+
/**
 * Helper function to test sharing memory in the different configurations.
 * Thin wrapper around check_cannot_send_memory() using the legacy SHARE mode.
 */
static void check_cannot_share_memory(
	struct mailbox_buffers mb,
	struct spci_memory_region_constituent constituents[],
	int constituent_count, int32_t avoid_vm)

{
	check_cannot_send_memory(mb, SPCI_MSG_SEND_LEGACY_MEMORY_SHARE,
				 constituents, constituent_count, avoid_vm);
}
+
+/**
+ * Tries donating memory in available modes with different VMs and asserts that
+ * it will fail to all except the supplied VM ID as this would succeed if it
+ * is the only borrower.
+ */
+static void check_cannot_donate_memory(
+ struct mailbox_buffers mb,
+ struct spci_memory_region_constituent constituents[],
+ int constituent_count, int32_t avoid_vm)
+{
+ uint32_t vms[] = {HF_PRIMARY_VM_ID, SERVICE_VM1, SERVICE_VM2};
+
+ size_t i;
+ for (i = 0; i < ARRAY_SIZE(vms); ++i) {
+ uint32_t msg_size;
+ /* Optionally skip one VM as the donate would succeed. */
+ if (vms[i] == avoid_vm) {
+ continue;
+ }
+ msg_size = spci_memory_region_init(
+ mb.send, HF_PRIMARY_VM_ID, vms[i], constituents,
+ constituent_count, 0, 0, SPCI_MEMORY_RW_X,
+ SPCI_MEMORY_NORMAL_MEM, SPCI_MEMORY_CACHE_WRITE_BACK,
+ SPCI_MEMORY_OUTER_SHAREABLE);
+ EXPECT_SPCI_ERROR(
+ spci_msg_send(HF_PRIMARY_VM_ID, vms[i], msg_size,
+ SPCI_MSG_SEND_LEGACY_MEMORY_DONATE),
+ SPCI_INVALID_PARAMETERS);
+ }
+}
+
+/**
+ * Tries relinquishing memory with different VMs and asserts that
+ * it will fail.
+ */
+static void check_cannot_relinquish_memory(
+ struct mailbox_buffers mb,
+ struct spci_memory_region_constituent constituents[],
+ int constituent_count)
+{
+ uint32_t vms[] = {HF_PRIMARY_VM_ID, SERVICE_VM1, SERVICE_VM2};
+
+ size_t i;
+ size_t j;
+ for (i = 0; i < ARRAY_SIZE(vms); ++i) {
+ for (j = 0; j < ARRAY_SIZE(vms); ++j) {
+ uint32_t msg_size = spci_memory_region_init(
+ mb.send, vms[j], vms[i], constituents,
+ constituent_count, 0, 0, SPCI_MEMORY_RW_X,
+ SPCI_MEMORY_NORMAL_MEM,
+ SPCI_MEMORY_CACHE_WRITE_BACK,
+ SPCI_MEMORY_OUTER_SHAREABLE);
+ EXPECT_SPCI_ERROR(
+ spci_msg_send(
+ vms[j], vms[i], msg_size,
+ SPCI_MSG_SEND_LEGACY_MEMORY_RELINQUISH),
+ SPCI_INVALID_PARAMETERS);
+ }
+ }
+}
+
/*
 * Every memory-sharing test must leave the primary's RX buffer released, so
 * a further release here must be denied.
 */
TEAR_DOWN(memory_sharing)
{
	EXPECT_SPCI_ERROR(spci_rx_release(), SPCI_DENIED);
}
+
+/**
+ * Sharing memory concurrently gives both VMs access to the memory so it can be
+ * used for communication.
+ */
+TEST(memory_sharing, concurrent)
+{
+ struct spci_value run_res;
+ struct mailbox_buffers mb = set_up_mailbox();
+ uint8_t *ptr = pages;
+ uint32_t msg_size;
+ struct spci_memory_region_constituent constituents[] = {
+ {.address = (uint64_t)pages, .page_count = 1},
+ };
+
+ SERVICE_SELECT(SERVICE_VM1, "memory_increment", mb.send);
+
+ memset_s(ptr, sizeof(pages), 'a', PAGE_SIZE);
+
+ msg_size = spci_memory_region_init(
+ mb.send, HF_PRIMARY_VM_ID, SERVICE_VM1, constituents,
+ ARRAY_SIZE(constituents), 0, SPCI_MEMORY_REGION_FLAG_CLEAR,
+ SPCI_MEMORY_RW_X, SPCI_MEMORY_NORMAL_MEM,
+ SPCI_MEMORY_CACHE_WRITE_BACK, SPCI_MEMORY_OUTER_SHAREABLE);
+
+ EXPECT_EQ(spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM1, msg_size,
+ SPCI_MSG_SEND_LEGACY_MEMORY_SHARE)
+ .func,
+ SPCI_SUCCESS_32);
+
+ run_res = spci_run(SERVICE_VM1, 0);
+ EXPECT_EQ(run_res.func, SPCI_YIELD_32);
+
+ for (int i = 0; i < PAGE_SIZE; ++i) {
+ pages[i] = i;
+ }
+
+ run_res = spci_run(SERVICE_VM1, 0);
+ EXPECT_EQ(run_res.func, SPCI_MSG_SEND_32);
+ EXPECT_EQ(spci_rx_release().func, SPCI_SUCCESS_32);
+
+ for (int i = 0; i < PAGE_SIZE; ++i) {
+ uint8_t value = i + 1;
+
+ EXPECT_EQ(pages[i], value);
+ }
+}
+
+/**
+ * Memory shared concurrently can be returned to the owner.
+ */
+TEST(memory_sharing, share_concurrently_and_get_back)
+{
+ struct spci_value run_res;
+ struct mailbox_buffers mb = set_up_mailbox();
+ uint8_t *ptr = pages;
+ uint32_t msg_size;
+ struct spci_memory_region_constituent constituents[] = {
+ {.address = (uint64_t)pages, .page_count = 1},
+ };
+
+ SERVICE_SELECT(SERVICE_VM1, "spci_memory_lend_relinquish", mb.send);
+
+ /* Dirty the memory before sharing it. */
+ memset_s(ptr, sizeof(pages), 'b', PAGE_SIZE);
+
+ msg_size = spci_memory_region_init(
+ mb.send, HF_PRIMARY_VM_ID, SERVICE_VM1, constituents,
+ ARRAY_SIZE(constituents), 0, 0, SPCI_MEMORY_RW_X,
+ SPCI_MEMORY_NORMAL_MEM, SPCI_MEMORY_CACHE_WRITE_BACK,
+ SPCI_MEMORY_OUTER_SHAREABLE);
+ EXPECT_EQ(spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM1, msg_size,
+ SPCI_MSG_SEND_LEGACY_MEMORY_SHARE)
+ .func,
+ SPCI_SUCCESS_32);
+
+ /* Let the memory be returned. */
+ run_res = spci_run(SERVICE_VM1, 0);
+ EXPECT_EQ(run_res.func, SPCI_MSG_SEND_32);
+ for (int i = 0; i < PAGE_SIZE; ++i) {
+ ASSERT_EQ(ptr[i], 'c');
+ }
+ EXPECT_EQ(spci_rx_release().func, SPCI_SUCCESS_32);
+
+ run_res = spci_run(SERVICE_VM1, 0);
+ EXPECT_EQ(exception_handler_receive_exception_count(&run_res, mb.recv),
+ 1);
+}
+
+/**
+ * Device address space cannot be shared, only normal memory.
+ */
+TEST(memory_sharing, cannot_share_device_memory)
+{
+ struct mailbox_buffers mb = set_up_mailbox();
+ struct spci_memory_region_constituent constituents[] = {
+ {.address = PAGE_SIZE, .page_count = 1},
+ };
+
+ SERVICE_SELECT(SERVICE_VM1, "spci_memory_return", mb.send);
+ SERVICE_SELECT(SERVICE_VM2, "spci_memory_return", mb.send);
+
+ check_cannot_lend_memory(mb, constituents, ARRAY_SIZE(constituents),
+ -1);
+ check_cannot_share_memory(mb, constituents, ARRAY_SIZE(constituents),
+ -1);
+ check_cannot_donate_memory(mb, constituents, ARRAY_SIZE(constituents),
+ -1);
+}
+
+/**
+ * Check that memory can be lent and is accessible by both parties.
+ */
+TEST(memory_sharing, lend_relinquish)
+{
+ struct spci_value run_res;
+ struct mailbox_buffers mb = set_up_mailbox();
+ uint8_t *ptr = pages;
+ uint32_t msg_size;
+
+ SERVICE_SELECT(SERVICE_VM1, "spci_memory_lend_relinquish", mb.send);
+
+ /* Initialise the memory before giving it. */
+ memset_s(ptr, sizeof(pages), 'b', PAGE_SIZE);
+
+ struct spci_memory_region_constituent constituents[] = {
+ {.address = (uint64_t)pages, .page_count = 1},
+ {.address = (uint64_t)pages + PAGE_SIZE, .page_count = 2},
+ };
+
+ msg_size = spci_memory_region_init(
+ mb.send, HF_PRIMARY_VM_ID, SERVICE_VM1, constituents,
+ ARRAY_SIZE(constituents), 0, 0, SPCI_MEMORY_RW_X,
+ SPCI_MEMORY_NORMAL_MEM, SPCI_MEMORY_CACHE_WRITE_BACK,
+ SPCI_MEMORY_OUTER_SHAREABLE);
+
+ EXPECT_EQ(spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM1, msg_size,
+ SPCI_MSG_SEND_LEGACY_MEMORY_LEND)
+ .func,
+ SPCI_SUCCESS_32);
+ run_res = spci_run(SERVICE_VM1, 0);
+
+ /* Let the memory be returned. */
+ EXPECT_EQ(run_res.func, SPCI_MSG_SEND_32);
+ EXPECT_EQ(spci_rx_release().func, SPCI_SUCCESS_32);
+
+ /* Ensure that the secondary VM accessed the region. */
+ for (int i = 0; i < PAGE_SIZE; ++i) {
+ ASSERT_EQ(ptr[i], 'c');
+ }
+
+ run_res = spci_run(SERVICE_VM1, 0);
+ EXPECT_EQ(exception_handler_receive_exception_count(&run_res, mb.recv),
+ 1);
+}
+
+/**
+ * Check that memory that is donated can't be relinquished.
+ */
+TEST(memory_sharing, donate_relinquish)
+{
+ struct spci_value run_res;
+ struct mailbox_buffers mb = set_up_mailbox();
+ uint8_t *ptr = pages;
+ uint32_t msg_size;
+
+ SERVICE_SELECT(SERVICE_VM1, "spci_memory_donate_relinquish", mb.send);
+
+ /* Initialise the memory before giving it. */
+ memset_s(ptr, sizeof(pages), 'b', PAGE_SIZE);
+
+ struct spci_memory_region_constituent constituents[] = {
+ {.address = (uint64_t)pages, .page_count = 1},
+ {.address = (uint64_t)pages + PAGE_SIZE, .page_count = 2},
+ };
+
+ msg_size = spci_memory_region_init(
+ mb.send, HF_PRIMARY_VM_ID, SERVICE_VM1, constituents,
+ ARRAY_SIZE(constituents), 0, 0, SPCI_MEMORY_RW_X,
+ SPCI_MEMORY_NORMAL_MEM, SPCI_MEMORY_CACHE_WRITE_BACK,
+ SPCI_MEMORY_OUTER_SHAREABLE);
+
+ EXPECT_EQ(spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM1, msg_size,
+ SPCI_MSG_SEND_LEGACY_MEMORY_DONATE)
+ .func,
+ SPCI_SUCCESS_32);
+
+ /*
+ * Let the service access the memory, and try and fail to relinquish it.
+ */
+ run_res = spci_run(SERVICE_VM1, 0);
+ EXPECT_EQ(run_res.func, SPCI_YIELD_32);
+}
+
+/**
+ * Memory given away can be given back.
+ */
+TEST(memory_sharing, give_and_get_back)
+{
+ struct spci_value run_res;
+ struct mailbox_buffers mb = set_up_mailbox();
+ uint8_t *ptr = pages;
+ uint32_t msg_size;
+ struct spci_memory_region_constituent constituents[] = {
+ {.address = (uint64_t)pages, .page_count = 1},
+ };
+
+ SERVICE_SELECT(SERVICE_VM1, "spci_memory_return", mb.send);
+
+ /* Dirty the memory before giving it. */
+ memset_s(ptr, sizeof(pages), 'b', PAGE_SIZE);
+
+ msg_size = spci_memory_region_init(
+ mb.send, HF_PRIMARY_VM_ID, SERVICE_VM1, constituents,
+ ARRAY_SIZE(constituents), 0, 0, SPCI_MEMORY_RW_X,
+ SPCI_MEMORY_NORMAL_MEM, SPCI_MEMORY_CACHE_WRITE_BACK,
+ SPCI_MEMORY_OUTER_SHAREABLE);
+ EXPECT_EQ(spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM1, msg_size,
+ SPCI_MSG_SEND_LEGACY_MEMORY_DONATE)
+ .func,
+ SPCI_SUCCESS_32);
+
+ /* Let the memory be returned. */
+ run_res = spci_run(SERVICE_VM1, 0);
+ EXPECT_EQ(run_res.func, SPCI_MSG_SEND_32);
+ for (int i = 0; i < PAGE_SIZE; ++i) {
+ ASSERT_EQ(ptr[i], 'c');
+ }
+ EXPECT_EQ(spci_rx_release().func, SPCI_SUCCESS_32);
+
+ run_res = spci_run(SERVICE_VM1, 0);
+ EXPECT_EQ(exception_handler_receive_exception_count(&run_res, mb.recv),
+ 1);
+}
+
+/**
+ * Memory that has been lent can be returned to the owner.
+ */
+TEST(memory_sharing, lend_and_get_back)
+{
+ struct spci_value run_res;
+ struct mailbox_buffers mb = set_up_mailbox();
+ uint8_t *ptr = pages;
+ uint32_t msg_size;
+ struct spci_memory_region_constituent constituents[] = {
+ {.address = (uint64_t)pages, .page_count = 1},
+ };
+
+ SERVICE_SELECT(SERVICE_VM1, "spci_memory_lend_relinquish", mb.send);
+
+ /* Dirty the memory before lending it. */
+ memset_s(ptr, sizeof(pages), 'c', PAGE_SIZE);
+
+ msg_size = spci_memory_region_init(
+ mb.send, HF_PRIMARY_VM_ID, SERVICE_VM1, constituents,
+ ARRAY_SIZE(constituents), 0, 0, SPCI_MEMORY_RW_X,
+ SPCI_MEMORY_NORMAL_MEM, SPCI_MEMORY_CACHE_WRITE_BACK,
+ SPCI_MEMORY_OUTER_SHAREABLE);
+ EXPECT_EQ(spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM1, msg_size,
+ SPCI_MSG_SEND_LEGACY_MEMORY_LEND)
+ .func,
+ SPCI_SUCCESS_32);
+
+ /* Let the memory be returned. */
+ run_res = spci_run(SERVICE_VM1, 0);
+ EXPECT_EQ(run_res.func, SPCI_MSG_SEND_32);
+ for (int i = 0; i < PAGE_SIZE; ++i) {
+ ASSERT_EQ(ptr[i], 'd');
+ }
+ EXPECT_EQ(spci_rx_release().func, SPCI_SUCCESS_32);
+
+ run_res = spci_run(SERVICE_VM1, 0);
+ EXPECT_EQ(exception_handler_receive_exception_count(&run_res, mb.recv),
+ 1);
+}
+
+/**
+ * After memory has been returned, it is free to be shared again.
+ */
+TEST(memory_sharing, reshare_after_return)
+{
+ struct spci_value run_res;
+ struct mailbox_buffers mb = set_up_mailbox();
+ uint32_t msg_size;
+ struct spci_memory_region_constituent constituents[] = {
+ {.address = (uint64_t)pages, .page_count = 1},
+ };
+
+ SERVICE_SELECT(SERVICE_VM1, "spci_memory_lend_relinquish", mb.send);
+
+ /* Share the memory initially. */
+ msg_size = spci_memory_region_init(
+ mb.send, HF_PRIMARY_VM_ID, SERVICE_VM1, constituents,
+ ARRAY_SIZE(constituents), 0, 0, SPCI_MEMORY_RW_X,
+ SPCI_MEMORY_NORMAL_MEM, SPCI_MEMORY_CACHE_WRITE_BACK,
+ SPCI_MEMORY_OUTER_SHAREABLE);
+ EXPECT_EQ(spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM1, msg_size,
+ SPCI_MSG_SEND_LEGACY_MEMORY_LEND)
+ .func,
+ SPCI_SUCCESS_32);
+
+ /* Let the memory be returned. */
+ run_res = spci_run(SERVICE_VM1, 0);
+ EXPECT_EQ(run_res.func, SPCI_MSG_SEND_32);
+ EXPECT_EQ(spci_rx_release().func, SPCI_SUCCESS_32);
+
+ /* Share the memory again after it has been returned. */
+ msg_size = spci_memory_region_init(
+ mb.send, HF_PRIMARY_VM_ID, SERVICE_VM1, constituents,
+ ARRAY_SIZE(constituents), 0, 0, SPCI_MEMORY_RW_X,
+ SPCI_MEMORY_NORMAL_MEM, SPCI_MEMORY_CACHE_WRITE_BACK,
+ SPCI_MEMORY_OUTER_SHAREABLE);
+ EXPECT_EQ(spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM1, msg_size,
+ SPCI_MSG_SEND_LEGACY_MEMORY_LEND)
+ .func,
+ SPCI_SUCCESS_32);
+
+ /* Observe the service doesn't fault when accessing the memory. */
+ run_res = spci_run(SERVICE_VM1, 0);
+ EXPECT_EQ(run_res.func, SPCI_MSG_SEND_32);
+ EXPECT_EQ(spci_rx_release().func, SPCI_SUCCESS_32);
+}
+
+/**
+ * After memory has been returned, it is free to be shared with another VM.
+ */
+TEST(memory_sharing, share_elsewhere_after_return)
+{
+ struct spci_value run_res;
+ struct mailbox_buffers mb = set_up_mailbox();
+ uint32_t msg_size;
+ struct spci_memory_region_constituent constituents[] = {
+ {.address = (uint64_t)pages, .page_count = 1},
+ };
+
+ SERVICE_SELECT(SERVICE_VM1, "spci_memory_lend_relinquish", mb.send);
+ SERVICE_SELECT(SERVICE_VM2, "spci_memory_lend_relinquish", mb.send);
+
+ /* Share the memory initially. */
+ msg_size = spci_memory_region_init(
+ mb.send, HF_PRIMARY_VM_ID, SERVICE_VM1, constituents,
+ ARRAY_SIZE(constituents), 0, 0, SPCI_MEMORY_RW_X,
+ SPCI_MEMORY_NORMAL_MEM, SPCI_MEMORY_CACHE_WRITE_BACK,
+ SPCI_MEMORY_OUTER_SHAREABLE);
+ EXPECT_EQ(spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM1, msg_size,
+ SPCI_MSG_SEND_LEGACY_MEMORY_LEND)
+ .func,
+ SPCI_SUCCESS_32);
+
+ /* Let the memory be returned. */
+ run_res = spci_run(SERVICE_VM1, 0);
+ EXPECT_EQ(run_res.func, SPCI_MSG_SEND_32);
+ EXPECT_EQ(spci_rx_release().func, SPCI_SUCCESS_32);
+
+ /* Share the memory with a different VM after it has been returned. */
+ msg_size = spci_memory_region_init(
+ mb.send, HF_PRIMARY_VM_ID, SERVICE_VM2, constituents,
+ ARRAY_SIZE(constituents), 0, 0, SPCI_MEMORY_RW_X,
+ SPCI_MEMORY_NORMAL_MEM, SPCI_MEMORY_CACHE_WRITE_BACK,
+ SPCI_MEMORY_OUTER_SHAREABLE);
+ EXPECT_EQ(spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM2, msg_size,
+ SPCI_MSG_SEND_LEGACY_MEMORY_LEND)
+ .func,
+ SPCI_SUCCESS_32);
+
+ run_res = spci_run(SERVICE_VM1, 0);
+ EXPECT_EQ(exception_handler_receive_exception_count(&run_res, mb.recv),
+ 1);
+}
+
+/**
+ * After memory has been given, it is no longer accessible by the sharing VM.
+ */
+TEST(memory_sharing, give_memory_and_lose_access)
+{
+ struct spci_value run_res;
+ struct mailbox_buffers mb = set_up_mailbox();
+ struct spci_memory_region *memory_region;
+ struct spci_memory_region_constituent *constituents;
+ uint8_t *ptr;
+
+ SERVICE_SELECT(SERVICE_VM1, "give_memory_and_fault", mb.send);
+
+ /* Have the memory be given. */
+ run_res = spci_run(SERVICE_VM1, 0);
+ EXPECT_EQ(run_res.func, SPCI_MSG_SEND_32);
+ EXPECT_EQ(spci_msg_send_attributes(run_res),
+ SPCI_MSG_SEND_LEGACY_MEMORY_DONATE);
+
+ /* Check the memory was cleared. */
+ memory_region = (struct spci_memory_region *)mb.recv;
+ constituents = spci_memory_region_get_constituents(memory_region);
+ ptr = (uint8_t *)constituents[0].address;
+ for (int i = 0; i < PAGE_SIZE; ++i) {
+ ASSERT_EQ(ptr[i], 0);
+ }
+ EXPECT_EQ(spci_rx_release().func, SPCI_SUCCESS_32);
+
+ run_res = spci_run(SERVICE_VM1, 0);
+ EXPECT_EQ(exception_handler_receive_exception_count(&run_res, mb.recv),
+ 1);
+}
+
+/**
+ * After memory has been lent, it is no longer accessible by the sharing VM.
+ */
+TEST(memory_sharing, lend_memory_and_lose_access)
+{
+ struct spci_value run_res;
+ struct mailbox_buffers mb = set_up_mailbox();
+ struct spci_memory_region *memory_region;
+ struct spci_memory_region_constituent *constituents;
+ uint8_t *ptr;
+
+ SERVICE_SELECT(SERVICE_VM1, "lend_memory_and_fault", mb.send);
+
+ /* Have the memory be lent. */
+ run_res = spci_run(SERVICE_VM1, 0);
+ EXPECT_EQ(run_res.func, SPCI_MSG_SEND_32);
+ EXPECT_EQ(spci_msg_send_attributes(run_res),
+ SPCI_MSG_SEND_LEGACY_MEMORY_LEND);
+
+ /* Check the memory was cleared. */
+ memory_region = (struct spci_memory_region *)mb.recv;
+ constituents = spci_memory_region_get_constituents(memory_region);
+ ptr = (uint8_t *)constituents[0].address;
+ for (int i = 0; i < PAGE_SIZE; ++i) {
+ ASSERT_EQ(ptr[i], 0);
+ }
+ EXPECT_EQ(spci_rx_release().func, SPCI_SUCCESS_32);
+
+ run_res = spci_run(SERVICE_VM1, 0);
+ EXPECT_EQ(exception_handler_receive_exception_count(&run_res, mb.recv),
+ 1);
+}
+
+/**
+ * Verify that memory past the upper bound of the donated region cannot be
+ * accessed.
+ */
+TEST(memory_sharing, donate_check_upper_bounds)
+{
+	struct spci_value run_res;
+	struct mailbox_buffers mb = set_up_mailbox();
+	uint8_t *ptr = pages;
+	uint32_t msg_size;
+
+	SERVICE_SELECT(SERVICE_VM1, "spci_donate_check_upper_bound", mb.send);
+
+	/* Initialise the memory before giving it. */
+	memset_s(ptr, sizeof(pages), 'b', 4 * PAGE_SIZE);
+
+	/* Specify non-contiguous memory regions. */
+	struct spci_memory_region_constituent constituents[] = {
+		{.address = (uint64_t)pages, .page_count = 1},
+		{.address = (uint64_t)pages + PAGE_SIZE * 2, .page_count = 1},
+	};
+
+	/*
+	 * Specify that we want to test the first constituent of the donated
+	 * memory region. This is utilised by the test service.
+	 */
+	pages[0] = 0;
+
+	msg_size = spci_memory_region_init(
+		mb.send, HF_PRIMARY_VM_ID, SERVICE_VM1, constituents,
+		ARRAY_SIZE(constituents), 0, 0, SPCI_MEMORY_RW_X,
+		SPCI_MEMORY_NORMAL_MEM, SPCI_MEMORY_CACHE_WRITE_BACK,
+		SPCI_MEMORY_OUTER_SHAREABLE);
+	EXPECT_EQ(spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM1, msg_size,
+				SPCI_MSG_SEND_LEGACY_MEMORY_DONATE)
+			  .func,
+		  SPCI_SUCCESS_32);
+
+	/*
+	 * Exactly one exception is expected; presumably the service reads one
+	 * page past the first constituent's upper bound (service code is
+	 * defined elsewhere).
+	 */
+	run_res = spci_run(SERVICE_VM1, 0);
+	EXPECT_EQ(exception_handler_receive_exception_count(&run_res, mb.recv),
+		  1);
+
+	/* Use different memory regions for verifying the second constituent. */
+	constituents[0].address = (uint64_t)pages + PAGE_SIZE * 1;
+	constituents[1].address = (uint64_t)pages + PAGE_SIZE * 3;
+
+	/*
+	 * Specify that we now want to test the second constituent of the
+	 * donated memory region.
+	 */
+	pages[PAGE_SIZE] = 1;
+
+	msg_size = spci_memory_region_init(
+		mb.send, HF_PRIMARY_VM_ID, SERVICE_VM1, constituents,
+		ARRAY_SIZE(constituents), 0, 0, SPCI_MEMORY_RW_X,
+		SPCI_MEMORY_NORMAL_MEM, SPCI_MEMORY_CACHE_WRITE_BACK,
+		SPCI_MEMORY_OUTER_SHAREABLE);
+	EXPECT_EQ(spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM1, msg_size,
+				SPCI_MSG_SEND_LEGACY_MEMORY_DONATE)
+			  .func,
+		  SPCI_SUCCESS_32);
+
+	/* Again, a single fault past the (new) upper bound is expected. */
+	run_res = spci_run(SERVICE_VM1, 0);
+	EXPECT_EQ(exception_handler_receive_exception_count(&run_res, mb.recv),
+		  1);
+}
+
+/**
+ * Verify that memory past the lower bound of the donated region cannot be
+ * accessed.
+ */
+TEST(memory_sharing, donate_check_lower_bounds)
+{
+	struct spci_value run_res;
+	struct mailbox_buffers mb = set_up_mailbox();
+	uint8_t *ptr = pages;
+	uint32_t msg_size;
+
+	SERVICE_SELECT(SERVICE_VM1, "spci_donate_check_lower_bound", mb.send);
+
+	/* Initialise the memory before donating it. */
+	memset_s(ptr, sizeof(pages), 'b', 4 * PAGE_SIZE);
+
+	/* Specify non-contiguous memory regions. */
+	struct spci_memory_region_constituent constituents[] = {
+		{.address = (uint64_t)pages, .page_count = 1},
+		{.address = (uint64_t)pages + PAGE_SIZE * 2, .page_count = 1},
+	};
+
+	/*
+	 * Specify that we want to test the first constituent of the donated
+	 * memory region. This is utilised by the test service.
+	 */
+	pages[0] = 0;
+
+	msg_size = spci_memory_region_init(
+		mb.send, HF_PRIMARY_VM_ID, SERVICE_VM1, constituents,
+		ARRAY_SIZE(constituents), 0, 0, SPCI_MEMORY_RW_X,
+		SPCI_MEMORY_NORMAL_MEM, SPCI_MEMORY_CACHE_WRITE_BACK,
+		SPCI_MEMORY_OUTER_SHAREABLE);
+	EXPECT_EQ(spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM1, msg_size,
+				SPCI_MSG_SEND_LEGACY_MEMORY_DONATE)
+			  .func,
+		  SPCI_SUCCESS_32);
+
+	/*
+	 * One fault is expected; presumably the service reads just below the
+	 * first constituent's lower bound (service code is defined elsewhere).
+	 */
+	run_res = spci_run(SERVICE_VM1, 0);
+	EXPECT_EQ(exception_handler_receive_exception_count(&run_res, mb.recv),
+		  1);
+
+	/* Use different memory regions for verifying the second constituent. */
+	constituents[0].address = (uint64_t)pages + PAGE_SIZE * 1;
+	constituents[1].address = (uint64_t)pages + PAGE_SIZE * 3;
+
+	/*
+	 * Specify that we now want to test the second constituent of the
+	 * donated memory region.
+	 */
+	pages[PAGE_SIZE] = 1;
+
+	msg_size = spci_memory_region_init(
+		mb.send, HF_PRIMARY_VM_ID, SERVICE_VM1, constituents,
+		ARRAY_SIZE(constituents), 0, 0, SPCI_MEMORY_RW_X,
+		SPCI_MEMORY_NORMAL_MEM, SPCI_MEMORY_CACHE_WRITE_BACK,
+		SPCI_MEMORY_OUTER_SHAREABLE);
+	EXPECT_EQ(spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM1, msg_size,
+				SPCI_MSG_SEND_LEGACY_MEMORY_DONATE)
+			  .func,
+		  SPCI_SUCCESS_32);
+
+	run_res = spci_run(SERVICE_VM1, 0);
+	/*
+	 * NOTE: This generates two exceptions, one for the page fault, and one
+	 * for accessing a region past the lower bound.
+	 */
+	EXPECT_EQ(exception_handler_receive_exception_count(&run_res, mb.recv),
+		  2);
+}
+
+/**
+ * After memory has been returned, it is free to be shared with another
+ * VM.
+ */
+TEST(memory_sharing, donate_elsewhere_after_return)
+{
+	struct spci_value run_res;
+	struct mailbox_buffers mb = set_up_mailbox();
+	uint8_t *ptr = pages;
+	uint32_t msg_size;
+
+	SERVICE_SELECT(SERVICE_VM1, "spci_memory_return", mb.send);
+	SERVICE_SELECT(SERVICE_VM2, "spci_memory_return", mb.send);
+
+	/* Initialise the memory before giving it. */
+	memset_s(ptr, sizeof(pages), 'b', 1 * PAGE_SIZE);
+
+	struct spci_memory_region_constituent constituents[] = {
+		{.address = (uint64_t)pages, .page_count = 1},
+	};
+
+	/* Donate a single page to VM1. */
+	msg_size = spci_memory_region_init(
+		mb.send, HF_PRIMARY_VM_ID, SERVICE_VM1, constituents,
+		ARRAY_SIZE(constituents), 0, 0, SPCI_MEMORY_RW_X,
+		SPCI_MEMORY_NORMAL_MEM, SPCI_MEMORY_CACHE_WRITE_BACK,
+		SPCI_MEMORY_OUTER_SHAREABLE);
+
+	EXPECT_EQ(spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM1, msg_size,
+				SPCI_MSG_SEND_LEGACY_MEMORY_DONATE)
+			  .func,
+		  SPCI_SUCCESS_32);
+	run_res = spci_run(SERVICE_VM1, 0);
+
+	/* Let the memory be returned. */
+	EXPECT_EQ(run_res.func, SPCI_MSG_SEND_32);
+	EXPECT_EQ(spci_rx_release().func, SPCI_SUCCESS_32);
+
+	/* Share the memory with another VM. */
+	msg_size = spci_memory_region_init(
+		mb.send, HF_PRIMARY_VM_ID, SERVICE_VM2, constituents,
+		ARRAY_SIZE(constituents), 0, 0, SPCI_MEMORY_RW_X,
+		SPCI_MEMORY_NORMAL_MEM, SPCI_MEMORY_CACHE_WRITE_BACK,
+		SPCI_MEMORY_OUTER_SHAREABLE);
+	EXPECT_EQ(spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM2, msg_size,
+				SPCI_MSG_SEND_LEGACY_MEMORY_DONATE)
+			  .func,
+		  SPCI_SUCCESS_32);
+
+	/*
+	 * NOTE(review): VM1 is run again here even though the memory was
+	 * donated to VM2; presumably the service touches the memory it
+	 * previously returned and faults — confirm against the service code.
+	 */
+	run_res = spci_run(SERVICE_VM1, 0);
+	EXPECT_EQ(exception_handler_receive_exception_count(&run_res, mb.recv),
+		  1);
+}
+
+/**
+ * Check if memory can be donated between secondary VMs.
+ * Ensure that the memory can no longer be accessed by the first VM.
+ */
+TEST(memory_sharing, donate_vms)
+{
+	struct spci_value run_res;
+	struct mailbox_buffers mb = set_up_mailbox();
+	uint8_t *ptr = pages;
+	uint32_t msg_size;
+
+	SERVICE_SELECT(SERVICE_VM1, "spci_donate_secondary_and_fault", mb.send);
+	SERVICE_SELECT(SERVICE_VM2, "spci_memory_receive", mb.send);
+
+	/* Initialise the memory before giving it. */
+	memset_s(ptr, sizeof(pages), 'b', 1 * PAGE_SIZE);
+
+	struct spci_memory_region_constituent constituents[] = {
+		{.address = (uint64_t)pages, .page_count = 1},
+	};
+
+	/* Set up VM2 to wait for message. */
+	run_res = spci_run(SERVICE_VM2, 0);
+	EXPECT_EQ(run_res.func, SPCI_MSG_WAIT_32);
+
+	/* Donate memory (primary -> VM1). */
+	msg_size = spci_memory_region_init(
+		mb.send, HF_PRIMARY_VM_ID, SERVICE_VM1, constituents,
+		ARRAY_SIZE(constituents), 0, 0, SPCI_MEMORY_RW_X,
+		SPCI_MEMORY_NORMAL_MEM, SPCI_MEMORY_CACHE_WRITE_BACK,
+		SPCI_MEMORY_OUTER_SHAREABLE);
+	EXPECT_EQ(spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM1, msg_size,
+				SPCI_MSG_SEND_LEGACY_MEMORY_DONATE)
+			  .func,
+		  SPCI_SUCCESS_32);
+
+	/* Let the memory be sent from VM1 to VM2. */
+	run_res = spci_run(SERVICE_VM1, 0);
+	EXPECT_EQ(run_res.func, SPCI_MSG_SEND_32);
+	EXPECT_EQ(spci_msg_send_receiver(run_res), SERVICE_VM2);
+
+	/* Receive memory in VM2. */
+	run_res = spci_run(SERVICE_VM2, 0);
+	EXPECT_EQ(run_res.func, SPCI_YIELD_32);
+
+	/* Try to access memory in VM1; it must fault exactly once. */
+	run_res = spci_run(SERVICE_VM1, 0);
+	EXPECT_EQ(exception_handler_receive_exception_count(&run_res, mb.recv),
+		  1);
+
+	/* Ensure that memory in VM2 remains the same. */
+	run_res = spci_run(SERVICE_VM2, 0);
+	EXPECT_EQ(run_res.func, SPCI_YIELD_32);
+}
+
+/**
+ * Check that memory is unable to be donated to multiple parties.
+ */
+TEST(memory_sharing, donate_twice)
+{
+	struct spci_value run_res;
+	struct mailbox_buffers mb = set_up_mailbox();
+	uint8_t *ptr = pages;
+	uint32_t msg_size;
+
+	SERVICE_SELECT(SERVICE_VM1, "spci_donate_twice", mb.send);
+	SERVICE_SELECT(SERVICE_VM2, "spci_memory_receive", mb.send);
+
+	/* Initialise the memory before giving it. */
+	memset_s(ptr, sizeof(pages), 'b', 1 * PAGE_SIZE);
+
+	struct spci_memory_region_constituent constituents[] = {
+		{.address = (uint64_t)pages, .page_count = 1},
+	};
+
+	/* Donate memory to VM1. */
+	msg_size = spci_memory_region_init(
+		mb.send, HF_PRIMARY_VM_ID, SERVICE_VM1, constituents,
+		ARRAY_SIZE(constituents), 0, 0, SPCI_MEMORY_RW_X,
+		SPCI_MEMORY_NORMAL_MEM, SPCI_MEMORY_CACHE_WRITE_BACK,
+		SPCI_MEMORY_OUTER_SHAREABLE);
+	EXPECT_EQ(spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM1, msg_size,
+				SPCI_MSG_SEND_LEGACY_MEMORY_DONATE)
+			  .func,
+		  SPCI_SUCCESS_32);
+
+	/* Let the memory be received. */
+	run_res = spci_run(SERVICE_VM1, 0);
+	EXPECT_EQ(run_res.func, SPCI_YIELD_32);
+
+	/*
+	 * Fail to share memory again with any VM.
+	 * NOTE(review): the -1 argument appears to mean "no VM is exempt" in
+	 * the check_cannot_* helpers — confirm against their definitions.
+	 */
+	check_cannot_share_memory(mb, constituents, ARRAY_SIZE(constituents),
+				  -1);
+	check_cannot_lend_memory(mb, constituents, ARRAY_SIZE(constituents),
+				 -1);
+	check_cannot_donate_memory(mb, constituents, ARRAY_SIZE(constituents),
+				   -1);
+	/* Fail to relinquish memory from any VM. */
+	check_cannot_relinquish_memory(mb, constituents,
+				       ARRAY_SIZE(constituents));
+
+	/* Let the memory be sent from VM1 to PRIMARY (returned). */
+	run_res = spci_run(SERVICE_VM1, 0);
+	EXPECT_EQ(run_res.func, SPCI_MSG_SEND_32);
+	EXPECT_EQ(spci_rx_release().func, SPCI_SUCCESS_32);
+
+	/* Check we have access again. */
+	ptr[0] = 'f';
+
+	/* Try and fail to donate memory from VM1 to VM2. */
+	run_res = spci_run(SERVICE_VM1, 0);
+	EXPECT_EQ(run_res.func, SPCI_YIELD_32);
+}
+
+/**
+ * Check cannot donate to self.
+ */
+TEST(memory_sharing, donate_to_self)
+{
+	struct mailbox_buffers mb = set_up_mailbox();
+	uint8_t *memory = pages;
+	uint32_t region_size;
+
+	/* Fill the page with a known pattern before the attempt. */
+	memset_s(memory, sizeof(pages), 'b', PAGE_SIZE);
+
+	struct spci_memory_region_constituent region_parts[] = {
+		{.address = (uint64_t)pages, .page_count = 1},
+	};
+
+	/* Describe a donation where sender and receiver are both the
+	 * primary VM. */
+	region_size = spci_memory_region_init(
+		mb.send, HF_PRIMARY_VM_ID, HF_PRIMARY_VM_ID, region_parts,
+		ARRAY_SIZE(region_parts), 0, 0, SPCI_MEMORY_RW_X,
+		SPCI_MEMORY_NORMAL_MEM, SPCI_MEMORY_CACHE_WRITE_BACK,
+		SPCI_MEMORY_OUTER_SHAREABLE);
+
+	/* Donating memory to oneself must be rejected. */
+	EXPECT_SPCI_ERROR(
+		spci_msg_send(HF_PRIMARY_VM_ID, HF_PRIMARY_VM_ID, region_size,
+			      SPCI_MSG_SEND_LEGACY_MEMORY_DONATE),
+		SPCI_INVALID_PARAMETERS);
+}
+
+/**
+ * Check cannot lend to self.
+ */
+TEST(memory_sharing, lend_to_self)
+{
+	struct mailbox_buffers mb = set_up_mailbox();
+	uint8_t *memory = pages;
+	uint32_t region_size;
+
+	/* Fill the page with a known pattern before the attempt. */
+	memset_s(memory, sizeof(pages), 'b', PAGE_SIZE);
+
+	struct spci_memory_region_constituent region_parts[] = {
+		{.address = (uint64_t)pages, .page_count = 1},
+	};
+
+	/* Describe a lend where sender and receiver are both the primary. */
+	region_size = spci_memory_region_init(
+		mb.send, HF_PRIMARY_VM_ID, HF_PRIMARY_VM_ID, region_parts,
+		ARRAY_SIZE(region_parts), 0, 0, SPCI_MEMORY_RW_X,
+		SPCI_MEMORY_NORMAL_MEM, SPCI_MEMORY_CACHE_WRITE_BACK,
+		SPCI_MEMORY_OUTER_SHAREABLE);
+
+	/* Lending memory to oneself must be rejected. */
+	EXPECT_SPCI_ERROR(
+		spci_msg_send(HF_PRIMARY_VM_ID, HF_PRIMARY_VM_ID, region_size,
+			      SPCI_MSG_SEND_LEGACY_MEMORY_LEND),
+		SPCI_INVALID_PARAMETERS);
+}
+
+/**
+ * Check cannot share to self.
+ */
+TEST(memory_sharing, share_to_self)
+{
+	struct mailbox_buffers mb = set_up_mailbox();
+	uint8_t *memory = pages;
+	uint32_t region_size;
+
+	/* Fill the page with a known pattern before the attempt. */
+	memset_s(memory, sizeof(pages), 'b', PAGE_SIZE);
+
+	struct spci_memory_region_constituent region_parts[] = {
+		{.address = (uint64_t)pages, .page_count = 1},
+	};
+
+	/* Describe a share where sender and receiver are both the primary. */
+	region_size = spci_memory_region_init(
+		mb.send, HF_PRIMARY_VM_ID, HF_PRIMARY_VM_ID, region_parts,
+		ARRAY_SIZE(region_parts), 0, 0, SPCI_MEMORY_RW_X,
+		SPCI_MEMORY_NORMAL_MEM, SPCI_MEMORY_CACHE_WRITE_BACK,
+		SPCI_MEMORY_OUTER_SHAREABLE);
+
+	/* Sharing memory with oneself must be rejected. */
+	EXPECT_SPCI_ERROR(
+		spci_msg_send(HF_PRIMARY_VM_ID, HF_PRIMARY_VM_ID, region_size,
+			      SPCI_MSG_SEND_LEGACY_MEMORY_SHARE),
+		SPCI_INVALID_PARAMETERS);
+}
+
+/**
+ * Check cannot donate from alternative VM.
+ */
+TEST(memory_sharing, donate_invalid_source)
+{
+	struct spci_value run_res;
+	struct mailbox_buffers mb = set_up_mailbox();
+	uint8_t *ptr = pages;
+	uint32_t msg_size;
+
+	SERVICE_SELECT(SERVICE_VM1, "spci_donate_invalid_source", mb.send);
+	SERVICE_SELECT(SERVICE_VM2, "spci_memory_receive", mb.send);
+
+	/* Initialise the memory before giving it. */
+	memset_s(ptr, sizeof(pages), 'b', PAGE_SIZE);
+	struct spci_memory_region_constituent constituents[] = {
+		{.address = (uint64_t)pages, .page_count = 1},
+	};
+
+	/* Try invalid configurations: sender field is not the caller. */
+	msg_size = spci_memory_region_init(
+		mb.send, SERVICE_VM1, HF_PRIMARY_VM_ID, constituents,
+		ARRAY_SIZE(constituents), 0, 0, SPCI_MEMORY_RW_X,
+		SPCI_MEMORY_NORMAL_MEM, SPCI_MEMORY_CACHE_WRITE_BACK,
+		SPCI_MEMORY_OUTER_SHAREABLE);
+	EXPECT_SPCI_ERROR(spci_msg_send(SERVICE_VM1, HF_PRIMARY_VM_ID, msg_size,
+					SPCI_MSG_SEND_LEGACY_MEMORY_DONATE),
+			  SPCI_INVALID_PARAMETERS);
+
+	/* Sender and receiver are both a secondary VM. */
+	msg_size = spci_memory_region_init(
+		mb.send, SERVICE_VM1, SERVICE_VM1, constituents,
+		ARRAY_SIZE(constituents), 0, 0, SPCI_MEMORY_RW_X,
+		SPCI_MEMORY_NORMAL_MEM, SPCI_MEMORY_CACHE_WRITE_BACK,
+		SPCI_MEMORY_OUTER_SHAREABLE);
+	EXPECT_SPCI_ERROR(spci_msg_send(SERVICE_VM1, SERVICE_VM1, msg_size,
+					SPCI_MSG_SEND_LEGACY_MEMORY_DONATE),
+			  SPCI_INVALID_PARAMETERS);
+
+	/* Sender is a different secondary VM from the caller. */
+	msg_size = spci_memory_region_init(
+		mb.send, SERVICE_VM2, SERVICE_VM1, constituents,
+		ARRAY_SIZE(constituents), 0, 0, SPCI_MEMORY_RW_X,
+		SPCI_MEMORY_NORMAL_MEM, SPCI_MEMORY_CACHE_WRITE_BACK,
+		SPCI_MEMORY_OUTER_SHAREABLE);
+	EXPECT_SPCI_ERROR(spci_msg_send(SERVICE_VM2, SERVICE_VM1, msg_size,
+					SPCI_MSG_SEND_LEGACY_MEMORY_DONATE),
+			  SPCI_INVALID_PARAMETERS);
+
+	/* Successfully donate to VM1. */
+	msg_size = spci_memory_region_init(
+		mb.send, HF_PRIMARY_VM_ID, SERVICE_VM1, constituents,
+		ARRAY_SIZE(constituents), 0, 0, SPCI_MEMORY_RW_X,
+		SPCI_MEMORY_NORMAL_MEM, SPCI_MEMORY_CACHE_WRITE_BACK,
+		SPCI_MEMORY_OUTER_SHAREABLE);
+	EXPECT_EQ(spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM1, msg_size,
+				SPCI_MSG_SEND_LEGACY_MEMORY_DONATE)
+			  .func,
+		  SPCI_SUCCESS_32);
+
+	/* Receive and return memory from VM1. */
+	run_res = spci_run(SERVICE_VM1, 0);
+	EXPECT_EQ(run_res.func, SPCI_MSG_SEND_32);
+	EXPECT_EQ(spci_rx_release().func, SPCI_SUCCESS_32);
+
+	/* Use VM1 to fail to donate memory from the primary to VM2. */
+	run_res = spci_run(SERVICE_VM1, 0);
+	EXPECT_EQ(run_res.func, SPCI_YIELD_32);
+}
+
+/**
+ * Check that unaligned addresses can not be shared.
+ */
+TEST(memory_sharing, give_and_get_back_unaligned)
+{
+	struct mailbox_buffers mb = set_up_mailbox();
+
+	SERVICE_SELECT(SERVICE_VM1, "spci_memory_return", mb.send);
+
+	/*
+	 * Check for unaligned pages for either constituent.
+	 * BUG FIX: the inner loop previously read
+	 * "for (int j = 0; i < PAGE_SIZE; i++)" — it tested and incremented
+	 * the outer index `i` instead of `j`, so `j` was always 0 and the
+	 * second constituent's offset was never exercised. Loop over `j`.
+	 */
+	for (int i = 0; i < PAGE_SIZE; i++) {
+		for (int j = 0; j < PAGE_SIZE; j++) {
+			/* Skip the case they're both aligned. */
+			if (i == 0 && j == 0) {
+				continue;
+			}
+			struct spci_memory_region_constituent constituents[] = {
+				{.address = (uint64_t)pages + i,
+				 .page_count = 1},
+				{.address = (uint64_t)pages + PAGE_SIZE + j,
+				 .page_count = 1},
+			};
+			/* An unaligned donate must be rejected. */
+			uint32_t msg_size = spci_memory_region_init(
+				mb.send, HF_PRIMARY_VM_ID, SERVICE_VM1,
+				constituents, ARRAY_SIZE(constituents), 0, 0,
+				SPCI_MEMORY_RW_X, SPCI_MEMORY_NORMAL_MEM,
+				SPCI_MEMORY_CACHE_WRITE_BACK,
+				SPCI_MEMORY_OUTER_SHAREABLE);
+			EXPECT_SPCI_ERROR(
+				spci_msg_send(
+					HF_PRIMARY_VM_ID, SERVICE_VM1, msg_size,
+					SPCI_MSG_SEND_LEGACY_MEMORY_DONATE),
+				SPCI_INVALID_PARAMETERS);
+			/* An unaligned lend must be rejected too. */
+			msg_size = spci_memory_region_init(
+				mb.send, HF_PRIMARY_VM_ID, SERVICE_VM1,
+				constituents, ARRAY_SIZE(constituents), 0, 0,
+				SPCI_MEMORY_RW_X, SPCI_MEMORY_NORMAL_MEM,
+				SPCI_MEMORY_CACHE_WRITE_BACK,
+				SPCI_MEMORY_OUTER_SHAREABLE);
+			EXPECT_SPCI_ERROR(
+				spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM1,
+					      msg_size,
+					      SPCI_MSG_SEND_LEGACY_MEMORY_LEND),
+				SPCI_INVALID_PARAMETERS);
+		}
+	}
+}
+
+/**
+ * Check cannot lend from alternative VM.
+ */
+TEST(memory_sharing, lend_invalid_source)
+{
+	struct spci_value run_res;
+	struct mailbox_buffers mb = set_up_mailbox();
+	uint8_t *ptr = pages;
+	uint32_t msg_size;
+
+	SERVICE_SELECT(SERVICE_VM1, "spci_lend_invalid_source", mb.send);
+
+	/* Initialise the memory before giving it. */
+	memset_s(ptr, sizeof(pages), 'b', PAGE_SIZE);
+	struct spci_memory_region_constituent constituents[] = {
+		{.address = (uint64_t)pages, .page_count = 1},
+	};
+
+	/* Check cannot swap VM IDs: sender field must match the caller. */
+	msg_size = spci_memory_region_init(
+		mb.send, SERVICE_VM1, HF_PRIMARY_VM_ID, constituents,
+		ARRAY_SIZE(constituents), 0, 0, SPCI_MEMORY_RW_X,
+		SPCI_MEMORY_NORMAL_MEM, SPCI_MEMORY_CACHE_WRITE_BACK,
+		SPCI_MEMORY_OUTER_SHAREABLE);
+	EXPECT_SPCI_ERROR(spci_msg_send(SERVICE_VM1, HF_PRIMARY_VM_ID, msg_size,
+					SPCI_MSG_SEND_LEGACY_MEMORY_LEND),
+			  SPCI_INVALID_PARAMETERS);
+
+	/* Lend memory to VM1. */
+	msg_size = spci_memory_region_init(
+		mb.send, HF_PRIMARY_VM_ID, SERVICE_VM1, constituents,
+		ARRAY_SIZE(constituents), 0, 0, SPCI_MEMORY_RW_X,
+		SPCI_MEMORY_NORMAL_MEM, SPCI_MEMORY_CACHE_WRITE_BACK,
+		SPCI_MEMORY_OUTER_SHAREABLE);
+	EXPECT_EQ(spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM1, msg_size,
+				SPCI_MSG_SEND_LEGACY_MEMORY_LEND)
+			  .func,
+		  SPCI_SUCCESS_32);
+
+	/* Receive and return memory from VM1. */
+	run_res = spci_run(SERVICE_VM1, 0);
+	EXPECT_EQ(run_res.func, SPCI_MSG_SEND_32);
+	EXPECT_EQ(spci_rx_release().func, SPCI_SUCCESS_32);
+
+	/* Try to lend memory from primary in VM1; the service yields once
+	 * its (failing) attempt is done. */
+	run_res = spci_run(SERVICE_VM1, 0);
+	EXPECT_EQ(run_res.func, SPCI_YIELD_32);
+}
+
+/**
+ * Memory can be lent with executable permissions.
+ * Check RO and RW permissions.
+ */
+TEST(memory_sharing, lend_relinquish_X_RW)
+{
+	struct spci_value run_res;
+	struct mailbox_buffers mb = set_up_mailbox();
+	uint8_t *ptr = pages;
+	uint32_t msg_size;
+
+	SERVICE_SELECT(SERVICE_VM1, "spci_memory_lend_relinquish_RW", mb.send);
+
+	/* Initialise the memory before giving it. */
+	memset_s(ptr, sizeof(pages), 'b', PAGE_SIZE);
+
+	struct spci_memory_region_constituent constituents[] = {
+		{.address = (uint64_t)pages, .page_count = 1},
+	};
+
+	/* First phase: lend with read-write, executable permissions. */
+	msg_size = spci_memory_region_init(
+		mb.send, HF_PRIMARY_VM_ID, SERVICE_VM1, constituents,
+		ARRAY_SIZE(constituents), 0, 0, SPCI_MEMORY_RW_X,
+		SPCI_MEMORY_NORMAL_MEM, SPCI_MEMORY_CACHE_WRITE_BACK,
+		SPCI_MEMORY_OUTER_SHAREABLE);
+
+	EXPECT_EQ(spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM1, msg_size,
+				SPCI_MSG_SEND_LEGACY_MEMORY_LEND)
+			  .func,
+		  SPCI_SUCCESS_32);
+
+	/* Let the memory be accessed. */
+	run_res = spci_run(SERVICE_VM1, 0);
+	EXPECT_EQ(run_res.func, SPCI_YIELD_32);
+
+	/* Let service write to and return memory. */
+	run_res = spci_run(SERVICE_VM1, 0);
+	EXPECT_EQ(run_res.func, SPCI_MSG_SEND_32);
+	EXPECT_EQ(spci_rx_release().func, SPCI_SUCCESS_32);
+
+	/* Re-initialise the memory before giving it. */
+	memset_s(ptr, sizeof(pages), 'b', PAGE_SIZE);
+
+	/* Second phase: lend the same region read-only. */
+	msg_size = spci_memory_region_init(
+		mb.send, HF_PRIMARY_VM_ID, SERVICE_VM1, constituents,
+		ARRAY_SIZE(constituents), 0, 0, SPCI_MEMORY_RO_X,
+		SPCI_MEMORY_NORMAL_MEM, SPCI_MEMORY_CACHE_WRITE_BACK,
+		SPCI_MEMORY_OUTER_SHAREABLE);
+
+	EXPECT_EQ(spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM1, msg_size,
+				SPCI_MSG_SEND_LEGACY_MEMORY_LEND)
+			  .func,
+		  SPCI_SUCCESS_32);
+
+	/* Let the memory be accessed. */
+	run_res = spci_run(SERVICE_VM1, 0);
+	EXPECT_EQ(run_res.func, SPCI_YIELD_32);
+
+	/* One fault is expected; presumably the service's write to the
+	 * read-only lent memory traps (service code is defined elsewhere). */
+	run_res = spci_run(SERVICE_VM1, 0);
+	EXPECT_EQ(exception_handler_receive_exception_count(&run_res, mb.recv),
+		  1);
+}
+
+/**
+ * Memory can be shared with executable permissions.
+ * Check RO and RW permissions.
+ */
+TEST(memory_sharing, share_relinquish_X_RW)
+{
+	struct spci_value run_res;
+	struct mailbox_buffers mb = set_up_mailbox();
+	uint8_t *ptr = pages;
+	uint32_t msg_size;
+
+	SERVICE_SELECT(SERVICE_VM1, "spci_memory_lend_relinquish_RW", mb.send);
+
+	/* Initialise the memory before giving it. */
+	memset_s(ptr, sizeof(pages), 'b', PAGE_SIZE);
+
+	struct spci_memory_region_constituent constituents[] = {
+		{.address = (uint64_t)pages, .page_count = 1},
+	};
+
+	/* First phase: share with read-write, executable permissions. */
+	msg_size = spci_memory_region_init(
+		mb.send, HF_PRIMARY_VM_ID, SERVICE_VM1, constituents,
+		ARRAY_SIZE(constituents), 0, 0, SPCI_MEMORY_RW_X,
+		SPCI_MEMORY_NORMAL_MEM, SPCI_MEMORY_CACHE_WRITE_BACK,
+		SPCI_MEMORY_OUTER_SHAREABLE);
+
+	EXPECT_EQ(spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM1, msg_size,
+				SPCI_MSG_SEND_LEGACY_MEMORY_SHARE)
+			  .func,
+		  SPCI_SUCCESS_32);
+
+	/* Let the memory be accessed. */
+	run_res = spci_run(SERVICE_VM1, 0);
+	EXPECT_EQ(run_res.func, SPCI_YIELD_32);
+
+	/* Ensure we still have access: unlike lend, share keeps the sender's
+	 * mapping, so the primary can still read and write the page. */
+	for (int i = 0; i < PAGE_SIZE; ++i) {
+		ASSERT_EQ(ptr[i], 'b');
+		ptr[i]++;
+	}
+
+	/* Let service write to and return memory. */
+	run_res = spci_run(SERVICE_VM1, 0);
+	EXPECT_EQ(run_res.func, SPCI_MSG_SEND_32);
+	EXPECT_EQ(spci_rx_release().func, SPCI_SUCCESS_32);
+
+	/* Re-initialise the memory before giving it. */
+	memset_s(ptr, sizeof(pages), 'b', PAGE_SIZE);
+
+	/* Second phase: share the same region read-only. */
+	msg_size = spci_memory_region_init(
+		mb.send, HF_PRIMARY_VM_ID, SERVICE_VM1, constituents,
+		ARRAY_SIZE(constituents), 0, 0, SPCI_MEMORY_RO_X,
+		SPCI_MEMORY_NORMAL_MEM, SPCI_MEMORY_CACHE_WRITE_BACK,
+		SPCI_MEMORY_OUTER_SHAREABLE);
+
+	EXPECT_EQ(spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM1, msg_size,
+				SPCI_MSG_SEND_LEGACY_MEMORY_SHARE)
+			  .func,
+		  SPCI_SUCCESS_32);
+
+	/* Let the memory be accessed. */
+	run_res = spci_run(SERVICE_VM1, 0);
+	EXPECT_EQ(run_res.func, SPCI_YIELD_32);
+
+	/* NOTE(review): the primary still writes here after sharing RO — the
+	 * RO attribute presumably only restricts the receiver's mapping, not
+	 * the sender's; confirm against the share semantics. */
+	for (int i = 0; i < PAGE_SIZE; ++i) {
+		ASSERT_EQ(ptr[i], 'b');
+		ptr[i]++;
+	}
+
+	/* The service's write to the RO-shared memory should fault once. */
+	run_res = spci_run(SERVICE_VM1, 0);
+	EXPECT_EQ(exception_handler_receive_exception_count(&run_res, mb.recv),
+		  1);
+}
+
+/**
+ * Memory can be shared without executable permissions.
+ * Check RO and RW permissions.
+ */
+TEST(memory_sharing, share_relinquish_NX_RW)
+{
+	struct spci_value run_res;
+	struct mailbox_buffers mb = set_up_mailbox();
+	uint8_t *ptr = pages;
+	uint32_t msg_size;
+
+	SERVICE_SELECT(SERVICE_VM1, "spci_memory_lend_relinquish_RW", mb.send);
+
+	/* Initialise the memory before giving it. */
+	memset_s(ptr, sizeof(pages), 'b', PAGE_SIZE);
+
+	struct spci_memory_region_constituent constituents[] = {
+		{.address = (uint64_t)pages, .page_count = 1},
+	};
+
+	/* First phase: share read-write but non-executable. */
+	msg_size = spci_memory_region_init(
+		mb.send, HF_PRIMARY_VM_ID, SERVICE_VM1, constituents,
+		ARRAY_SIZE(constituents), 0, 0, SPCI_MEMORY_RW_NX,
+		SPCI_MEMORY_NORMAL_MEM, SPCI_MEMORY_CACHE_WRITE_BACK,
+		SPCI_MEMORY_OUTER_SHAREABLE);
+
+	EXPECT_EQ(spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM1, msg_size,
+				SPCI_MSG_SEND_LEGACY_MEMORY_SHARE)
+			  .func,
+		  SPCI_SUCCESS_32);
+
+	/* Let the memory be accessed. */
+	run_res = spci_run(SERVICE_VM1, 0);
+	EXPECT_EQ(run_res.func, SPCI_YIELD_32);
+
+	/* Ensure we still have access.
+	 * NOTE(review): this loop only reads, while the analogous X_RW test
+	 * also writes — possibly intentional, confirm. */
+	for (int i = 0; i < PAGE_SIZE; ++i) {
+		ASSERT_EQ(ptr[i], 'b');
+	}
+
+	/* Let service write to and return memory. */
+	run_res = spci_run(SERVICE_VM1, 0);
+	EXPECT_EQ(run_res.func, SPCI_MSG_SEND_32);
+	EXPECT_EQ(spci_rx_release().func, SPCI_SUCCESS_32);
+
+	/* Re-initialise the memory before giving it. */
+	memset_s(ptr, sizeof(pages), 'b', PAGE_SIZE);
+
+	/* Second phase: share the same region read-only, non-executable. */
+	msg_size = spci_memory_region_init(
+		mb.send, HF_PRIMARY_VM_ID, SERVICE_VM1, constituents,
+		ARRAY_SIZE(constituents), 0, 0, SPCI_MEMORY_RO_NX,
+		SPCI_MEMORY_NORMAL_MEM, SPCI_MEMORY_CACHE_WRITE_BACK,
+		SPCI_MEMORY_OUTER_SHAREABLE);
+
+	EXPECT_EQ(spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM1, msg_size,
+				SPCI_MSG_SEND_LEGACY_MEMORY_SHARE)
+			  .func,
+		  SPCI_SUCCESS_32);
+
+	/* Let the memory be accessed. */
+	run_res = spci_run(SERVICE_VM1, 0);
+	EXPECT_EQ(run_res.func, SPCI_YIELD_32);
+
+	/* Ensure we still have access. */
+	for (int i = 0; i < PAGE_SIZE; ++i) {
+		ASSERT_EQ(ptr[i], 'b');
+		ptr[i]++;
+	}
+
+	/* The service's write to the RO-shared memory should fault once. */
+	run_res = spci_run(SERVICE_VM1, 0);
+	EXPECT_EQ(exception_handler_receive_exception_count(&run_res, mb.recv),
+		  1);
+}
+
+/**
+ * Exercise execution permissions for lending memory.
+ */
+TEST(memory_sharing, lend_relinquish_RW_X)
+{
+	struct spci_value run_res;
+	struct mailbox_buffers mb = set_up_mailbox();
+	uint8_t *ptr = pages;
+	uint32_t msg_size;
+
+	SERVICE_SELECT(SERVICE_VM1, "spci_memory_lend_relinquish_X", mb.send);
+
+	/* Initialise the memory before giving it. */
+	memset_s(ptr, sizeof(pages), 0, PAGE_SIZE);
+
+	uint64_t *ptr2 = (uint64_t *)pages;
+	/* Set memory to contain the RET instruction to attempt to execute.
+	 * 0xD65F03C0 is the AArch64 encoding of RET. */
+	*ptr2 = 0xD65F03C0;
+
+	struct spci_memory_region_constituent constituents[] = {
+		{.address = (uint64_t)pages, .page_count = 1},
+	};
+
+	/* First phase: lend with executable permission; the branch to the
+	 * lent page should succeed. */
+	msg_size = spci_memory_region_init(
+		mb.send, HF_PRIMARY_VM_ID, SERVICE_VM1, constituents,
+		ARRAY_SIZE(constituents), 0, 0, SPCI_MEMORY_RW_X,
+		SPCI_MEMORY_NORMAL_MEM, SPCI_MEMORY_CACHE_WRITE_BACK,
+		SPCI_MEMORY_OUTER_SHAREABLE);
+	EXPECT_EQ(spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM1, msg_size,
+				SPCI_MSG_SEND_LEGACY_MEMORY_LEND)
+			  .func,
+		  SPCI_SUCCESS_32);
+
+	/* Attempt to execute from memory. */
+	run_res = spci_run(SERVICE_VM1, 0);
+	EXPECT_EQ(run_res.func, SPCI_MSG_SEND_32);
+	EXPECT_EQ(spci_rx_release().func, SPCI_SUCCESS_32);
+
+	/* Second phase: lend the same region non-executable. */
+	msg_size = spci_memory_region_init(
+		mb.send, HF_PRIMARY_VM_ID, SERVICE_VM1, constituents,
+		ARRAY_SIZE(constituents), 0, 0, SPCI_MEMORY_RW_NX,
+		SPCI_MEMORY_NORMAL_MEM, SPCI_MEMORY_CACHE_WRITE_BACK,
+		SPCI_MEMORY_OUTER_SHAREABLE);
+	EXPECT_EQ(spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM1, msg_size,
+				SPCI_MSG_SEND_LEGACY_MEMORY_LEND)
+			  .func,
+		  SPCI_SUCCESS_32);
+
+	/* Executing NX memory should fault exactly once. */
+	run_res = spci_run(SERVICE_VM1, 0);
+	EXPECT_EQ(exception_handler_receive_exception_count(&run_res, mb.recv),
+		  1);
+}
+
+/**
+ * Exercise execution permissions for lending memory without write access.
+ */
+TEST(memory_sharing, lend_relinquish_RO_X)
+{
+	struct spci_value run_res;
+	struct mailbox_buffers mb = set_up_mailbox();
+	uint8_t *ptr = pages;
+	uint32_t msg_size;
+
+	SERVICE_SELECT(SERVICE_VM1, "spci_memory_lend_relinquish_X", mb.send);
+
+	/* Initialise the memory before giving it. */
+	memset_s(ptr, sizeof(pages), 0, PAGE_SIZE);
+
+	uint64_t *ptr2 = (uint64_t *)pages;
+	/* Set memory to contain the RET instruction to attempt to execute.
+	 * 0xD65F03C0 is the AArch64 encoding of RET. */
+	*ptr2 = 0xD65F03C0;
+
+	struct spci_memory_region_constituent constituents[] = {
+		{.address = (uint64_t)pages, .page_count = 1},
+	};
+
+	/* First phase: lend read-only but executable; execution should
+	 * succeed. */
+	msg_size = spci_memory_region_init(
+		mb.send, HF_PRIMARY_VM_ID, SERVICE_VM1, constituents,
+		ARRAY_SIZE(constituents), 0, 0, SPCI_MEMORY_RO_X,
+		SPCI_MEMORY_NORMAL_MEM, SPCI_MEMORY_CACHE_WRITE_BACK,
+		SPCI_MEMORY_OUTER_SHAREABLE);
+	EXPECT_EQ(spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM1, msg_size,
+				SPCI_MSG_SEND_LEGACY_MEMORY_LEND)
+			  .func,
+		  SPCI_SUCCESS_32);
+
+	/* Attempt to execute from memory. */
+	run_res = spci_run(SERVICE_VM1, 0);
+	EXPECT_EQ(run_res.func, SPCI_MSG_SEND_32);
+	EXPECT_EQ(spci_rx_release().func, SPCI_SUCCESS_32);
+
+	/* Second phase: lend read-only and non-executable. */
+	msg_size = spci_memory_region_init(
+		mb.send, HF_PRIMARY_VM_ID, SERVICE_VM1, constituents,
+		ARRAY_SIZE(constituents), 0, 0, SPCI_MEMORY_RO_NX,
+		SPCI_MEMORY_NORMAL_MEM, SPCI_MEMORY_CACHE_WRITE_BACK,
+		SPCI_MEMORY_OUTER_SHAREABLE);
+	EXPECT_EQ(spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM1, msg_size,
+				SPCI_MSG_SEND_LEGACY_MEMORY_LEND)
+			  .func,
+		  SPCI_SUCCESS_32);
+
+	/* Executing NX memory should fault exactly once. */
+	run_res = spci_run(SERVICE_VM1, 0);
+	EXPECT_EQ(exception_handler_receive_exception_count(&run_res, mb.recv),
+		  1);
+}
+
+/**
+ * Memory can be lent, but then no part can be donated.
+ */
+TEST(memory_sharing, lend_donate)
+{
+	struct spci_value run_res;
+	struct mailbox_buffers mb = set_up_mailbox();
+	uint8_t *ptr = pages;
+	uint32_t msg_size;
+
+	SERVICE_SELECT(SERVICE_VM1, "spci_memory_lend_relinquish_RW", mb.send);
+	SERVICE_SELECT(SERVICE_VM2, "spci_memory_lend_relinquish_RW", mb.send);
+
+	/*
+	 * Initialise the memory before giving it.
+	 * BUG FIX: the second argument of memset_s is the size of the
+	 * destination buffer; it was passed as sizeof(pages) * 2, which
+	 * overstates the buffer and defeats memset_s's bounds check. Only
+	 * PAGE_SIZE * 2 bytes are written, so sizeof(pages) is the correct
+	 * (and sufficient) bound.
+	 */
+	memset_s(ptr, sizeof(pages), 'b', PAGE_SIZE * 2);
+
+	struct spci_memory_region_constituent constituents[] = {
+		{.address = (uint64_t)pages, .page_count = 2},
+	};
+
+	/* Lend two pages to VM1, read-only and executable. */
+	msg_size = spci_memory_region_init(
+		mb.send, HF_PRIMARY_VM_ID, SERVICE_VM1, constituents,
+		ARRAY_SIZE(constituents), 0, 0, SPCI_MEMORY_RO_X,
+		SPCI_MEMORY_NORMAL_MEM, SPCI_MEMORY_CACHE_WRITE_BACK,
+		SPCI_MEMORY_OUTER_SHAREABLE);
+
+	EXPECT_EQ(spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM1, msg_size,
+				SPCI_MSG_SEND_LEGACY_MEMORY_LEND)
+			  .func,
+		  SPCI_SUCCESS_32);
+
+	/* Let the memory be accessed. */
+	run_res = spci_run(SERVICE_VM1, 0);
+	EXPECT_EQ(run_res.func, SPCI_YIELD_32);
+
+	/* Ensure we can't donate any sub section of memory to another VM. */
+	constituents[0].page_count = 1;
+	for (int i = 1; i < PAGE_SIZE * 2; i++) {
+		/*
+		 * NOTE(review): the loop index `i` is unused, so the same
+		 * page is donated repeatedly; the address was possibly meant
+		 * to be (uint64_t)pages + i to sweep sub-sections — confirm
+		 * the intent before changing behaviour.
+		 */
+		constituents[0].address = (uint64_t)pages + PAGE_SIZE;
+		msg_size = spci_memory_region_init(
+			mb.send, HF_PRIMARY_VM_ID, SERVICE_VM2, constituents,
+			ARRAY_SIZE(constituents), 0, 0, SPCI_MEMORY_RW_X,
+			SPCI_MEMORY_NORMAL_MEM, SPCI_MEMORY_CACHE_WRITE_BACK,
+			SPCI_MEMORY_OUTER_SHAREABLE);
+		EXPECT_SPCI_ERROR(
+			spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM2, msg_size,
+				      SPCI_MSG_SEND_LEGACY_MEMORY_DONATE),
+			SPCI_INVALID_PARAMETERS);
+	}
+
+	/* Ensure we can donate to the only borrower. */
+	msg_size = spci_memory_region_init(
+		mb.send, HF_PRIMARY_VM_ID, SERVICE_VM1, constituents,
+		ARRAY_SIZE(constituents), 0, 0, SPCI_MEMORY_RW_X,
+		SPCI_MEMORY_NORMAL_MEM, SPCI_MEMORY_CACHE_WRITE_BACK,
+		SPCI_MEMORY_OUTER_SHAREABLE);
+	EXPECT_EQ(spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM1, msg_size,
+				SPCI_MSG_SEND_LEGACY_MEMORY_DONATE)
+			  .func,
+		  SPCI_SUCCESS_32);
+}
+
+/**
+ * Memory can be shared, but then no part can be donated.
+ */
+TEST(memory_sharing, share_donate)
+{
+	struct spci_value run_res;
+	struct mailbox_buffers mb = set_up_mailbox();
+	uint8_t *ptr = pages;
+	uint32_t msg_size;
+
+	SERVICE_SELECT(SERVICE_VM1, "spci_memory_lend_relinquish_RW", mb.send);
+	SERVICE_SELECT(SERVICE_VM2, "spci_memory_lend_relinquish_RW", mb.send);
+
+	/* Initialise the memory before giving it. */
+	memset_s(ptr, sizeof(pages), 'b', PAGE_SIZE * 4);
+
+	struct spci_memory_region_constituent constituents[] = {
+		{.address = (uint64_t)pages, .page_count = 2},
+		{.address = (uint64_t)pages + PAGE_SIZE * 2, .page_count = 2},
+	};
+
+	/* Share four pages with VM1, read-only and executable. */
+	msg_size = spci_memory_region_init(
+		mb.send, HF_PRIMARY_VM_ID, SERVICE_VM1, constituents,
+		ARRAY_SIZE(constituents), 0, 0, SPCI_MEMORY_RO_X,
+		SPCI_MEMORY_NORMAL_MEM, SPCI_MEMORY_CACHE_WRITE_BACK,
+		SPCI_MEMORY_OUTER_SHAREABLE);
+
+	EXPECT_EQ(spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM1, msg_size,
+				SPCI_MSG_SEND_LEGACY_MEMORY_SHARE)
+			  .func,
+		  SPCI_SUCCESS_32);
+
+	/* Let the memory be accessed. */
+	run_res = spci_run(SERVICE_VM1, 0);
+	EXPECT_EQ(run_res.func, SPCI_YIELD_32);
+
+	/* Attempt to share the same area of memory. */
+	check_cannot_share_memory(mb, constituents, ARRAY_SIZE(constituents),
+				  SERVICE_VM1);
+
+	/* Ensure we can't donate any sub section of memory to another VM. */
+	constituents[0].page_count = 1;
+	for (int i = 1; i < PAGE_SIZE * 2; i++) {
+		/*
+		 * NOTE(review): the loop index `i` is unused, so the same page
+		 * is donated repeatedly; possibly meant to be
+		 * (uint64_t)pages + i — confirm the intent.
+		 */
+		constituents[0].address = (uint64_t)pages + PAGE_SIZE;
+		msg_size = spci_memory_region_init(
+			mb.send, HF_PRIMARY_VM_ID, SERVICE_VM2, constituents,
+			ARRAY_SIZE(constituents), 0, 0, SPCI_MEMORY_RW_X,
+			SPCI_MEMORY_NORMAL_MEM, SPCI_MEMORY_CACHE_WRITE_BACK,
+			SPCI_MEMORY_OUTER_SHAREABLE);
+		EXPECT_SPCI_ERROR(
+			spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM2, msg_size,
+				      SPCI_MSG_SEND_LEGACY_MEMORY_DONATE),
+			SPCI_INVALID_PARAMETERS);
+	}
+
+	/* Ensure we can donate to the only borrower. */
+	msg_size = spci_memory_region_init(
+		mb.send, HF_PRIMARY_VM_ID, SERVICE_VM1, constituents,
+		ARRAY_SIZE(constituents), 0, 0, SPCI_MEMORY_RW_X,
+		SPCI_MEMORY_NORMAL_MEM, SPCI_MEMORY_CACHE_WRITE_BACK,
+		SPCI_MEMORY_OUTER_SHAREABLE);
+	EXPECT_EQ(spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM1, msg_size,
+				SPCI_MSG_SEND_LEGACY_MEMORY_DONATE)
+			  .func,
+		  SPCI_SUCCESS_32);
+}
+
+/**
+ * Memory can be lent, but then no part of it can be lent again.
+ */
+TEST(memory_sharing, lend_twice)
+{
+	struct spci_value run_res;
+	struct mailbox_buffers mb = set_up_mailbox();
+	uint8_t *ptr = pages;
+	uint32_t msg_size;
+
+	SERVICE_SELECT(SERVICE_VM1, "spci_memory_lend_twice", mb.send);
+	SERVICE_SELECT(SERVICE_VM2, "spci_memory_lend_twice", mb.send);
+
+	/* Initialise the memory before giving it. */
+	memset_s(ptr, sizeof(pages), 'b', PAGE_SIZE * 4);
+
+	/* Two non-contiguous constituents: pages 0-1 and page 3. */
+	struct spci_memory_region_constituent constituents[] = {
+		{.address = (uint64_t)pages, .page_count = 2},
+		{.address = (uint64_t)pages + PAGE_SIZE * 3, .page_count = 1},
+	};
+
+	msg_size = spci_memory_region_init(
+		mb.send, HF_PRIMARY_VM_ID, SERVICE_VM1, constituents,
+		ARRAY_SIZE(constituents), 0, 0, SPCI_MEMORY_RW_X,
+		SPCI_MEMORY_NORMAL_MEM, SPCI_MEMORY_CACHE_WRITE_BACK,
+		SPCI_MEMORY_OUTER_SHAREABLE);
+
+	EXPECT_EQ(spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM1, msg_size,
+				SPCI_MSG_SEND_LEGACY_MEMORY_LEND)
+			  .func,
+		  SPCI_SUCCESS_32);
+
+	/* Let the memory be accessed. */
+	run_res = spci_run(SERVICE_VM1, 0);
+	EXPECT_EQ(run_res.func, SPCI_YIELD_32);
+
+	/*
+	 * Attempt to lend the same area of memory.
+	 * NOTE(review): -1 presumably means "check against every VM, none
+	 * exempt" — confirm against the helper's contract.
+	 */
+	check_cannot_lend_memory(mb, constituents, ARRAY_SIZE(constituents),
+				 -1);
+	/* Attempt to share the same area of memory. */
+	check_cannot_share_memory(mb, constituents, ARRAY_SIZE(constituents),
+				  -1);
+	/* Fail to donate to any VM apart from VM1. */
+	check_cannot_donate_memory(mb, constituents, ARRAY_SIZE(constituents),
+				   SERVICE_VM1);
+	/* Fail to relinquish from any VM. */
+	check_cannot_relinquish_memory(mb, constituents,
+				       ARRAY_SIZE(constituents));
+
+	/* Now attempt to share only a portion of the same area of memory. */
+	struct spci_memory_region_constituent constituents_subsection[] = {
+		{.address = (uint64_t)pages + PAGE_SIZE * 3, .page_count = 1},
+	};
+	check_cannot_lend_memory(mb, constituents_subsection,
+				 ARRAY_SIZE(constituents_subsection), -1);
+	check_cannot_donate_memory(mb, constituents_subsection,
+				   ARRAY_SIZE(constituents_subsection),
+				   SERVICE_VM1);
+	check_cannot_relinquish_memory(mb, constituents_subsection,
+				       ARRAY_SIZE(constituents_subsection));
+
+	/* Attempt to lend again with different permissions (RO, per page). */
+	constituents[0].page_count = 1;
+	for (int i = 0; i < 2; i++) {
+		constituents[0].address = (uint64_t)pages + i * PAGE_SIZE;
+		msg_size = spci_memory_region_init(
+			mb.send, HF_PRIMARY_VM_ID, SERVICE_VM2, constituents,
+			ARRAY_SIZE(constituents), 0, 0, SPCI_MEMORY_RO_X,
+			SPCI_MEMORY_NORMAL_MEM, SPCI_MEMORY_CACHE_WRITE_BACK,
+			SPCI_MEMORY_OUTER_SHAREABLE);
+		EXPECT_SPCI_ERROR(
+			spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM2, msg_size,
+				      SPCI_MSG_SEND_LEGACY_MEMORY_LEND),
+			SPCI_INVALID_PARAMETERS);
+	}
+}
+
+/**
+ * Memory can be shared, but then no part of it can be shared or lent again.
+ */
+TEST(memory_sharing, share_twice)
+{
+	struct spci_value run_res;
+	struct mailbox_buffers mb = set_up_mailbox();
+	uint8_t *ptr = pages;
+	uint32_t msg_size;
+
+	/* The lend_twice service also covers the share case. */
+	SERVICE_SELECT(SERVICE_VM1, "spci_memory_lend_twice", mb.send);
+	SERVICE_SELECT(SERVICE_VM2, "spci_memory_lend_twice", mb.send);
+
+	/*
+	 * Initialise the memory before giving it. The second argument of
+	 * memset_s must be the true size of the destination buffer; it was
+	 * previously overstated as sizeof(pages) * 2, which defeats the
+	 * runtime bounds check the _s variant exists to provide.
+	 */
+	memset_s(ptr, sizeof(pages), 'b', PAGE_SIZE * 2);
+
+	struct spci_memory_region_constituent constituents[] = {
+		{.address = (uint64_t)pages, .page_count = 2},
+	};
+
+	msg_size = spci_memory_region_init(
+		mb.send, HF_PRIMARY_VM_ID, SERVICE_VM1, constituents,
+		ARRAY_SIZE(constituents), 0, 0, SPCI_MEMORY_RW_X,
+		SPCI_MEMORY_NORMAL_MEM, SPCI_MEMORY_CACHE_WRITE_BACK,
+		SPCI_MEMORY_OUTER_SHAREABLE);
+
+	EXPECT_EQ(spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM1, msg_size,
+				SPCI_MSG_SEND_LEGACY_MEMORY_SHARE)
+			  .func,
+		  SPCI_SUCCESS_32);
+
+	/* Let the memory be accessed. */
+	run_res = spci_run(SERVICE_VM1, 0);
+	EXPECT_EQ(run_res.func, SPCI_YIELD_32);
+
+	/*
+	 * Attempting to share or lend the same area of memory with any VM
+	 * should fail.
+	 */
+	check_cannot_share_memory(mb, constituents, ARRAY_SIZE(constituents),
+				  -1);
+	check_cannot_lend_memory(mb, constituents, ARRAY_SIZE(constituents),
+				 -1);
+	/* Fail to donate to any VM apart from VM1. */
+	check_cannot_donate_memory(mb, constituents, ARRAY_SIZE(constituents),
+				   SERVICE_VM1);
+	/* Fail to relinquish from any VM. */
+	check_cannot_relinquish_memory(mb, constituents,
+				       ARRAY_SIZE(constituents));
+
+	/* Attempt to share again with different permissions (RO, per page). */
+	constituents[0].page_count = 1;
+	for (int i = 0; i < 2; i++) {
+		constituents[0].address = (uint64_t)pages + i * PAGE_SIZE;
+		msg_size = spci_memory_region_init(
+			mb.send, HF_PRIMARY_VM_ID, SERVICE_VM2, constituents,
+			ARRAY_SIZE(constituents), 0, 0, SPCI_MEMORY_RO_X,
+			SPCI_MEMORY_NORMAL_MEM, SPCI_MEMORY_CACHE_WRITE_BACK,
+			SPCI_MEMORY_OUTER_SHAREABLE);
+		EXPECT_SPCI_ERROR(
+			spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM2, msg_size,
+				      SPCI_MSG_SEND_LEGACY_MEMORY_SHARE),
+			SPCI_INVALID_PARAMETERS);
+	}
+}
+
+/**
+ * Memory can be cleared while being shared.
+ */
+TEST(memory_sharing, share_clear)
+{
+	struct mailbox_buffers mb = set_up_mailbox();
+	uint8_t *ptr = pages;
+	uint32_t msg_size;
+	size_t i;
+
+	SERVICE_SELECT(SERVICE_VM1, "spci_memory_return", mb.send);
+
+	/*
+	 * Initialise the memory before giving it. Pass the true destination
+	 * size to memset_s (it was overstated as sizeof(pages) * 2, which
+	 * disables the bounds check the _s variant provides).
+	 */
+	memset_s(ptr, sizeof(pages), 'b', PAGE_SIZE * 2);
+
+	struct spci_memory_region_constituent constituents[] = {
+		{.address = (uint64_t)pages, .page_count = 2},
+	};
+
+	/* Share the pages with SPCI_MEMORY_REGION_FLAG_CLEAR set. */
+	msg_size = spci_memory_region_init(
+		mb.send, HF_PRIMARY_VM_ID, SERVICE_VM1, constituents,
+		ARRAY_SIZE(constituents), 0, SPCI_MEMORY_REGION_FLAG_CLEAR,
+		SPCI_MEMORY_RO_X, SPCI_MEMORY_NORMAL_MEM,
+		SPCI_MEMORY_CACHE_WRITE_BACK, SPCI_MEMORY_OUTER_SHAREABLE);
+	EXPECT_EQ(spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM1, msg_size,
+				SPCI_MSG_SEND_LEGACY_MEMORY_SHARE)
+			  .func,
+		  SPCI_SUCCESS_32);
+
+	/* Check that every byte has been cleared. */
+	for (i = 0; i < PAGE_SIZE * 2; ++i) {
+		ASSERT_EQ(ptr[i], 0);
+	}
+}
+
+/**
+ * SPCI: Verify past the upper bound of the lent region cannot be accessed.
+ */
+TEST(memory_sharing, spci_lend_check_upper_bounds)
+{
+	struct spci_value run_res;
+	struct mailbox_buffers mb = set_up_mailbox();
+	uint8_t *ptr = pages;
+	uint32_t msg_size;
+
+	SERVICE_SELECT(SERVICE_VM1, "spci_lend_check_upper_bound", mb.send);
+	SERVICE_SELECT(SERVICE_VM2, "spci_lend_check_upper_bound", mb.send);
+
+	/* Initialise the memory before lending it. */
+	memset_s(ptr, sizeof(pages), 'b', 4 * PAGE_SIZE);
+
+	/* Specify non-contiguous memory regions. */
+	struct spci_memory_region_constituent constituents[] = {
+		{.address = (uint64_t)pages, .page_count = 1},
+		{.address = (uint64_t)pages + PAGE_SIZE * 2, .page_count = 1},
+	};
+
+	/*
+	 * Specify that we want to test the first constituent of the lent
+	 * memory region. This index is read by the test service.
+	 */
+	pages[0] = 0;
+
+	msg_size = spci_memory_region_init(
+		mb.send, HF_PRIMARY_VM_ID, SERVICE_VM1, constituents,
+		ARRAY_SIZE(constituents), 0, 0, SPCI_MEMORY_RW_X,
+		SPCI_MEMORY_NORMAL_MEM, SPCI_MEMORY_CACHE_WRITE_BACK,
+		SPCI_MEMORY_OUTER_SHAREABLE);
+	EXPECT_EQ(spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM1, msg_size,
+				SPCI_MSG_SEND_LEGACY_MEMORY_LEND)
+			  .func,
+		  SPCI_SUCCESS_32);
+
+	/* The service must fault when reading past the first constituent. */
+	run_res = spci_run(SERVICE_VM1, 0);
+	EXPECT_EQ(exception_handler_receive_exception_count(&run_res, mb.recv),
+		  1);
+
+	/* Use different memory regions for verifying the second constituent. */
+	constituents[0].address = (uint64_t)pages + PAGE_SIZE * 1;
+	constituents[1].address = (uint64_t)pages + PAGE_SIZE * 3;
+
+	/*
+	 * Specify that we now want to test the second constituent of the
+	 * lent memory region.
+	 */
+	pages[PAGE_SIZE] = 1;
+
+	/* Use the secondary VM for this test as the first is now aborted. */
+	msg_size = spci_memory_region_init(
+		mb.send, HF_PRIMARY_VM_ID, SERVICE_VM2, constituents,
+		ARRAY_SIZE(constituents), 0, 0, SPCI_MEMORY_RW_X,
+		SPCI_MEMORY_NORMAL_MEM, SPCI_MEMORY_CACHE_WRITE_BACK,
+		SPCI_MEMORY_OUTER_SHAREABLE);
+	EXPECT_EQ(spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM2, msg_size,
+				SPCI_MSG_SEND_LEGACY_MEMORY_LEND)
+			  .func,
+		  SPCI_SUCCESS_32);
+
+	/* The service must fault when reading past the second constituent. */
+	run_res = spci_run(SERVICE_VM2, 0);
+	EXPECT_EQ(exception_handler_receive_exception_count(&run_res, mb.recv),
+		  1);
+}
+
+/**
+ * SPCI: Verify past the lower bound of the lent region cannot be accessed.
+ * Mirrors spci_lend_check_upper_bounds but reads below each constituent.
+ */
+TEST(memory_sharing, spci_lend_check_lower_bounds)
+{
+	struct spci_value run_res;
+	struct mailbox_buffers mb = set_up_mailbox();
+	uint8_t *ptr = pages;
+	uint32_t msg_size;
+
+	SERVICE_SELECT(SERVICE_VM1, "spci_lend_check_lower_bound", mb.send);
+	SERVICE_SELECT(SERVICE_VM2, "spci_lend_check_lower_bound", mb.send);
+
+	/* Initialise the memory before lending it. */
+	memset_s(ptr, sizeof(pages), 'b', 4 * PAGE_SIZE);
+
+	/* Specify non-contiguous memory regions. */
+	struct spci_memory_region_constituent constituents[] = {
+		{.address = (uint64_t)pages, .page_count = 1},
+		{.address = (uint64_t)pages + PAGE_SIZE * 2, .page_count = 1},
+	};
+
+	/*
+	 * Specify that we want to test the first constituent of the lent
+	 * memory region. This index is read by the test service.
+	 */
+	pages[0] = 0;
+
+	msg_size = spci_memory_region_init(
+		mb.send, HF_PRIMARY_VM_ID, SERVICE_VM1, constituents,
+		ARRAY_SIZE(constituents), 0, 0, SPCI_MEMORY_RW_X,
+		SPCI_MEMORY_NORMAL_MEM, SPCI_MEMORY_CACHE_WRITE_BACK,
+		SPCI_MEMORY_OUTER_SHAREABLE);
+	EXPECT_EQ(spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM1, msg_size,
+				SPCI_MSG_SEND_LEGACY_MEMORY_LEND)
+			  .func,
+		  SPCI_SUCCESS_32);
+
+	/* The service must fault when reading below the first constituent. */
+	run_res = spci_run(SERVICE_VM1, 0);
+	EXPECT_EQ(exception_handler_receive_exception_count(&run_res, mb.recv),
+		  1);
+
+	/* Use different memory regions for verifying the second constituent. */
+	constituents[0].address = (uint64_t)pages + PAGE_SIZE * 1;
+	constituents[1].address = (uint64_t)pages + PAGE_SIZE * 3;
+
+	/*
+	 * Specify that we now want to test the second constituent of the
+	 * lent memory region.
+	 */
+	pages[PAGE_SIZE] = 1;
+
+	/* Use the secondary VM for this test as the first is now aborted. */
+	msg_size = spci_memory_region_init(
+		mb.send, HF_PRIMARY_VM_ID, SERVICE_VM2, constituents,
+		ARRAY_SIZE(constituents), 0, 0, SPCI_MEMORY_RW_X,
+		SPCI_MEMORY_NORMAL_MEM, SPCI_MEMORY_CACHE_WRITE_BACK,
+		SPCI_MEMORY_OUTER_SHAREABLE);
+	EXPECT_EQ(spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM2, msg_size,
+				SPCI_MSG_SEND_LEGACY_MEMORY_LEND)
+			  .func,
+		  SPCI_SUCCESS_32);
+
+	/* The service must fault when reading below the second constituent. */
+	run_res = spci_run(SERVICE_VM2, 0);
+	EXPECT_EQ(exception_handler_receive_exception_count(&run_res, mb.recv),
+		  1);
+}
diff --git a/test/vmapi/primary_with_secondaries/no_services.c b/test/vmapi/primary_with_secondaries/no_services.c
new file mode 100644
index 0000000..2757df2
--- /dev/null
+++ b/test/vmapi/primary_with_secondaries/no_services.c
@@ -0,0 +1,186 @@
+/*
+ * Copyright 2018 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <stdalign.h>
+#include <stdint.h>
+
+#include "hf/mm.h"
+#include "hf/static_assert.h"
+#include "hf/std.h"
+
+#include "vmapi/hf/call.h"
+
+#include "primary_with_secondary.h"
+#include "test/hftest.h"
+#include "test/vmapi/spci.h"
+
+static alignas(PAGE_SIZE) uint8_t send_page[PAGE_SIZE];
+static alignas(PAGE_SIZE) uint8_t recv_page[PAGE_SIZE];
+static_assert(sizeof(send_page) == PAGE_SIZE, "Send page is not a page.");
+static_assert(sizeof(recv_page) == PAGE_SIZE, "Recv page is not a page.");
+
+static hf_ipaddr_t send_page_addr = (hf_ipaddr_t)send_page;
+static hf_ipaddr_t recv_page_addr = (hf_ipaddr_t)recv_page;
+
+/**
+ * Confirms the primary VM has the primary ID.
+ */
+TEST(hf_vm_get_id, primary_has_primary_id)
+{
+	EXPECT_EQ(hf_vm_get_id(), HF_PRIMARY_VM_ID);
+}
+
+/**
+ * Confirm there are 3 secondary VMs as well as this primary VM, i.e. 4 VMs
+ * in total.
+ */
+TEST(hf_vm_get_count, three_secondary_vms)
+{
+	EXPECT_EQ(hf_vm_get_count(), 4);
+}
+
+/**
+ * Confirm that secondary VM has 1 vCPU.
+ */
+TEST(hf_vcpu_get_count, secondary_has_one_vcpu)
+{
+	EXPECT_EQ(hf_vcpu_get_count(SERVICE_VM1), 1);
+}
+
+/**
+ * Confirm a vCPU count of 0 (the failure value) is returned for every
+ * reserved ID below HF_VM_ID_OFFSET.
+ */
+TEST(hf_vcpu_get_count, reserved_vm_id)
+{
+	spci_vm_id_t id;
+
+	for (id = 0; id < HF_VM_ID_OFFSET; ++id) {
+		EXPECT_EQ(hf_vcpu_get_count(id), 0);
+	}
+}
+
+/**
+ * Confirm querying the vCPU count of a nonexistent secondary VM returns 0,
+ * i.e. fails.
+ */
+TEST(hf_vcpu_get_count, large_invalid_vm_id)
+{
+	EXPECT_EQ(hf_vcpu_get_count(0xffff), 0);
+}
+
+/**
+ * The primary can't be run by the hypervisor.
+ */
+TEST(spci_run, cannot_run_primary)
+{
+	struct spci_value res = spci_run(HF_PRIMARY_VM_ID, 0);
+	EXPECT_SPCI_ERROR(res, SPCI_INVALID_PARAMETERS);
+}
+
+/**
+ * Can only run a VM that exists.
+ */
+TEST(spci_run, cannot_run_absent_secondary)
+{
+	/* 1234 is well above the 3 configured secondary VMs. */
+	struct spci_value res = spci_run(1234, 0);
+	EXPECT_SPCI_ERROR(res, SPCI_INVALID_PARAMETERS);
+}
+
+/**
+ * Can only run a vCPU that exists.
+ */
+TEST(spci_run, cannot_run_absent_vcpu)
+{
+	/* SERVICE_VM1 has a single vCPU, so index 1234 is out of range. */
+	struct spci_value res = spci_run(SERVICE_VM1, 1234);
+	EXPECT_SPCI_ERROR(res, SPCI_INVALID_PARAMETERS);
+}
+
+/**
+ * The configured send/receive addresses can't be device memory.
+ * NOTE(review): assumes the IPAs at PAGE_SIZE and PAGE_SIZE * 2 fall in
+ * device memory on the test platform — confirm against the memory map.
+ */
+TEST(spci_rxtx_map, fails_with_device_memory)
+{
+	EXPECT_SPCI_ERROR(spci_rxtx_map(PAGE_SIZE, PAGE_SIZE * 2), SPCI_DENIED);
+}
+
+/**
+ * The configured send/receive addresses can't be unaligned.
+ */
+TEST(spci_rxtx_map, fails_with_unaligned_pointer)
+{
+	uint8_t maybe_aligned[2];
+	hf_ipaddr_t unaligned_addr = (hf_ipaddr_t)&maybe_aligned[1];
+	hf_ipaddr_t aligned_addr = (hf_ipaddr_t)send_page;
+
+	/* Check that the address is unaligned. */
+	ASSERT_EQ(unaligned_addr & 1, 1);
+
+	/* All combinations with an unaligned address must be rejected. */
+	EXPECT_SPCI_ERROR(spci_rxtx_map(aligned_addr, unaligned_addr),
+			  SPCI_INVALID_PARAMETERS);
+	EXPECT_SPCI_ERROR(spci_rxtx_map(unaligned_addr, aligned_addr),
+			  SPCI_INVALID_PARAMETERS);
+	EXPECT_SPCI_ERROR(spci_rxtx_map(unaligned_addr, unaligned_addr),
+			  SPCI_INVALID_PARAMETERS);
+}
+
+/**
+ * The configured send/receive addresses can't be the same page.
+ */
+TEST(spci_rxtx_map, fails_with_same_page)
+{
+	EXPECT_SPCI_ERROR(spci_rxtx_map(send_page_addr, send_page_addr),
+			  SPCI_INVALID_PARAMETERS);
+	EXPECT_SPCI_ERROR(spci_rxtx_map(recv_page_addr, recv_page_addr),
+			  SPCI_INVALID_PARAMETERS);
+}
+
+/**
+ * The configuration of the send/receive addresses can only happen once.
+ * NOTE(review): this test and `succeeds` both map the same pages; relies on
+ * each TEST starting from fresh VM state — confirm hftest semantics.
+ */
+TEST(spci_rxtx_map, fails_if_already_succeeded)
+{
+	EXPECT_EQ(spci_rxtx_map(send_page_addr, recv_page_addr).func,
+		  SPCI_SUCCESS_32);
+	EXPECT_SPCI_ERROR(spci_rxtx_map(send_page_addr, recv_page_addr),
+			  SPCI_DENIED);
+}
+
+/**
+ * The configuration of the send/receive address is successful with valid
+ * arguments.
+ */
+TEST(spci_rxtx_map, succeeds)
+{
+	EXPECT_EQ(spci_rxtx_map(send_page_addr, recv_page_addr).func,
+		  SPCI_SUCCESS_32);
+}
+
+/**
+ * The primary receives messages from spci_run(), so blocking reception via
+ * spci_msg_wait() must not succeed in the primary.
+ */
+TEST(hf_mailbox_receive, cannot_receive_from_primary_blocking)
+{
+	struct spci_value res = spci_msg_wait();
+	EXPECT_NE(res.func, SPCI_SUCCESS_32);
+}
+
+/**
+ * The primary receives messages from spci_run(), so non-blocking reception
+ * via spci_msg_poll() must not succeed in the primary either.
+ */
+TEST(hf_mailbox_receive, cannot_receive_from_primary_non_blocking)
+{
+	struct spci_value res = spci_msg_poll();
+	EXPECT_NE(res.func, SPCI_SUCCESS_32);
+}
diff --git a/test/vmapi/primary_with_secondaries/perfmon.c b/test/vmapi/primary_with_secondaries/perfmon.c
new file mode 100644
index 0000000..ad43de9
--- /dev/null
+++ b/test/vmapi/primary_with_secondaries/perfmon.c
@@ -0,0 +1,230 @@
+/*
+ * Copyright 2019 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "../../src/arch/aarch64/hypervisor/perfmon.h"
+
+#include "../../src/arch/aarch64/hypervisor/sysregs.h"
+#include "primary_with_secondary.h"
+#include "sysregs.h"
+#include "test/vmapi/spci.h"
+
+/**
+ * Runs the "perfmon_secondary_basic" service, which exercises performance
+ * monitor register access from a secondary VM and yields on success.
+ */
+TEST(perfmon, secondary_basic)
+{
+	struct spci_value run_res;
+	struct mailbox_buffers mb = set_up_mailbox();
+
+	SERVICE_SELECT(SERVICE_VM1, "perfmon_secondary_basic", mb.send);
+
+	run_res = spci_run(SERVICE_VM1, 0);
+	EXPECT_EQ(run_res.func, SPCI_YIELD_32);
+}
+
+/**
+ * Attempts to access performance monitor registers for read, without
+ * validating their value.
+ */
+TEST(perfmon, primary_basic)
+{
+	EXPECT_EQ(hf_vm_get_id(), HF_PRIMARY_VM_ID);
+
+	TRY_READ(PMCEID0_EL0);
+	TRY_READ(PMCEID1_EL0);
+	TRY_READ(PMCCFILTR_EL0);
+	TRY_READ(PMCR_EL0);
+}
+
+/**
+ * Tests a few performance counter registers for read and write, and checks
+ * that the expected value is written/read.
+ */
+TEST(perfmon, primary_read_write)
+{
+	uintreg_t pmcr_el0 = read_msr(PMCR_EL0);
+	uintreg_t perf_mon_count = GET_PMCR_EL0_N(pmcr_el0);
+
+	EXPECT_EQ(hf_vm_get_id(), HF_PRIMARY_VM_ID);
+
+	/*
+	 * Ensure that there are enough performance counters in the underlying
+	 * uArch for this test to pass.
+	 */
+	EXPECT_GE(perf_mon_count, 4);
+
+	/* Write then read back the same value from the cycle counter. */
+	TRY_WRITE_READ(PMCCNTR_EL0, 0xaaaa);
+
+	/* Clear every PMU interrupt-enable bit; none should remain set. */
+	write_msr(PMINTENCLR_EL1, 0xffff);
+	CHECK_READ(PMINTENSET_EL1, 0);
+
+	/*
+	 * Enable the first and second performance counters.
+	 * Bits set in PMINTENSET_EL1 can be read in PMINTENCLR_EL1.
+	 */
+	write_msr(PMINTENSET_EL1, 0x3);
+	CHECK_READ(PMINTENCLR_EL1, 0x3);
+
+	/*
+	 * Enable the third and fourth performance counters.
+	 * Writes to PMINTENSET_EL1 do not clear already set bits.
+	 */
+	write_msr(PMINTENSET_EL1, 0xc);
+	CHECK_READ(PMINTENCLR_EL1, 0xf);
+}
+
+/**
+ * Attempts to read all performance counters supported by the current CPU
+ * configuration.
+ */
+/* NOLINTNEXTLINE(readability-function-size) */
+TEST(perfmon, primary_counters)
+{
+	uintreg_t pmcr_el0 = read_msr(PMCR_EL0);
+	uintreg_t perf_mon_count = GET_PMCR_EL0_N(pmcr_el0);
+
+	EXPECT_EQ(hf_vm_get_id(), HF_PRIMARY_VM_ID);
+
+	/* No event counters implemented; nothing to exercise. */
+	if (perf_mon_count == 0) {
+		return;
+	}
+
+	/*
+	 * Switch on the highest implemented counter index (N - 1). Each case
+	 * deliberately falls through so counters N-1 down to 0 are all
+	 * exercised. System register names must be compile-time literals,
+	 * which is why this cannot be written as a loop.
+	 */
+	switch (perf_mon_count - 1) {
+	default:
+		FAIL("More performance monitor registers than supported.");
+	case 30:
+		TRY_READ(PMEVCNTR30_EL0);
+		TRY_WRITE_READ(PMEVTYPER30_EL0, 0x1);
+		/* fallthrough */
+	case 29:
+		TRY_READ(PMEVCNTR29_EL0);
+		TRY_WRITE_READ(PMEVTYPER29_EL0, 0x1);
+		/* fallthrough */
+	case 28:
+		TRY_READ(PMEVCNTR28_EL0);
+		TRY_WRITE_READ(PMEVTYPER28_EL0, 0x1);
+		/* fallthrough */
+	case 27:
+		TRY_READ(PMEVCNTR27_EL0);
+		TRY_WRITE_READ(PMEVTYPER27_EL0, 0x1);
+		/* fallthrough */
+	case 26:
+		TRY_READ(PMEVCNTR26_EL0);
+		TRY_WRITE_READ(PMEVTYPER26_EL0, 0x1);
+		/* fallthrough */
+	case 25:
+		TRY_READ(PMEVCNTR25_EL0);
+		TRY_WRITE_READ(PMEVTYPER25_EL0, 0x1);
+		/* fallthrough */
+	case 24:
+		TRY_READ(PMEVCNTR24_EL0);
+		TRY_WRITE_READ(PMEVTYPER24_EL0, 0x1);
+		/* fallthrough */
+	case 23:
+		TRY_READ(PMEVCNTR23_EL0);
+		TRY_WRITE_READ(PMEVTYPER23_EL0, 0x1);
+		/* fallthrough */
+	case 22:
+		TRY_READ(PMEVCNTR22_EL0);
+		TRY_WRITE_READ(PMEVTYPER22_EL0, 0x1);
+		/* fallthrough */
+	case 21:
+		TRY_READ(PMEVCNTR21_EL0);
+		TRY_WRITE_READ(PMEVTYPER21_EL0, 0x1);
+		/* fallthrough */
+	case 20:
+		TRY_READ(PMEVCNTR20_EL0);
+		TRY_WRITE_READ(PMEVTYPER20_EL0, 0x1);
+		/* fallthrough */
+	case 19:
+		TRY_READ(PMEVCNTR19_EL0);
+		TRY_WRITE_READ(PMEVTYPER19_EL0, 0x1);
+		/* fallthrough */
+	case 18:
+		TRY_READ(PMEVCNTR18_EL0);
+		TRY_WRITE_READ(PMEVTYPER18_EL0, 0x1);
+		/* fallthrough */
+	case 17:
+		TRY_READ(PMEVCNTR17_EL0);
+		TRY_WRITE_READ(PMEVTYPER17_EL0, 0x1);
+		/* fallthrough */
+	case 16:
+		TRY_READ(PMEVCNTR16_EL0);
+		TRY_WRITE_READ(PMEVTYPER16_EL0, 0x1);
+		/* fallthrough */
+	case 15:
+		TRY_READ(PMEVCNTR15_EL0);
+		TRY_WRITE_READ(PMEVTYPER15_EL0, 0x1);
+		/* fallthrough */
+	case 14:
+		TRY_READ(PMEVCNTR14_EL0);
+		TRY_WRITE_READ(PMEVTYPER14_EL0, 0x1);
+		/* fallthrough */
+	case 13:
+		TRY_READ(PMEVCNTR13_EL0);
+		TRY_WRITE_READ(PMEVTYPER13_EL0, 0x1);
+		/* fallthrough */
+	case 12:
+		TRY_READ(PMEVCNTR12_EL0);
+		TRY_WRITE_READ(PMEVTYPER12_EL0, 0x1);
+		/* fallthrough */
+	case 11:
+		TRY_READ(PMEVCNTR11_EL0);
+		TRY_WRITE_READ(PMEVTYPER11_EL0, 0x1);
+		/* fallthrough */
+	case 10:
+		TRY_READ(PMEVCNTR10_EL0);
+		TRY_WRITE_READ(PMEVTYPER10_EL0, 0x1);
+		/* fallthrough */
+	case 9:
+		TRY_READ(PMEVCNTR9_EL0);
+		TRY_WRITE_READ(PMEVTYPER9_EL0, 0x1);
+		/* fallthrough */
+	case 8:
+		TRY_READ(PMEVCNTR8_EL0);
+		TRY_WRITE_READ(PMEVTYPER8_EL0, 0x1);
+		/* fallthrough */
+	case 7:
+		TRY_READ(PMEVCNTR7_EL0);
+		TRY_WRITE_READ(PMEVTYPER7_EL0, 0x1);
+		/* fallthrough */
+	case 6:
+		TRY_READ(PMEVCNTR6_EL0);
+		TRY_WRITE_READ(PMEVTYPER6_EL0, 0x1);
+		/* fallthrough */
+	case 5:
+		TRY_READ(PMEVCNTR5_EL0);
+		TRY_WRITE_READ(PMEVTYPER5_EL0, 0x1);
+		/* fallthrough */
+	case 4:
+		TRY_READ(PMEVCNTR4_EL0);
+		TRY_WRITE_READ(PMEVTYPER4_EL0, 0x1);
+		/* fallthrough */
+	case 3:
+		TRY_READ(PMEVCNTR3_EL0);
+		TRY_WRITE_READ(PMEVTYPER3_EL0, 0x1);
+		/* fallthrough */
+	case 2:
+		TRY_READ(PMEVCNTR2_EL0);
+		TRY_WRITE_READ(PMEVTYPER2_EL0, 0x1);
+		/* fallthrough */
+	case 1:
+		TRY_READ(PMEVCNTR1_EL0);
+		TRY_WRITE_READ(PMEVTYPER1_EL0, 0x1);
+		/* fallthrough */
+	case 0:
+		TRY_READ(PMEVCNTR0_EL0);
+		TRY_WRITE_READ(PMEVTYPER0_EL0, 0x1);
+		break;
+	}
+}
diff --git a/test/vmapi/primary_with_secondaries/run_race.c b/test/vmapi/primary_with_secondaries/run_race.c
new file mode 100644
index 0000000..222f17a
--- /dev/null
+++ b/test/vmapi/primary_with_secondaries/run_race.c
@@ -0,0 +1,104 @@
+/*
+ * Copyright 2018 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <stdalign.h>
+#include <stdint.h>
+
+#include "hf/arch/vm/power_mgmt.h"
+
+#include "hf/mm.h"
+#include "hf/std.h"
+
+#include "vmapi/hf/call.h"
+
+#include "primary_with_secondary.h"
+#include "test/hftest.h"
+#include "test/vmapi/spci.h"
+
+/**
+ * Iterates trying to run the vCPU of the secondary VM. Returns when a message
+ * of non-zero length is received.
+ */
+static bool run_loop(struct mailbox_buffers *mb)
+{
+	struct spci_value run_res;
+	bool ok = false;
+
+	for (;;) {
+		/*
+		 * Run until it manages to schedule vCPU on this CPU. SPCI_BUSY
+		 * means the other physical CPU currently holds the vCPU.
+		 */
+		do {
+			run_res = spci_run(SERVICE_VM1, 0);
+		} while (run_res.func == SPCI_ERROR_32 &&
+			 run_res.arg2 == SPCI_BUSY);
+
+		/* Break out if we received a message with non-zero length. */
+		if (run_res.func == SPCI_MSG_SEND_32 &&
+		    spci_msg_send_size(run_res) != 0) {
+			break;
+		}
+
+		/* Clear mailbox so that next message can be received. */
+		EXPECT_EQ(spci_rx_release().func, SPCI_SUCCESS_32);
+	}
+
+	/*
+	 * Copies the contents of the received boolean to the return value.
+	 * If the size does not match, `ok` stays false and the test fails.
+	 */
+	if (spci_msg_send_size(run_res) == sizeof(ok)) {
+		ok = *(bool *)mb->recv;
+	}
+
+	EXPECT_EQ(spci_rx_release().func, SPCI_SUCCESS_32);
+
+	return ok;
+}
+
+/**
+ * This is the entry point of the additional primary VM vCPU. It just calls
+ * the run loop so that two CPUs compete for the chance to run a secondary VM.
+ */
+static void vm_cpu_entry(uintptr_t arg)
+{
+	run_loop((struct mailbox_buffers *)arg);
+}
+
+/*
+ * After each test the RX buffer must already have been released, so a
+ * further release attempt is expected to be denied.
+ */
+TEAR_DOWN(vcpu_state)
+{
+	EXPECT_SPCI_ERROR(spci_rx_release(), SPCI_DENIED);
+}
+
+/**
+ * This test tries to run the same secondary vCPU from two different physical
+ * CPUs concurrently. The vCPU checks that the state is ok while it bounces
+ * between the physical CPUs.
+ *
+ * Test is marked long-running because our implementation of spin-locks does
+ * not perform well under QEMU.
+ */
+TEST_LONG_RUNNING(vcpu_state, concurrent_save_restore)
+{
+	/* Static: the second CPU keeps using these after this frame exits. */
+	alignas(4096) static char stack[4096];
+	static struct mailbox_buffers mb;
+
+	mb = set_up_mailbox();
+
+	SERVICE_SELECT(SERVICE_VM1, "check_state", mb.send);
+
+	/* Start second vCPU. */
+	ASSERT_TRUE(hftest_cpu_start(hftest_get_cpu_id(1), stack, sizeof(stack),
+				     vm_cpu_entry, (uintptr_t)&mb));
+
+	/* Run on a loop until the secondary VM is done. */
+	EXPECT_TRUE(run_loop(&mb));
+}
diff --git a/test/vmapi/primary_with_secondaries/services/BUILD.gn b/test/vmapi/primary_with_secondaries/services/BUILD.gn
new file mode 100644
index 0000000..d4a78c2
--- /dev/null
+++ b/test/vmapi/primary_with_secondaries/services/BUILD.gn
@@ -0,0 +1,265 @@
+# Copyright 2018 The Hafnium Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import("//build/image/image.gni")
+
+# Service to expose race conditions when running a vCPU.
+source_set("check_state") {
+ testonly = true
+ public_configs = [ "//test/hftest:hftest_config" ]
+
+ sources = [
+ "check_state.c",
+ ]
+
+ deps = [
+ "//src/arch/aarch64/hftest:state",
+ ]
+}
+
+# Service to try to access EL1 debug registers.
+source_set("debug_el1") {
+ testonly = true
+ public_configs = [ "//test/hftest:hftest_config" ]
+
+ sources = [
+ "debug_el1.c",
+ ]
+}
+
+# Service to try to access performance monitor registers.
+source_set("perfmon") {
+ testonly = true
+ public_configs = [ "//test/hftest:hftest_config" ]
+
+ sources = [
+ "perfmon.c",
+ ]
+}
+
+# Service to listen for messages and echo them back to the sender.
+source_set("echo") {
+ testonly = true
+ public_configs = [ "//test/hftest:hftest_config" ]
+
+ sources = [
+ "echo.c",
+ ]
+}
+
+# Echo service that waits for recipient to become writable.
+source_set("echo_with_notification") {
+ testonly = true
+ public_configs = [ "//test/hftest:hftest_config" ]
+
+ sources = [
+ "echo_with_notification.c",
+ ]
+
+ deps = [
+ "//src/arch/aarch64/hftest:interrupts",
+ ]
+}
+
+# Service for floating point register save/restore checks.
+source_set("floating_point") {
+ testonly = true
+ public_configs = [ "//test/hftest:hftest_config" ]
+
+ sources = [
+ "floating_point.c",
+ ]
+
+ deps = [
+ "//src/arch/aarch64/hftest:registers",
+ ]
+}
+
+# Services related to memory sharing.
+source_set("memory") {
+ testonly = true
+ public_configs = [
+ "..:config",
+ "//test/hftest:hftest_config",
+ ]
+ deps = [
+ "//test/vmapi/common",
+ "//vmlib",
+ ]
+
+ sources = [
+ "memory.c",
+ ]
+}
+
+# Services related to VMs that access unmapped memory.
+source_set("unmapped") {
+ testonly = true
+ public_configs = [ "//test/hftest:hftest_config" ]
+
+ sources = [
+ "unmapped.c",
+ ]
+
+ deps = [
+ "//test/vmapi/common:common",
+ ]
+}
+
+# Services related to the boot process for VMs.
+source_set("boot") {
+ testonly = true
+ public_configs = [ "//test/hftest:hftest_config" ]
+
+ sources = [
+ "boot.c",
+ ]
+
+ deps = [
+ "//test/vmapi/common:common",
+ ]
+}
+
+# Service that can be interrupted.
+source_set("interruptible") {
+ testonly = true
+ public_configs = [
+ "..:config",
+ "//test/hftest:hftest_config",
+ ]
+
+ sources = [
+ "interruptible.c",
+ "interruptible_echo.c",
+ ]
+
+ deps = [
+ "//src/arch/aarch64/hftest:interrupts",
+ ]
+}
+
+# Service to check that hf_mailbox_receive can't block when there are pending
+# interrupts.
+source_set("receive_block") {
+ testonly = true
+ public_configs = [
+ "..:config",
+ "//test/hftest:hftest_config",
+ ]
+ sources = [
+ "receive_block.c",
+ ]
+ deps = [
+ "//src/arch/aarch64:arch",
+ "//src/arch/aarch64/hftest:interrupts",
+ "//test/vmapi/common",
+ ]
+}
+
+# Service to listen for messages and forward them on to another.
+source_set("relay") {
+ testonly = true
+ public_configs = [ "//test/hftest:hftest_config" ]
+
+ sources = [
+ "relay.c",
+ ]
+}
+
+# Service to start a second vCPU and send messages from both.
+source_set("smp") {
+ testonly = true
+ public_configs = [
+ "..:config",
+ "//test/hftest:hftest_config",
+ ]
+ sources = [
+ "smp.c",
+ ]
+}
+
+# Service to check that WFI is a no-op when there are pending interrupts.
+source_set("wfi") {
+ testonly = true
+ public_configs = [
+ "..:config",
+ "//test/hftest:hftest_config",
+ ]
+ sources = [
+ "wfi.c",
+ ]
+ deps = [
+ "//src/arch/aarch64/hftest:interrupts",
+ ]
+}
+
+# Service to receive messages in a secondary VM and ensure that the header
+# fields are correctly set.
+source_set("spci_check") {
+ testonly = true
+ public_configs = [
+ "..:config",
+ "//test/hftest:hftest_config",
+ ]
+ deps = [
+ "//test/vmapi/common",
+ ]
+
+ sources = [
+ "spci_check.c",
+ ]
+}
+
+# Group services together into VMs.
+
+vm_kernel("service_vm1") {
+ testonly = true
+
+ deps = [
+ ":boot",
+ ":check_state",
+ ":debug_el1",
+ ":echo",
+ ":echo_with_notification",
+ ":floating_point",
+ ":interruptible",
+ ":memory",
+ ":perfmon",
+ ":receive_block",
+ ":relay",
+ ":spci_check",
+ ":unmapped",
+ ":wfi",
+ "//test/hftest:hftest_secondary_vm",
+ ]
+}
+
+vm_kernel("service_vm2") {
+ testonly = true
+
+ deps = [
+ ":interruptible",
+ ":memory",
+ ":relay",
+ "//test/hftest:hftest_secondary_vm",
+ ]
+}
+
+vm_kernel("service_vm3") {
+ testonly = true
+
+ deps = [
+ ":smp",
+ "//test/hftest:hftest_secondary_vm",
+ ]
+}
diff --git a/test/vmapi/primary_with_secondaries/services/boot.c b/test/vmapi/primary_with_secondaries/services/boot.c
new file mode 100644
index 0000000..a7b9b66
--- /dev/null
+++ b/test/vmapi/primary_with_secondaries/services/boot.c
@@ -0,0 +1,77 @@
+/*
+ * Copyright 2019 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "hf/arch/vm/interrupts.h"
+
+#include "hf/mm.h"
+#include "hf/std.h"
+
+#include "vmapi/hf/call.h"
+
+#include "test/hftest.h"
+#include "test/vmapi/exception_handler.h"
+
+/*
+ * This must match the size specified for services1 in
+ * //test/vmapi/primary_with_secondaries:primary_with_secondaries_test.
+ */
+#define SECONDARY_MEMORY_SIZE 1048576
+
+extern uint8_t volatile text_begin[];
+
+TEST_SERVICE(boot_memory)
+{
+	uint8_t checksum = 0;
+
+	/* Check that the size passed in by Hafnium is what is expected. */
+	ASSERT_EQ(SERVICE_MEMORY_SIZE(), SECONDARY_MEMORY_SIZE);
+
+	/*
+	 * Check that we can read all memory up to the given size. Calculate a
+	 * basic checksum and check that it is non-zero, as a double-check that
+	 * we are actually reading something.
+	 *
+	 * NOTE(review): the uint8_t sum can legitimately wrap to 0; this
+	 * assumes the image contents make that outcome unlikely.
+	 */
+	for (size_t i = 0; i < SERVICE_MEMORY_SIZE(); ++i) {
+		checksum += text_begin[i];
+	}
+	ASSERT_NE(checksum, 0);
+	dlog("Checksum of all memory is %d\n", checksum);
+
+	spci_yield();
+}
+
+TEST_SERVICE(boot_memory_underrun)
+{
+	/* The exception handler yields, which the primary interprets as pass. */
+	exception_setup(NULL, exception_handler_yield);
+	/*
+	 * Try to read memory below the start of the image. This should result
+	 * in the VM trapping and yielding.
+	 */
+	dlog("Read memory below limit: %d\n", text_begin[-1]);
+	FAIL("Managed to read memory below limit");
+}
+
+TEST_SERVICE(boot_memory_overrun)
+{
+	/* The exception handler yields, which the primary interprets as pass. */
+	exception_setup(NULL, exception_handler_yield);
+	/*
+	 * Try to read memory above the limit defined by memory_size. This
+	 * should result in the VM trapping and yielding.
+	 */
+	dlog("Read memory above limit: %d\n",
+	     text_begin[SERVICE_MEMORY_SIZE()]);
+	FAIL("Managed to read memory above limit");
+}
diff --git a/test/vmapi/primary_with_secondaries/services/check_state.c b/test/vmapi/primary_with_secondaries/services/check_state.c
new file mode 100644
index 0000000..da27cc6
--- /dev/null
+++ b/test/vmapi/primary_with_secondaries/services/check_state.c
@@ -0,0 +1,70 @@
+/*
+ * Copyright 2018 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "hf/arch/vm/state.h"
+
+#include "hf/std.h"
+
+#include "vmapi/hf/call.h"
+
+#include "test/hftest.h"
+
+void send_with_retry(spci_vm_id_t sender_vm_id, spci_vm_id_t target_vm_id,
+ uint32_t size)
+{
+ struct spci_value res;
+
+ do {
+ res = spci_msg_send(sender_vm_id, target_vm_id, size, 0);
+ } while (res.func != SPCI_SUCCESS_32);
+}
+
+/**
+ * This service repeatedly takes the following steps: sets the per-CPU pointer
+ * to some value, makes a hypervisor call, check that the value is still what it
+ * was set to.
+ *
+ * This loop helps detect bugs where the hypervisor inadvertently destroys
+ * state.
+ *
+ * At the end of its iterations, the service reports the result to the primary
+ * VM, which then fails or succeeds the test.
+ */
+TEST_SERVICE(check_state)
+{
+ size_t i;
+ bool ok = true;
+ static volatile uintptr_t expected;
+ static volatile uintptr_t actual;
+
+ for (i = 0; i < 100000; i++) {
+ /*
+ * We store the expected/actual values in volatile static
+ * variables to avoid relying on registers that may have been
+ * modified by the hypervisor.
+ */
+ expected = i;
+ per_cpu_ptr_set(expected);
+ send_with_retry(hf_vm_get_id(), HF_PRIMARY_VM_ID, 0);
+ actual = per_cpu_ptr_get();
+ ok &= expected == actual;
+ }
+
+ /* Send two replies, one for each physical CPU. */
+ memcpy_s(SERVICE_SEND_BUFFER(), SPCI_MSG_PAYLOAD_MAX, &ok, sizeof(ok));
+ send_with_retry(hf_vm_get_id(), HF_PRIMARY_VM_ID, sizeof(ok));
+ send_with_retry(hf_vm_get_id(), HF_PRIMARY_VM_ID, sizeof(ok));
+}
diff --git a/test/vmapi/primary_with_secondaries/services/debug_el1.c b/test/vmapi/primary_with_secondaries/services/debug_el1.c
new file mode 100644
index 0000000..1e09f76
--- /dev/null
+++ b/test/vmapi/primary_with_secondaries/services/debug_el1.c
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2019 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "hf/arch/vm/interrupts.h"
+
+#include "hf/dlog.h"
+
+#include "../sysregs.h"
+#include "test/vmapi/exception_handler.h"
+
/**
 * Reads a set of EL1 debug registers from a secondary VM. Each TRY_READ is
 * expected to trap; the installed handler skips the faulting instruction,
 * so after the five reads the recorded exception count must be exactly 5.
 */
TEST_SERVICE(debug_el1_secondary_basic)
{
	exception_setup(NULL, exception_handler_skip_instruction);

	/* This service must be running as a secondary, not the primary. */
	EXPECT_GT(hf_vm_get_id(), HF_PRIMARY_VM_ID);
	TRY_READ(MDCCINT_EL1);
	TRY_READ(DBGBCR0_EL1);
	TRY_READ(DBGBVR0_EL1);
	TRY_READ(DBGWCR0_EL1);
	TRY_READ(DBGWVR0_EL1);

	/* One trapped access per TRY_READ above. */
	EXPECT_EQ(exception_handler_get_num(), 5);
	spci_yield();
}
diff --git a/test/vmapi/primary_with_secondaries/services/echo.c b/test/vmapi/primary_with_secondaries/services/echo.c
new file mode 100644
index 0000000..b1c99d9
--- /dev/null
+++ b/test/vmapi/primary_with_secondaries/services/echo.c
@@ -0,0 +1,42 @@
+/*
+ * Copyright 2018 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "hf/spci.h"
+#include "hf/std.h"
+
+#include "vmapi/hf/call.h"
+
+#include "test/hftest.h"
+
+TEST_SERVICE(echo)
+{
+ /* Loop, echo messages back to the sender. */
+ for (;;) {
+ struct spci_value ret = spci_msg_wait();
+ spci_vm_id_t target_vm_id = spci_msg_send_receiver(ret);
+ spci_vm_id_t source_vm_id = spci_msg_send_sender(ret);
+ void *send_buf = SERVICE_SEND_BUFFER();
+ void *recv_buf = SERVICE_RECV_BUFFER();
+
+ ASSERT_EQ(ret.func, SPCI_MSG_SEND_32);
+ memcpy_s(send_buf, SPCI_MSG_PAYLOAD_MAX, recv_buf,
+ spci_msg_send_size(ret));
+
+ EXPECT_EQ(spci_rx_release().func, SPCI_SUCCESS_32);
+ spci_msg_send(target_vm_id, source_vm_id,
+ spci_msg_send_size(ret), 0);
+ }
+}
diff --git a/test/vmapi/primary_with_secondaries/services/echo_with_notification.c b/test/vmapi/primary_with_secondaries/services/echo_with_notification.c
new file mode 100644
index 0000000..a74391b
--- /dev/null
+++ b/test/vmapi/primary_with_secondaries/services/echo_with_notification.c
@@ -0,0 +1,74 @@
+/*
+ * Copyright 2018 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "hf/arch/irq.h"
+#include "hf/arch/vm/interrupts.h"
+
+#include "hf/spci.h"
+#include "hf/std.h"
+
+#include "vmapi/hf/call.h"
+
+#include "../msr.h"
+#include "test/hftest.h"
+
/* IRQ handler: read the pending interrupt id, which clears it. */
static void irq(void)
{
	hf_interrupt_get();
}
+
/**
 * Waits until `vmid`'s mailbox becomes writable.
 *
 * hf_mailbox_writable_get() appears to return the id of a VM whose mailbox
 * has become writable, or -1 if there is none (inferred from this usage —
 * confirm against the hypervisor API). On -1 we block until the
 * mailbox-writable interrupt arrives, then enable and immediately disable
 * IRQs so the pending interrupt is taken by the handler before re-polling.
 */
static void wait_for_vm(uint32_t vmid)
{
	for (;;) {
		int64_t w = hf_mailbox_writable_get();
		if (w == vmid) {
			return;
		}

		if (w == -1) {
			interrupt_wait();
			/* Take the pending IRQ, then mask again. */
			arch_irq_enable();
			arch_irq_disable();
		}
	}
}
+
+TEST_SERVICE(echo_with_notification)
+{
+ exception_setup(irq, NULL);
+ hf_interrupt_enable(HF_MAILBOX_WRITABLE_INTID, true);
+
+ /* Loop, echo messages back to the sender. */
+ for (;;) {
+ void *send_buf = SERVICE_SEND_BUFFER();
+ void *recv_buf = SERVICE_RECV_BUFFER();
+ struct spci_value ret = spci_msg_wait();
+ spci_vm_id_t target_vm_id = spci_msg_send_receiver(ret);
+ spci_vm_id_t source_vm_id = spci_msg_send_sender(ret);
+
+ memcpy_s(send_buf, SPCI_MSG_PAYLOAD_MAX, recv_buf,
+ spci_msg_send_size(ret));
+
+ while (spci_msg_send(target_vm_id, source_vm_id,
+ spci_msg_send_size(ret),
+ SPCI_MSG_SEND_NOTIFY)
+ .func != SPCI_SUCCESS_32) {
+ wait_for_vm(source_vm_id);
+ }
+
+ EXPECT_EQ(spci_rx_release().func, SPCI_SUCCESS_32);
+ }
+}
diff --git a/test/vmapi/primary_with_secondaries/services/floating_point.c b/test/vmapi/primary_with_secondaries/services/floating_point.c
new file mode 100644
index 0000000..f5c0bcc
--- /dev/null
+++ b/test/vmapi/primary_with_secondaries/services/floating_point.c
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2019 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "hf/arch/std.h"
+#include "hf/arch/vm/registers.h"
+
+#include "hf/spci.h"
+
+#include "vmapi/hf/call.h"
+
+#include "../msr.h"
+#include "test/hftest.h"
+
/**
 * Fills the FP registers with a known value, yields to the primary, and then
 * checks that the value survived the world switch.
 */
TEST_SERVICE(fp_fill)
{
	const double value = 0.75;
	fill_fp_registers(value);
	EXPECT_EQ(spci_yield().func, SPCI_SUCCESS_32);

	/* FP register state must have been preserved across the yield. */
	ASSERT_TRUE(check_fp_register(value));
	spci_yield();
}
+
/**
 * Writes a non-default rounding mode to FPCR, yields to the primary, and
 * then checks that the register still holds the written value.
 */
TEST_SERVICE(fp_fpcr)
{
	uintreg_t value = 3 << 22; /* Set RMode to RZ */
	write_msr(fpcr, value);
	EXPECT_EQ(spci_yield().func, SPCI_SUCCESS_32);

	/* FPCR must have been preserved across the yield. */
	ASSERT_EQ(read_msr(fpcr), value);
	spci_yield();
}
diff --git a/test/vmapi/primary_with_secondaries/services/interruptible.c b/test/vmapi/primary_with_secondaries/services/interruptible.c
new file mode 100644
index 0000000..fc79d20
--- /dev/null
+++ b/test/vmapi/primary_with_secondaries/services/interruptible.c
@@ -0,0 +1,100 @@
+/*
+ * Copyright 2018 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "hf/arch/irq.h"
+#include "hf/arch/vm/interrupts.h"
+
+#include "hf/dlog.h"
+#include "hf/std.h"
+
+#include "vmapi/hf/call.h"
+#include "vmapi/hf/spci.h"
+
+#include "primary_with_secondary.h"
+#include "test/hftest.h"
+
+/*
+ * Secondary VM that sends messages in response to interrupts, and interrupts
+ * itself when it receives a message.
+ */
+
/*
 * IRQ handler: reads the pending interrupt id, patches it into a fixed
 * message template and sends the message to the primary VM.
 *
 * NOTE(review): the two-character decimal formatting assumes
 * interrupt_id < 100; larger ids would produce non-digit characters.
 */
static void irq(void)
{
	uint32_t interrupt_id = hf_interrupt_get();
	char buffer[] = "Got IRQ xx.";
	int size = sizeof(buffer); /* Includes the NUL terminator. */
	dlog("secondary IRQ %d from current\n", interrupt_id);
	/* Replace the "xx" placeholder with the decimal interrupt id. */
	buffer[8] = '0' + interrupt_id / 10;
	buffer[9] = '0' + interrupt_id % 10;
	memcpy_s(SERVICE_SEND_BUFFER(), SPCI_MSG_PAYLOAD_MAX, buffer, size);
	spci_msg_send(hf_vm_get_id(), HF_PRIMARY_VM_ID, size, 0);
	dlog("secondary IRQ %d ended\n", interrupt_id);
}
+
+/**
+ * Try to receive a message from the mailbox, blocking if necessary, and
+ * retrying if interrupted.
+ */
+struct spci_value mailbox_receive_retry()
+{
+ struct spci_value received;
+
+ do {
+ received = spci_msg_wait();
+ } while (received.func == SPCI_ERROR_32 &&
+ received.arg2 == SPCI_INTERRUPTED);
+
+ return received;
+}
+
/**
 * Service loop: blocks for messages from the primary and reacts to two known
 * payloads — "Ping" makes the service inject an interrupt into itself, and
 * "Enable interrupt C" enables EXTERNAL_INTERRUPT_ID_C. Any other message
 * fails the test.
 */
TEST_SERVICE(interruptible)
{
	spci_vm_id_t this_vm_id = hf_vm_get_id();
	void *recv_buf = SERVICE_RECV_BUFFER();

	/* Route IRQs to the handler above and unmask the interrupts used. */
	exception_setup(irq, NULL);
	hf_interrupt_enable(SELF_INTERRUPT_ID, true);
	hf_interrupt_enable(EXTERNAL_INTERRUPT_ID_A, true);
	hf_interrupt_enable(EXTERNAL_INTERRUPT_ID_B, true);
	arch_irq_enable();

	for (;;) {
		const char ping_message[] = "Ping";
		const char enable_message[] = "Enable interrupt C";

		struct spci_value ret = mailbox_receive_retry();

		ASSERT_EQ(ret.func, SPCI_MSG_SEND_32);
		/* Sizes include the NUL terminator, so matches are exact. */
		if (spci_msg_send_sender(ret) == HF_PRIMARY_VM_ID &&
		    spci_msg_send_size(ret) == sizeof(ping_message) &&
		    memcmp(recv_buf, ping_message, sizeof(ping_message)) == 0) {
			/* Interrupt ourselves */
			hf_interrupt_inject(this_vm_id, 0, SELF_INTERRUPT_ID);
		} else if (spci_msg_send_sender(ret) == HF_PRIMARY_VM_ID &&
			   spci_msg_send_size(ret) == sizeof(enable_message) &&
			   memcmp(recv_buf, enable_message,
				  sizeof(enable_message)) == 0) {
			/* Enable interrupt ID C. */
			hf_interrupt_enable(EXTERNAL_INTERRUPT_ID_C, true);
		} else {
			dlog("Got unexpected message from VM %d, size %d.\n",
			     spci_msg_send_sender(ret),
			     spci_msg_send_size(ret));
			FAIL("Unexpected message");
		}
		EXPECT_EQ(spci_rx_release().func, SPCI_SUCCESS_32);
	}
}
diff --git a/test/vmapi/primary_with_secondaries/services/interruptible_echo.c b/test/vmapi/primary_with_secondaries/services/interruptible_echo.c
new file mode 100644
index 0000000..8903990
--- /dev/null
+++ b/test/vmapi/primary_with_secondaries/services/interruptible_echo.c
@@ -0,0 +1,60 @@
+/*
+ * Copyright 2019 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "hf/arch/irq.h"
+#include "hf/arch/vm/interrupts.h"
+
+#include "hf/dlog.h"
+#include "hf/std.h"
+
+#include "vmapi/hf/call.h"
+
+#include "primary_with_secondary.h"
+#include "test/hftest.h"
+
/* IRQ handler: acknowledge the pending interrupt and do nothing else. */
static void irq(void)
{
	/* Clear the interrupt. */
	hf_interrupt_get();
}
+
/**
 * Echo service that may be interrupted while waiting: on an
 * SPCI_INTERRUPTED error it yields first (so the interruption is observable
 * by the primary) and then retries the wait, before echoing the payload
 * back to the primary.
 */
TEST_SERVICE(interruptible_echo)
{
	exception_setup(irq, NULL);
	hf_interrupt_enable(EXTERNAL_INTERRUPT_ID_A, true);
	arch_irq_enable();

	for (;;) {
		struct spci_value res = spci_msg_wait();
		void *message = SERVICE_SEND_BUFFER();
		void *recv_message = SERVICE_RECV_BUFFER();

		/* Retry if interrupted but made visible with the yield. */
		while (res.func == SPCI_ERROR_32 &&
		       res.arg2 == SPCI_INTERRUPTED) {
			spci_yield();
			res = spci_msg_wait();
		}

		ASSERT_EQ(res.func, SPCI_MSG_SEND_32);
		memcpy_s(message, SPCI_MSG_PAYLOAD_MAX, recv_message,
			 spci_msg_send_size(res));

		EXPECT_EQ(spci_rx_release().func, SPCI_SUCCESS_32);
		/*
		 * NOTE(review): the sender id is hard-coded to SERVICE_VM1
		 * rather than using hf_vm_get_id(); presumably this service
		 * only runs as VM1 — confirm against the test setup.
		 */
		spci_msg_send(SERVICE_VM1, HF_PRIMARY_VM_ID,
			      spci_msg_send_size(res), 0);
	}
}
diff --git a/test/vmapi/primary_with_secondaries/services/memory.c b/test/vmapi/primary_with_secondaries/services/memory.c
new file mode 100644
index 0000000..34909cc
--- /dev/null
+++ b/test/vmapi/primary_with_secondaries/services/memory.c
@@ -0,0 +1,841 @@
+/*
+ * Copyright 2018 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "hf/arch/vm/interrupts.h"
+
+#include "hf/mm.h"
+#include "hf/std.h"
+
+#include "vmapi/hf/call.h"
+
+#include "primary_with_secondary.h"
+#include "test/hftest.h"
+#include "test/vmapi/exception_handler.h"
+#include "test/vmapi/spci.h"
+
+alignas(PAGE_SIZE) static uint8_t page[PAGE_SIZE];
+
/**
 * Repeatedly receives a shared memory region descriptor, verifies the
 * memory arrives cleared, yields so the sender can populate it, then
 * increments every byte and signals completion back to the sender.
 */
TEST_SERVICE(memory_increment)
{
	/* Loop, writing message to the shared memory. */
	for (;;) {
		struct spci_value ret = spci_msg_wait();
		uint8_t *ptr;
		size_t i;
		void *recv_buf = SERVICE_RECV_BUFFER();
		/* The message payload is an SPCI memory region descriptor. */
		struct spci_memory_region *memory_region =
			(struct spci_memory_region *)recv_buf;
		struct spci_memory_region_constituent *constituents =
			spci_memory_region_get_constituents(memory_region);
		spci_vm_id_t sender = memory_region->sender;

		EXPECT_EQ(ret.func, SPCI_MSG_SEND_32);
		EXPECT_EQ(spci_msg_send_attributes(ret),
			  SPCI_MSG_SEND_LEGACY_MEMORY_SHARE);

		/* Only the first constituent is exercised here. */
		ptr = (uint8_t *)constituents[0].address;

		/* Check the memory was cleared. */
		for (i = 0; i < PAGE_SIZE; ++i) {
			ASSERT_EQ(ptr[i], 0);
		}

		/* Allow the memory to be populated. */
		EXPECT_EQ(spci_yield().func, SPCI_SUCCESS_32);

		/* Increment each byte of memory. */
		for (i = 0; i < PAGE_SIZE; ++i) {
			++ptr[i];
		}

		/* Signal completion and reset. */
		EXPECT_EQ(spci_rx_release().func, SPCI_SUCCESS_32);
		spci_msg_send(hf_vm_get_id(), sender, sizeof(ptr), 0);
	}
}
+
+TEST_SERVICE(give_memory_and_fault)
+{
+ void *send_buf = SERVICE_SEND_BUFFER();
+
+ /* Give memory to the primary. */
+ struct spci_memory_region_constituent constituents[] = {
+ {.address = (uint64_t)&page, .page_count = 1},
+ };
+ uint32_t msg_size = spci_memory_region_init(
+ send_buf, hf_vm_get_id(), HF_PRIMARY_VM_ID, constituents,
+ ARRAY_SIZE(constituents), 0, SPCI_MEMORY_REGION_FLAG_CLEAR,
+ SPCI_MEMORY_RW_X, SPCI_MEMORY_NORMAL_MEM,
+ SPCI_MEMORY_CACHE_WRITE_BACK, SPCI_MEMORY_OUTER_SHAREABLE);
+ EXPECT_EQ(spci_msg_send(hf_vm_get_id(), HF_PRIMARY_VM_ID, msg_size,
+ SPCI_MSG_SEND_LEGACY_MEMORY_DONATE)
+ .func,
+ SPCI_SUCCESS_32);
+
+ exception_setup(NULL, exception_handler_yield);
+
+ /* Try using the memory that isn't valid unless it's been returned. */
+ page[16] = 123;
+
+ FAIL("Exception not generated by invalid access.");
+}
+
+TEST_SERVICE(lend_memory_and_fault)
+{
+ void *send_buf = SERVICE_SEND_BUFFER();
+
+ /* Lend memory to the primary. */
+ struct spci_memory_region_constituent constituents[] = {
+ {.address = (uint64_t)&page, .page_count = 1},
+ };
+ uint32_t msg_size = spci_memory_region_init(
+ send_buf, hf_vm_get_id(), HF_PRIMARY_VM_ID, constituents,
+ ARRAY_SIZE(constituents), 0, SPCI_MEMORY_REGION_FLAG_CLEAR,
+ SPCI_MEMORY_RW_X, SPCI_MEMORY_NORMAL_MEM,
+ SPCI_MEMORY_CACHE_WRITE_BACK, SPCI_MEMORY_OUTER_SHAREABLE);
+ EXPECT_EQ(spci_msg_send(hf_vm_get_id(), HF_PRIMARY_VM_ID, msg_size,
+ SPCI_MSG_SEND_LEGACY_MEMORY_LEND)
+ .func,
+ SPCI_SUCCESS_32);
+
+ exception_setup(NULL, exception_handler_yield);
+
+ /* Try using the memory that isn't valid unless it's been returned. */
+ page[633] = 180;
+
+ FAIL("Exception not generated by invalid access.");
+}
+
/**
 * Receives memory (sent via any of the legacy memory-sharing mechanisms),
 * increments every byte to prove access, donates the region back to the
 * sender, then writes to it again. That final write must fault (the handler
 * yields) unless the memory was re-shared, in which case FAIL is reached.
 */
TEST_SERVICE(spci_memory_return)
{
	exception_setup(NULL, exception_handler_yield);

	/* Loop, giving memory back to the sender. */
	for (;;) {
		struct spci_value ret = spci_msg_wait();
		uint8_t *ptr;
		uint32_t msg_size;
		size_t i;
		void *recv_buf = SERVICE_RECV_BUFFER();
		void *send_buf = SERVICE_SEND_BUFFER();
		struct spci_memory_region *memory_region;
		struct spci_memory_region_constituent *constituents;

		EXPECT_EQ(ret.func, SPCI_MSG_SEND_32);
		/*
		 * The memory may have been sent in one of several different
		 * ways, but there shouldn't be any other attributes to the
		 * message.
		 */
		EXPECT_NE(spci_msg_send_attributes(ret) &
				  SPCI_MSG_SEND_LEGACY_MEMORY_MASK,
			  0);
		EXPECT_EQ(spci_msg_send_attributes(ret) &
				  ~SPCI_MSG_SEND_LEGACY_MEMORY_MASK,
			  0);

		memory_region = (struct spci_memory_region *)recv_buf;
		constituents =
			spci_memory_region_get_constituents(memory_region);
		ptr = (uint8_t *)constituents[0].address;

		/* Check that one has access to the shared region. */
		for (i = 0; i < PAGE_SIZE; ++i) {
			ptr[i]++;
		}

		/* Give the memory back and notify the sender. */
		msg_size = spci_memory_region_init(
			send_buf, hf_vm_get_id(), memory_region->sender,
			constituents, memory_region->constituent_count, 0, 0,
			SPCI_MEMORY_RW_X, SPCI_MEMORY_NORMAL_MEM,
			SPCI_MEMORY_CACHE_WRITE_BACK,
			SPCI_MEMORY_OUTER_SHAREABLE);
		EXPECT_EQ(spci_rx_release().func, SPCI_SUCCESS_32);
		EXPECT_EQ(spci_msg_send(spci_msg_send_receiver(ret),
					spci_msg_send_sender(ret), msg_size,
					SPCI_MSG_SEND_LEGACY_MEMORY_DONATE)
				  .func,
			  SPCI_SUCCESS_32);

		/*
		 * Try and access the memory which will cause a fault unless the
		 * memory has been shared back again.
		 */
		ptr[0] = 123;

		FAIL("Exception not generated by invalid access.");
	}
}
+
/**
 * Receives a donated region and accesses the byte at offset PAGE_SIZE from
 * the constituent under test, which is expected to lie past the end of that
 * constituent (the sender controls constituent sizes — assumed one page).
 * The access must fault into exception_handler_yield. The index of the
 * constituent to test is passed in the first byte of constituent 0.
 */
TEST_SERVICE(spci_donate_check_upper_bound)
{
	exception_setup(NULL, exception_handler_yield);

	for (;;) {
		struct spci_value ret = spci_msg_wait();
		uint8_t *ptr;
		uint8_t index;
		void *recv_buf = SERVICE_RECV_BUFFER();
		struct spci_memory_region *memory_region;
		struct spci_memory_region_constituent *constituents;

		/* Start each iteration with a clean exception count. */
		exception_handler_reset();

		EXPECT_EQ(ret.func, SPCI_MSG_SEND_32);
		EXPECT_EQ(spci_msg_send_attributes(ret),
			  SPCI_MSG_SEND_LEGACY_MEMORY_DONATE);

		memory_region = (struct spci_memory_region *)recv_buf;
		constituents =
			spci_memory_region_get_constituents(memory_region);

		/* Choose which constituent we want to test. */
		index = *(uint8_t *)constituents[0].address;
		ptr = (uint8_t *)constituents[index].address;
		EXPECT_EQ(spci_rx_release().func, SPCI_SUCCESS_32);

		/*
		 * Check that one cannot access out of bounds after donated
		 * region. This should trigger the exception handler.
		 */
		ptr[PAGE_SIZE]++;
	}
}
+
/**
 * Receives a donated region and accesses the byte immediately below the
 * constituent under test. The access must fault into
 * exception_handler_yield. The index of the constituent to test is passed
 * in the first byte of constituent 0.
 */
TEST_SERVICE(spci_donate_check_lower_bound)
{
	exception_setup(NULL, exception_handler_yield);

	for (;;) {
		struct spci_value ret = spci_msg_wait();
		uint8_t *ptr;
		uint8_t index;
		void *recv_buf = SERVICE_RECV_BUFFER();
		struct spci_memory_region *memory_region;
		struct spci_memory_region_constituent *constituents;

		/* Start each iteration with a clean exception count. */
		exception_handler_reset();

		EXPECT_EQ(ret.func, SPCI_MSG_SEND_32);
		EXPECT_EQ(spci_msg_send_attributes(ret),
			  SPCI_MSG_SEND_LEGACY_MEMORY_DONATE);

		memory_region = (struct spci_memory_region *)recv_buf;
		constituents =
			spci_memory_region_get_constituents(memory_region);

		/* Choose which constituent we want to test. */
		index = *(uint8_t *)constituents[0].address;
		ptr = (uint8_t *)constituents[index].address;
		EXPECT_EQ(spci_rx_release().func, SPCI_SUCCESS_32);

		/*
		 * Check that one cannot access out of bounds below the donated
		 * region. This should trigger the exception handler.
		 */
		ptr[-1]++;
	}
}
+
/**
 * Receives a donated region, donates it onwards to SERVICE_VM2, then writes
 * to it. The write must fault (ownership moved to VM2), so reaching FAIL
 * means this VM unexpectedly retained access after the donation.
 */
TEST_SERVICE(spci_donate_secondary_and_fault)
{
	struct spci_value ret = spci_msg_wait();
	uint8_t *ptr;
	uint32_t msg_size;
	void *recv_buf = SERVICE_RECV_BUFFER();
	void *send_buf = SERVICE_SEND_BUFFER();
	struct spci_memory_region *memory_region =
		(struct spci_memory_region *)recv_buf;
	struct spci_memory_region_constituent *constituents =
		spci_memory_region_get_constituents(memory_region);

	EXPECT_EQ(ret.func, SPCI_MSG_SEND_32);
	EXPECT_EQ(spci_msg_send_attributes(ret),
		  SPCI_MSG_SEND_LEGACY_MEMORY_DONATE);

	exception_setup(NULL, exception_handler_yield);

	ptr = (uint8_t *)constituents[0].address;

	/* Donate memory to next VM. */
	msg_size = spci_memory_region_init(
		send_buf, hf_vm_get_id(), SERVICE_VM2, constituents,
		memory_region->constituent_count, 0, 0, SPCI_MEMORY_RW_X,
		SPCI_MEMORY_NORMAL_MEM, SPCI_MEMORY_CACHE_WRITE_BACK,
		SPCI_MEMORY_OUTER_SHAREABLE);
	EXPECT_EQ(spci_rx_release().func, SPCI_SUCCESS_32);
	EXPECT_EQ(spci_msg_send(spci_msg_send_receiver(ret), SERVICE_VM2,
				msg_size, SPCI_MSG_SEND_LEGACY_MEMORY_DONATE)
			  .func,
		  SPCI_SUCCESS_32);

	/* Ensure that we are unable to modify memory any more. */
	ptr[0] = 'c';

	FAIL("Exception not generated by invalid access.");
}
+
/**
 * Receives a donated region, yields so the primary can attempt to re-donate
 * it, returns the memory to the primary, then verifies that a second
 * donation of the same region from this VM is rejected with
 * SPCI_INVALID_PARAMETERS.
 */
TEST_SERVICE(spci_donate_twice)
{
	uint32_t msg_size;
	struct spci_value ret = spci_msg_wait();
	void *recv_buf = SERVICE_RECV_BUFFER();
	void *send_buf = SERVICE_SEND_BUFFER();
	struct spci_memory_region *memory_region =
		(struct spci_memory_region *)recv_buf;
	/* Copy the (single) constituent before the RX buffer is released. */
	struct spci_memory_region_constituent constituent =
		spci_memory_region_get_constituents(memory_region)[0];

	EXPECT_EQ(ret.func, SPCI_MSG_SEND_32);
	EXPECT_EQ(spci_msg_send_attributes(ret),
		  SPCI_MSG_SEND_LEGACY_MEMORY_DONATE);
	EXPECT_EQ(spci_rx_release().func, SPCI_SUCCESS_32);

	/* Yield to allow attempt to re donate from primary. */
	spci_yield();

	/*
	 * Give the memory back and notify the sender.
	 * NOTE(review): the sender id is hard-coded to SERVICE_VM1 here but
	 * hf_vm_get_id() is used below; presumably this service only runs as
	 * VM1 — confirm against the test setup.
	 */
	msg_size = spci_memory_region_init(
		send_buf, SERVICE_VM1, HF_PRIMARY_VM_ID, &constituent, 1, 0, 0,
		SPCI_MEMORY_RW_X, SPCI_MEMORY_NORMAL_MEM,
		SPCI_MEMORY_CACHE_WRITE_BACK, SPCI_MEMORY_OUTER_SHAREABLE);
	EXPECT_EQ(spci_msg_send(SERVICE_VM1, HF_PRIMARY_VM_ID, msg_size,
				SPCI_MSG_SEND_LEGACY_MEMORY_DONATE)
			  .func,
		  SPCI_SUCCESS_32);

	/* Attempt to donate the memory to another VM. */
	msg_size = spci_memory_region_init(
		send_buf, hf_vm_get_id(), SERVICE_VM2, &constituent, 1, 0, 0,
		SPCI_MEMORY_RW_X, SPCI_MEMORY_NORMAL_MEM,
		SPCI_MEMORY_CACHE_WRITE_BACK, SPCI_MEMORY_OUTER_SHAREABLE);
	EXPECT_SPCI_ERROR(
		spci_msg_send(spci_msg_send_receiver(ret), SERVICE_VM2,
			      msg_size, SPCI_MSG_SEND_LEGACY_MEMORY_DONATE),
		SPCI_INVALID_PARAMETERS);

	spci_yield();
}
+
+/**
+ * Continually receive memory, check if we have access and ensure it is not
+ * changed by a third party.
+ */
+TEST_SERVICE(spci_memory_receive)
+{
+ for (;;) {
+ struct spci_value ret = spci_msg_wait();
+ uint8_t *ptr;
+ void *recv_buf = SERVICE_RECV_BUFFER();
+ struct spci_memory_region *memory_region =
+ (struct spci_memory_region *)recv_buf;
+ struct spci_memory_region_constituent *constituents =
+ spci_memory_region_get_constituents(memory_region);
+
+ EXPECT_EQ(ret.func, SPCI_MSG_SEND_32);
+ EXPECT_EQ(spci_msg_send_attributes(ret),
+ SPCI_MSG_SEND_LEGACY_MEMORY_DONATE);
+
+ ptr = (uint8_t *)constituents[0].address;
+ EXPECT_EQ(spci_rx_release().func, SPCI_SUCCESS_32);
+ ptr[0] = 'd';
+ spci_yield();
+
+ /* Ensure memory has not changed. */
+ EXPECT_EQ(ptr[0], 'd');
+ spci_yield();
+ }
+}
+
/**
 * Receives a donated region, returns it to the primary, then checks that a
 * donation whose sender field falsely claims to be the primary is rejected
 * with SPCI_INVALID_PARAMETERS.
 */
TEST_SERVICE(spci_donate_invalid_source)
{
	uint32_t msg_size;
	struct spci_value ret = spci_msg_wait();
	void *recv_buf = SERVICE_RECV_BUFFER();
	void *send_buf = SERVICE_SEND_BUFFER();
	struct spci_memory_region *memory_region =
		(struct spci_memory_region *)recv_buf;
	struct spci_memory_region_constituent *constituents =
		spci_memory_region_get_constituents(memory_region);

	EXPECT_EQ(ret.func, SPCI_MSG_SEND_32);
	EXPECT_EQ(spci_msg_send_attributes(ret),
		  SPCI_MSG_SEND_LEGACY_MEMORY_DONATE);

	/* Give the memory back and notify the sender. */
	msg_size = spci_memory_region_init(
		send_buf, hf_vm_get_id(), HF_PRIMARY_VM_ID, constituents,
		memory_region->constituent_count, 0, 0, SPCI_MEMORY_RW_X,
		SPCI_MEMORY_NORMAL_MEM, SPCI_MEMORY_CACHE_WRITE_BACK,
		SPCI_MEMORY_OUTER_SHAREABLE);
	EXPECT_EQ(spci_msg_send(spci_msg_send_receiver(ret), HF_PRIMARY_VM_ID,
				msg_size, SPCI_MSG_SEND_LEGACY_MEMORY_DONATE)
			  .func,
		  SPCI_SUCCESS_32);

	/* Fail to donate the memory from the primary to VM2. */
	msg_size = spci_memory_region_init(
		send_buf, HF_PRIMARY_VM_ID, SERVICE_VM2, constituents,
		memory_region->constituent_count, 0, 0, SPCI_MEMORY_RW_X,
		SPCI_MEMORY_NORMAL_MEM, SPCI_MEMORY_CACHE_WRITE_BACK,
		SPCI_MEMORY_OUTER_SHAREABLE);
	EXPECT_EQ(spci_rx_release().func, SPCI_SUCCESS_32);
	EXPECT_SPCI_ERROR(spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM2, msg_size,
					SPCI_MSG_SEND_LEGACY_MEMORY_DONATE),
			  SPCI_INVALID_PARAMETERS);
	spci_yield();
}
+
/**
 * Receives lent/shared memory, increments every byte of the first two
 * constituents to prove access, relinquishes the memory back to the sender,
 * then writes to it again — that write must fault (the handler yields)
 * unless the memory was re-shared.
 *
 * NOTE(review): constituents[1] is read unconditionally, so the sender is
 * assumed to always provide at least two constituents — confirm against the
 * corresponding primary-side tests.
 */
TEST_SERVICE(spci_memory_lend_relinquish)
{
	exception_setup(NULL, exception_handler_yield);

	/* Loop, giving memory back to the sender. */
	for (;;) {
		struct spci_value ret = spci_msg_wait();
		uint8_t *ptr;
		uint8_t *ptr2;
		uint32_t count;
		uint32_t count2;
		uint32_t msg_size;
		size_t i;

		void *recv_buf = SERVICE_RECV_BUFFER();
		void *send_buf = SERVICE_SEND_BUFFER();
		struct spci_memory_region *memory_region =
			(struct spci_memory_region *)recv_buf;
		struct spci_memory_region_constituent *constituents =
			spci_memory_region_get_constituents(memory_region);

		EXPECT_EQ(ret.func, SPCI_MSG_SEND_32);
		/*
		 * The memory may have been sent in one of several different
		 * ways, but there shouldn't be any other attributes to the
		 * message.
		 */
		EXPECT_NE(spci_msg_send_attributes(ret) &
				  SPCI_MSG_SEND_LEGACY_MEMORY_MASK,
			  0);
		EXPECT_EQ(spci_msg_send_attributes(ret) &
				  ~SPCI_MSG_SEND_LEGACY_MEMORY_MASK,
			  0);

		ptr = (uint8_t *)constituents[0].address;
		count = constituents[0].page_count;
		ptr2 = (uint8_t *)constituents[1].address;
		count2 = constituents[1].page_count;

		/* Check that one has access to the shared region. */
		for (i = 0; i < PAGE_SIZE * count; ++i) {
			ptr[i]++;
		}
		for (i = 0; i < PAGE_SIZE * count2; ++i) {
			ptr2[i]++;
		}

		/* Give the memory back and notify the sender. */
		msg_size = spci_memory_region_init(
			send_buf, hf_vm_get_id(), memory_region->sender,
			constituents, memory_region->constituent_count, 0, 0,
			SPCI_MEMORY_RW_X, SPCI_MEMORY_NORMAL_MEM,
			SPCI_MEMORY_CACHE_WRITE_BACK,
			SPCI_MEMORY_OUTER_SHAREABLE);
		/* Relevant information read, mailbox can be cleared. */
		EXPECT_EQ(spci_rx_release().func, SPCI_SUCCESS_32);
		EXPECT_EQ(spci_msg_send(spci_msg_send_receiver(ret),
					spci_msg_send_sender(ret), msg_size,
					SPCI_MSG_SEND_LEGACY_MEMORY_RELINQUISH)
				  .func,
			  SPCI_SUCCESS_32);

		/*
		 * Try and access the memory which will cause a fault unless the
		 * memory has been shared back again.
		 */
		ptr[0] = 123;
	}
}
+
/**
 * Ensure that we can't relinquish donated memory: after receiving a donated
 * region, a RELINQUISH send must fail with SPCI_INVALID_PARAMETERS and the
 * VM must still have write access to the region afterwards.
 */
TEST_SERVICE(spci_memory_donate_relinquish)
{
	for (;;) {
		struct spci_value ret = spci_msg_wait();
		uint8_t *ptr;
		uint32_t msg_size;
		size_t i;

		void *recv_buf = SERVICE_RECV_BUFFER();
		void *send_buf = SERVICE_SEND_BUFFER();
		struct spci_memory_region *memory_region =
			(struct spci_memory_region *)recv_buf;
		struct spci_memory_region_constituent *constituents =
			spci_memory_region_get_constituents(memory_region);

		EXPECT_EQ(ret.func, SPCI_MSG_SEND_32);
		EXPECT_EQ(spci_msg_send_attributes(ret),
			  SPCI_MSG_SEND_LEGACY_MEMORY_DONATE);

		ptr = (uint8_t *)constituents[0].address;

		/* Check that one has access to the shared region. */
		for (i = 0; i < PAGE_SIZE; ++i) {
			ptr[i]++;
		}
		/* Attempt to relinquish; this must be rejected for donations. */
		msg_size = spci_memory_region_init(
			send_buf, hf_vm_get_id(), HF_PRIMARY_VM_ID,
			constituents, memory_region->constituent_count, 0, 0,
			SPCI_MEMORY_RW_X, SPCI_MEMORY_NORMAL_MEM,
			SPCI_MEMORY_CACHE_WRITE_BACK,
			SPCI_MEMORY_OUTER_SHAREABLE);
		EXPECT_EQ(spci_rx_release().func, SPCI_SUCCESS_32);
		EXPECT_SPCI_ERROR(
			spci_msg_send(spci_msg_send_receiver(ret),
				      HF_PRIMARY_VM_ID, msg_size,
				      SPCI_MSG_SEND_LEGACY_MEMORY_RELINQUISH),
			SPCI_INVALID_PARAMETERS);

		/* Ensure we still have access to the memory. */
		ptr[0] = 123;

		spci_yield();
	}
}
+
/**
 * Receives lent memory and verifies several invalid operations are rejected
 * with SPCI_INVALID_PARAMETERS: relinquishing on behalf of the primary,
 * and (after legitimately relinquishing the memory back) lending or sharing
 * from the primary to another secondary with a spoofed sender id.
 */
TEST_SERVICE(spci_lend_invalid_source)
{
	uint32_t msg_size;
	struct spci_value ret = spci_msg_wait();

	void *recv_buf = SERVICE_RECV_BUFFER();
	void *send_buf = SERVICE_SEND_BUFFER();
	struct spci_memory_region *memory_region =
		(struct spci_memory_region *)recv_buf;
	struct spci_memory_region_constituent *constituents =
		spci_memory_region_get_constituents(memory_region);

	EXPECT_EQ(ret.func, SPCI_MSG_SEND_32);
	EXPECT_EQ(spci_msg_send_attributes(ret),
		  SPCI_MSG_SEND_LEGACY_MEMORY_LEND);

	/* Attempt to relinquish from primary VM. */
	msg_size = spci_memory_region_init(
		send_buf, HF_PRIMARY_VM_ID, hf_vm_get_id(), constituents,
		memory_region->constituent_count, 0, 0, SPCI_MEMORY_RW_X,
		SPCI_MEMORY_NORMAL_MEM, SPCI_MEMORY_CACHE_WRITE_BACK,
		SPCI_MEMORY_OUTER_SHAREABLE);
	EXPECT_SPCI_ERROR(
		spci_msg_send(HF_PRIMARY_VM_ID, hf_vm_get_id(), msg_size,
			      SPCI_MSG_SEND_LEGACY_MEMORY_RELINQUISH),
		SPCI_INVALID_PARAMETERS);

	/* Give the memory back and notify the sender. */
	msg_size = spci_memory_region_init(
		send_buf, hf_vm_get_id(), HF_PRIMARY_VM_ID, constituents,
		memory_region->constituent_count, 0, 0, SPCI_MEMORY_RW_X,
		SPCI_MEMORY_NORMAL_MEM, SPCI_MEMORY_CACHE_WRITE_BACK,
		SPCI_MEMORY_OUTER_SHAREABLE);
	EXPECT_EQ(spci_msg_send(hf_vm_get_id(), HF_PRIMARY_VM_ID, msg_size,
				SPCI_MSG_SEND_LEGACY_MEMORY_RELINQUISH)
			  .func,
		  SPCI_SUCCESS_32);

	/* Ensure we cannot lend from the primary to another secondary. */
	msg_size = spci_memory_region_init(
		send_buf, HF_PRIMARY_VM_ID, SERVICE_VM2, constituents,
		memory_region->constituent_count, 0, 0, SPCI_MEMORY_RW_X,
		SPCI_MEMORY_NORMAL_MEM, SPCI_MEMORY_CACHE_WRITE_BACK,
		SPCI_MEMORY_OUTER_SHAREABLE);
	EXPECT_SPCI_ERROR(spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM2, msg_size,
					SPCI_MSG_SEND_LEGACY_MEMORY_LEND),
			  SPCI_INVALID_PARAMETERS);

	/* Ensure we cannot share from the primary to another secondary. */
	msg_size = spci_memory_region_init(
		send_buf, HF_PRIMARY_VM_ID, SERVICE_VM2, constituents,
		memory_region->constituent_count, 0, 0, SPCI_MEMORY_RW_X,
		SPCI_MEMORY_NORMAL_MEM, SPCI_MEMORY_CACHE_WRITE_BACK,
		SPCI_MEMORY_OUTER_SHAREABLE);
	EXPECT_EQ(spci_rx_release().func, SPCI_SUCCESS_32);
	EXPECT_SPCI_ERROR(spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM2, msg_size,
					SPCI_MSG_SEND_LEGACY_MEMORY_SHARE),
			  SPCI_INVALID_PARAMETERS);

	spci_yield();
}
+
+/**
+ * Attempt to execute an instruction from the lent memory.
+ * Loops forever: each iteration receives a lent region, branches into it,
+ * then relinquishes it back to the primary.
+ */
+TEST_SERVICE(spci_memory_lend_relinquish_X)
+{
+	exception_setup(NULL, exception_handler_yield);
+
+	for (;;) {
+		struct spci_value ret = spci_msg_wait();
+		uint64_t *ptr;
+		uint32_t msg_size;
+
+		void *recv_buf = SERVICE_RECV_BUFFER();
+		void *send_buf = SERVICE_SEND_BUFFER();
+		struct spci_memory_region *memory_region =
+			(struct spci_memory_region *)recv_buf;
+		struct spci_memory_region_constituent *constituents =
+			spci_memory_region_get_constituents(memory_region);
+
+		EXPECT_EQ(ret.func, SPCI_MSG_SEND_32);
+		EXPECT_EQ(spci_msg_send_attributes(ret),
+			  SPCI_MSG_SEND_LEGACY_MEMORY_LEND);
+
+		ptr = (uint64_t *)constituents[0].address;
+		/*
+		 * Verify that the instruction in memory is the encoded RET
+		 * instruction (AArch64 encoding 0xD65F03C0).
+		 */
+		EXPECT_EQ(*ptr, 0xD65F03C0);
+		/* Try to execute instruction from the shared memory region. */
+		__asm__ volatile("blr %0" ::"r"(ptr));
+
+		/* Release the memory again. */
+		msg_size = spci_memory_region_init(
+			send_buf, hf_vm_get_id(), HF_PRIMARY_VM_ID,
+			constituents, memory_region->constituent_count, 0, 0,
+			SPCI_MEMORY_RW_X, SPCI_MEMORY_NORMAL_MEM,
+			SPCI_MEMORY_CACHE_WRITE_BACK,
+			SPCI_MEMORY_OUTER_SHAREABLE);
+		EXPECT_EQ(spci_rx_release().func, SPCI_SUCCESS_32);
+		EXPECT_EQ(spci_msg_send(spci_msg_send_receiver(ret),
+					HF_PRIMARY_VM_ID, msg_size,
+					SPCI_MSG_SEND_LEGACY_MEMORY_RELINQUISH)
+				  .func,
+			  SPCI_SUCCESS_32);
+	}
+}
+
+/**
+ * Attempt to read and write to a shared page.
+ * Loops forever: each iteration receives a region, checks read access,
+ * yields so the primary can verify shared access, writes to the region and
+ * then relinquishes it.
+ */
+TEST_SERVICE(spci_memory_lend_relinquish_RW)
+{
+	exception_setup(NULL, exception_handler_yield);
+
+	for (;;) {
+		struct spci_value ret = spci_msg_wait();
+		uint8_t *ptr;
+		uint32_t msg_size;
+		size_t i;
+
+		void *recv_buf = SERVICE_RECV_BUFFER();
+		void *send_buf = SERVICE_SEND_BUFFER();
+		struct spci_memory_region *memory_region =
+			(struct spci_memory_region *)recv_buf;
+		struct spci_memory_region_constituent *constituents =
+			spci_memory_region_get_constituents(memory_region);
+		/*
+		 * Copy the first constituent before releasing the RX buffer,
+		 * since the buffer contents are no longer ours afterwards.
+		 */
+		struct spci_memory_region_constituent constituent_copy =
+			constituents[0];
+
+		EXPECT_EQ(ret.func, SPCI_MSG_SEND_32);
+		/*
+		 * The memory may have been sent in one of several different
+		 * ways, but there shouldn't be any other attributes to the
+		 * message.
+		 */
+		EXPECT_NE(spci_msg_send_attributes(ret) &
+				  SPCI_MSG_SEND_LEGACY_MEMORY_MASK,
+			  0);
+		EXPECT_EQ(spci_msg_send_attributes(ret) &
+				  ~SPCI_MSG_SEND_LEGACY_MEMORY_MASK,
+			  0);
+
+		EXPECT_EQ(spci_rx_release().func, SPCI_SUCCESS_32);
+
+		ptr = (uint8_t *)constituent_copy.address;
+
+		/* Check that we have read access. */
+		for (i = 0; i < PAGE_SIZE; ++i) {
+			EXPECT_EQ(ptr[i], 'b');
+		}
+
+		/* Return control to primary, to verify shared access. */
+		spci_yield();
+
+		/* Attempt to modify the memory. */
+		for (i = 0; i < PAGE_SIZE; ++i) {
+			ptr[i]++;
+		}
+
+		msg_size = spci_memory_region_init(
+			send_buf, hf_vm_get_id(), HF_PRIMARY_VM_ID,
+			&constituent_copy, 1, 0, 0, SPCI_MEMORY_RW_X,
+			SPCI_MEMORY_NORMAL_MEM, SPCI_MEMORY_CACHE_WRITE_BACK,
+			SPCI_MEMORY_OUTER_SHAREABLE);
+		EXPECT_EQ(spci_msg_send(spci_msg_send_receiver(ret),
+					HF_PRIMARY_VM_ID, msg_size,
+					SPCI_MSG_SEND_LEGACY_MEMORY_RELINQUISH)
+				  .func,
+			  SPCI_SUCCESS_32);
+	}
+}
+
+/**
+ * Attempt to modify above the upper bound for the lent memory.
+ * The access must fault; the exception handler yields back to the primary,
+ * so reaching FAIL means no exception was generated.
+ */
+TEST_SERVICE(spci_lend_check_upper_bound)
+{
+	struct spci_value ret = spci_msg_wait();
+	uint8_t *ptr;
+	uint8_t index;
+
+	void *recv_buf = SERVICE_RECV_BUFFER();
+	struct spci_memory_region *memory_region =
+		(struct spci_memory_region *)recv_buf;
+	struct spci_memory_region_constituent *constituents =
+		spci_memory_region_get_constituents(memory_region);
+
+	EXPECT_EQ(ret.func, SPCI_MSG_SEND_32);
+	EXPECT_EQ(spci_msg_send_attributes(ret),
+		  SPCI_MSG_SEND_LEGACY_MEMORY_LEND);
+
+	exception_setup(NULL, exception_handler_yield);
+
+	/*
+	 * Choose which constituent we want to test: the sender stores the
+	 * index in the first byte of the first constituent.
+	 */
+	index = *(uint8_t *)constituents[0].address;
+	ptr = (uint8_t *)constituents[index].address;
+	EXPECT_EQ(spci_rx_release().func, SPCI_SUCCESS_32);
+
+	/* Check that one cannot access after the end of the lent region. */
+	ASSERT_EQ(ptr[PAGE_SIZE], 0);
+
+	FAIL("Exception not generated by invalid access.");
+}
+
+/**
+ * Attempt to modify below the lower bound for the lent memory.
+ * The access must fault; the exception handler yields back to the primary,
+ * so reaching FAIL means no exception was generated.
+ */
+TEST_SERVICE(spci_lend_check_lower_bound)
+{
+	struct spci_value ret = spci_msg_wait();
+	uint8_t *ptr;
+	uint8_t index;
+
+	void *recv_buf = SERVICE_RECV_BUFFER();
+	struct spci_memory_region *memory_region =
+		(struct spci_memory_region *)recv_buf;
+	struct spci_memory_region_constituent *constituents =
+		spci_memory_region_get_constituents(memory_region);
+
+	exception_setup(NULL, exception_handler_yield);
+
+	EXPECT_EQ(ret.func, SPCI_MSG_SEND_32);
+	EXPECT_EQ(spci_msg_send_attributes(ret),
+		  SPCI_MSG_SEND_LEGACY_MEMORY_LEND);
+
+	/*
+	 * Choose which constituent we want to test: the sender stores the
+	 * index in the first byte of the first constituent.
+	 */
+	index = *(uint8_t *)constituents[0].address;
+	ptr = (uint8_t *)constituents[index].address;
+	EXPECT_EQ(spci_rx_release().func, SPCI_SUCCESS_32);
+
+	/* Check that one cannot access before the start of the lent region. */
+	ptr[-1]++;
+
+	FAIL("Exception not generated by invalid access.");
+}
+
+/**
+ * Receive lent/shared memory, then verify that the primary cannot lend or
+ * share any part of the same region (at every offset within the two pages)
+ * to another secondary while this VM still holds it.
+ */
+TEST_SERVICE(spci_memory_lend_twice)
+{
+	struct spci_value ret = spci_msg_wait();
+	uint8_t *ptr;
+	uint32_t msg_size;
+	size_t i;
+
+	void *recv_buf = SERVICE_RECV_BUFFER();
+	void *send_buf = SERVICE_SEND_BUFFER();
+	struct spci_memory_region *memory_region =
+		(struct spci_memory_region *)recv_buf;
+	struct spci_memory_region_constituent *constituents =
+		spci_memory_region_get_constituents(memory_region);
+	/* Copy the first constituent before releasing the RX buffer. */
+	struct spci_memory_region_constituent constituent_copy =
+		constituents[0];
+
+	EXPECT_EQ(ret.func, SPCI_MSG_SEND_32);
+	/*
+	 * The memory may have been sent in one of several different ways, but
+	 * there shouldn't be any other attributes to the message.
+	 */
+	EXPECT_NE(spci_msg_send_attributes(ret) &
+			  SPCI_MSG_SEND_LEGACY_MEMORY_MASK,
+		  0);
+	EXPECT_EQ(spci_msg_send_attributes(ret) &
+			  ~SPCI_MSG_SEND_LEGACY_MEMORY_MASK,
+		  0);
+
+	EXPECT_EQ(spci_rx_release().func, SPCI_SUCCESS_32);
+
+	ptr = (uint8_t *)constituent_copy.address;
+
+	/* Check that we have read access. */
+	for (i = 0; i < PAGE_SIZE; ++i) {
+		EXPECT_EQ(ptr[i], 'b');
+	}
+
+	/* Attempt to modify the memory. */
+	for (i = 0; i < PAGE_SIZE; ++i) {
+		ptr[i]++;
+	}
+
+	for (i = 1; i < PAGE_SIZE * 2; i++) {
+		constituent_copy.address = (uint64_t)ptr + i;
+
+		/* Fail to lend or share the memory from the primary. */
+		msg_size = spci_memory_region_init(
+			send_buf, HF_PRIMARY_VM_ID, SERVICE_VM2,
+			&constituent_copy, 1, 0, 0, SPCI_MEMORY_RW_X,
+			SPCI_MEMORY_NORMAL_MEM, SPCI_MEMORY_CACHE_WRITE_BACK,
+			SPCI_MEMORY_OUTER_SHAREABLE);
+		EXPECT_SPCI_ERROR(
+			spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM2, msg_size,
+				      SPCI_MSG_SEND_LEGACY_MEMORY_LEND),
+			SPCI_INVALID_PARAMETERS);
+		msg_size = spci_memory_region_init(
+			send_buf, HF_PRIMARY_VM_ID, SERVICE_VM2,
+			&constituent_copy, 1, 0, 0, SPCI_MEMORY_RW_X,
+			SPCI_MEMORY_NORMAL_MEM, SPCI_MEMORY_CACHE_WRITE_BACK,
+			SPCI_MEMORY_OUTER_SHAREABLE);
+		EXPECT_SPCI_ERROR(
+			spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM2, msg_size,
+				      SPCI_MSG_SEND_LEGACY_MEMORY_SHARE),
+			SPCI_INVALID_PARAMETERS);
+	}
+
+	/* Return control to primary. */
+	spci_yield();
+}
diff --git a/test/vmapi/primary_with_secondaries/services/perfmon.c b/test/vmapi/primary_with_secondaries/services/perfmon.c
new file mode 100644
index 0000000..1e19ffb
--- /dev/null
+++ b/test/vmapi/primary_with_secondaries/services/perfmon.c
@@ -0,0 +1,35 @@
+/*
+ * Copyright 2019 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "hf/arch/vm/interrupts.h"
+
+#include "hf/dlog.h"
+
+#include "../sysregs.h"
+#include "test/vmapi/exception_handler.h"
+
+/**
+ * Check that a secondary VM's accesses to performance monitor registers trap:
+ * two reads and one write are each expected to raise an exception, which the
+ * handler skips over while counting it.
+ */
+TEST_SERVICE(perfmon_secondary_basic)
+{
+	exception_setup(NULL, exception_handler_skip_instruction);
+
+	EXPECT_GT(hf_vm_get_id(), HF_PRIMARY_VM_ID);
+	TRY_READ(PMCCFILTR_EL0);
+	TRY_READ(PMCR_EL0);
+	write_msr(PMINTENSET_EL1, 0xf);
+
+	/* All three register accesses above should have trapped. */
+	EXPECT_EQ(exception_handler_get_num(), 3);
+	spci_yield();
+}
diff --git a/test/vmapi/primary_with_secondaries/services/receive_block.c b/test/vmapi/primary_with_secondaries/services/receive_block.c
new file mode 100644
index 0000000..49ea5f2
--- /dev/null
+++ b/test/vmapi/primary_with_secondaries/services/receive_block.c
@@ -0,0 +1,58 @@
+/*
+ * Copyright 2019 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "hf/arch/irq.h"
+#include "hf/arch/vm/interrupts.h"
+
+#include "hf/dlog.h"
+#include "hf/spci.h"
+
+#include "vmapi/hf/call.h"
+
+#include "primary_with_secondary.h"
+#include "test/hftest.h"
+#include "test/vmapi/spci.h"
+
+/*
+ * Secondary VM that enables an interrupt, disables interrupts globally, and
+ * calls spci_msg_wait, expecting each wait to fail with SPCI_INTERRUPTED.
+ */
+
+/* IRQ handler: the test never expects an interrupt to be delivered here. */
+static void irq(void)
+{
+	uint32_t interrupt_id = hf_interrupt_get();
+	FAIL("Unexpected secondary IRQ %d from current", interrupt_id);
+}
+
+/**
+ * With interrupts globally disabled but one interrupt enabled, waiting for a
+ * message must repeatedly fail with SPCI_INTERRUPTED; afterwards notify the
+ * primary that we are done waiting.
+ */
+TEST_SERVICE(receive_block)
+{
+	int32_t i;
+	const char message[] = "Done waiting";
+
+	exception_setup(irq, NULL);
+	arch_irq_disable();
+	hf_interrupt_enable(EXTERNAL_INTERRUPT_ID_A, true);
+
+	for (i = 0; i < 10; ++i) {
+		struct spci_value res = spci_msg_wait();
+		EXPECT_SPCI_ERROR(res, SPCI_INTERRUPTED);
+	}
+
+	memcpy_s(SERVICE_SEND_BUFFER(), SPCI_MSG_PAYLOAD_MAX, message,
+		 sizeof(message));
+
+	spci_msg_send(hf_vm_get_id(), HF_PRIMARY_VM_ID, sizeof(message), 0);
+}
diff --git a/test/vmapi/primary_with_secondaries/services/relay.c b/test/vmapi/primary_with_secondaries/services/relay.c
new file mode 100644
index 0000000..7917251
--- /dev/null
+++ b/test/vmapi/primary_with_secondaries/services/relay.c
@@ -0,0 +1,60 @@
+/*
+ * Copyright 2018 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "hf/std.h"
+
+#include "vmapi/hf/call.h"
+
+#include "test/hftest.h"
+
+/**
+ * Relay service: forwards each received message to the VM named in its
+ * leading ID field, stripping that field off first.
+ */
+TEST_SERVICE(relay)
+{
+	/*
+	 * Loop, forwarding messages to the next VM.
+	 *
+	 * The first 32-bits of the message are the little-endian 32-bit ID of
+	 * the VM to forward the message to. This ID will be dropped from the
+	 * message so multiple IDs can be placed at the start of the message.
+	 */
+	for (;;) {
+		spci_vm_id_t *chain;
+		spci_vm_id_t next_vm_id;
+		void *next_message;
+		uint32_t next_message_size;
+
+		/* Receive the message to relay. */
+		struct spci_value ret = spci_msg_wait();
+		ASSERT_EQ(ret.func, SPCI_MSG_SEND_32);
+
+		/* Prepare to relay the message. */
+		void *recv_buf = SERVICE_RECV_BUFFER();
+		void *send_buf = SERVICE_SEND_BUFFER();
+		ASSERT_GE(spci_msg_send_size(ret), sizeof(spci_vm_id_t));
+
+		chain = (spci_vm_id_t *)recv_buf;
+		next_vm_id = le16toh(*chain);
+		/* Skip over the ID to find the payload to forward. */
+		next_message = chain + 1;
+		next_message_size =
+			spci_msg_send_size(ret) - sizeof(spci_vm_id_t);
+
+		/* Send the message to the next stage. */
+		memcpy_s(send_buf, SPCI_MSG_PAYLOAD_MAX, next_message,
+			 next_message_size);
+
+		EXPECT_EQ(spci_rx_release().func, SPCI_SUCCESS_32);
+		spci_msg_send(hf_vm_get_id(), next_vm_id, next_message_size, 0);
+	}
+}
diff --git a/test/vmapi/primary_with_secondaries/services/smp.c b/test/vmapi/primary_with_secondaries/services/smp.c
new file mode 100644
index 0000000..9030fb5
--- /dev/null
+++ b/test/vmapi/primary_with_secondaries/services/smp.c
@@ -0,0 +1,82 @@
+/*
+ * Copyright 2019 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <stdalign.h>
+#include <stdint.h>
+
+#include "hf/arch/vm/power_mgmt.h"
+
+#include "hf/dlog.h"
+#include "hf/std.h"
+
+#include "vmapi/hf/call.h"
+#include "vmapi/hf/spci.h"
+
+#include "../psci.h"
+#include "primary_with_secondary.h"
+#include "test/hftest.h"
+
+#define ARG_VALUE 42
+
+/*
+ * Secondary VM that starts a second vCPU and then sends messages from both.
+ */
+
+alignas(4096) static char stack[4096];
+
+/**
+ * Send a message back to the primary.
+ * NOTE(review): only used within this file as far as visible here —
+ * presumably could be `static`; confirm no other translation unit uses it.
+ */
+void send_message(const char *message, uint32_t size)
+{
+	memcpy_s(SERVICE_SEND_BUFFER(), SPCI_MSG_PAYLOAD_MAX, message, size);
+
+	ASSERT_EQ(spci_msg_send(hf_vm_get_id(), HF_PRIMARY_VM_ID, size, 0).func,
+		  SPCI_SUCCESS_32);
+}
+
+/**
+ * Entry point of the second vCPU: checks its start argument, verifies both
+ * vCPUs are reported as powered on, and sends a message to the primary.
+ */
+static void vm_cpu_entry(uintptr_t arg)
+{
+	ASSERT_EQ(arg, ARG_VALUE);
+
+	/* Check that vCPU statuses are as expected. */
+	ASSERT_EQ(arch_cpu_status(0), POWER_STATUS_ON);
+	ASSERT_EQ(arch_cpu_status(1), POWER_STATUS_ON);
+
+	dlog("Secondary second vCPU started.\n");
+	send_message("vCPU 1", sizeof("vCPU 1"));
+	dlog("Secondary second vCPU finishing\n");
+}
+
+/**
+ * First vCPU: starts the second vCPU on its own stack, verifies the power
+ * status transitions, then sends its own message to the primary.
+ */
+TEST_SERVICE(smp)
+{
+	/* Check that vCPU statuses are as expected. */
+	ASSERT_EQ(arch_cpu_status(0), POWER_STATUS_ON);
+	ASSERT_EQ(arch_cpu_status(1), POWER_STATUS_OFF);
+
+	/* Start second vCPU. */
+	dlog("Secondary starting second vCPU.\n");
+	ASSERT_TRUE(hftest_cpu_start(1, stack, sizeof(stack), vm_cpu_entry,
+				     ARG_VALUE));
+	dlog("Secondary started second vCPU.\n");
+
+	/* Check that vCPU statuses are as expected. */
+	ASSERT_EQ(arch_cpu_status(0), POWER_STATUS_ON);
+	ASSERT_EQ(arch_cpu_status(1), POWER_STATUS_ON);
+
+	send_message("vCPU 0", sizeof("vCPU 0"));
+}
diff --git a/test/vmapi/primary_with_secondaries/services/spci_check.c b/test/vmapi/primary_with_secondaries/services/spci_check.c
new file mode 100644
index 0000000..9e342ba
--- /dev/null
+++ b/test/vmapi/primary_with_secondaries/services/spci_check.c
@@ -0,0 +1,75 @@
+/*
+ * Copyright 2019 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "hf/spci.h"
+#include "hf/std.h"
+
+#include "vmapi/hf/call.h"
+
+#include "primary_with_secondary.h"
+#include "test/hftest.h"
+#include "test/vmapi/spci.h"
+
+/**
+ * Wait for one message from the primary and validate the header fields
+ * (size, receiver, sender) as well as the payload contents.
+ */
+TEST_SERVICE(spci_check)
+{
+	void *recv_buf = SERVICE_RECV_BUFFER();
+	const char message[] = "spci_msg_send";
+
+	/* Wait for single message to be sent by the primary VM. */
+	struct spci_value ret = spci_msg_wait();
+
+	EXPECT_EQ(ret.func, SPCI_MSG_SEND_32);
+
+	/* Ensure message header has all fields correctly set. */
+	EXPECT_EQ(spci_msg_send_size(ret), sizeof(message));
+	EXPECT_EQ(spci_msg_send_receiver(ret), hf_vm_get_id());
+	EXPECT_EQ(spci_msg_send_sender(ret), HF_PRIMARY_VM_ID);
+
+	/* Ensure that the payload was correctly transmitted. */
+	EXPECT_EQ(memcmp(recv_buf, message, sizeof(message)), 0);
+
+	spci_yield();
+}
+
+/**
+ * Wait for one deliberately-truncated message from the primary and verify
+ * that only the declared length (16 bytes) was transmitted.
+ */
+TEST_SERVICE(spci_length)
+{
+	void *recv_buf = SERVICE_RECV_BUFFER();
+	const char message[] = "this should be truncated";
+
+	/* Wait for single message to be sent by the primary VM. */
+	struct spci_value ret = spci_msg_wait();
+
+	EXPECT_EQ(ret.func, SPCI_MSG_SEND_32);
+
+	/*
+	 * Verify the length is as expected; (actual, expected) argument order
+	 * for consistency with the rest of the file.
+	 */
+	EXPECT_EQ(spci_msg_send_size(ret), 16);
+
+	/* Check only part of the message is sent correctly. */
+	EXPECT_NE(memcmp(recv_buf, message, sizeof(message)), 0);
+	EXPECT_EQ(memcmp(recv_buf, message, spci_msg_send_size(ret)), 0);
+
+	spci_yield();
+}
+
+/**
+ * Poll for a message without blocking; with nothing pending, the poll must
+ * fail with SPCI_RETRY.
+ */
+TEST_SERVICE(spci_recv_non_blocking)
+{
+	/* Wait for single message to be sent by the primary VM. */
+	struct spci_value ret = spci_msg_poll();
+
+	EXPECT_SPCI_ERROR(ret, SPCI_RETRY);
+
+	spci_yield();
+}
diff --git a/test/vmapi/primary_with_secondaries/services/unmapped.c b/test/vmapi/primary_with_secondaries/services/unmapped.c
new file mode 100644
index 0000000..13d78c5
--- /dev/null
+++ b/test/vmapi/primary_with_secondaries/services/unmapped.c
@@ -0,0 +1,101 @@
+/*
+ * Copyright 2019 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "hf/arch/vm/interrupts.h"
+
+#include "hf/mm.h"
+#include "hf/std.h"
+
+#include "vmapi/hf/call.h"
+
+#include "../sysregs.h"
+#include "test/hftest.h"
+#include "test/vmapi/exception_handler.h"
+
+alignas(PAGE_SIZE) static uint8_t pages[2 * PAGE_SIZE];
+
+/**
+ * Write to an unmapped address; the data abort must be caught by the
+ * exception handler, which yields — reaching FAIL means no fault occurred.
+ */
+TEST_SERVICE(data_unmapped)
+{
+	/* Not using NULL so static analysis doesn't complain. */
+	int *p = (int *)1;
+	exception_setup(NULL, exception_handler_yield);
+	*p = 12;
+	FAIL("Exception not generated by invalid access.");
+}
+
+/**
+ * Donate the second page to the primary, then perform an 8-byte read that
+ * starts 6 bytes before the page boundary so it straddles into the now
+ * unmapped page; this must fault.
+ */
+TEST_SERVICE(straddling_data_unmapped)
+{
+	void *send_buf = SERVICE_SEND_BUFFER();
+	/* Give some memory to the primary VM so that it's unmapped. */
+	struct spci_memory_region_constituent constituents[] = {
+		{.address = (uint64_t)(&pages[PAGE_SIZE]), .page_count = 1},
+	};
+	uint32_t msg_size = spci_memory_region_init(
+		send_buf, hf_vm_get_id(), HF_PRIMARY_VM_ID, constituents,
+		ARRAY_SIZE(constituents), 0, 0, SPCI_MEMORY_RW_X,
+		SPCI_MEMORY_NORMAL_MEM, SPCI_MEMORY_CACHE_WRITE_BACK,
+		SPCI_MEMORY_OUTER_SHAREABLE);
+	exception_setup(NULL, exception_handler_yield);
+
+	EXPECT_EQ(spci_msg_send(hf_vm_get_id(), HF_PRIMARY_VM_ID, msg_size,
+				SPCI_MSG_SEND_LEGACY_MEMORY_DONATE)
+			  .func,
+		  SPCI_SUCCESS_32);
+
+	/* 8-byte read straddling the boundary into the unmapped page. */
+	*(volatile uint64_t *)(&pages[PAGE_SIZE - 6]);
+	FAIL("Exception not generated by invalid access.");
+}
+
+/**
+ * Branch to an unmapped address; the instruction abort must be caught by the
+ * exception handler, which yields — reaching FAIL means no fault occurred.
+ */
+TEST_SERVICE(instruction_unmapped)
+{
+	/* Not using NULL so static analysis doesn't complain. */
+	int (*f)(void) = (int (*)(void))4;
+	exception_setup(NULL, exception_handler_yield);
+	f();
+	FAIL("Exception not generated by invalid access.");
+}
+
+/**
+ * Donate the second page to the primary, then branch to a 4-byte instruction
+ * whose last 2 bytes lie in the now unmapped page; the fetch must fault.
+ */
+TEST_SERVICE(straddling_instruction_unmapped)
+{
+	void *send_buf = SERVICE_SEND_BUFFER();
+
+	/*
+	 * Get a function pointer which, when branched to, will attempt to
+	 * execute a 4-byte instruction straddling two pages.
+	 */
+	int (*f)(void) = (int (*)(void))(&pages[PAGE_SIZE - 2]);
+
+	/* Give second page to the primary VM so that it's unmapped. */
+	struct spci_memory_region_constituent constituents[] = {
+		{.address = (uint64_t)(&pages[PAGE_SIZE]), .page_count = 1},
+	};
+	uint32_t msg_size = spci_memory_region_init(
+		send_buf, hf_vm_get_id(), HF_PRIMARY_VM_ID, constituents,
+		ARRAY_SIZE(constituents), 0, 0, SPCI_MEMORY_RW_X,
+		SPCI_MEMORY_NORMAL_MEM, SPCI_MEMORY_CACHE_WRITE_BACK,
+		SPCI_MEMORY_OUTER_SHAREABLE);
+
+	exception_setup(NULL, exception_handler_yield);
+
+	EXPECT_EQ(spci_msg_send(hf_vm_get_id(), HF_PRIMARY_VM_ID, msg_size,
+				SPCI_MSG_SEND_LEGACY_MEMORY_DONATE)
+			  .func,
+		  SPCI_SUCCESS_32);
+
+	/* Branch to instruction whose 2 bytes are now in an unmapped page. */
+	f();
+	FAIL("Exception not generated by invalid access.");
+}
diff --git a/test/vmapi/primary_with_secondaries/services/wfi.c b/test/vmapi/primary_with_secondaries/services/wfi.c
new file mode 100644
index 0000000..6d61194
--- /dev/null
+++ b/test/vmapi/primary_with_secondaries/services/wfi.c
@@ -0,0 +1,55 @@
+/*
+ * Copyright 2019 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "hf/arch/irq.h"
+#include "hf/arch/vm/interrupts.h"
+
+#include "hf/dlog.h"
+
+#include "vmapi/hf/call.h"
+
+#include "primary_with_secondary.h"
+#include "test/hftest.h"
+
+/*
+ * Secondary VM that enables an interrupt, disables interrupts globally, and
+ * calls WFI.
+ */
+
+/* IRQ handler: the test never expects an interrupt to be delivered here. */
+static void irq(void)
+{
+	uint32_t interrupt_id = hf_interrupt_get();
+	FAIL("Unexpected secondary IRQ %d from current", interrupt_id);
+}
+
+/**
+ * With interrupts globally disabled but one interrupt enabled, execute WFI
+ * ten times, then notify the primary that we are done waiting.
+ */
+TEST_SERVICE(wfi)
+{
+	int32_t i;
+	const char message[] = "Done waiting";
+
+	exception_setup(irq, NULL);
+	arch_irq_disable();
+	hf_interrupt_enable(EXTERNAL_INTERRUPT_ID_A, true);
+
+	for (i = 0; i < 10; ++i) {
+		interrupt_wait();
+	}
+
+	memcpy_s(SERVICE_SEND_BUFFER(), SPCI_MSG_PAYLOAD_MAX, message,
+		 sizeof(message));
+
+	spci_msg_send(hf_vm_get_id(), HF_PRIMARY_VM_ID, sizeof(message), 0);
+}
diff --git a/test/vmapi/primary_with_secondaries/smp.c b/test/vmapi/primary_with_secondaries/smp.c
new file mode 100644
index 0000000..d76b5f0
--- /dev/null
+++ b/test/vmapi/primary_with_secondaries/smp.c
@@ -0,0 +1,76 @@
+/*
+ * Copyright 2019 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <stdint.h>
+
+#include "hf/std.h"
+
+#include "vmapi/hf/call.h"
+
+#include "primary_with_secondary.h"
+#include "test/hftest.h"
+#include "test/vmapi/spci.h"
+
+/* After each test, the RX buffer should already be released. */
+TEAR_DOWN(smp)
+{
+	EXPECT_SPCI_ERROR(spci_rx_release(), SPCI_DENIED);
+}
+
+/**
+ * Run a service that starts a second vCPU, and check that both the first and
+ * second vCPU send messages to us. The second vCPU is then expected to turn
+ * itself off.
+ */
+TEST(smp, two_vcpus)
+{
+	const char expected_response_0[] = "vCPU 0";
+	const char expected_response_1[] = "vCPU 1";
+	struct spci_value run_res;
+	struct mailbox_buffers mb = set_up_mailbox();
+
+	SERVICE_SELECT(SERVICE_VM3, "smp", mb.send);
+
+	/* Let the first vCPU start the second vCPU. */
+	run_res = spci_run(SERVICE_VM3, 0);
+	EXPECT_EQ(run_res.func, HF_SPCI_RUN_WAKE_UP);
+	EXPECT_EQ(spci_vm_id(run_res), SERVICE_VM3);
+	EXPECT_EQ(spci_vcpu_index(run_res), 1);
+
+	/* Run the second vCPU and wait for a message. */
+	dlog("Run second vCPU for message\n");
+	run_res = spci_run(SERVICE_VM3, 1);
+	EXPECT_EQ(run_res.func, SPCI_MSG_SEND_32);
+	EXPECT_EQ(spci_msg_send_size(run_res), sizeof(expected_response_1));
+	EXPECT_EQ(memcmp(mb.recv, expected_response_1,
+			 sizeof(expected_response_1)),
+		  0);
+	EXPECT_EQ(spci_rx_release().func, SPCI_SUCCESS_32);
+
+	/* Run the first vCPU and wait for a different message. */
+	dlog("Run first vCPU for message\n");
+	run_res = spci_run(SERVICE_VM3, 0);
+	EXPECT_EQ(run_res.func, SPCI_MSG_SEND_32);
+	EXPECT_EQ(spci_msg_send_size(run_res), sizeof(expected_response_0));
+	EXPECT_EQ(memcmp(mb.recv, expected_response_0,
+			 sizeof(expected_response_0)),
+		  0);
+	EXPECT_EQ(spci_rx_release().func, SPCI_SUCCESS_32);
+
+	/* Run the second vCPU again, and expect it to turn itself off. */
+	dlog("Run second vCPU for poweroff.\n");
+	run_res = spci_run(SERVICE_VM3, 1);
+	EXPECT_EQ(run_res.func, HF_SPCI_RUN_WAIT_FOR_INTERRUPT);
+	EXPECT_EQ(run_res.arg2, SPCI_SLEEP_INDEFINITE);
+}
diff --git a/test/vmapi/primary_with_secondaries/spci.c b/test/vmapi/primary_with_secondaries/spci.c
new file mode 100644
index 0000000..6ffc85f
--- /dev/null
+++ b/test/vmapi/primary_with_secondaries/spci.c
@@ -0,0 +1,132 @@
+/*
+ * Copyright 2019 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "hf/spci.h"
+
+#include <stdint.h>
+
+#include "hf/std.h"
+
+#include "vmapi/hf/call.h"
+
+#include "primary_with_secondary.h"
+#include "test/hftest.h"
+#include "test/vmapi/spci.h"
+
+/**
+ * Send a message to a secondary VM which checks the validity of the received
+ * header, then yields back to us.
+ */
+TEST(spci, msg_send)
+{
+	const char message[] = "spci_msg_send";
+	struct spci_value run_res;
+	struct mailbox_buffers mb = set_up_mailbox();
+
+	SERVICE_SELECT(SERVICE_VM1, "spci_check", mb.send);
+
+	/* Set the payload, init the message header and send the message. */
+	memcpy_s(mb.send, SPCI_MSG_PAYLOAD_MAX, message, sizeof(message));
+	EXPECT_EQ(
+		spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM1, sizeof(message), 0)
+			.func,
+		SPCI_SUCCESS_32);
+
+	run_res = spci_run(SERVICE_VM1, 0);
+	EXPECT_EQ(run_res.func, SPCI_YIELD_32);
+}
+
+/**
+ * Send a message to a secondary VM spoofing the source VM id; the hypervisor
+ * must reject it with SPCI_INVALID_PARAMETERS.
+ */
+TEST(spci, msg_send_spoof)
+{
+	const char message[] = "spci_msg_send";
+	struct mailbox_buffers mb = set_up_mailbox();
+
+	SERVICE_SELECT(SERVICE_VM1, "spci_check", mb.send);
+
+	/* Set the payload, init the message header and send the message. */
+	memcpy_s(mb.send, SPCI_MSG_PAYLOAD_MAX, message, sizeof(message));
+	EXPECT_SPCI_ERROR(
+		spci_msg_send(SERVICE_VM2, SERVICE_VM1, sizeof(message), 0),
+		SPCI_INVALID_PARAMETERS);
+}
+
+/**
+ * Send a message with an incorrect destination id (-1); the hypervisor must
+ * reject it with SPCI_INVALID_PARAMETERS.
+ */
+TEST(spci, spci_invalid_destination_id)
+{
+	const char message[] = "fail to send";
+	struct mailbox_buffers mb = set_up_mailbox();
+
+	SERVICE_SELECT(SERVICE_VM1, "spci_check", mb.send);
+	/* Set the payload, init the message header and send the message. */
+	memcpy_s(mb.send, SPCI_MSG_PAYLOAD_MAX, message, sizeof(message));
+	EXPECT_SPCI_ERROR(
+		spci_msg_send(HF_PRIMARY_VM_ID, -1, sizeof(message), 0),
+		SPCI_INVALID_PARAMETERS);
+}
+
+/**
+ * Ensure that the length parameter is respected when sending messages: a
+ * longer payload with length 16 is delivered truncated, and the receiving
+ * service verifies this.
+ */
+TEST(spci, spci_incorrect_length)
+{
+	const char message[] = "this should be truncated";
+	struct spci_value run_res;
+	struct mailbox_buffers mb = set_up_mailbox();
+
+	SERVICE_SELECT(SERVICE_VM1, "spci_length", mb.send);
+
+	/* Send the message and compare if truncated. */
+	memcpy_s(mb.send, SPCI_MSG_PAYLOAD_MAX, message, sizeof(message));
+	/* Hard code incorrect length (shorter than the full payload). */
+	EXPECT_EQ(spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM1, 16, 0).func,
+		  SPCI_SUCCESS_32);
+	run_res = spci_run(SERVICE_VM1, 0);
+	EXPECT_EQ(run_res.func, SPCI_YIELD_32);
+}
+
+/**
+ * Attempt to send a message larger than what is supported; the hypervisor
+ * must reject it with SPCI_INVALID_PARAMETERS.
+ */
+TEST(spci, spci_large_message)
+{
+	const char message[] = "fail to send";
+	struct mailbox_buffers mb = set_up_mailbox();
+
+	memcpy_s(mb.send, SPCI_MSG_PAYLOAD_MAX, message, sizeof(message));
+	/*
+	 * Send a message that is larger than the mailbox supports (4KB).
+	 * NOTE(review): presumably 4 * 1024 + 1 == SPCI_MSG_PAYLOAD_MAX + 1;
+	 * consider using the constant — confirm against the header.
+	 */
+	EXPECT_SPCI_ERROR(
+		spci_msg_send(HF_PRIMARY_VM_ID, SERVICE_VM1, 4 * 1024 + 1, 0),
+		SPCI_INVALID_PARAMETERS);
+}
+
+/**
+ * Verify secondary VM non-blocking recv: the check itself (an SPCI_RETRY
+ * error from spci_msg_poll) is performed in the secondary VM.
+ */
+TEST(spci, spci_recv_non_blocking)
+{
+	struct mailbox_buffers mb = set_up_mailbox();
+	struct spci_value run_res;
+
+	/* Check is performed in secondary VM. */
+	SERVICE_SELECT(SERVICE_VM1, "spci_recv_non_blocking", mb.send);
+	run_res = spci_run(SERVICE_VM1, 0);
+	EXPECT_EQ(run_res.func, SPCI_YIELD_32);
+}
diff --git a/test/vmapi/primary_with_secondaries/sysregs.c b/test/vmapi/primary_with_secondaries/sysregs.c
new file mode 100644
index 0000000..42f0636
--- /dev/null
+++ b/test/vmapi/primary_with_secondaries/sysregs.c
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2019 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "sysregs.h"
+
+#include "hf/arch/vm/interrupts.h"
+
+#include "primary_with_secondary.h"
+#include "test/vmapi/exception_handler.h"
+#include "test/vmapi/spci.h"
+
+/* Install a handler that counts and skips over trapped register accesses. */
+SET_UP(sysregs)
+{
+	exception_setup(NULL, exception_handler_skip_instruction);
+}
+
+/**
+ * Test that accessing LOR registers would inject an exception: the single
+ * read below is expected to trap and be counted by the handler.
+ */
+TEST(sysregs, lor_exception)
+{
+	EXPECT_EQ(hf_vm_get_id(), HF_PRIMARY_VM_ID);
+	TRY_READ(MSR_LORC_EL1);
+
+	EXPECT_EQ(exception_handler_get_num(), 1);
+}
diff --git a/test/vmapi/primary_with_secondaries/sysregs.h b/test/vmapi/primary_with_secondaries/sysregs.h
new file mode 100644
index 0000000..50faab4
--- /dev/null
+++ b/test/vmapi/primary_with_secondaries/sysregs.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright 2019 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include "vmapi/hf/call.h"
+
+#include "../msr.h"
+#include "test/hftest.h"
+
/* Reads system register REG and logs its value; used by tests where the
 * access itself may trap and be handled by the registered handler. */
#define TRY_READ(REG) dlog(#REG "=%#x\n", read_msr(REG))

/* Asserts that reading REG returns exactly VALUE. */
#define CHECK_READ(REG, VALUE)       \
	do {                         \
		uintreg_t x;         \
		x = read_msr(REG);   \
		EXPECT_EQ(x, VALUE); \
	} while (0)

/*
 * Writes VALUE to REG and asserts the write took effect. VALUE must differ
 * from the register's current contents (enforced by the first EXPECT_NE).
 * NOTE(review): the hidden local `x` would capture a VALUE expression that
 * itself names `x`; fine for the current literal uses — confirm if reused.
 */
#define TRY_WRITE_READ(REG, VALUE)        \
	do {                              \
		uintreg_t x;              \
		x = read_msr(REG);        \
		EXPECT_NE(x, VALUE);      \
		write_msr(REG, VALUE);    \
		x = read_msr(REG);        \
		EXPECT_EQ(x, VALUE);      \
	} while (0)
diff --git a/test/vmapi/primary_with_secondaries/unmapped.c b/test/vmapi/primary_with_secondaries/unmapped.c
new file mode 100644
index 0000000..0b9c116
--- /dev/null
+++ b/test/vmapi/primary_with_secondaries/unmapped.c
@@ -0,0 +1,98 @@
+/*
+ * Copyright 2019 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "vmapi/hf/call.h"
+
+#include "primary_with_secondary.h"
+#include "test/hftest.h"
+#include "test/vmapi/exception_handler.h"
+#include "test/vmapi/spci.h"
+
+/**
+ * Accessing unmapped memory traps the VM.
+ */
+TEST(unmapped, data_unmapped)
+{
+ struct spci_value run_res;
+ struct mailbox_buffers mb = set_up_mailbox();
+
+ SERVICE_SELECT(SERVICE_VM1, "data_unmapped", mb.send);
+
+ run_res = spci_run(SERVICE_VM1, 0);
+ EXPECT_EQ(exception_handler_receive_exception_count(&run_res, mb.recv),
+ 1);
+}
+
+/**
+ * Accessing partially unmapped memory traps the VM.
+ */
+TEST(unmapped, straddling_data_unmapped)
+{
+ struct spci_value run_res;
+ struct mailbox_buffers mb = set_up_mailbox();
+
+ SERVICE_SELECT(SERVICE_VM1, "straddling_data_unmapped", mb.send);
+
+ /*
+ * First we get a message about the memory being donated to us, then we
+ * get the trap.
+ */
+ run_res = spci_run(SERVICE_VM1, 0);
+ EXPECT_EQ(run_res.func, SPCI_MSG_SEND_32);
+ EXPECT_EQ(spci_rx_release().func, SPCI_SUCCESS_32);
+
+ run_res = spci_run(SERVICE_VM1, 0);
+ EXPECT_EQ(exception_handler_receive_exception_count(&run_res, mb.recv),
+ 1);
+}
+
+/**
+ * Executing unmapped memory traps the VM.
+ */
+TEST(unmapped, instruction_unmapped)
+{
+ struct spci_value run_res;
+ struct mailbox_buffers mb = set_up_mailbox();
+
+ SERVICE_SELECT(SERVICE_VM1, "instruction_unmapped", mb.send);
+
+ run_res = spci_run(SERVICE_VM1, 0);
+ EXPECT_EQ(exception_handler_receive_exception_count(&run_res, mb.recv),
+ 1);
+}
+
+/**
+ * Executing partially unmapped memory traps the VM.
+ */
+TEST(unmapped, straddling_instruction_unmapped)
+{
+ struct spci_value run_res;
+ struct mailbox_buffers mb = set_up_mailbox();
+
+ SERVICE_SELECT(SERVICE_VM1, "straddling_instruction_unmapped", mb.send);
+
+ /*
+ * First we get a message about the memory being donated to us, then we
+ * get the trap.
+ */
+ run_res = spci_run(SERVICE_VM1, 0);
+ EXPECT_EQ(run_res.func, SPCI_MSG_SEND_32);
+ EXPECT_EQ(spci_rx_release().func, SPCI_SUCCESS_32);
+
+ run_res = spci_run(SERVICE_VM1, 0);
+ EXPECT_EQ(exception_handler_receive_exception_count(&run_res, mb.recv),
+ 1);
+}
diff --git a/third_party/googletest b/third_party/googletest
new file mode 160000
index 0000000..90435b5
--- /dev/null
+++ b/third_party/googletest
@@ -0,0 +1 @@
+Subproject commit 90435b5c42b2492a7105c56bd36505b190ce54be
diff --git a/third_party/linux b/third_party/linux
new file mode 160000
index 0000000..0f672f6
--- /dev/null
+++ b/third_party/linux
@@ -0,0 +1 @@
+Subproject commit 0f672f6c0b52b7b0700b0915c72b540721af4465
diff --git a/vmlib/BUILD.gn b/vmlib/BUILD.gn
new file mode 100644
index 0000000..9ba8c3d
--- /dev/null
+++ b/vmlib/BUILD.gn
@@ -0,0 +1,21 @@
+# Copyright 2019 The Hafnium Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import("//build/toolchain/platform.gni")
+
# Architecture-independent helpers for building SPCI memory-region messages
# (see spci.c); built for use by VMs rather than the hypervisor itself.
source_set("vmlib") {
  sources = [
    "spci.c",
  ]
}
diff --git a/vmlib/aarch64/BUILD.gn b/vmlib/aarch64/BUILD.gn
new file mode 100644
index 0000000..4ddd659
--- /dev/null
+++ b/vmlib/aarch64/BUILD.gn
@@ -0,0 +1,21 @@
+# Copyright 2019 The Hafnium Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import("//build/toolchain/platform.gni")
+
# aarch64 `hvc`-based hypervisor-call stubs (hf_call/spci_call in call.c).
source_set("call") {
  sources = [
    "call.c",
  ]
}
diff --git a/vmlib/aarch64/call.c b/vmlib/aarch64/call.c
new file mode 100644
index 0000000..0c1a078
--- /dev/null
+++ b/vmlib/aarch64/call.c
@@ -0,0 +1,65 @@
+/*
+ * Copyright 2019 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "hf/call.h"
+
+#include "hf/spci.h"
+#include "hf/types.h"
+
/**
 * Issues a legacy Hafnium hypervisor call: up to four 64-bit arguments are
 * passed in x0-x3 via `hvc #0` and the result is returned in x0.
 *
 * x4-x7 are declared as clobbers. NOTE(review): no "memory" clobber is
 * declared, which assumes the call does not access memory the compiler may
 * have cached in registers — confirm against the hypercall ABI.
 */
int64_t hf_call(uint64_t arg0, uint64_t arg1, uint64_t arg2, uint64_t arg3)
{
	/* Pin each argument to the register mandated by the calling convention. */
	register uint64_t r0 __asm__("x0") = arg0;
	register uint64_t r1 __asm__("x1") = arg1;
	register uint64_t r2 __asm__("x2") = arg2;
	register uint64_t r3 __asm__("x3") = arg3;

	__asm__ volatile(
		"hvc #0"
		: /* Output registers, also used as inputs ('+' constraint). */
		"+r"(r0), "+r"(r1), "+r"(r2), "+r"(r3)
		:
		: /* Clobber registers. */
		"x4", "x5", "x6", "x7");

	/* The return code comes back in x0. */
	return r0;
}
+
/**
 * Issues an SPCI call: the eight fields of `args` are loaded into x0-x7,
 * `hvc #0` is executed, and the resulting values of x0-x7 are returned as a
 * new `struct spci_value`.
 *
 * All eight registers are read-write operands ('+'), so no separate clobber
 * list is needed for them. NOTE(review): as with hf_call, no "memory"
 * clobber is declared — confirm the ABI does not require one.
 */
struct spci_value spci_call(struct spci_value args)
{
	/* Pin each field of the call structure to its ABI register. */
	register uint64_t r0 __asm__("x0") = args.func;
	register uint64_t r1 __asm__("x1") = args.arg1;
	register uint64_t r2 __asm__("x2") = args.arg2;
	register uint64_t r3 __asm__("x3") = args.arg3;
	register uint64_t r4 __asm__("x4") = args.arg4;
	register uint64_t r5 __asm__("x5") = args.arg5;
	register uint64_t r6 __asm__("x6") = args.arg6;
	register uint64_t r7 __asm__("x7") = args.arg7;

	__asm__ volatile(
		"hvc #0"
		: /* Output registers, also used as inputs ('+' constraint). */
		"+r"(r0), "+r"(r1), "+r"(r2), "+r"(r3), "+r"(r4), "+r"(r5),
		"+r"(r6), "+r"(r7));

	/* Repackage the returned register state for the caller. */
	return (struct spci_value){.func = r0,
				   .arg1 = r1,
				   .arg2 = r2,
				   .arg3 = r3,
				   .arg4 = r4,
				   .arg5 = r5,
				   .arg6 = r6,
				   .arg7 = r7};
}
diff --git a/vmlib/spci.c b/vmlib/spci.c
new file mode 100644
index 0000000..c6f745a
--- /dev/null
+++ b/vmlib/spci.c
@@ -0,0 +1,89 @@
+/*
+ * Copyright 2019 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "hf/spci.h"
+
+#include "hf/types.h"
+
/*
 * Copied from hf/arch/std.h because we can't include Hafnium internal headers
 * here.
 */
#ifndef align_up
/* Rounds `v` up to the next multiple of `a`; `a` must be a power of two
 * (the round-and-mask arithmetic relies on it). */
#define align_up(v, a) (((uintptr_t)(v) + ((a)-1)) & ~((a)-1))
#endif
+
+/**
+ * Initialises the given `spci_memory_region` and copies the constituent
+ * information to it. Returns the length in bytes occupied by the data copied to
+ * `memory_region` (attributes, constituents and memory region header size).
+ */
+uint32_t spci_memory_region_init(
+ struct spci_memory_region *memory_region, spci_vm_id_t sender, spci_vm_id_t receiver,
+ const struct spci_memory_region_constituent constituents[],
+ uint32_t constituent_count, uint32_t tag, spci_memory_region_flags_t flags,
+ enum spci_memory_access access, enum spci_memory_type type,
+ enum spci_memory_cacheability cacheability,
+ enum spci_memory_shareability shareability)
+{
+ uint32_t constituents_length =
+ constituent_count *
+ sizeof(struct spci_memory_region_constituent);
+ uint32_t index;
+ struct spci_memory_region_constituent *region_constituents;
+ uint16_t attributes = 0;
+
+ /* Set memory region's page attributes. */
+ spci_set_memory_access_attr(&attributes, access);
+ spci_set_memory_type_attr(&attributes, type);
+ spci_set_memory_cacheability_attr(&attributes, cacheability);
+ spci_set_memory_shareability_attr(&attributes, shareability);
+
+ memory_region->tag = tag;
+ memory_region->flags = flags;
+ memory_region->sender = sender;
+ memory_region->reserved = 0;
+ memory_region->page_count = 0;
+ memory_region->constituent_count = constituent_count;
+ memory_region->attribute_count = 1;
+ memory_region->attributes[0].receiver = receiver;
+ memory_region->attributes[0].memory_attributes = attributes;
+
+ /*
+ * Constituent offset must be aligned to a 64-bit boundary so that
+ * 64-bit addresses can be copied without alignment faults.
+ */
+ memory_region->constituent_offset = align_up(
+ sizeof(struct spci_memory_region) +
+ memory_region->attribute_count *
+ sizeof(struct spci_memory_region_attributes),
+ 8);
+ region_constituents =
+ spci_memory_region_get_constituents(memory_region);
+
+ for (index = 0; index < constituent_count; index++) {
+ region_constituents[index] = constituents[index];
+ region_constituents[index].reserved = 0;
+ memory_region->page_count += constituents[index].page_count;
+ }
+
+ /*
+ * TODO: Add assert ensuring that the specified message
+ * length is not greater than SPCI_MSG_PAYLOAD_MAX.
+ */
+
+ return memory_region->constituent_offset + constituents_length;
+}