Compare commits

...

No commits in common. "fret" and "remake_fret" have entirely different histories.

9988 changed files with 3132262 additions and 158980 deletions

109
.cirrus.yml Normal file

@@ -0,0 +1,109 @@
env:
CIRRUS_CLONE_DEPTH: 1
windows_msys2_task:
timeout_in: 90m
windows_container:
image: cirrusci/windowsservercore:2019
os_version: 2019
cpu: 8
memory: 8G
env:
CIRRUS_SHELL: powershell
MSYS: winsymlinks:native
MSYSTEM: MINGW64
MSYS2_URL: https://github.com/msys2/msys2-installer/releases/download/2022-06-03/msys2-base-x86_64-20220603.sfx.exe
MSYS2_FINGERPRINT: 0
MSYS2_PACKAGES: "
diffutils git grep make pkg-config sed
mingw-w64-x86_64-python
mingw-w64-x86_64-python-sphinx
mingw-w64-x86_64-toolchain
mingw-w64-x86_64-SDL2
mingw-w64-x86_64-SDL2_image
mingw-w64-x86_64-gtk3
mingw-w64-x86_64-glib2
mingw-w64-x86_64-ninja
mingw-w64-x86_64-jemalloc
mingw-w64-x86_64-lzo2
mingw-w64-x86_64-zstd
mingw-w64-x86_64-libjpeg-turbo
mingw-w64-x86_64-pixman
mingw-w64-x86_64-libgcrypt
mingw-w64-x86_64-libpng
mingw-w64-x86_64-libssh
mingw-w64-x86_64-snappy
mingw-w64-x86_64-libusb
mingw-w64-x86_64-usbredir
mingw-w64-x86_64-libtasn1
mingw-w64-x86_64-nettle
mingw-w64-x86_64-cyrus-sasl
mingw-w64-x86_64-curl
mingw-w64-x86_64-gnutls
mingw-w64-x86_64-libnfs
"
CHERE_INVOKING: 1
msys2_cache:
folder: C:\tools\archive
reupload_on_changes: false
# These env variables are used to generate a fingerprint that triggers the cache procedure
# To force re-populating msys2, increase MSYS2_FINGERPRINT
fingerprint_script:
- |
echo $env:CIRRUS_TASK_NAME
echo $env:MSYS2_URL
echo $env:MSYS2_FINGERPRINT
echo $env:MSYS2_PACKAGES
populate_script:
- |
md -Force C:\tools\archive\pkg
$start_time = Get-Date
bitsadmin /transfer msys_download /dynamic /download /priority FOREGROUND $env:MSYS2_URL C:\tools\archive\base.exe
Write-Output "Download time taken: $((Get-Date).Subtract($start_time))"
cd C:\tools
C:\tools\archive\base.exe -y
del -Force C:\tools\archive\base.exe
Write-Output "Base install time taken: $((Get-Date).Subtract($start_time))"
$start_time = Get-Date
((Get-Content -path C:\tools\msys64\etc\\post-install\\07-pacman-key.post -Raw) -replace '--refresh-keys', '--version') | Set-Content -Path C:\tools\msys64\etc\\post-install\\07-pacman-key.post
C:\tools\msys64\usr\bin\bash.exe -lc "sed -i 's/^CheckSpace/#CheckSpace/g' /etc/pacman.conf"
C:\tools\msys64\usr\bin\bash.exe -lc "export"
C:\tools\msys64\usr\bin\pacman.exe --noconfirm -Sy
echo Y | C:\tools\msys64\usr\bin\pacman.exe --noconfirm -Suu --overwrite=*
taskkill /F /FI "MODULES eq msys-2.0.dll"
tasklist
C:\tools\msys64\usr\bin\bash.exe -lc "mv -f /etc/pacman.conf.pacnew /etc/pacman.conf || true"
C:\tools\msys64\usr\bin\bash.exe -lc "pacman --noconfirm -Syuu --overwrite=*"
Write-Output "Core install time taken: $((Get-Date).Subtract($start_time))"
$start_time = Get-Date
C:\tools\msys64\usr\bin\bash.exe -lc "pacman --noconfirm -S --needed $env:MSYS2_PACKAGES"
Write-Output "Package install time taken: $((Get-Date).Subtract($start_time))"
$start_time = Get-Date
del -Force -ErrorAction SilentlyContinue C:\tools\msys64\etc\mtab
del -Force -ErrorAction SilentlyContinue C:\tools\msys64\dev\fd
del -Force -ErrorAction SilentlyContinue C:\tools\msys64\dev\stderr
del -Force -ErrorAction SilentlyContinue C:\tools\msys64\dev\stdin
del -Force -ErrorAction SilentlyContinue C:\tools\msys64\dev\stdout
del -Force -Recurse -ErrorAction SilentlyContinue C:\tools\msys64\var\cache\pacman\pkg
tar cf C:\tools\archive\msys64.tar -C C:\tools\ msys64
Write-Output "Package archive time taken: $((Get-Date).Subtract($start_time))"
del -Force -Recurse -ErrorAction SilentlyContinue c:\tools\msys64
install_script:
- |
$start_time = Get-Date
cd C:\tools
ls C:\tools\archive\msys64.tar
tar xf C:\tools\archive\msys64.tar
Write-Output "Extract msys2 time taken: $((Get-Date).Subtract($start_time))"
script:
- C:\tools\msys64\usr\bin\bash.exe -lc "mkdir build"
- C:\tools\msys64\usr\bin\bash.exe -lc "cd build && ../configure --python=python3"
- C:\tools\msys64\usr\bin\bash.exe -lc "cd build && make -j8"
- exit $LastExitCode
test_script:
- C:\tools\msys64\usr\bin\bash.exe -lc "cd build && make V=1 check"
- exit $LastExitCode

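The msys2_cache block above works because Cirrus CI hashes the output of fingerprint_script and reuses the cached C:\tools\archive folder while that hash stays the same; bumping MSYS2_FINGERPRINT therefore forces populate_script to run again. A minimal Python sketch of the idea, assuming the same four variables are the only cache inputs (the exact hashing Cirrus applies internally is not specified here):

import hashlib
import os

def msys2_cache_key() -> str:
    # The same values the fingerprint_script above echoes.
    parts = [
        os.environ.get("CIRRUS_TASK_NAME", ""),
        os.environ.get("MSYS2_URL", ""),
        os.environ.get("MSYS2_FINGERPRINT", ""),
        os.environ.get("MSYS2_PACKAGES", ""),
    ]
    return hashlib.sha256("\n".join(parts).encode()).hexdigest()

if __name__ == "__main__":
    print(msys2_cache_key())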

@@ -1,148 +0,0 @@
---
Language: Cpp
# BasedOnStyle: Google
AccessModifierOffset: -1
AlignAfterOpenBracket: Align
AlignConsecutiveAssignments: false
AlignConsecutiveDeclarations: true
AlignEscapedNewlines: Left
AlignOperands: true
AlignTrailingComments: true
AllowAllParametersOfDeclarationOnNextLine: true
AllowShortBlocksOnASingleLine: true
AllowShortCaseLabelsOnASingleLine: false
AllowShortFunctionsOnASingleLine: false
AllowShortIfStatementsOnASingleLine: true
AllowShortLoopsOnASingleLine: false
AlwaysBreakAfterDefinitionReturnType: None
AlwaysBreakAfterReturnType: None
AlwaysBreakBeforeMultilineStrings: true
AlwaysBreakTemplateDeclarations: Yes
BinPackArguments: true
BinPackParameters: true
BraceWrapping:
AfterClass: false
AfterControlStatement: false
AfterEnum: false
AfterFunction: false
AfterNamespace: false
AfterObjCDeclaration: false
AfterStruct: false
AfterUnion: false
AfterExternBlock: false
BeforeCatch: false
BeforeElse: false
IndentBraces: false
SplitEmptyFunction: true
SplitEmptyRecord: true
SplitEmptyNamespace: true
BreakBeforeBinaryOperators: None
BreakBeforeBraces: Attach
BreakBeforeInheritanceComma: false
BreakInheritanceList: BeforeColon
BreakBeforeTernaryOperators: true
BreakConstructorInitializersBeforeComma: false
BreakConstructorInitializers: BeforeColon
BreakAfterJavaFieldAnnotations: false
BreakStringLiterals: true
ColumnLimit: 80
CommentPragmas: '^ IWYU pragma:'
CompactNamespaces: false
ConstructorInitializerAllOnOneLineOrOnePerLine: true
ConstructorInitializerIndentWidth: 4
ContinuationIndentWidth: 4
Cpp11BracedListStyle: true
DerivePointerAlignment: false
DisableFormat: false
ExperimentalAutoDetectBinPacking: false
FixNamespaceComments: true
ForEachMacros:
- foreach
- Q_FOREACH
- BOOST_FOREACH
IncludeBlocks: Preserve
IncludeCategories:
- Regex: '^<ext/.*\.h>'
Priority: 2
- Regex: '^<.*\.h>'
Priority: 1
- Regex: '^<.*'
Priority: 2
- Regex: '.*'
Priority: 3
IncludeIsMainRegex: '([-_](test|unittest))?$'
IndentCaseLabels: true
IndentPPDirectives: BeforeHash
IndentWidth: 2
IndentWrappedFunctionNames: false
JavaScriptQuotes: Leave
JavaScriptWrapImports: true
KeepEmptyLinesAtTheStartOfBlocks: false
MacroBlockBegin: ''
MacroBlockEnd: ''
MaxEmptyLinesToKeep: 1
NamespaceIndentation: None
ObjCBinPackProtocolList: Never
ObjCBlockIndentWidth: 2
ObjCSpaceAfterProperty: false
ObjCSpaceBeforeProtocolList: true
PenaltyBreakAssignment: 2
PenaltyBreakBeforeFirstCallParameter: 1
PenaltyBreakComment: 300
PenaltyBreakFirstLessLess: 120
PenaltyBreakString: 1000
PenaltyBreakTemplateDeclaration: 10
PenaltyExcessCharacter: 1000000
PenaltyReturnTypeOnItsOwnLine: 200
PointerAlignment: Right
RawStringFormats:
- Language: Cpp
Delimiters:
- cc
- CC
- cpp
- Cpp
- CPP
- 'c++'
- 'C++'
CanonicalDelimiter: ''
BasedOnStyle: google
- Language: TextProto
Delimiters:
- pb
- PB
- proto
- PROTO
EnclosingFunctions:
- EqualsProto
- EquivToProto
- PARSE_PARTIAL_TEXT_PROTO
- PARSE_TEST_PROTO
- PARSE_TEXT_PROTO
- ParseTextOrDie
- ParseTextProtoOrDie
CanonicalDelimiter: ''
BasedOnStyle: google
ReflowComments: true
SortIncludes: false
SortUsingDeclarations: true
SpaceAfterCStyleCast: false
SpaceAfterTemplateKeyword: true
SpaceBeforeAssignmentOperators: true
SpaceBeforeCpp11BracedList: false
SpaceBeforeCtorInitializerColon: true
SpaceBeforeInheritanceColon: true
SpaceBeforeParens: ControlStatements
SpaceBeforeRangeBasedForLoopColon: true
SpaceInEmptyParentheses: false
SpacesBeforeTrailingComments: 2
SpacesInAngles: false
SpacesInContainerLiterals: true
SpacesInCStyleCastParentheses: false
SpacesInParentheses: false
SpacesInSquareBrackets: false
Standard: Auto
TabWidth: 8
UseTab: Never
...

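The IncludeCategories list in this removed clang-format configuration assigns a priority to each include via first-match regexes; note that with SortIncludes: false they should have no effect on formatting here. A small Python sketch of the first-match rule, purely for illustration:

import re

# Mirrors the IncludeCategories above: the first regex that matches an
# include decides its priority (lower numbers sort first when sorting is on).
CATEGORIES = [
    (re.compile(r'^<ext/.*\.h>'), 2),
    (re.compile(r'^<.*\.h>'), 1),
    (re.compile(r'^<.*'), 2),
    (re.compile(r'.*'), 3),
]

def include_priority(include: str) -> int:
    for pattern, priority in CATEGORIES:
        if pattern.match(include):
            return priority
    return 3

for inc in ['<stdio.h>', '<ext/hash_map.h>', '<vector>', '"local.h"']:
    print(include_priority(inc), inc)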

@@ -1,29 +0,0 @@
// For format details, see https://aka.ms/devcontainer.json. For config options, see the README at:
// https://github.com/microsoft/vscode-dev-containers/tree/v0.191.0/containers/docker-existing-dockerfile
{
"name": "LibAFL Dockerfile",
// Sets the run context to one level up instead of the .devcontainer folder.
"context": "..",
// Update the 'dockerFile' property if you aren't using the standard 'Dockerfile' filename.
"dockerFile": "../Dockerfile",
// Set *default* container specific settings.json values on container create.
"settings": {},
// Add the IDs of extensions you want installed when the container is created.
"extensions": [
"matklad.rust-analyzer"
],
// Use 'forwardPorts' to make a list of ports inside the container available locally.
// "forwardPorts": [],
// Uncomment the next line to run commands after the container is created - for example installing curl.
// "postCreateCommand": "apt-get update && apt-get install -y curl",
// Uncomment when using a ptrace-based debugger like C++, Go, and Rust
"runArgs": [
"--cap-add=SYS_PTRACE",
"--security-opt",
"seccomp=unconfined"
],
// Uncomment to use the Docker CLI from inside the container. See https://aka.ms/vscode-remote/samples/docker-from-docker.
// "mounts": [ "source=/var/run/docker.sock,target=/var/run/docker.sock,type=bind" ],
// Uncomment to connect as a non-root user if you've added one. See https://aka.ms/vscode-remote/containers/non-root.
// "remoteUser": "vscode"
}

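The runArgs in this devcontainer config are what let ptrace-based debuggers attach inside the container. A hypothetical equivalent docker run invocation assembled in Python; the image tag and mount path are placeholders, only the two security flags come from the config above:

import subprocess

cmd = [
    "docker", "run", "--rm", "-it",
    "--cap-add=SYS_PTRACE",                  # required by ptrace-based debuggers
    "--security-opt", "seccomp=unconfined",  # same relaxation as runArgs above
    "-v", "/path/to/LibAFL:/workspace",      # placeholder workspace mount
    "libafl-dev:latest",                     # placeholder tag built from ../Dockerfile
    "bash",
]
print(" ".join(cmd))
# subprocess.run(cmd, check=True)  # uncomment to actually start the container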
2
.dir-locals.el Normal file

@@ -0,0 +1,2 @@
((c-mode . ((c-file-style . "stroustrup")
(indent-tabs-mode . nil))))


@@ -1,35 +0,0 @@
target
Cargo.lock
*.o
*.a
*.so
*.out
*.elf
*.bin
*.dll
*.exe
*.dSYM
.cur_input
crashes
callgrind.out.*
perf.data
perf.data.old
.vscode
test.dict
# Ignore all built fuzzers
fuzzer_*
AFLplusplus
# Ignore common dummy and logfiles
*.log
a
# ignore files from concolic tests
symcc_build
symcc

49
.editorconfig Normal file

@@ -0,0 +1,49 @@
# EditorConfig is a file format and collection of text editor plugins
# for maintaining consistent coding styles between different editors
# and IDEs. Most popular editors support this either natively or via
# plugin.
#
# Check https://editorconfig.org for details.
#
# Emacs: you need https://github.com/10sr/editorconfig-custom-majormode-el
# to automatically enable the appropriate major-mode for your files
# that aren't already caught by your existing config.
#
root = true
[*]
end_of_line = lf
insert_final_newline = true
charset = utf-8
[*.mak]
indent_style = tab
indent_size = 8
emacs_mode = makefile
[Makefile*]
indent_style = tab
indent_size = 8
emacs_mode = makefile
[*.{c,h,c.inc,h.inc}]
indent_style = space
indent_size = 4
emacs_mode = c
[*.sh]
indent_style = space
indent_size = 4
[*.{s,S}]
indent_style = tab
indent_size = 8
emacs_mode = asm
[*.{vert,frag}]
emacs_mode = glsl
[*.json]
indent_style = space
emacs_mode = python

7
.exrc Normal file

@@ -0,0 +1,7 @@
"VIM settings to match QEMU coding style. They are activated by adding the
"following settings (without the " symbol) as last two lines in $HOME/.vimrc:
"set secure
"set exrc
set expandtab
set shiftwidth=4
set smarttab

8
.gdbinit Normal file

@@ -0,0 +1,8 @@
# GDB may have ./.gdbinit loading disabled by default. In that case you can
# follow the instructions it prints. They boil down to adding the following to
# your home directory's ~/.gdbinit file:
#
# add-auto-load-safe-path /path/to/qemu/.gdbinit
# Load QEMU-specific sub-commands and settings
source scripts/qemu-gdb.py

4
.gitattributes vendored Normal file

@@ -0,0 +1,4 @@
*.c.inc diff=c
*.h.inc diff=c
*.m diff=objc
*.py diff=python

13
.github/FUNDING.yml vendored

@@ -1,13 +0,0 @@
# These are supported funding model platforms
# Replace with up to 4 GitHub Sponsors-enabled usernames e.g., [user1, user2]
github: AFLplusplus
patreon: # Replace with a single Patreon username
open_collective: AFLplusplusEU
ko_fi: # Replace with a single Ko-fi username
tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel
community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry
liberapay: # Replace with a single Liberapay username
issuehunt: # Replace with a single IssueHunt username
otechie: # Replace with a single Otechie username
custom: # Replace with up to 4 custom sponsorship URLs e.g., ['link1', 'link2']


@@ -1,29 +0,0 @@
---
name: Bug Report
about: Create a report to help us improve
title: ''
labels: 'bug'
assignees: ''
---
**IMPORTANT**
1. You have verified that the issue is present in the current `main` branch
Thank you for making LibAFL better!
**Describe the bug**
A clear and concise description of what the bug is.
**To Reproduce**
Steps to reproduce the behavior:
1. ...
2. ...
**Expected behavior**
A clear and concise description of what you expected to happen.
**Screen output/Screenshots**
If applicable, add copy-paste of the screen output or screenshot that shows the issue. Please ensure the output is in **English** and not in Chinese, Russian, German, etc.
**Additional context**
Add any other context about the problem here.


@@ -1,8 +0,0 @@
---
name: Empty
about: A question or issue that doesn't fit the templates
title: ''
labels: ''
assignees: ''
---


@@ -1,20 +0,0 @@
---
name: Feature request
about: Suggest an idea for this project
title: ''
labels: 'enhancement'
assignees: ''
---
**Is your feature request related to a problem? Please describe.**
A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
**Describe the solution you'd like**
A clear and concise description of what you want to happen.
**Describe alternatives you've considered**
A clear and concise description of any alternative solutions or features you've considered.
**Additional context**
Add any other context or screenshots about the feature request here.


@@ -1,331 +0,0 @@
name: build and test
on:
push:
branches: [ main ]
pull_request:
branches: [ main ]
env:
CARGO_TERM_COLOR: always
jobs:
common:
strategy:
matrix:
os: [ubuntu-latest, windows-latest, macOS-latest]
runs-on: ${{ matrix.os }}
steps:
- uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: nightly
override: true
- name: install mdbook
uses: baptiste0928/cargo-install@v1.3.0
with:
crate: mdbook
- name: install linkcheck
uses: baptiste0928/cargo-install@v1.3.0
with:
crate: mdbook-linkcheck
- uses: actions/checkout@v3
- uses: Swatinem/rust-cache@v2
- name: Install mimetype
if: runner.os == 'Linux'
run: sudo apt-get install libfile-mimeinfo-perl
- name: Check for binary blobs
if: runner.os == 'Linux'
run: ./scripts/check_for_blobs.sh
- name: Build libafl debug
run: cargo build -p libafl
- name: Build the book
run: cd docs && mdbook build
- name: Test the book
# TODO: fix books test fail with updated windows-rs
if: runner.os != 'Windows'
run: cd docs && mdbook test -L ../target/debug/deps
- name: Run tests
run: cargo test
- name: Test libafl no_std
run: cd libafl && cargo test --no-default-features
- name: Test libafl_targets no_std
run: cd libafl_targets && cargo test --no-default-features
ubuntu:
runs-on: ubuntu-22.04
steps:
- name: Remove Dotnet & Haskell
run: rm -rf /usr/share/dotnet && rm -rf /opt/ghc
- uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: stable
- name: set mold linker as default linker
uses: rui314/setup-mold@v1
- name: Install and cache deps
uses: awalsh128/cache-apt-pkgs-action@v1.1.0
with:
packages: llvm llvm-dev clang ninja-build clang-format-13 shellcheck libgtk-3-dev gcc-arm-linux-gnueabi g++-arm-linux-gnueabi libslirp-dev
- name: get clang version
run: command -v llvm-config && clang -v
- name: Install cargo-hack
run: curl -LsSf https://github.com/taiki-e/cargo-hack/releases/latest/download/cargo-hack-x86_64-unknown-linux-gnu.tar.gz | tar xzf - -C ~/.cargo/bin
- name: Add nightly rustfmt and clippy
run: rustup toolchain install nightly --component rustfmt --component clippy --allow-downgrade
- uses: actions/checkout@v3
- uses: Swatinem/rust-cache@v2
# ---- format check ----
# pcguard edges and pcguard hitcounts are not compatible and we need to build them separately
- name: Check pcguard edges
run: cargo check --features=sancov_pcguard_edges
- name: Format
run: cargo fmt -- --check
- name: Run clang-format style check for C/C++ programs.
run: clang-format-13 -n -Werror --style=file $(find . -type f \( -name '*.cpp' -o -iname '*.hpp' -o -name '*.cc' -o -name '*.cxx' -o -name '*.cc' -o -name '*.h' \) | grep -v '/target/' | grep -v 'libpng-1\.6\.37' | grep -v 'stb_image\.h' | grep -v 'dlmalloc\.c' | grep -v 'QEMU-Nyx')
- name: run shellcheck
run: shellcheck ./scripts/*.sh
- name: Run clippy
run: ./scripts/clippy.sh
# ---- doc check ----
- name: Build Docs
run: cargo doc
- name: Test Docs
run: cargo +nightly test --doc --all-features
# ---- build and feature check ----
- name: Run a normal build
run: cargo build --verbose
# cargo-hack's --feature-powerset would be nice here but libafl has too many knobs
- name: Check each feature
# Skipping `python` as it has to be built with the `maturin` tool
# `agpl`, `nautilus` require nightly
# `sancov_pcguard_edges` is tested separately
run: cargo hack check --each-feature --clean-per-run --exclude-features=prelude,agpl,nautilus,python,sancov_pcguard_edges,arm,aarch64,i386,be,systemmode --no-dev-deps
- name: Check nightly features
run: cargo +nightly check --features=agpl && cargo +nightly check --features=nautilus
- name: Build examples
run: cargo build --examples --verbose
ubuntu-concolic:
runs-on: ubuntu-latest
steps:
- uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: stable
- uses: actions/checkout@v3
- uses: Swatinem/rust-cache@v2
- name: Install smoke test deps
run: sudo ./libafl_concolic/test/smoke_test_ubuntu_deps.sh
- name: Run smoke test
run: ./libafl_concolic/test/smoke_test.sh
bindings:
runs-on: ubuntu-latest
steps:
- uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: stable
- name: set mold linker as default linker
uses: rui314/setup-mold@v1
- name: Install deps
run: sudo apt-get install -y llvm llvm-dev clang ninja-build python3-dev python3-pip python3-venv
- name: Install maturin
run: python3 -m pip install maturin
- uses: actions/checkout@v3
- uses: Swatinem/rust-cache@v2
- name: Run a maturin build
run: cd ./bindings/pylibafl && maturin build
fuzzers:
strategy:
matrix:
os: [ubuntu-latest, macos-latest]
runs-on: ${{ matrix.os }}
steps:
- uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: stable
- name: set mold linker as default linker
if: runner.os == 'Linux' # mold only supports Linux for now
uses: rui314/setup-mold@v1
- name: Add nightly rustfmt and clippy
run: rustup toolchain install nightly --component rustfmt --component clippy --allow-downgrade
- name: Add no_std toolchain
run: rustup toolchain install nightly-x86_64-unknown-linux-gnu ; rustup component add rust-src --toolchain nightly-x86_64-unknown-linux-gnu
- name: Install python
if: runner.os == 'macOS'
run: brew install --force-bottle --overwrite python@3.11
- uses: lyricwulf/abc@v1
with:
# todo: remove afl++-clang when nyx supports sancov_pcguard
linux: llvm llvm-dev clang nasm ninja-build gcc-arm-linux-gnueabi g++-arm-linux-gnueabi libgtk-3-dev afl++-clang pax-utils
# update bash for macOS to support the `declare -A` command
macos: llvm libpng nasm coreutils z3 bash
- name: pip install
run: python3 -m pip install msgpack jinja2
# Note that nproc needs to have coreutils installed on macOS, so the order of CI commands matters.
- name: enable multi-thread for `make`
run: export MAKEFLAGS="-j$(expr $(nproc) \+ 1)"
- name: install cargo-make
uses: baptiste0928/cargo-install@v1.3.0
with:
crate: cargo-make
- uses: actions/checkout@v3
with:
submodules: true # recursively checkout submodules
- uses: Swatinem/rust-cache@v2
- name: Build and run example fuzzers (Linux)
if: runner.os == 'Linux'
run: ./scripts/test_all_fuzzers.sh
- name: Build and run example fuzzers (macOS)
if: runner.os == 'macOS' # use bash v4
run: /usr/local/bin/bash ./scripts/test_all_fuzzers.sh
nostd-build:
runs-on: ubuntu-latest
steps:
- uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: nightly
override: true
components: rustfmt, clippy, rust-src
- uses: actions/checkout@v3
- uses: Swatinem/rust-cache@v2
- name: Add targets
run: rustup target add arm-linux-androideabi && rustup target add thumbv6m-none-eabi
- name: Build aarch64-unknown-none
run: cd ./fuzzers/baby_no_std && cargo +nightly build -Zbuild-std=core,alloc --target aarch64-unknown-none -v --release && cd ../..
- name: run x86_64 until panic!
run: cd ./fuzzers/baby_no_std && cargo +nightly run || test $? -ne 0 || exit 1
- name: no_std tests
run: cd ./libafl && cargo test --no-default-features
- name: libafl armv6m-none-eabi (32 bit no_std) clippy
run: cd ./libafl && cargo clippy --target thumbv6m-none-eabi --no-default-features
build-docker:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- name: Build docker
run: docker build -t libafl .
windows:
runs-on: windows-latest
steps:
- uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: stable
- uses: actions/checkout@v3
- uses: Swatinem/rust-cache@v2
- name: Windows Build
run: cargo build --verbose
- name: Run clippy
uses: actions-rs/cargo@v1
with:
command: clippy
- name: Build docs
run: cargo doc
- name: Set LIBCLANG_PATH
run: echo "LIBCLANG_PATH=$((gcm clang).source -replace "clang.exe")" >> $env:GITHUB_ENV
- name: install cargo-make
run: cargo install --force cargo-make
- uses: ilammy/msvc-dev-cmd@v1
- name: Build fuzzers/frida_libpng
run: cd fuzzers/frida_libpng/ && cargo make test
- name: Build fuzzers/frida_gdiplus
run: cd fuzzers/frida_gdiplus/ && cargo make test
macos:
runs-on: macOS-latest
steps:
- uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: stable
- name: Add nightly rustfmt and clippy
run: rustup toolchain install nightly --component rustfmt --component clippy --allow-downgrade
- name: Install deps
run: brew install z3 gtk+3
- uses: actions/checkout@v3
- uses: Swatinem/rust-cache@v2
- name: MacOS Build
run: cargo build --verbose
- name: Run clippy
run: ./scripts/clippy.sh
- name: Increase map sizes
run: ./scripts/shmem_limits_macos.sh
- name: Run Tests
run: cargo test
other_targets:
runs-on: macOS-latest
steps:
- uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: stable
- uses: nttld/setup-ndk@v1
with:
ndk-version: r21e
- name: install ios
run: rustup target add aarch64-apple-ios
- name: install android
run: rustup target add aarch64-linux-android
- name: install cargo ndk
run: cargo install cargo-ndk
- uses: actions/checkout@v3
- uses: Swatinem/rust-cache@v2
- name: Build iOS
run: cargo build --target aarch64-apple-ios
- name: Build Android
run: cargo ndk -t arm64-v8a build --release
#run: cargo build --target aarch64-linux-android
# TODO: Figure out how to properly build stuff with clang
#- name: Add clang path to $PATH env
# if: runner.os == 'Windows'
# run: echo "C:\msys64\mingw64\bin" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8
#- name: Try if clang works
# run: clang -v
#- name: Windows Test
# run: C:\Rust\.cargo\bin\cargo.exe test --verbose
freebsd:
runs-on: macos-12
name: Simple build in FreeBSD
steps:
- uses: actions/checkout@v3
- name: Test in FreeBSD
id: test
uses: vmactions/freebsd-vm@v0
with:
usesh: true
sync: rsync
copyback: false
mem: 2048
release: 13.1
prepare: |
pkg install -y curl bash sudo llvm14
curl https://sh.rustup.rs -sSf | sh -s -- -y
run: |
freebsd-version
. "$HOME/.cargo/env"
rustup toolchain install nightly
export LLVM_CONFIG=/usr/local/bin/llvm-config14
pwd
ls -lah
echo "local/bin"
ls -lah /usr/local/bin/
which llvm-config
chmod +x ./scripts/clippy.sh
bash ./scripts/shmem_limits_fbsd.sh
bash ./scripts/clippy.sh
cargo test

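The "Check each feature" step above delegates the per-feature loop to cargo-hack. A rough Python equivalent of what that loop does, with a placeholder feature list (cargo-hack reads the real one from Cargo.toml and honours the --exclude-features list shown in the step):

import subprocess

# Placeholder feature names; the real set comes from libafl's Cargo.toml.
FEATURES = ["std", "derive", "introspection", "llmp_compression"]
EXCLUDED = {"prelude", "agpl", "nautilus", "python", "sancov_pcguard_edges"}

for feature in FEATURES:
    if feature in EXCLUDED:
        continue
    print(f"cargo check --no-default-features --features={feature}")
    subprocess.run(
        ["cargo", "check", "--no-default-features", f"--features={feature}"],
        check=True,
    )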
78
.gitignore vendored

@@ -1,56 +1,24 @@
target
target-bin
out
Cargo.lock
vendor
.DS_Store
.env
*.tmp
/qemu_libafl_bridge/Cargo.lock
/qemu_libafl_bridge/target/
/GNUmakefile
/build/
/.cache/
/.vscode/
*.pyc
.sdk
.stgit-*
.git-submodule-status
.clang-format
.gdb_history
cscope.*
tags
TAGS
GPATH
GRTAGS
GTAGS
*~
*.ast_raw
*.depend_raw
*.swp
*.o
*.a
*.so
*.out
*.elf
*.bin
*.dll
*.exe
*.dSYM
*.obj
.cur_input
.venv
crashes
callgrind.out.*
perf.data
perf.data.old
.vscode
test.dict
# Ignore all built fuzzers
fuzzer_*
AFLplusplus
test_*
*_fuzzer
*_harness
# Ignore common dummy and logfiles
*.log
a
forkserver_test
__pycache__
*.lafl_lock
*atomic_file_testfile*
**/libxml2
**/corpus_discovered
**/libxml2-*.tar.gz
libafl_nyx/QEMU-Nyx
libafl_nyx/packer
*.patch
*.gcov

72
.gitlab-ci.d/base.yml Normal file

@@ -0,0 +1,72 @@
# The order of rules defined here is critically important.
# They are evaluated in order and first match wins.
#
# Thus we group them into a number of stages, ordered from
# most restrictive to least restrictive
#
.base_job_template:
rules:
#############################################################
# Stage 1: exclude scenarios where we definitely don't
# want jobs to run
#############################################################
# Cirrus jobs can't run unless the creds / target repo are set
- if: '$QEMU_JOB_CIRRUS && ($CIRRUS_GITHUB_REPO == null || $CIRRUS_API_TOKEN == null)'
when: never
# Publishing jobs should only run on the default branch in upstream
- if: '$QEMU_JOB_PUBLISH == "1" && $CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH != $CI_DEFAULT_BRANCH'
when: never
# Non-publishing jobs should only run on staging branches in upstream
- if: '$QEMU_JOB_PUBLISH != "1" && $CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH !~ /staging/'
when: never
# Jobs only intended for forks should always be skipped on upstream
- if: '$QEMU_JOB_ONLY_FORKS == "1" && $CI_PROJECT_NAMESPACE == "qemu-project"'
when: never
# Forks don't get pipelines unless QEMU_CI=1 or QEMU_CI=2 is set
- if: '$QEMU_CI != "1" && $QEMU_CI != "2" && $CI_PROJECT_NAMESPACE != "qemu-project"'
when: never
# Avocado jobs don't run in forks unless $QEMU_CI_AVOCADO_TESTING is set
- if: '$QEMU_JOB_AVOCADO && $QEMU_CI_AVOCADO_TESTING != "1" && $CI_PROJECT_NAMESPACE != "qemu-project"'
when: never
#############################################################
# Stage 2: fine-tune execution of jobs in specific scenarios
# where the catch-all logic is inappropriate
#############################################################
# Optional jobs should not be run unless manually triggered
- if: '$QEMU_JOB_OPTIONAL'
when: manual
allow_failure: true
# Skipped jobs should not be run unless manually triggered
- if: '$QEMU_JOB_SKIPPED'
when: manual
allow_failure: true
# Avocado jobs can be manually started in forks if $QEMU_CI_AVOCADO_TESTING is unset
- if: '$QEMU_JOB_AVOCADO && $CI_PROJECT_NAMESPACE != "qemu-project"'
when: manual
allow_failure: true
#############################################################
# Stage 3: catch all logic applying to any job not matching
# an earlier criteria
#############################################################
# Forks pipeline jobs don't start automatically unless
# QEMU_CI=2 is set
- if: '$QEMU_CI != "2" && $CI_PROJECT_NAMESPACE != "qemu-project"'
when: manual
# Jobs can run if any jobs they depend on were successful
- when: on_success

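As the header comment says, these rules are evaluated top to bottom and the first match wins, with the bare `when: on_success` acting as the catch-all. A simplified Python model of that evaluation order, covering only a subset of the rules above:

def decide(env: dict) -> str:
    # Each entry is (condition, action); the first matching condition decides.
    rules = [
        # Stage 1: forks get no pipeline unless QEMU_CI is 1 or 2
        (lambda e: e.get("QEMU_CI") not in ("1", "2")
                   and e.get("CI_PROJECT_NAMESPACE") != "qemu-project", "never"),
        # Stage 2: optional jobs wait for a manual trigger
        (lambda e: e.get("QEMU_JOB_OPTIONAL") == "1", "manual"),
        # Stage 3: fork jobs don't start automatically unless QEMU_CI=2
        (lambda e: e.get("QEMU_CI") != "2"
                   and e.get("CI_PROJECT_NAMESPACE") != "qemu-project", "manual"),
        # Catch-all, like the final `- when: on_success`
        (lambda e: True, "on_success"),
    ]
    for condition, action in rules:
        if condition(env):
            return action
    return "never"

print(decide({"CI_PROJECT_NAMESPACE": "qemu-project"}))                  # on_success
print(decide({"QEMU_CI": "1", "CI_PROJECT_NAMESPACE": "yourusername"}))  # manual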

@@ -0,0 +1,83 @@
.native_build_job_template:
extends: .base_job_template
stage: build
image: $CI_REGISTRY_IMAGE/qemu/$IMAGE:latest
before_script:
- JOBS=$(expr $(nproc) + 1)
script:
- if test -n "$LD_JOBS";
then
scripts/git-submodule.sh update meson ;
fi
- mkdir build
- cd build
- if test -n "$TARGETS";
then
../configure --enable-werror --disable-docs ${LD_JOBS:+--meson=git} $CONFIGURE_ARGS --target-list="$TARGETS" ;
else
../configure --enable-werror --disable-docs ${LD_JOBS:+--meson=git} $CONFIGURE_ARGS ;
fi || { cat config.log meson-logs/meson-log.txt && exit 1; }
- if test -n "$LD_JOBS";
then
../meson/meson.py configure . -Dbackend_max_links="$LD_JOBS" ;
fi || exit 1;
- make -j"$JOBS"
- if test -n "$MAKE_CHECK_ARGS";
then
make -j"$JOBS" $MAKE_CHECK_ARGS ;
fi
.common_test_job_template:
extends: .base_job_template
stage: test
image: $CI_REGISTRY_IMAGE/qemu/$IMAGE:latest
script:
- scripts/git-submodule.sh update
$(sed -n '/GIT_SUBMODULES=/ s/.*=// p' build/config-host.mak)
- cd build
- find . -type f -exec touch {} +
# Avoid recompiling by hiding ninja with NINJA=":"
- make NINJA=":" $MAKE_CHECK_ARGS
.native_test_job_template:
extends: .common_test_job_template
artifacts:
name: "$CI_JOB_NAME-$CI_COMMIT_REF_SLUG"
expire_in: 7 days
paths:
- build/meson-logs/testlog.txt
reports:
junit: build/meson-logs/testlog.junit.xml
.avocado_test_job_template:
extends: .common_test_job_template
cache:
key: "${CI_JOB_NAME}-cache"
paths:
- ${CI_PROJECT_DIR}/avocado-cache
policy: pull-push
artifacts:
name: "$CI_JOB_NAME-$CI_COMMIT_REF_SLUG"
when: on_failure
expire_in: 7 days
paths:
- build/tests/results/latest/results.xml
- build/tests/results/latest/test-results
reports:
junit: build/tests/results/latest/results.xml
before_script:
- mkdir -p ~/.config/avocado
- echo "[datadir.paths]" > ~/.config/avocado/avocado.conf
- echo "cache_dirs = ['${CI_PROJECT_DIR}/avocado-cache']"
>> ~/.config/avocado/avocado.conf
- echo -e '[job.output.testlogs]\nstatuses = ["FAIL", "INTERRUPT"]'
>> ~/.config/avocado/avocado.conf
- if [ -d ${CI_PROJECT_DIR}/avocado-cache ]; then
du -chs ${CI_PROJECT_DIR}/avocado-cache ;
fi
- export AVOCADO_ALLOW_UNTRUSTED_CODE=1
after_script:
- cd build
- du -chs ${CI_PROJECT_DIR}/avocado-cache
variables:
QEMU_JOB_AVOCADO: 1

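The .native_build_job_template above boils down to: pick a job count, run configure (narrowed by $TARGETS if set), optionally cap link parallelism via $LD_JOBS, then make plus the optional $MAKE_CHECK_ARGS stage. A compressed Python sketch of that decision logic that only prints the commands it would run:

import os

def build_commands(env: dict) -> list:
    jobs = (os.cpu_count() or 1) + 1        # mirrors JOBS=$(expr $(nproc) + 1)
    configure = ["../configure", "--enable-werror", "--disable-docs"]
    configure += env.get("CONFIGURE_ARGS", "").split()
    if env.get("TARGETS"):
        configure.append("--target-list=" + env["TARGETS"])
    cmds = [configure, ["make", f"-j{jobs}"]]
    if env.get("LD_JOBS"):                  # cap concurrent links for LTO builds
        cmds.insert(1, ["../meson/meson.py", "configure", ".",
                        "-Dbackend_max_links=" + env["LD_JOBS"]])
    if env.get("MAKE_CHECK_ARGS"):
        cmds.append(["make", f"-j{jobs}"] + env["MAKE_CHECK_ARGS"].split())
    return cmds

for cmd in build_commands({"TARGETS": "x86_64-softmmu aarch64-softmmu",
                           "MAKE_CHECK_ARGS": "check-build"}):
    print(" ".join(cmd))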
640
.gitlab-ci.d/buildtest.yml Normal file

@@ -0,0 +1,640 @@
include:
- local: '/.gitlab-ci.d/buildtest-template.yml'
build-system-alpine:
extends: .native_build_job_template
needs:
- job: amd64-alpine-container
variables:
IMAGE: alpine
TARGETS: aarch64-softmmu alpha-softmmu cris-softmmu hppa-softmmu
microblazeel-softmmu mips64el-softmmu
MAKE_CHECK_ARGS: check-build
CONFIGURE_ARGS: --enable-docs --enable-trace-backends=log,simple,syslog
artifacts:
expire_in: 2 days
paths:
- .git-submodule-status
- build
check-system-alpine:
extends: .native_test_job_template
needs:
- job: build-system-alpine
artifacts: true
variables:
IMAGE: alpine
MAKE_CHECK_ARGS: check-unit check-qtest
avocado-system-alpine:
extends: .avocado_test_job_template
needs:
- job: build-system-alpine
artifacts: true
variables:
IMAGE: alpine
MAKE_CHECK_ARGS: check-avocado
build-system-ubuntu:
extends: .native_build_job_template
needs:
job: amd64-ubuntu2004-container
variables:
IMAGE: ubuntu2004
CONFIGURE_ARGS: --enable-docs --enable-fdt=system --enable-capstone
TARGETS: aarch64-softmmu alpha-softmmu cris-softmmu hppa-softmmu
microblazeel-softmmu mips64el-softmmu
MAKE_CHECK_ARGS: check-build
artifacts:
expire_in: 2 days
paths:
- build
check-system-ubuntu:
extends: .native_test_job_template
needs:
- job: build-system-ubuntu
artifacts: true
variables:
IMAGE: ubuntu2004
MAKE_CHECK_ARGS: check
avocado-system-ubuntu:
extends: .avocado_test_job_template
needs:
- job: build-system-ubuntu
artifacts: true
variables:
IMAGE: ubuntu2004
MAKE_CHECK_ARGS: check-avocado
build-system-debian:
extends: .native_build_job_template
needs:
job: amd64-debian-container
variables:
IMAGE: debian-amd64
TARGETS: arm-softmmu avr-softmmu i386-softmmu mipsel-softmmu
riscv64-softmmu sh4eb-softmmu sparc-softmmu xtensaeb-softmmu
MAKE_CHECK_ARGS: check-build
artifacts:
expire_in: 2 days
paths:
- build
check-system-debian:
extends: .native_test_job_template
needs:
- job: build-system-debian
artifacts: true
variables:
IMAGE: debian-amd64
MAKE_CHECK_ARGS: check
avocado-system-debian:
extends: .avocado_test_job_template
needs:
- job: build-system-debian
artifacts: true
variables:
IMAGE: debian-amd64
MAKE_CHECK_ARGS: check-avocado
crash-test-debian:
extends: .native_test_job_template
needs:
- job: build-system-debian
artifacts: true
variables:
IMAGE: debian-amd64
script:
- cd build
- make check-venv
- tests/venv/bin/python3 scripts/device-crash-test -q ./qemu-system-i386
build-system-fedora:
extends: .native_build_job_template
needs:
job: amd64-fedora-container
variables:
IMAGE: fedora
CONFIGURE_ARGS: --disable-gcrypt --enable-nettle --enable-docs
--enable-fdt=system --enable-slirp --enable-capstone
TARGETS: tricore-softmmu microblaze-softmmu mips-softmmu
xtensa-softmmu m68k-softmmu riscv32-softmmu ppc-softmmu sparc64-softmmu
MAKE_CHECK_ARGS: check-build
artifacts:
expire_in: 2 days
paths:
- build
check-system-fedora:
extends: .native_test_job_template
needs:
- job: build-system-fedora
artifacts: true
variables:
IMAGE: fedora
MAKE_CHECK_ARGS: check
avocado-system-fedora:
extends: .avocado_test_job_template
needs:
- job: build-system-fedora
artifacts: true
variables:
IMAGE: fedora
MAKE_CHECK_ARGS: check-avocado
crash-test-fedora:
extends: .native_test_job_template
needs:
- job: build-system-fedora
artifacts: true
variables:
IMAGE: fedora
script:
- cd build
- make check-venv
- tests/venv/bin/python3 scripts/device-crash-test -q ./qemu-system-ppc
- tests/venv/bin/python3 scripts/device-crash-test -q ./qemu-system-riscv32
build-system-centos:
extends: .native_build_job_template
needs:
job: amd64-centos8-container
variables:
IMAGE: centos8
CONFIGURE_ARGS: --disable-nettle --enable-gcrypt --enable-fdt=system
--enable-modules --enable-trace-backends=dtrace --enable-docs
--enable-vfio-user-server
TARGETS: ppc64-softmmu or1k-softmmu s390x-softmmu
x86_64-softmmu rx-softmmu sh4-softmmu nios2-softmmu
MAKE_CHECK_ARGS: check-build
artifacts:
expire_in: 2 days
paths:
- build
check-system-centos:
extends: .native_test_job_template
needs:
- job: build-system-centos
artifacts: true
variables:
IMAGE: centos8
MAKE_CHECK_ARGS: check
avocado-system-centos:
extends: .avocado_test_job_template
needs:
- job: build-system-centos
artifacts: true
variables:
IMAGE: centos8
MAKE_CHECK_ARGS: check-avocado
build-system-opensuse:
extends: .native_build_job_template
needs:
job: amd64-opensuse-leap-container
variables:
IMAGE: opensuse-leap
CONFIGURE_ARGS: --enable-fdt=system
TARGETS: s390x-softmmu x86_64-softmmu aarch64-softmmu
MAKE_CHECK_ARGS: check-build
artifacts:
expire_in: 2 days
paths:
- build
check-system-opensuse:
extends: .native_test_job_template
needs:
- job: build-system-opensuse
artifacts: true
variables:
IMAGE: opensuse-leap
MAKE_CHECK_ARGS: check
avocado-system-opensuse:
extends: .avocado_test_job_template
needs:
- job: build-system-opensuse
artifacts: true
variables:
IMAGE: opensuse-leap
MAKE_CHECK_ARGS: check-avocado
# This job explicitly disables TCG (--disable-tcg); KVM is detected by
# the configure script. The container doesn't contain Xen headers so the
# Xen accelerator is not detected / selected. As a result it builds
# i386-softmmu and x86_64-softmmu with KVM as the single accelerator
# available.
# Also use a different coroutine implementation (which is only really of
# interest to KVM users, i.e. with TCG disabled)
build-tcg-disabled:
extends: .native_build_job_template
needs:
job: amd64-centos8-container
variables:
IMAGE: centos8
script:
- mkdir build
- cd build
- ../configure --disable-tcg --audio-drv-list="" --with-coroutine=ucontext
|| { cat config.log meson-logs/meson-log.txt && exit 1; }
- make -j"$JOBS"
- make check-unit
- make check-qapi-schema
- cd tests/qemu-iotests/
- ./check -raw 001 002 003 004 005 008 009 010 011 012 021 025 032 033 048
052 063 077 086 101 104 106 113 148 150 151 152 157 159 160 163
170 171 183 184 192 194 208 221 226 227 236 253 277 image-fleecing
- ./check -qcow2 028 051 056 057 058 065 068 082 085 091 095 096 102 122
124 132 139 142 144 145 151 152 155 157 165 194 196 200 202
208 209 216 218 227 234 246 247 248 250 254 255 257 258
260 261 262 263 264 270 272 273 277 279 image-fleecing
build-user:
extends: .native_build_job_template
needs:
job: amd64-debian-user-cross-container
variables:
IMAGE: debian-all-test-cross
CONFIGURE_ARGS: --disable-tools --disable-system
MAKE_CHECK_ARGS: check-tcg
build-user-static:
extends: .native_build_job_template
needs:
job: amd64-debian-user-cross-container
variables:
IMAGE: debian-all-test-cross
CONFIGURE_ARGS: --disable-tools --disable-system --static
MAKE_CHECK_ARGS: check-tcg
# Because the hexagon cross-compiler takes so long to build we don't rely
# on the CI system to build it and hence this job has an optional dependency
# declared. The image is manually uploaded.
build-user-hexagon:
extends: .native_build_job_template
needs:
job: hexagon-cross-container
optional: true
variables:
IMAGE: debian-hexagon-cross
TARGETS: hexagon-linux-user
CONFIGURE_ARGS: --disable-tools --disable-docs --enable-debug-tcg
MAKE_CHECK_ARGS: check-tcg
# Only build the softmmu targets we have check-tcg tests for
build-some-softmmu:
extends: .native_build_job_template
needs:
job: amd64-debian-user-cross-container
variables:
IMAGE: debian-all-test-cross
CONFIGURE_ARGS: --disable-tools --enable-debug
TARGETS: xtensa-softmmu arm-softmmu aarch64-softmmu alpha-softmmu
MAKE_CHECK_ARGS: check-tcg
# We build tricore in a very minimal tricore-only container
build-tricore-softmmu:
extends: .native_build_job_template
needs:
job: tricore-debian-cross-container
variables:
IMAGE: debian-tricore-cross
CONFIGURE_ARGS: --disable-tools --disable-fdt --enable-debug
TARGETS: tricore-softmmu
MAKE_CHECK_ARGS: check-tcg
clang-system:
extends: .native_build_job_template
needs:
job: amd64-fedora-container
variables:
IMAGE: fedora
CONFIGURE_ARGS: --cc=clang --cxx=clang++
--extra-cflags=-fsanitize=undefined --extra-cflags=-fno-sanitize-recover=undefined
TARGETS: alpha-softmmu arm-softmmu m68k-softmmu mips64-softmmu
ppc-softmmu s390x-softmmu
MAKE_CHECK_ARGS: check-qtest check-tcg
clang-user:
extends: .native_build_job_template
needs:
job: amd64-debian-user-cross-container
timeout: 70m
variables:
IMAGE: debian-all-test-cross
CONFIGURE_ARGS: --cc=clang --cxx=clang++ --disable-system
--target-list-exclude=microblazeel-linux-user,aarch64_be-linux-user,i386-linux-user,m68k-linux-user,mipsn32el-linux-user,xtensaeb-linux-user
--extra-cflags=-fsanitize=undefined --extra-cflags=-fno-sanitize-recover=undefined
MAKE_CHECK_ARGS: check-unit check-tcg
# Set LD_JOBS=1 because this requires LTO and ld consumes a large amount of memory.
# On gitlab runners, the default value sometimes ends up calling 2 lds concurrently,
# triggering an Out-Of-Memory error
#
# Since slirp callbacks are used in QEMU Timers, we cannot use libslirp with
# CFI builds, and thus have to disable it here.
#
# Split into three sets of build/check/avocado to limit the execution time of each
# job
build-cfi-aarch64:
extends: .native_build_job_template
needs:
- job: amd64-fedora-container
variables:
LD_JOBS: 1
AR: llvm-ar
IMAGE: fedora
CONFIGURE_ARGS: --cc=clang --cxx=clang++ --enable-cfi --enable-cfi-debug
--enable-safe-stack --disable-slirp
TARGETS: aarch64-softmmu
MAKE_CHECK_ARGS: check-build
# FIXME: This job is often failing, likely due to out-of-memory problems in
# the constrained containers of the shared runners. Thus this is marked as
# skipped until the situation has been solved.
QEMU_JOB_SKIPPED: 1
timeout: 90m
artifacts:
expire_in: 2 days
paths:
- build
check-cfi-aarch64:
extends: .native_test_job_template
needs:
- job: build-cfi-aarch64
artifacts: true
variables:
IMAGE: fedora
MAKE_CHECK_ARGS: check
avocado-cfi-aarch64:
extends: .avocado_test_job_template
needs:
- job: build-cfi-aarch64
artifacts: true
variables:
IMAGE: fedora
MAKE_CHECK_ARGS: check-avocado
build-cfi-ppc64-s390x:
extends: .native_build_job_template
needs:
- job: amd64-fedora-container
variables:
LD_JOBS: 1
AR: llvm-ar
IMAGE: fedora
CONFIGURE_ARGS: --cc=clang --cxx=clang++ --enable-cfi --enable-cfi-debug
--enable-safe-stack --disable-slirp
TARGETS: ppc64-softmmu s390x-softmmu
MAKE_CHECK_ARGS: check-build
# FIXME: This job is often failing, likely due to out-of-memory problems in
# the constrained containers of the shared runners. Thus this is marked as
# skipped until the situation has been solved.
QEMU_JOB_SKIPPED: 1
timeout: 80m
artifacts:
expire_in: 2 days
paths:
- build
check-cfi-ppc64-s390x:
extends: .native_test_job_template
needs:
- job: build-cfi-ppc64-s390x
artifacts: true
variables:
IMAGE: fedora
MAKE_CHECK_ARGS: check
avocado-cfi-ppc64-s390x:
extends: .avocado_test_job_template
needs:
- job: build-cfi-ppc64-s390x
artifacts: true
variables:
IMAGE: fedora
MAKE_CHECK_ARGS: check-avocado
build-cfi-x86_64:
extends: .native_build_job_template
needs:
- job: amd64-fedora-container
variables:
LD_JOBS: 1
AR: llvm-ar
IMAGE: fedora
CONFIGURE_ARGS: --cc=clang --cxx=clang++ --enable-cfi --enable-cfi-debug
--enable-safe-stack --disable-slirp
TARGETS: x86_64-softmmu
MAKE_CHECK_ARGS: check-build
timeout: 70m
artifacts:
expire_in: 2 days
paths:
- build
check-cfi-x86_64:
extends: .native_test_job_template
needs:
- job: build-cfi-x86_64
artifacts: true
variables:
IMAGE: fedora
MAKE_CHECK_ARGS: check
avocado-cfi-x86_64:
extends: .avocado_test_job_template
needs:
- job: build-cfi-x86_64
artifacts: true
variables:
IMAGE: fedora
MAKE_CHECK_ARGS: check-avocado
tsan-build:
extends: .native_build_job_template
needs:
job: amd64-ubuntu2004-container
variables:
IMAGE: ubuntu2004
CONFIGURE_ARGS: --enable-tsan --cc=clang-10 --cxx=clang++-10
--enable-trace-backends=ust --enable-fdt=system --disable-slirp
TARGETS: x86_64-softmmu ppc64-softmmu riscv64-softmmu x86_64-linux-user
MAKE_CHECK_ARGS: bench V=1
# gprof/gcov are GCC features
build-gprof-gcov:
extends: .native_build_job_template
needs:
job: amd64-ubuntu2004-container
variables:
IMAGE: ubuntu2004
CONFIGURE_ARGS: --enable-gprof --enable-gcov
TARGETS: aarch64-softmmu ppc64-softmmu s390x-softmmu x86_64-softmmu
artifacts:
expire_in: 1 days
paths:
- build
check-gprof-gcov:
extends: .native_test_job_template
needs:
- job: build-gprof-gcov
artifacts: true
variables:
IMAGE: ubuntu2004
MAKE_CHECK_ARGS: check
after_script:
- ${CI_PROJECT_DIR}/scripts/ci/coverage-summary.sh
build-oss-fuzz:
extends: .native_build_job_template
needs:
job: amd64-fedora-container
variables:
IMAGE: fedora
script:
- mkdir build-oss-fuzz
- CC="clang" CXX="clang++" CFLAGS="-fsanitize=address"
./scripts/oss-fuzz/build.sh
- export ASAN_OPTIONS="fast_unwind_on_malloc=0"
- for fuzzer in $(find ./build-oss-fuzz/DEST_DIR/ -executable -type f
| grep -v slirp); do
grep "LLVMFuzzerTestOneInput" ${fuzzer} > /dev/null 2>&1 || continue ;
echo Testing ${fuzzer} ... ;
"${fuzzer}" -runs=1 -seed=1 || exit 1 ;
done
build-tci:
extends: .native_build_job_template
needs:
job: amd64-debian-user-cross-container
variables:
IMAGE: debian-all-test-cross
script:
- TARGETS="aarch64 alpha arm hppa m68k microblaze ppc64 s390x x86_64"
- mkdir build
- cd build
- ../configure --enable-tcg-interpreter
--target-list="$(for tg in $TARGETS; do echo -n ${tg}'-softmmu '; done)" || { cat config.log meson-logs/meson-log.txt && exit 1; }
- make -j"$JOBS"
- make tests/qtest/boot-serial-test tests/qtest/cdrom-test tests/qtest/pxe-test
- for tg in $TARGETS ; do
export QTEST_QEMU_BINARY="./qemu-system-${tg}" ;
./tests/qtest/boot-serial-test || exit 1 ;
./tests/qtest/cdrom-test || exit 1 ;
done
- QTEST_QEMU_BINARY="./qemu-system-x86_64" ./tests/qtest/pxe-test
- QTEST_QEMU_BINARY="./qemu-system-s390x" ./tests/qtest/pxe-test -m slow
- make check-tcg
# Alternate coroutine implementations are only really of interest to KVM users
# However we can't test against KVM on Gitlab-CI so we can only run unit tests
build-coroutine-sigaltstack:
extends: .native_build_job_template
needs:
job: amd64-ubuntu2004-container
variables:
IMAGE: ubuntu2004
CONFIGURE_ARGS: --with-coroutine=sigaltstack --disable-tcg
--enable-trace-backends=ftrace
MAKE_CHECK_ARGS: check-unit
# Check our reduced build configurations
build-without-default-devices:
extends: .native_build_job_template
needs:
job: amd64-centos8-container
variables:
IMAGE: centos8
CONFIGURE_ARGS: --without-default-devices --disable-user
build-without-default-features:
extends: .native_build_job_template
needs:
job: amd64-fedora-container
variables:
IMAGE: fedora
CONFIGURE_ARGS:
--without-default-features
--disable-capstone
--disable-pie
--disable-qom-cast-debug
--disable-strip
TARGETS: avr-softmmu i386-softmmu mips64-softmmu s390x-softmmu sh4-softmmu
sparc64-softmmu hexagon-linux-user i386-linux-user s390x-linux-user
MAKE_CHECK_ARGS: check-unit check-qtest SPEED=slow
build-libvhost-user:
extends: .base_job_template
stage: build
image: $CI_REGISTRY_IMAGE/qemu/fedora:latest
needs:
job: amd64-fedora-container
script:
- mkdir subprojects/libvhost-user/build
- cd subprojects/libvhost-user/build
- meson
- ninja
# No targets are built here, just tools, docs, and unit tests. This
# also feeds into the eventual documentation deployment steps later
build-tools-and-docs-debian:
extends: .native_build_job_template
needs:
job: amd64-debian-container
# when running on 'master' we use a pre-existing container
optional: true
variables:
IMAGE: debian-amd64
MAKE_CHECK_ARGS: check-unit ctags TAGS cscope
CONFIGURE_ARGS: --disable-system --disable-user --enable-docs --enable-tools
QEMU_JOB_PUBLISH: 1
artifacts:
expire_in: 2 days
paths:
- build
# Prepare for GitLab pages deployment. Anything copied into the
# "public" directory will be deployed to $USER.gitlab.io/$PROJECT
#
# GitLab publishes from any branch that triggers a CI pipeline
#
# For the main repo we don't want to publish from 'staging'
# since that content may not be pushed, nor do we wish to
# publish from 'stable-NNN' branches as that content is outdated.
# Thus we restrict to just the default branch
#
# For contributor forks we want to publish from any repo so
# that users can see the results of their commits, regardless
# of what topic branch they're currently using
pages:
extends: .base_job_template
image: $CI_REGISTRY_IMAGE/qemu/debian-amd64:latest
stage: test
needs:
- job: build-tools-and-docs-debian
script:
- mkdir -p public
# HTML-ised source tree
- make gtags
- htags -anT --tree-view=filetree -m qemu_init
-t "Welcome to the QEMU sourcecode"
- mv HTML public/src
# Project documentation
- make -C build install DESTDIR=$(pwd)/temp-install
- mv temp-install/usr/local/share/doc/qemu/* public/
artifacts:
paths:
- public
variables:
QEMU_JOB_PUBLISH: 1

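The build-oss-fuzz job's shell loop (find the built binaries, keep those that export LLVMFuzzerTestOneInput and are not slirp-related, then run each one for a single input) maps directly onto a few lines of Python. A sketch, assuming the same build-oss-fuzz/DEST_DIR layout:

import os
import subprocess

DEST = "./build-oss-fuzz/DEST_DIR"
os.environ["ASAN_OPTIONS"] = "fast_unwind_on_malloc=0"

for root, _dirs, files in os.walk(DEST):
    for name in files:
        path = os.path.join(root, name)
        if "slirp" in path or not os.access(path, os.X_OK):
            continue
        with open(path, "rb") as binary:
            # Same filter as `grep LLVMFuzzerTestOneInput` in the job above.
            if b"LLVMFuzzerTestOneInput" not in binary.read():
                continue
        print(f"Testing {path} ...")
        subprocess.run([path, "-runs=1", "-seed=1"], check=True)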
94
.gitlab-ci.d/check-dco.py Executable file

@@ -0,0 +1,94 @@
#!/usr/bin/env python3
#
# check-dco.py: validate all commits are signed off
#
# Copyright (C) 2020 Red Hat, Inc.
#
# SPDX-License-Identifier: GPL-2.0-or-later
import os
import os.path
import sys
import subprocess
namespace = "qemu-project"
if len(sys.argv) >= 2:
namespace = sys.argv[1]
cwd = os.getcwd()
reponame = os.path.basename(cwd)
repourl = "https://gitlab.com/%s/%s.git" % (namespace, reponame)
subprocess.check_call(["git", "remote", "add", "check-dco", repourl])
subprocess.check_call(["git", "fetch", "check-dco", "master"],
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL)
ancestor = subprocess.check_output(["git", "merge-base",
"check-dco/master", "HEAD"],
universal_newlines=True)
ancestor = ancestor.strip()
subprocess.check_call(["git", "remote", "rm", "check-dco"])
errors = False
print("\nChecking for 'Signed-off-by: NAME <EMAIL>' " +
"on all commits since %s...\n" % ancestor)
log = subprocess.check_output(["git", "log", "--format=%H %s",
ancestor + "..."],
universal_newlines=True)
if log == "":
commits = []
else:
commits = [[c[0:40], c[41:]] for c in log.strip().split("\n")]
for sha, subject in commits:
msg = subprocess.check_output(["git", "show", "-s", sha],
universal_newlines=True)
lines = msg.strip().split("\n")
print("🔍 %s %s" % (sha, subject))
sob = False
for line in lines:
if "Signed-off-by:" in line:
sob = True
if "localhost" in line:
print(" ❌ FAIL: bad email in %s" % line)
errors = True
if not sob:
print(" ❌ FAIL missing Signed-off-by tag")
errors = True
if errors:
print("""
ERROR: One or more commits are missing a valid Signed-off-By tag.
This project requires all contributors to assert that their contributions
are provided in compliance with the terms of the Developer's Certificate
of Origin 1.1 (DCO):
https://developercertificate.org/
To indicate acceptance of the DCO every commit must have a tag
Signed-off-by: REAL NAME <EMAIL>
This can be achieved by passing the "-s" flag to the "git commit" command.
To bulk update all commits on current branch "git rebase" can be used:
git rebase -i master -x 'git commit --amend --no-edit -s'
""")
sys.exit(1)
sys.exit(0)

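Distilled to its core, the per-commit check above accepts a commit if it carries at least one Signed-off-by line and none of those lines use a localhost address. A compact restatement, for illustration only:

def commit_ok(message: str) -> bool:
    # Same rule as the loop above, applied to a single commit message.
    sob = [line for line in message.splitlines() if "Signed-off-by:" in line]
    return bool(sob) and not any("localhost" in line for line in sob)

assert commit_ok("fix: frobnicate\n\nSigned-off-by: Jane Doe <jane@example.com>")
assert not commit_ok("fix: frobnicate\n\nSigned-off-by: root <root@localhost>")
assert not commit_ok("fix: frobnicate with no tag")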
56
.gitlab-ci.d/check-patch.py Executable file

@@ -0,0 +1,56 @@
#!/usr/bin/env python3
#
# check-patch.py: run checkpatch.pl across all commits in a branch
#
# Copyright (C) 2020 Red Hat, Inc.
#
# SPDX-License-Identifier: GPL-2.0-or-later
import os
import os.path
import sys
import subprocess
namespace = "qemu-project"
if len(sys.argv) >= 2:
namespace = sys.argv[1]
cwd = os.getcwd()
reponame = os.path.basename(cwd)
repourl = "https://gitlab.com/%s/%s.git" % (namespace, reponame)
# GitLab CI environment does not give us any direct info about the
# base for the user's branch. We thus need to figure out a common
# ancestor between the user's branch and current git master.
subprocess.check_call(["git", "remote", "add", "check-patch", repourl])
subprocess.check_call(["git", "fetch", "check-patch", "master"],
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL)
ancestor = subprocess.check_output(["git", "merge-base",
"check-patch/master", "HEAD"],
universal_newlines=True)
ancestor = ancestor.strip()
log = subprocess.check_output(["git", "log", "--format=%H %s",
ancestor + "..."],
universal_newlines=True)
subprocess.check_call(["git", "remote", "rm", "check-patch"])
if log == "":
print("\nNo commits since %s, skipping checks\n" % ancestor)
sys.exit(0)
errors = False
print("\nChecking all commits since %s...\n" % ancestor, flush=True)
ret = subprocess.run(["scripts/checkpatch.pl", "--terse", ancestor + "..."])
if ret.returncode != 0:
print(" ❌ FAIL one or more commits failed scripts/checkpatch.pl")
sys.exit(1)
sys.exit(0)

123
.gitlab-ci.d/cirrus.yml Normal file

@@ -0,0 +1,123 @@
# Jobs that we delegate to Cirrus CI because they require an operating
# system other than Linux. These jobs will only run if the required
# setup has been performed on the GitLab account.
#
# The Cirrus CI configuration is generated by replacing target-specific
# variables in a generic template: some of these variables are provided
# when the GitLab CI job is defined, others are taken from a shell
# snippet generated using lcitool.
#
# Note that the $PATH environment variable has to be treated with
# special care, because we can't just override it at the GitLab CI job
# definition level or we risk breaking it completely.
.cirrus_build_job:
extends: .base_job_template
stage: build
image: registry.gitlab.com/libvirt/libvirt-ci/cirrus-run:master
needs: []
timeout: 80m
allow_failure: true
script:
- source .gitlab-ci.d/cirrus/$NAME.vars
- sed -e "s|[@]CI_REPOSITORY_URL@|$CI_REPOSITORY_URL|g"
-e "s|[@]CI_COMMIT_REF_NAME@|$CI_COMMIT_REF_NAME|g"
-e "s|[@]CI_COMMIT_SHA@|$CI_COMMIT_SHA|g"
-e "s|[@]CIRRUS_VM_INSTANCE_TYPE@|$CIRRUS_VM_INSTANCE_TYPE|g"
-e "s|[@]CIRRUS_VM_IMAGE_SELECTOR@|$CIRRUS_VM_IMAGE_SELECTOR|g"
-e "s|[@]CIRRUS_VM_IMAGE_NAME@|$CIRRUS_VM_IMAGE_NAME|g"
-e "s|[@]CIRRUS_VM_CPUS@|$CIRRUS_VM_CPUS|g"
-e "s|[@]CIRRUS_VM_RAM@|$CIRRUS_VM_RAM|g"
-e "s|[@]UPDATE_COMMAND@|$UPDATE_COMMAND|g"
-e "s|[@]INSTALL_COMMAND@|$INSTALL_COMMAND|g"
-e "s|[@]PATH@|$PATH_EXTRA${PATH_EXTRA:+:}\$PATH|g"
-e "s|[@]PKG_CONFIG_PATH@|$PKG_CONFIG_PATH|g"
-e "s|[@]PKGS@|$PKGS|g"
-e "s|[@]MAKE@|$MAKE|g"
-e "s|[@]PYTHON@|$PYTHON|g"
-e "s|[@]PIP3@|$PIP3|g"
-e "s|[@]PYPI_PKGS@|$PYPI_PKGS|g"
-e "s|[@]CONFIGURE_ARGS@|$CONFIGURE_ARGS|g"
-e "s|[@]TEST_TARGETS@|$TEST_TARGETS|g"
<.gitlab-ci.d/cirrus/build.yml >.gitlab-ci.d/cirrus/$NAME.yml
- cat .gitlab-ci.d/cirrus/$NAME.yml
- cirrus-run -v --show-build-log always .gitlab-ci.d/cirrus/$NAME.yml
variables:
QEMU_JOB_CIRRUS: 1
x64-freebsd-12-build:
extends: .cirrus_build_job
variables:
NAME: freebsd-12
CIRRUS_VM_INSTANCE_TYPE: freebsd_instance
CIRRUS_VM_IMAGE_SELECTOR: image_family
CIRRUS_VM_IMAGE_NAME: freebsd-12-3
CIRRUS_VM_CPUS: 8
CIRRUS_VM_RAM: 8G
UPDATE_COMMAND: pkg update
INSTALL_COMMAND: pkg install -y
TEST_TARGETS: check
x64-freebsd-13-build:
extends: .cirrus_build_job
variables:
NAME: freebsd-13
CIRRUS_VM_INSTANCE_TYPE: freebsd_instance
CIRRUS_VM_IMAGE_SELECTOR: image_family
CIRRUS_VM_IMAGE_NAME: freebsd-13-1
CIRRUS_VM_CPUS: 8
CIRRUS_VM_RAM: 8G
UPDATE_COMMAND: pkg update
INSTALL_COMMAND: pkg install -y
TEST_TARGETS: check
x64-macos-11-base-build:
extends: .cirrus_build_job
variables:
NAME: macos-11
CIRRUS_VM_INSTANCE_TYPE: osx_instance
CIRRUS_VM_IMAGE_SELECTOR: image
CIRRUS_VM_IMAGE_NAME: big-sur-base
CIRRUS_VM_CPUS: 12
CIRRUS_VM_RAM: 24G
UPDATE_COMMAND: brew update
INSTALL_COMMAND: brew install
PATH_EXTRA: /usr/local/opt/ccache/libexec:/usr/local/opt/gettext/bin
PKG_CONFIG_PATH: /usr/local/opt/curl/lib/pkgconfig:/usr/local/opt/ncurses/lib/pkgconfig:/usr/local/opt/readline/lib/pkgconfig
TEST_TARGETS: check-unit check-block check-qapi-schema check-softfloat check-qtest-x86_64
# The following jobs run VM-based tests via KVM on a Linux-based Cirrus-CI job
.cirrus_kvm_job:
extends: .base_job_template
stage: build
image: registry.gitlab.com/libvirt/libvirt-ci/cirrus-run:master
needs: []
timeout: 80m
script:
- sed -e "s|[@]CI_REPOSITORY_URL@|$CI_REPOSITORY_URL|g"
-e "s|[@]CI_COMMIT_REF_NAME@|$CI_COMMIT_REF_NAME|g"
-e "s|[@]CI_COMMIT_SHA@|$CI_COMMIT_SHA|g"
-e "s|[@]NAME@|$NAME|g"
-e "s|[@]CONFIGURE_ARGS@|$CONFIGURE_ARGS|g"
-e "s|[@]TEST_TARGETS@|$TEST_TARGETS|g"
<.gitlab-ci.d/cirrus/kvm-build.yml >.gitlab-ci.d/cirrus/$NAME.yml
- cat .gitlab-ci.d/cirrus/$NAME.yml
- cirrus-run -v --show-build-log always .gitlab-ci.d/cirrus/$NAME.yml
variables:
QEMU_JOB_CIRRUS: 1
QEMU_JOB_OPTIONAL: 1
x86-netbsd:
extends: .cirrus_kvm_job
variables:
NAME: netbsd
CONFIGURE_ARGS: --target-list=x86_64-softmmu,ppc64-softmmu,aarch64-softmmu
TEST_TARGETS: check
x86-openbsd:
extends: .cirrus_kvm_job
variables:
NAME: openbsd
CONFIGURE_ARGS: --target-list=i386-softmmu,riscv64-softmmu,mips64-softmmu
TEST_TARGETS: check

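The sed chain in .cirrus_build_job simply substitutes @TOKEN@ placeholders in .gitlab-ci.d/cirrus/build.yml with the job's variables before handing the result to cirrus-run (with $PATH getting the special treatment noted in the header). A Python sketch of that substitution, with placeholder values:

import re

def render(template: str, variables: dict) -> str:
    # Equivalent of the repeated  sed -e "s|[@]NAME@|$NAME|g"  expressions.
    return re.sub(r"@([A-Z0-9_]+)@",
                  lambda match: variables.get(match.group(1), ""),
                  template)

snippet = "image_family: @CIRRUS_VM_IMAGE_NAME@\ncpu: @CIRRUS_VM_CPUS@"
print(render(snippet, {"CIRRUS_VM_IMAGE_NAME": "freebsd-13-1",
                       "CIRRUS_VM_CPUS": "8"}))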

@@ -0,0 +1,54 @@
Cirrus CI integration
=====================
GitLab CI shared runners only provide a docker environment running on Linux.
While it is possible to provide private runners for non-Linux platforms this
is not something most contributors/maintainers will wish to do.
To work around this limitation, we take advantage of `Cirrus CI`_'s free
offering: more specifically, we use the `cirrus-run`_ script to trigger Cirrus
CI jobs from GitLab CI jobs so that Cirrus CI job output is integrated into
the main GitLab CI pipeline dashboard.
There is, however, some one-time setup required. If you want FreeBSD and macOS
builds to happen when you push to your GitLab repository, you need to
* set up a GitHub repository for the project, eg. ``yourusername/qemu``.
This repository needs to exist for cirrus-run to work, but it doesn't need to
be kept up to date, so you can create it and then forget about it;
* enable the `Cirrus CI GitHub app`_ for your GitHub account;
* sign up for Cirrus CI. It's enough to log into the website using your GitHub
account;
* grab an API token from the `Cirrus CI settings`_ page;
* it may be necessary to push an empty ``.cirrus.yml`` file to your github fork
for Cirrus CI to properly recognize the project. You can check whether
Cirrus CI knows about your project by navigating to:
``https://cirrus-ci.com/yourusername/qemu``
* in the *CI/CD / Variables* section of the settings page for your GitLab
repository, create two new variables:
* ``CIRRUS_GITHUB_REPO``, containing the name of the GitHub repository
created earlier, eg. ``yourusername/qemu``;
* ``CIRRUS_API_TOKEN``, containing the Cirrus CI API token generated earlier.
This variable **must** be marked as *Masked*, because anyone with knowledge
of it can impersonate you as far as Cirrus CI is concerned.
Neither of these variables should be marked as *Protected*, because in
general you'll want to be able to trigger Cirrus CI builds from non-protected
branches.
Once this one-time setup is complete, you can just keep pushing to your GitLab
repository as usual and you'll automatically get the additional CI coverage.
.. _Cirrus CI GitHub app: https://github.com/marketplace/cirrus-ci
.. _Cirrus CI settings: https://cirrus-ci.com/settings/profile/
.. _Cirrus CI: https://cirrus-ci.com/
.. _cirrus-run: https://github.com/sio/cirrus-run/

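The two CI/CD variables described above can also be created without the web UI, via GitLab's documented POST /projects/:id/variables endpoint. A hedged sketch using the requests library; the project id, access token and variable values are all placeholders:

import requests  # third-party: pip install requests

GITLAB_API = "https://gitlab.com/api/v4"
PROJECT_ID = "12345"                           # placeholder: your fork's numeric id
HEADERS = {"PRIVATE-TOKEN": "glpat-REDACTED"}  # placeholder: a GitLab access token

def create_ci_variable(key: str, value: str, masked: bool = False) -> None:
    # 'masked' hides the value in job logs; 'protected' stays off so that
    # non-protected branches can still trigger Cirrus CI builds (see above).
    response = requests.post(
        f"{GITLAB_API}/projects/{PROJECT_ID}/variables",
        headers=HEADERS,
        data={"key": key, "value": value,
              "masked": str(masked).lower(), "protected": "false"},
    )
    response.raise_for_status()

create_ci_variable("CIRRUS_GITHUB_REPO", "yourusername/qemu")
create_ci_variable("CIRRUS_API_TOKEN", "the-cirrus-api-token", masked=True)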

@@ -0,0 +1,37 @@
@CIRRUS_VM_INSTANCE_TYPE@:
@CIRRUS_VM_IMAGE_SELECTOR@: @CIRRUS_VM_IMAGE_NAME@
cpu: @CIRRUS_VM_CPUS@
memory: @CIRRUS_VM_RAM@
env:
CIRRUS_CLONE_DEPTH: 1
CI_REPOSITORY_URL: "@CI_REPOSITORY_URL@"
CI_COMMIT_REF_NAME: "@CI_COMMIT_REF_NAME@"
CI_COMMIT_SHA: "@CI_COMMIT_SHA@"
PATH: "@PATH@"
PKG_CONFIG_PATH: "@PKG_CONFIG_PATH@"
PYTHON: "@PYTHON@"
MAKE: "@MAKE@"
CONFIGURE_ARGS: "@CONFIGURE_ARGS@"
TEST_TARGETS: "@TEST_TARGETS@"
build_task:
install_script:
- @UPDATE_COMMAND@
- @INSTALL_COMMAND@ @PKGS@
- if test -n "@PYPI_PKGS@" ; then @PIP3@ install @PYPI_PKGS@ ; fi
clone_script:
- git clone --depth 100 "$CI_REPOSITORY_URL" .
- git fetch origin "$CI_COMMIT_REF_NAME"
- git reset --hard "$CI_COMMIT_SHA"
build_script:
- mkdir build
- cd build
- ../configure --enable-werror $CONFIGURE_ARGS
|| { cat config.log meson-logs/meson-log.txt; exit 1; }
- $MAKE -j$(sysctl -n hw.ncpu)
- for TARGET in $TEST_TARGETS ;
do
$MAKE -j$(sysctl -n hw.ncpu) $TARGET V=1
|| { cat meson-logs/testlog.txt; exit 1; } ;
done
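The ``@PLACEHOLDER@`` tokens in the template above are substituted with values
from the GitLab CI environment before the result is passed to cirrus-run. A
rough sketch of that kind of substitution; the variable list is abbreviated
and the file names are illustrative, not the exact job script:

```
# Sketch: expand the @PLACEHOLDER@ tokens, then trigger the Cirrus CI job.
NAME=freebsd-13
sed -e "s|[@]CI_REPOSITORY_URL@|${CI_REPOSITORY_URL}|g" \
    -e "s|[@]CI_COMMIT_REF_NAME@|${CI_COMMIT_REF_NAME}|g" \
    -e "s|[@]CI_COMMIT_SHA@|${CI_COMMIT_SHA}|g" \
    -e "s|[@]CONFIGURE_ARGS@|${CONFIGURE_ARGS}|g" \
    -e "s|[@]TEST_TARGETS@|${TEST_TARGETS}|g" \
    <.gitlab-ci.d/cirrus/build.yml >.gitlab-ci.d/cirrus/${NAME}.yml
cirrus-run -v --show-build-log always .gitlab-ci.d/cirrus/${NAME}.yml
```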

View File

@ -0,0 +1,16 @@
# THIS FILE WAS AUTO-GENERATED
#
# $ lcitool variables freebsd-12 qemu
#
# https://gitlab.com/libvirt/libvirt-ci
CCACHE='/usr/local/bin/ccache'
CPAN_PKGS=''
CROSS_PKGS=''
MAKE='/usr/local/bin/gmake'
NINJA='/usr/local/bin/ninja'
PACKAGING_COMMAND='pkg'
PIP3='/usr/local/bin/pip-3.8'
PKGS='alsa-lib bash bison bzip2 ca_root_nss capstone4 ccache cdrkit-genisoimage cmocka ctags curl cyrus-sasl dbus diffutils dtc flex fusefs-libs3 gettext git glib gmake gnutls gsed gtk3 json-c libepoxy libffi libgcrypt libjpeg-turbo libnfs libslirp libspice-server libssh libtasn1 llvm lzo2 meson ncurses nettle ninja opencv perl5 pixman pkgconf png py39-numpy py39-pillow py39-pip py39-sphinx py39-sphinx_rtd_theme py39-yaml python3 rpm2cpio sdl2 sdl2_image snappy sndio spice-protocol tesseract texinfo usbredir virglrenderer vte3 zstd'
PYPI_PKGS=''
PYTHON='/usr/local/bin/python3'

View File

@ -0,0 +1,16 @@
# THIS FILE WAS AUTO-GENERATED
#
# $ lcitool variables freebsd-13 qemu
#
# https://gitlab.com/libvirt/libvirt-ci
CCACHE='/usr/local/bin/ccache'
CPAN_PKGS=''
CROSS_PKGS=''
MAKE='/usr/local/bin/gmake'
NINJA='/usr/local/bin/ninja'
PACKAGING_COMMAND='pkg'
PIP3='/usr/local/bin/pip-3.8'
PKGS='alsa-lib bash bison bzip2 ca_root_nss capstone4 ccache cdrkit-genisoimage cmocka ctags curl cyrus-sasl dbus diffutils dtc flex fusefs-libs3 gettext git glib gmake gnutls gsed gtk3 json-c libepoxy libffi libgcrypt libjpeg-turbo libnfs libslirp libspice-server libssh libtasn1 llvm lzo2 meson ncurses nettle ninja opencv perl5 pixman pkgconf png py39-numpy py39-pillow py39-pip py39-sphinx py39-sphinx_rtd_theme py39-yaml python3 rpm2cpio sdl2 sdl2_image snappy sndio spice-protocol tesseract texinfo usbredir virglrenderer vte3 zstd'
PYPI_PKGS=''
PYTHON='/usr/local/bin/python3'

View File

@ -0,0 +1,31 @@
container:
image: fedora:35
cpu: 4
memory: 8Gb
kvm: true
env:
CIRRUS_CLONE_DEPTH: 1
CI_REPOSITORY_URL: "@CI_REPOSITORY_URL@"
CI_COMMIT_REF_NAME: "@CI_COMMIT_REF_NAME@"
CI_COMMIT_SHA: "@CI_COMMIT_SHA@"
@NAME@_task:
@NAME@_vm_cache:
folder: $HOME/.cache/qemu-vm
install_script:
- dnf update -y
- dnf install -y git make openssh-clients qemu-img qemu-system-x86 wget
clone_script:
- git clone --depth 100 "$CI_REPOSITORY_URL" .
- git fetch origin "$CI_COMMIT_REF_NAME"
- git reset --hard "$CI_COMMIT_SHA"
build_script:
- if [ -f $HOME/.cache/qemu-vm/images/@NAME@.img ]; then
make vm-build-@NAME@ J=$(getconf _NPROCESSORS_ONLN)
EXTRA_CONFIGURE_OPTS="@CONFIGURE_ARGS@"
BUILD_TARGET="@TEST_TARGETS@" ;
else
make vm-build-@NAME@ J=$(getconf _NPROCESSORS_ONLN) BUILD_TARGET=help
EXTRA_CONFIGURE_OPTS="--disable-system --disable-user --disable-tools" ;
fi
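The ``vm-build-*`` targets used here can also be exercised outside of Cirrus
CI. A minimal local sketch; the guest name, job count and configure options
are examples, and the first run builds the VM image into the cache directory
used above:

```
# Rough local equivalent of the Cirrus KVM job (values are examples).
make vm-build-netbsd J=$(getconf _NPROCESSORS_ONLN) \
     EXTRA_CONFIGURE_OPTS="--target-list=x86_64-softmmu" \
     BUILD_TARGET=check
```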

View File

@ -0,0 +1,16 @@
# THIS FILE WAS AUTO-GENERATED
#
# $ lcitool variables macos-11 qemu
#
# https://gitlab.com/libvirt/libvirt-ci
CCACHE='/usr/local/bin/ccache'
CPAN_PKGS=''
CROSS_PKGS=''
MAKE='/usr/local/bin/gmake'
NINJA='/usr/local/bin/ninja'
PACKAGING_COMMAND='brew'
PIP3='/usr/local/bin/pip3'
PKGS='bash bc bison bzip2 capstone ccache cmocka ctags curl dbus diffutils dtc flex gcovr gettext git glib gnu-sed gnutls gtk+3 jemalloc jpeg-turbo json-c libepoxy libffi libgcrypt libiscsi libnfs libpng libslirp libssh libtasn1 libusb llvm lzo make meson ncurses nettle ninja perl pixman pkg-config python3 rpm2cpio sdl2 sdl2_image snappy sparse spice-protocol tesseract texinfo usbredir vde vte3 zlib zstd'
PYPI_PKGS='PyYAML numpy pillow sphinx sphinx-rtd-theme'
PYTHON='/usr/local/bin/python3'

View File

@ -0,0 +1,12 @@
include:
- local: '/.gitlab-ci.d/container-template.yml'
amd64-centos8-container:
extends: .container_job_template
variables:
NAME: centos8
amd64-fedora-container:
extends: .container_job_template
variables:
NAME: fedora

View File

@ -0,0 +1,173 @@
alpha-debian-cross-container:
extends: .container_job_template
stage: containers
variables:
NAME: debian-alpha-cross
amd64-debian-cross-container:
extends: .container_job_template
stage: containers
variables:
NAME: debian-amd64-cross
amd64-debian-user-cross-container:
extends: .container_job_template
stage: containers
variables:
NAME: debian-all-test-cross
arm64-debian-cross-container:
extends: .container_job_template
stage: containers
variables:
NAME: debian-arm64-cross
armel-debian-cross-container:
extends: .container_job_template
stage: containers
variables:
NAME: debian-armel-cross
armhf-debian-cross-container:
extends: .container_job_template
stage: containers
variables:
NAME: debian-armhf-cross
# We never want to build hexagon in the CI system and by default we
# always want to refer to the master registry where it lives.
hexagon-cross-container:
extends: .base_job_template
image: docker:stable
stage: containers
variables:
NAME: debian-hexagon-cross
GIT_DEPTH: 1
QEMU_JOB_ONLY_FORKS: 1
services:
- docker:dind
before_script:
- export TAG="$CI_REGISTRY_IMAGE/qemu/$NAME:latest"
- export COMMON_TAG="$CI_REGISTRY/qemu-project/qemu/qemu/$NAME:latest"
- docker info
- docker login $CI_REGISTRY -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD"
script:
- echo "TAG:$TAG"
- echo "COMMON_TAG:$COMMON_TAG"
- docker pull $COMMON_TAG
- docker tag $COMMON_TAG $TAG
- docker push "$TAG"
after_script:
- docker logout
hppa-debian-cross-container:
extends: .container_job_template
stage: containers
variables:
NAME: debian-hppa-cross
m68k-debian-cross-container:
extends: .container_job_template
stage: containers
variables:
NAME: debian-m68k-cross
mips64-debian-cross-container:
extends: .container_job_template
stage: containers
variables:
NAME: debian-mips64-cross
mips64el-debian-cross-container:
extends: .container_job_template
stage: containers
variables:
NAME: debian-mips64el-cross
mips-debian-cross-container:
extends: .container_job_template
stage: containers
variables:
NAME: debian-mips-cross
mipsel-debian-cross-container:
extends: .container_job_template
stage: containers
variables:
NAME: debian-mipsel-cross
powerpc-test-cross-container:
extends: .container_job_template
stage: containers
variables:
NAME: debian-powerpc-test-cross
ppc64el-debian-cross-container:
extends: .container_job_template
stage: containers
variables:
NAME: debian-ppc64el-cross
riscv64-debian-cross-container:
extends: .container_job_template
stage: containers
# as we are currently based on 'sid/unstable' we may break, so...
allow_failure: true
variables:
NAME: debian-riscv64-cross
# we can, however, build TCG tests using a non-sid base
riscv64-debian-test-cross-container:
extends: .container_job_template
stage: containers
variables:
NAME: debian-riscv64-test-cross
s390x-debian-cross-container:
extends: .container_job_template
stage: containers
variables:
NAME: debian-s390x-cross
sh4-debian-cross-container:
extends: .container_job_template
stage: containers
variables:
NAME: debian-sh4-cross
sparc64-debian-cross-container:
extends: .container_job_template
stage: containers
variables:
NAME: debian-sparc64-cross
tricore-debian-cross-container:
extends: .container_job_template
stage: containers
variables:
NAME: debian-tricore-cross
xtensa-debian-cross-container:
extends: .container_job_template
variables:
NAME: debian-xtensa-cross
cris-fedora-cross-container:
extends: .container_job_template
variables:
NAME: fedora-cris-cross
i386-fedora-cross-container:
extends: .container_job_template
variables:
NAME: fedora-i386-cross
win32-fedora-cross-container:
extends: .container_job_template
variables:
NAME: fedora-win32-cross
win64-fedora-cross-container:
extends: .container_job_template
variables:
NAME: fedora-win64-cross

View File

@ -0,0 +1,22 @@
.container_job_template:
extends: .base_job_template
image: docker:stable
stage: containers
services:
- docker:dind
before_script:
- export TAG="$CI_REGISTRY_IMAGE/qemu/$NAME:latest"
- export COMMON_TAG="$CI_REGISTRY/qemu-project/qemu/$NAME:latest"
- apk add python3
- docker info
- docker login $CI_REGISTRY -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD"
script:
- echo "TAG:$TAG"
- echo "COMMON_TAG:$COMMON_TAG"
- ./tests/docker/docker.py --engine docker build
-t "qemu/$NAME" -f "tests/docker/dockerfiles/$NAME.docker"
-r $CI_REGISTRY/qemu-project/qemu
- docker tag "qemu/$NAME" "$TAG"
- docker push "$TAG"
after_script:
- docker logout
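The container build in the script above can be reproduced locally with the
same ``docker.py`` helper; the registry path and final tag below are
placeholders:

```
# Build one of the CI container images locally, mirroring the job script.
NAME=debian-amd64
./tests/docker/docker.py --engine docker build \
    -t "qemu/${NAME}" -f "tests/docker/dockerfiles/${NAME}.docker" \
    -r registry.gitlab.com/qemu-project/qemu
docker tag "qemu/${NAME}" "registry.example.com/you/qemu/${NAME}:latest"
docker push "registry.example.com/you/qemu/${NAME}:latest"
```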

View File

@ -0,0 +1,29 @@
include:
- local: '/.gitlab-ci.d/container-core.yml'
- local: '/.gitlab-ci.d/container-cross.yml'
amd64-alpine-container:
extends: .container_job_template
variables:
NAME: alpine
amd64-debian-container:
extends: .container_job_template
stage: containers
variables:
NAME: debian-amd64
amd64-ubuntu2004-container:
extends: .container_job_template
variables:
NAME: ubuntu2004
amd64-opensuse-leap-container:
extends: .container_job_template
variables:
NAME: opensuse-leap
python-container:
extends: .container_job_template
variables:
NAME: python

View File

@ -0,0 +1,53 @@
.cross_system_build_job:
extends: .base_job_template
stage: build
image: $CI_REGISTRY_IMAGE/qemu/$IMAGE:latest
timeout: 80m
script:
- mkdir build
- cd build
- PKG_CONFIG_PATH=$PKG_CONFIG_PATH
../configure --enable-werror --disable-docs $QEMU_CONFIGURE_OPTS
--disable-user --target-list-exclude="arm-softmmu cris-softmmu
i386-softmmu microblaze-softmmu mips-softmmu mipsel-softmmu
mips64-softmmu ppc-softmmu riscv32-softmmu sh4-softmmu
sparc-softmmu xtensa-softmmu $CROSS_SKIP_TARGETS"
- make -j$(expr $(nproc) + 1) all check-build $MAKE_CHECK_ARGS
- if grep -q "EXESUF=.exe" config-host.mak;
then make installer;
version="$(git describe --match v[0-9]* 2>/dev/null || git rev-parse --short HEAD)";
mv -v qemu-setup*.exe qemu-setup-${version}.exe;
fi
# Job to cross-build specific accelerators.
#
# Set the $ACCEL variable to select the specific accelerator (defaults to
# KVM), and set extra options (such as disabling other accelerators) via the
# $EXTRA_CONFIGURE_OPTS variable.
.cross_accel_build_job:
extends: .base_job_template
stage: build
image: $CI_REGISTRY_IMAGE/qemu/$IMAGE:latest
timeout: 30m
script:
- mkdir build
- cd build
- PKG_CONFIG_PATH=$PKG_CONFIG_PATH
../configure --enable-werror --disable-docs $QEMU_CONFIGURE_OPTS
--disable-tools --enable-${ACCEL:-kvm} $EXTRA_CONFIGURE_OPTS
- make -j$(expr $(nproc) + 1) all check-build $MAKE_CHECK_ARGS
.cross_user_build_job:
extends: .base_job_template
stage: build
image: $CI_REGISTRY_IMAGE/qemu/$IMAGE:latest
script:
- mkdir build
- cd build
- PKG_CONFIG_PATH=$PKG_CONFIG_PATH
../configure --enable-werror --disable-docs $QEMU_CONFIGURE_OPTS
--disable-system --target-list-exclude="aarch64_be-linux-user
alpha-linux-user cris-linux-user m68k-linux-user microblazeel-linux-user
nios2-linux-user or1k-linux-user ppc-linux-user sparc-linux-user
xtensa-linux-user $CROSS_SKIP_TARGETS"
- make -j$(expr $(nproc) + 1) all check-build $MAKE_CHECK_ARGS
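As a concrete reading of the accelerator template, this is roughly what a
Xen-only job ends up running inside its cross container; ``$QEMU_CONFIGURE_OPTS``
is provided by the container environment and everything else is illustrative:

```
# Rough expansion of .cross_accel_build_job with ACCEL=xen and
# EXTRA_CONFIGURE_OPTS="--disable-tcg --disable-kvm".
mkdir build && cd build
../configure --enable-werror --disable-docs $QEMU_CONFIGURE_OPTS \
    --disable-tools --enable-xen --disable-tcg --disable-kvm
make -j$(expr $(nproc) + 1) all check-build
```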

View File

@ -0,0 +1,204 @@
include:
- local: '/.gitlab-ci.d/crossbuild-template.yml'
cross-armel-system:
extends: .cross_system_build_job
needs:
job: armel-debian-cross-container
variables:
IMAGE: debian-armel-cross
cross-armel-user:
extends: .cross_user_build_job
needs:
job: armel-debian-cross-container
variables:
IMAGE: debian-armel-cross
cross-armhf-system:
extends: .cross_system_build_job
needs:
job: armhf-debian-cross-container
variables:
IMAGE: debian-armhf-cross
cross-armhf-user:
extends: .cross_user_build_job
needs:
job: armhf-debian-cross-container
variables:
IMAGE: debian-armhf-cross
cross-arm64-system:
extends: .cross_system_build_job
needs:
job: arm64-debian-cross-container
variables:
IMAGE: debian-arm64-cross
cross-arm64-user:
extends: .cross_user_build_job
needs:
job: arm64-debian-cross-container
variables:
IMAGE: debian-arm64-cross
cross-i386-system:
extends: .cross_system_build_job
needs:
job: i386-fedora-cross-container
variables:
IMAGE: fedora-i386-cross
MAKE_CHECK_ARGS: check-qtest
cross-i386-user:
extends: .cross_user_build_job
needs:
job: i386-fedora-cross-container
variables:
IMAGE: fedora-i386-cross
MAKE_CHECK_ARGS: check
cross-i386-tci:
extends: .cross_accel_build_job
timeout: 60m
needs:
job: i386-fedora-cross-container
variables:
IMAGE: fedora-i386-cross
ACCEL: tcg-interpreter
EXTRA_CONFIGURE_OPTS: --target-list=i386-softmmu,i386-linux-user,aarch64-softmmu,aarch64-linux-user,ppc-softmmu,ppc-linux-user
MAKE_CHECK_ARGS: check check-tcg
cross-mipsel-system:
extends: .cross_system_build_job
needs:
job: mipsel-debian-cross-container
variables:
IMAGE: debian-mipsel-cross
cross-mipsel-user:
extends: .cross_user_build_job
needs:
job: mipsel-debian-cross-container
variables:
IMAGE: debian-mipsel-cross
cross-mips64el-system:
extends: .cross_system_build_job
needs:
job: mips64el-debian-cross-container
variables:
IMAGE: debian-mips64el-cross
cross-mips64el-user:
extends: .cross_user_build_job
needs:
job: mips64el-debian-cross-container
variables:
IMAGE: debian-mips64el-cross
cross-ppc64el-system:
extends: .cross_system_build_job
needs:
job: ppc64el-debian-cross-container
variables:
IMAGE: debian-ppc64el-cross
cross-ppc64el-user:
extends: .cross_user_build_job
needs:
job: ppc64el-debian-cross-container
variables:
IMAGE: debian-ppc64el-cross
# The riscv64 cross-builds currently use a 'sid' container to get
# compilers and libraries. Until something more stable is found we set
# allow_failure so as not to block CI.
cross-riscv64-system:
extends: .cross_system_build_job
allow_failure: true
needs:
job: riscv64-debian-cross-container
variables:
IMAGE: debian-riscv64-cross
cross-riscv64-user:
extends: .cross_user_build_job
allow_failure: true
needs:
job: riscv64-debian-cross-container
variables:
IMAGE: debian-riscv64-cross
cross-s390x-system:
extends: .cross_system_build_job
needs:
job: s390x-debian-cross-container
variables:
IMAGE: debian-s390x-cross
cross-s390x-user:
extends: .cross_user_build_job
needs:
job: s390x-debian-cross-container
variables:
IMAGE: debian-s390x-cross
cross-s390x-kvm-only:
extends: .cross_accel_build_job
needs:
job: s390x-debian-cross-container
variables:
IMAGE: debian-s390x-cross
EXTRA_CONFIGURE_OPTS: --disable-tcg
cross-mips64el-kvm-only:
extends: .cross_accel_build_job
needs:
job: mips64el-debian-cross-container
variables:
IMAGE: debian-mips64el-cross
EXTRA_CONFIGURE_OPTS: --disable-tcg --target-list=mips64el-softmmu
cross-win32-system:
extends: .cross_system_build_job
needs:
job: win32-fedora-cross-container
variables:
IMAGE: fedora-win32-cross
CROSS_SKIP_TARGETS: alpha-softmmu avr-softmmu hppa-softmmu m68k-softmmu
microblazeel-softmmu mips64el-softmmu nios2-softmmu
artifacts:
paths:
- build/qemu-setup*.exe
cross-win64-system:
extends: .cross_system_build_job
needs:
job: win64-fedora-cross-container
variables:
IMAGE: fedora-win64-cross
CROSS_SKIP_TARGETS: or1k-softmmu rx-softmmu sh4eb-softmmu sparc64-softmmu
tricore-softmmu xtensaeb-softmmu
artifacts:
paths:
- build/qemu-setup*.exe
cross-amd64-xen-only:
extends: .cross_accel_build_job
needs:
job: amd64-debian-cross-container
variables:
IMAGE: debian-amd64-cross
ACCEL: xen
EXTRA_CONFIGURE_OPTS: --disable-tcg --disable-kvm
cross-arm64-xen-only:
extends: .cross_accel_build_job
needs:
job: arm64-debian-cross-container
variables:
IMAGE: debian-arm64-cross
ACCEL: xen
EXTRA_CONFIGURE_OPTS: --disable-tcg --disable-kvm

View File

@ -0,0 +1,20 @@
# The CI jobs defined here require GitLab runners installed and
# registered on machines that match their operating system names,
# versions and architectures. This is in contrast to the other CI
# jobs that are intended to run on GitLab's "shared" runners.
# Unlike the default approach on "shared" runners, which is based on
# containers, the custom runners have no such *requirement*, as those
# jobs should be capable of running on operating systems with no
# compatible container implementation, or with no support from
# gitlab-runner. To avoid problems that gitlab-runner can cause while
# reusing the Git repository, we enable the clone strategy, which
# guarantees a fresh repository on each job run.
variables:
GIT_STRATEGY: clone
include:
- local: '/.gitlab-ci.d/custom-runners/ubuntu-20.04-s390x.yml'
- local: '/.gitlab-ci.d/custom-runners/ubuntu-22.04-aarch64.yml'
- local: '/.gitlab-ci.d/custom-runners/ubuntu-22.04-aarch32.yml'
- local: '/.gitlab-ci.d/custom-runners/centos-stream-8-x86_64.yml'

View File

@ -0,0 +1,30 @@
centos-stream-8-x86_64:
allow_failure: true
needs: []
stage: build
tags:
- centos_stream_8
- x86_64
rules:
- if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/'
- if: "$CENTOS_STREAM_8_x86_64_RUNNER_AVAILABLE"
artifacts:
name: "$CI_JOB_NAME-$CI_COMMIT_REF_SLUG"
when: on_failure
expire_in: 7 days
paths:
- build/tests/results/latest/results.xml
- build/tests/results/latest/test-results
reports:
junit: build/tests/results/latest/results.xml
before_script:
- JOBS=$(expr $(nproc) + 1)
script:
- mkdir build
- cd build
- ../scripts/ci/org.centos/stream/8/x86_64/configure
|| { cat config.log meson-logs/meson-log.txt; exit 1; }
- make -j"$JOBS"
- make NINJA=":" check
|| { cat meson-logs/testlog.txt; exit 1; } ;
- ../scripts/ci/org.centos/stream/8/x86_64/test-avocado

View File

@ -0,0 +1,131 @@
# All ubuntu-20.04 jobs should run successfully in an environment
# set up by the scripts/ci/setup/build-environment.yml task
# "Install basic packages to build QEMU on Ubuntu 20.04/20.04"
ubuntu-20.04-s390x-all-linux-static:
needs: []
stage: build
tags:
- ubuntu_20.04
- s390x
rules:
- if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/'
- if: "$S390X_RUNNER_AVAILABLE"
script:
# --disable-libssh is needed because of https://bugs.launchpad.net/qemu/+bug/1838763
# --disable-glusterfs is needed because there's no static version of those libs in distro supplied packages
- mkdir build
- cd build
- ../configure --enable-debug --static --disable-system --disable-glusterfs --disable-libssh
|| { cat config.log meson-logs/meson-log.txt; exit 1; }
- make --output-sync -j`nproc`
- make --output-sync -j`nproc` check V=1
|| { cat meson-logs/testlog.txt; exit 1; } ;
- make --output-sync -j`nproc` check-tcg V=1
|| { cat meson-logs/testlog.txt; exit 1; } ;
ubuntu-20.04-s390x-all:
needs: []
stage: build
tags:
- ubuntu_20.04
- s390x
timeout: 75m
rules:
- if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/'
- if: "$S390X_RUNNER_AVAILABLE"
script:
- mkdir build
- cd build
- ../configure --disable-libssh
|| { cat config.log meson-logs/meson-log.txt; exit 1; }
- make --output-sync -j`nproc`
- make --output-sync -j`nproc` check V=1
|| { cat meson-logs/testlog.txt; exit 1; } ;
ubuntu-20.04-s390x-alldbg:
needs: []
stage: build
tags:
- ubuntu_20.04
- s390x
rules:
- if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/'
when: manual
allow_failure: true
- if: "$S390X_RUNNER_AVAILABLE"
when: manual
allow_failure: true
script:
- mkdir build
- cd build
- ../configure --enable-debug --disable-libssh
|| { cat config.log meson-logs/meson-log.txt; exit 1; }
- make clean
- make --output-sync -j`nproc`
- make --output-sync -j`nproc` check V=1
|| { cat meson-logs/testlog.txt; exit 1; } ;
ubuntu-20.04-s390x-clang:
needs: []
stage: build
tags:
- ubuntu_20.04
- s390x
rules:
- if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/'
when: manual
allow_failure: true
- if: "$S390X_RUNNER_AVAILABLE"
when: manual
allow_failure: true
script:
- mkdir build
- cd build
- ../configure --disable-libssh --cc=clang --cxx=clang++ --enable-sanitizers
|| { cat config.log meson-logs/meson-log.txt; exit 1; }
- make --output-sync -j`nproc`
- make --output-sync -j`nproc` check V=1
|| { cat meson-logs/testlog.txt; exit 1; } ;
ubuntu-20.04-s390x-tci:
needs: []
stage: build
tags:
- ubuntu_20.04
- s390x
rules:
- if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/'
when: manual
allow_failure: true
- if: "$S390X_RUNNER_AVAILABLE"
when: manual
allow_failure: true
script:
- mkdir build
- cd build
- ../configure --disable-libssh --enable-tcg-interpreter
|| { cat config.log meson-logs/meson-log.txt; exit 1; }
- make --output-sync -j`nproc`
ubuntu-20.04-s390x-notcg:
needs: []
stage: build
tags:
- ubuntu_20.04
- s390x
rules:
- if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/'
when: manual
allow_failure: true
- if: "$S390X_RUNNER_AVAILABLE"
when: manual
allow_failure: true
script:
- mkdir build
- cd build
- ../configure --disable-libssh --disable-tcg
|| { cat config.log meson-logs/meson-log.txt; exit 1; }
- make --output-sync -j`nproc`
- make --output-sync -j`nproc` check V=1
|| { cat meson-logs/testlog.txt; exit 1; } ;

View File

@ -0,0 +1,25 @@
# All ubuntu-22.04 jobs should run successfully in an environment
# set up by the scripts/ci/setup/qemu/build-environment.yml task
# "Install basic packages to build QEMU on Ubuntu 20.04"
ubuntu-22.04-aarch32-all:
needs: []
stage: build
tags:
- ubuntu_22.04
- aarch32
rules:
- if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/'
when: manual
allow_failure: true
- if: "$AARCH32_RUNNER_AVAILABLE"
when: manual
allow_failure: true
script:
- mkdir build
- cd build
- ../configure --cross-prefix=arm-linux-gnueabihf-
|| { cat config.log meson-logs/meson-log.txt; exit 1; }
- make --output-sync -j`nproc --ignore=40`
- make --output-sync -j`nproc --ignore=40` check V=1
|| { cat meson-logs/testlog.txt; exit 1; } ;

View File

@ -0,0 +1,130 @@
# All ubuntu-22.04 jobs should run successfully in an environment
# set up by the scripts/ci/setup/qemu/build-environment.yml task
# "Install basic packages to build QEMU on Ubuntu 20.04"
ubuntu-22.04-aarch64-all-linux-static:
needs: []
stage: build
tags:
- ubuntu_22.04
- aarch64
rules:
- if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/'
- if: "$AARCH64_RUNNER_AVAILABLE"
script:
- mkdir build
- cd build
# Disable -static-pie due to build error with system libc:
# https://bugs.launchpad.net/ubuntu/+source/glibc/+bug/1987438
- ../configure --enable-debug --static --disable-system --disable-pie
|| { cat config.log meson-logs/meson-log.txt; exit 1; }
- make --output-sync -j`nproc --ignore=40`
- make --output-sync -j`nproc --ignore=40` check V=1
|| { cat meson-logs/testlog.txt; exit 1; } ;
- make --output-sync -j`nproc --ignore=40` check-tcg V=1
|| { cat meson-logs/testlog.txt; exit 1; } ;
ubuntu-22.04-aarch64-all:
needs: []
stage: build
tags:
- ubuntu_22.04
- aarch64
rules:
- if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/'
when: manual
allow_failure: true
- if: "$AARCH64_RUNNER_AVAILABLE"
when: manual
allow_failure: true
script:
- mkdir build
- cd build
- ../configure
|| { cat config.log meson-logs/meson-log.txt; exit 1; }
- make --output-sync -j`nproc --ignore=40`
- make --output-sync -j`nproc --ignore=40` check V=1
|| { cat meson-logs/testlog.txt; exit 1; } ;
ubuntu-22.04-aarch64-alldbg:
needs: []
stage: build
tags:
- ubuntu_22.04
- aarch64
rules:
- if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/'
- if: "$AARCH64_RUNNER_AVAILABLE"
script:
- mkdir build
- cd build
- ../configure --enable-debug
|| { cat config.log meson-logs/meson-log.txt; exit 1; }
- make clean
- make --output-sync -j`nproc --ignore=40`
- make --output-sync -j`nproc --ignore=40` check V=1
|| { cat meson-logs/testlog.txt; exit 1; } ;
ubuntu-22.04-aarch64-clang:
needs: []
stage: build
tags:
- ubuntu_22.04
- aarch64
rules:
- if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/'
when: manual
allow_failure: true
- if: "$AARCH64_RUNNER_AVAILABLE"
when: manual
allow_failure: true
script:
- mkdir build
- cd build
- ../configure --disable-libssh --cc=clang-10 --cxx=clang++-10 --enable-sanitizers
|| { cat config.log meson-logs/meson-log.txt; exit 1; }
- make --output-sync -j`nproc --ignore=40`
- make --output-sync -j`nproc --ignore=40` check V=1
|| { cat meson-logs/testlog.txt; exit 1; } ;
ubuntu-22.04-aarch64-tci:
needs: []
stage: build
tags:
- ubuntu_22.04
- aarch64
rules:
- if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/'
when: manual
allow_failure: true
- if: "$AARCH64_RUNNER_AVAILABLE"
when: manual
allow_failure: true
script:
- mkdir build
- cd build
- ../configure --enable-tcg-interpreter
|| { cat config.log meson-logs/meson-log.txt; exit 1; }
- make --output-sync -j`nproc --ignore=40`
ubuntu-22.04-aarch64-notcg:
needs: []
stage: build
tags:
- ubuntu_22.04
- aarch64
rules:
- if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/'
when: manual
allow_failure: true
- if: "$AARCH64_RUNNER_AVAILABLE"
when: manual
allow_failure: true
script:
- mkdir build
- cd build
- ../configure --disable-tcg
|| { cat config.log meson-logs/meson-log.txt; exit 1; }
- make --output-sync -j`nproc --ignore=40`
- make --output-sync -j`nproc --ignore=40` check V=1
|| { cat meson-logs/testlog.txt; exit 1; } ;

85
.gitlab-ci.d/edk2.yml Normal file
View File

@ -0,0 +1,85 @@
# All jobs needing docker-edk2 must use the same rules it uses.
.edk2_job_rules:
rules:
# Forks don't get pipelines unless QEMU_CI=1 or QEMU_CI=2 is set
- if: '$QEMU_CI != "1" && $QEMU_CI != "2" && $CI_PROJECT_NAMESPACE != "qemu-project"'
when: never
# In forks, if QEMU_CI=1 is set, then create manual job
# if any of the files affecting the build are touched
- if: '$QEMU_CI == "1" && $CI_PROJECT_NAMESPACE != "qemu-project"'
changes:
- .gitlab-ci.d/edk2.yml
- .gitlab-ci.d/edk2/Dockerfile
- roms/edk2/*
when: manual
# In forks, if QEMU_CI=1 is set, then create manual job
# if the branch/tag starts with 'edk2'
- if: '$QEMU_CI == "1" && $CI_PROJECT_NAMESPACE != "qemu-project" && $CI_COMMIT_REF_NAME =~ /^edk2/'
when: manual
# In forks, if QEMU_CI=1 is set, then create manual job
# if last commit msg contains 'EDK2' (case insensitive)
- if: '$QEMU_CI == "1" && $CI_PROJECT_NAMESPACE != "qemu-project" && $CI_COMMIT_MESSAGE =~ /edk2/i'
when: manual
# Run if any files affecting the build output are touched
- changes:
- .gitlab-ci.d/edk2.yml
- .gitlab-ci.d/edk2/Dockerfile
- roms/edk2/*
when: on_success
# Run if the branch/tag starts with 'edk2'
- if: '$CI_COMMIT_REF_NAME =~ /^edk2/'
when: on_success
# Run if last commit msg contains 'EDK2' (case insensitive)
- if: '$CI_COMMIT_MESSAGE =~ /edk2/i'
when: on_success
docker-edk2:
extends: .edk2_job_rules
stage: containers
image: docker:19.03.1
services:
- docker:19.03.1-dind
variables:
GIT_DEPTH: 3
IMAGE_TAG: $CI_REGISTRY_IMAGE:edk2-cross-build
# We don't use TLS
DOCKER_HOST: tcp://docker:2375
DOCKER_TLS_CERTDIR: ""
before_script:
- docker login -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $CI_REGISTRY
script:
- docker pull $IMAGE_TAG || true
- docker build --cache-from $IMAGE_TAG --tag $CI_REGISTRY_IMAGE:$CI_COMMIT_SHA
--tag $IMAGE_TAG .gitlab-ci.d/edk2
- docker push $CI_REGISTRY_IMAGE:$CI_COMMIT_SHA
- docker push $IMAGE_TAG
build-edk2:
extends: .edk2_job_rules
stage: build
needs: ['docker-edk2']
artifacts:
paths: # 'artifacts.zip' will contain the following files:
- pc-bios/edk2*bz2
- pc-bios/edk2-licenses.txt
- edk2-stdout.log
- edk2-stderr.log
image: $CI_REGISTRY_IMAGE:edk2-cross-build
variables:
GIT_DEPTH: 3
script: # Clone the required submodules and build EDK2
- git submodule update --init roms/edk2
- git -C roms/edk2 submodule update --init --
ArmPkg/Library/ArmSoftFloatLib/berkeley-softfloat-3
BaseTools/Source/C/BrotliCompress/brotli
CryptoPkg/Library/OpensslLib/openssl
MdeModulePkg/Library/BrotliCustomDecompressLib/brotli
- export JOBS=$(($(getconf _NPROCESSORS_ONLN) + 1))
- echo "=== Using ${JOBS} simultaneous jobs ==="
- make -j${JOBS} -C roms efi 2>&1 1>edk2-stdout.log | tee -a edk2-stderr.log >&2
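The fork-only rules at the top of this file are driven by the ``QEMU_CI``
variable; one way to set it for a single push from a fork is GitLab's
push-option syntax (the remote and branch names are examples):

```
# Request a manual pipeline for this push; ci.variable is a standard
# GitLab push option, 'myfork' and the branch name are examples.
git push -o ci.variable="QEMU_CI=1" myfork edk2-test-branch
```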

View File

@ -0,0 +1,27 @@
#
# Docker image to cross-compile EDK2 firmware binaries
#
FROM ubuntu:18.04
MAINTAINER Philippe Mathieu-Daudé <f4bug@amsat.org>
# Install packages required to build EDK2
RUN apt update \
&& \
\
DEBIAN_FRONTEND=noninteractive \
apt install --assume-yes --no-install-recommends \
build-essential \
ca-certificates \
dos2unix \
gcc-aarch64-linux-gnu \
gcc-arm-linux-gnueabi \
git \
iasl \
make \
nasm \
python3 \
uuid-dev \
&& \
\
rm -rf /var/lib/apt/lists/*

85
.gitlab-ci.d/opensbi.yml Normal file
View File

@ -0,0 +1,85 @@
# All jobs needing docker-opensbi must use the same rules it uses.
.opensbi_job_rules:
rules:
# Forks don't get pipelines unless QEMU_CI=1 or QEMU_CI=2 is set
- if: '$QEMU_CI != "1" && $QEMU_CI != "2" && $CI_PROJECT_NAMESPACE != "qemu-project"'
when: never
# In forks, if QEMU_CI=1 is set, then create manual job
# if any files affecting the build output are touched
- if: '$QEMU_CI == "1" && $CI_PROJECT_NAMESPACE != "qemu-project"'
changes:
- .gitlab-ci.d/opensbi.yml
- .gitlab-ci.d/opensbi/Dockerfile
- roms/opensbi/*
when: manual
# In forks, if QEMU_CI=1 is set, then create manual job
# if the branch/tag starts with 'opensbi'
- if: '$QEMU_CI == "1" && $CI_PROJECT_NAMESPACE != "qemu-project" && $CI_COMMIT_REF_NAME =~ /^opensbi/'
when: manual
# In forks, if QEMU_CI=1 is set, then create manual job
# if the last commit msg contains 'OpenSBI' (case insensitive)
- if: '$QEMU_CI == "1" && $CI_PROJECT_NAMESPACE != "qemu-project" && $CI_COMMIT_MESSAGE =~ /opensbi/i'
when: manual
# Run if any files affecting the build output are touched
- changes:
- .gitlab-ci.d/opensbi.yml
- .gitlab-ci.d/opensbi/Dockerfile
- roms/opensbi/*
when: on_success
# Run if the branch/tag starts with 'opensbi'
- if: '$CI_COMMIT_REF_NAME =~ /^opensbi/'
when: on_success
# Run if the last commit msg contains 'OpenSBI' (case insensitive)
- if: '$CI_COMMIT_MESSAGE =~ /opensbi/i'
when: on_success
docker-opensbi:
extends: .opensbi_job_rules
stage: containers
image: docker:19.03.1
services:
- docker:19.03.1-dind
variables:
GIT_DEPTH: 3
IMAGE_TAG: $CI_REGISTRY_IMAGE:opensbi-cross-build
# We don't use TLS
DOCKER_HOST: tcp://docker:2375
DOCKER_TLS_CERTDIR: ""
before_script:
- docker login -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $CI_REGISTRY
script:
- docker pull $IMAGE_TAG || true
- docker build --cache-from $IMAGE_TAG --tag $CI_REGISTRY_IMAGE:$CI_COMMIT_SHA
--tag $IMAGE_TAG .gitlab-ci.d/opensbi
- docker push $CI_REGISTRY_IMAGE:$CI_COMMIT_SHA
- docker push $IMAGE_TAG
build-opensbi:
extends: .opensbi_job_rules
stage: build
needs: ['docker-opensbi']
artifacts:
paths: # 'artifacts.zip' will contain the following files:
- pc-bios/opensbi-riscv32-generic-fw_dynamic.bin
- pc-bios/opensbi-riscv64-generic-fw_dynamic.bin
- opensbi32-generic-stdout.log
- opensbi32-generic-stderr.log
- opensbi64-generic-stdout.log
- opensbi64-generic-stderr.log
image: $CI_REGISTRY_IMAGE:opensbi-cross-build
variables:
GIT_DEPTH: 3
script: # Clone the required submodules and build OpenSBI
- git submodule update --init roms/opensbi
- export JOBS=$(($(getconf _NPROCESSORS_ONLN) + 1))
- echo "=== Using ${JOBS} simultaneous jobs ==="
- make -j${JOBS} -C roms/opensbi clean
- make -j${JOBS} -C roms opensbi32-generic 2>&1 1>opensbi32-generic-stdout.log | tee -a opensbi32-generic-stderr.log >&2
- make -j${JOBS} -C roms/opensbi clean
- make -j${JOBS} -C roms opensbi64-generic 2>&1 1>opensbi64-generic-stdout.log | tee -a opensbi64-generic-stderr.log >&2

View File

@ -0,0 +1,33 @@
#
# Docker image to cross-compile OpenSBI firmware binaries
#
FROM ubuntu:18.04
MAINTAINER Bin Meng <bmeng.cn@gmail.com>
# Install packages required to build OpenSBI
RUN apt update \
&& \
\
DEBIAN_FRONTEND=noninteractive \
apt install --assume-yes --no-install-recommends \
build-essential \
ca-certificates \
git \
make \
wget \
&& \
\
rm -rf /var/lib/apt/lists/*
# Manually install the kernel.org "Crosstool" based toolchains for gcc-8.3
RUN wget -O - \
https://mirrors.edge.kernel.org/pub/tools/crosstool/files/bin/x86_64/8.3.0/x86_64-gcc-8.3.0-nolibc-riscv32-linux.tar.xz \
| tar -C /opt -xJ
RUN wget -O - \
https://mirrors.edge.kernel.org/pub/tools/crosstool/files/bin/x86_64/8.3.0/x86_64-gcc-8.3.0-nolibc-riscv64-linux.tar.xz \
| tar -C /opt -xJ
# Export the toolchains to the system path
ENV PATH="/opt/gcc-8.3.0-nolibc/riscv32-linux/bin:${PATH}"
ENV PATH="/opt/gcc-8.3.0-nolibc/riscv64-linux/bin:${PATH}"

View File

@ -0,0 +1,15 @@
# This file contains the set of jobs run by the QEMU project:
# https://gitlab.com/qemu-project/qemu/-/pipelines
include:
- local: '/.gitlab-ci.d/base.yml'
- local: '/.gitlab-ci.d/stages.yml'
- local: '/.gitlab-ci.d/edk2.yml'
- local: '/.gitlab-ci.d/opensbi.yml'
- local: '/.gitlab-ci.d/containers.yml'
- local: '/.gitlab-ci.d/crossbuilds.yml'
- local: '/.gitlab-ci.d/buildtest.yml'
- local: '/.gitlab-ci.d/static_checks.yml'
- local: '/.gitlab-ci.d/custom-runners.yml'
- local: '/.gitlab-ci.d/cirrus.yml'
- local: '/.gitlab-ci.d/windows.yml'

7
.gitlab-ci.d/stages.yml Normal file
View File

@ -0,0 +1,7 @@
# Currently we have two build stages after our containers are built:
# - build (for traditional build and test or first stage build)
# - test (for test stages, using build artefacts from a build stage)
stages:
- containers
- build
- test

View File

@ -0,0 +1,48 @@
check-patch:
extends: .base_job_template
stage: build
image: python:3.10-alpine
needs: []
script:
- .gitlab-ci.d/check-patch.py
variables:
GIT_DEPTH: 1000
QEMU_JOB_ONLY_FORKS: 1
before_script:
- apk -U add git perl
allow_failure: true
check-dco:
extends: .base_job_template
stage: build
image: python:3.10-alpine
needs: []
script: .gitlab-ci.d/check-dco.py
variables:
GIT_DEPTH: 1000
before_script:
- apk -U add git
check-python-pipenv:
extends: .base_job_template
stage: test
image: $CI_REGISTRY_IMAGE/qemu/python:latest
script:
- make -C python check-pipenv
variables:
GIT_DEPTH: 1
needs:
job: python-container
check-python-tox:
extends: .base_job_template
stage: test
image: $CI_REGISTRY_IMAGE/qemu/python:latest
script:
- make -C python check-tox
variables:
GIT_DEPTH: 1
QEMU_TOX_EXTRA_ARGS: --skip-missing-interpreters=false
QEMU_JOB_OPTIONAL: 1
needs:
job: python-container
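Both Python checks can be reproduced from a local checkout with the same make
targets the jobs invoke (the tox job additionally sets ``QEMU_TOX_EXTRA_ARGS``
in its environment):

```
# Run the same Python checks locally, using the make targets from the
# job scripts above.
make -C python check-pipenv
make -C python check-tox
```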

99
.gitlab-ci.d/windows.yml Normal file
View File

@ -0,0 +1,99 @@
.shared_msys2_builder:
extends: .base_job_template
tags:
- shared-windows
- windows
- windows-1809
cache:
key: "${CI_JOB_NAME}-cache"
paths:
- ${CI_PROJECT_DIR}/msys64/var/cache
needs: []
stage: build
timeout: 70m
before_script:
- If ( !(Test-Path -Path msys64\var\cache ) ) {
mkdir msys64\var\cache
}
- If ( !(Test-Path -Path msys64\var\cache\msys2.exe ) ) {
Invoke-WebRequest
"https://github.com/msys2/msys2-installer/releases/download/2022-06-03/msys2-base-x86_64-20220603.sfx.exe"
-outfile "msys64\var\cache\msys2.exe"
}
- msys64\var\cache\msys2.exe -y
- ((Get-Content -path .\msys64\etc\\post-install\\07-pacman-key.post -Raw)
-replace '--refresh-keys', '--version') |
Set-Content -Path ${CI_PROJECT_DIR}\msys64\etc\\post-install\\07-pacman-key.post
- .\msys64\usr\bin\bash -lc "sed -i 's/^CheckSpace/#CheckSpace/g' /etc/pacman.conf"
- .\msys64\usr\bin\bash -lc 'pacman --noconfirm -Syuu' # Core update
- .\msys64\usr\bin\bash -lc 'pacman --noconfirm -Syuu' # Normal update
- taskkill /F /FI "MODULES eq msys-2.0.dll"
msys2-64bit:
extends: .shared_msys2_builder
script:
- .\msys64\usr\bin\bash -lc "pacman -Sy --noconfirm --needed
bison diffutils flex
git grep make sed
mingw-w64-x86_64-capstone
mingw-w64-x86_64-curl
mingw-w64-x86_64-cyrus-sasl
mingw-w64-x86_64-gcc
mingw-w64-x86_64-glib2
mingw-w64-x86_64-gnutls
mingw-w64-x86_64-libnfs
mingw-w64-x86_64-libpng
mingw-w64-x86_64-libssh
mingw-w64-x86_64-libtasn1
mingw-w64-x86_64-libusb
mingw-w64-x86_64-nettle
mingw-w64-x86_64-ninja
mingw-w64-x86_64-pixman
mingw-w64-x86_64-pkgconf
mingw-w64-x86_64-python
mingw-w64-x86_64-SDL2
mingw-w64-x86_64-SDL2_image
mingw-w64-x86_64-snappy
mingw-w64-x86_64-usbredir
mingw-w64-x86_64-zstd "
- $env:CHERE_INVOKING = 'yes' # Preserve the current working directory
- $env:MSYSTEM = 'MINGW64'   # Start a 64-bit MinGW environment
- $env:MSYS = 'winsymlinks:native' # Enable native Windows symlink
- .\msys64\usr\bin\bash -lc './configure --target-list=x86_64-softmmu
--enable-capstone --without-default-devices'
- .\msys64\usr\bin\bash -lc 'make'
- .\msys64\usr\bin\bash -lc 'make check || { cat build/meson-logs/testlog.txt; exit 1; } ;'
msys2-32bit:
extends: .shared_msys2_builder
script:
- .\msys64\usr\bin\bash -lc "pacman -Sy --noconfirm --needed
bison diffutils flex
git grep make sed
mingw-w64-i686-capstone
mingw-w64-i686-curl
mingw-w64-i686-cyrus-sasl
mingw-w64-i686-gcc
mingw-w64-i686-glib2
mingw-w64-i686-gnutls
mingw-w64-i686-gtk3
mingw-w64-i686-libgcrypt
mingw-w64-i686-libjpeg-turbo
mingw-w64-i686-libssh
mingw-w64-i686-libtasn1
mingw-w64-i686-libusb
mingw-w64-i686-lzo2
mingw-w64-i686-ninja
mingw-w64-i686-pixman
mingw-w64-i686-pkgconf
mingw-w64-i686-python
mingw-w64-i686-snappy
mingw-w64-i686-usbredir "
- $env:CHERE_INVOKING = 'yes' # Preserve the current working directory
- $env:MSYSTEM = 'MINGW32'   # Start a 32-bit MinGW environment
- $env:MSYS = 'winsymlinks:native' # Enable native Windows symlink
- mkdir output
- cd output
- ..\msys64\usr\bin\bash -lc "../configure --target-list=ppc64-softmmu"
- ..\msys64\usr\bin\bash -lc 'make'
- ..\msys64\usr\bin\bash -lc 'make check || { cat meson-logs/testlog.txt; exit 1; } ;'

24
.gitlab-ci.yml Normal file
View File

@ -0,0 +1,24 @@
#
# This is the GitLab CI configuration file for the mainstream QEMU
# project: https://gitlab.com/qemu-project/qemu/-/pipelines
#
# !!! DO NOT ADD ANY NEW CONFIGURATION TO THIS FILE !!!
#
# Only documentation or comments are accepted.
#
# To use a different set of jobs than the mainstream QEMU project,
# you need to set the location of your custom yml file at "custom CI/CD
# configuration path", on your GitLab CI namespace:
# https://docs.gitlab.com/ee/ci/pipelines/settings.html#custom-cicd-configuration-path
#
# ----------------------------------------------------------------------
#
# QEMU CI jobs are based on templates. Some templates provide
# user-configurable options, modifiable via configuration variables.
#
# See https://qemu-project.gitlab.io/qemu/devel/ci.html#custom-ci-cd-variables
# for more information.
#
include:
- local: '/.gitlab-ci.d/qemu-project.yml'

View File

@ -0,0 +1,64 @@
<!--
This is the upstream QEMU issue tracker.
If you are able to, it will greatly facilitate bug triage if you attempt
to reproduce the problem with the latest qemu.git master built from
source. See https://www.qemu.org/download/#source for instructions on
how to do this.
QEMU generally supports the last two releases advertised on
https://www.qemu.org/. Problems with distro-packaged versions of QEMU
older than this should be reported to the distribution instead.
See https://www.qemu.org/contribute/report-a-bug/ for additional
guidance.
If this is a security issue, please consult
https://www.qemu.org/contribute/security-process/
-->
## Host environment
- Operating system: (Windows 10 21H1, Fedora 34, etc.)
- OS/kernel version: (For POSIX hosts, use `uname -a`)
- Architecture: (x86, ARM, s390x, etc.)
- QEMU flavor: (qemu-system-x86_64, qemu-aarch64, qemu-img, etc.)
- QEMU version: (e.g. `qemu-system-x86_64 --version`)
- QEMU command line:
<!--
Give the smallest, complete command line that exhibits the problem.
If you are using libvirt, virsh, or vmm, you can likely find the QEMU
command line arguments in /var/log/libvirt/qemu/$GUEST.log.
-->
```
./qemu-system-x86_64 -M q35 -m 4096 -enable-kvm -hda fedora32.qcow2
```
## Emulated/Virtualized environment
- Operating system: (Windows 10 21H1, Fedora 34, etc.)
- OS/kernel version: (For POSIX guests, use `uname -a`.)
- Architecture: (x86, ARM, s390x, etc.)
## Description of problem
<!-- Describe the problem, including any error/crash messages seen. -->
## Steps to reproduce
1.
2.
3.
## Additional information
<!--
Attach logs, stack traces, screenshots, etc. Compress the files if necessary.
If using libvirt, libvirt logs and XML domain information may be relevant.
-->
<!--
The line below ensures that proper tags are added to the issue.
Please do not remove it.
-->
/label ~"kind::Bug"

View File

@ -0,0 +1,32 @@
<!--
This is the upstream QEMU issue tracker.
Please note that QEMU, like most open source projects, relies on
contributors who have motivation, skills and available time to work on
implementing particular features.
Feature requests can be helpful for determining demand and interest, but
they are no guarantee that a contributor will volunteer to implement
them. We welcome and encourage even draft patches implementing a feature
to be sent to the mailing list, where they can be discussed and developed
further by the community.
Thank you for your interest in helping us to make QEMU better!
-->
## Goal
<!-- Describe the final result you want to achieve. Avoid design specifics. -->
## Technical details
<!-- Describe technical details, design specifics, suggestions, versions, etc. -->
## Additional information
<!-- Patch or branch references, any other useful information -->
<!--
The line below ensures that proper tags are added to the issue.
Please do not remove it.
-->
/label ~"kind::Feature Request"

69
.gitmodules vendored
View File

@ -1,3 +1,66 @@
[submodule "libafl_concolic/symcc_runtime/symcc"]
path = libafl_concolic/symcc_runtime/symcc
url = https://github.com/AFLplusplus/symcc.git
[submodule "roms/seabios"]
path = roms/seabios
url = https://gitlab.com/qemu-project/seabios.git/
[submodule "roms/SLOF"]
path = roms/SLOF
url = https://gitlab.com/qemu-project/SLOF.git
[submodule "roms/ipxe"]
path = roms/ipxe
url = https://gitlab.com/qemu-project/ipxe.git
[submodule "roms/openbios"]
path = roms/openbios
url = https://gitlab.com/qemu-project/openbios.git
[submodule "roms/qemu-palcode"]
path = roms/qemu-palcode
url = https://gitlab.com/qemu-project/qemu-palcode.git
[submodule "roms/sgabios"]
path = roms/sgabios
url = https://gitlab.com/qemu-project/sgabios.git
[submodule "dtc"]
path = dtc
url = https://gitlab.com/qemu-project/dtc.git
[submodule "roms/u-boot"]
path = roms/u-boot
url = https://gitlab.com/qemu-project/u-boot.git
[submodule "roms/skiboot"]
path = roms/skiboot
url = https://gitlab.com/qemu-project/skiboot.git
[submodule "roms/QemuMacDrivers"]
path = roms/QemuMacDrivers
url = https://gitlab.com/qemu-project/QemuMacDrivers.git
[submodule "ui/keycodemapdb"]
path = ui/keycodemapdb
url = https://gitlab.com/qemu-project/keycodemapdb.git
[submodule "roms/seabios-hppa"]
path = roms/seabios-hppa
url = https://gitlab.com/qemu-project/seabios-hppa.git
[submodule "roms/u-boot-sam460ex"]
path = roms/u-boot-sam460ex
url = https://gitlab.com/qemu-project/u-boot-sam460ex.git
[submodule "tests/fp/berkeley-testfloat-3"]
path = tests/fp/berkeley-testfloat-3
url = https://gitlab.com/qemu-project/berkeley-testfloat-3.git
[submodule "tests/fp/berkeley-softfloat-3"]
path = tests/fp/berkeley-softfloat-3
url = https://gitlab.com/qemu-project/berkeley-softfloat-3.git
[submodule "roms/edk2"]
path = roms/edk2
url = https://gitlab.com/qemu-project/edk2.git
[submodule "roms/opensbi"]
path = roms/opensbi
url = https://gitlab.com/qemu-project/opensbi.git
[submodule "roms/qboot"]
path = roms/qboot
url = https://gitlab.com/qemu-project/qboot.git
[submodule "meson"]
path = meson
url = https://gitlab.com/qemu-project/meson.git
[submodule "roms/vbootrom"]
path = roms/vbootrom
url = https://gitlab.com/qemu-project/vbootrom.git
[submodule "tests/lcitool/libvirt-ci"]
path = tests/lcitool/libvirt-ci
url = https://gitlab.com/libvirt/libvirt-ci.git
[submodule "subprojects/libvfio-user"]
path = subprojects/libvfio-user
url = https://gitlab.com/qemu-project/libvfio-user.git

51
.gitpublish Normal file
View File

@ -0,0 +1,51 @@
#
# Common git-publish profiles that can be used to send patches to QEMU upstream.
#
# See https://github.com/stefanha/git-publish for more information
#
[gitpublishprofile "default"]
base = master
to = qemu-devel@nongnu.org
cccmd = scripts/get_maintainer.pl --noroles --norolestats --nogit --nogit-fallback 2>/dev/null
[gitpublishprofile "rfc"]
base = master
prefix = RFC PATCH
to = qemu-devel@nongnu.org
cccmd = scripts/get_maintainer.pl --noroles --norolestats --nogit --nogit-fallback 2>/dev/null
[gitpublishprofile "stable"]
base = master
to = qemu-devel@nongnu.org
cc = qemu-stable@nongnu.org
cccmd = scripts/get_maintainer.pl --noroles --norolestats --nogit --nogit-fallback 2>/dev/null
[gitpublishprofile "trivial"]
base = master
to = qemu-devel@nongnu.org
cc = qemu-trivial@nongnu.org
cccmd = scripts/get_maintainer.pl --noroles --norolestats --nogit --nogit-fallback 2>/dev/null
[gitpublishprofile "block"]
base = master
to = qemu-devel@nongnu.org
cc = qemu-block@nongnu.org
cccmd = scripts/get_maintainer.pl --noroles --norolestats --nogit --nogit-fallback 2>/dev/null
[gitpublishprofile "arm"]
base = master
to = qemu-devel@nongnu.org
cc = qemu-arm@nongnu.org
cccmd = scripts/get_maintainer.pl --noroles --norolestats --nogit --nogit-fallback 2>/dev/null
[gitpublishprofile "s390"]
base = master
to = qemu-devel@nongnu.org
cc = qemu-s390@nongnu.org
cccmd = scripts/get_maintainer.pl --noroles --norolestats --nogit --nogit-fallback 2>/dev/null
[gitpublishprofile "ppc"]
base = master
to = qemu-devel@nongnu.org
cc = qemu-ppc@nongnu.org
cccmd = scripts/get_maintainer.pl --noroles --norolestats --nogit --nogit-fallback 2>/dev/null
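With these profiles in place, sending a series is a single git-publish
invocation. A sketch, assuming a git-publish version where profiles are
selected with ``--profile`` (check ``git publish --help`` for the exact
option spelling):

```
# Illustrative git-publish usage with the profiles defined above.
git publish --profile rfc      # send the current branch as an RFC series
git publish --profile block    # also CC qemu-block@nongnu.org
```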

197
.mailmap Normal file
View File

@ -0,0 +1,197 @@
# This mailmap fixes up author names/addresses.
#
# If you are adding to this file, consider whether a similar change needs to
# be made to contrib/gitdm/aliases. They are not, however, completely
# analogous: .mailmap is concerned with fixing up damaged author
# fields, whereas the gitdm equivalent is more concerned with making
# sure multiple email addresses get mapped onto the same author.
#
# From man git-shortlog the forms are:
#
# Proper Name <commit@email.xx>
# <proper@email.xx> <commit@email.xx>
# Proper Name <proper@email.xx> <commit@email.xx>
# Proper Name <proper@email.xx> Commit Name <commit@email.xx>
#
# The first section translates weird addresses from the original git import
# into proper addresses so that they are counted properly by git shortlog.
Andrzej Zaborowski <balrogg@gmail.com> balrog <balrog@c046a42c-6fe2-441c-8c8c-71466251a162>
Anthony Liguori <anthony@codemonkey.ws> aliguori <aliguori@c046a42c-6fe2-441c-8c8c-71466251a162>
Aurelien Jarno <aurelien@aurel32.net> aurel32 <aurel32@c046a42c-6fe2-441c-8c8c-71466251a162>
Blue Swirl <blauwirbel@gmail.com> blueswir1 <blueswir1@c046a42c-6fe2-441c-8c8c-71466251a162>
Edgar E. Iglesias <edgar.iglesias@gmail.com> edgar_igl <edgar_igl@c046a42c-6fe2-441c-8c8c-71466251a162>
Fabrice Bellard <fabrice@bellard.org> bellard <bellard@c046a42c-6fe2-441c-8c8c-71466251a162>
Jocelyn Mayer <l_indien@magic.fr> j_mayer <j_mayer@c046a42c-6fe2-441c-8c8c-71466251a162>
Paul Brook <paul@codesourcery.com> pbrook <pbrook@c046a42c-6fe2-441c-8c8c-71466251a162>
Thiemo Seufer <ths@networkno.de> ths <ths@c046a42c-6fe2-441c-8c8c-71466251a162>
malc <av1474@comtv.ru> malc <malc@c046a42c-6fe2-441c-8c8c-71466251a162>
# Corrupted Author fields
Aaron Larson <alarson@ddci.com> alarson@ddci.com
Andreas Färber <andreas.faerber@web.de> Andreas Färber <andreas.faerber>
Jason Wang <jasowang@redhat.com> Jason Wang <jasowang>
Marek Dolata <mkdolata@us.ibm.com> mkdolata@us.ibm.com <mkdolata@us.ibm.com>
Michael Ellerman <mpe@ellerman.id.au> michael@ozlabs.org <michael@ozlabs.org>
Nick Hudson <hnick@vmware.com> hnick@vmware.com <hnick@vmware.com>
# There is also a:
# (no author) <(no author)@c046a42c-6fe2-441c-8c8c-71466251a162>
# for the cvs2svn initialization commit e63c3dc74bf.
# Next, translate a few commits where mailman rewrote the From: line due
# to strict SPF, although we prefer to avoid adding more entries like that.
Ed Swierk <eswierk@skyportsystems.com> Ed Swierk via Qemu-devel <qemu-devel@nongnu.org>
Ian McKellar <ianloic@google.com> Ian McKellar via Qemu-devel <qemu-devel@nongnu.org>
Julia Suvorova <jusual@mail.ru> Julia Suvorova via Qemu-devel <qemu-devel@nongnu.org>
Justin Terry (VM) <juterry@microsoft.com> Justin Terry (VM) via Qemu-devel <qemu-devel@nongnu.org>
# Next, replace old addresses by a more recent one.
Aleksandar Markovic <aleksandar.qemu.devel@gmail.com> <aleksandar.markovic@mips.com>
Aleksandar Markovic <aleksandar.qemu.devel@gmail.com> <aleksandar.markovic@imgtec.com>
Aleksandar Markovic <aleksandar.qemu.devel@gmail.com> <amarkovic@wavecomp.com>
Aleksandar Rikalo <aleksandar.rikalo@syrmia.com> <arikalo@wavecomp.com>
Aleksandar Rikalo <aleksandar.rikalo@syrmia.com> <aleksandar.rikalo@rt-rk.com>
Alexander Graf <agraf@csgraf.de> <agraf@suse.de>
Anthony Liguori <anthony@codemonkey.ws> Anthony Liguori <aliguori@us.ibm.com>
Christian Borntraeger <borntraeger@linux.ibm.com> <borntraeger@de.ibm.com>
Filip Bozuta <filip.bozuta@syrmia.com> <filip.bozuta@rt-rk.com.com>
Frederic Konrad <konrad.frederic@yahoo.fr> <fred.konrad@greensocs.com>
Frederic Konrad <konrad.frederic@yahoo.fr> <konrad@adacore.com>
Greg Kurz <groug@kaod.org> <gkurz@linux.vnet.ibm.com>
Huacai Chen <chenhuacai@kernel.org> <chenhc@lemote.com>
Huacai Chen <chenhuacai@kernel.org> <chenhuacai@loongson.cn>
James Hogan <jhogan@kernel.org> <james.hogan@imgtec.com>
Leif Lindholm <quic_llindhol@quicinc.com> <leif.lindholm@linaro.org>
Leif Lindholm <quic_llindhol@quicinc.com> <leif@nuviainc.com>
Radoslaw Biernacki <rad@semihalf.com> <radoslaw.biernacki@linaro.org>
Paul Brook <paul@nowt.org> <paul@codesourcery.com>
Paul Burton <paulburton@kernel.org> <paul.burton@mips.com>
Paul Burton <paulburton@kernel.org> <paul.burton@imgtec.com>
Paul Burton <paulburton@kernel.org> <paul@archlinuxmips.org>
Paul Burton <paulburton@kernel.org> <pburton@wavecomp.com>
Philippe Mathieu-Daudé <philmd@linaro.org> <f4bug@amsat.org>
Philippe Mathieu-Daudé <philmd@linaro.org> <philmd@redhat.com>
Philippe Mathieu-Daudé <philmd@linaro.org> <philmd@fungible.com>
Stefan Brankovic <stefan.brankovic@syrmia.com> <stefan.brankovic@rt-rk.com.com>
Yongbok Kim <yongbok.kim@mips.com> <yongbok.kim@imgtec.com>
# Also list preferred name forms where people have changed their
# git author config, or had utf8/latin1 encoding issues.
Aaron Lindsay <aaron@os.amperecomputing.com>
Aaron Larson <alarson@ddci.com>
Alexey Gerasimenko <x1917x@gmail.com>
Alex Chen <alex.chen@huawei.com>
Alex Ivanov <void@aleksoft.net>
Andreas Färber <afaerber@suse.de>
Bandan Das <bsd@redhat.com>
Benjamin MARSILI <mlspirat42@gmail.com>
Benoît Canet <benoit.canet@gmail.com>
Benoît Canet <benoit.canet@irqsave.net>
Benoît Canet <benoit.canet@nodalink.com>
Boqun Feng <boqun.feng@gmail.com>
Boqun Feng <boqun.feng@intel.com>
Brad Smith <brad@comstyle.com>
Brijesh Singh <brijesh.singh@amd.com>
Brilly Wu <brillywu@viatech.com.cn>
Cédric Vincent <cedric.vincent@st.com>
CheneyLin <linzc@zju.edu.cn>
Chen Gang <chengang@emindsoft.com.cn>
Chen Gang <gang.chen.5i5j@gmail.com>
Chen Gang <gang.chen@sunrus.com.cn>
Chen Wei-Ren <chenwj@iis.sinica.edu.tw>
Christophe Lyon <christophe.lyon@st.com>
Collin L. Walling <walling@linux.ibm.com>
Daniel P. Berrangé <berrange@redhat.com>
Eduardo Otubo <otubo@redhat.com>
Erik Smit <erik.lucas.smit@gmail.com>
Fabrice Desclaux <fabrice.desclaux@cea.fr>
Fernando Luis Vázquez Cao <fernando_b1@lab.ntt.co.jp>
Fernando Luis Vázquez Cao <fernando@oss.ntt.co.jp>
Gautham R. Shenoy <ego@in.ibm.com>
Gautham R. Shenoy <ego@linux.vnet.ibm.com>
Gonglei (Arei) <arei.gonglei@huawei.com>
Guang Wang <wang.guang55@zte.com.cn>
Haibin Zhang <haibinzhang@tencent.com>
Hailiang Zhang <zhang.zhanghailiang@huawei.com>
Hanna Reitz <hreitz@redhat.com> <mreitz@redhat.com>
Hervé Poussineau <hpoussin@reactos.org>
Hyman Huang <huangy81@chinatelecom.cn>
Jakub Jermář <jakub@jermar.eu>
Jakub Jermář <jakub.jermar@kernkonzept.com>
Jean-Christophe Dubois <jcd@tribudubois.net>
Jindřich Makovička <makovick@gmail.com>
John Arbuckle <programmingkidx@gmail.com>
Juha Riihimäki <juha.riihimaki@nokia.com>
Juha Riihimäki <Juha.Riihimaki@nokia.com>
Jun Li <junmuzi@gmail.com>
Laurent Vivier <Laurent@lvivier.info>
Leandro Lupori <leandro.lupori@gmail.com>
Li Guang <lig.fnst@cn.fujitsu.com>
Liming Wang <walimisdev@gmail.com>
linzhecheng <linzc@zju.edu.cn>
Liran Schour <lirans@il.ibm.com>
Liu Yu <yu.liu@freescale.com>
Liu Yu <Yu.Liu@freescale.com>
Li Zhang <zhlcindy@gmail.com>
Li Zhang <zhlcindy@linux.vnet.ibm.com>
Lluís Vilanova <vilanova@ac.upc.edu>
Lluís Vilanova <xscript@gmx.net>
Longpeng (Mike) <longpeng2@huawei.com>
Luc Michel <luc.michel@git.antfield.fr>
Luc Michel <luc.michel@greensocs.com>
Marc Marí <marc.mari.barcelo@gmail.com>
Marc Marí <markmb@redhat.com>
Michael Avdienko <whitearchey@gmail.com>
Michael S. Tsirkin <mst@redhat.com>
Munkyu Im <munkyu.im@samsung.com>
Nicholas Bellinger <nab@linux-iscsi.org>
Nicholas Thomas <nick@bytemark.co.uk>
Nikunj A Dadhania <nikunj@linux.vnet.ibm.com>
Orit Wasserman <owasserm@redhat.com>
Paolo Bonzini <pbonzini@redhat.com>
Pan Nengyuan <pannengyuan@huawei.com>
Pavel Dovgaluk <dovgaluk@ispras.ru>
Pavel Dovgaluk <pavel.dovgaluk@gmail.com>
Pavel Dovgaluk <Pavel.Dovgaluk@ispras.ru>
Peter Chubb <peter.chubb@nicta.com.au>
Peter Crosthwaite <crosthwaite.peter@gmail.com>
Peter Crosthwaite <peter.crosthwaite@petalogix.com>
Peter Crosthwaite <peter.crosthwaite@xilinx.com>
Prasad J Pandit <pjp@fedoraproject.org>
Prasad J Pandit <ppandit@redhat.com>
Qiao Nuohan <qiaonuohan@cn.fujitsu.com>
Reimar Döffinger <Reimar.Doeffinger@gmx.de>
Remy Noel <remy.noel@blade-group.com>
Roger Pau Monné <roger.pau@citrix.com>
Shin'ichiro Kawasaki <kawasaki@juno.dti.ne.jp>
Shin'ichiro Kawasaki <shinichiro.kawasaki@wdc.com>
Sochin Jiang <sochin.jiang@huawei.com>
Stefan Berger <stefanb@linux.vnet.ibm.com> <stefanb@linux.ibm.com>
Takashi Yoshii <takasi-y@ops.dti.ne.jp>
Thomas Huth <thuth@redhat.com>
Thomas Knych <thomaswk@google.com>
Timothy Baldwin <T.E.Baldwin99@members.leeds.ac.uk>
Tony Nguyen <tony.nguyen@bt.com>
Venkateswararao Jujjuri <jvrao@linux.vnet.ibm.com>
Vibi Sreenivasan <vibi_sreenivasan@cms.com>
Vijaya Kumar K <vijayak@cavium.com>
Vijaya Kumar K <Vijaya.Kumar@cavium.com>
Vijay Kumar <vijaykumar@bravegnu.org>
Vijay Kumar <vijaykumar@zilogic.com>
Wang Guang <wang.guang55@zte.com.cn>
Wenchao Xia <xiawenc@linux.vnet.ibm.com>
Wenshuang Ma <kevinnma@tencent.com>
Xiaoqiang Zhao <zxq_yx_007@163.com>
Xinhua Cao <caoxinhua@huawei.com>
Xiong Zhang <xiong.y.zhang@intel.com>
Yin Yin <yin.yin@cs2c.com.cn>
Yu-Chen Lin <npes87184@gmail.com>
Yu-Chen Lin <npes87184@gmail.com> <yuchenlin@synology.com>
YunQiang Su <syq@debian.org>
YunQiang Su <ysu@wavecomp.com>
Yuri Pudgorodskiy <yur@virtuozzo.com>
Zhengui Li <lizhengui@huawei.com>
Zhenwei Pi <pizhenwei@bytedance.com>
Zhenwei Pi <zhenwei.pi@youruncloud.com>
Zhuang Yanying <ann.zhuangyanying@huawei.com>

299
.patchew.yml Normal file
View File

@ -0,0 +1,299 @@
---
# Note: this file is still unused. It serves as documentation for the
# Patchew configuration in case patchew.org disappears or has to be
# reinstalled.
#
# Patchew configuration is available to project administrators at
# https://patchew.org/api/v1/projects/1/config/ and can be converted
# to YAML using the following Python script:
#
# import json
# import sys
# import ruamel.yaml
#
# json_str = sys.stdin.read()
# yaml = ruamel.yaml.YAML()
# yaml.explicit_start = True
# data = json.loads(json_str, object_pairs_hook=ruamel.yaml.comments.CommentedMap)
# ruamel.yaml.scalarstring.walk_tree(data)
# yaml.dump(data, sys.stdout)
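#
# For example (illustrative only -- assuming the script above is saved as
# json2yaml.py and ruamel.yaml is installed):
#
#   pip install ruamel.yaml
#   python3 json2yaml.py < patchew-config.json > .patchew.yml
#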
email:
notifications:
timeouts:
event: TestingReport
enabled: true
to_user: false
reply_subject: true
set_reply_to: true
in_reply_to: true
reply_to_all: false
subject_template: none
to: fam@euphon.net
cc: ''
body_template: |
{% if not is_timeout %} {{ cancel }} {% endif %}
Test '{{ test }}' timeout, log:
{{ log }}
ENOSPC:
event: TestingReport
enabled: true
to_user: false
reply_subject: false
set_reply_to: false
in_reply_to: true
reply_to_all: false
subject_template: Out of space error
to: fam@euphon.net
cc: ''
body_template: |
{% if passed %}
{{ cancel }}
{% endif %}
{% if 'No space left on device' in log %}
Tester {{ tester }} out of space when running {{ test }}
{{ log }}
{% else %}
{{ cancel }}
{% endif %}
FailureShort:
event: TestingReport
enabled: true
to_user: false
reply_subject: true
set_reply_to: true
in_reply_to: true
reply_to_all: true
subject_template: Testing failed
to: ''
cc: ''
body_template: |
{% if passed or not obj.message_id or is_timeout %}
{{ cancel }}
{% endif %}
{% if 'No space left on device' in log %}
{{ cancel }}
{% endif %}
Patchew URL: https://patchew.org/QEMU/{{ obj.message_id }}/
{% ansi2text log as logtext %}
{% if test == "checkpatch" %}
Hi,
This series seems to have some coding style problems. See output below for
more information:
{{ logtext }}
{% elif test == "docker-mingw@fedora" or test == "docker-quick@centos8" or test == "asan" %}
Hi,
This series failed the {{ test }} build test. Please find the testing commands and
their output below. If you have Docker installed, you can probably reproduce it
locally.
{% lines_between logtext start="^=== TEST SCRIPT BEGIN ===$" stop="^=== TEST SCRIPT END ===$" %}
{% lines_between logtext start="^=== OUTPUT BEGIN ===$" stop="=== OUTPUT END ===$" as output %}
{% grep_C output regex="\b(FAIL|XPASS|ERROR|WARN|error:|warning:)" n=3 %}
{% elif test == "s390x" or test == "FreeBSD" or test == "ppcle" or test == "ppcbe" %}
Hi,
This series failed the build test on the {{test}} host. Please find the details below.
{% lines_between logtext start="^=== TEST SCRIPT BEGIN ===$" stop="^=== TEST SCRIPT END ===$" %}
{% lines_between logtext start="^=== OUTPUT BEGIN ===$" stop="=== OUTPUT END ===$" as output %}
{% grep_C output regex="\b(FAIL|XPASS|ERROR|WARN|error:|warning:)" n=3 %}
{% else %}
{{ cancel }}
{% endif %}
The full log is available at
{{ log_url }}.
---
Email generated automatically by Patchew [https://patchew.org/].
Please send your feedback to patchew-devel@redhat.com
testing:
tests:
asan:
enabled: true
requirements: docker
timeout: 3600
script: |
#!/bin/bash
time make docker-test-debug@fedora TARGET_LIST=x86_64-softmmu J=14 NETWORK=1
docker-quick@centos8:
enabled: false
requirements: docker,x86_64
timeout: 3600
script: |
#!/bin/bash
time make docker-test-quick@centos8 SHOW_ENV=1 J=14 NETWORK=1
checkpatch:
enabled: true
requirements: ''
timeout: 600
script: |
#!/bin/bash
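# If the "base" ref is missing there is nothing to compare against, so exit 0
# (skip) rather than fail; otherwise checkpatch covers every commit since base.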
git rev-parse base > /dev/null || exit 0
./scripts/checkpatch.pl --mailback base..
docker-mingw@fedora:
enabled: true
requirements: docker,x86_64
timeout: 3600
script: |
#! /bin/bash
test "$(uname -m)" = "x86_64"
ppcle:
enabled: false
requirements: ppcle
timeout: 3600
script: |
#!/bin/bash
# Testing script will be invoked under the git checkout with
# HEAD pointing to a commit that has the patches applied on top of "base"
# branch
set -e
CC=$HOME/bin/cc
INSTALL=$PWD/install
BUILD=$PWD/build
mkdir -p $BUILD $INSTALL
SRC=$PWD
cd $BUILD
$SRC/configure --cc=$CC --prefix=$INSTALL
make -j4
# XXX: we need reliable clean up
# make check -j4 V=1
make install
echo
echo "=== ENV ==="
env
echo
echo "=== PACKAGES ==="
rpm -qa
ppcbe:
enabled: false
requirements: ppcbe
timeout: 3600
script: |
#!/bin/bash
# Testing script will be invoked under the git checkout with
# HEAD pointing to a commit that has the patches applied on top of "base"
# branch
set -e
CC=$HOME/bin/cc
INSTALL=$PWD/install
BUILD=$PWD/build
mkdir -p $BUILD $INSTALL
SRC=$PWD
cd $BUILD
$SRC/configure --cc=$CC --prefix=$INSTALL
make -j4
# XXX: we need reliable clean up
# make check -j4 V=1
make install
echo
echo "=== ENV ==="
env
echo
echo "=== PACKAGES ==="
rpm -qa
FreeBSD:
enabled: true
requirements: qemu-x86,x86_64,git
timeout: 3600
script: |
#!/bin/bash
# Testing script will be invoked under the git checkout with
# HEAD pointing to a commit that has the patches applied on top of "base"
# branch
if qemu-system-x86_64 --help >/dev/null 2>&1; then
QEMU=qemu-system-x86_64
elif /usr/libexec/qemu-kvm --help >/dev/null 2>&1; then
QEMU=/usr/libexec/qemu-kvm
else
exit 1
fi
make vm-build-freebsd J=21 QEMU=$QEMU
exit 0
docker-clang@ubuntu:
enabled: true
requirements: docker,x86_64
timeout: 3600
script: |
#!/bin/bash
time make docker-test-clang@ubuntu SHOW_ENV=1 J=14 NETWORK=1
s390x:
enabled: true
requirements: s390x
timeout: 3600
script: |
#!/bin/bash
# Testing script will be invoked under the git checkout with
# HEAD pointing to a commit that has the patches applied on top of "base"
# branch
set -e
CC=$HOME/bin/cc
INSTALL=$PWD/install
BUILD=$PWD/build
mkdir -p $BUILD $INSTALL
SRC=$PWD
cd $BUILD
$SRC/configure --cc=$CC --prefix=$INSTALL
make -j4
# XXX: we need reliable clean up
# make check -j4 V=1
make install
echo
echo "=== ENV ==="
env
echo
echo "=== PACKAGES ==="
rpm -qa
requirements:
x86_64:
script: |
#! /bin/bash
test "$(uname -m)" = "x86_64"
qemu-x86:
script: |
#!/bin/bash
if qemu-system-x86_64 --help >/dev/null 2>&1; then
:
elif /usr/libexec/qemu-kvm --help >/dev/null 2>&1; then
:
else
exit 1
fi
ppcle:
script: |
#!/bin/bash
test "$(uname -m)" = "ppc64le"
ppcbe:
script: |
#!/bin/bash
test "$(uname -m)" = "ppc64"
git:
script: |
#! /bin/bash
git config user.name > /dev/null 2>&1
docker:
script: |
#!/bin/bash
docker ps || sudo -n docker ps
s390x:
script: |
#!/bin/bash
test "$(uname -m)" = "s390x"
git:
push_to: git@github.com:patchew-project/qemu
public_repo: https://github.com/patchew-project/qemu
url_template: https://github.com/patchew-project/qemu/tree/%t

20
.readthedocs.yml Normal file
View File

@ -0,0 +1,20 @@
# .readthedocs.yml
# Read the Docs configuration file
# See https://docs.readthedocs.io/en/stable/config-file/v2.html for details
# Required
version: 2
# Build documentation in the docs/ directory with Sphinx
sphinx:
configuration: docs/conf.py
# We want all the document formats
formats: all
# For consistency, we require that QEMU's Sphinx extensions
# run with at least the same minimum version of Python that
# we require for other Python in our codebase (our conf.py
# enforces this, and some code needs it).
python:
version: 3.6

305
.travis.yml Normal file
View File

@ -0,0 +1,305 @@
os: linux
dist: focal
language: c
compiler:
- gcc
cache:
# There is one cache per branch and compiler version.
# The characteristics of each job are used to identify the cache:
# - OS name (currently only linux)
# - OS distribution (for Linux, bionic or focal)
# - Names and values of visible environment variables set in .travis.yml or Settings panel
timeout: 1200
ccache: true
pip: true
directories:
- $HOME/avocado/data/cache
addons:
apt:
packages:
# Build dependencies
- libaio-dev
- libattr1-dev
- libbrlapi-dev
- libcap-ng-dev
- libcacard-dev
- libgcc-7-dev
- libgnutls28-dev
- libgtk-3-dev
- libiscsi-dev
- liblttng-ust-dev
- libncurses5-dev
- libnfs-dev
- libpixman-1-dev
- libpng-dev
- librados-dev
- libsdl2-dev
- libsdl2-image-dev
- libseccomp-dev
- libspice-protocol-dev
- libspice-server-dev
- libssh-dev
- liburcu-dev
- libusb-1.0-0-dev
- libvdeplug-dev
- libvte-2.91-dev
- libzstd-dev
- ninja-build
- sparse
- uuid-dev
# Tests dependencies
- genisoimage
# The channel name "irc.oftc.net#qemu" is encrypted against qemu/qemu
# to prevent IRC notifications from forks. This was created using:
# $ travis encrypt -r "qemu/qemu" "irc.oftc.net#qemu"
notifications:
irc:
channels:
- secure: "F7GDRgjuOo5IUyRLqSkmDL7kvdU4UcH3Lm/W2db2JnDHTGCqgEdaYEYKciyCLZ57vOTsTsOgesN8iUT7hNHBd1KWKjZe9KDTZWppWRYVwAwQMzVeSOsbbU4tRoJ6Pp+3qhH1Z0eGYR9ZgKYAoTumDFgSAYRp4IscKS8jkoedOqM="
on_success: change
on_failure: always
env:
global:
- SRC_DIR=".."
- BUILD_DIR="build"
- BASE_CONFIG="--disable-docs --disable-tools"
- TEST_BUILD_CMD=""
- TEST_CMD="make check V=1"
# This is broadly a list of "mainline" softmmu targets which have support across the major distros
- MAIN_SOFTMMU_TARGETS="aarch64-softmmu,mips64-softmmu,ppc64-softmmu,riscv64-softmmu,s390x-softmmu,x86_64-softmmu"
- CCACHE_SLOPPINESS="include_file_ctime,include_file_mtime"
- CCACHE_MAXSIZE=1G
- G_MESSAGES_DEBUG=error
git:
# we want to do this ourselves
submodules: false
# Common first phase for all steps
# We no longer use nproc to calculate jobs:
# https://travis-ci.community/t/nproc-reports-32-cores-on-arm64/5851
before_install:
- if command -v ccache ; then ccache --zero-stats ; fi
- export JOBS=3
- echo "=== Using ${JOBS} simultaneous jobs ==="
# Configure step - may be overridden
before_script:
- mkdir -p ${BUILD_DIR} && cd ${BUILD_DIR}
- ${SRC_DIR}/configure ${BASE_CONFIG} ${CONFIG} || { cat config.log meson-logs/meson-log.txt && exit 1; }
# Main build & test - rarely overridden - controlled by TEST_CMD
script:
- BUILD_RC=0 && make -j${JOBS} || BUILD_RC=$?
- |
if [ "$BUILD_RC" -eq 0 ] && [ -n "$TEST_BUILD_CMD" ]; then
${TEST_BUILD_CMD} || BUILD_RC=$?
else
$(exit $BUILD_RC);
fi
- |
if [ "$BUILD_RC" -eq 0 ] ; then
${TEST_CMD} ;
else
$(exit $BUILD_RC);
fi
after_script:
- df -h
- if command -v ccache ; then ccache --show-stats ; fi
jobs:
include:
- name: "[aarch64] GCC check-tcg"
arch: arm64
dist: focal
addons:
apt_packages:
- libaio-dev
- libattr1-dev
- libbrlapi-dev
- libcacard-dev
- libcap-ng-dev
- libgcrypt20-dev
- libgnutls28-dev
- libgtk-3-dev
- libiscsi-dev
- liblttng-ust-dev
- libncurses5-dev
- libnfs-dev
- libpixman-1-dev
- libpng-dev
- librados-dev
- libsdl2-dev
- libseccomp-dev
- liburcu-dev
- libusb-1.0-0-dev
- libvdeplug-dev
- libvte-2.91-dev
- ninja-build
# Tests dependencies
- genisoimage
env:
- TEST_CMD="make check check-tcg V=1"
- CONFIG="--disable-containers --target-list=${MAIN_SOFTMMU_TARGETS} --cxx=/bin/false"
- UNRELIABLE=true
- name: "[ppc64] GCC check-tcg"
arch: ppc64le
dist: focal
addons:
apt_packages:
- libaio-dev
- libattr1-dev
- libbrlapi-dev
- libcacard-dev
- libcap-ng-dev
- libgcrypt20-dev
- libgnutls28-dev
- libgtk-3-dev
- libiscsi-dev
- liblttng-ust-dev
- libncurses5-dev
- libnfs-dev
- libpixman-1-dev
- libpng-dev
- librados-dev
- libsdl2-dev
- libseccomp-dev
- liburcu-dev
- libusb-1.0-0-dev
- libvdeplug-dev
- libvte-2.91-dev
- ninja-build
# Tests dependencies
- genisoimage
env:
- TEST_CMD="make check check-tcg V=1"
- CONFIG="--disable-containers --target-list=ppc64-softmmu,ppc64le-linux-user"
- name: "[s390x] GCC check-tcg"
arch: s390x
dist: focal
addons:
apt_packages:
- libaio-dev
- libattr1-dev
- libbrlapi-dev
- libcacard-dev
- libcap-ng-dev
- libgcrypt20-dev
- libgnutls28-dev
- libgtk-3-dev
- libiscsi-dev
- liblttng-ust-dev
- libncurses5-dev
- libnfs-dev
- libpixman-1-dev
- libpng-dev
- librados-dev
- libsdl2-dev
- libseccomp-dev
- liburcu-dev
- libusb-1.0-0-dev
- libvdeplug-dev
- libvte-2.91-dev
- ninja-build
# Tests dependencies
- genisoimage
env:
- TEST_CMD="make check check-tcg V=1"
- CONFIG="--disable-containers --target-list=${MAIN_SOFTMMU_TARGETS},s390x-linux-user"
- UNRELIABLE=true
script:
- BUILD_RC=0 && make -j${JOBS} || BUILD_RC=$?
- |
if [ "$BUILD_RC" -eq 0 ] ; then
mv pc-bios/s390-ccw/*.img qemu-bundle/usr/local/share/qemu ;
${TEST_CMD} ;
else
$(exit $BUILD_RC);
fi
- name: "[s390x] GCC (other-softmmu)"
arch: s390x
dist: focal
addons:
apt_packages:
- libaio-dev
- libattr1-dev
- libcacard-dev
- libcap-ng-dev
- libgnutls28-dev
- libiscsi-dev
- liblttng-ust-dev
- liblzo2-dev
- libncurses-dev
- libnfs-dev
- libpixman-1-dev
- libsdl2-dev
- libsdl2-image-dev
- libseccomp-dev
- libsnappy-dev
- libzstd-dev
- nettle-dev
- xfslibs-dev
- ninja-build
# Tests dependencies
- genisoimage
env:
- CONFIG="--disable-containers --audio-drv-list=sdl --disable-user
--target-list-exclude=${MAIN_SOFTMMU_TARGETS}"
- name: "[s390x] GCC (user)"
arch: s390x
dist: focal
addons:
apt_packages:
- libgcrypt20-dev
- libglib2.0-dev
- libgnutls28-dev
- ninja-build
env:
- CONFIG="--disable-containers --disable-system"
- name: "[s390x] Clang (disable-tcg)"
arch: s390x
dist: focal
compiler: clang
addons:
apt_packages:
- libaio-dev
- libattr1-dev
- libbrlapi-dev
- libcacard-dev
- libcap-ng-dev
- libgcrypt20-dev
- libgnutls28-dev
- libgtk-3-dev
- libiscsi-dev
- liblttng-ust-dev
- libncurses5-dev
- libnfs-dev
- libpixman-1-dev
- libpng-dev
- librados-dev
- libsdl2-dev
- libseccomp-dev
- liburcu-dev
- libusb-1.0-0-dev
- libvdeplug-dev
- libvte-2.91-dev
- ninja-build
env:
- TEST_CMD="make check-unit"
- CONFIG="--disable-containers --disable-tcg --enable-kvm
--disable-tools --host-cc=clang --cxx=clang++"
- UNRELIABLE=true

339
COPYING Normal file
View File

@ -0,0 +1,339 @@
GNU GENERAL PUBLIC LICENSE
Version 2, June 1991
Copyright (C) 1989, 1991 Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
Preamble
The licenses for most software are designed to take away your
freedom to share and change it. By contrast, the GNU General Public
License is intended to guarantee your freedom to share and change free
software--to make sure the software is free for all its users. This
General Public License applies to most of the Free Software
Foundation's software and to any other program whose authors commit to
using it. (Some other Free Software Foundation software is covered by
the GNU Lesser General Public License instead.) You can apply it to
your programs, too.
When we speak of free software, we are referring to freedom, not
price. Our General Public Licenses are designed to make sure that you
have the freedom to distribute copies of free software (and charge for
this service if you wish), that you receive source code or can get it
if you want it, that you can change the software or use pieces of it
in new free programs; and that you know you can do these things.
To protect your rights, we need to make restrictions that forbid
anyone to deny you these rights or to ask you to surrender the rights.
These restrictions translate to certain responsibilities for you if you
distribute copies of the software, or if you modify it.
For example, if you distribute copies of such a program, whether
gratis or for a fee, you must give the recipients all the rights that
you have. You must make sure that they, too, receive or can get the
source code. And you must show them these terms so they know their
rights.
We protect your rights with two steps: (1) copyright the software, and
(2) offer you this license which gives you legal permission to copy,
distribute and/or modify the software.
Also, for each author's protection and ours, we want to make certain
that everyone understands that there is no warranty for this free
software. If the software is modified by someone else and passed on, we
want its recipients to know that what they have is not the original, so
that any problems introduced by others will not reflect on the original
authors' reputations.
Finally, any free program is threatened constantly by software
patents. We wish to avoid the danger that redistributors of a free
program will individually obtain patent licenses, in effect making the
program proprietary. To prevent this, we have made it clear that any
patent must be licensed for everyone's free use or not licensed at all.
The precise terms and conditions for copying, distribution and
modification follow.
GNU GENERAL PUBLIC LICENSE
TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
0. This License applies to any program or other work which contains
a notice placed by the copyright holder saying it may be distributed
under the terms of this General Public License. The "Program", below,
refers to any such program or work, and a "work based on the Program"
means either the Program or any derivative work under copyright law:
that is to say, a work containing the Program or a portion of it,
either verbatim or with modifications and/or translated into another
language. (Hereinafter, translation is included without limitation in
the term "modification".) Each licensee is addressed as "you".
Activities other than copying, distribution and modification are not
covered by this License; they are outside its scope. The act of
running the Program is not restricted, and the output from the Program
is covered only if its contents constitute a work based on the
Program (independent of having been made by running the Program).
Whether that is true depends on what the Program does.
1. You may copy and distribute verbatim copies of the Program's
source code as you receive it, in any medium, provided that you
conspicuously and appropriately publish on each copy an appropriate
copyright notice and disclaimer of warranty; keep intact all the
notices that refer to this License and to the absence of any warranty;
and give any other recipients of the Program a copy of this License
along with the Program.
You may charge a fee for the physical act of transferring a copy, and
you may at your option offer warranty protection in exchange for a fee.
2. You may modify your copy or copies of the Program or any portion
of it, thus forming a work based on the Program, and copy and
distribute such modifications or work under the terms of Section 1
above, provided that you also meet all of these conditions:
a) You must cause the modified files to carry prominent notices
stating that you changed the files and the date of any change.
b) You must cause any work that you distribute or publish, that in
whole or in part contains or is derived from the Program or any
part thereof, to be licensed as a whole at no charge to all third
parties under the terms of this License.
c) If the modified program normally reads commands interactively
when run, you must cause it, when started running for such
interactive use in the most ordinary way, to print or display an
announcement including an appropriate copyright notice and a
notice that there is no warranty (or else, saying that you provide
a warranty) and that users may redistribute the program under
these conditions, and telling the user how to view a copy of this
License. (Exception: if the Program itself is interactive but
does not normally print such an announcement, your work based on
the Program is not required to print an announcement.)
These requirements apply to the modified work as a whole. If
identifiable sections of that work are not derived from the Program,
and can be reasonably considered independent and separate works in
themselves, then this License, and its terms, do not apply to those
sections when you distribute them as separate works. But when you
distribute the same sections as part of a whole which is a work based
on the Program, the distribution of the whole must be on the terms of
this License, whose permissions for other licensees extend to the
entire whole, and thus to each and every part regardless of who wrote it.
Thus, it is not the intent of this section to claim rights or contest
your rights to work written entirely by you; rather, the intent is to
exercise the right to control the distribution of derivative or
collective works based on the Program.
In addition, mere aggregation of another work not based on the Program
with the Program (or with a work based on the Program) on a volume of
a storage or distribution medium does not bring the other work under
the scope of this License.
3. You may copy and distribute the Program (or a work based on it,
under Section 2) in object code or executable form under the terms of
Sections 1 and 2 above provided that you also do one of the following:
a) Accompany it with the complete corresponding machine-readable
source code, which must be distributed under the terms of Sections
1 and 2 above on a medium customarily used for software interchange; or,
b) Accompany it with a written offer, valid for at least three
years, to give any third party, for a charge no more than your
cost of physically performing source distribution, a complete
machine-readable copy of the corresponding source code, to be
distributed under the terms of Sections 1 and 2 above on a medium
customarily used for software interchange; or,
c) Accompany it with the information you received as to the offer
to distribute corresponding source code. (This alternative is
allowed only for noncommercial distribution and only if you
received the program in object code or executable form with such
an offer, in accord with Subsection b above.)
The source code for a work means the preferred form of the work for
making modifications to it. For an executable work, complete source
code means all the source code for all modules it contains, plus any
associated interface definition files, plus the scripts used to
control compilation and installation of the executable. However, as a
special exception, the source code distributed need not include
anything that is normally distributed (in either source or binary
form) with the major components (compiler, kernel, and so on) of the
operating system on which the executable runs, unless that component
itself accompanies the executable.
If distribution of executable or object code is made by offering
access to copy from a designated place, then offering equivalent
access to copy the source code from the same place counts as
distribution of the source code, even though third parties are not
compelled to copy the source along with the object code.
4. You may not copy, modify, sublicense, or distribute the Program
except as expressly provided under this License. Any attempt
otherwise to copy, modify, sublicense or distribute the Program is
void, and will automatically terminate your rights under this License.
However, parties who have received copies, or rights, from you under
this License will not have their licenses terminated so long as such
parties remain in full compliance.
5. You are not required to accept this License, since you have not
signed it. However, nothing else grants you permission to modify or
distribute the Program or its derivative works. These actions are
prohibited by law if you do not accept this License. Therefore, by
modifying or distributing the Program (or any work based on the
Program), you indicate your acceptance of this License to do so, and
all its terms and conditions for copying, distributing or modifying
the Program or works based on it.
6. Each time you redistribute the Program (or any work based on the
Program), the recipient automatically receives a license from the
original licensor to copy, distribute or modify the Program subject to
these terms and conditions. You may not impose any further
restrictions on the recipients' exercise of the rights granted herein.
You are not responsible for enforcing compliance by third parties to
this License.
7. If, as a consequence of a court judgment or allegation of patent
infringement or for any other reason (not limited to patent issues),
conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License. If you cannot
distribute so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you
may not distribute the Program at all. For example, if a patent
license would not permit royalty-free redistribution of the Program by
all those who receive copies directly or indirectly through you, then
the only way you could satisfy both it and this License would be to
refrain entirely from distribution of the Program.
If any portion of this section is held invalid or unenforceable under
any particular circumstance, the balance of the section is intended to
apply and the section as a whole is intended to apply in other
circumstances.
It is not the purpose of this section to induce you to infringe any
patents or other property right claims or to contest validity of any
such claims; this section has the sole purpose of protecting the
integrity of the free software distribution system, which is
implemented by public license practices. Many people have made
generous contributions to the wide range of software distributed
through that system in reliance on consistent application of that
system; it is up to the author/donor to decide if he or she is willing
to distribute software through any other system and a licensee cannot
impose that choice.
This section is intended to make thoroughly clear what is believed to
be a consequence of the rest of this License.
8. If the distribution and/or use of the Program is restricted in
certain countries either by patents or by copyrighted interfaces, the
original copyright holder who places the Program under this License
may add an explicit geographical distribution limitation excluding
those countries, so that distribution is permitted only in or among
countries not thus excluded. In such case, this License incorporates
the limitation as if written in the body of this License.
9. The Free Software Foundation may publish revised and/or new versions
of the General Public License from time to time. Such new versions will
be similar in spirit to the present version, but may differ in detail to
address new problems or concerns.
Each version is given a distinguishing version number. If the Program
specifies a version number of this License which applies to it and "any
later version", you have the option of following the terms and conditions
either of that version or of any later version published by the Free
Software Foundation. If the Program does not specify a version number of
this License, you may choose any version ever published by the Free Software
Foundation.
10. If you wish to incorporate parts of the Program into other free
programs whose distribution conditions are different, write to the author
to ask for permission. For software which is copyrighted by the Free
Software Foundation, write to the Free Software Foundation; we sometimes
make exceptions for this. Our decision will be guided by the two goals
of preserving the free status of all derivatives of our free software and
of promoting the sharing and reuse of software generally.
NO WARRANTY
11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN
OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS
TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE
PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
REPAIR OR CORRECTION.
12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
POSSIBILITY OF SUCH DAMAGES.
END OF TERMS AND CONDITIONS
How to Apply These Terms to Your New Programs
If you develop a new program, and you want it to be of the greatest
possible use to the public, the best way to achieve this is to make it
free software which everyone can redistribute and change under these terms.
To do so, attach the following notices to the program. It is safest
to attach them to the start of each source file to most effectively
convey the exclusion of warranty; and each file should have at least
the "copyright" line and a pointer to where the full notice is found.
<one line to give the program's name and a brief idea of what it does.>
Copyright (C) <year> <name of author>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
Also add information on how to contact you by electronic and paper mail.
If the program is interactive, make it output a short notice like this
when it starts in an interactive mode:
Gnomovision version 69, Copyright (C) year name of author
Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
This is free software, and you are welcome to redistribute it
under certain conditions; type `show c' for details.
The hypothetical commands `show w' and `show c' should show the appropriate
parts of the General Public License. Of course, the commands you use may
be called something other than `show w' and `show c'; they could even be
mouse-clicks or menu items--whatever suits your program.
You should also get your employer (if you work as a programmer) or your
school, if any, to sign a "copyright disclaimer" for the program, if
necessary. Here is a sample; alter the names:
Yoyodyne, Inc., hereby disclaims all copyright interest in the program
`Gnomovision' (which makes passes at compilers) written by James Hacker.
<signature of Ty Coon>, 1 April 1989
Ty Coon, President of Vice
This General Public License does not permit incorporating your program into
proprietary programs. If your program is a subroutine library, you may
consider it more useful to permit linking proprietary applications with the
library. If this is what you want to do, use the GNU Lesser General
Public License instead of this License.

502
COPYING.LIB Normal file
View File

@ -0,0 +1,502 @@
GNU LESSER GENERAL PUBLIC LICENSE
Version 2.1, February 1999
Copyright (C) 1991, 1999 Free Software Foundation, Inc.
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
[This is the first released version of the Lesser GPL. It also counts
as the successor of the GNU Library Public License, version 2, hence
the version number 2.1.]
Preamble
The licenses for most software are designed to take away your
freedom to share and change it. By contrast, the GNU General Public
Licenses are intended to guarantee your freedom to share and change
free software--to make sure the software is free for all its users.
This license, the Lesser General Public License, applies to some
specially designated software packages--typically libraries--of the
Free Software Foundation and other authors who decide to use it. You
can use it too, but we suggest you first think carefully about whether
this license or the ordinary General Public License is the better
strategy to use in any particular case, based on the explanations below.
When we speak of free software, we are referring to freedom of use,
not price. Our General Public Licenses are designed to make sure that
you have the freedom to distribute copies of free software (and charge
for this service if you wish); that you receive source code or can get
it if you want it; that you can change the software and use pieces of
it in new free programs; and that you are informed that you can do
these things.
To protect your rights, we need to make restrictions that forbid
distributors to deny you these rights or to ask you to surrender these
rights. These restrictions translate to certain responsibilities for
you if you distribute copies of the library or if you modify it.
For example, if you distribute copies of the library, whether gratis
or for a fee, you must give the recipients all the rights that we gave
you. You must make sure that they, too, receive or can get the source
code. If you link other code with the library, you must provide
complete object files to the recipients, so that they can relink them
with the library after making changes to the library and recompiling
it. And you must show them these terms so they know their rights.
We protect your rights with a two-step method: (1) we copyright the
library, and (2) we offer you this license, which gives you legal
permission to copy, distribute and/or modify the library.
To protect each distributor, we want to make it very clear that
there is no warranty for the free library. Also, if the library is
modified by someone else and passed on, the recipients should know
that what they have is not the original version, so that the original
author's reputation will not be affected by problems that might be
introduced by others.
Finally, software patents pose a constant threat to the existence of
any free program. We wish to make sure that a company cannot
effectively restrict the users of a free program by obtaining a
restrictive license from a patent holder. Therefore, we insist that
any patent license obtained for a version of the library must be
consistent with the full freedom of use specified in this license.
Most GNU software, including some libraries, is covered by the
ordinary GNU General Public License. This license, the GNU Lesser
General Public License, applies to certain designated libraries, and
is quite different from the ordinary General Public License. We use
this license for certain libraries in order to permit linking those
libraries into non-free programs.
When a program is linked with a library, whether statically or using
a shared library, the combination of the two is legally speaking a
combined work, a derivative of the original library. The ordinary
General Public License therefore permits such linking only if the
entire combination fits its criteria of freedom. The Lesser General
Public License permits more lax criteria for linking other code with
the library.
We call this license the "Lesser" General Public License because it
does Less to protect the user's freedom than the ordinary General
Public License. It also provides other free software developers Less
of an advantage over competing non-free programs. These disadvantages
are the reason we use the ordinary General Public License for many
libraries. However, the Lesser license provides advantages in certain
special circumstances.
For example, on rare occasions, there may be a special need to
encourage the widest possible use of a certain library, so that it becomes
a de-facto standard. To achieve this, non-free programs must be
allowed to use the library. A more frequent case is that a free
library does the same job as widely used non-free libraries. In this
case, there is little to gain by limiting the free library to free
software only, so we use the Lesser General Public License.
In other cases, permission to use a particular library in non-free
programs enables a greater number of people to use a large body of
free software. For example, permission to use the GNU C Library in
non-free programs enables many more people to use the whole GNU
operating system, as well as its variant, the GNU/Linux operating
system.
Although the Lesser General Public License is Less protective of the
users' freedom, it does ensure that the user of a program that is
linked with the Library has the freedom and the wherewithal to run
that program using a modified version of the Library.
The precise terms and conditions for copying, distribution and
modification follow. Pay close attention to the difference between a
"work based on the library" and a "work that uses the library". The
former contains code derived from the library, whereas the latter must
be combined with the library in order to run.
GNU LESSER GENERAL PUBLIC LICENSE
TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
0. This License Agreement applies to any software library or other
program which contains a notice placed by the copyright holder or
other authorized party saying it may be distributed under the terms of
this Lesser General Public License (also called "this License").
Each licensee is addressed as "you".
A "library" means a collection of software functions and/or data
prepared so as to be conveniently linked with application programs
(which use some of those functions and data) to form executables.
The "Library", below, refers to any such software library or work
which has been distributed under these terms. A "work based on the
Library" means either the Library or any derivative work under
copyright law: that is to say, a work containing the Library or a
portion of it, either verbatim or with modifications and/or translated
straightforwardly into another language. (Hereinafter, translation is
included without limitation in the term "modification".)
"Source code" for a work means the preferred form of the work for
making modifications to it. For a library, complete source code means
all the source code for all modules it contains, plus any associated
interface definition files, plus the scripts used to control compilation
and installation of the library.
Activities other than copying, distribution and modification are not
covered by this License; they are outside its scope. The act of
running a program using the Library is not restricted, and output from
such a program is covered only if its contents constitute a work based
on the Library (independent of the use of the Library in a tool for
writing it). Whether that is true depends on what the Library does
and what the program that uses the Library does.
1. You may copy and distribute verbatim copies of the Library's
complete source code as you receive it, in any medium, provided that
you conspicuously and appropriately publish on each copy an
appropriate copyright notice and disclaimer of warranty; keep intact
all the notices that refer to this License and to the absence of any
warranty; and distribute a copy of this License along with the
Library.
You may charge a fee for the physical act of transferring a copy,
and you may at your option offer warranty protection in exchange for a
fee.
2. You may modify your copy or copies of the Library or any portion
of it, thus forming a work based on the Library, and copy and
distribute such modifications or work under the terms of Section 1
above, provided that you also meet all of these conditions:
a) The modified work must itself be a software library.
b) You must cause the files modified to carry prominent notices
stating that you changed the files and the date of any change.
c) You must cause the whole of the work to be licensed at no
charge to all third parties under the terms of this License.
d) If a facility in the modified Library refers to a function or a
table of data to be supplied by an application program that uses
the facility, other than as an argument passed when the facility
is invoked, then you must make a good faith effort to ensure that,
in the event an application does not supply such function or
table, the facility still operates, and performs whatever part of
its purpose remains meaningful.
(For example, a function in a library to compute square roots has
a purpose that is entirely well-defined independent of the
application. Therefore, Subsection 2d requires that any
application-supplied function or table used by this function must
be optional: if the application does not supply it, the square
root function must still compute square roots.)
These requirements apply to the modified work as a whole. If
identifiable sections of that work are not derived from the Library,
and can be reasonably considered independent and separate works in
themselves, then this License, and its terms, do not apply to those
sections when you distribute them as separate works. But when you
distribute the same sections as part of a whole which is a work based
on the Library, the distribution of the whole must be on the terms of
this License, whose permissions for other licensees extend to the
entire whole, and thus to each and every part regardless of who wrote
it.
Thus, it is not the intent of this section to claim rights or contest
your rights to work written entirely by you; rather, the intent is to
exercise the right to control the distribution of derivative or
collective works based on the Library.
In addition, mere aggregation of another work not based on the Library
with the Library (or with a work based on the Library) on a volume of
a storage or distribution medium does not bring the other work under
the scope of this License.
3. You may opt to apply the terms of the ordinary GNU General Public
License instead of this License to a given copy of the Library. To do
this, you must alter all the notices that refer to this License, so
that they refer to the ordinary GNU General Public License, version 2,
instead of to this License. (If a newer version than version 2 of the
ordinary GNU General Public License has appeared, then you can specify
that version instead if you wish.) Do not make any other change in
these notices.
Once this change is made in a given copy, it is irreversible for
that copy, so the ordinary GNU General Public License applies to all
subsequent copies and derivative works made from that copy.
This option is useful when you wish to copy part of the code of
the Library into a program that is not a library.
4. You may copy and distribute the Library (or a portion or
derivative of it, under Section 2) in object code or executable form
under the terms of Sections 1 and 2 above provided that you accompany
it with the complete corresponding machine-readable source code, which
must be distributed under the terms of Sections 1 and 2 above on a
medium customarily used for software interchange.
If distribution of object code is made by offering access to copy
from a designated place, then offering equivalent access to copy the
source code from the same place satisfies the requirement to
distribute the source code, even though third parties are not
compelled to copy the source along with the object code.
5. A program that contains no derivative of any portion of the
Library, but is designed to work with the Library by being compiled or
linked with it, is called a "work that uses the Library". Such a
work, in isolation, is not a derivative work of the Library, and
therefore falls outside the scope of this License.
However, linking a "work that uses the Library" with the Library
creates an executable that is a derivative of the Library (because it
contains portions of the Library), rather than a "work that uses the
library". The executable is therefore covered by this License.
Section 6 states terms for distribution of such executables.
When a "work that uses the Library" uses material from a header file
that is part of the Library, the object code for the work may be a
derivative work of the Library even though the source code is not.
Whether this is true is especially significant if the work can be
linked without the Library, or if the work is itself a library. The
threshold for this to be true is not precisely defined by law.
If such an object file uses only numerical parameters, data
structure layouts and accessors, and small macros and small inline
functions (ten lines or less in length), then the use of the object
file is unrestricted, regardless of whether it is legally a derivative
work. (Executables containing this object code plus portions of the
Library will still fall under Section 6.)
Otherwise, if the work is a derivative of the Library, you may
distribute the object code for the work under the terms of Section 6.
Any executables containing that work also fall under Section 6,
whether or not they are linked directly with the Library itself.
6. As an exception to the Sections above, you may also combine or
link a "work that uses the Library" with the Library to produce a
work containing portions of the Library, and distribute that work
under terms of your choice, provided that the terms permit
modification of the work for the customer's own use and reverse
engineering for debugging such modifications.
You must give prominent notice with each copy of the work that the
Library is used in it and that the Library and its use are covered by
this License. You must supply a copy of this License. If the work
during execution displays copyright notices, you must include the
copyright notice for the Library among them, as well as a reference
directing the user to the copy of this License. Also, you must do one
of these things:
a) Accompany the work with the complete corresponding
machine-readable source code for the Library including whatever
changes were used in the work (which must be distributed under
Sections 1 and 2 above); and, if the work is an executable linked
with the Library, with the complete machine-readable "work that
uses the Library", as object code and/or source code, so that the
user can modify the Library and then relink to produce a modified
executable containing the modified Library. (It is understood
that the user who changes the contents of definitions files in the
Library will not necessarily be able to recompile the application
to use the modified definitions.)
b) Use a suitable shared library mechanism for linking with the
Library. A suitable mechanism is one that (1) uses at run time a
copy of the library already present on the user's computer system,
rather than copying library functions into the executable, and (2)
will operate properly with a modified version of the library, if
the user installs one, as long as the modified version is
interface-compatible with the version that the work was made with.
c) Accompany the work with a written offer, valid for at
least three years, to give the same user the materials
specified in Subsection 6a, above, for a charge no more
than the cost of performing this distribution.
d) If distribution of the work is made by offering access to copy
from a designated place, offer equivalent access to copy the above
specified materials from the same place.
e) Verify that the user has already received a copy of these
materials or that you have already sent this user a copy.
For an executable, the required form of the "work that uses the
Library" must include any data and utility programs needed for
reproducing the executable from it. However, as a special exception,
the materials to be distributed need not include anything that is
normally distributed (in either source or binary form) with the major
components (compiler, kernel, and so on) of the operating system on
which the executable runs, unless that component itself accompanies
the executable.
It may happen that this requirement contradicts the license
restrictions of other proprietary libraries that do not normally
accompany the operating system. Such a contradiction means you cannot
use both them and the Library together in an executable that you
distribute.
7. You may place library facilities that are a work based on the
Library side-by-side in a single library together with other library
facilities not covered by this License, and distribute such a combined
library, provided that the separate distribution of the work based on
the Library and of the other library facilities is otherwise
permitted, and provided that you do these two things:
a) Accompany the combined library with a copy of the same work
based on the Library, uncombined with any other library
facilities. This must be distributed under the terms of the
Sections above.
b) Give prominent notice with the combined library of the fact
that part of it is a work based on the Library, and explaining
where to find the accompanying uncombined form of the same work.
8. You may not copy, modify, sublicense, link with, or distribute
the Library except as expressly provided under this License. Any
attempt otherwise to copy, modify, sublicense, link with, or
distribute the Library is void, and will automatically terminate your
rights under this License. However, parties who have received copies,
or rights, from you under this License will not have their licenses
terminated so long as such parties remain in full compliance.
9. You are not required to accept this License, since you have not
signed it. However, nothing else grants you permission to modify or
distribute the Library or its derivative works. These actions are
prohibited by law if you do not accept this License. Therefore, by
modifying or distributing the Library (or any work based on the
Library), you indicate your acceptance of this License to do so, and
all its terms and conditions for copying, distributing or modifying
the Library or works based on it.
10. Each time you redistribute the Library (or any work based on the
Library), the recipient automatically receives a license from the
original licensor to copy, distribute, link with or modify the Library
subject to these terms and conditions. You may not impose any further
restrictions on the recipients' exercise of the rights granted herein.
You are not responsible for enforcing compliance by third parties with
this License.
11. If, as a consequence of a court judgment or allegation of patent
infringement or for any other reason (not limited to patent issues),
conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License. If you cannot
distribute so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you
may not distribute the Library at all. For example, if a patent
license would not permit royalty-free redistribution of the Library by
all those who receive copies directly or indirectly through you, then
the only way you could satisfy both it and this License would be to
refrain entirely from distribution of the Library.
If any portion of this section is held invalid or unenforceable under any
particular circumstance, the balance of the section is intended to apply,
and the section as a whole is intended to apply in other circumstances.
It is not the purpose of this section to induce you to infringe any
patents or other property right claims or to contest validity of any
such claims; this section has the sole purpose of protecting the
integrity of the free software distribution system which is
implemented by public license practices. Many people have made
generous contributions to the wide range of software distributed
through that system in reliance on consistent application of that
system; it is up to the author/donor to decide if he or she is willing
to distribute software through any other system and a licensee cannot
impose that choice.
This section is intended to make thoroughly clear what is believed to
be a consequence of the rest of this License.
12. If the distribution and/or use of the Library is restricted in
certain countries either by patents or by copyrighted interfaces, the
original copyright holder who places the Library under this License may add
an explicit geographical distribution limitation excluding those countries,
so that distribution is permitted only in or among countries not thus
excluded. In such case, this License incorporates the limitation as if
written in the body of this License.
13. The Free Software Foundation may publish revised and/or new
versions of the Lesser General Public License from time to time.
Such new versions will be similar in spirit to the present version,
but may differ in detail to address new problems or concerns.
Each version is given a distinguishing version number. If the Library
specifies a version number of this License which applies to it and
"any later version", you have the option of following the terms and
conditions either of that version or of any later version published by
the Free Software Foundation. If the Library does not specify a
license version number, you may choose any version ever published by
the Free Software Foundation.
14. If you wish to incorporate parts of the Library into other free
programs whose distribution conditions are incompatible with these,
write to the author to ask for permission. For software which is
copyrighted by the Free Software Foundation, write to the Free
Software Foundation; we sometimes make exceptions for this. Our
decision will be guided by the two goals of preserving the free status
of all derivatives of our free software and of promoting the sharing
and reuse of software generally.
NO WARRANTY
15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO
WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW.
EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR
OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY
KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE
LIBRARY IS WITH YOU. SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME
THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN
WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY
AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU
FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR
CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE
LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING
RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A
FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF
SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
DAMAGES.
END OF TERMS AND CONDITIONS
How to Apply These Terms to Your New Libraries
If you develop a new library, and you want it to be of the greatest
possible use to the public, we recommend making it free software that
everyone can redistribute and change. You can do so by permitting
redistribution under these terms (or, alternatively, under the terms of the
ordinary General Public License).
To apply these terms, attach the following notices to the library. It is
safest to attach them to the start of each source file to most effectively
convey the exclusion of warranty; and each file should have at least the
"copyright" line and a pointer to where the full notice is found.
<one line to give the library's name and a brief idea of what it does.>
Copyright (C) <year> <name of author>
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
Also add information on how to contact you by electronic and paper mail.
You should also get your employer (if you work as a programmer) or your
school, if any, to sign a "copyright disclaimer" for the library, if
necessary. Here is a sample; alter the names:
Yoyodyne, Inc., hereby disclaims all copyright interest in the
library `Frob' (a library for tweaking knobs) written by James Random Hacker.
<signature of Ty Coon>, 1 April 1990
Ty Coon, President of Vice
That's all there is to it!

View File

@ -1,42 +0,0 @@
[workspace]
members = [
"libafl",
"libafl_derive",
"libafl_cc",
"libafl_targets",
"libafl_frida",
"libafl_qemu",
"libafl_tinyinst",
"libafl_sugar",
"libafl_nyx",
"libafl_concolic/symcc_runtime",
"libafl_concolic/symcc_libafl",
"libafl_concolic/test/dump_constraints",
"libafl_concolic/test/runtime_test",
"utils/deexit",
"utils/gramatron/construct_automata",
"utils/libafl_benches",
]
default-members = [
"libafl",
"libafl_derive",
"libafl_cc",
"libafl_targets",
]
exclude = [
"fuzzers",
"bindings",
"scripts",
"libafl_qemu/libafl_qemu_build",
"libafl_qemu/libafl_qemu_sys"
]
[workspace.package]
version = "0.8.2"
[profile.release]
lto = true
codegen-units = 1
opt-level = 3
debug = true

View File

@ -1,122 +0,0 @@
# syntax=docker/dockerfile:1.2
FROM rust:bullseye AS libafl
LABEL "maintainer"="afl++ team <afl@aflplus.plus>"
LABEL "about"="LibAFL Docker image"
# install sccache to cache subsequent builds of dependencies
RUN cargo install sccache
ENV HOME=/root
ENV SCCACHE_CACHE_SIZE="1G"
ENV SCCACHE_DIR=$HOME/.cache/sccache
ENV RUSTC_WRAPPER="/usr/local/cargo/bin/sccache"
ENV IS_DOCKER="1"
RUN sh -c 'echo set encoding=utf-8 > /root/.vimrc' \
echo "export PS1='"'[LibAFL \h] \w$(__git_ps1) \$ '"'" >> ~/.bashrc && \
mkdir ~/.cargo && \
echo "[build]\nrustc-wrapper = \"${RUSTC_WRAPPER}\"" >> ~/.cargo/config
RUN rustup component add rustfmt clippy
# Install clang 11, common build tools
RUN apt update && apt install -y build-essential gdb git wget clang clang-tools libc++-11-dev libc++abi-11-dev llvm
# Copy a dummy.rs and Cargo.toml first, so that dependencies are cached
WORKDIR /libafl
COPY Cargo.toml README.md ./
COPY libafl_derive/Cargo.toml libafl_derive/Cargo.toml
COPY scripts/dummy.rs libafl_derive/src/lib.rs
COPY libafl/Cargo.toml libafl/build.rs libafl/
COPY libafl/examples libafl/examples
COPY scripts/dummy.rs libafl/src/lib.rs
COPY libafl_frida/Cargo.toml libafl_frida/build.rs libafl_frida/
COPY scripts/dummy.rs libafl_frida/src/lib.rs
COPY libafl_frida/src/gettls.c libafl_frida/src/gettls.c
COPY libafl_qemu/Cargo.toml libafl_qemu/build.rs libafl_qemu/
COPY scripts/dummy.rs libafl_qemu/src/lib.rs
COPY libafl_qemu/libafl_qemu_build/Cargo.toml libafl_qemu/libafl_qemu_build/
COPY scripts/dummy.rs libafl_qemu/libafl_qemu_build/src/lib.rs
COPY libafl_qemu/libafl_qemu_sys/Cargo.toml libafl_qemu/libafl_qemu_sys/build.rs libafl_qemu/libafl_qemu_sys/
COPY scripts/dummy.rs libafl_qemu/libafl_qemu_sys/src/lib.rs
COPY libafl_sugar/Cargo.toml libafl_sugar/
COPY scripts/dummy.rs libafl_sugar/src/lib.rs
COPY libafl_cc/Cargo.toml libafl_cc/Cargo.toml
COPY libafl_cc/build.rs libafl_cc/build.rs
COPY libafl_cc/src libafl_cc/src
COPY scripts/dummy.rs libafl_cc/src/lib.rs
COPY libafl_targets/Cargo.toml libafl_targets/build.rs libafl_targets/
COPY libafl_targets/src libafl_targets/src
COPY scripts/dummy.rs libafl_targets/src/lib.rs
COPY libafl_concolic/test/dump_constraints/Cargo.toml libafl_concolic/test/dump_constraints/
COPY scripts/dummy.rs libafl_concolic/test/dump_constraints/src/lib.rs
COPY libafl_concolic/test/runtime_test/Cargo.toml libafl_concolic/test/runtime_test/
COPY scripts/dummy.rs libafl_concolic/test/runtime_test/src/lib.rs
COPY libafl_concolic/symcc_runtime/Cargo.toml libafl_concolic/symcc_runtime/build.rs libafl_concolic/symcc_runtime/
COPY scripts/dummy.rs libafl_concolic/symcc_runtime/src/lib.rs
COPY libafl_concolic/symcc_libafl/Cargo.toml libafl_concolic/symcc_libafl/
COPY scripts/dummy.rs libafl_concolic/symcc_libafl/src/lib.rs
COPY libafl_nyx/Cargo.toml libafl_nyx/build.rs libafl_nyx/
COPY scripts/dummy.rs libafl_nyx/src/lib.rs
COPY libafl_tinyinst/Cargo.toml libafl_tinyinst/
COPY scripts/dummy.rs libafl_tinyinst/src/lib.rs
COPY utils utils
RUN cargo build && cargo build --release
COPY scripts scripts
COPY docs docs
# Pre-build dependencies for a few common fuzzers
# Dep chain:
# libafl_cc (independent)
# libafl_derive -> libafl
# libafl -> libafl_targets
# libafl_targets -> libafl_frida
# Build once without source
COPY libafl_cc/src libafl_cc/src
RUN touch libafl_cc/src/lib.rs
COPY libafl_derive/src libafl_derive/src
RUN touch libafl_derive/src/lib.rs
COPY libafl/src libafl/src
RUN touch libafl/src/lib.rs
COPY libafl_targets/src libafl_targets/src
RUN touch libafl_targets/src/lib.rs
COPY libafl_frida/src libafl_frida/src
RUN touch libafl_qemu/libafl_qemu_build/src/lib.rs
COPY libafl_qemu/libafl_qemu_build/src libafl_qemu/libafl_qemu_build/src
RUN touch libafl_qemu/libafl_qemu_sys/src/lib.rs
COPY libafl_qemu/libafl_qemu_sys/src libafl_qemu/libafl_qemu_sys/src
RUN touch libafl_qemu/src/lib.rs
COPY libafl_qemu/src libafl_qemu/src
RUN touch libafl_frida/src/lib.rs
COPY libafl_concolic/symcc_libafl libafl_concolic/symcc_libafl
COPY libafl_concolic/symcc_runtime libafl_concolic/symcc_runtime
COPY libafl_concolic/test libafl_concolic/test
COPY libafl_nyx/src libafl_nyx/src
RUN touch libafl_nyx/src/lib.rs
RUN cargo build && cargo build --release
# Copy fuzzers over
COPY fuzzers fuzzers
# RUN ./scripts/test_all_fuzzers.sh --no-fmt
ENTRYPOINT [ "/bin/bash" ]

6
Kconfig Normal file
View File

@ -0,0 +1,6 @@
source Kconfig.host
source backends/Kconfig
source accel/Kconfig
source target/Kconfig
source hw/Kconfig
source semihosting/Kconfig

48
Kconfig.host Normal file
View File

@ -0,0 +1,48 @@
# These are "proxy" symbols used to pass config-host.mak values
# down to Kconfig. See also kconfig_external_symbols in
# meson.build: these two need to be kept in sync.
config LINUX
bool
config OPENGL
bool
config X11
bool
config SPICE
bool
config IVSHMEM
bool
config TPM
bool
config VHOST_USER
bool
config VHOST_VDPA
bool
config VHOST_KERNEL
bool
config VIRTFS
bool
config PVRDMA
bool
config MULTIPROCESS_ALLOWED
bool
imply MULTIPROCESS
config FUZZ
bool
select SPARSE_MEM
config VFIO_USER_SERVER_ALLOWED
bool
imply VFIO_USER_SERVER

27
LICENSE Normal file
View File

@ -0,0 +1,27 @@
The QEMU distribution includes both the QEMU emulator and
various firmware files. These are separate programs that are
distributed together for our users' convenience, and they have
separate licenses.
The following points clarify the license of the QEMU emulator:
1) The QEMU emulator as a whole is released under the GNU General
Public License, version 2.
2) Parts of the QEMU emulator have specific licenses which are compatible
with the GNU General Public License, version 2. Hence each source file
contains its own licensing information. Source files with no licensing
information are released under the GNU General Public License, version
2 or (at your option) any later version.
As of July 2013, contributions under version 2 of the GNU General Public
License (and no later version) are only accepted for the following files
or directories: bsd-user/, linux-user/, hw/vfio/, hw/xen/xen_pt*.
3) The Tiny Code Generator (TCG) is mostly under the BSD or MIT licenses;
but some parts may be GPLv2 or other licenses. Again, see the
specific licensing information in each source file.
4) QEMU is a trademark of Fabrice Bellard.
Fabrice Bellard and the QEMU team

View File

@ -1,201 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@ -1,23 +0,0 @@
Permission is hereby granted, free of charge, to any
person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the
Software without restriction, including without
limitation the rights to use, copy, modify, merge,
publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software
is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice
shall be included in all copies or substantial portions
of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.

3823
MAINTAINERS Normal file

File diff suppressed because it is too large

337
Makefile Normal file
View File

@ -0,0 +1,337 @@
# Makefile for QEMU.
ifneq ($(words $(subst :, ,$(CURDIR))), 1)
$(error main directory cannot contain spaces nor colons)
endif
# Always point to the root of the build tree (needs GNU make).
BUILD_DIR=$(CURDIR)
# Before including a proper config-host.mak, assume we are in the source tree
SRC_PATH=.
# Don't use implicit rules or variables
# we have explicit rules for everything
MAKEFLAGS += -rR
SHELL = bash -o pipefail
# Usage: $(call quiet-command,command and args,"NAME","args to print")
# This will run "command and args", and either:
# if V=1 just print the whole command and args
# otherwise print the 'quiet' output in the format " NAME args to print"
# NAME should be a short name of the command, 7 letters or fewer.
# If called with only a single argument, will print nothing in quiet mode.
quiet-command-run = $(if $(V),,$(if $2,printf " %-7s %s\n" $2 $3 && ))$1
quiet-@ = $(if $(V),,@)
quiet-command = $(quiet-@)$(call quiet-command-run,$1,$2,$3)
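# Example (illustrative): a recipe line written as
#   $(call quiet-command,rm -f foo.o,"RM","foo.o")
# prints a line like "  RM      foo.o" in the default quiet mode, and the full
# "rm -f foo.o" command when invoked with V=1.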
UNCHECKED_GOALS := %clean TAGS cscope ctags dist \
help check-help print-% \
docker docker-% vm-help vm-test vm-build-%
all:
.PHONY: all clean distclean recurse-all dist msi FORCE
# Don't try to regenerate Makefile or configure
# We don't generate any of them
Makefile: ;
configure: ;
# All following code might depend on configuration variables
ifneq ($(wildcard config-host.mak),)
include config-host.mak
include Makefile.prereqs
Makefile.prereqs: config-host.mak
git-submodule-update:
.git-submodule-status: git-submodule-update config-host.mak
Makefile: .git-submodule-status
.PHONY: git-submodule-update
git-submodule-update:
ifneq ($(GIT_SUBMODULES_ACTION),ignore)
$(call quiet-command, \
(GIT="$(GIT)" "$(SRC_PATH)/scripts/git-submodule.sh" $(GIT_SUBMODULES_ACTION) $(GIT_SUBMODULES)), \
"GIT","$(GIT_SUBMODULES)")
endif
# 0. ensure the build tree is okay
# Check that we're not trying to do an out-of-tree build from
# a tree that's been used for an in-tree build.
ifneq ($(realpath $(SRC_PATH)),$(realpath .))
ifneq ($(wildcard $(SRC_PATH)/config-host.mak),)
$(error This is an out of tree build but your source tree ($(SRC_PATH)) \
seems to have been used for an in-tree build. You can fix this by running \
"$(MAKE) distclean && rm -rf *-linux-user *-softmmu" in your source tree)
endif
endif
# force a rerun of configure if config-host.mak is too old or corrupted
ifeq ($(MESON),)
.PHONY: config-host.mak
x := $(shell rm -rf meson-private meson-info meson-logs)
endif
ifeq ($(NINJA),)
.PHONY: config-host.mak
x := $(shell rm -rf meson-private meson-info meson-logs)
else
export NINJA
endif
ifeq ($(wildcard build.ninja),)
.PHONY: config-host.mak
x := $(shell rm -rf meson-private meson-info meson-logs)
endif
ifeq ($(origin prefix),file)
.PHONY: config-host.mak
x := $(shell rm -rf meson-private meson-info meson-logs)
endif
# 1. ensure config-host.mak is up-to-date
config-host.mak: $(SRC_PATH)/configure $(SRC_PATH)/scripts/meson-buildoptions.sh $(SRC_PATH)/VERSION
@echo config-host.mak is out-of-date, running configure
@if test -f meson-private/coredata.dat; then \
./config.status --skip-meson; \
else \
./config.status && touch build.ninja.stamp; \
fi
# 2. meson.stamp exists if meson has run at least once (so ninja reconfigure
# works), but otherwise never needs to be updated
meson-private/coredata.dat: meson.stamp
meson.stamp: config-host.mak
@touch meson.stamp
# 3. ensure generated build files are up-to-date
ifneq ($(NINJA),)
Makefile.ninja: build.ninja
$(quiet-@){ \
echo 'ninja-targets = \'; \
$(NINJA) -t targets all | sed 's/:.*//; $$!s/$$/ \\/'; \
echo 'build-files = \'; \
$(NINJA) -t query build.ninja | sed -n '1,/^ input:/d; /^ outputs:/q; s/$$/ \\/p'; \
} > $@.tmp && mv $@.tmp $@
-include Makefile.ninja
# A separate rule is needed for Makefile dependencies to avoid -n
build.ninja: build.ninja.stamp
$(build-files):
build.ninja.stamp: meson.stamp $(build-files)
$(NINJA) $(if $V,-v,) build.ninja && touch $@
endif
ifneq ($(MESON),)
Makefile.mtest: build.ninja scripts/mtest2make.py
$(MESON) introspect --targets --tests --benchmarks | $(PYTHON) scripts/mtest2make.py > $@
-include Makefile.mtest
.PHONY: update-buildoptions
all update-buildoptions: $(SRC_PATH)/scripts/meson-buildoptions.sh
$(SRC_PATH)/scripts/meson-buildoptions.sh: $(SRC_PATH)/meson_options.txt
$(MESON) introspect --buildoptions $(SRC_PATH)/meson.build | $(PYTHON) \
scripts/meson-buildoptions.py > $@.tmp && mv $@.tmp $@
endif
# 4. Rules to bridge to other makefiles
ifneq ($(NINJA),)
# Filter out long options to avoid flags like --no-print-directory which
# may result in false positive match for MAKE.n
MAKE.n = $(findstring n,$(firstword $(filter-out --%,$(MAKEFLAGS))))
MAKE.k = $(findstring k,$(firstword $(filter-out --%,$(MAKEFLAGS))))
MAKE.q = $(findstring q,$(firstword $(filter-out --%,$(MAKEFLAGS))))
MAKE.nq = $(if $(word 2, $(MAKE.n) $(MAKE.q)),nq)
NINJAFLAGS = $(if $V,-v) $(if $(MAKE.n), -n) $(if $(MAKE.k), -k0) \
$(filter-out -j, $(lastword -j1 $(filter -l% -j%, $(MAKEFLAGS)))) \
-d keepdepfile
ninja-cmd-goals = $(or $(MAKECMDGOALS), all)
ninja-cmd-goals += $(foreach g, $(MAKECMDGOALS), $(.ninja-goals.$g))
makefile-targets := build.ninja ctags TAGS cscope dist clean uninstall
# "ninja -t targets" also lists all prerequisites. If build system
# files are marked as PHONY, however, Make will always try to execute
# "ninja build.ninja".
ninja-targets := $(filter-out $(build-files) $(makefile-targets), $(ninja-targets))
.PHONY: $(ninja-targets) run-ninja
$(ninja-targets): run-ninja
# Use "| cat" to give Ninja a more "make-y" output. Use "+" to bypass the
# --output-sync line.
run-ninja: config-host.mak
ifneq ($(filter $(ninja-targets), $(ninja-cmd-goals)),)
+$(if $(MAKE.nq),@:,$(quiet-@)$(NINJA) $(NINJAFLAGS) \
$(sort $(filter $(ninja-targets), $(ninja-cmd-goals))) | cat)
endif
endif
ifeq ($(CONFIG_PLUGIN),y)
.PHONY: plugins
plugins:
$(call quiet-command,\
$(MAKE) $(SUBDIR_MAKEFLAGS) -C contrib/plugins V="$(V)", \
"BUILD", "example plugins")
endif # $(CONFIG_PLUGIN)
else # config-host.mak does not exist
config-host.mak:
ifneq ($(filter-out $(UNCHECKED_GOALS),$(MAKECMDGOALS)),$(if $(MAKECMDGOALS),,fail))
@echo "Please call configure before running make!"
@exit 1
endif
endif # config-host.mak does not exist
SUBDIR_MAKEFLAGS=$(if $(V),,--no-print-directory --quiet)
include $(SRC_PATH)/tests/Makefile.include
all: recurse-all
ROMS_RULES=$(foreach t, all clean distclean, $(addsuffix /$(t), $(ROMS)))
.PHONY: $(ROMS_RULES)
$(ROMS_RULES):
$(call quiet-command,$(MAKE) $(SUBDIR_MAKEFLAGS) -C $(dir $@) V="$(V)" TARGET_DIR="$(dir $@)" $(notdir $@),)
.PHONY: recurse-all recurse-clean
recurse-all: $(addsuffix /all, $(ROMS))
recurse-clean: $(addsuffix /clean, $(ROMS))
recurse-distclean: $(addsuffix /distclean, $(ROMS))
######################################################################
clean: recurse-clean
-$(quiet-@)test -f build.ninja && $(NINJA) $(NINJAFLAGS) -t clean || :
-$(quiet-@)test -f build.ninja && $(NINJA) $(NINJAFLAGS) clean-ctlist || :
find . \( -name '*.so' -o -name '*.dll' -o \
-name '*.[oda]' -o -name '*.gcno' \) -type f \
! -path ./roms/edk2/ArmPkg/Library/GccLto/liblto-aarch64.a \
! -path ./roms/edk2/ArmPkg/Library/GccLto/liblto-arm.a \
-exec rm {} +
rm -f TAGS cscope.* *~ */*~
VERSION = $(shell cat $(SRC_PATH)/VERSION)
dist: qemu-$(VERSION).tar.bz2
qemu-%.tar.bz2:
$(SRC_PATH)/scripts/make-release "$(SRC_PATH)" "$(patsubst qemu-%.tar.bz2,%,$@)"
distclean: clean recurse-distclean
-$(quiet-@)test -f build.ninja && $(NINJA) $(NINJAFLAGS) -t clean -g || :
rm -f config-host.mak Makefile.prereqs qemu-bundle
rm -f tests/tcg/*/config-target.mak tests/tcg/config-host.mak
rm -f config.status
rm -f roms/seabios/config.mak
rm -f qemu-plugins-ld.symbols qemu-plugins-ld64.symbols
rm -f *-config-target.h *-config-devices.mak *-config-devices.h
rm -rf meson-private meson-logs meson-info compile_commands.json
rm -f Makefile.ninja Makefile.mtest build.ninja.stamp meson.stamp
rm -f config.log
rm -f linux-headers/asm
rm -Rf .sdk
find-src-path = find "$(SRC_PATH)" -path "$(SRC_PATH)/meson" -prune -o \
-type l -prune -o \( -name "*.[chsS]" -o -name "*.[ch].inc" \)
.PHONY: ctags
ctags:
$(call quiet-command, \
rm -f "$(SRC_PATH)/"tags, \
"CTAGS", "Remove old tags")
$(call quiet-command, \
$(find-src-path) -exec ctags \
-f "$(SRC_PATH)/"tags --append {} +, \
"CTAGS", "Re-index $(SRC_PATH)")
.PHONY: gtags
gtags:
$(call quiet-command, \
rm -f "$(SRC_PATH)/"GTAGS; \
rm -f "$(SRC_PATH)/"GRTAGS; \
rm -f "$(SRC_PATH)/"GPATH, \
"GTAGS", "Remove old $@ files")
$(call quiet-command, \
(cd $(SRC_PATH) && \
$(find-src-path) -print | gtags -f -), \
"GTAGS", "Re-index $(SRC_PATH)")
.PHONY: TAGS
TAGS:
$(call quiet-command, \
rm -f "$(SRC_PATH)/"TAGS, \
"TAGS", "Remove old $@")
$(call quiet-command, \
$(find-src-path) -exec etags \
-f "$(SRC_PATH)/"TAGS --append {} +, \
"TAGS", "Re-index $(SRC_PATH)")
.PHONY: cscope
cscope:
$(call quiet-command, \
rm -f "$(SRC_PATH)/"cscope.* , \
"cscope", "Remove old $@ files")
$(call quiet-command, \
($(find-src-path) -print | sed -e 's,^\./,,' \
> "$(SRC_PATH)/cscope.files"), \
"cscope", "Create file list")
$(call quiet-command, \
cscope -b -i"$(SRC_PATH)/cscope.files" \
-f"$(SRC_PATH)"/cscope.out, \
"cscope", "Re-index $(SRC_PATH)")
# Needed by "meson install"
export DESTDIR
include $(SRC_PATH)/tests/lcitool/Makefile.include
include $(SRC_PATH)/tests/docker/Makefile.include
include $(SRC_PATH)/tests/vm/Makefile.include
print-help-run = printf " %-30s - %s\\n" "$1" "$2"
print-help = @$(call print-help-run,$1,$2)
.PHONY: help
help:
@echo 'Generic targets:'
$(call print-help,all,Build all)
$(call print-help,dir/file.o,Build specified target only)
$(call print-help,install,Install QEMU, documentation and tools)
$(call print-help,ctags/gtags/TAGS,Generate tags file for editors)
$(call print-help,cscope,Generate cscope index)
$(call print-help,sparse,Run sparse on the QEMU source)
@echo ''
ifeq ($(CONFIG_PLUGIN),y)
@echo 'Plugin targets:'
$(call print-help,plugins,Build the example TCG plugins)
@echo ''
endif
@echo 'Cleaning targets:'
$(call print-help,clean,Remove most generated files but keep the config)
$(call print-help,distclean,Remove all generated files)
$(call print-help,dist,Build a distributable tarball)
@echo ''
@echo 'Test targets:'
$(call print-help,check,Run all tests (check-help for details))
$(call print-help,bench,Run all benchmarks)
$(call print-help,lcitool-help,Help about targets for managing build environment manifests)
$(call print-help,docker-help,Help about targets running tests inside containers)
$(call print-help,vm-help,Help about targets running tests inside VM)
@echo ''
@echo 'Documentation targets:'
$(call print-help,html man,Build documentation in specified format)
@echo ''
ifdef CONFIG_WIN32
@echo 'Windows targets:'
$(call print-help,installer,Build NSIS-based installer for QEMU)
$(call print-help,msi,Build MSI-based installer for qemu-ga)
@echo ''
endif
$(call print-help,$(MAKE) [targets],(quiet build, default))
$(call print-help,$(MAKE) V=1 [targets],(verbose build))
# will delete the target of a rule if commands exit with a nonzero exit status
.DELETE_ON_ERROR:
print-%:
@echo '$*=$($*)'

154
README.md
View File

@ -1,159 +1,13 @@
# LibAFL, the fuzzer library.
# QEMU LibAFL Bridge
<img align="right" src="https://github.com/AFLplusplus/Website/raw/master/static/logo_256x256.png" alt="AFL++ Logo">
This is a patched QEMU that exposes an interface for LibAFL-based fuzzers.
Advanced Fuzzing Library - Slot your own fuzzers together and extend their features using Rust.
LibAFL is written and maintained by
* [Andrea Fioraldi](https://twitter.com/andreafioraldi) <andrea@aflplus.plus>
* [Dominik Maier](https://twitter.com/domenuk) <dominik@aflplus.plus>
* [s1341](https://twitter.com/srubenst1341) <github@shmarya.net>
* [Dongjia Zhang](https://github.com/tokatoka) <toka@aflplus.plus>
## Why LibAFL?
LibAFL gives you many of the benefits of an off-the-shelf fuzzer, while being completely customizable.
Some highlight features currently include:
- `fast`: We do everything we can at compile time, keeping runtime overhead minimal. Users reach 120k execs/sec in frida-mode on a phone (using all cores).
- `scalable`: `Low Level Message Passing`, `LLMP` for short, allows LibAFL to scale almost linearly over cores, and via TCP to multiple machines.
- `adaptable`: You can replace each part of LibAFL. For example, `BytesInput` is just one potential form of input:
feel free to add an AST-based input for structured fuzzing, and more.
- `multi platform`: LibAFL was confirmed to work on *Windows*, *MacOS*, *Linux*, and *Android* on *x86_64* and *aarch64*. `LibAFL` can be built in `no_std` mode to inject LibAFL into obscure targets like embedded devices and hypervisors.
- `bring your own target`: We support binary-only modes, like Frida-Mode, as well as multiple compilation passes for source-based instrumentation. Of course it's easy to add custom instrumentation backends.
## Overview
LibAFL is a collection of reusable pieces of fuzzers, written in Rust.
It is fast, multi-platform, no_std compatible, and scales over cores and machines.
It offers a main crate that provides building blocks for custom fuzzers, [libafl](./libafl), a library containing common code that can be used for target instrumentation, [libafl_targets](./libafl_targets), and a library providing facilities to wrap compilers, [libafl_cc](./libafl_cc).
LibAFL offers integrations with popular instrumentation frameworks. At the moment, the supported backends are:
+ SanitizerCoverage, in [libafl_targets](./libafl_targets)
+ Frida, in [libafl_frida](./libafl_frida)
+ QEMU user-mode, in [libafl_qemu](./libafl_qemu)
+ TinyInst, in [libafl_tinyinst](./libafl_tinyinst) by [elbiazo](https://github.com/elbiazo)
## Getting started
1. Install the dependencies
- The Rust development language.
We highly recommend *not* using your Linux distribution's package, as it is likely outdated. Instead, install
Rust directly; instructions can be found [here](https://www.rust-lang.org/tools/install).
- LLVM tools
The LLVM tools are needed (newer than LLVM 11.0.0 but older than LLVM 15.0.0)
- Cargo-make
We use cargo-make to build the fuzzers in the `fuzzers/` directory. You can install it with
```
cargo install cargo-make
```
2. Clone the LibAFL repository with
```
git clone https://github.com/AFLplusplus/LibAFL
```
3. Build the library using
```
cargo build --release
```
4. Build the API documentation with
```
cargo doc
```
5. Browse the LibAFL book (WIP!) with (requires [mdbook](https://github.com/rust-lang/mdBook))
```
cd docs && mdbook serve
```
We collect all example fuzzers in [`./fuzzers`](./fuzzers/).
Be sure to read their documentation (and source); this is *the natural way to get started!*
You can run each example fuzzer with
```
cargo make run
```
as long as the fuzzer directory has a `Makefile.toml` file.
The best-tested fuzzer is [`./fuzzers/libfuzzer_libpng`](./fuzzers/libfuzzer_libpng), a multicore libfuzzer-like fuzzer using LibAFL for a libpng harness.
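For instance, a minimal session with that fuzzer (starting from the repository root, with cargo-make installed as above) looks like:
```
cd fuzzers/libfuzzer_libpng
cargo make run
```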
## Resources
+ [Installation guide](./docs/src/getting_started/setup.md)
+ [Online API documentation](https://docs.rs/libafl/)
+ The LibAFL book (WIP) [online](https://aflplus.plus/libafl-book) or in the [repo](./docs/src/)
+ Our research [paper](https://www.s3.eurecom.fr/docs/ccs22_fioraldi.pdf)
+ Our RC3 [talk](http://www.youtube.com/watch?v=3RWkT1Q5IV0 "Fuzzers Like LEGO") explaining the core concepts
+ Our Fuzzcon Europe [talk](https://www.youtube.com/watch?v=PWB8GIhFAaI "LibAFL: The Advanced Fuzzing Library") with a somewhat dated, but still useful, step-by-step discussion on how to build some example fuzzers
+ The Fuzzing101 [solutions](https://github.com/epi052/fuzzing-101-solutions) & series of [blog posts](https://epi052.gitlab.io/notes-to-self/blog/2021-11-01-fuzzing-101-with-libafl/) by [epi](https://github.com/epi052)
+ Blog post on the binary-only fuzzing library libafl_qemu, [Hacking TMNF - Fuzzing the game server](https://blog.bricked.tech/posts/tmnf/part1/), by [RickdeJager](https://github.com/RickdeJager).
## Contributing
For bugs, feel free to open issues or contact us directly. Thank you for your support. <3
Even though we will gladly assist you in finishing up your PR, try to
- keep all the crates compiling with *stable* rust (hide any non-stable code under [`cfg`s](https://github.com/AFLplusplus/LibAFL/blob/main/libafl/build.rs#L26))
- run `cargo fmt` on your code before pushing
- check the output of `cargo clippy --all` or `./clippy.sh`
- run `cargo build --no-default-features` to check for `no_std` compatibility (and possibly add `#[cfg(feature = "std")]` to hide parts of your code).
Some of the items in this list may be hard; don't be afraid to open a PR even if you cannot fix them by yourself, so we can help.
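Concretely, the pre-PR checks above boil down to something like the following (a sketch; `./clippy.sh` is the in-repo alternative to the plain clippy invocation):
```
cargo fmt
cargo clippy --all
cargo build --no-default-features
```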
## Cite
If you use LibAFL for your academic work, please cite the following paper:
```bibtex
@inproceedings{libafl,
author = {Andrea Fioraldi and Dominik Maier and Dongjia Zhang and Davide Balzarotti},
title = {{LibAFL: A Framework to Build Modular and Reusable Fuzzers}},
booktitle = {Proceedings of the 29th ACM conference on Computer and communications security (CCS)},
series = {CCS '22},
year = {2022},
month = {November},
location = {Los Angeles, U.S.A.},
publisher = {ACM},
}
```
This raw interface is used in `libafl_qemu`, which exposes a more Rust-like API.
#### License
<sup>
Licensed under either of <a href="LICENSE-APACHE">Apache License, Version
2.0</a> or <a href="LICENSE-MIT">MIT license</a> at your option.
This project extends the QEMU emulator, and our contributions to previously existing files adopt those files' respective licenses; the files that we have added are made available under the terms of the GNU General Public License as published by the Free Software Foundation, either version 2 of the License, or (at your option) any later version.
</sup>
<br>
<sub>
Unless you explicitly state otherwise, any contribution intentionally submitted
for inclusion in this crate by you, as defined in the Apache-2.0 license, shall
be dual licensed as above, without any additional terms or conditions.
</sub>
<br>
<sub>
Dependencies under more restrictive licenses, such as GPL or AGPL, can be enabled
using the respective feature in each crate when it is present, such as the
'agpl' feature of the libafl crate.
</sub>

171
README.qemu.rst Normal file
View File

@ -0,0 +1,171 @@
===========
QEMU README
===========
QEMU is a generic and open source machine & userspace emulator and
virtualizer.
QEMU is capable of emulating a complete machine in software without any
need for hardware virtualization support. By using dynamic translation,
it achieves very good performance. QEMU can also integrate with the Xen
and KVM hypervisors to provide emulated hardware while allowing the
hypervisor to manage the CPU. With hypervisor support, QEMU can achieve
near native performance for CPUs. When QEMU emulates CPUs directly it is
capable of running operating systems made for one machine (e.g. an ARMv7
board) on a different machine (e.g. an x86_64 PC board).
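As an illustrative full-system invocation (assuming KVM is available on the host; ``disk.img`` is a placeholder for your own guest image):
.. code-block:: shell
qemu-system-x86_64 -accel kvm -m 2G -drive file=disk.img,format=raw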
QEMU is also capable of providing userspace API virtualization for Linux
and BSD kernel interfaces. This allows binaries compiled against one
architecture ABI (e.g. the Linux PPC64 ABI) to be run on a host using a
different architecture ABI (e.g. the Linux x86_64 ABI). This does not
involve any hardware emulation, simply CPU and syscall emulation.
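For example, user-mode emulation of a foreign-architecture binary might look like this (the sysroot path and binary name are placeholders):
.. code-block:: shell
qemu-ppc64 -L /usr/powerpc64-linux-gnu ./hello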
QEMU aims to fit into a variety of use cases. It can be invoked directly
by users wishing to have full control over its behaviour and settings.
It also aims to facilitate integration into higher level management
layers, by providing a stable command line interface and monitor API.
It is commonly invoked indirectly via the libvirt library when using
open source applications such as oVirt, OpenStack and virt-manager.
QEMU as a whole is released under the GNU General Public License,
version 2. For full licensing details, consult the LICENSE file.
Documentation
=============
Documentation can be found hosted online at
`<https://www.qemu.org/documentation/>`_. The documentation for the
current development version that is available at
`<https://www.qemu.org/docs/master/>`_ is generated from the ``docs/``
folder in the source tree, and is built by `Sphinx
<https://www.sphinx-doc.org/en/master/>`_.
Building
========
QEMU is multi-platform software intended to be buildable on all modern
Linux platforms, OS-X, Win32 (via the Mingw64 toolchain) and a variety
of other UNIX targets. The simple steps to build QEMU are:
.. code-block:: shell
mkdir build
cd build
../configure
make
Additional information can also be found online via the QEMU website:
* `<https://wiki.qemu.org/Hosts/Linux>`_
* `<https://wiki.qemu.org/Hosts/Mac>`_
* `<https://wiki.qemu.org/Hosts/W32>`_
Submitting patches
==================
The QEMU source code is maintained under the GIT version control system.
.. code-block:: shell
git clone https://gitlab.com/qemu-project/qemu.git
When submitting patches, one common approach is to use 'git
format-patch' and/or 'git send-email' to format & send the mail to the
qemu-devel@nongnu.org mailing list. All patches submitted must contain
a 'Signed-off-by' line from the author. Patches should follow the
guidelines set out in the `style section
<https://www.qemu.org/docs/master/devel/style.html>`_ of
the Developers Guide.
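A minimal single-patch submission with those tools might look like the following (illustrative only; adjust for your series and add a cover letter where appropriate):
.. code-block:: shell
git format-patch -1 HEAD
git send-email --to=qemu-devel@nongnu.org 0001-*.patch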
Additional information on submitting patches can be found online via
the QEMU website
* `<https://wiki.qemu.org/Contribute/SubmitAPatch>`_
* `<https://wiki.qemu.org/Contribute/TrivialPatches>`_
The QEMU website is also maintained under source control.
.. code-block:: shell
git clone https://gitlab.com/qemu-project/qemu-web.git
* `<https://www.qemu.org/2017/02/04/the-new-qemu-website-is-up/>`_
A 'git-publish' utility was created to make the above process less
cumbersome, and is highly recommended for making regular contributions,
or even just for sending consecutive patch series revisions. It also
requires a working 'git send-email' setup, and by default doesn't
automate everything, so you may want to go through the above steps
manually once.
For installation instructions, please go to
* `<https://github.com/stefanha/git-publish>`_
The workflow with 'git-publish' is:
.. code-block:: shell
$ git checkout master -b my-feature
$ # work on new commits, add your 'Signed-off-by' lines to each
$ git publish
Your patch series will be sent and tagged as my-feature-v1, in case you need to refer
back to it in the future.
Sending v2:
.. code-block:: shell
$ git checkout my-feature # same topic branch
$ # making changes to the commits (using 'git rebase', for example)
$ git publish
Your patch series will be sent with 'v2' tag in the subject and the git tip
will be tagged as my-feature-v2.
Bug reporting
=============
The QEMU project uses GitLab issues to track bugs. Bugs
found when running code built from QEMU git or upstream released sources
should be reported via:
* `<https://gitlab.com/qemu-project/qemu/-/issues>`_
If using QEMU via an operating system vendor pre-built binary package, it
is preferable to report bugs to the vendor's own bug tracker first. If
the bug is also known to affect the latest upstream code, it can also be
reported via GitLab.
For additional information on bug reporting consult:
* `<https://wiki.qemu.org/Contribute/ReportABug>`_
ChangeLog
=========
For version history and release notes, please visit
`<https://wiki.qemu.org/ChangeLog/>`_ or look at the git history for
more detailed information.
Contact
=======
The QEMU community can be contacted in a number of ways, with the two
main methods being email and IRC
* `<mailto:qemu-devel@nongnu.org>`_
* `<https://lists.nongnu.org/mailman/listinfo/qemu-devel>`_
* #qemu on irc.oftc.net
Information on additional methods of contacting the community can be
found online via the QEMU website:
* `<https://wiki.qemu.org/Contribute/StartHere>`_

1
VERSION Normal file
View File

@ -0,0 +1 @@
7.1.91

21
accel/Kconfig Normal file
View File

@ -0,0 +1,21 @@
config WHPX
bool
config NVMM
bool
config HAX
bool
config HVF
bool
config TCG
bool
config KVM
bool
config XEN
bool
select FSDEV_9P if VIRTFS

155
accel/accel-common.c Normal file
View File

@ -0,0 +1,155 @@
/*
* QEMU accel class, components common to system emulation and user mode
*
* Copyright (c) 2003-2008 Fabrice Bellard
* Copyright (c) 2014 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include "qemu/osdep.h"
#include "qemu/accel.h"
#include "cpu.h"
#include "hw/core/accel-cpu.h"
#ifndef CONFIG_USER_ONLY
#include "accel-softmmu.h"
#endif /* !CONFIG_USER_ONLY */
static const TypeInfo accel_type = {
.name = TYPE_ACCEL,
.parent = TYPE_OBJECT,
.class_size = sizeof(AccelClass),
.instance_size = sizeof(AccelState),
};
/* Lookup AccelClass from opt_name. Returns NULL if not found */
AccelClass *accel_find(const char *opt_name)
{
char *class_name = g_strdup_printf(ACCEL_CLASS_NAME("%s"), opt_name);
AccelClass *ac = ACCEL_CLASS(module_object_class_by_name(class_name));
g_free(class_name);
return ac;
}
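/*
 * Illustrative note: ACCEL_CLASS_NAME("%s") expands to "%s-accel", so e.g.
 * accel_find("tcg") looks up the "tcg-accel" class (see accel-user.c below).
 */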
/* Return the name of the current accelerator */
const char *current_accel_name(void)
{
AccelClass *ac = ACCEL_GET_CLASS(current_accel());
return ac->name;
}
static void accel_init_cpu_int_aux(ObjectClass *klass, void *opaque)
{
CPUClass *cc = CPU_CLASS(klass);
AccelCPUClass *accel_cpu = opaque;
/*
* The first callback allows accel-cpu to run initializations
* for the CPU, customizing CPU behavior according to the accelerator.
*
* The second one allows the CPU to customize the accel-cpu
* behavior according to the CPU.
*
* The second is currently only used by TCG, to specialize the
* TCGCPUOps depending on the CPU type.
*/
cc->accel_cpu = accel_cpu;
if (accel_cpu->cpu_class_init) {
accel_cpu->cpu_class_init(cc);
}
if (cc->init_accel_cpu) {
cc->init_accel_cpu(accel_cpu, cc);
}
}
/* initialize the arch-specific accel CpuClass interfaces */
static void accel_init_cpu_interfaces(AccelClass *ac)
{
const char *ac_name; /* AccelClass name */
char *acc_name; /* AccelCPUClass name */
ObjectClass *acc; /* AccelCPUClass */
ac_name = object_class_get_name(OBJECT_CLASS(ac));
g_assert(ac_name != NULL);
acc_name = g_strdup_printf("%s-%s", ac_name, CPU_RESOLVING_TYPE);
acc = object_class_by_name(acc_name);
g_free(acc_name);
if (acc) {
object_class_foreach(accel_init_cpu_int_aux,
CPU_RESOLVING_TYPE, false, acc);
}
}
void accel_init_interfaces(AccelClass *ac)
{
#ifndef CONFIG_USER_ONLY
accel_init_ops_interfaces(ac);
#endif /* !CONFIG_USER_ONLY */
accel_init_cpu_interfaces(ac);
}
void accel_cpu_instance_init(CPUState *cpu)
{
CPUClass *cc = CPU_GET_CLASS(cpu);
if (cc->accel_cpu && cc->accel_cpu->cpu_instance_init) {
cc->accel_cpu->cpu_instance_init(cpu);
}
}
bool accel_cpu_realizefn(CPUState *cpu, Error **errp)
{
CPUClass *cc = CPU_GET_CLASS(cpu);
if (cc->accel_cpu && cc->accel_cpu->cpu_realizefn) {
return cc->accel_cpu->cpu_realizefn(cpu, errp);
}
return true;
}
int accel_supported_gdbstub_sstep_flags(void)
{
AccelState *accel = current_accel();
AccelClass *acc = ACCEL_GET_CLASS(accel);
if (acc->gdbstub_supported_sstep_flags) {
return acc->gdbstub_supported_sstep_flags();
}
return 0;
}
static const TypeInfo accel_cpu_type = {
.name = TYPE_ACCEL_CPU,
.parent = TYPE_OBJECT,
.abstract = true,
.class_size = sizeof(AccelCPUClass),
};
static void register_accel_types(void)
{
type_register_static(&accel_type);
type_register_static(&accel_cpu_type);
}
type_init(register_accel_types);

106
accel/accel-softmmu.c Normal file
View File

@ -0,0 +1,106 @@
/*
* QEMU accel class, system emulation components
*
* Copyright (c) 2003-2008 Fabrice Bellard
* Copyright (c) 2014 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include "qemu/osdep.h"
#include "qemu/accel.h"
#include "hw/boards.h"
#include "sysemu/cpus.h"
#include "accel-softmmu.h"
int accel_init_machine(AccelState *accel, MachineState *ms)
{
AccelClass *acc = ACCEL_GET_CLASS(accel);
int ret;
ms->accelerator = accel;
*(acc->allowed) = true;
ret = acc->init_machine(ms);
if (ret < 0) {
ms->accelerator = NULL;
*(acc->allowed) = false;
object_unref(OBJECT(accel));
} else {
object_set_accelerator_compat_props(acc->compat_props);
}
return ret;
}
AccelState *current_accel(void)
{
return current_machine->accelerator;
}
void accel_setup_post(MachineState *ms)
{
AccelState *accel = ms->accelerator;
AccelClass *acc = ACCEL_GET_CLASS(accel);
if (acc->setup_post) {
acc->setup_post(ms, accel);
}
}
/* initialize the arch-independent accel operation interfaces */
void accel_init_ops_interfaces(AccelClass *ac)
{
const char *ac_name;
char *ops_name;
ObjectClass *oc;
AccelOpsClass *ops;
ac_name = object_class_get_name(OBJECT_CLASS(ac));
g_assert(ac_name != NULL);
ops_name = g_strdup_printf("%s" ACCEL_OPS_SUFFIX, ac_name);
ops = ACCEL_OPS_CLASS(module_object_class_by_name(ops_name));
oc = module_object_class_by_name(ops_name);
if (!oc) {
error_report("fatal: could not load module for type '%s'", ops_name);
exit(1);
}
g_free(ops_name);
ops = ACCEL_OPS_CLASS(oc);
/*
* all accelerators need to define ops, providing at least a mandatory
* non-NULL create_vcpu_thread operation.
*/
g_assert(ops != NULL);
if (ops->ops_init) {
ops->ops_init(ops);
}
cpus_register_accel(ops);
}
static const TypeInfo accel_ops_type_info = {
.name = TYPE_ACCEL_OPS,
.parent = TYPE_OBJECT,
.abstract = true,
.class_size = sizeof(AccelOpsClass),
};
static void accel_softmmu_register_types(void)
{
type_register_static(&accel_ops_type_info);
}
type_init(accel_softmmu_register_types);

15
accel/accel-softmmu.h Normal file
View File

@ -0,0 +1,15 @@
/*
* QEMU System Emulation accel internal functions
*
* Copyright 2021 SUSE LLC
*
* This work is licensed under the terms of the GNU GPL, version 2 or later.
* See the COPYING file in the top-level directory.
*/
#ifndef ACCEL_SOFTMMU_H
#define ACCEL_SOFTMMU_H
void accel_init_ops_interfaces(AccelClass *ac);
#endif /* ACCEL_SOFTMMU_H */

24
accel/accel-user.c Normal file
View File

@ -0,0 +1,24 @@
/*
* QEMU accel class, user-mode components
*
* Copyright 2021 SUSE LLC
*
* This work is licensed under the terms of the GNU GPL, version 2 or later.
* See the COPYING file in the top-level directory.
*/
#include "qemu/osdep.h"
#include "qemu/accel.h"
AccelState *current_accel(void)
{
static AccelState *accel;
if (!accel) {
AccelClass *ac = accel_find("tcg");
g_assert(ac != NULL);
accel = ACCEL(object_new_with_class(OBJECT_CLASS(ac)));
}
return accel;
}

82
accel/dummy-cpus.c Normal file
View File

@ -0,0 +1,82 @@
/*
* Dummy cpu thread code
*
* Copyright IBM, Corp. 2011
*
* Authors:
* Anthony Liguori <aliguori@us.ibm.com>
*
* This work is licensed under the terms of the GNU GPL, version 2 or later.
* See the COPYING file in the top-level directory.
*
*/
#include "qemu/osdep.h"
#include "qemu/rcu.h"
#include "sysemu/cpus.h"
#include "qemu/guest-random.h"
#include "qemu/main-loop.h"
#include "hw/core/cpu.h"
static void *dummy_cpu_thread_fn(void *arg)
{
CPUState *cpu = arg;
rcu_register_thread();
qemu_mutex_lock_iothread();
qemu_thread_get_self(cpu->thread);
cpu->thread_id = qemu_get_thread_id();
cpu->can_do_io = 1;
current_cpu = cpu;
#ifndef _WIN32
sigset_t waitset;
int r;
sigemptyset(&waitset);
sigaddset(&waitset, SIG_IPI);
#endif
/* signal CPU creation */
cpu_thread_signal_created(cpu);
qemu_guest_random_seed_thread_part2(cpu->random_seed);
do {
qemu_mutex_unlock_iothread();
#ifndef _WIN32
do {
int sig;
r = sigwait(&waitset, &sig);
} while (r == -1 && (errno == EAGAIN || errno == EINTR));
if (r == -1) {
perror("sigwait");
exit(1);
}
#else
qemu_sem_wait(&cpu->sem);
#endif
qemu_mutex_lock_iothread();
qemu_wait_io_event(cpu);
} while (!cpu->unplug);
qemu_mutex_unlock_iothread();
rcu_unregister_thread();
return NULL;
}
void dummy_start_vcpu_thread(CPUState *cpu)
{
char thread_name[VCPU_THREAD_NAME_SIZE];
cpu->thread = g_malloc0(sizeof(QemuThread));
cpu->halt_cond = g_malloc0(sizeof(QemuCond));
qemu_cond_init(cpu->halt_cond);
snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/DUMMY",
cpu->cpu_index);
qemu_thread_create(cpu->thread, thread_name, dummy_cpu_thread_fn, cpu,
QEMU_THREAD_JOINABLE);
#ifdef _WIN32
qemu_sem_init(&cpu->sem, 0);
#endif
}

View File

@ -0,0 +1,8 @@
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
<key>com.apple.security.hypervisor</key>
<true/>
</dict>
</plist>

488
accel/hvf/hvf-accel-ops.c Normal file
View File

@ -0,0 +1,488 @@
/*
* Copyright 2008 IBM Corporation
* 2008 Red Hat, Inc.
* Copyright 2011 Intel Corporation
* Copyright 2016 Veertu, Inc.
* Copyright 2017 The Android Open Source Project
*
* QEMU Hypervisor.framework support
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of version 2 of the GNU General Public
* License as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*
* This file contains code in the public domain from the hvdos project:
* https://github.com/mist64/hvdos
*
* Parts Copyright (c) 2011 NetApp, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include "qemu/osdep.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "exec/address-spaces.h"
#include "exec/exec-all.h"
#include "sysemu/cpus.h"
#include "sysemu/hvf.h"
#include "sysemu/hvf_int.h"
#include "sysemu/runstate.h"
#include "qemu/guest-random.h"
HVFState *hvf_state;
#ifdef __aarch64__
#define HV_VM_DEFAULT NULL
#endif
/* Memory slots */
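/*
 * Editorial note: a populated slot overlaps the guest range [start, start + size)
 * exactly when start < slot->start + slot->size and start + size > slot->start,
 * which is the check hvf_find_overlap_slot() performs below.
 */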
hvf_slot *hvf_find_overlap_slot(uint64_t start, uint64_t size)
{
hvf_slot *slot;
int x;
for (x = 0; x < hvf_state->num_slots; ++x) {
slot = &hvf_state->slots[x];
if (slot->size && start < (slot->start + slot->size) &&
(start + size) > slot->start) {
return slot;
}
}
return NULL;
}
struct mac_slot {
int present;
uint64_t size;
uint64_t gpa_start;
uint64_t gva;
};
struct mac_slot mac_slots[32];
static int do_hvf_set_memory(hvf_slot *slot, hv_memory_flags_t flags)
{
struct mac_slot *macslot;
hv_return_t ret;
macslot = &mac_slots[slot->slot_id];
if (macslot->present) {
if (macslot->size != slot->size) {
macslot->present = 0;
ret = hv_vm_unmap(macslot->gpa_start, macslot->size);
assert_hvf_ok(ret);
}
}
if (!slot->size) {
return 0;
}
macslot->present = 1;
macslot->gpa_start = slot->start;
macslot->size = slot->size;
ret = hv_vm_map(slot->mem, slot->start, slot->size, flags);
assert_hvf_ok(ret);
return 0;
}
static void hvf_set_phys_mem(MemoryRegionSection *section, bool add)
{
hvf_slot *mem;
MemoryRegion *area = section->mr;
bool writable = !area->readonly && !area->rom_device;
hv_memory_flags_t flags;
uint64_t page_size = qemu_real_host_page_size();
if (!memory_region_is_ram(area)) {
if (writable) {
return;
} else if (!memory_region_is_romd(area)) {
/*
* If the memory device is not in romd_mode, then we actually want
* to remove the hvf memory slot so all accesses will trap.
*/
add = false;
}
}
if (!QEMU_IS_ALIGNED(int128_get64(section->size), page_size) ||
!QEMU_IS_ALIGNED(section->offset_within_address_space, page_size)) {
/* Not page aligned, so we can not map as RAM */
add = false;
}
mem = hvf_find_overlap_slot(
section->offset_within_address_space,
int128_get64(section->size));
if (mem && add) {
if (mem->size == int128_get64(section->size) &&
mem->start == section->offset_within_address_space &&
mem->mem == (memory_region_get_ram_ptr(area) +
section->offset_within_region)) {
return; /* Same region was attempted to register, go away. */
}
}
/* Region needs to be reset. set the size to 0 and remap it. */
if (mem) {
mem->size = 0;
if (do_hvf_set_memory(mem, 0)) {
error_report("Failed to reset overlapping slot");
abort();
}
}
if (!add) {
return;
}
if (area->readonly ||
(!memory_region_is_ram(area) && memory_region_is_romd(area))) {
flags = HV_MEMORY_READ | HV_MEMORY_EXEC;
} else {
flags = HV_MEMORY_READ | HV_MEMORY_WRITE | HV_MEMORY_EXEC;
}
/* Now make a new slot. */
int x;
for (x = 0; x < hvf_state->num_slots; ++x) {
mem = &hvf_state->slots[x];
if (!mem->size) {
break;
}
}
if (x == hvf_state->num_slots) {
error_report("No free slots");
abort();
}
mem->size = int128_get64(section->size);
mem->mem = memory_region_get_ram_ptr(area) + section->offset_within_region;
mem->start = section->offset_within_address_space;
mem->region = area;
if (do_hvf_set_memory(mem, flags)) {
error_report("Error registering new memory slot");
abort();
}
}
static void do_hvf_cpu_synchronize_state(CPUState *cpu, run_on_cpu_data arg)
{
if (!cpu->vcpu_dirty) {
hvf_get_registers(cpu);
cpu->vcpu_dirty = true;
}
}
static void hvf_cpu_synchronize_state(CPUState *cpu)
{
if (!cpu->vcpu_dirty) {
run_on_cpu(cpu, do_hvf_cpu_synchronize_state, RUN_ON_CPU_NULL);
}
}
static void do_hvf_cpu_synchronize_set_dirty(CPUState *cpu,
run_on_cpu_data arg)
{
/* QEMU state is the reference, push it to HVF now and on next entry */
cpu->vcpu_dirty = true;
}
static void hvf_cpu_synchronize_post_reset(CPUState *cpu)
{
run_on_cpu(cpu, do_hvf_cpu_synchronize_set_dirty, RUN_ON_CPU_NULL);
}
static void hvf_cpu_synchronize_post_init(CPUState *cpu)
{
run_on_cpu(cpu, do_hvf_cpu_synchronize_set_dirty, RUN_ON_CPU_NULL);
}
static void hvf_cpu_synchronize_pre_loadvm(CPUState *cpu)
{
run_on_cpu(cpu, do_hvf_cpu_synchronize_set_dirty, RUN_ON_CPU_NULL);
}
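/*
 * Dirty page tracking: while logging is enabled the slot is mapped
 * read/execute only, so guest writes fault out of hv_vcpu_run() and the
 * architecture-specific fault handler can mark the pages dirty; once
 * logging stops, write access is restored.
 */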
static void hvf_set_dirty_tracking(MemoryRegionSection *section, bool on)
{
hvf_slot *slot;
slot = hvf_find_overlap_slot(
section->offset_within_address_space,
int128_get64(section->size));
/* protect region against writes; begin tracking it */
if (on) {
slot->flags |= HVF_SLOT_LOG;
hv_vm_protect((uintptr_t)slot->start, (size_t)slot->size,
HV_MEMORY_READ | HV_MEMORY_EXEC);
/* stop tracking the region */
} else {
slot->flags &= ~HVF_SLOT_LOG;
hv_vm_protect((uintptr_t)slot->start, (size_t)slot->size,
HV_MEMORY_READ | HV_MEMORY_WRITE | HV_MEMORY_EXEC);
}
}
static void hvf_log_start(MemoryListener *listener,
MemoryRegionSection *section, int old, int new)
{
if (old != 0) {
return;
}
hvf_set_dirty_tracking(section, 1);
}
static void hvf_log_stop(MemoryListener *listener,
MemoryRegionSection *section, int old, int new)
{
if (new != 0) {
return;
}
hvf_set_dirty_tracking(section, 0);
}
static void hvf_log_sync(MemoryListener *listener,
MemoryRegionSection *section)
{
/*
* sync of dirty pages is handled elsewhere; just make sure we keep
* tracking the region.
*/
hvf_set_dirty_tracking(section, 1);
}
static void hvf_region_add(MemoryListener *listener,
MemoryRegionSection *section)
{
hvf_set_phys_mem(section, true);
}
static void hvf_region_del(MemoryListener *listener,
MemoryRegionSection *section)
{
hvf_set_phys_mem(section, false);
}
static MemoryListener hvf_memory_listener = {
.name = "hvf",
.priority = 10,
.region_add = hvf_region_add,
.region_del = hvf_region_del,
.log_start = hvf_log_start,
.log_stop = hvf_log_stop,
.log_sync = hvf_log_sync,
};
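/*
 * A no-op handler for SIG_IPI: the signal is used to kick vCPU threads,
 * and a handler must be installed so that delivering it merely interrupts
 * the thread instead of taking the default action.
 */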
static void dummy_signal(int sig)
{
}
bool hvf_allowed;
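/*
 * Accelerator init: create the Hypervisor.framework VM, initialise the
 * memory slot table, register the memory listener on address_space_memory
 * and hand the rest over to hvf_arch_init().
 */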
static int hvf_accel_init(MachineState *ms)
{
int x;
hv_return_t ret;
HVFState *s;
ret = hv_vm_create(HV_VM_DEFAULT);
assert_hvf_ok(ret);
s = g_new0(HVFState, 1);
s->num_slots = ARRAY_SIZE(s->slots);
for (x = 0; x < s->num_slots; ++x) {
s->slots[x].size = 0;
s->slots[x].slot_id = x;
}
hvf_state = s;
memory_listener_register(&hvf_memory_listener, &address_space_memory);
return hvf_arch_init();
}
static void hvf_accel_class_init(ObjectClass *oc, void *data)
{
AccelClass *ac = ACCEL_CLASS(oc);
ac->name = "HVF";
ac->init_machine = hvf_accel_init;
ac->allowed = &hvf_allowed;
}
static const TypeInfo hvf_accel_type = {
.name = TYPE_HVF_ACCEL,
.parent = TYPE_ACCEL,
.class_init = hvf_accel_class_init,
};
static void hvf_type_init(void)
{
type_register_static(&hvf_accel_type);
}
type_init(hvf_type_init);
static void hvf_vcpu_destroy(CPUState *cpu)
{
hv_return_t ret = hv_vcpu_destroy(cpu->hvf->fd);
assert_hvf_ok(ret);
hvf_arch_vcpu_destroy(cpu);
g_free(cpu->hvf);
cpu->hvf = NULL;
}
static int hvf_init_vcpu(CPUState *cpu)
{
int r;
cpu->hvf = g_malloc0(sizeof(*cpu->hvf));
/* init cpu signals */
struct sigaction sigact;
memset(&sigact, 0, sizeof(sigact));
sigact.sa_handler = dummy_signal;
sigaction(SIG_IPI, &sigact, NULL);
pthread_sigmask(SIG_BLOCK, NULL, &cpu->hvf->unblock_ipi_mask);
sigdelset(&cpu->hvf->unblock_ipi_mask, SIG_IPI);
#ifdef __aarch64__
r = hv_vcpu_create(&cpu->hvf->fd, (hv_vcpu_exit_t **)&cpu->hvf->exit, NULL);
#else
r = hv_vcpu_create((hv_vcpuid_t *)&cpu->hvf->fd, HV_VCPU_DEFAULT);
#endif
cpu->vcpu_dirty = 1;
assert_hvf_ok(r);
return hvf_arch_init_vcpu(cpu);
}
/*
* The HVF-specific vCPU thread function. This one should only run when the host
* CPU supports the VMX "unrestricted guest" feature.
*/
static void *hvf_cpu_thread_fn(void *arg)
{
CPUState *cpu = arg;
int r;
assert(hvf_enabled());
rcu_register_thread();
qemu_mutex_lock_iothread();
qemu_thread_get_self(cpu->thread);
cpu->thread_id = qemu_get_thread_id();
cpu->can_do_io = 1;
current_cpu = cpu;
hvf_init_vcpu(cpu);
/* signal CPU creation */
cpu_thread_signal_created(cpu);
qemu_guest_random_seed_thread_part2(cpu->random_seed);
do {
if (cpu_can_run(cpu)) {
r = hvf_vcpu_exec(cpu);
if (r == EXCP_DEBUG) {
cpu_handle_guest_debug(cpu);
}
}
qemu_wait_io_event(cpu);
} while (!cpu->unplug || cpu_can_run(cpu));
hvf_vcpu_destroy(cpu);
cpu_thread_signal_destroyed(cpu);
qemu_mutex_unlock_iothread();
rcu_unregister_thread();
return NULL;
}
static void hvf_start_vcpu_thread(CPUState *cpu)
{
char thread_name[VCPU_THREAD_NAME_SIZE];
/*
* HVF currently does not support TCG, and only runs in
* unrestricted-guest mode.
*/
assert(hvf_enabled());
cpu->thread = g_malloc0(sizeof(QemuThread));
cpu->halt_cond = g_malloc0(sizeof(QemuCond));
qemu_cond_init(cpu->halt_cond);
snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/HVF",
cpu->cpu_index);
qemu_thread_create(cpu->thread, thread_name, hvf_cpu_thread_fn,
cpu, QEMU_THREAD_JOINABLE);
}
static void hvf_accel_ops_class_init(ObjectClass *oc, void *data)
{
AccelOpsClass *ops = ACCEL_OPS_CLASS(oc);
ops->create_vcpu_thread = hvf_start_vcpu_thread;
ops->kick_vcpu_thread = hvf_kick_vcpu_thread;
ops->synchronize_post_reset = hvf_cpu_synchronize_post_reset;
ops->synchronize_post_init = hvf_cpu_synchronize_post_init;
ops->synchronize_state = hvf_cpu_synchronize_state;
ops->synchronize_pre_loadvm = hvf_cpu_synchronize_pre_loadvm;
};
static const TypeInfo hvf_accel_ops_type = {
.name = ACCEL_OPS_NAME("hvf"),
.parent = TYPE_ACCEL_OPS,
.class_init = hvf_accel_ops_class_init,
.abstract = true,
};
static void hvf_accel_ops_register_types(void)
{
type_register_static(&hvf_accel_ops_type);
}
type_init(hvf_accel_ops_register_types);

46
accel/hvf/hvf-all.c Normal file
View File

@ -0,0 +1,46 @@
/*
* QEMU Hypervisor.framework support
*
* This work is licensed under the terms of the GNU GPL, version 2. See
* the COPYING file in the top-level directory.
*
* Contributions after 2012-01-13 are licensed under the terms of the
* GNU GPL, version 2 or (at your option) any later version.
*/
#include "qemu/osdep.h"
#include "qemu/error-report.h"
#include "sysemu/hvf.h"
#include "sysemu/hvf_int.h"
void assert_hvf_ok(hv_return_t ret)
{
if (ret == HV_SUCCESS) {
return;
}
switch (ret) {
case HV_ERROR:
error_report("Error: HV_ERROR");
break;
case HV_BUSY:
error_report("Error: HV_BUSY");
break;
case HV_BAD_ARGUMENT:
error_report("Error: HV_BAD_ARGUMENT");
break;
case HV_NO_RESOURCES:
error_report("Error: HV_NO_RESOURCES");
break;
case HV_NO_DEVICE:
error_report("Error: HV_NO_DEVICE");
break;
case HV_UNSUPPORTED:
error_report("Error: HV_UNSUPPORTED");
break;
default:
error_report("Unknown Error");
}
abort();
}

7
accel/hvf/meson.build Normal file
View File

@ -0,0 +1,7 @@
hvf_ss = ss.source_set()
hvf_ss.add(files(
'hvf-all.c',
'hvf-accel-ops.c',
))
specific_ss.add_all(when: 'CONFIG_HVF', if_true: hvf_ss)

121
accel/kvm/kvm-accel-ops.c Normal file
View File

@ -0,0 +1,121 @@
/*
* QEMU KVM support
*
* Copyright IBM, Corp. 2008
* Red Hat, Inc. 2008
*
* Authors:
* Anthony Liguori <aliguori@us.ibm.com>
* Glauber Costa <gcosta@redhat.com>
*
* This work is licensed under the terms of the GNU GPL, version 2 or later.
* See the COPYING file in the top-level directory.
*
*/
#include "qemu/osdep.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "sysemu/kvm.h"
#include "sysemu/kvm_int.h"
#include "sysemu/runstate.h"
#include "sysemu/cpus.h"
#include "qemu/guest-random.h"
#include "qapi/error.h"
#include <linux/kvm.h>
#include "kvm-cpus.h"
static void *kvm_vcpu_thread_fn(void *arg)
{
CPUState *cpu = arg;
int r;
rcu_register_thread();
qemu_mutex_lock_iothread();
qemu_thread_get_self(cpu->thread);
cpu->thread_id = qemu_get_thread_id();
cpu->can_do_io = 1;
current_cpu = cpu;
r = kvm_init_vcpu(cpu, &error_fatal);
kvm_init_cpu_signals(cpu);
/* signal CPU creation */
cpu_thread_signal_created(cpu);
qemu_guest_random_seed_thread_part2(cpu->random_seed);
do {
if (cpu_can_run(cpu)) {
r = kvm_cpu_exec(cpu);
if (r == EXCP_DEBUG) {
cpu_handle_guest_debug(cpu);
}
}
qemu_wait_io_event(cpu);
} while (!cpu->unplug || cpu_can_run(cpu));
kvm_destroy_vcpu(cpu);
cpu_thread_signal_destroyed(cpu);
qemu_mutex_unlock_iothread();
rcu_unregister_thread();
return NULL;
}
static void kvm_start_vcpu_thread(CPUState *cpu)
{
char thread_name[VCPU_THREAD_NAME_SIZE];
cpu->thread = g_malloc0(sizeof(QemuThread));
cpu->halt_cond = g_malloc0(sizeof(QemuCond));
qemu_cond_init(cpu->halt_cond);
snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/KVM",
cpu->cpu_index);
qemu_thread_create(cpu->thread, thread_name, kvm_vcpu_thread_fn,
cpu, QEMU_THREAD_JOINABLE);
}
static bool kvm_vcpu_thread_is_idle(CPUState *cpu)
{
return !kvm_halt_in_kernel();
}
static bool kvm_cpus_are_resettable(void)
{
return !kvm_enabled() || kvm_cpu_check_are_resettable();
}
static void kvm_accel_ops_class_init(ObjectClass *oc, void *data)
{
AccelOpsClass *ops = ACCEL_OPS_CLASS(oc);
ops->create_vcpu_thread = kvm_start_vcpu_thread;
ops->cpu_thread_is_idle = kvm_vcpu_thread_is_idle;
ops->cpus_are_resettable = kvm_cpus_are_resettable;
ops->synchronize_post_reset = kvm_cpu_synchronize_post_reset;
ops->synchronize_post_init = kvm_cpu_synchronize_post_init;
ops->synchronize_state = kvm_cpu_synchronize_state;
ops->synchronize_pre_loadvm = kvm_cpu_synchronize_pre_loadvm;
#ifdef KVM_CAP_SET_GUEST_DEBUG
ops->supports_guest_debug = kvm_supports_guest_debug;
ops->insert_breakpoint = kvm_insert_breakpoint;
ops->remove_breakpoint = kvm_remove_breakpoint;
ops->remove_all_breakpoints = kvm_remove_all_breakpoints;
#endif
}
static const TypeInfo kvm_accel_ops_type = {
.name = ACCEL_OPS_NAME("kvm"),
.parent = TYPE_ACCEL_OPS,
.class_init = kvm_accel_ops_class_init,
.abstract = true,
};
static void kvm_accel_ops_register_types(void)
{
type_register_static(&kvm_accel_ops_type);
}
type_init(kvm_accel_ops_register_types);

4082
accel/kvm/kvm-all.c Normal file

File diff suppressed because it is too large

26
accel/kvm/kvm-cpus.h Normal file
View File

@ -0,0 +1,26 @@
/*
* Accelerator CPUS Interface
*
* Copyright 2020 SUSE LLC
*
* This work is licensed under the terms of the GNU GPL, version 2 or later.
* See the COPYING file in the top-level directory.
*/
#ifndef KVM_CPUS_H
#define KVM_CPUS_H
#include "sysemu/cpus.h"
int kvm_init_vcpu(CPUState *cpu, Error **errp);
int kvm_cpu_exec(CPUState *cpu);
void kvm_destroy_vcpu(CPUState *cpu);
void kvm_cpu_synchronize_post_reset(CPUState *cpu);
void kvm_cpu_synchronize_post_init(CPUState *cpu);
void kvm_cpu_synchronize_pre_loadvm(CPUState *cpu);
bool kvm_supports_guest_debug(void);
int kvm_insert_breakpoint(CPUState *cpu, int type, hwaddr addr, hwaddr len);
int kvm_remove_breakpoint(CPUState *cpu, int type, hwaddr addr, hwaddr len);
void kvm_remove_all_breakpoints(CPUState *cpu);
#endif /* KVM_CPUS_H */

7
accel/kvm/meson.build Normal file
View File

@ -0,0 +1,7 @@
kvm_ss = ss.source_set()
kvm_ss.add(files(
'kvm-all.c',
'kvm-accel-ops.c',
))
specific_ss.add_all(when: 'CONFIG_KVM', if_true: kvm_ss)

28
accel/kvm/trace-events Normal file
View File

@ -0,0 +1,28 @@
# See docs/devel/tracing.rst for syntax documentation.
# kvm-all.c
kvm_ioctl(int type, void *arg) "type 0x%x, arg %p"
kvm_vm_ioctl(int type, void *arg) "type 0x%x, arg %p"
kvm_vcpu_ioctl(int cpu_index, int type, void *arg) "cpu_index %d, type 0x%x, arg %p"
kvm_run_exit(int cpu_index, uint32_t reason) "cpu_index %d, reason %d"
kvm_device_ioctl(int fd, int type, void *arg) "dev fd %d, type 0x%x, arg %p"
kvm_failed_reg_get(uint64_t id, const char *msg) "Warning: Unable to retrieve ONEREG %" PRIu64 " from KVM: %s"
kvm_failed_reg_set(uint64_t id, const char *msg) "Warning: Unable to set ONEREG %" PRIu64 " to KVM: %s"
kvm_init_vcpu(int cpu_index, unsigned long arch_cpu_id) "index: %d id: %lu"
kvm_irqchip_commit_routes(void) ""
kvm_irqchip_add_msi_route(char *name, int vector, int virq) "dev %s vector %d virq %d"
kvm_irqchip_update_msi_route(int virq) "Updating MSI route virq=%d"
kvm_irqchip_release_virq(int virq) "virq %d"
kvm_set_ioeventfd_mmio(int fd, uint64_t addr, uint32_t val, bool assign, uint32_t size, bool datamatch) "fd: %d @0x%" PRIx64 " val=0x%x assign: %d size: %d match: %d"
kvm_set_ioeventfd_pio(int fd, uint16_t addr, uint32_t val, bool assign, uint32_t size, bool datamatch) "fd: %d @0x%x val=0x%x assign: %d size: %d match: %d"
kvm_set_user_memory(uint32_t slot, uint32_t flags, uint64_t guest_phys_addr, uint64_t memory_size, uint64_t userspace_addr, int ret) "Slot#%d flags=0x%x gpa=0x%"PRIx64 " size=0x%"PRIx64 " ua=0x%"PRIx64 " ret=%d"
kvm_clear_dirty_log(uint32_t slot, uint64_t start, uint32_t size) "slot#%"PRId32" start 0x%"PRIx64" size 0x%"PRIx32
kvm_resample_fd_notify(int gsi) "gsi %d"
kvm_dirty_ring_full(int id) "vcpu %d"
kvm_dirty_ring_reap_vcpu(int id) "vcpu %d"
kvm_dirty_ring_page(int vcpu, uint32_t slot, uint64_t offset) "vcpu %d fetch %"PRIu32" offset 0x%"PRIx64
kvm_dirty_ring_reaper(const char *s) "%s"
kvm_dirty_ring_reap(uint64_t count, int64_t t) "reaped %"PRIu64" pages (took %"PRIi64" us)"
kvm_dirty_ring_reaper_kick(const char *reason) "%s"
kvm_dirty_ring_flush(int finished) "%d"

1
accel/kvm/trace.h Normal file
View File

@ -0,0 +1 @@
#include "trace/trace-accel_kvm.h"

20
accel/meson.build Normal file
View File

@ -0,0 +1,20 @@
specific_ss.add(files('accel-common.c'))
softmmu_ss.add(files('accel-softmmu.c'))
user_ss.add(files('accel-user.c'))
subdir('tcg')
if have_system
subdir('hvf')
subdir('qtest')
subdir('kvm')
subdir('xen')
subdir('stubs')
endif
dummy_ss = ss.source_set()
dummy_ss.add(files(
'dummy-cpus.c',
))
specific_ss.add_all(when: ['CONFIG_SOFTMMU'], if_true: dummy_ss)
specific_ss.add_all(when: ['CONFIG_XEN'], if_true: dummy_ss)

1
accel/qtest/meson.build Normal file
View File

@ -0,0 +1 @@
qtest_module_ss.add(when: ['CONFIG_SOFTMMU'], if_true: files('qtest.c'))

72
accel/qtest/qtest.c Normal file
View File

@ -0,0 +1,72 @@
/*
* QTest accelerator code
*
* Copyright IBM, Corp. 2011
*
* Authors:
* Anthony Liguori <aliguori@us.ibm.com>
*
* This work is licensed under the terms of the GNU GPL, version 2 or later.
* See the COPYING file in the top-level directory.
*
*/
#include "qemu/osdep.h"
#include "qemu/rcu.h"
#include "qapi/error.h"
#include "qemu/module.h"
#include "qemu/option.h"
#include "qemu/config-file.h"
#include "qemu/accel.h"
#include "sysemu/qtest.h"
#include "sysemu/cpus.h"
#include "qemu/guest-random.h"
#include "qemu/main-loop.h"
#include "hw/core/cpu.h"
static int qtest_init_accel(MachineState *ms)
{
return 0;
}
static void qtest_accel_class_init(ObjectClass *oc, void *data)
{
AccelClass *ac = ACCEL_CLASS(oc);
ac->name = "QTest";
ac->init_machine = qtest_init_accel;
ac->allowed = &qtest_allowed;
}
#define TYPE_QTEST_ACCEL ACCEL_CLASS_NAME("qtest")
static const TypeInfo qtest_accel_type = {
.name = TYPE_QTEST_ACCEL,
.parent = TYPE_ACCEL,
.class_init = qtest_accel_class_init,
};
module_obj(TYPE_QTEST_ACCEL);
static void qtest_accel_ops_class_init(ObjectClass *oc, void *data)
{
AccelOpsClass *ops = ACCEL_OPS_CLASS(oc);
ops->create_vcpu_thread = dummy_start_vcpu_thread;
ops->get_virtual_clock = qtest_get_virtual_clock;
};
static const TypeInfo qtest_accel_ops_type = {
.name = ACCEL_OPS_NAME("qtest"),
.parent = TYPE_ACCEL_OPS,
.class_init = qtest_accel_ops_class_init,
.abstract = true,
};
module_obj(ACCEL_OPS_NAME("qtest"));
static void qtest_type_init(void)
{
type_register_static(&qtest_accel_type);
type_register_static(&qtest_accel_ops_type);
}
type_init(qtest_type_init);

24
accel/stubs/hax-stub.c Normal file
View File

@ -0,0 +1,24 @@
/*
* QEMU HAXM support
*
* Copyright (c) 2015, Intel Corporation
*
* Copyright 2016 Google, Inc.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* See the COPYING file in the top-level directory.
*
*/
#include "qemu/osdep.h"
#include "sysemu/hax.h"
bool hax_allowed;
int hax_sync_vcpus(void)
{
return 0;
}

134
accel/stubs/kvm-stub.c Normal file
View File

@ -0,0 +1,134 @@
/*
* QEMU KVM stub
*
* Copyright Red Hat, Inc. 2010
*
* Author: Paolo Bonzini <pbonzini@redhat.com>
*
* This work is licensed under the terms of the GNU GPL, version 2 or later.
* See the COPYING file in the top-level directory.
*
*/
#include "qemu/osdep.h"
#include "sysemu/kvm.h"
#include "hw/pci/msi.h"
KVMState *kvm_state;
bool kvm_kernel_irqchip;
bool kvm_async_interrupts_allowed;
bool kvm_eventfds_allowed;
bool kvm_irqfds_allowed;
bool kvm_resamplefds_allowed;
bool kvm_msi_via_irqfd_allowed;
bool kvm_gsi_routing_allowed;
bool kvm_gsi_direct_mapping;
bool kvm_allowed;
bool kvm_readonly_mem_allowed;
bool kvm_ioeventfd_any_length_allowed;
bool kvm_msi_use_devid;
void kvm_flush_coalesced_mmio_buffer(void)
{
}
void kvm_cpu_synchronize_state(CPUState *cpu)
{
}
bool kvm_has_sync_mmu(void)
{
return false;
}
int kvm_has_many_ioeventfds(void)
{
return 0;
}
int kvm_on_sigbus_vcpu(CPUState *cpu, int code, void *addr)
{
return 1;
}
int kvm_on_sigbus(int code, void *addr)
{
return 1;
}
int kvm_irqchip_add_msi_route(KVMRouteChange *c, int vector, PCIDevice *dev)
{
return -ENOSYS;
}
void kvm_init_irq_routing(KVMState *s)
{
}
void kvm_irqchip_release_virq(KVMState *s, int virq)
{
}
int kvm_irqchip_update_msi_route(KVMState *s, int virq, MSIMessage msg,
PCIDevice *dev)
{
return -ENOSYS;
}
void kvm_irqchip_commit_routes(KVMState *s)
{
}
void kvm_irqchip_add_change_notifier(Notifier *n)
{
}
void kvm_irqchip_remove_change_notifier(Notifier *n)
{
}
void kvm_irqchip_change_notify(void)
{
}
int kvm_irqchip_add_adapter_route(KVMState *s, AdapterInfo *adapter)
{
return -ENOSYS;
}
int kvm_irqchip_add_irqfd_notifier_gsi(KVMState *s, EventNotifier *n,
EventNotifier *rn, int virq)
{
return -ENOSYS;
}
int kvm_irqchip_remove_irqfd_notifier_gsi(KVMState *s, EventNotifier *n,
int virq)
{
return -ENOSYS;
}
bool kvm_has_free_slot(MachineState *ms)
{
return false;
}
void kvm_init_cpu_signals(CPUState *cpu)
{
abort();
}
bool kvm_arm_supports_user_irq(void)
{
return false;
}
bool kvm_dirty_ring_enabled(void)
{
return false;
}
uint32_t kvm_dirty_ring_size(void)
{
return 0;
}

7
accel/stubs/meson.build Normal file
View File

@ -0,0 +1,7 @@
sysemu_stubs_ss = ss.source_set()
sysemu_stubs_ss.add(when: 'CONFIG_HAX', if_false: files('hax-stub.c'))
sysemu_stubs_ss.add(when: 'CONFIG_XEN', if_false: files('xen-stub.c'))
sysemu_stubs_ss.add(when: 'CONFIG_KVM', if_false: files('kvm-stub.c'))
sysemu_stubs_ss.add(when: 'CONFIG_TCG', if_false: files('tcg-stub.c'))
specific_ss.add_all(when: ['CONFIG_SOFTMMU'], if_true: sysemu_stubs_ss)

50
accel/stubs/tcg-stub.c Normal file
View File

@ -0,0 +1,50 @@
/*
* QEMU TCG accelerator stub
*
* Copyright Red Hat, Inc. 2013
*
* Author: Paolo Bonzini <pbonzini@redhat.com>
*
* This work is licensed under the terms of the GNU GPL, version 2 or later.
* See the COPYING file in the top-level directory.
*
*/
#include "qemu/osdep.h"
#include "exec/exec-all.h"
void tb_flush(CPUState *cpu)
{
}
void tlb_set_dirty(CPUState *cpu, target_ulong vaddr)
{
}
void tcg_flush_jmp_cache(CPUState *cpu)
{
}
int probe_access_flags(CPUArchState *env, target_ulong addr,
MMUAccessType access_type, int mmu_idx,
bool nonfault, void **phost, uintptr_t retaddr)
{
g_assert_not_reached();
}
void *probe_access(CPUArchState *env, target_ulong addr, int size,
MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
{
/* Handled by hardware accelerator. */
g_assert_not_reached();
}
G_NORETURN void cpu_loop_exit(CPUState *cpu)
{
g_assert_not_reached();
}
G_NORETURN void cpu_loop_exit_restore(CPUState *cpu, uintptr_t pc)
{
g_assert_not_reached();
}

16
accel/stubs/xen-stub.c Normal file
View File

@ -0,0 +1,16 @@
/*
* Copyright (C) 2014 Citrix Systems UK Ltd.
*
* This work is licensed under the terms of the GNU GPL, version 2 or later.
* See the COPYING file in the top-level directory.
*/
#include "qemu/osdep.h"
#include "sysemu/xen.h"
#include "qapi/qapi-commands-migration.h"
bool xen_allowed;
void qmp_xen_set_global_dirty_log(bool enable, Error **errp)
{
}

104
accel/tcg/atomic_common.c.inc Normal file
View File

@ -0,0 +1,104 @@
/*
* Common Atomic Helper Functions
*
* This file should be included before the various instantiations of
* the atomic_template.h helpers.
*
* Copyright (c) 2019 Linaro
* Written by Alex Bennée <alex.bennee@linaro.org>
*
* SPDX-License-Identifier: GPL-2.0-or-later
*
* This work is licensed under the terms of the GNU GPL, version 2 or later.
* See the COPYING file in the top-level directory.
*/
static void atomic_trace_rmw_post(CPUArchState *env, target_ulong addr,
MemOpIdx oi)
{
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_RW);
}
#if HAVE_ATOMIC128
static void atomic_trace_ld_post(CPUArchState *env, target_ulong addr,
MemOpIdx oi)
{
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
}
static void atomic_trace_st_post(CPUArchState *env, target_ulong addr,
MemOpIdx oi)
{
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}
#endif
/*
* Atomic helpers callable from TCG.
* These have a common interface and all defer to cpu_atomic_*
* using the host return address from GETPC().
*/
#define CMPXCHG_HELPER(OP, TYPE) \
TYPE HELPER(atomic_##OP)(CPUArchState *env, target_ulong addr, \
TYPE oldv, TYPE newv, uint32_t oi) \
{ return cpu_atomic_##OP##_mmu(env, addr, oldv, newv, oi, GETPC()); }
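/*
 * For example, CMPXCHG_HELPER(cmpxchgl_le, uint32_t) expands to roughly:
 *
 *   uint32_t HELPER(atomic_cmpxchgl_le)(CPUArchState *env, target_ulong addr,
 *                                       uint32_t oldv, uint32_t newv,
 *                                       uint32_t oi)
 *   { return cpu_atomic_cmpxchgl_le_mmu(env, addr, oldv, newv, oi, GETPC()); }
 */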
CMPXCHG_HELPER(cmpxchgb, uint32_t)
CMPXCHG_HELPER(cmpxchgw_be, uint32_t)
CMPXCHG_HELPER(cmpxchgw_le, uint32_t)
CMPXCHG_HELPER(cmpxchgl_be, uint32_t)
CMPXCHG_HELPER(cmpxchgl_le, uint32_t)
#ifdef CONFIG_ATOMIC64
CMPXCHG_HELPER(cmpxchgq_be, uint64_t)
CMPXCHG_HELPER(cmpxchgq_le, uint64_t)
#endif
#undef CMPXCHG_HELPER
#define ATOMIC_HELPER(OP, TYPE) \
TYPE HELPER(glue(atomic_,OP))(CPUArchState *env, target_ulong addr, \
TYPE val, uint32_t oi) \
{ return glue(glue(cpu_atomic_,OP),_mmu)(env, addr, val, oi, GETPC()); }
#ifdef CONFIG_ATOMIC64
#define GEN_ATOMIC_HELPERS(OP) \
ATOMIC_HELPER(glue(OP,b), uint32_t) \
ATOMIC_HELPER(glue(OP,w_be), uint32_t) \
ATOMIC_HELPER(glue(OP,w_le), uint32_t) \
ATOMIC_HELPER(glue(OP,l_be), uint32_t) \
ATOMIC_HELPER(glue(OP,l_le), uint32_t) \
ATOMIC_HELPER(glue(OP,q_be), uint64_t) \
ATOMIC_HELPER(glue(OP,q_le), uint64_t)
#else
#define GEN_ATOMIC_HELPERS(OP) \
ATOMIC_HELPER(glue(OP,b), uint32_t) \
ATOMIC_HELPER(glue(OP,w_be), uint32_t) \
ATOMIC_HELPER(glue(OP,w_le), uint32_t) \
ATOMIC_HELPER(glue(OP,l_be), uint32_t) \
ATOMIC_HELPER(glue(OP,l_le), uint32_t)
#endif
GEN_ATOMIC_HELPERS(fetch_add)
GEN_ATOMIC_HELPERS(fetch_and)
GEN_ATOMIC_HELPERS(fetch_or)
GEN_ATOMIC_HELPERS(fetch_xor)
GEN_ATOMIC_HELPERS(fetch_smin)
GEN_ATOMIC_HELPERS(fetch_umin)
GEN_ATOMIC_HELPERS(fetch_smax)
GEN_ATOMIC_HELPERS(fetch_umax)
GEN_ATOMIC_HELPERS(add_fetch)
GEN_ATOMIC_HELPERS(and_fetch)
GEN_ATOMIC_HELPERS(or_fetch)
GEN_ATOMIC_HELPERS(xor_fetch)
GEN_ATOMIC_HELPERS(smin_fetch)
GEN_ATOMIC_HELPERS(umin_fetch)
GEN_ATOMIC_HELPERS(smax_fetch)
GEN_ATOMIC_HELPERS(umax_fetch)
GEN_ATOMIC_HELPERS(xchg)
#undef ATOMIC_HELPER
#undef GEN_ATOMIC_HELPERS

340
accel/tcg/atomic_template.h Normal file
View File

@ -0,0 +1,340 @@
/*
* Atomic helper templates
* Included from tcg-runtime.c and cputlb.c.
*
* Copyright (c) 2016 Red Hat, Inc
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
#include "qemu/plugin.h"
#if DATA_SIZE == 16
# define SUFFIX o
# define DATA_TYPE Int128
# define BSWAP bswap128
# define SHIFT 4
#elif DATA_SIZE == 8
# define SUFFIX q
# define DATA_TYPE aligned_uint64_t
# define SDATA_TYPE aligned_int64_t
# define BSWAP bswap64
# define SHIFT 3
#elif DATA_SIZE == 4
# define SUFFIX l
# define DATA_TYPE uint32_t
# define SDATA_TYPE int32_t
# define BSWAP bswap32
# define SHIFT 2
#elif DATA_SIZE == 2
# define SUFFIX w
# define DATA_TYPE uint16_t
# define SDATA_TYPE int16_t
# define BSWAP bswap16
# define SHIFT 1
#elif DATA_SIZE == 1
# define SUFFIX b
# define DATA_TYPE uint8_t
# define SDATA_TYPE int8_t
# define BSWAP
# define SHIFT 0
#else
# error unsupported data size
#endif
#if DATA_SIZE >= 4
# define ABI_TYPE DATA_TYPE
#else
# define ABI_TYPE uint32_t
#endif
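/*
 * For example, instantiating this template with DATA_SIZE == 4 picks
 * SUFFIX 'l' and DATA_TYPE uint32_t, so the including file generates the
 * 32-bit helpers; the exact helper names are supplied by that file via
 * ATOMIC_NAME (e.g. the cpu_atomic_*l_le_mmu family when included from
 * cputlb.c).
 */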
/* Define host-endian atomic operations. Note that END is used within
the ATOMIC_NAME macro, and redefined below. */
#if DATA_SIZE == 1
# define END
#elif HOST_BIG_ENDIAN
# define END _be
#else
# define END _le
#endif
ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr,
ABI_TYPE cmpv, ABI_TYPE newv,
MemOpIdx oi, uintptr_t retaddr)
{
DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
PAGE_READ | PAGE_WRITE, retaddr);
DATA_TYPE ret;
#if DATA_SIZE == 16
ret = atomic16_cmpxchg(haddr, cmpv, newv);
#else
ret = qatomic_cmpxchg__nocheck(haddr, cmpv, newv);
#endif
ATOMIC_MMU_CLEANUP;
atomic_trace_rmw_post(env, addr, oi);
return ret;
}
#if DATA_SIZE >= 16
#if HAVE_ATOMIC128
ABI_TYPE ATOMIC_NAME(ld)(CPUArchState *env, target_ulong addr,
MemOpIdx oi, uintptr_t retaddr)
{
DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
PAGE_READ, retaddr);
DATA_TYPE val;
val = atomic16_read(haddr);
ATOMIC_MMU_CLEANUP;
atomic_trace_ld_post(env, addr, oi);
return val;
}
void ATOMIC_NAME(st)(CPUArchState *env, target_ulong addr, ABI_TYPE val,
MemOpIdx oi, uintptr_t retaddr)
{
DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
PAGE_WRITE, retaddr);
atomic16_set(haddr, val);
ATOMIC_MMU_CLEANUP;
atomic_trace_st_post(env, addr, oi);
}
#endif
#else
ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, target_ulong addr, ABI_TYPE val,
MemOpIdx oi, uintptr_t retaddr)
{
DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
PAGE_READ | PAGE_WRITE, retaddr);
DATA_TYPE ret;
ret = qatomic_xchg__nocheck(haddr, val);
ATOMIC_MMU_CLEANUP;
atomic_trace_rmw_post(env, addr, oi);
return ret;
}
#define GEN_ATOMIC_HELPER(X) \
ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \
ABI_TYPE val, MemOpIdx oi, uintptr_t retaddr) \
{ \
DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, \
PAGE_READ | PAGE_WRITE, retaddr); \
DATA_TYPE ret; \
ret = qatomic_##X(haddr, val); \
ATOMIC_MMU_CLEANUP; \
atomic_trace_rmw_post(env, addr, oi); \
return ret; \
}
GEN_ATOMIC_HELPER(fetch_add)
GEN_ATOMIC_HELPER(fetch_and)
GEN_ATOMIC_HELPER(fetch_or)
GEN_ATOMIC_HELPER(fetch_xor)
GEN_ATOMIC_HELPER(add_fetch)
GEN_ATOMIC_HELPER(and_fetch)
GEN_ATOMIC_HELPER(or_fetch)
GEN_ATOMIC_HELPER(xor_fetch)
#undef GEN_ATOMIC_HELPER
/*
* These helpers are, as a whole, full barriers. Within the helper,
* the leading barrier is explicit and the trailing barrier is within
* cmpxchg primitive.
*
* Trace this load + RMW loop as a single RMW op. This way, regardless
* of CF_PARALLEL's value, we'll trace just a read and a write.
*/
#define GEN_ATOMIC_HELPER_FN(X, FN, XDATA_TYPE, RET) \
ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \
ABI_TYPE xval, MemOpIdx oi, uintptr_t retaddr) \
{ \
XDATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, \
PAGE_READ | PAGE_WRITE, retaddr); \
XDATA_TYPE cmp, old, new, val = xval; \
smp_mb(); \
cmp = qatomic_read__nocheck(haddr); \
do { \
old = cmp; new = FN(old, val); \
cmp = qatomic_cmpxchg__nocheck(haddr, old, new); \
} while (cmp != old); \
ATOMIC_MMU_CLEANUP; \
atomic_trace_rmw_post(env, addr, oi); \
return RET; \
}
GEN_ATOMIC_HELPER_FN(fetch_smin, MIN, SDATA_TYPE, old)
GEN_ATOMIC_HELPER_FN(fetch_umin, MIN, DATA_TYPE, old)
GEN_ATOMIC_HELPER_FN(fetch_smax, MAX, SDATA_TYPE, old)
GEN_ATOMIC_HELPER_FN(fetch_umax, MAX, DATA_TYPE, old)
GEN_ATOMIC_HELPER_FN(smin_fetch, MIN, SDATA_TYPE, new)
GEN_ATOMIC_HELPER_FN(umin_fetch, MIN, DATA_TYPE, new)
GEN_ATOMIC_HELPER_FN(smax_fetch, MAX, SDATA_TYPE, new)
GEN_ATOMIC_HELPER_FN(umax_fetch, MAX, DATA_TYPE, new)
#undef GEN_ATOMIC_HELPER_FN
#endif /* DATA SIZE >= 16 */
#undef END
#if DATA_SIZE > 1
/* Define reverse-host-endian atomic operations. Note that END is used
within the ATOMIC_NAME macro. */
#if HOST_BIG_ENDIAN
# define END _le
#else
# define END _be
#endif
ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr,
ABI_TYPE cmpv, ABI_TYPE newv,
MemOpIdx oi, uintptr_t retaddr)
{
DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
PAGE_READ | PAGE_WRITE, retaddr);
DATA_TYPE ret;
#if DATA_SIZE == 16
ret = atomic16_cmpxchg(haddr, BSWAP(cmpv), BSWAP(newv));
#else
ret = qatomic_cmpxchg__nocheck(haddr, BSWAP(cmpv), BSWAP(newv));
#endif
ATOMIC_MMU_CLEANUP;
atomic_trace_rmw_post(env, addr, oi);
return BSWAP(ret);
}
#if DATA_SIZE >= 16
#if HAVE_ATOMIC128
ABI_TYPE ATOMIC_NAME(ld)(CPUArchState *env, target_ulong addr,
MemOpIdx oi, uintptr_t retaddr)
{
DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
PAGE_READ, retaddr);
DATA_TYPE val;
val = atomic16_read(haddr);
ATOMIC_MMU_CLEANUP;
atomic_trace_ld_post(env, addr, oi);
return BSWAP(val);
}
void ATOMIC_NAME(st)(CPUArchState *env, target_ulong addr, ABI_TYPE val,
MemOpIdx oi, uintptr_t retaddr)
{
DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
PAGE_WRITE, retaddr);
val = BSWAP(val);
atomic16_set(haddr, val);
ATOMIC_MMU_CLEANUP;
atomic_trace_st_post(env, addr, oi);
}
#endif
#else
ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, target_ulong addr, ABI_TYPE val,
MemOpIdx oi, uintptr_t retaddr)
{
DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
PAGE_READ | PAGE_WRITE, retaddr);
ABI_TYPE ret;
ret = qatomic_xchg__nocheck(haddr, BSWAP(val));
ATOMIC_MMU_CLEANUP;
atomic_trace_rmw_post(env, addr, oi);
return BSWAP(ret);
}
#define GEN_ATOMIC_HELPER(X) \
ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \
ABI_TYPE val, MemOpIdx oi, uintptr_t retaddr) \
{ \
DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, \
PAGE_READ | PAGE_WRITE, retaddr); \
DATA_TYPE ret; \
ret = qatomic_##X(haddr, BSWAP(val)); \
ATOMIC_MMU_CLEANUP; \
atomic_trace_rmw_post(env, addr, oi); \
return BSWAP(ret); \
}
GEN_ATOMIC_HELPER(fetch_and)
GEN_ATOMIC_HELPER(fetch_or)
GEN_ATOMIC_HELPER(fetch_xor)
GEN_ATOMIC_HELPER(and_fetch)
GEN_ATOMIC_HELPER(or_fetch)
GEN_ATOMIC_HELPER(xor_fetch)
#undef GEN_ATOMIC_HELPER
/* These helpers are, as a whole, full barriers. Within the helper,
* the leading barrier is explicit and the trailing barrier is within
* cmpxchg primitive.
*
* Trace this load + RMW loop as a single RMW op. This way, regardless
* of CF_PARALLEL's value, we'll trace just a read and a write.
*/
#define GEN_ATOMIC_HELPER_FN(X, FN, XDATA_TYPE, RET) \
ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \
ABI_TYPE xval, MemOpIdx oi, uintptr_t retaddr) \
{ \
XDATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, \
PAGE_READ | PAGE_WRITE, retaddr); \
XDATA_TYPE ldo, ldn, old, new, val = xval; \
smp_mb(); \
ldn = qatomic_read__nocheck(haddr); \
do { \
ldo = ldn; old = BSWAP(ldo); new = FN(old, val); \
ldn = qatomic_cmpxchg__nocheck(haddr, ldo, BSWAP(new)); \
} while (ldo != ldn); \
ATOMIC_MMU_CLEANUP; \
atomic_trace_rmw_post(env, addr, oi); \
return RET; \
}
GEN_ATOMIC_HELPER_FN(fetch_smin, MIN, SDATA_TYPE, old)
GEN_ATOMIC_HELPER_FN(fetch_umin, MIN, DATA_TYPE, old)
GEN_ATOMIC_HELPER_FN(fetch_smax, MAX, SDATA_TYPE, old)
GEN_ATOMIC_HELPER_FN(fetch_umax, MAX, DATA_TYPE, old)
GEN_ATOMIC_HELPER_FN(smin_fetch, MIN, SDATA_TYPE, new)
GEN_ATOMIC_HELPER_FN(umin_fetch, MIN, DATA_TYPE, new)
GEN_ATOMIC_HELPER_FN(smax_fetch, MAX, SDATA_TYPE, new)
GEN_ATOMIC_HELPER_FN(umax_fetch, MAX, DATA_TYPE, new)
/* Note that for addition, we need to use a separate cmpxchg loop instead
of bswaps for the reverse-host-endian helpers. */
#define ADD(X, Y) (X + Y)
GEN_ATOMIC_HELPER_FN(fetch_add, ADD, DATA_TYPE, old)
GEN_ATOMIC_HELPER_FN(add_fetch, ADD, DATA_TYPE, new)
#undef ADD
#undef GEN_ATOMIC_HELPER_FN
#endif /* DATA_SIZE >= 16 */
#undef END
#endif /* DATA_SIZE > 1 */
#undef BSWAP
#undef ABI_TYPE
#undef DATA_TYPE
#undef SDATA_TYPE
#undef SUFFIX
#undef DATA_SIZE
#undef SHIFT

83
accel/tcg/cpu-exec-common.c Normal file
View File

@ -0,0 +1,83 @@
/*
* emulator main execution loop
*
* Copyright (c) 2003-2005 Fabrice Bellard
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
#include "qemu/osdep.h"
#include "sysemu/cpus.h"
#include "sysemu/tcg.h"
#include "exec/exec-all.h"
bool tcg_allowed;
/* exit the current TB, but without causing any exception to be raised */
void cpu_loop_exit_noexc(CPUState *cpu)
{
cpu->exception_index = -1;
cpu_loop_exit(cpu);
}
#if defined(CONFIG_SOFTMMU)
void cpu_reloading_memory_map(void)
{
if (qemu_in_vcpu_thread() && current_cpu->running) {
/* The guest can in theory prolong the RCU critical section as long
* as it feels like. The major problem with this is that because it
* can do multiple reconfigurations of the memory map within the
* critical section, we could potentially accumulate an unbounded
* collection of memory data structures awaiting reclamation.
*
* Because the only thing we're currently protecting with RCU is the
* memory data structures, it's sufficient to break the critical section
* in this callback, which we know will get called every time the
* memory map is rearranged.
*
* (If we add anything else in the system that uses RCU to protect
* its data structures, we will need to implement some other mechanism
* to force TCG CPUs to exit the critical section, at which point this
* part of this callback might become unnecessary.)
*
* This pair matches cpu_exec's rcu_read_lock()/rcu_read_unlock(), which
* only protects cpu->as->dispatch. Since we know our caller is about
* to reload it, it's safe to split the critical section.
*/
rcu_read_unlock();
rcu_read_lock();
}
}
#endif
void cpu_loop_exit(CPUState *cpu)
{
/* Undo the setting in cpu_tb_exec. */
cpu->can_do_io = 1;
siglongjmp(cpu->jmp_env, 1);
}
void cpu_loop_exit_restore(CPUState *cpu, uintptr_t pc)
{
if (pc) {
cpu_restore_state(cpu, pc);
}
cpu_loop_exit(cpu);
}
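/*
 * Used when an atomic operation cannot be executed in a parallel context:
 * EXCP_ATOMIC makes the outer execution loop replay the instruction under
 * exclusive (serialised) execution.
 */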
void cpu_loop_exit_atomic(CPUState *cpu, uintptr_t pc)
{
cpu->exception_index = EXCP_ATOMIC;
cpu_loop_exit_restore(cpu, pc);
}

1210
accel/tcg/cpu-exec.c Normal file

File diff suppressed because it is too large

2637
accel/tcg/cputlb.c Normal file

File diff suppressed because it is too large

14
accel/tcg/hmp.c Normal file
View File

@ -0,0 +1,14 @@
#include "qemu/osdep.h"
#include "qemu/error-report.h"
#include "qapi/error.h"
#include "qapi/qapi-commands-machine.h"
#include "exec/exec-all.h"
#include "monitor/monitor.h"
static void hmp_tcg_register(void)
{
monitor_register_hmp_info_hrt("jit", qmp_x_query_jit);
monitor_register_hmp_info_hrt("opcount", qmp_x_query_opcount);
}
type_init(hmp_tcg_register);

122
accel/tcg/internal.h Normal file
View File

@ -0,0 +1,122 @@
/*
* Internal execution defines for qemu
*
* Copyright (c) 2003 Fabrice Bellard
*
* SPDX-License-Identifier: LGPL-2.1-or-later
*/
#ifndef ACCEL_TCG_INTERNAL_H
#define ACCEL_TCG_INTERNAL_H
#include "exec/exec-all.h"
/*
* Access to the various translation structures needs to be serialised
* via locks for consistency. In user-mode emulation, access to the
* memory-related structures is protected with mmap_lock.
* In !user-mode we use per-page locks.
*/
#ifdef CONFIG_SOFTMMU
#define assert_memory_lock()
#else
#define assert_memory_lock() tcg_debug_assert(have_mmap_lock())
#endif
typedef struct PageDesc {
/* list of TBs intersecting this ram page */
uintptr_t first_tb;
#ifdef CONFIG_USER_ONLY
unsigned long flags;
void *target_data;
#endif
#ifdef CONFIG_SOFTMMU
QemuSpin lock;
#endif
} PageDesc;
/* Size of the L2 (and L3, etc) page tables. */
#define V_L2_BITS 10
#define V_L2_SIZE (1 << V_L2_BITS)
/*
* L1 Mapping properties
*/
extern int v_l1_size;
extern int v_l1_shift;
extern int v_l2_levels;
/*
* The bottom level has pointers to PageDesc, and is indexed by
* anything from 4 to (V_L2_BITS + 3) bits, depending on target page size.
*/
#define V_L1_MIN_BITS 4
#define V_L1_MAX_BITS (V_L2_BITS + 3)
#define V_L1_MAX_SIZE (1 << V_L1_MAX_BITS)
extern void *l1_map[V_L1_MAX_SIZE];
PageDesc *page_find_alloc(tb_page_addr_t index, bool alloc);
static inline PageDesc *page_find(tb_page_addr_t index)
{
return page_find_alloc(index, false);
}
/* list iterators for lists of tagged pointers in TranslationBlock */
#define TB_FOR_EACH_TAGGED(head, tb, n, field) \
for (n = (head) & 1, tb = (TranslationBlock *)((head) & ~1); \
tb; tb = (TranslationBlock *)tb->field[n], n = (uintptr_t)tb & 1, \
tb = (TranslationBlock *)((uintptr_t)tb & ~1))
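/*
 * The low bit of each tagged pointer selects which of the element's two
 * link fields (e.g. page_next[0]/[1] for a TB that spans two pages)
 * continues the list.
 */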
#define PAGE_FOR_EACH_TB(pagedesc, tb, n) \
TB_FOR_EACH_TAGGED((pagedesc)->first_tb, tb, n, page_next)
#define TB_FOR_EACH_JMP(head_tb, tb, n) \
TB_FOR_EACH_TAGGED((head_tb)->jmp_list_head, tb, n, jmp_list_next)
/* In user-mode page locks aren't used; mmap_lock is enough */
#ifdef CONFIG_USER_ONLY
#define assert_page_locked(pd) tcg_debug_assert(have_mmap_lock())
static inline void page_lock(PageDesc *pd) { }
static inline void page_unlock(PageDesc *pd) { }
#else
#ifdef CONFIG_DEBUG_TCG
void do_assert_page_locked(const PageDesc *pd, const char *file, int line);
#define assert_page_locked(pd) do_assert_page_locked(pd, __FILE__, __LINE__)
#else
#define assert_page_locked(pd)
#endif
void page_lock(PageDesc *pd);
void page_unlock(PageDesc *pd);
#endif
#if !defined(CONFIG_USER_ONLY) && defined(CONFIG_DEBUG_TCG)
void assert_no_pages_locked(void);
#else
static inline void assert_no_pages_locked(void) { }
#endif
TranslationBlock *tb_gen_code(CPUState *cpu, target_ulong pc,
target_ulong cs_base, uint32_t flags,
int cflags);
G_NORETURN void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr);
void page_init(void);
void tb_htable_init(void);
void tb_reset_jump(TranslationBlock *tb, int n);
TranslationBlock *tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
tb_page_addr_t phys_page2);
bool tb_invalidate_phys_page_unwind(tb_page_addr_t addr, uintptr_t pc);
void cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
uintptr_t host_pc);
/* Return the current PC from CPU, which may be cached in TB. */
static inline target_ulong log_pc(CPUState *cpu, const TranslationBlock *tb)
{
#if TARGET_TB_PCREL
return cpu->cc->get_pc(cpu);
#else
return tb_pc(tb);
#endif
}
#endif /* ACCEL_TCG_INTERNAL_H */

Some files were not shown because too many files have changed in this diff.