Compare commits: fret ... 0.7.1

No commits in common. "fret" and "0.7.1" have entirely different histories.

600 changed files with 35646 additions and 86798 deletions


@ -1,148 +0,0 @@
---
Language: Cpp
# BasedOnStyle: Google
AccessModifierOffset: -1
AlignAfterOpenBracket: Align
AlignConsecutiveAssignments: false
AlignConsecutiveDeclarations: true
AlignEscapedNewlines: Left
AlignOperands: true
AlignTrailingComments: true
AllowAllParametersOfDeclarationOnNextLine: true
AllowShortBlocksOnASingleLine: true
AllowShortCaseLabelsOnASingleLine: false
AllowShortFunctionsOnASingleLine: false
AllowShortIfStatementsOnASingleLine: true
AllowShortLoopsOnASingleLine: false
AlwaysBreakAfterDefinitionReturnType: None
AlwaysBreakAfterReturnType: None
AlwaysBreakBeforeMultilineStrings: true
AlwaysBreakTemplateDeclarations: Yes
BinPackArguments: true
BinPackParameters: true
BraceWrapping:
AfterClass: false
AfterControlStatement: false
AfterEnum: false
AfterFunction: false
AfterNamespace: false
AfterObjCDeclaration: false
AfterStruct: false
AfterUnion: false
AfterExternBlock: false
BeforeCatch: false
BeforeElse: false
IndentBraces: false
SplitEmptyFunction: true
SplitEmptyRecord: true
SplitEmptyNamespace: true
BreakBeforeBinaryOperators: None
BreakBeforeBraces: Attach
BreakBeforeInheritanceComma: false
BreakInheritanceList: BeforeColon
BreakBeforeTernaryOperators: true
BreakConstructorInitializersBeforeComma: false
BreakConstructorInitializers: BeforeColon
BreakAfterJavaFieldAnnotations: false
BreakStringLiterals: true
ColumnLimit: 80
CommentPragmas: '^ IWYU pragma:'
CompactNamespaces: false
ConstructorInitializerAllOnOneLineOrOnePerLine: true
ConstructorInitializerIndentWidth: 4
ContinuationIndentWidth: 4
Cpp11BracedListStyle: true
DerivePointerAlignment: false
DisableFormat: false
ExperimentalAutoDetectBinPacking: false
FixNamespaceComments: true
ForEachMacros:
- foreach
- Q_FOREACH
- BOOST_FOREACH
IncludeBlocks: Preserve
IncludeCategories:
- Regex: '^<ext/.*\.h>'
Priority: 2
- Regex: '^<.*\.h>'
Priority: 1
- Regex: '^<.*'
Priority: 2
- Regex: '.*'
Priority: 3
IncludeIsMainRegex: '([-_](test|unittest))?$'
IndentCaseLabels: true
IndentPPDirectives: BeforeHash
IndentWidth: 2
IndentWrappedFunctionNames: false
JavaScriptQuotes: Leave
JavaScriptWrapImports: true
KeepEmptyLinesAtTheStartOfBlocks: false
MacroBlockBegin: ''
MacroBlockEnd: ''
MaxEmptyLinesToKeep: 1
NamespaceIndentation: None
ObjCBinPackProtocolList: Never
ObjCBlockIndentWidth: 2
ObjCSpaceAfterProperty: false
ObjCSpaceBeforeProtocolList: true
PenaltyBreakAssignment: 2
PenaltyBreakBeforeFirstCallParameter: 1
PenaltyBreakComment: 300
PenaltyBreakFirstLessLess: 120
PenaltyBreakString: 1000
PenaltyBreakTemplateDeclaration: 10
PenaltyExcessCharacter: 1000000
PenaltyReturnTypeOnItsOwnLine: 200
PointerAlignment: Right
RawStringFormats:
- Language: Cpp
Delimiters:
- cc
- CC
- cpp
- Cpp
- CPP
- 'c++'
- 'C++'
CanonicalDelimiter: ''
BasedOnStyle: google
- Language: TextProto
Delimiters:
- pb
- PB
- proto
- PROTO
EnclosingFunctions:
- EqualsProto
- EquivToProto
- PARSE_PARTIAL_TEXT_PROTO
- PARSE_TEST_PROTO
- PARSE_TEXT_PROTO
- ParseTextOrDie
- ParseTextProtoOrDie
CanonicalDelimiter: ''
BasedOnStyle: google
ReflowComments: true
SortIncludes: false
SortUsingDeclarations: true
SpaceAfterCStyleCast: false
SpaceAfterTemplateKeyword: true
SpaceBeforeAssignmentOperators: true
SpaceBeforeCpp11BracedList: false
SpaceBeforeCtorInitializerColon: true
SpaceBeforeInheritanceColon: true
SpaceBeforeParens: ControlStatements
SpaceBeforeRangeBasedForLoopColon: true
SpaceInEmptyParentheses: false
SpacesBeforeTrailingComments: 2
SpacesInAngles: false
SpacesInContainerLiterals: true
SpacesInCStyleCastParentheses: false
SpacesInParentheses: false
SpacesInSquareBrackets: false
Standard: Auto
TabWidth: 8
UseTab: Never
...


@ -1,4 +1,4 @@
name: build and test name: Build and Test
on: on:
push: push:
@ -20,23 +20,12 @@ jobs:
with: with:
profile: minimal profile: minimal
toolchain: nightly toolchain: nightly
override: true - uses: Swatinem/rust-cache@v1
- name: install mdbook - name: install mdbook
uses: baptiste0928/cargo-install@v1.3.0 run: cargo install mdbook
with:
crate: mdbook
- name: install linkcheck - name: install linkcheck
uses: baptiste0928/cargo-install@v1.3.0 run: cargo install mdbook-linkcheck
with: - uses: actions/checkout@v2
crate: mdbook-linkcheck
- uses: actions/checkout@v3
- uses: Swatinem/rust-cache@v2
- name: Install mimetype
if: runner.os == 'Linux'
run: sudo apt-get install libfile-mimeinfo-perl
- name: Check for binary blobs
if: runner.os == 'Linux'
run: ./scripts/check_for_blobs.sh
- name: Build libafl debug - name: Build libafl debug
run: cargo build -p libafl run: cargo build -p libafl
- name: Build the book - name: Build the book
@ -49,66 +38,49 @@ jobs:
run: cargo test run: cargo test
- name: Test libafl no_std - name: Test libafl no_std
run: cd libafl && cargo test --no-default-features run: cd libafl && cargo test --no-default-features
- name: Test libafl_targets no_std
run: cd libafl_targets && cargo test --no-default-features
ubuntu: ubuntu:
runs-on: ubuntu-22.04 runs-on: ubuntu-latest
steps: steps:
- name: Remove Dotnet & Haskell
run: rm -rf /usr/share/dotnet && rm -rf /opt/ghc
- uses: actions-rs/toolchain@v1 - uses: actions-rs/toolchain@v1
with: with:
profile: minimal profile: minimal
toolchain: stable toolchain: stable
- name: set mold linker as default linker - uses: Swatinem/rust-cache@v1
uses: rui314/setup-mold@v1 - name: Install deps
- name: Install and cache deps run: sudo apt-get install -y llvm llvm-dev clang ninja-build
uses: awalsh128/cache-apt-pkgs-action@v1.1.0
with:
packages: llvm llvm-dev clang ninja-build clang-format-13 shellcheck libgtk-3-dev gcc-arm-linux-gnueabi g++-arm-linux-gnueabi libslirp-dev
- name: get clang version - name: get clang version
run: command -v llvm-config && clang -v run: command -v llvm-config && clang -v
- name: Install cargo-hack - name: Install cargo-hack
run: curl -LsSf https://github.com/taiki-e/cargo-hack/releases/latest/download/cargo-hack-x86_64-unknown-linux-gnu.tar.gz | tar xzf - -C ~/.cargo/bin run: curl -LsSf https://github.com/taiki-e/cargo-hack/releases/latest/download/cargo-hack-x86_64-unknown-linux-gnu.tar.gz | tar xzf - -C ~/.cargo/bin
- name: Add nightly rustfmt and clippy - name: Add nightly rustfmt and clippy
run: rustup toolchain install nightly --component rustfmt --component clippy --allow-downgrade run: rustup toolchain install nightly --component rustfmt --component clippy --allow-downgrade
- uses: actions/checkout@v3 - uses: actions/checkout@v2
- uses: Swatinem/rust-cache@v2 - name: Run a normal build
run: cargo build --verbose
# ---- format check ---- # cargo-hack tests/checks each crate in the workspace
#- name: Run tests
# run: cargo hack test --all-features
# cargo-hack's --feature-powerset would be nice here but libafl has a too many knobs
- name: Check each feature
# Skipping python as it has to be built with the `maturin` tool
run: cargo hack check --feature-powerset --depth=2 --exclude-features=agpl,nautilus,python,sancov_pcguard_edges,arm,aarch64,i386 --no-dev-deps
# pcguard edges and pcguard hitcounts are not compatible and we need to build them separately # pcguard edges and pcguard hitcounts are not compatible and we need to build them separately
- name: Check pcguard edges - name: Check pcguard edges
run: cargo check --features=sancov_pcguard_edges run: cargo check --features=sancov_pcguard_edges
- name: Build examples
run: cargo build --examples --verbose
- uses: actions/checkout@v2
- name: Format - name: Format
run: cargo fmt -- --check run: cargo fmt -- --check
- name: Run clang-format style check for C/C++ programs. - uses: actions/checkout@v2
run: clang-format-13 -n -Werror --style=file $(find . -type f \( -name '*.cpp' -o -iname '*.hpp' -o -name '*.cc' -o -name '*.cxx' -o -name '*.cc' -o -name '*.h' \) | grep -v '/target/' | grep -v 'libpng-1\.6\.37' | grep -v 'stb_image\.h' | grep -v 'dlmalloc\.c' | grep -v 'QEMU-Nyx')
- name: run shellcheck
run: shellcheck ./scripts/*.sh
- name: Run clippy
run: ./scripts/clippy.sh
# ---- doc check ----
- name: Build Docs - name: Build Docs
run: cargo doc run: cargo doc
- name: Test Docs - name: Test Docs
run: cargo +nightly test --doc --all-features run: cargo +nightly test --doc --all-features
- name: Run clippy
# ---- build and feature check ---- run: ./scripts/clippy.sh
- name: Run a normal build
run: cargo build --verbose
# cargo-hack's --feature-powerset would be nice here but libafl has a too many knobs
- name: Check each feature
# Skipping `python` as it has to be built with the `maturin` tool
# `agpl`, `nautilus` require nightly
# `sancov_pcguard_edges` is tested separately
run: cargo hack check --each-feature --clean-per-run --exclude-features=prelude,agpl,nautilus,python,sancov_pcguard_edges,arm,aarch64,i386,be,systemmode --no-dev-deps
- name: Check nightly features
run: cargo +nightly check --features=agpl && cargo +nightly check --features=nautilus
- name: Build examples
run: cargo build --examples --verbose
ubuntu-concolic: ubuntu-concolic:
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
@ -116,77 +88,27 @@ jobs:
with: with:
profile: minimal profile: minimal
toolchain: stable toolchain: stable
- uses: actions/checkout@v3 - uses: Swatinem/rust-cache@v1
- uses: Swatinem/rust-cache@v2 - uses: actions/checkout@v2
- name: Install smoke test deps - name: Install smoke test deps
run: sudo ./libafl_concolic/test/smoke_test_ubuntu_deps.sh run: sudo ./libafl_concolic/test/smoke_test_ubuntu_deps.sh
- name: Run smoke test - name: Run smoke test
run: ./libafl_concolic/test/smoke_test.sh run: ./libafl_concolic/test/smoke_test.sh
ubuntu-fuzzers:
bindings:
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- uses: actions-rs/toolchain@v1 - uses: actions-rs/toolchain@v1
with: with:
profile: minimal profile: minimal
toolchain: stable toolchain: stable
- name: set mold linker as default linker - uses: Swatinem/rust-cache@v1
uses: rui314/setup-mold@v1
- name: Install deps
run: sudo apt-get install -y llvm llvm-dev clang ninja-build python3-dev python3-pip python3-venv
- name: Install maturin
run: python3 -m pip install maturin
- uses: actions/checkout@v3
- uses: Swatinem/rust-cache@v2
- name: Run a maturin build
run: cd ./bindings/pylibafl && maturin build
fuzzers:
strategy:
matrix:
os: [ubuntu-latest, macos-latest]
runs-on: ${{ matrix.os }}
steps:
- uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: stable
- name: set mold linker as default linker
if: runner.os == 'Linux' # mold only support linux until now
uses: rui314/setup-mold@v1
- name: Add nightly rustfmt and clippy - name: Add nightly rustfmt and clippy
run: rustup toolchain install nightly --component rustfmt --component clippy --allow-downgrade run: rustup toolchain install nightly --component rustfmt --component clippy --allow-downgrade
- name: Add no_std toolchain - name: Install deps
run: rustup toolchain install nightly-x86_64-unknown-linux-gnu ; rustup component add rust-src --toolchain nightly-x86_64-unknown-linux-gnu run: sudo apt-get install -y llvm llvm-dev clang nasm ninja-build
- name: Install python - uses: actions/checkout@v2
if: runner.os == 'macOS' - name: Build and run example fuzzers
run: brew install --force-bottle --overwrite python@3.11
- uses: lyricwulf/abc@v1
with:
# todo: remove afl++-clang when nyx support samcov_pcguard
linux: llvm llvm-dev clang nasm ninja-build gcc-arm-linux-gnueabi g++-arm-linux-gnueabi libgtk-3-dev afl++-clang pax-utils
# update bash for macos to support `declare -A` command`
macos: llvm libpng nasm coreutils z3 bash
- name: pip install
run: python3 -m pip install msgpack jinja2
# Note that nproc needs to have coreutils installed on macOS, so the order of CI commands matters.
- name: enable mult-thread for `make`
run: export MAKEFLAGS="-j$(expr $(nproc) \+ 1)"
- name: install cargo-make
uses: baptiste0928/cargo-install@v1.3.0
with:
crate: cargo-make
- uses: actions/checkout@v3
with:
submodules: true # recursively checkout submodules
- uses: Swatinem/rust-cache@v2
- name: Build and run example fuzzers (Linux)
if: runner.os == 'Linux'
run: ./scripts/test_all_fuzzers.sh run: ./scripts/test_all_fuzzers.sh
- name: Build and run example fuzzers (macOS)
if: runner.os == 'macOS' # use bash v4
run: /usr/local/bin/bash ./scripts/test_all_fuzzers.sh
nostd-build: nostd-build:
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
@ -194,28 +116,22 @@ jobs:
with: with:
profile: minimal profile: minimal
toolchain: nightly toolchain: nightly
override: true - uses: Swatinem/rust-cache@v1
components: rustfmt, clippy, rust-src - name: Add nightly rustfmt and clippy
- uses: actions/checkout@v3 run: rustup toolchain install nightly && rustup target add --toolchain nightly aarch64-unknown-none && rustup component add --toolchain nightly rust-src
- uses: Swatinem/rust-cache@v2 - uses: actions/checkout@v2
- name: Add targets
run: rustup target add arm-linux-androideabi && rustup target add thumbv6m-none-eabi
- name: Build aarch64-unknown-none - name: Build aarch64-unknown-none
run: cd ./fuzzers/baby_no_std && cargo +nightly build -Zbuild-std=core,alloc --target aarch64-unknown-none -v --release && cd ../.. run: cd ./fuzzers/baby_no_std && cargo +nightly build -Zbuild-std=core,alloc --target aarch64-unknown-none -v --release && cd ../..
- name: run x86_64 until panic! - name: run x86_64 until panic!
run: cd ./fuzzers/baby_no_std && cargo +nightly run || test $? -ne 0 || exit 1 run: cd ./fuzzers/baby_no_std && cargo +nightly run || test $? -ne 0 || exit 1
- name: no_std tests - name: no_std tests
run: cd ./libafl && cargo test --no-default-features run: cd ./libafl && cargo test --no-default-features
- name: libafl armv6m-none-eabi (32 bit no_std) clippy
run: cd ./libafl && cargo clippy --target thumbv6m-none-eabi --no-default-features
build-docker: build-docker:
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- uses: actions/checkout@v3 - uses: actions/checkout@v2
- name: Build docker - name: Build docker
run: docker build -t libafl . run: docker build -t libafl .
windows: windows:
runs-on: windows-latest runs-on: windows-latest
steps: steps:
@ -223,26 +139,16 @@ jobs:
with: with:
profile: minimal profile: minimal
toolchain: stable toolchain: stable
- uses: actions/checkout@v3 - uses: Swatinem/rust-cache@v1
- uses: Swatinem/rust-cache@v2 - uses: actions/checkout@v2
- name: Windows Build - name: Windows Build
run: cargo build --verbose run: cargo build --verbose
- name: Run clippy - name: Run clippy
uses: actions-rs/cargo@v1 uses: actions-rs/cargo@v1
with: with:
command: clippy command: clippy
- name: Build docs #- name: Build frida
run: cargo doc # run: cd fuzzers/frida_libpng/ && cargo build --release
- name: Set LIBCLANG_PATH
run: echo "LIBCLANG_PATH=$((gcm clang).source -replace "clang.exe")" >> $env:GITHUB_ENV
- name: install cargo-make
run: cargo install --force cargo-make
- uses: ilammy/msvc-dev-cmd@v1
- name: Build fuzzers/frida_libpng
run: cd fuzzers/frida_libpng/ && cargo make test
- name: Build fuzzers/frida_gdiplus
run: cd fuzzers/frida_gdiplus/ && cargo make test
macos: macos:
runs-on: macOS-latest runs-on: macOS-latest
steps: steps:
@ -250,12 +156,12 @@ jobs:
with: with:
profile: minimal profile: minimal
toolchain: stable toolchain: stable
- uses: Swatinem/rust-cache@v1
- name: Add nightly rustfmt and clippy - name: Add nightly rustfmt and clippy
run: rustup toolchain install nightly --component rustfmt --component clippy --allow-downgrade run: rustup toolchain install nightly --component rustfmt --component clippy --allow-downgrade
- name: Install deps - name: Install deps
run: brew install z3 gtk+3 run: brew install z3
- uses: actions/checkout@v3 - uses: actions/checkout@v2
- uses: Swatinem/rust-cache@v2
- name: MacOS Build - name: MacOS Build
run: cargo build --verbose run: cargo build --verbose
- name: Run clippy - name: Run clippy
@ -264,7 +170,23 @@ jobs:
run: ./scripts/shmem_limits_macos.sh run: ./scripts/shmem_limits_macos.sh
- name: Run Tests - name: Run Tests
run: cargo test run: cargo test
macos-fuzzers:
runs-on: macOS-latest
steps:
- uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: stable
- uses: Swatinem/rust-cache@v1
- name: Add nightly rustfmt and clippy
run: rustup toolchain install nightly --component rustfmt --component clippy --allow-downgrade
- name: Install deps
run: brew install llvm libpng nasm coreutils z3 && brew link --force llvm
- uses: actions/checkout@v2
- name: Increase map sizes
run: ./scripts/shmem_limits_macos.sh
- name: Build and run example fuzzers
run: ./scripts/test_all_fuzzers.sh
other_targets: other_targets:
runs-on: macOS-latest runs-on: macOS-latest
steps: steps:
@ -272,6 +194,7 @@ jobs:
with: with:
profile: minimal profile: minimal
toolchain: stable toolchain: stable
- uses: Swatinem/rust-cache@v1
- uses: nttld/setup-ndk@v1 - uses: nttld/setup-ndk@v1
with: with:
ndk-version: r21e ndk-version: r21e
@ -281,8 +204,7 @@ jobs:
run: rustup target add aarch64-linux-android run: rustup target add aarch64-linux-android
- name: install cargo ndk - name: install cargo ndk
run: cargo install cargo-ndk run: cargo install cargo-ndk
- uses: actions/checkout@v3 - uses: actions/checkout@v2
- uses: Swatinem/rust-cache@v2
- name: Build iOS - name: Build iOS
run: cargo build --target aarch64-apple-ios run: cargo build --target aarch64-apple-ios
- name: Build Android - name: Build Android
@ -296,36 +218,3 @@ jobs:
# run: clang -v # run: clang -v
#- name: Windows Test #- name: Windows Test
# run: C:\Rust\.cargo\bin\cargo.exe test --verbose # run: C:\Rust\.cargo\bin\cargo.exe test --verbose
freebsd:
runs-on: macos-12
name: Simple build in FreeBSD
steps:
- uses: actions/checkout@v3
- name: Test in FreeBSD
id: test
uses: vmactions/freebsd-vm@v0
with:
usesh: true
sync: rsync
copyback: false
mem: 2048
release: 13.1
prepare: |
pkg install -y curl bash sudo llvm14
curl https://sh.rustup.rs -sSf | sh -s -- -y
run: |
freebsd-version
. "$HOME/.cargo/env"
rustup toolchain install nightly
export LLVM_CONFIG=/usr/local/bin/llvm-config14
pwd
ls -lah
echo "local/bin"
ls -lah /usr/local/bin/
which llvm-config
chmod +x ./scripts/clippy.sh
bash ./scripts/shmem_limits_fbsd.sh
bash ./scripts/clippy.sh
cargo test

.github/workflows/codeql-analysis.yml (new file, 70 lines added)

@ -0,0 +1,70 @@
# For most projects, this workflow file will not need changing; you simply need
# to commit it to your repository.
#
# You may wish to alter this file to override the set of languages analyzed,
# or to provide custom queries or build logic.
#
# ******** NOTE ********
# We have attempted to detect the languages in your repository. Please check
# the `language` matrix defined below to confirm you have the correct set of
# supported CodeQL languages.
#
name: "CodeQL"
on:
push:
branches: [ main ]
pull_request:
# The branches below must be a subset of the branches above
branches: [ main ]
schedule:
- cron: '41 18 * * 5'
jobs:
analyze:
name: Analyze
runs-on: ubuntu-latest
permissions:
actions: read
contents: read
security-events: write
strategy:
fail-fast: false
matrix:
language: [ 'python' ]
# CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python', 'ruby' ]
# Learn more about CodeQL language support at https://git.io/codeql-language-support
steps:
- name: Checkout repository
uses: actions/checkout@v2
# Initializes the CodeQL tools for scanning.
- name: Initialize CodeQL
uses: github/codeql-action/init@v1
with:
languages: ${{ matrix.language }}
# If you wish to specify custom queries, you can do so here or in a config file.
# By default, queries listed here will override any specified in a config file.
# Prefix the list here with "+" to use these queries and those in the config file.
# queries: ./path/to/local/query, your-org/your-repo/queries@main
# Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
# If this step fails, then you should remove it and run the build manually (see below)
- name: Autobuild
uses: github/codeql-action/autobuild@v1
# Command-line programs to run using the OS shell.
# 📚 https://git.io/JvXDl
# ✏️ If the Autobuild fails above, remove it and uncomment the following three lines
# and modify them (or add more) to build your code if your project
# uses a compiled language
#- run: |
# make bootstrap
# make release
- name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@v1

.gitignore

@ -1,5 +1,4 @@
target target
target-bin
out out
Cargo.lock Cargo.lock
vendor vendor
@ -7,8 +6,6 @@ vendor
.DS_Store .DS_Store
.env .env
*.tmp
*.swp
*.o *.o
*.a *.a
*.so *.so
@ -18,7 +15,6 @@ vendor
*.dll *.dll
*.exe *.exe
*.dSYM *.dSYM
*.obj
.cur_input .cur_input
.venv .venv
@ -35,9 +31,6 @@ test.dict
# Ignore all built fuzzers # Ignore all built fuzzers
fuzzer_* fuzzer_*
AFLplusplus AFLplusplus
test_*
*_fuzzer
*_harness
# Ignore common dummy and logfiles # Ignore common dummy and logfiles
*.log *.log
@ -46,11 +39,3 @@ a
forkserver_test forkserver_test
__pycache__ __pycache__
*.lafl_lock *.lafl_lock
*atomic_file_testfile*
**/libxml2
**/corpus_discovered
**/libxml2-*.tar.gz
libafl_nyx/QEMU-Nyx
libafl_nyx/packer


@ -1,3 +1,9 @@
[profile.release]
lto = true
codegen-units = 1
opt-level = 3
debug = true
[workspace] [workspace]
members = [ members = [
"libafl", "libafl",
@ -6,16 +12,13 @@ members = [
"libafl_targets", "libafl_targets",
"libafl_frida", "libafl_frida",
"libafl_qemu", "libafl_qemu",
"libafl_tinyinst",
"libafl_sugar", "libafl_sugar",
"libafl_nyx",
"libafl_concolic/symcc_runtime", "libafl_concolic/symcc_runtime",
"libafl_concolic/symcc_libafl", "libafl_concolic/symcc_libafl",
"libafl_concolic/test/dump_constraints", "libafl_concolic/test/dump_constraints",
"libafl_concolic/test/runtime_test", "libafl_concolic/test/runtime_test",
"utils/deexit", "utils/deexit",
"utils/gramatron/construct_automata", "utils/gramatron/construct_automata",
"utils/libafl_benches",
] ]
default-members = [ default-members = [
"libafl", "libafl",
@ -27,16 +30,4 @@ exclude = [
"fuzzers", "fuzzers",
"bindings", "bindings",
"scripts", "scripts",
"libafl_qemu/libafl_qemu_build",
"libafl_qemu/libafl_qemu_sys"
] ]
[workspace.package]
version = "0.8.2"
[profile.release]
lto = true
codegen-units = 1
opt-level = 3
debug = true


@ -29,6 +29,7 @@ COPY libafl_derive/Cargo.toml libafl_derive/Cargo.toml
COPY scripts/dummy.rs libafl_derive/src/lib.rs COPY scripts/dummy.rs libafl_derive/src/lib.rs
COPY libafl/Cargo.toml libafl/build.rs libafl/ COPY libafl/Cargo.toml libafl/build.rs libafl/
COPY libafl/benches libafl/benches
COPY libafl/examples libafl/examples COPY libafl/examples libafl/examples
COPY scripts/dummy.rs libafl/src/lib.rs COPY scripts/dummy.rs libafl/src/lib.rs
@ -39,19 +40,13 @@ COPY libafl_frida/src/gettls.c libafl_frida/src/gettls.c
COPY libafl_qemu/Cargo.toml libafl_qemu/build.rs libafl_qemu/ COPY libafl_qemu/Cargo.toml libafl_qemu/build.rs libafl_qemu/
COPY scripts/dummy.rs libafl_qemu/src/lib.rs COPY scripts/dummy.rs libafl_qemu/src/lib.rs
COPY libafl_qemu/libafl_qemu_build/Cargo.toml libafl_qemu/libafl_qemu_build/
COPY scripts/dummy.rs libafl_qemu/libafl_qemu_build/src/lib.rs
COPY libafl_qemu/libafl_qemu_sys/Cargo.toml libafl_qemu/libafl_qemu_sys/build.rs libafl_qemu/libafl_qemu_sys/
COPY scripts/dummy.rs libafl_qemu/libafl_qemu_sys/src/lib.rs
COPY libafl_sugar/Cargo.toml libafl_sugar/ COPY libafl_sugar/Cargo.toml libafl_sugar/
COPY scripts/dummy.rs libafl_sugar/src/lib.rs COPY scripts/dummy.rs libafl_sugar/src/lib.rs
COPY libafl_cc/Cargo.toml libafl_cc/Cargo.toml COPY libafl_cc/Cargo.toml libafl_cc/Cargo.toml
COPY libafl_cc/build.rs libafl_cc/build.rs
COPY libafl_cc/src libafl_cc/src
COPY scripts/dummy.rs libafl_cc/src/lib.rs COPY scripts/dummy.rs libafl_cc/src/lib.rs
COPY libafl_cc/build.rs libafl_cc/build.rs
COPY libafl_cc/src/cmplog-routines-pass.cc libafl_cc/src/cmplog-routines-pass.cc
COPY libafl_targets/Cargo.toml libafl_targets/build.rs libafl_targets/ COPY libafl_targets/Cargo.toml libafl_targets/build.rs libafl_targets/
COPY libafl_targets/src libafl_targets/src COPY libafl_targets/src libafl_targets/src
@ -69,12 +64,6 @@ COPY scripts/dummy.rs libafl_concolic/symcc_runtime/src/lib.rs
COPY libafl_concolic/symcc_libafl/Cargo.toml libafl_concolic/symcc_libafl/ COPY libafl_concolic/symcc_libafl/Cargo.toml libafl_concolic/symcc_libafl/
COPY scripts/dummy.rs libafl_concolic/symcc_libafl/src/lib.rs COPY scripts/dummy.rs libafl_concolic/symcc_libafl/src/lib.rs
COPY libafl_nyx/Cargo.toml libafl_nyx/build.rs libafl_nyx/
COPY scripts/dummy.rs libafl_nyx/src/lib.rs
COPY libafl_tinyinst/Cargo.toml libafl_tinyinst/
COPY scripts/dummy.rs libafl_tinyinst/src/lib.rs
COPY utils utils COPY utils utils
RUN cargo build && cargo build --release RUN cargo build && cargo build --release
@ -100,18 +89,12 @@ RUN touch libafl/src/lib.rs
COPY libafl_targets/src libafl_targets/src COPY libafl_targets/src libafl_targets/src
RUN touch libafl_targets/src/lib.rs RUN touch libafl_targets/src/lib.rs
COPY libafl_frida/src libafl_frida/src COPY libafl_frida/src libafl_frida/src
RUN touch libafl_qemu/libafl_qemu_build/src/lib.rs
COPY libafl_qemu/libafl_qemu_build/src libafl_qemu/libafl_qemu_build/src
RUN touch libafl_qemu/libafl_qemu_sys/src/lib.rs
COPY libafl_qemu/libafl_qemu_sys/src libafl_qemu/libafl_qemu_sys/src
RUN touch libafl_qemu/src/lib.rs RUN touch libafl_qemu/src/lib.rs
COPY libafl_qemu/src libafl_qemu/src COPY libafl_qemu/src libafl_qemu/src
RUN touch libafl_frida/src/lib.rs RUN touch libafl_frida/src/lib.rs
COPY libafl_concolic/symcc_libafl libafl_concolic/symcc_libafl COPY libafl_concolic/symcc_libafl libafl_concolic/symcc_libafl
COPY libafl_concolic/symcc_runtime libafl_concolic/symcc_runtime COPY libafl_concolic/symcc_runtime libafl_concolic/symcc_runtime
COPY libafl_concolic/test libafl_concolic/test COPY libafl_concolic/test libafl_concolic/test
COPY libafl_nyx/src libafl_nyx/src
RUN touch libafl_nyx/src/lib.rs
RUN cargo build && cargo build --release RUN cargo build && cargo build --release
# Copy fuzzers over # Copy fuzzers over


@ -34,32 +34,20 @@ LibAFL offers integrations with popular instrumentation frameworks. At the momen
+ SanitizerCoverage, in [libafl_targets](./libafl_targets) + SanitizerCoverage, in [libafl_targets](./libafl_targets)
+ Frida, in [libafl_frida](./libafl_frida) + Frida, in [libafl_frida](./libafl_frida)
+ QEMU user-mode, in [libafl_qemu](./libafl_qemu) + QEMU user-mode, in [libafl_qemu](./libafl_qemu)
+ TinyInst, in [libafl_tinyinst](./libafl_tinyinst) by [elbiazo](https://github.com/elbiazo)
## Getting started ## Getting started
1. Install the Dependencies 1. Install the Rust development language. We highly recommend *not* to use e.g.
- The Rust development language. your Linux distribution package as this is likely outdated. So rather install
We highly recommend *not* to use e.g. your Linux distribition package as this is likely outdated. So rather install
Rust directly, instructions can be found [here](https://www.rust-lang.org/tools/install). Rust directly, instructions can be found [here](https://www.rust-lang.org/tools/install).
- LLVM tools
The LLVM tools are needed (newer than LLVM 11.0.0 but older than LLVM 15.0.0)
- Cargo-make
We use cargo-make to build the fuzzers in `fuzzers/` directory. You can install it with
```
cargo install cargo-make
```
2. Clone the LibAFL repository with 2. Clone the LibAFL repository with
``` ```
git clone https://github.com/AFLplusplus/LibAFL git clone https://github.com/AFLplusplus/LibAFL
``` ```
3. Build the library using Build the library using
``` ```
cargo build --release cargo build --release
@ -80,12 +68,6 @@ cd docs && mdbook serve
We collect all example fuzzers in [`./fuzzers`](./fuzzers/). We collect all example fuzzers in [`./fuzzers`](./fuzzers/).
Be sure to read their documentation (and source), this is *the natural way to get started!* Be sure to read their documentation (and source), this is *the natural way to get started!*
You can run each example fuzzer with
```
cargo make run
```
as long as the fuzzer directory has `Makefile.toml` file.
The best-tested fuzzer is [`./fuzzers/libfuzzer_libpng`](./fuzzers/libfuzzer_libpng), a multicore libfuzzer-like fuzzer using LibAFL for a libpng harness. The best-tested fuzzer is [`./fuzzers/libfuzzer_libpng`](./fuzzers/libfuzzer_libpng), a multicore libfuzzer-like fuzzer using LibAFL for a libpng harness.
## Resources ## Resources
@ -96,18 +78,16 @@ The best-tested fuzzer is [`./fuzzers/libfuzzer_libpng`](./fuzzers/libfuzzer_lib
+ The LibAFL book (WIP) [online](https://aflplus.plus/libafl-book) or in the [repo](./docs/src/) + The LibAFL book (WIP) [online](https://aflplus.plus/libafl-book) or in the [repo](./docs/src/)
+ Our research [paper](https://www.s3.eurecom.fr/docs/ccs22_fioraldi.pdf)
+ Our RC3 [talk](http://www.youtube.com/watch?v=3RWkT1Q5IV0 "Fuzzers Like LEGO") explaining the core concepts + Our RC3 [talk](http://www.youtube.com/watch?v=3RWkT1Q5IV0 "Fuzzers Like LEGO") explaining the core concepts
+ Our Fuzzcon Europe [talk](https://www.youtube.com/watch?v=PWB8GIhFAaI "LibAFL: The Advanced Fuzzing Library") with a (a bit but not so much outdated) step-by-step discussion on how to build some example fuzzers + Our Fuzzcon Europe [talk](https://www.youtube.com/watch?v=PWB8GIhFAaI "LibAFL: The Advanced Fuzzing Library") with a (a bit but not so much outdated) step-by-step discussion on how to build some example fuzzers
+ The Fuzzing101 [solutions](https://github.com/epi052/fuzzing-101-solutions) & series of [blog posts](https://epi052.gitlab.io/notes-to-self/blog/2021-11-01-fuzzing-101-with-libafl/) by [epi](https://github.com/epi052) + The Fuzzing101 [solutions](https://github.com/epi052/fuzzing-101-solutions) & series of [blog posts](https://epi052.gitlab.io/notes-to-self/blog/2021-11-01-fuzzing-101-with-libafl/) by [epi](https://github.com/epi052)
+ Blogpost on binary-only fuzzing lib libaf_qemu, [Hacking TMNF - Fuzzing the game server](https://blog.bricked.tech/posts/tmnf/part1/), by [RickdeJager](https://github.com/RickdeJager).
## Contributing ## Contributing
Check the [TODO.md](./TODO.md) file for features that we plan to support.
For bugs, feel free to open issues or contact us directly. Thank you for your support. <3 For bugs, feel free to open issues or contact us directly. Thank you for your support. <3
Even though we will gladly assist you in finishing up your PR, try to Even though we will gladly assist you in finishing up your PR, try to
@ -118,23 +98,6 @@ Even though we will gladly assist you in finishing up your PR, try to
Some of the parts in this list may be hard, don't be afraid to open a PR if you cannot fix them by yourself, so we can help. Some of the parts in this list may be hard, don't be afraid to open a PR if you cannot fix them by yourself, so we can help.
## Cite
If you use LibAFL for your academic work, please cite the following paper:
```bibtex
@inproceedings{libafl,
author = {Andrea Fioraldi and Dominik Maier and Dongjia Zhang and Davide Balzarotti},
title = {{LibAFL: A Framework to Build Modular and Reusable Fuzzers}},
booktitle = {Proceedings of the 29th ACM conference on Computer and communications security (CCS)},
series = {CCS '22},
year = {2022},
month = {November},
location = {Los Angeles, U.S.A.},
publisher = {ACM},
}
```
#### License #### License
<sup> <sup>

TODO.md (new file, 26 lines added)

@ -0,0 +1,26 @@
# TODOs
- [ ] Objective-Specific Corpuses (named per objective)
- [ ] Good documentation
- [ ] More informative outputs, deeper introspection (monitor, what mutation did x, etc.)
- [ ] Timeout handling for llmp clients (no ping for n seconds -> treat as disconnected)
- [ ] Heap for signal handling (bumpalo or llmp directly?)
- [x] Frida support for Windows
- [x] LAIN / structured fuzzing example
- [x] LLMP compression
- [x] AFL-Style Forkserver Executor
- [x] "Launcher" example that spawns broker + n clients
- [x] QEMU based instrumentation
- [x] AFL++ LLVM passes in libafl_cc
- [x] LLMP Cross Machine Link (2 brokers connected via TCP)
- [x] Conditional composition of feedbacks (issue #24)
- [x] Other objectives examples (e.g. execution of a given program point)
- [x] Restart Count in Fuzzing Loop
- [x] Minset corpus scheduler
- [x] Win32 shared mem and crash handler to have Windows in-process executor
- [x] Other feedbacks examples (e.g. maximize allocations to spot OOMs)
- [x] A macro crate with derive directives (e.g. for SerdeAny impl).
- [x] Restarting EventMgr could use forks on Unix
- [x] Android Ashmem support
- [x] Errors in the Fuzzer should exit the fuzz run
- [x] Timeouts for executors (WIP on Windows)


@ -1,20 +1,16 @@
[package] [package]
name = "pylibafl" name = "pylibafl"
version = "0.8.2" version = "0.7.0"
edition = "2021" edition = "2021"
[dependencies] [dependencies]
pyo3 = { version = "0.17", features = ["extension-module"] } pyo3 = { version = "0.15", features = ["extension-module"] }
libafl_qemu = { path = "../../libafl_qemu", version = "0.8.2", features = ["python"] } libafl_qemu = { path = "../../libafl_qemu", version = "0.7", features = ["python"] }
libafl_sugar = { path = "../../libafl_sugar", version = "0.8.2", features = ["python"] } libafl_sugar = { path = "../../libafl_sugar", version = "0.7", features = ["python"] }
libafl = { path = "../../libafl", version = "0.8.2", features = ["python"] }
[build-dependencies] [build-dependencies]
pyo3-build-config = { version = "0.17" } pyo3-build-config = { version = "0.15" }
[lib] [lib]
name = "pylibafl" name = "pylibafl"
crate-type = ["cdylib"] crate-type = ["cdylib"]
[profile.dev]
panic = "abort"


@ -1,31 +0,0 @@
# How to use python bindings
## First time setup
```bash
# Install maturin
pip install maturin
# Create virtual environment
python3 -m venv .env
```
## Build bindings
```
# Activate virtual environment
source .env/bin/activate
# Build python module
maturin develop
```
This is going to install `pylibafl` python module into this venv.
## Use bindings
### Example: Running baby_fuzzer in fuzzers/baby_fuzzer/baby_fuzzer.py
First, make sure the python virtual environment is activated. If not, run `source .env/bin/activate`. Running `pip freeze` at this point should display the following (versions may differ):
```
maturin==0.12.6
pylibafl==0.7.0
toml==0.10.2
```
Then simply run
```
python PATH_TO_BABY_FUZZER/baby_fuzzer.py
```
The crashes directory will be created in the directory from which you ran the command.


@ -1 +0,0 @@
nightly


@ -1,121 +1,17 @@
use libafl;
#[cfg(target_os = "linux")]
use libafl_qemu; use libafl_qemu;
use libafl_sugar; use libafl_sugar;
use pyo3::{prelude::*, types::PyDict}; use pyo3::prelude::*;
const LIBAFL_CODE: &str = r#"
class BaseObserver:
def flush(self):
pass
def pre_exec(self, state, input):
pass
def post_exec(self, state, input, exit_kind):
pass
def pre_exec_child(self, state, input):
pass
def post_exec_child(self, state, input, exit_kind):
pass
def name(self):
return type(self).__name__
def as_observer(self):
return Observer.new_py(self)
class BaseFeedback:
def init_state(self, state):
pass
def is_interesting(self, state, mgr, input, observers, exit_kind) -> bool:
return False
def append_metadata(self, state, testcase):
pass
def discard_metadata(self, state, input):
pass
def name(self):
return type(self).__name__
def as_feedback(self):
return Feedback.new_py(self)
class BaseExecutor:
def observers(self) -> ObserversTuple:
raise NotImplementedError('Implement this yourself')
def run_target(self, fuzzer, state, mgr, input) -> ExitKind:
raise NotImplementedError('Implement this yourself')
def as_executor(self):
return Executor.new_py(self)
class BaseStage:
def perform(self, fuzzer, executor, state, manager, corpus_idx):
pass
def as_stage(self):
return Stage.new_py(self)
class BaseMutator:
def mutate(self, state, input, stage_idx):
pass
def post_exec(self, state, stage_idx, corpus_idx):
pass
def as_mutator(self):
return Mutator.new_py(self)
class FnStage(BaseStage):
def __init__(self, fn):
self.fn = fn
def __call__(self, fuzzer, executor, state, manager, corpus_idx):
self.fn(fuzzer, executor, state, manager, corpus_idx)
def perform(self, fuzzer, executor, state, manager, corpus_idx):
self.fn(fuzzer, executor, state, manager, corpus_idx)
def feedback_not(a):
return NotFeedback(a).as_feedback()
def feedback_and(a, b):
return EagerAndFeedback(a, b).as_feedback()
def feedback_and_fast(a, b):
return FastAndFeedback(a, b).as_feedback()
def feedback_or(a, b):
return EagerOrFeedback(a, b).as_feedback()
def feedback_or_fast(a, b):
return FastOrFeedback(a, b).as_feedback()
"#;
#[pymodule] #[pymodule]
#[pyo3(name = "pylibafl")] #[pyo3(name = "pylibafl")]
pub fn python_module(py: Python, m: &PyModule) -> PyResult<()> { pub fn python_module(py: Python, m: &PyModule) -> PyResult<()> {
let modules = py.import("sys")?.getattr("modules")?;
let sugar_module = PyModule::new(py, "sugar")?; let sugar_module = PyModule::new(py, "sugar")?;
libafl_sugar::python_module(py, sugar_module)?; libafl_sugar::python_module(py, sugar_module)?;
m.add_submodule(sugar_module)?; m.add_submodule(sugar_module)?;
modules.set_item("pylibafl.sugar", sugar_module)?;
#[cfg(target_os = "linux")]
let qemu_module = PyModule::new(py, "qemu")?; let qemu_module = PyModule::new(py, "qemu")?;
#[cfg(target_os = "linux")]
libafl_qemu::python_module(py, qemu_module)?; libafl_qemu::python_module(py, qemu_module)?;
#[cfg(target_os = "linux")]
m.add_submodule(qemu_module)?; m.add_submodule(qemu_module)?;
#[cfg(target_os = "linux")]
modules.set_item("pylibafl.qemu", qemu_module)?;
let libafl_module = PyModule::new(py, "libafl")?;
libafl::pybind::python_module(py, libafl_module)?;
libafl_module.add("__builtins__", py.import("builtins")?)?;
let locals = PyDict::new(py);
py.run(LIBAFL_CODE, Some(libafl_module.dict()), Some(locals))?;
for (key, val) in locals.iter() {
libafl_module.add(key.extract::<&str>()?, val)?;
}
m.add_submodule(libafl_module)?;
modules.set_item("pylibafl.libafl", libafl_module)?;
Ok(()) Ok(())
} }


@ -1,94 +0,0 @@
from pylibafl.libafl import *
import ctypes
class FooObserver(BaseObserver):
def __init__(self):
self.n = 0
def name(self):
return "Foo"
def pre_exec(self, state, input):
if self.n % 10000 == 0:
print("FOO!", self.n, input)
self.n += 1
class FooFeedback(BaseFeedback):
def is_interesting(self, state, mgr, input, observers, exit_kind):
ob = observers.match_name("Foo").unwrap_py()
return ob.n % 10000 == 0
class FooExecutor(BaseExecutor):
def __init__(self, harness, observers: ObserversTuple):
self.h = harness
self.o = observers
def observers(self):
return self.o
def run_target(self, fuzzer, state, mgr, input) -> ExitKind:
return (self.h)(input)
libc = ctypes.cdll.LoadLibrary("libc.so.6")
area_ptr = libc.calloc(1, 4096)
observer = StdMapObserverI8("mymap", area_ptr, 4096)
m = observer.as_map_observer()
observers = ObserversTuple(
[observer.as_map_observer().as_observer(), FooObserver().as_observer()]
)
feedback = feedback_or(MaxMapFeedbackI8(m).as_feedback(), FooFeedback().as_feedback())
objective = feedback_and_fast(
CrashFeedback().as_feedback(), MaxMapFeedbackI8(m).as_feedback()
)
fuzzer = StdFuzzer(feedback, objective)
rand = StdRand.with_current_nanos()
state = StdState(
rand.as_rand(),
InMemoryCorpus().as_corpus(),
InMemoryCorpus().as_corpus(),
feedback,
objective,
)
monitor = SimpleMonitor(lambda s: print(s))
mgr = SimpleEventManager(monitor.as_monitor())
def harness(buf) -> ExitKind:
# print(buf)
m[0] = 1
if len(buf) > 0 and buf[0] == ord("a"):
m[1] = 1
if len(buf) > 1 and buf[1] == ord("b"):
m[2] = 1
if len(buf) > 2 and buf[2] == ord("c"):
m[3] = 1
return ExitKind.crash()
return ExitKind.ok()
# executor = InProcessExecutor(harness, observers, fuzzer, state, mgr.as_manager())
executor = FooExecutor(harness, observers)
stage = StdMutationalStage(StdHavocMutator().as_mutator())
stage_tuple_list = StagesTuple([stage.as_stage()])
fuzzer.add_input(state, executor.as_executor(), mgr.as_manager(), b"\0\0")
fuzzer.fuzz_loop(executor.as_executor(), state, mgr.as_manager(), stage_tuple_list)


@ -9,8 +9,8 @@
- [Build](./getting_started/build.md) - [Build](./getting_started/build.md)
- [Crates](./getting_started/crates.md) - [Crates](./getting_started/crates.md)
- [Baby Fuzzer](./baby_fuzzer/baby_fuzzer.md) - [Baby Fuzzer](./baby_fuzzer.md)
- [More Examples](./baby_fuzzer/more_examples.md)
- [Core Concepts](./core_concepts/core_concepts.md) - [Core Concepts](./core_concepts/core_concepts.md)
- [Observer](./core_concepts/observer.md) - [Observer](./core_concepts/observer.md)
- [Executor](./core_concepts/executor.md) - [Executor](./core_concepts/executor.md)
@ -24,7 +24,6 @@
- [Design](./design/design.md) - [Design](./design/design.md)
- [Architecture](./design/architecture.md) - [Architecture](./design/architecture.md)
- [Metadata](./design/metadata.md) - [Metadata](./design/metadata.md)
- [Migrating from LibAFL <0.9 to 0.9](./design/migration-0.9.md)
- [Message Passing](./message_passing/message_passing.md) - [Message Passing](./message_passing/message_passing.md)
- [Spawning Instances](./message_passing/spawn_instances.md) - [Spawning Instances](./message_passing/spawn_instances.md)
@ -34,7 +33,5 @@
- [Introduction](./tutorial/intro.md) - [Introduction](./tutorial/intro.md)
- [Advanced Features](./advanced_features/advanced_features.md) - [Advanced Features](./advanced_features/advanced_features.md)
- [Binary-Only Fuzzing with `Frida`](./advanced_features/frida.md) - [Concolic Tracing & Hybrid Fuzzing](./advanced_features/concolic/concolic.md)
- [Concolic Tracing & Hybrid Fuzzing](./advanced_features/concolic.md) - [LibAFL in `no_std` environments (Kernels, Hypervisors, ...)](./advanced_features/no_std/no_std.md)
- [LibAFL in `no_std` environments (Kernels, Hypervisors, ...)](./advanced_features/no_std.md)
- [Snapshot Fuzzing in Nyx](./advanced_features/nyx.md)


@ -1,4 +1,3 @@
# Advanced Features # Advanced Features
In addition to core building blocks for fuzzers, LibAFL also has features for more advanced/niche fuzzing techniques. In addition to core building blocks for fuzzers, LibAFL also has features for more advanced/niche fuzzing techniques.
The following sections are dedicated to some of these features. The following sections are dedicated to these features.


@ -6,9 +6,7 @@ Then, we'll go through the relationship of SymCC and LibAFL concolic tracing.
Finally, we'll walk through building a basic hybrid fuzzer using LibAFL. Finally, we'll walk through building a basic hybrid fuzzer using LibAFL.
## Concolic Tracing by Example ## Concolic Tracing by Example
Suppose you want to fuzz the following program: Suppose you want to fuzz the following program:
```rust ```rust
fn target(input: &[u8]) -> i32 { fn target(input: &[u8]) -> i32 {
match &input { match &input {
@ -21,7 +19,6 @@ fn target(input: &[u8]) -> i32 {
} }
} }
``` ```
A simple coverage-maximizing fuzzer that generates new inputs somewhat randomly will have a hard time finding an input that triggers the fictitious crashing input. A simple coverage-maximizing fuzzer that generates new inputs somewhat randomly will have a hard time finding an input that triggers the fictitious crashing input.
Many techniques have been proposed to make fuzzing less random and more directly attempt to mutate the input to flip specific branches, such as the ones involved in crashing the above program. Many techniques have been proposed to make fuzzing less random and more directly attempt to mutate the input to flip specific branches, such as the ones involved in crashing the above program.
@ -30,7 +27,6 @@ In principle, concolic tracing works by observing all executed instructions in a
To understand what this entails, we'll run an example with the above program. To understand what this entails, we'll run an example with the above program.
First, we'll simplify the program to simple if-then-else-statements: First, we'll simplify the program to simple if-then-else-statements:
```rust ```rust
fn target(input: &[u8]) -> i32 { fn target(input: &[u8]) -> i32 {
if input.len() == 4 { if input.len() == 4 {
@ -60,10 +56,8 @@ fn target(input: &[u8]) -> i32 {
} }
} }
``` ```
Next, we'll trace the program on the input `[]`. Next, we'll trace the program on the input `[]`.
The trace would look like this: The trace would look like this:
```rust,ignore ```rust,ignore
Branch { // if input.len() == 4 Branch { // if input.len() == 4
condition: Equals { condition: Equals {
@ -80,7 +74,6 @@ Branch { // if input.len() == 0
taken: true // This condition turned out to be true! taken: true // This condition turned out to be true!
} }
``` ```
Using this trace, we can easily deduce that we can force the program to take a different path by having an input of length 4 or having an input with non-zero length. Using this trace, we can easily deduce that we can force the program to take a different path by having an input of length 4 or having an input with non-zero length.
We do this by negating each branch condition and analytically solving the resulting 'expression'. We do this by negating each branch condition and analytically solving the resulting 'expression'.
In fact, we can create these expressions for any computation and give them to an [SMT](https://en.wikipedia.org/wiki/Satisfiability_modulo_theories)-Solver that will generate an input that satisfies the expression (as long as such an input exists). In fact, we can create these expressions for any computation and give them to an [SMT](https://en.wikipedia.org/wiki/Satisfiability_modulo_theories)-Solver that will generate an input that satisfies the expression (as long as such an input exists).
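To make that last step concrete, here is a small, self-contained sketch of solving a negated branch condition with the `z3` crate; it is only an illustration of the idea, not code from SymCC or LibAFL, and the constraint is written by hand rather than taken from a real trace.

```rust
use z3::{
    ast::{Ast, BV},
    Config, Context, SatResult, Solver,
};

fn main() {
    let cfg = Config::new();
    let ctx = Context::new(&cfg);
    let solver = Solver::new(&ctx);

    // Symbolic stand-in for the input length recorded in the trace.
    let len = BV::new_const(&ctx, "input_len", 64);

    // The trace said `input.len() == 0` was taken; assert the negation to flip that branch.
    let taken_condition = len._eq(&BV::from_u64(&ctx, 0, 64));
    solver.assert(&taken_condition.not());

    // Ask the solver for any input length that takes the other path.
    if solver.check() == SatResult::Sat {
        println!("satisfying model: {}", solver.get_model().unwrap());
    }
}
```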
@ -88,16 +81,13 @@ In fact, we can create these expressions for any computation and give them to an
In hybrid fuzzing, we combine this tracing + solving approach with more traditional fuzzing techniques. In hybrid fuzzing, we combine this tracing + solving approach with more traditional fuzzing techniques.
## Concolic Tracing in LibAFL, SymCC and SymQEMU ## Concolic Tracing in LibAFL, SymCC and SymQEMU
The concolic tracing support in LibAFL is implemented using SymCC. The concolic tracing support in LibAFL is implemented using SymCC.
SymCC is a compiler plugin for clang that can be used as a drop-in replacement for a normal C or C++ compiler. SymCC is a compiler plugin for clang that can be used as a drop-in replacement for a normal C or C++ compiler.
SymCC will instrument the compiled code with callbacks into a runtime that can be supplied by the user. SymCC will instrument the compiled code with callbacks into a runtime that can be supplied by the user.
These callbacks allow the runtime to construct a trace that is similar to the previous example. These callbacks allow the runtime to construct a trace that is similar to the previous example.
### SymCC and its Runtimes ### SymCC and its Runtimes
SymCC ships with 2 runtimes: SymCC ships with 2 runtimes:
* a 'simple' runtime that attempts to solve any branches it comes across using [Z3](https://github.com/Z3Prover/z3/wiki) and * a 'simple' runtime that attempts to solve any branches it comes across using [Z3](https://github.com/Z3Prover/z3/wiki) and
* a [QSym](https://github.com/sslab-gatech/qsym)-based runtime, which does a bit more filtering on the expressions and also solves using Z3. * a [QSym](https://github.com/sslab-gatech/qsym)-based runtime, which does a bit more filtering on the expressions and also solves using Z3.
@ -106,18 +96,15 @@ This crate allows you to easily build a custom runtime out of the built-in build
Check out the `symcc_runtime` docs for more information on how to build your own runtime. Check out the `symcc_runtime` docs for more information on how to build your own runtime.
### SymQEMU ### SymQEMU
[SymQEMU](https://github.com/eurecom-s3/symqemu) is a sibling project to SymCC. [SymQEMU](https://github.com/eurecom-s3/symqemu) is a sibling project to SymCC.
Instead of instrumenting the target at compile-time, it inserts instrumentation via dynamic binary translation, building on top of the [`QEMU`](https://www.qemu.org) emulation stack. Instead of instrumenting the target at compile-time, it inserts instrumentation via dynamic binary translation, building on top of the [`QEMU`](https://www.qemu.org) emulation stack.
This means that using SymQEMU, any (x86) binary can be traced without the need to build in instrumentation ahead of time. This means that using SymQEMU, any (x86) binary can be traced without the need to build in instrumentation ahead of time.
The `symcc_runtime` crate supports this use case and runtimes built with `symcc_runtime` also work with SymQEMU. The `symcc_runtime` crate supports this use case and runtimes built with `symcc_runtime` also work with SymQEMU.
## Hybrid Fuzzing in LibAFL ## Hybrid Fuzzing in LibAFL
The LibAFL repository contains an [example hybrid fuzzer](https://github.com/AFLplusplus/LibAFL/tree/main/fuzzers/libfuzzer_stb_image_concolic). The LibAFL repository contains an [example hybrid fuzzer](https://github.com/AFLplusplus/LibAFL/tree/main/fuzzers/libfuzzer_stb_image_concolic).
There are three main steps involved with building a hybrid fuzzer using LibAFL: There are three main steps involved with building a hybrid fuzzer using LibAFL:
1. Building a runtime, 1. Building a runtime,
2. choosing an instrumentation method and 2. choosing an instrumentation method and
3. building the fuzzer. 3. building the fuzzer.
@ -126,13 +113,11 @@ Note that the order of these steps is important.
For example, we need to have a runtime ready before we can do instrumentation with SymCC. For example, we need to have a runtime ready before we can do instrumentation with SymCC.
### Building a Runtime ### Building a Runtime
Building a custom runtime can be done easily using the `symcc_runtime` crate. Building a custom runtime can be done easily using the `symcc_runtime` crate.
Note that a custom runtime is a separate shared object file, which means that we need a separate crate for our runtime. Note that a custom runtime is a separate shared object file, which means that we need a separate crate for our runtime.
Check out the [example hybrid fuzzer's runtime](https://github.com/AFLplusplus/LibAFL/tree/main/fuzzers/libfuzzer_stb_image_concolic/runtime) and the [`symcc_runtime` docs](https://docs.rs/symcc_runtime/0.1/symcc_runtime) for inspiration. Check out the [example hybrid fuzzer's runtime](https://github.com/AFLplusplus/LibAFL/tree/main/fuzzers/libfuzzer_stb_image_concolic/runtime) and the [`symcc_runtime` docs](https://docs.rs/symcc_runtime/0.1/symcc_runtime) for inspiration.
### Instrumentation ### Instrumentation
There are two main instrumentation methods to make use of concolic tracing in LibAFL: There are two main instrumentation methods to make use of concolic tracing in LibAFL:
* Using a **compile-time** instrumented target with **SymCC**. * Using a **compile-time** instrumented target with **SymCC**.
This only works when the source is available for the target and the target is reasonably easy to build using the SymCC compiler wrapper. This only works when the source is available for the target and the target is reasonably easy to build using the SymCC compiler wrapper.
@ -142,7 +127,6 @@ It should be noted, however, that the 'quality' of the generated expressions can
Therefore, it is recommended to use SymCC over SymQEMU when possible. Therefore, it is recommended to use SymCC over SymQEMU when possible.
#### Using SymCC #### Using SymCC
The target needs to be instrumented ahead of fuzzing using SymCC. The target needs to be instrumented ahead of fuzzing using SymCC.
How exactly this is done does not matter. How exactly this is done does not matter.
However, the SymCC compiler needs to be made aware of the location of the runtime that it should instrument against. However, the SymCC compiler needs to be made aware of the location of the runtime that it should instrument against.
@ -155,13 +139,11 @@ The [`symcc_libafl` crate](https://docs.rs/symcc_libafl) contains helper functio
Make sure you satisfy the [build requirements](https://github.com/eurecom-s3/symcc#readme) of SymCC before attempting to build it. Make sure you satisfy the [build requirements](https://github.com/eurecom-s3/symcc#readme) of SymCC before attempting to build it.
#### Using SymQEMU #### Using SymQEMU
Build SymQEMU according to its [build instructions](https://github.com/eurecom-s3/symqemu#readme). Build SymQEMU according to its [build instructions](https://github.com/eurecom-s3/symqemu#readme).
By default, SymQEMU looks for the runtime in a sibling directory. By default, SymQEMU looks for the runtime in a sibling directory.
Since we don't have a runtime there, we need to point it to your runtime by setting the `--symcc-build` argument of the `configure` script to the path of your runtime. Since we don't have a runtime there, we need to point it to your runtime by setting the `--symcc-build` argument of the `configure` script to the path of your runtime.
### Building the Fuzzer ### Building the Fuzzer
No matter the instrumentation method, the interface between the fuzzer and the instrumented target should now be consistent. No matter the instrumentation method, the interface between the fuzzer and the instrumented target should now be consistent.
The only difference between using SymCC and SymQEMU should be the binary that represents the target: The only difference between using SymCC and SymQEMU should be the binary that represents the target:
In the case of SymCC it will be the binary that was built with instrumentation and with SymQEMU it will be the emulator binary (e.g. `x86_64-linux-user/symqemu-x86_64`), followed by your uninstrumented target binary and arguments. In the case of SymCC it will be the binary that was built with instrumentation and with SymQEMU it will be the emulator binary (e.g. `x86_64-linux-user/symqemu-x86_64`), followed by your uninstrumented target binary and arguments.
@ -170,7 +152,6 @@ You can use the [`CommandExecutor`](https://docs.rs/libafl/0.6.0/libafl/executor
When configuring the command, make sure you set the `SYMCC_INPUT_FILE` environment variable to the input file path, if your target reads input from a file (instead of standard input). When configuring the command, make sure you set the `SYMCC_INPUT_FILE` environment variable to the input file path, if your target reads input from a file (instead of standard input).
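As a minimal sketch of what that looks like at the process level (using plain `std::process::Command` rather than the actual `CommandExecutor`, with a placeholder binary name):

```rust
use std::process::{Command, ExitStatus};

// Run one execution of a SymCC-instrumented target (or SymQEMU plus an
// uninstrumented target), telling the runtime which file holds the input.
fn run_traced_target(input_path: &str) -> std::io::Result<ExitStatus> {
    Command::new("./target_symcc")              // placeholder: the instrumented binary
        .env("SYMCC_INPUT_FILE", input_path)    // the concolic runtime reads the concrete input from here
        .arg(input_path)                        // the target itself reads the same file
        .status()
}
```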
#### Serialization and Solving #### Serialization and Solving
While it is perfectly possible to build a custom runtime that also performs the solving step of hybrid fuzzing in the context of the target process, the intended use of the LibAFL concolic tracing support is to serialize the (filtered and pre-processed) branch conditions using the [`TracingRuntime`](https://docs.rs/symcc_runtime/0.1/symcc_runtime/tracing/struct.TracingRuntime.html). While it is perfectly possible to build a custom runtime that also performs the solving step of hybrid fuzzing in the context of the target process, the intended use of the LibAFL concolic tracing support is to serialize the (filtered and pre-processed) branch conditions using the [`TracingRuntime`](https://docs.rs/symcc_runtime/0.1/symcc_runtime/tracing/struct.TracingRuntime.html).
This serialized representation can be deserialized in the fuzzer process for solving using a [`ConcolicObserver`](https://docs.rs/libafl/0.6.0/libafl/observers/concolic/struct.ConcolicObserver.html) wrapped in a [`ConcolicTracingStage`](https://docs.rs/libafl/0.6.0/libafl/stages/concolic/struct.ConcolicTracingStage.html), which will attach a [`ConcolicMetadata`](https://docs.rs/libafl/0.6.0/libafl/observers/concolic/struct.ConcolicMetadata.html) to every [`TestCase`](https://docs.rs/libafl/0.6.0/libafl/corpus/testcase/struct.Testcase.html). This serialized representation can be deserialized in the fuzzer process for solving using a [`ConcolicObserver`](https://docs.rs/libafl/0.6.0/libafl/observers/concolic/struct.ConcolicObserver.html) wrapped in a [`ConcolicTracingStage`](https://docs.rs/libafl/0.6.0/libafl/stages/concolic/struct.ConcolicTracingStage.html), which will attach a [`ConcolicMetadata`](https://docs.rs/libafl/0.6.0/libafl/observers/concolic/struct.ConcolicMetadata.html) to every [`TestCase`](https://docs.rs/libafl/0.6.0/libafl/corpus/testcase/struct.Testcase.html).
@ -180,5 +161,5 @@ The [`SimpleConcolicMutationalStage`](https://docs.rs/libafl/0.6.0//libafl/stage
It will attempt to solve all branches, like the original simple backend from SymCC, using Z3. It will attempt to solve all branches, like the original simple backend from SymCC, using Z3.
### Example ### Example
The example fuzzer shows how to use the [`ConcolicTracingStage` together with the `SimpleConcolicMutationalStage`](https://github.com/AFLplusplus/LibAFL/blob/main/fuzzers/libfuzzer_stb_image_concolic/fuzzer/src/main.rs#L203) to build a basic hybrid fuzzer.

View File

@ -1,87 +0,0 @@
# Binary-only Fuzzing with Frida
LibAFL supports different instrumentation engines for binary-only fuzzing.
A potent cross-platform (Windows, macOS, Android, Linux, iOS) option for binary-only fuzzing is Frida, the dynamic instrumentation tool.
In this section, we will talk about the components in fuzzing with `libafl_frida`.
You can take a look at a working example in our [`fuzzers/frida_libpng`](https://github.com/AFLplusplus/LibAFL/tree/main/fuzzers/frida_libpng) folder for Linux, and [`fuzzers/frida_gdiplus`](https://github.com/AFLplusplus/LibAFL/tree/main/fuzzers/frida_gdiplus) for Windows.
## Dependencies
If you are on Linux or macOS, you'll need [libc++](https://libcxx.llvm.org/) for `libafl_frida` in addition to libafl's dependencies.
If you are on Windows, you'll need to install the LLVM tools.
## Harness & Instrumentation
LibAFL uses Frida's [__Stalker__](https://frida.re/docs/stalker/) to trace the execution of your program and instrument your harness.
Thus, you have to compile your harness to a dynamic library. Frida instruments your PUT after dynamically loading it.
For example in our `frida_libpng` example, we load the dynamic library and find the symbol to harness as follows:
```rust,ignore
let lib = libloading::Library::new(module_name).unwrap();
let target_func: libloading::Symbol<
unsafe extern "C" fn(data: *const u8, size: usize) -> i32,
> = lib.get(symbol_name.as_bytes()).unwrap();
```
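Assuming the usual `(data, size)` entry point shown above, the loaded symbol can then be called directly; this is just an illustrative smoke test, not part of the actual example:

```rust,ignore
// Feed one input through the loaded harness symbol.
let data = b"test input";
// `target_func` is an `unsafe extern "C" fn`, so calling it requires `unsafe`.
let _ret = unsafe { target_func(data.as_ptr(), data.len()) };
```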
## `FridaInstrumentationHelper` and Runtimes
To use the functionalities that Frida offers, we'll first need to obtain a `Gum` object via `Gum::obtain()`.
In LibAFL, we use the `FridaInstrumentationHelper` struct to manage frida-related state. `FridaInstrumentationHelper` is a key component that sets up the [__Transformer__](https://frida.re/docs/stalker/#transformer) that is used to generate the instrumented code. It also initializes the `Runtimes` that offer various instrumentation.
We have `CoverageRuntime` that can track the edge coverage, `AsanRuntime` for address sanitizer, `DrCovRuntime` that uses [__DrCov__](https://dynamorio.org/page_drcov.html) for coverage collection (to be imported in coverage tools like Lighthouse, bncov, dragondance,...), and `CmpLogRuntime` for cmplog instrumentation.
All of these runtimes can be slotted into `FridaInstrumentationHelper` at build time.
Combined with any `Runtime` you'd like to use, you can initialize the `FridaInstrumentationHelper` like this:
```rust,ignore
let gum = Gum::obtain();
let frida_options = FridaOptions::parse_env_options();
let coverage = CoverageRuntime::new();
let mut frida_helper = FridaInstrumentationHelper::new(
&gum,
&frida_options,
module_name,
modules_to_instrument,
tuple_list!(coverage),
);
```
## Running the Fuzzer
After setting up the `FridaInstrumentationHelper`, you can obtain the pointer to the coverage map by calling `map_ptr_mut()`.
```rust,ignore
let edges_observer = HitcountsMapObserver::new(StdMapObserver::new_from_ptr(
"edges",
frida_helper.map_ptr_mut().unwrap(),
MAP_SIZE,
));
```
You can then link this observer to `FridaInProcessExecutor` as follows:
```rust,ignore
let mut executor = FridaInProcessExecutor::new(
&gum,
InProcessExecutor::new(
&mut frida_harness,
tuple_list!(
edges_observer,
time_observer,
AsanErrorsObserver::new(&ASAN_ERRORS)
),
&mut fuzzer,
&mut state,
&mut mgr,
)?,
&mut frida_helper,
);
```
And finally, you can run the fuzzer.
See the `frida_` examples in [`./fuzzers`](https://github.com/AFLplusplus/LibAFL/tree/main/fuzzers/) for more information and, for Linux or full-system fuzzing, play around with `libafl_qemu`, another binary-only tracer.

View File

@ -1,6 +1,6 @@
# Using LibAFL in `no_std` environments
It is possible to use LibAFL in `no_std` environments e.g. custom platforms like microcontrollers, kernels, hypervisors, and more.
You can simply add LibAFL to your `Cargo.toml` file:
@ -8,8 +8,7 @@ You can simply add LibAFL to your `Cargo.toml` file:
libafl = { path = "path/to/libafl/", default-features = false}
```
Then build your project e.g. for `aarch64-unknown-none` using:
```sh
cargo build --no-default-features --target aarch64-unknown-none
```
@ -19,22 +18,18 @@ cargo build --no-default-features --target aarch64-unknown-none
The minimum amount of input LibAFL needs for `no_std` is a monotonically increasing timestamp.
For this, anywhere in your project you need to implement the `external_current_millis` function, which returns the current time in milliseconds.
```c
// Assume this is a clock source from a custom stdlib, which you want to use, and which returns the current time in seconds.
int my_real_seconds(void)
{
    return *CLOCK;
}
```
Here, we use it in Rust. `external_current_millis` is then called from LibAFL.
Note that it needs to be `no_mangle` in order to get picked up by LibAFL at linktime:
```rust,ignore
#[no_mangle]
pub extern "C" fn external_current_millis() -> u64 {
    unsafe { my_real_seconds() * 1000 }
}
```
See [./fuzzers/baby_no_std](https://github.com/AFLplusplus/LibAFL/tree/main/fuzzers/baby_no_std) for an example.

View File

@ -1,126 +0,0 @@
# Snapshot Fuzzing in Nyx
Nyx supports both source-based and binary-only fuzzing.
Currently, `libafl_nyx` only supports [AFL++](https://github.com/AFLplusplus/AFLplusplus)'s instrumentation. To install it, you can use `sudo apt install aflplusplus`, or compile it from source:
```bash
git clone https://github.com/AFLplusplus/AFLplusplus
cd AFLplusplus
make all # this will not compile AFL++'s additional extensions
```
Then you should compile the target with the AFL++ compiler wrapper:
```bash
export CC=afl-clang-fast
export CXX=afl-clang-fast++
# the following line depends on your target
./configure --enable-shared=no
make
```
For binary-only fuzzing, Nyx uses Intel PT (Intel® Processor Trace). You can find the list of supported CPUs at <https://www.intel.com/content/www/us/en/support/articles/000056730/processors.html>.
## Preparing the Nyx working directory
This step packs the target into Nyx's kernel. Don't worry, we have a template shell script in our [example](https://github.com/AFLplusplus/LibAFL/blob/main/fuzzers/nyx_libxml2_parallel/setup_libxml2.sh).
The parameters' meanings are listed below:
```bash
git clone https://github.com/nyx-fuzz/packer
python3 "./packer/packer/nyx_packer.py" \
./libxml2/xmllint \ # your target binary
/tmp/nyx_libxml2 \ # the nyx work directory
afl \ # instrumentation type
instrumentation \
-args "/tmp/input" \ # the args of the program, means that we will run `xmllint /tmp/input` in each run.
-file "/tmp/input" \ # the input will be generated in `/tmp/input`. If no `--file`, then input will be passed through stdin
--fast_reload_mode \
--purge || exit
```
Then, you can generate the config file:
```bash
python3 ./packer/packer/nyx_config_gen.py /tmp/nyx_libxml2/ Kernel || exit
```
## Standalone fuzzing
In the [example fuzzer](https://github.com/AFLplusplus/LibAFL/blob/main/fuzzers/nyx_libxml2_standalone/src/main.rs), you first need to run `./setup_libxml2.sh`. It will prepare your target and create your Nyx work directory in `/tmp/nyx_libxml2`. After that, you can start writing your code.
First, create the `NyxHelper`:
```rust,ignore
let share_dir = Path::new("/tmp/nyx_libxml2/");
let cpu_id = 0; // use first cpu
let parallel_mode = false; // close parallel_mode
let mut helper = NyxHelper::new(share_dir, cpu_id, true, parallel_mode, None).unwrap(); // we don't set the last parameter in standalone mode, we just use None here
```
Then, fetch `trace_bits`, create an observer and the `NyxExecutor`:
```rust,ignore
let trace_bits = unsafe { std::slice::from_raw_parts_mut(helper.trace_bits, helper.map_size) };
let observer = StdMapObserver::new("trace", trace_bits);
let mut executor = NyxExecutor::new(&mut helper, tuple_list!(observer)).unwrap();
```
Finally, use them as normal and pass them into `fuzzer.fuzz_loop(&mut stages, &mut executor, &mut state, &mut mgr)` to start fuzzing.
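As a rough sketch (component construction elided, and exact signatures depend on your libafl version), the final loop looks the same as in the other in-process examples:

```rust,ignore
// Havoc-style mutations scheduled by the standard mutator, wrapped in a mutational stage.
let mutator = StdScheduledMutator::new(havoc_mutations());
let mut stages = tuple_list!(StdMutationalStage::new(mutator));

// Run forever, executing the target inside the Nyx VM snapshot for each input.
fuzzer
    .fuzz_loop(&mut stages, &mut executor, &mut state, &mut mgr)
    .expect("Error in the fuzzing loop");
```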
## Parallel fuzzing
In the [example fuzzer](https://github.com/AFLplusplus/LibAFL/blob/main/fuzzers/nyx_libxml2_parallel/src/main.rs), you first need to run `./setup_libxml2.sh` as described before.
Parallel fuzzing relies on [`Launcher`](../message_passing/spawn_instances.md), so the spawn logic should be written in the scope of the anonymous function `run_client`:
```rust,ignore
let mut run_client = |state: Option<_>, mut restarting_mgr, _core_id: usize| {}
```
In `run_client`, you need to create `NyxHelper` first:
```rust,ignore
let share_dir = Path::new("/tmp/nyx_libxml2/");
let cpu_id = _core_id as u32;
let parallel_mode = true;
let mut helper = NyxHelper::new(
share_dir, // nyx work directory
cpu_id, // current cpu id
true, // open snap_mode
parallel_mode, // open parallel mode
Some(parent_cpu_id.id as u32), // the cpu id of the master instance; there is only one master instance, other instances will be treated as slaves
)
.unwrap();
```
Then you can fetch `trace_bits` and create an observer and the `NyxExecutor`:
```rust,ignore
let trace_bits =
unsafe { std::slice::from_raw_parts_mut(helper.trace_bits, helper.map_size) };
let observer = StdMapObserver::new("trace", trace_bits);
let mut executor = NyxExecutor::new(&mut helper, tuple_list!(observer)).unwrap();
```
Finally, open a `Launcher` as normal to start fuzzing:
```rust,ignore
match Launcher::builder()
.shmem_provider(shmem_provider)
.configuration(EventConfig::from_name("default"))
.monitor(monitor)
.run_client(&mut run_client)
.cores(&cores)
.broker_port(broker_port)
// .stdout_file(Some("/dev/null"))
.build()
.launch()
{
Ok(()) => (),
Err(Error::ShuttingDown) => println!("Fuzzing stopped by user. Good bye."),
Err(err) => panic!("Failed to run launcher: {:?}", err),
}
```

View File

@ -11,11 +11,11 @@ You can find a complete version of this tutorial as an example fuzzer in [`fuzze
> ### Warning
>
> This example fuzzer is too naive for any real-world usage.
> Its purpose is solely to show the main components of the library; for a more in-depth walkthrough on building a custom fuzzer, go to the [Tutorial chapter](../tutorial/intro.md) directly.
## Creating a project
We use cargo to create a new Rust project with LibAFL as a dependency.
```sh
$ cargo new baby_fuzzer
@ -37,15 +37,16 @@ edition = "2018"
```
In order to use LibAFL, we must add it as a dependency by adding `libafl = { path = "path/to/libafl/" }` under `[dependencies]`.
You can use the LibAFL version from [crates.io](https://crates.io/crates/libafl) if you want; in this case, you have to use `libafl = "*"` to get the latest version (or set it to the current version).
As we are going to fuzz Rust code, we want a panic to not simply cause the program to exit, but to raise an `abort` that can then be caught by the fuzzer.
To do that, we specify `panic = "abort"` in the [profiles](https://doc.rust-lang.org/cargo/reference/profiles.html).
Alongside this setting, we add some optimization flags for the compilation when building in release mode.
The final `Cargo.toml` should look similar to the following:
```toml
[package]
name = "baby_fuzzer"
@ -73,40 +74,31 @@ debug = true
Opening `src/main.rs`, we have an empty `main` function.
To start, we create the closure that we want to fuzz. It takes a buffer as input and panics if it starts with `"abc"`.
`ExitKind` is used to inform the fuzzer about the harness' exit status.
```rust
extern crate libafl;
use libafl::{
    bolts::AsSlice,
    inputs::{BytesInput, HasTargetBytes},
    executors::ExitKind,
};

fn main() {
    let mut harness = |input: &BytesInput| {
        let target = input.target_bytes();
        let buf = target.as_slice();
        if buf.len() > 0 && buf[0] == 'a' as u8 {
            if buf.len() > 1 && buf[1] == 'b' as u8 {
                if buf.len() > 2 && buf[2] == 'c' as u8 {
                    panic!("=)");
                }
            }
        }
        ExitKind::Ok
    };
    // To test the panic:
    let input = BytesInput::new(Vec::from("abc"));
    #[cfg(feature = "panic")]
    harness(&input);
}
```
## Generating and running some tests
One of the main components that a LibAFL-based fuzzer uses is the State, a container of the data that is evolved during the fuzzing process.
It includes all state, such as the Corpus of inputs, the current RNG state, and potential Metadata for the testcases and the run.
In our `main`, we create a basic State instance like the following:
```rust,ignore
@ -119,20 +111,17 @@ let mut state = StdState::new(
    // Corpus in which we store solutions (crashes in this example),
    // on disk so the user can get them after stopping the fuzzer
    OnDiskCorpus::new(PathBuf::from("./crashes")).unwrap(),
    &mut (),
    &mut ()
).unwrap();
```
- The first parameter is a random number generator, which is part of the fuzzer state; in this case, we use the default one, `StdRand`, but you can choose a different one. We seed it with the current nanoseconds.
- The second parameter is an instance of something implementing the Corpus trait, `InMemoryCorpus` in this case. The corpus is the container of the testcases evolved by the fuzzer; in this case, we keep it all in memory.
To avoid a type annotation error, you can use `InMemoryCorpus::<BytesInput>::new()` instead of `InMemoryCorpus::new()`. Otherwise, the type annotation will be inferred automatically once the `executor` is added.
- The third parameter is another corpus that stores the "solution" testcases for the fuzzer. For our purpose, the solution is the input that triggers the panic. In this case, we want to store it to disk under the `crashes` directory, so we can inspect it.
- The last two parameters are the feedback and the objective; we will discuss them later. A complete call is sketched right after this list.
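Putting it together, the full call looks roughly like this; the feedback and objective are still the placeholder `()` tuples shown above and will be replaced later in this chapter:

```rust,ignore
let mut state = StdState::new(
    // RNG part of the fuzzer state, seeded with the current nanoseconds
    StdRand::with_seed(current_nanos()),
    // Corpus of evolved testcases, kept in memory
    InMemoryCorpus::new(),
    // Corpus in which we store solutions (crashes in this example) on disk
    OnDiskCorpus::new(PathBuf::from("./crashes")).unwrap(),
    // Feedback and objective, discussed below (placeholders for now)
    &mut (),
    &mut (),
)
.unwrap();
```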
Another required component is the **EventManager**. It handles some events such as the addition of a testcase to the corpus during the fuzzing process. For our purpose, we use the simplest one that just displays the information about these events to the user using a `Monitor` instance.
```rust,ignore
// The Monitor trait defines how the fuzzer stats are displayed to the user
@ -143,18 +132,18 @@ let mon = SimpleMonitor::new(|s| println!("{}", s));
let mut mgr = SimpleEventManager::new(mon);
```
In addition, we have the **Fuzzer**, an entity that contains some actions that alter the State. One of these actions is the scheduling of the testcases to the fuzzer using a **Scheduler**.
We create it as `QueueScheduler`, a scheduler that serves testcases to the fuzzer in a FIFO fashion.
```rust,ignore
// A queue policy to get testcases from the corpus
let scheduler = QueueScheduler::new();
// A fuzzer with feedbacks and a corpus scheduler
let mut fuzzer = StdFuzzer::new(scheduler, (), ());
```
Last but not least, we need an **Executor**, the entity responsible for running our program under test. In this example, we want to run the harness function in-process (without forking off a child, for example), and so we use the `InProcessExecutor`.
```rust,ignore
// Create the executor for an in-process function
@ -169,11 +158,11 @@ let mut executor = InProcessExecutor::new(
```
It takes a reference to the harness, the state, and the event manager. We will discuss the second parameter later.
As the executor expects the harness to return an `ExitKind` object, we have added `ExitKind::Ok` to our harness function before.
Now we have the 4 major entities ready for running our tests, but we still cannot generate testcases.
For this purpose, we use a **Generator**, `RandPrintablesGenerator`, that generates a string of printable bytes.
```rust,ignore
use libafl::generators::RandPrintablesGenerator;
@ -194,15 +183,14 @@ extern crate libafl;
use std::path::PathBuf;
use libafl::{
    bolts::{AsSlice, current_nanos, rands::StdRand},
    corpus::{InMemoryCorpus, OnDiskCorpus},
    events::SimpleEventManager,
    executors::{inprocess::InProcessExecutor, ExitKind},
    fuzzer::StdFuzzer,
    generators::RandPrintablesGenerator,
    inputs::{BytesInput, HasTargetBytes},
    monitors::SimpleMonitor,
    schedulers::QueueScheduler,
    state::StdState,
};
```
@ -220,17 +208,16 @@ $ cargo run
Now you simply ran 8 randomly generated testcases, but none of them has been stored in the corpus. If you are very lucky, maybe you triggered the panic by chance, but you don't see any saved file in `crashes`.
Now we want to turn our simple fuzzer into a feedback-based one and increase the chance to generate the right input to trigger the panic. We are going to implement a simple feedback based on the 3 conditions that are needed to reach the panic. To do that, we need a way to keep track of whether a condition is satisfied.
An **Observer** can record information about properties of a fuzzing run and then feed it to the fuzzer. We use the `StdMapObserver`, the default observer that uses a map to keep track of covered elements. In our fuzzer, each condition is mapped to an entry of such a map.
We represent such a map as a `static mut` variable.
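A minimal sketch of such a map and its setter follows; the names and the 16-byte size mirror the full example in the repository, but treat them as illustrative here:

```rust,ignore
// Coverage "map" with explicit assignments, since we have no instrumentation
static mut SIGNALS: [u8; 16] = [0; 16];

/// Mark the entry at `idx` as covered
fn signals_set(idx: usize) {
    unsafe { SIGNALS[idx] = 1 };
}
```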
As we don't rely on any instrumentation engine, we have to manually track the satisfied conditions by calling `signals_set` in our harness:
```rust
extern crate libafl;
use libafl::{
    bolts::AsSlice,
    inputs::{BytesInput, HasTargetBytes},
    executors::ExitKind,
};
@ -246,11 +233,11 @@ fn signals_set(idx: usize) {
let mut harness = |input: &BytesInput| {
    let target = input.target_bytes();
    let buf = target.as_slice();
    signals_set(0); // set SIGNALS[0]
    if buf.len() > 0 && buf[0] == 'a' as u8 {
        signals_set(1); // set SIGNALS[1]
        if buf.len() > 1 && buf[1] == 'b' as u8 {
            signals_set(2); // set SIGNALS[2]
            if buf.len() > 2 && buf[2] == 'c' as u8 {
                panic!("=)");
            }
@ -281,13 +268,11 @@ let mut executor = InProcessExecutor::new(
.expect("Failed to create the Executor".into()); .expect("Failed to create the Executor".into());
``` ```
Now that the fuzzer can observe which condition is satisfied, we need a way to rate an input as interesting (i.e. worth adding to the corpus) based on this observation. Here comes the notion of Feedback.
A **Feedback** is part of the State and provides a way to rate an input and its corresponding execution as interesting by looking at the information reported by the observers. Feedbacks can maintain a cumulative state of the information seen so far as metadata in the State; in our case, it maintains the set of conditions satisfied in the previous runs.
We use `MaxMapFeedback`, a feedback that implements a novelty search over the map of the MapObserver. Basically, if there is a value in the observer's map that is greater than the maximum value registered so far for the same entry, it rates the input as interesting and updates its state.
An **Objective Feedback** is another kind of Feedback that decides if an input is a "solution". When an input is rated interesting by the objective, it is saved to the solutions (`./crashes` in our case) instead of the corpus. We use `CrashFeedback` to tell the fuzzer that an input that causes the program to crash is a solution for us.
We need to update our State creation, passing the feedback and the objective, and the Fuzzer, including the feedback and the objective:
@ -296,17 +281,20 @@ extern crate libafl;
use libafl::{
    bolts::{current_nanos, rands::StdRand, tuples::tuple_list},
    corpus::{InMemoryCorpus, OnDiskCorpus},
    feedbacks::{MaxMapFeedback, CrashFeedback},
    fuzzer::StdFuzzer,
    state::StdState,
    observers::StdMapObserver,
};
// Feedback to rate the interestingness of an input
let mut feedback = MaxMapFeedback::new(&observer);
// A feedback to choose if an input is a solution or not
let mut objective = CrashFeedback::new();
// create a State from scratch
let mut state = StdState::new(
@ -317,9 +305,10 @@ let mut state = StdState::new(
    // Corpus in which we store solutions (crashes in this example),
    // on disk so the user can get them after stopping the fuzzer
    OnDiskCorpus::new(PathBuf::from("./crashes")).unwrap(),
    &mut feedback,
    &mut objective
).unwrap();
// ...
@ -331,8 +320,7 @@ let mut fuzzer = StdFuzzer::new(scheduler, feedback, objective);
Now, after including the correct `use`s, we can run the program, but the outcome is not so different from the previous one, as the random generator does not take into account what we save as interesting in the corpus. To do that, we need to plug in a Mutator.
**Stages** perform actions on individual inputs taken from the corpus.
For instance, the `MutationalStage` executes the harness several times in a row, each time with mutated inputs.
As the last step, we create a MutationalStage that uses a mutator inspired by the havoc mutator of AFL.
@ -377,5 +365,3 @@ Bye!
```
As you can see, after the panic message, the `objectives` count of the log increased by one and you will find the crashing input in `crashes/`.
The complete code can be found in [`./fuzzers/baby_fuzzer`](https://github.com/AFLplusplus/LibAFL/tree/main/fuzzers/baby_fuzzer) alongside other `baby_` fuzzers.

View File

@ -1,12 +0,0 @@
# More Examples
Examples can be found under `./fuzzers`.
|fuzzer name|usage|
| ---- | ---- |
| baby_fuzzer_gramatron | [Gramatron](https://github.com/HexHive/Gramatron) is a fuzzer that uses **grammar automatons** in conjunction with aggressive mutation operators to synthesize complex bug triggers |
| baby_fuzzer_grimoire | [Grimoire](https://www.usenix.org/system/files/sec19-blazytko.pdf) is a fully automated coverage-guided fuzzer which works **without any form of human interaction or pre-configuration** |
| baby_fuzzer_nautilus | [nautilus](https://www.ndss-symposium.org/wp-content/uploads/2019/02/ndss2019_04A-3_Aschermann_paper.pdf) is a **coverage guided, grammar based** fuzzer|
| baby_fuzzer_tokens | basic **token level** fuzzer with token level mutations |
| baby_fuzzer_with_forkexecutor | example for **InProcessForkExecutor** |
| baby_no_std | a minimalistic example of how to create a LibAFL-based fuzzer that works in **`no_std`** environments like TEEs, kernels, or on bare metal |

View File

@ -2,7 +2,7 @@
The Corpus is where testcases are stored. We define a Testcase as an Input and a set of related metadata, like execution time, for instance.
A Corpus can store testcases in different ways, for example on disk, or in memory, or implement a cache to speed up on-disk storage.
Usually, a testcase is added to the Corpus when it is considered interesting, but a Corpus is also used to store testcases that fulfill an objective (like crashing the tested program, for instance).
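As an example, the getting-started chapter of this book combines an in-memory corpus for interesting testcases with an on-disk corpus for solutions; roughly:

```rust,ignore
// Interesting testcases, evolved by the fuzzer, kept in memory
let corpus = InMemoryCorpus::<BytesInput>::new();
// Testcases that fulfill the objective (e.g. crashes), stored on disk for later inspection
let solutions = OnDiskCorpus::new(PathBuf::from("./crashes")).unwrap();
```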

View File

@ -11,60 +11,47 @@ In our model, it can also hold a set of Observers connected with each execution.
In Rust, we bind this concept to the [`Executor`](https://docs.rs/libafl/0/libafl/executors/trait.Executor.html) trait. A structure implementing this trait must also implement [`HasObservers`](https://docs.rs/libafl/0/libafl/executors/trait.HasObservers.html) if it wants to hold a set of Observers.
By default, we implement some commonly used Executors such as [`InProcessExecutor`](https://docs.rs/libafl/0/libafl/executors/inprocess/struct.InProcessExecutor.html), in which the target is a harness function providing in-process crash detection. Another Executor is the [`ForkserverExecutor`](https://docs.rs/libafl/0/libafl/executors/forkserver/struct.ForkserverExecutor.html) that implements an AFL-like mechanism to spawn child processes to fuzz.
A common pattern when creating an Executor is wrapping an existing one; for instance, [`TimeoutExecutor`](https://docs.rs/libafl/0.6.1/libafl/executors/timeout/struct.TimeoutExecutor.html) wraps an executor and installs a timeout callback before calling the original run function of the wrapped executor.
## InProcessExecutor
Let's begin with the base case: `InProcessExecutor`.
This executor executes the harness program (function) inside the fuzzer process. It uses [_SanitizerCoverage_](https://clang.llvm.org/docs/SanitizerCoverage.html) as its backend; you can find the related code in `libafl_targets/src/sancov_pcguards`. Here we allocate a map called `EDGES_MAP`, and our compiler wrapper compiles the harness so that it writes its coverage into this map.
When you want to execute the harness as fast as possible, you will most probably want to use this `InProcessExecutor`.
One thing to note here: when your harness is likely to have heap corruption bugs, you want to use another allocator so that a corrupted heap does not affect the fuzzer itself (for example, we adopt MiMalloc in some of our fuzzers). Alternatively, you can compile your harness with AddressSanitizer to make sure you can catch these heap bugs.
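For reference, constructing it follows the pattern from the getting-started chapter; exact signatures vary between LibAFL versions, so treat this as a sketch with illustrative names:

```rust,ignore
let mut executor = InProcessExecutor::new(
    &mut harness,                // the harness closure or function
    tuple_list!(edges_observer), // observers evaluated for every run
    &mut fuzzer,
    &mut state,
    &mut mgr,
)
.expect("Failed to create the Executor");
```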
## ForkserverExecutor
Next, we'll take a look at the `ForkserverExecutor`. In this case, it is `afl-cc` (from AFLplusplus/AFLplusplus) that compiles the harness code, and therefore, we can't use `EDGES_MAP` anymore. Fortunately, we have [_a way_](https://github.com/AFLplusplus/AFLplusplus/blob/2e15661f184c77ac1fbb6f868c894e946cbb7f17/instrumentation/afl-compiler-rt.o.c#L270) to tell the forkserver which map to record the coverage in.
As you can see from the forkserver example,
```rust,ignore
// Coverage map shared between observer and executor
let mut shmem = StdShMemProvider::new().unwrap().new_shmem(MAP_SIZE).unwrap();
// let the forkserver know the shmid
shmem.write_to_env("__AFL_SHM_ID").unwrap();
let mut shmem_buf = shmem.as_mut_slice();
```
Here we create a shared memory region, `shmem`, and write its ID to the environment variable `__AFL_SHM_ID`. Then the instrumented binary, or the forkserver, finds this shared memory region (from the aforementioned env var) to record its coverage. On your fuzzer side, you can pass this shmem map to your `Observer` to obtain coverage feedback combined with any `Feedback`.
Another feature of the `ForkserverExecutor` to mention is shared memory testcases. In normal cases, the mutated input is passed between the forkserver and the instrumented binary via a `.cur_input` file. You can improve your forkserver fuzzer's performance by passing the input via shared memory instead.
If the target is configured to use shared memory testcases, the `ForkserverExecutor` will notice this during the handshake and will automatically set things up accordingly: when you call `ForkserverExecutor::new()` with `use_shmem_testcase` set to true, the `ForkserverExecutor` prepares the shared memory and your harness can simply fetch the input from `__AFL_FUZZ_TESTCASE_BUF`.
See AFL++'s [_documentation_](https://github.com/AFLplusplus/AFLplusplus/blob/stable/instrumentation/README.persistent_mode.md#5-shared-memory-fuzzing) or the fuzzer example in `forkserver_simple/src/program.c` for reference.
## InprocessForkExecutor
Finally, we'll talk about the `InProcessForkExecutor`.
`InProcessForkExecutor` has only one difference from `InProcessExecutor`: it forks before running the harness, and that's it.
But why do we want to do so? Well, under some circumstances, you may find your harness pretty unstable, or your harness wreaks havoc on global state. In this case, you want to fork beforehand so that the harness runs in a child process and doesn't break things.
However, we have to take care of the shared memory; it's the child process that runs the harness code and writes the coverage to the map.
We have to make the map shared between the parent process and the child process, so we'll use shared memory again. You should compile your harness with the `pointer_maps` feature (of `libafl_targets`) enabled; this way, we have a pointer, `EDGES_MAP_PTR`, that can point to any coverage map.
On your fuzzer side, you can allocate a shared memory region and make the `EDGES_MAP_PTR` point to your shared memory.
```rust,ignore
let mut shmem;
unsafe {
    shmem = StdShMemProvider::new().unwrap().new_shmem(MAX_EDGES_NUM).unwrap();
}
let shmem_buf = shmem.as_mut_slice();
unsafe {
    EDGES_PTR = shmem_buf.as_ptr();
}
```
Again, you can pass this shmem map to your `Observer` and `Feedback` to obtain coverage feedback.
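From there, the same region can back your map observer, mirroring the Frida example earlier in this book; names and sizes are illustrative:

```rust,ignore
// Observe the shared coverage map that the forked child writes into.
let edges_observer = HitcountsMapObserver::new(StdMapObserver::new_from_ptr(
    "edges",
    shmem_buf.as_mut_ptr(),
    MAX_EDGES_NUM,
));
```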

View File

@ -3,7 +3,7 @@
The Feedback is an entity that classifies the outcome of an execution of the program under test as interesting or not.
Typically, if an execution is interesting, the corresponding input used to feed the target program is added to a corpus.
Most of the time, the notion of Feedback is deeply linked to the Observer, but they are different concepts.
The Feedback, in most cases, processes the information reported by one or more observers to decide if the execution is interesting.
The concept of "interestingness" is abstract, but typically it is related to a novelty search (i.e. interesting inputs are those that reach a previously unseen edge in the control flow graph).
@ -11,16 +11,8 @@ The concept of "interestingness" is abstract, but typically it is related to a n
As an example, given an Observer that reports all the sizes of memory allocations, a maximization Feedback can be used to maximize these sizes to spot pathological inputs in terms of memory consumption.
In terms of code, the library offers the [`Feedback`](https://docs.rs/libafl/0/libafl/feedbacks/trait.Feedback.html) and the [`FeedbackState`](https://docs.rs/libafl/0/libafl/feedbacks/trait.FeedbackState.html) traits.
The first is used to implement functors that, given the state of the observers from the last execution, tell if the execution was interesting. The second is tied to `Feedback` and is the state of the data that the feedback wants to persist in the fuzzer's state, for instance the cumulative map holding all the edges seen so far in the case of a feedback based on edge coverage.
Multiple Feedbacks can be combined into a boolean formula, considering, for instance, an execution as interesting if it triggers new code paths or executes in less time compared to the average execution time, using [`feedback_or`](https://docs.rs/libafl/*/libafl/macro.feedback_or.html).
On top of that, logic operators like `feedback_or` and `feedback_and` have a `_fast` variant (e.g. `feedback_or_fast`), where the second feedback will not be evaluated if the first part already answers the `interestingness` question, to save precious performance.
Using `feedback_and_fast` in combination with [`ConstFeedback`](https://docs.rs/libafl/*/libafl/feedbacks/enum.ConstFeedback.html#method.new), certain feedbacks can be disabled dynamically.
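For instance, a common combination looks roughly like the following; constructor signatures differ slightly between LibAFL versions, and the observer names are placeholders:

```rust,ignore
// Interesting if it covers new map entries, or, only when that is not already
// decided, if it ran faster than average (the `_fast` variant short-circuits).
let mut feedback = feedback_or_fast!(
    MaxMapFeedback::new(&edges_observer),
    TimeFeedback::with_observer(&time_observer)
);
```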
## Objectives
While feedbacks are commonly used to decide if an [`Input`](https://docs.rs/libafl/*/libafl/inputs/trait.Input.html) should be kept for future mutations, they serve a double purpose as so-called `Objective Feedbacks`.
In this case, the `interestingness` of a feedback indicates if an `Objective` has been hit.
Commonly, these would be a crash or a timeout, but they can also be used to find specific parts of the program, for sanitization, or to flag a differential fuzzing success.
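As a sketch, a typical objective therefore combines a crash and a timeout check:

```rust,ignore
// An input is a "solution" if it crashes the target or runs into the timeout.
let mut objective = feedback_or_fast!(CrashFeedback::new(), TimeoutFeedback::new());
```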

View File

@ -1,6 +1,6 @@
# Input
Formally, the input of a program is the data taken from external sources that affects the program behavior.
In our model of an abstract fuzzer, we define the Input as the internal representation of the program input (or a part of it).
@ -10,6 +10,4 @@ But it is not always the case. A program can expect inputs that are not byte arr
In the case of a grammar fuzzer, for instance, the Input is generally an Abstract Syntax Tree, because it is a data structure that can be easily manipulated while maintaining its validity, but the program expects a byte array as input, so, just before the execution, the tree is serialized to a sequence of bytes.
In the Rust code, an [`Input`](https://docs.rs/libafl/*/libafl/inputs/trait.Input.html) is a trait that can be implemented only by structures that are serializable and have only owned data as fields.
While most fuzzers use a normal `BytesInput`, more advanced inputs include special inputs for grammar fuzzing ([`GramatronInput`](https://docs.rs/libafl/*/libafl/inputs/gramatron/struct.GramatronInput.html) or `NautilusInput` on nightly), as well as the token-level [`EncodedInput`](https://docs.rs/libafl/*/libafl/inputs/encoded/struct.EncodedInput.html).
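For byte-oriented targets, the conversion to raw bytes goes through `HasTargetBytes`, as used throughout this book; a small sketch:

```rust,ignore
// Build a byte-array input and view it as a raw byte slice, as a harness would
// (requires `HasTargetBytes` and `AsSlice` to be in scope).
let input = BytesInput::new(b"hello".to_vec());
let target = input.target_bytes();
let buf = target.as_slice();
assert_eq!(buf, b"hello");
```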

View File

@ -2,8 +2,8 @@
The Mutator is an entity that takes one or more Inputs and generates a new derived one.
Mutators can be composed, and they are generally linked to a specific Input type.
There can be, for instance, a Mutator that applies more than a single type of mutation to the input. Consider a generic Mutator for a byte stream: bit flip is just one of the possible mutations, but not the only one; there is also, for instance, the random replacement of a byte or the copy of a chunk.
In LibAFL, [`Mutator`](https://docs.rs/libafl/*/libafl/mutators/trait.Mutator.html) is a trait.

View File

@ -1,14 +1,12 @@
# Observer
An Observer is an entity that provides information observed during the execution of the program under test to the fuzzer.
The information contained in the Observer is not preserved across executions, but it may be serialized and passed on to other nodes if an `Input` is considered `interesting` and added to the `Corpus`.
As an example, the coverage map, filled during the execution to report the executed edges, used by fuzzers such as AFL and `HonggFuzz`, can be considered an observation.
This information is not preserved across runs, and it is an observation of a dynamic property of the program.
In terms of code, in the library this entity is described by the [`Observer`](https://docs.rs/libafl/0/libafl/observers/trait.Observer.html) trait.
In addition to holding the volatile data connected with the last execution of the target, the structures implementing this trait can define some execution hooks that are executed before and after each fuzz case. In these hooks, the observer can modify the fuzzer's state.
The fuzzer will act based on these observers through a [`Feedback`](./feedback.md), which reduces the observation to the decision of whether a testcase is `interesting` for the fuzzer or not.

View File

@ -6,4 +6,4 @@ For instance, a Mutational Stage, given an input of the corpus, applies a Mutato
A stage can also be an analysis stage, for instance, the Colorization stage of Redqueen that aims to introduce more entropy in a testcase or the Trimming stage of AFL that aims to reduce the size of a testcase.
There are several stages in the LibAFL codebase implementing the [`Stage`](https://docs.rs/libafl/*/libafl/stages/trait.Stage.html) trait.

View File

@ -2,14 +2,14 @@
The LibAFL architecture is built around some entities to allow code reuse and low-cost abstractions.
Initially, we started thinking about implementing LibAFL in a traditional Object-Oriented language, like C++. When we switched to Rust, we immediately changed our idea, as we realized that we could build the library using a more rust-y approach, namely the one described in [this blogpost](https://kyren.github.io/2018/09/14/rustconf-talk.html) about game design in Rust.
The LibAFL code reuse mechanism is based on components, rather than sub-classes, but there are still some OOP patterns in the library.
Thinking about similar fuzzers, you can observe that most of the time the data structures that are modified are the ones related to testcases and the fuzzer's global state.
Besides the entities previously described, we introduce the [`Testcase`](https://docs.rs/libafl/0.6/libafl/corpus/testcase/struct.Testcase.html) and [`State`](https://docs.rs/libafl/0.6/libafl/state/struct.StdState.html) entities. The Testcase is a container for an Input stored in the Corpus and its metadata (so, in the implementation, the Corpus stores Testcases), and the State contains all the metadata that evolve while running the fuzzer, Corpus included.
The State, in the implementation, contains only owned objects that are serializable, and it is serializable itself. Some fuzzers may want to serialize their state when pausing or, when doing in-process fuzzing, serialize on crash and deserialize in the new process to continue fuzzing with all the metadata preserved.
Additionally, we group the entities that are "actions", like the `CorpusScheduler` and the `Feedbacks`, in a common place, the [`Fuzzer`](https://docs.rs/libafl/*/libafl/fuzzer/struct.StdFuzzer.html).
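To picture why it matters that the State owns only serializable objects, here is a minimal, illustrative sketch (these are not the real `Testcase`/`StdState` types):

```rust,ignore
use serde::{Deserialize, Serialize};

#[derive(Serialize, Deserialize)]
struct Testcase {
    input: Vec<u8>,
    exec_time_ms: Option<u64>, // example of per-testcase metadata
}

#[derive(Serialize, Deserialize)]
struct State {
    corpus: Vec<Testcase>, // the Corpus stores Testcases
    executions: u64,       // evolving, fuzzer-global metadata
}

// Snapshot the whole fuzzer state, e.g. on crash during in-process fuzzing,
// and restore it in the new process to keep all metadata.
fn snapshot(state: &State) -> Vec<u8> {
    serde_json::to_vec(state).expect("the State is always serializable")
}
```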

View File

@ -1,6 +1,6 @@
# Metadata
A metadata in LibAFL is a self-contained structure that holds data associated with the State or with a Testcase.
In terms of code, a metadata can be defined as a Rust struct registered in the SerdeAny register.
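For illustration, a metadata type is usually just a serde-serializable struct plus a registration step; the macro below follows the pattern LibAFL uses at the time of writing, but its exact name and path may differ between versions:

```rust,ignore
use serde::{Deserialize, Serialize};

/// Hypothetical metadata attached to a Testcase or to the State.
#[derive(Debug, Serialize, Deserialize)]
pub struct PacketLenMetadata {
    pub packet_len: u64,
}

// Register the type in the SerdeAny register, so it can be stored inside the
// State or a Testcase, serialized together with them, and downcast back to
// its concrete type later.
libafl::impl_serdeany!(PacketLenMetadata);
```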

View File

@ -1,162 +0,0 @@
# Migrating from LibAFL <0.9 to 0.9
Internal APIs of LibAFL have changed in version 0.9 to prefer associated types in cases where components were "fixed" to
particular versions of other components. As a result, many existing custom components will not be compatible between
versions prior to 0.9 and version 0.9.
## Reasons for this change
When implementing a trait with a generic, it is possible to have more than one instantiation of that generic trait. As a
result, everywhere where consistency across generic types was required to implement a trait, it needed to be properly
and explicitly constrained at every point. This led to `impl`s which were at best difficult to debug and, at worst,
incorrect and caused confusing bugs for users.
For example, consider the `MapCorpusMinimizer` implementation (from <0.9) below:
```rust,ignore
impl<E, I, O, S, TS> CorpusMinimizer<I, S> for MapCorpusMinimizer<E, I, O, S, TS>
where
E: Copy + Hash + Eq,
I: Input,
for<'a> O: MapObserver<Entry = E> + AsIter<'a, Item = E>,
S: HasMetadata + HasCorpus<I>,
TS: TestcaseScore<I, S>,
{
fn minimize<CS, EX, EM, OT, Z>(
&self,
fuzzer: &mut Z,
executor: &mut EX,
manager: &mut EM,
state: &mut S,
) -> Result<(), Error>
where
CS: Scheduler<I, S>,
EX: Executor<EM, I, S, Z> + HasObservers<I, OT, S>,
EM: EventManager<EX, I, S, Z>,
OT: ObserversTuple<S>,
Z: Evaluator<EX, EM, I, S> + HasScheduler<CS, I, S>,
{
// --- SNIP ---
}
}
```
It was previously necessary to constrain every generic using a slew of other generics; above, it is necessary to
constrain the input type (`I`) for every generic, despite the fact that this was already made clear by the state (`S`)
and that the input will necessarily be the same over every implementation for that type.
Below is the same code, but with the associated type changes (note that some generic names have changed):
```rust,ignore
impl<E, O, T, TS> CorpusMinimizer<E> for MapCorpusMinimizer<E, O, T, TS>
where
E: UsesState,
for<'a> O: MapObserver<Entry = T> + AsIter<'a, Item = T>,
E::State: HasMetadata + HasCorpus,
T: Copy + Hash + Eq,
TS: TestcaseScore<E::State>,
{
fn minimize<CS, EM, Z>(
&self,
fuzzer: &mut Z,
executor: &mut E,
manager: &mut EM,
state: &mut E::State,
) -> Result<(), Error>
where
E: Executor<EM, Z> + HasObservers,
CS: Scheduler<State=E::State>,
EM: UsesState<State=E::State>,
Z: HasScheduler<CS, State=E::State>,
{
// --- SNIP ---
}
}
```
The executor is constrained to `EM` and `Z`, with each of their respective states being constrained to `E`'s state. It
is no longer necessary to explicitly define a generic for the input type, the state type, or the generic type, as these
are all present as associated types for `E`. Additionally, we don't even need to specify any details about the observers
(`OT` in the previous version) as the type does not need to be constrained and is not shared by other types.
## Scope
You are affected by this change if:
- You specified explicit generics for a type (e.g., `MaxMapFeedback::<_, (), _>::new(...)`)
- You implemented a custom component (e.g., `Mutator`, `Executor`, `State`, `Fuzzer`, `Feedback`, `Observer`, etc.)
If you did neither of these, congrats! You are likely unaffected by these changes.
### Migrating explicit generics
Migrating explicit generics should be quite simple; you should review the API documentation for details on the
order of generics and replace them accordingly. Generally speaking, it should no longer be necessary to specify these
generics.
See `fuzzers/` for examples of these changes.
### Migrating component types
If you implemented a Mutator, Executor, State, or another kind of component, you must update your implementation. The
main changes to the API are in the use of "Uses*" for associated types.
In many scenarios, Input, Observers, and State generics have been moved into traits with associated types (namely,
"UsesInput", "UsesObservers", and "UsesState". These traits are required for many existing traits now and are very
straightforward to implement. In a majority of cases, you will have generics on your custom implementation or a fixed
type to implement this with. Thankfully, Rust will let you know when you need to implement this type.
As an example, `InMemoryCorpus` before 0.9 looked like this:
```rust,ignore
#[derive(Default, Serialize, Deserialize, Clone, Debug)]
#[serde(bound = "I: serde::de::DeserializeOwned")]
pub struct InMemoryCorpus<I>
where
I: Input,
{
entries: Vec<RefCell<Testcase<I>>>,
current: Option<usize>,
}
impl<I> Corpus<I> for InMemoryCorpus<I>
where
I: Input,
{
// --- SNIP ---
}
```
After 0.9, all `Corpus` implementations are required to implement `UsesInput` and `Corpus` no longer has a generic for
the input type (as it is now provided by the UsesInput impl). The migrated implementation is shown below:
```rust,ignore
#[derive(Default, Serialize, Deserialize, Clone, Debug)]
#[serde(bound = "I: serde::de::DeserializeOwned")]
pub struct InMemoryCorpus<I>
where
I: Input,
{
entries: Vec<RefCell<Testcase<I>>>,
current: Option<usize>,
}
impl<I> UsesInput for InMemoryCorpus<I>
where
I: Input,
{
type Input = I;
}
impl<I> Corpus for InMemoryCorpus<I>
where
I: Input,
{
// --- SNIP ---
}
```
Now, `Corpus` cannot be accidentally implemented for a type other than the one specified by `InMemoryCorpus`, as it
is fixed to the associated type for `UsesInput`.
A more complex example of migration can be found in the "Reasons for this change" section of this document.

View File

@ -8,8 +8,6 @@ A crate is an individual library in Rust's Cargo build system, that you can use
libafl = { version = "*" }
```
## Crate List
For LibAFL, each crate has its self-contained purpose, and the user may not need to use all of them in their project.
Following the naming convention of the folders in the project's root, they are:
@ -21,13 +19,13 @@ This crate has a number of feature flags that enable and disable certain aspects
The features can be found in [LibAFL's `Cargo.toml`](https://github.com/AFLplusplus/LibAFL/blob/main/libafl/Cargo.toml) under "`[features]`", and are usually explained with comments there.
Some features worthy of remark are:
- `std` enables the parts of the code that use the Rust standard library. Without this flag, LibAFL is `no_std` compatible. This disables a range of features, but allows us to use LibAFL in embedded environments; read [the `no_std` section](../advanced_features/no_std.md) for further details.
- `derive` enables the usage of the `derive(...)` macros defined in libafl_derive from libafl.
- `rand_trait` allows you to use LibAFL's very fast (*but insecure!*) random number generator wherever compatibility with Rust's [`rand` crate](https://crates.io/crates/rand) is needed.
- `llmp_bind_public` makes LibAFL's LLMP bind to a public TCP port, over which other fuzzer nodes can communicate with this instance.
- `introspection` adds performance statistics to LibAFL.
You can choose the features by using `features = ["feature1", "feature2", ...]` for LibAFL in your `Cargo.toml`.
Out of this list, by default, `std`, `derive`, and `rand_trait` are already set.
You can choose to disable them by setting `default-features = false` in your `Cargo.toml`.
@ -54,7 +52,7 @@ Currently, the supported flags are:
- `pcguard_edges` defines the SanitizerCoverage trace-pc-guard hooks to track the executed edges in a map.
- `pcguard_hitcounts` defines the SanitizerCoverage trace-pc-guard hooks to track the executed edges with the hitcounts (like AFL) in a map.
- `libfuzzer` exposes a compatibility layer with libFuzzer style harnesses.
- `value_profile` defines the SanitizerCoverage trace-cmp hooks to track the matching bits of each comparison in a map.
### libafl_cc
@ -66,9 +64,10 @@ To understand it deeper, look through the tutorials and examples.
### libafl_frida
This library bridges LibAFL with Frida as instrumentation backend.
With this crate, you can instrument targets on Linux/macOS/Windows/Android for coverage collection.
Additionally, it supports CmpLog and AddressSanitizer instrumentation and runtimes for aarch64.
See further information, as well as usage instructions, [later in the book](../advanced_features/frida.md).
### libafl_qemu
@ -76,13 +75,3 @@ This library bridges LibAFL with QEMU user-mode to fuzz ELF cross-platform binar
It works on Linux and can collect edge coverage without collisions!
It also supports a wide range of hooks and instrumentation options.
### libafl_nyx
[Nyx](https://nyx-fuzz.com/) is a KVM-based snapshot fuzzer. `libafl_nyx` adds these capabilities to LibAFL. There is a specific section explaining usage of libafl_nyx [later in the book](../advanced_features/nyx.md).
### libafl_concolic
Concolic fuzzing is the combination of fuzzing and a symbolic execution engine.
This can reach greater depth than normal fuzzing, and is exposed in this crate.
There is a specific section explaining usage of libafl_concolic [later in the book](../advanced_features/concolic.md).

View File

@ -1,5 +1,5 @@
# Getting Started
To get started with LibAFL, there are some initial steps to take.
In this chapter, we discuss how to download and build LibAFL, using Rust's `cargo` command.
We also describe the structure of LibAFL's components, so-called crates, and the purpose of each individual crate.

View File

@ -22,7 +22,7 @@ $ git clone git@github.com:AFLplusplus/LibAFL.git
You can alternatively, on a UNIX-like machine, download a compressed archive and extract it with:
```sh
wget https://github.com/AFLplusplus/LibAFL/archive/main.tar.gz
tar xvf LibAFL-main.tar.gz
rm LibAFL-main.tar.gz
ls LibAFL-main # this is the extracted folder

View File

@ -18,7 +18,6 @@ Be it a specific target, a particular instrumentation backend, or a custom mutat
LibAFL gives you many of the benefits of an off-the-shelf fuzzer, while being completely customizable.
Some highlight features currently include:
- `multi platform`: LibAFL works pretty much anywhere you can find a Rust compiler for. We already used it on *Windows*, *Android*, *MacOS*, and *Linux*, on *x86_64*, *aarch64*, ...
- `portable`: `LibAFL` can be built in `no_std` mode.
This means it does not require a specific OS-dependent runtime to function.
@ -31,4 +30,4 @@ Scaling to multiple machines over TCP is also possible, using LLMP's `broker2bro
- `fast`: We do everything we can at compile time so that the runtime overhead is as minimal as it can get.
- `bring your own target`: We support binary-only modes, like QEMU-Mode and Frida-Mode with ASAN and CmpLog, as well as multiple compilation passes for source-based instrumentation.
Of course, we also support custom instrumentation, as you can see in the Python example based on Google's Atheris.
- `usable`: This one is on you to decide. Dig right in!

View File

@ -12,4 +12,4 @@ This version of the LibAFL book is coupled with the release 1.0 beta of the libr
This document is still work-in-progress and incomplete. The structure and the concepts explained here are subject to change in future revisions, as the structure of LibAFL itself will evolve.
The HTML version of this book is available online at [https://aflplus.plus/libafl-book/](https://aflplus.plus/libafl-book/) and offline from the LibAFL repository in the `docs/` folder.
Build it using `mdbook build` in this folder, or run `mdbook serve` to view the book.

View File

@ -5,5 +5,6 @@ The chapter describes how to run nodes with different configurations
in one fuzzing cluster.
This allows, for example, a node compiled with ASAN to know that it needs to rerun new testcases for a node without ASAN, while the same binary/configuration does not.
Fuzzers with the same configuration can exchange Observers for new testcases and reuse them without rerunning the input.
A different configuration indicates that only the raw input can be exchanged; it must be rerun on the other node to capture relevant observations.
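For example, when spawning nodes (e.g. via the Launcher described later in this chapter), the configuration is typically expressed with something like the following; the `EventConfig` constructors shown here follow recent LibAFL versions and may differ in yours:

```rust,ignore
use libafl::events::EventConfig;

// All nodes built with ASAN share one configuration name, so they can reuse
// each other's Observers for new testcases without re-running the inputs:
let asan_nodes = EventConfig::from_name("asan");

// A node that should never share observations and always re-run foreign
// testcases can instead use:
let unique_node = EventConfig::AlwaysUnique;
```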

View File

@ -21,22 +21,21 @@ The broker can also intercept and filter the messages it receives instead of for
A common use-case for messages filtered by the broker is status messages sent from each client directly to the broker.
The broker uses this information to paint a simple UI, with up-to-date information about all clients; the other clients, however, don't need to receive this information.
### Speedy Local Messages via Shared Memory
Throughout LibAFL, we use a wrapper around different operating systems' shared maps, called `ShMem`.
Shared maps, called shared memory for the sake of not colliding with Rust's `map()` functions, are the backbone of `LLMP`.
Each client, usually a fuzzer trying to share stats and new testcases, maps an outgoing `ShMem` map.
With very few exceptions, only this client writes to this map; therefore, we do not run into race conditions and can live without locks.
The broker reads from all clients' `ShMem` maps.
It checks all incoming client maps periodically and then forwards new messages to its outgoing broadcast-`ShMem`, mapped by all connected clients.
To send new messages, a client places a new message at the end of its shared memory and then updates a static field to notify the broker.
Once the outgoing map is full, the sender allocates a new `ShMem` using the respective `ShMemProvider`.
It then sends the information needed to map the newly-allocated page in connected processes to the old page, using an end of page (`EOP`) message.
Once the receiver maps the new page, it flags it as safe for unmapping from the sending process (to avoid race conditions if we have more than a single EOP in a short time) and then continues to read from the new `ShMem`.
The schema for the clients' maps to the broker is as follows:
```text
[client0] [client1] ... [clientN]
| | /
@ -50,7 +49,7 @@ The schema for client's maps to the broker is as follows:
The broker loops over all incoming maps and checks for new messages.
On `std` builds, the broker will sleep a few milliseconds after a loop, since we do not need the messages to arrive instantly.
After the broker receives a new message from clientN (`clientN_out->current_id != last_message->message_id`), it copies the message content to its own broadcast shared memory.
The clients periodically, for example after finishing `n` mutations, check for new incoming messages by checking if (`current_broadcast_map->current_id != last_message->message_id`).
While the broker uses the same EOP mechanism to map new `ShMem`s for its outgoing map, it never unmaps old pages.
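As a rough illustration of this lock-free notification scheme (the types below are illustrative only, not the real LLMP structs):

```rust,ignore
use std::sync::atomic::{AtomicU64, Ordering};

/// Header of a broadcast page in shared memory: the sender bumps `current_id`
/// after placing a new message; receivers poll it to detect new messages.
struct BroadcastPage {
    current_id: AtomicU64,
    // ... the messages themselves live behind this header in the mapping ...
}

struct ClientReader {
    last_seen_id: u64,
}

impl ClientReader {
    /// Called periodically, e.g. after finishing `n` mutations.
    fn has_new_messages(&mut self, page: &BroadcastPage) -> bool {
        let current = page.current_id.load(Ordering::Acquire);
        if current != self.last_seen_id {
            self.last_seen_id = current; // read the new messages from the map
            true
        } else {
            false
        }
    }
}
```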
@ -62,7 +61,7 @@ So the outgoing messages flow like this over the outgoing broadcast `Shmem`:
```text
[broker]
|
[current_broadcast_shmem]
|
|___________________________________
|_________________ \
@ -84,10 +83,10 @@ Finally, call `LlmpBroker::loop_forever()`.
### B2B: Connecting Fuzzers via TCP
For `broker2broker` communication, all broadcast messages are additionally forwarded via network sockets.
To facilitate this, we spawn an additional client thread in the broker that reads the broadcast shared memory, just like any other client would.
For broker2broker communication, this b2b client listens for TCP connections from other, remote brokers.
It keeps a pool of open sockets to other, remote b2b brokers around at any time.
When receiving a new message on the local broker shared memory, the b2b client will forward it to all connected remote brokers via TCP.
Additionally, the broker can receive messages from all connected (remote) brokers, and forward them to the local broker over a client `ShMem`.
As a sidenote, the TCP listener used for b2b communication is also used for an initial handshake when a new client tries to connect to a broker locally, simply exchanging the initial `ShMem` descriptions.

View File

@ -17,13 +17,11 @@ Launching nodes manually has the benefit that you can have multiple nodes with d
While it's called a "restarting" manager, it uses `fork` on Unix operating systems as an optimization and only actually restarts from scratch on Windows.
## Automated, with Launcher
The Launcher is the lazy way to do multiprocessing.
You can use the Launcher builder to create a fuzzer that spawns multiple nodes with one click, all using restarting event managers and the same configuration.
To use the Launcher, you first need to write an anonymous function `let mut run_client = |state: Option<_>, mut mgr, _core_id|{}`, which uses three parameters to create an individual fuzzer. Then you can specify the `shmem_provider`, `broker_port`, `monitor`, `cores`, and other options through `Launcher::builder()`:
```rust,ignore
Launcher::builder()
@ -44,16 +42,8 @@ The value is a string indicating the cores to bind to, for example, `0,2,5` or `
For each client, `run_client` will be called.
On Windows, the Launcher will restart each client, while on Unix, it will use `fork`.
Advanced use-cases:
1. To connect multiple nodes together via TCP, you can use the `remote_broker_addr`. This requires the `llmp_bind_public` compile-time feature for `LibAFL`.
2. To use multiple launchers for individual configurations, you can set `spawn_broker` to `false` on all but one.
3. Launcher will not select the cores automatically, so you need to specify the `cores` that you want.
For more examples, you can check out `qemu_launcher` and `libfuzzer_libpng_launcher` in [`./fuzzers/`](https://github.com/AFLplusplus/LibAFL/tree/main/fuzzers).
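Putting the pieces together, a sketch of what such a builder call commonly looks like follows; the builder methods mirror recent LibAFL versions and may differ in yours, and `monitor`, `cores`, `shmem_provider`, and `broker_port` are assumed to be defined earlier:

```rust,ignore
let mut run_client = |state: Option<_>, mut mgr, _core_id| {
    // Build the observers, feedback, scheduler, executor and stages here,
    // then run the fuzzing loop with `mgr` as the event manager.
    Ok(())
};

Launcher::builder()
    .shmem_provider(shmem_provider)
    .configuration(EventConfig::from_name("default"))
    .monitor(monitor)
    .run_client(&mut run_client)
    .cores(&cores)          // e.g. parsed from "0-3" or "0,2,5"
    .broker_port(broker_port)
    // .remote_broker_addr(remote_broker_addr) // multi-machine setups (needs `llmp_bind_public`)
    .build()
    .launch()?;
```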
## Other ways
The `LlmpEventManager` family is the easiest way to spawn instances, but for obscure targets, you may need to come up with other solutions.
LLMP is, in theory, even `no_std` compatible, and completely different EventManagers can be used for message passing.
If you are in this situation, please either read through the current implementations and/or reach out to us.

View File

@ -1,8 +1,5 @@
# Introduction
> ## Under Construction!
>
> This section is under construction.
> Please check back later (or open a PR)
>
> In the meantime, find the final Lain-based fuzzer in [the fuzzers folder](https://github.com/AFLplusplus/LibAFL/tree/main/fuzzers/tutorial)

View File

@ -1,4 +0,0 @@
*.qcow2
corpus
*.axf
demo

View File

@ -1,41 +0,0 @@
[package]
name = "fret"
version = "0.8.2"
authors = ["Andrea Fioraldi <andreafioraldi@gmail.com>", "Dominik Maier <domenukk@gmail.com>"]
edition = "2021"
[features]
default = ["std", "snapshot_restore", "singlecore", "restarting", "feed_systemtrace", "fuzz_int" ]
std = []
snapshot_restore = []
snapshot_fast = [ "snapshot_restore" ]
singlecore = []
restarting = ['singlecore']
trace_abbs = []
systemstate = []
feed_systemgraph = [ "systemstate" ]
feed_systemtrace = [ "systemstate" ]
feed_longest = [ ]
feed_afl = [ ]
feed_genetic = [ ]
fuzz_int = [ ]
gensize_1 = [ ]
gensize_10 = [ ]
gensize_100 = [ ]
observer_hitcounts = []
no_hash_state = []
run_until_saturation = []
[profile.release]
lto = true
codegen-units = 1
debug = true
[dependencies]
libafl = { path = "../../libafl/" }
libafl_qemu = { path = "../../libafl_qemu/", features = ["arm", "systemmode"] }
serde = { version = "1.0", default-features = false, features = ["alloc"] } # serialization lib
hashbrown = { version = "0.12", features = ["serde", "ahash-compile-time-rng"] } # A faster hashmap, nostd compatible
petgraph = { version="0.6.0", features = ["serde-1"] }
ron = "0.7" # write serialized data - including hashmaps
rand = "0.5"

View File

@ -1,26 +0,0 @@
# Qemu systemmode with launcher
This folder contains an example fuzzer for the qemu systemmode, using LLMP for fast multi-process fuzzing and crash detection.
## Build
To build this example, run
```bash
cargo build --release
cd example; sh build.sh; cd ..
```
This will build the fuzzer (src/fuzzer.rs) and a small example binary based on FreeRTOS, which can run under a qemu emulation target.
## Run
Since the instrumentation is based on snapshots, QEMU needs a virtual drive (even if it is unused...).
Create one and then run the fuzzer:
```bash
# create an image
qemu-img create -f qcow2 dummy.qcow2 32M
# run the fuzzer
KERNEL=./example/example.elf target/release/qemu_systemmode -icount shift=auto,align=off,sleep=off -machine mps2-an385 -monitor null -kernel ./example/example.elf -serial null -nographic -snapshot -drive if=none,format=qcow2,file=dummy.qcow2 -S
```
Currently the ``KERNEL`` variable is needed because the fuzzer does not parse QEMU's arguments to find the binary.

View File

@ -1,12 +0,0 @@
*dump
timedump*
corpora
build
mnt
.R*
*.png
*.pdf
bins
.snakemake
*.zip
*.tar.*

View File

@ -1,57 +0,0 @@
TIME=7200
corpora/%/seed:
mkdir -p $$(dirname $@)
LINE=$$(grep "^$$(basename $*)" target_symbols.csv); \
export \
KERNEL=benchmark/build/$*.elf \
FUZZ_MAIN=$$(echo $$LINE | cut -d, -f2) \
FUZZ_INPUT=$$(echo $$LINE | cut -d, -f3) \
FUZZ_INPUT_LEN=$$(echo $$LINE | cut -d, -f4) \
BREAKPOINT=$$(echo $$LINE | cut -d, -f5) \
SEED_DIR=benchmark/corpora/$* \
DUMP_SEED=seed; \
../fuzzer.sh
timedump/%$(FUZZ_RANDOM)$(SUFFIX): corpora/%/seed
mkdir -p $$(dirname $@)
LINE=$$(grep "^$$(basename $*)" target_symbols.csv); \
export \
KERNEL=benchmark/build/$*.elf \
FUZZ_MAIN=$$(echo $$LINE | cut -d, -f2) \
FUZZ_INPUT=$$(echo $$LINE | cut -d, -f3) \
FUZZ_INPUT_LEN=$$(echo $$LINE | cut -d, -f4) \
BREAKPOINT=$$(echo $$LINE | cut -d, -f5) \
SEED_RANDOM=1 \
TIME_DUMP=benchmark/$@ \
CASE_DUMP=benchmark/$@; \
../fuzzer.sh + + + + + $(TIME) + + + > $@_log
#SEED_DIR=benchmark/corpora/$*
all_sequential: timedump/sequential/mpeg2$(FUZZ_RANDOM) timedump/sequential/dijkstra$(FUZZ_RANDOM) timedump/sequential/epic$(FUZZ_RANDOM) \
timedump/sequential/g723_enc$(FUZZ_RANDOM) timedump/sequential/audiobeam$(FUZZ_RANDOM) \
timedump/sequential/gsm_enc$(FUZZ_RANDOM)
all_kernel: timedump/kernel/bsort$(FUZZ_RANDOM) timedump/kernel/insertsort$(FUZZ_RANDOM) #timedump/kernel/fft$(FUZZ_RANDOM)
all_app: timedump/app/lift$(FUZZ_RANDOM)
all_system: timedump/lift$(FUZZ_RANDOM)$(SUFFIX)
all_period: timedump/waters$(FUZZ_RANDOM)$(SUFFIX)
tacle_rtos: timedump/tacle_rtos$(FUZZ_RANDOM)
graphics:
Rscript --vanilla plot_comparison.r mnt/timedump/sequential audiobeam
Rscript --vanilla plot_comparison.r mnt/timedump/sequential dijkstra
Rscript --vanilla plot_comparison.r mnt/timedump/sequential epic
Rscript --vanilla plot_comparison.r mnt/timedump/sequential g723_enc
# Rscript --vanilla plot_comparison.r mnt/timedump/sequential gsm_enc
# Rscript --vanilla plot_comparison.r mnt/timedump/sequential huff_dec
Rscript --vanilla plot_comparison.r mnt/timedump/sequential mpeg2
# Rscript --vanilla plot_comparison.r mnt/timedump/sequential rijndael_dec
# Rscript --vanilla plot_comparison.r mnt/timedump/sequential rijndael_enc
clean:
rm -rf corpora timedump

View File

@ -1,281 +0,0 @@
import csv
import os
def_flags="--no-default-features --features std,snapshot_restore,singlecore,restarting,run_until_saturation"
remote="timedump_253048_1873f6_all/"
RUNTIME=10
TARGET_REPS_A=2
TARGET_REPS_B=2
NUM_NODES=2
REP_PER_NODE_A=int(TARGET_REPS_A/NUM_NODES)
REP_PER_NODE_B=int(TARGET_REPS_B/NUM_NODES)
NODE_ID= 0 if os.getenv('NODE_ID') == None else int(os.environ['NODE_ID'])
MY_RANGE_A=range(NODE_ID*REP_PER_NODE_A,(NODE_ID+1)*REP_PER_NODE_A)
MY_RANGE_B=range(NODE_ID*REP_PER_NODE_B,(NODE_ID+1)*REP_PER_NODE_B)
rule build_showmap:
output:
directory("bins/target_showmap")
shell:
"cargo build --target-dir {output} {def_flags},systemstate"
rule build_random:
output:
directory("bins/target_random")
shell:
"cargo build --target-dir {output} {def_flags},feed_longest"
rule build_feedlongest:
output:
directory("bins/target_feedlongest")
shell:
"cargo build --target-dir {output} {def_flags},feed_longest"
rule build_frafl:
output:
directory("bins/target_frafl")
shell:
"cargo build --target-dir {output} {def_flags},feed_afl,feed_longest"
rule build_afl:
output:
directory("bins/target_afl")
shell:
"cargo build --target-dir {output} {def_flags},feed_afl,observer_hitcounts"
rule build_state:
output:
directory("bins/target_state")
shell:
"cargo build --target-dir {output} {def_flags},feed_systemtrace"
rule build_nohashstate:
output:
directory("bins/target_nohashstate")
shell:
"cargo build --target-dir {output} {def_flags},feed_systemtrace,no_hash_state"
rule build_graph:
output:
directory("bins/target_graph")
shell:
"cargo build --target-dir {output} {def_flags},feed_systemgraph"
rule build_showmap_int:
output:
directory("bins/target_showmap_int")
shell:
"cargo build --target-dir {output} {def_flags},systemstate,fuzz_int"
rule build_random_int:
output:
directory("bins/target_random_int")
shell:
"cargo build --target-dir {output} {def_flags},feed_longest,fuzz_int"
rule build_state_int:
output:
directory("bins/target_state_int")
shell:
"cargo build --target-dir {output} {def_flags},feed_systemtrace,fuzz_int"
rule build_nohashstate_int:
output:
directory("bins/target_nohashstate_int")
shell:
"cargo build --target-dir {output} {def_flags},feed_systemtrace,fuzz_int,no_hash_state"
rule build_frafl_int:
output:
directory("bins/target_frafl_int")
shell:
"cargo build --target-dir {output} {def_flags},feed_afl,feed_longest,fuzz_int"
rule build_afl_int:
output:
directory("bins/target_afl_int")
shell:
"cargo build --target-dir {output} {def_flags},feed_afl,fuzz_int,observer_hitcounts"
rule build_feedlongest_int:
output:
directory("bins/target_feedlongest_int")
shell:
"cargo build --target-dir {output} {def_flags},feed_longest,fuzz_int"
rule build_feedgeneration1:
output:
directory("bins/target_feedgeneration1")
shell:
"cargo build --target-dir {output} {def_flags},feed_genetic,gensize_1"
rule build_feedgeneration1_int:
output:
directory("bins/target_feedgeneration1_int")
shell:
"cargo build --target-dir {output} {def_flags},feed_genetic,fuzz_int,gensize_1"
rule build_feedgeneration10:
output:
directory("bins/target_feedgeneration10")
shell:
"cargo build --target-dir {output} {def_flags},feed_genetic,gensize_10"
rule build_feedgeneration10_int:
output:
directory("bins/target_feedgeneration10_int")
shell:
"cargo build --target-dir {output} {def_flags},feed_genetic,fuzz_int,gensize_10"
rule build_feedgeneration100:
output:
directory("bins/target_feedgeneration100")
shell:
"cargo build --target-dir {output} {def_flags},feed_genetic,gensize_100"
rule build_feedgeneration100_int:
output:
directory("bins/target_feedgeneration100_int")
shell:
"cargo build --target-dir {output} {def_flags},feed_genetic,fuzz_int,gensize_100"
rule run_bench:
input:
"build/{target}.elf",
"bins/target_{fuzzer}"
output:
multiext("timedump/{fuzzer}/{target}.{num}", "", ".log") # , ".case"
run:
with open('target_symbols.csv') as csvfile:
reader = csv.DictReader(csvfile)
line = next((x for x in reader if x['kernel']==wildcards.target), None)
if line == None:
return False
kernel=line['kernel']
fuzz_main=line['main_function']
fuzz_input=line['input_symbol']
fuzz_len=line['input_size']
bkp=line['return_function']
script="""
mkdir -p $(dirname {output[0]})
export KERNEL=$(pwd)/{input[0]}
export FUZZ_MAIN={fuzz_main}
export FUZZ_INPUT={fuzz_input}
export FUZZ_INPUT_LEN={fuzz_len}
export BREAKPOINT={bkp}
export SEED_RANDOM={wildcards.num}
export TIME_DUMP=$(pwd)/{output[0]}
export CASE_DUMP=$(pwd)/{output[0]}.case
export TRACE_DUMP=$(pwd)/{output[0]}.trace
export FUZZ_ITERS={RUNTIME}
export FUZZER=$(pwd)/{input[1]}/debug/fret
set +e
../fuzzer.sh > {output[1]} 2>&1
exit 0
"""
if wildcards.fuzzer.find('random') >= 0:
script="export FUZZ_RANDOM={output[1]}\n"+script
shell(script)
rule run_showmap:
input:
"{remote}build/{target}.elf",
"bins/target_showmap",
"bins/target_showmap_int",
"{remote}timedump/{fuzzer}/{target}.{num}.case"
output:
"{remote}timedump/{fuzzer}/{target}.{num}.trace.ron",
"{remote}timedump/{fuzzer}/{target}.{num}.case.time",
run:
with open('target_symbols.csv') as csvfile:
reader = csv.DictReader(csvfile)
line = next((x for x in reader if x['kernel']==wildcards.target), None)
if line == None:
return False
kernel=line['kernel']
fuzz_main=line['main_function']
fuzz_input=line['input_symbol']
fuzz_len=line['input_size']
bkp=line['return_function']
script=""
if wildcards.fuzzer.find('_int') > -1:
script="export FUZZER=$(pwd)/{input[2]}/debug/fret\n"
else:
script="export FUZZER=$(pwd)/{input[1]}/debug/fret\n"
script+="""
mkdir -p $(dirname {output})
export KERNEL=$(pwd)/{input[0]}
export FUZZ_MAIN={fuzz_main}
export FUZZ_INPUT={fuzz_input}
export FUZZ_INPUT_LEN={fuzz_len}
export BREAKPOINT={bkp}
export TRACE_DUMP=$(pwd)/{output[0]}
export DO_SHOWMAP=$(pwd)/{input[3]}
export TIME_DUMP=$(pwd)/{output[1]}
set +e
../fuzzer.sh
exit 0
"""
if wildcards.fuzzer.find('random') >= 0:
script="export FUZZ_RANDOM=1\n"+script
shell(script)
rule transform_trace:
input:
"{remote}timedump/{fuzzer}/{target}.{num}.trace.ron"
output:
"{remote}timedump/{fuzzer}/{target}.{num}.trace.csv"
shell:
"$(pwd)/../../../../state2gantt/target/debug/state2gantt {input} > {output[0]}"
rule trace2gantt:
input:
"{remote}timedump/{fuzzer}/{target}.{num}.trace.csv"
output:
"{remote}timedump/{fuzzer}/{target}.{num}.trace.csv.png"
shell:
"Rscript --vanilla $(pwd)/../../../../state2gantt/gantt.R {input}"
rule all_main:
input:
expand("timedump/{fuzzer}/{target}.{num}", fuzzer=['random','afl','feedgeneration10','state'], target=['waters','watersv2'],num=range(0,3))
rule all_main_int:
input:
expand("timedump/{fuzzer}/{target}.{num}", fuzzer=['random_int','afl_int','feedgeneration10_int','state_int'], target=['waters_int','watersv2_int'],num=range(0,4))
rule all_compare_feedgeneration:
input:
expand("timedump/{fuzzer}/{target}.{num}", fuzzer=['feedgeneration1','feedgeneration10','feedgeneration100'], target=['waters_int','watersv2'],num=range(0,10))
rule all_compare_feedgeneration_int:
input:
expand("timedump/{fuzzer}/{target}.{num}", fuzzer=['feedgeneration1_int','feedgeneration10_int','feedgeneration100_int'], target=['waters_int','watersv2_int'],num=range(0,10))
rule all_compare_afl:
input:
expand("timedump/{fuzzer}/{target}.{num}", fuzzer=['afl','frafl','feedlongest'], target=['waters','watersv2'],num=range(0,10))
rule all_compare_afl_int:
input:
expand("timedump/{fuzzer}/{target}.{num}", fuzzer=['afl_int','frafl_int','feedlongest_int'], target=['waters_int','watersv2_int'],num=range(0,10))
rule all_images:
input:
expand("{remote}timedump/{fuzzer}/{target}.{num}.trace.csv.png",remote=remote, fuzzer=['afl','feedgeneration10','state'], target=['waters','watersv2'],num=range(0,3))
rule all_images_int:
input:
expand("{remote}timedump/{fuzzer}/{target}.{num}.trace.csv.png",remote=remote, fuzzer=['afl_int','feedgeneration10_int','state_int'], target=['waters_int','watersv2_int'],num=range(0,3))
rule clusterfuzz:
input:
expand("timedump/{fuzzer}/{target}.{num}", fuzzer=['random','afl','feedgeneration10','state'], target=['waters','watersv2'],num=MY_RANGE_A),
expand("timedump/{fuzzer}/{target}.{num}", fuzzer=['random_int','afl_int','feedgeneration10_int','state_int'], target=['waters_int','watersv2_int'],num=MY_RANGE_A),
expand("timedump/{fuzzer}/{target}.{num}", fuzzer=['feedgeneration1','feedgeneration10','feedgeneration100'], target=['waters_int','watersv2'],num=MY_RANGE_B),
expand("timedump/{fuzzer}/{target}.{num}", fuzzer=['feedgeneration1_int','feedgeneration10_int','feedgeneration100_int'], target=['waters_int','watersv2_int'],num=MY_RANGE_B),
expand("timedump/{fuzzer}/{target}.{num}", fuzzer=['afl','frafl','feedlongest'], target=['waters','watersv2'],num=MY_RANGE_B),
expand("timedump/{fuzzer}/{target}.{num}", fuzzer=['afl_int','frafl_int','feedlongest_int'], target=['waters_int','watersv2_int'],num=MY_RANGE_B),
rule all_bins:
input:
expand("bins/target_{target}{flag}",target=['random','afl','frafl','state','feedgeneration100'],flag=['','_int'])

View File

@ -1,83 +0,0 @@
library("mosaic")
args = commandArgs(trailingOnly=TRUE)
#myolors=c("#339933","#0066ff","#993300") # green, blue, red
myolors=c("dark green","dark blue","dark red", "yellow") # green, blue, red, yellow
if (length(args)==0) {
runtype="timedump"
target="waters"
filename_1=sprintf("%s.png",target)
filename_2=sprintf("%s_maxline.png",target)
filename_3=sprintf("%s_hist.png",target)
} else {
runtype=args[1]
target=args[2]
filename_1=sprintf("%s.png",args[2])
filename_2=sprintf("%s_maxline.png",args[2])
filename_3=sprintf("%s_hist.png",args[2])
# filename_1=args[3]
}
file_1=sprintf("~/code/FRET/LibAFL/fuzzers/FRET/benchmark/%s/%s_state",runtype,target)
file_2=sprintf("~/code/FRET/LibAFL/fuzzers/FRET/benchmark/%s/%s_afl",runtype,target)
file_3=sprintf("~/code/FRET/LibAFL/fuzzers/FRET/benchmark/%s/%s_random",runtype,target)
file_4=sprintf("~/code/FRET/LibAFL/fuzzers/FRET/benchmark/%s/%s_graph",runtype,target)
timetrace <- read.table(file_1, quote="\"", comment.char="")
timetrace_afl <- read.table(file_2, quote="\"", comment.char="")
timetrace_rand <- read.table(file_3, quote="\"", comment.char="")
timetrace_graph <- read.table(file_4, quote="\"", comment.char="")
timetrace[[2]]=seq_len(length(timetrace[[1]]))
timetrace_afl[[2]]=seq_len(length(timetrace_afl[[1]]))
timetrace_rand[[2]]=seq_len(length(timetrace_rand[[1]]))
timetrace_graph[[2]]=seq_len(length(timetrace_graph[[1]]))
names(timetrace)[1] <- "timetrace"
names(timetrace)[2] <- "iter"
names(timetrace_afl)[1] <- "timetrace"
names(timetrace_afl)[2] <- "iter"
names(timetrace_rand)[1] <- "timetrace"
names(timetrace_rand)[2] <- "iter"
names(timetrace_graph)[1] <- "timetrace"
names(timetrace_graph)[2] <- "iter"
png(file=filename_1)
# pdf(file=filename_1,width=8, height=8)
plot(timetrace[[2]],timetrace[[1]], col=myolors[1], xlab="iters", ylab="wcet", pch='.')
points(timetrace_afl[[2]],timetrace_afl[[1]], col=myolors[2], pch='.')
points(timetrace_rand[[2]],timetrace_rand[[1]], col=myolors[3], pch='.')
points(timetrace_graph[[2]],timetrace_graph[[1]], col=myolors[4], pch='.')
abline(lm(timetrace ~ iter, data=timetrace),col=myolors[1])
abline(lm(timetrace ~ iter, data=timetrace_afl),col=myolors[2])
abline(lm(timetrace ~ iter, data=timetrace_rand),col=myolors[3])
dev.off()
png(file=filename_3)
gf_histogram(~ timetrace,data=timetrace, fill=myolors[1]) %>%
gf_histogram(~ timetrace,data=timetrace_afl, fill=myolors[2]) %>%
gf_histogram(~ timetrace,data=timetrace_rand, fill=myolors[3]) %>%
gf_histogram(~ timetrace,data=timetrace_graph, fill=myolors[4])
dev.off()
# Takes a flat list
trace2maxline <- function(tr) {
maxline = tr
for (var in seq_len(length(maxline))[2:length(maxline)]) {
maxline[var] = max(maxline[var],maxline[var-1])
}
#plot(seq_len(length(maxline)),maxline,"l",xlab="Index",ylab="WOET")
return(maxline)
}
timetrace[[1]] <- trace2maxline(timetrace[[1]])
timetrace_afl[[1]] <- trace2maxline(timetrace_afl[[1]])
timetrace_rand[[1]] <- trace2maxline(timetrace_rand[[1]])
timetrace_graph[[1]] <- trace2maxline(timetrace_graph[[1]])
png(file=filename_2)
plot(timetrace[[2]],timetrace[[1]], col=myolors[1], xlab="iters", ylab="wcet", pch='.')
points(timetrace_afl[[2]],timetrace_afl[[1]], col=myolors[2], pch='.')
points(timetrace_rand[[2]],timetrace_rand[[1]], col=myolors[3], pch='.')
points(timetrace_graph[[2]],timetrace_graph[[1]], col=myolors[4], pch='.')
#abline(lm(timetrace ~ iter, data=timetrace),col=myolors[1])
#abline(lm(timetrace ~ iter, data=timetrace_afl),col=myolors[2])
#abline(lm(timetrace ~ iter, data=timetrace_rand),col=myolors[3])
dev.off()

View File

@ -1,327 +0,0 @@
library("mosaic")
library("dplyr")
library("foreach")
library("doParallel")
#setup parallel backend to use many processors
cores=detectCores()
cl <- makeCluster(cores[1]-1) #not to overload your computer
registerDoParallel(cl)
args = commandArgs(trailingOnly=TRUE)
if (length(args)==0) {
runtype="timedump_253048_1873f6_all/timedump"
target="waters_int"
outputpath="~/code/FRET/LibAFL/fuzzers/FRET/benchmark/"
#MY_SELECTION <- c('state', 'afl', 'graph', 'random')
SAVE_FILE=TRUE
} else {
runtype=args[1]
target=args[2]
outputpath=args[3]
MY_SELECTION <- args[4:length(args)]
SAVE_FILE=TRUE
}
worst_cases <- list(waters=0, waters_int=0, tmr=405669, micro_longint=0)
worst_case <- worst_cases[[target]]
if (is.null(worst_case)) {
worst_case = 0
}
#MY_COLORS=c("green","blue","red", "orange", "pink", "black")
MY_COLORS <- c("green", "blue", "red", "magenta", "orange", "cyan", "pink", "gray", "orange", "black", "yellow","brown")
BENCHDIR=sprintf("~/code/FRET/LibAFL/fuzzers/FRET/benchmark/%s",runtype)
BASENAMES=Filter(function(x) x!="" && substr(x,1,1)!='.',list.dirs(BENCHDIR,full.names=FALSE))
PATTERNS="%s.[0-9]*$"
#RIBBON='sd'
#RIBBON='span'
RIBBON='both'
DRAW_WC = worst_case > 0
LEGEND_POS="topright"
#LEGEND_POS="bottomright"
CONTINUE_LINE_TO_END=FALSE
# https://www.r-bloggers.com/2013/04/how-to-change-the-alpha-value-of-colours-in-r/
alpha <- function(col, alpha=1){
if(missing(col))
stop("Please provide a vector of colours.")
apply(sapply(col, col2rgb)/255, 2,
function(x)
rgb(x[1], x[2], x[3], alpha=alpha))
}
# Trimm a list of data frames to common length
trim_data <- function(input,len=NULL) {
if (is.null(len)) {
len <- min(sapply(input, function(v) dim(v)[1]))
}
return(lapply(input, function(d) slice_head(d,n=len)))
}
length_of_data <- function(input) {
min(sapply(input, function(v) dim(v)[1]))
}
# Takes a flat list
trace2maxline <- function(tr) {
maxline = tr
for (var in seq_len(length(maxline))[2:length(maxline)]) {
#if (maxline[var]>1000000000) {
# maxline[var]=maxline[var-1]
#} else {
maxline[var] = max(maxline[var],maxline[var-1])
#}
}
#plot(seq_len(length(maxline)),maxline,"l",xlab="Index",ylab="WOET")
return(maxline)
}
# Take a list of data frames, output same form but maxlines
data2maxlines <- function(tr) {
min_length <- min(sapply(tr, function(v) dim(v)[1]))
maxline <- tr
for (var in seq_len(length(tr))) {
maxline[[var]][[1]]=trace2maxline(tr[[var]][[1]])
}
return(maxline)
}
# Take a multi-column data frame, output same form but maxlines
frame2maxlines <- function(tr) {
for (var in seq_len(length(tr))) {
tr[[var]]=trace2maxline(tr[[var]])
}
return(tr)
}
trace2maxpoints <- function(tr) {
minval = tr[1,1]
collect = tr[1,]
for (i in seq_len(dim(tr)[1])) {
if (minval < tr[i,1]) {
collect = rbind(collect,tr[i,])
minval = tr[i,1]
}
}
tmp = tr[dim(tr)[1],]
tmp[1] = minval[1]
collect = rbind(collect,tmp)
return(collect)
}
sample_maxpoints <- function(tr,po) {
index = 1
collect=NULL
endpoint = dim(tr)[1]
for (p in po) {
if (p<=tr[1,2]) {
tmp = tr[index,]
tmp[2] = p
collect = rbind(collect, tmp)
} else if (p>=tr[endpoint,2]) {
tmp = tr[endpoint,]
tmp[2] = p
collect = rbind(collect, tmp)
} else {
for (i in seq(index,endpoint)-1) {
if (p >= tr[i,2] && p<tr[i+1,2]) {
tmp = tr[i,]
tmp[2] = p
collect = rbind(collect, tmp)
index = i
break
}
}
}
}
return(collect)
}
#https://www.r-bloggers.com/2012/01/parallel-r-loops-for-windows-and-linux/
all_runtypetables <- foreach (bn=BASENAMES) %do% {
runtypefiles <- list.files(file.path(BENCHDIR,bn),pattern=sprintf(PATTERNS,target),full.names = TRUE)
if (length(runtypefiles) > 0) {
runtypetables_reduced <- foreach(i=seq_len(length(runtypefiles))) %dopar% {
rtable = read.csv(runtypefiles[[i]], col.names=c(sprintf("%s%d",bn,i),sprintf("times%d",i)))
trace2maxpoints(rtable)
}
#runtypetables <- lapply(seq_len(length(runtypefiles)),
# function(i)read.csv(runtypefiles[[i]], col.names=c(sprintf("%s%d",bn,i),sprintf("times%d",i))))
#runtypetables_reduced <- lapply(runtypetables, trace2maxpoints)
runtypetables_reduced
#all_runtypetables = c(all_runtypetables, list(runtypetables_reduced))
}
}
all_runtypetables = all_runtypetables[lapply(all_runtypetables, length) > 0]
all_min_points = foreach(rtt=all_runtypetables,.combine = cbind) %do% {
bn = substr(names(rtt[[1]])[1],1,nchar(names(rtt[[1]])[1])-1)
ret = data.frame(min(unlist(lapply(rtt, function(v) v[dim(v)[1],2]))))
names(ret)[1] = bn
ret/(3600 * 1000)
}
all_max_points = foreach(rtt=all_runtypetables,.combine = cbind) %do% {
bn = substr(names(rtt[[1]])[1],1,nchar(names(rtt[[1]])[1])-1)
ret = data.frame(max(unlist(lapply(rtt, function(v) v[dim(v)[1],2]))))
names(ret)[1] = bn
ret/(3600 * 1000)
}
all_points = sort(unique(Reduce(c, lapply(all_runtypetables, function(v) Reduce(c, lapply(v, function(w) w[[2]]))))))
all_maxlines <- foreach (rtt=all_runtypetables) %do% {
bn = substr(names(rtt[[1]])[1],1,nchar(names(rtt[[1]])[1])-1)
runtypetables_sampled = foreach(v=rtt) %dopar% {
sample_maxpoints(v, all_points)[1]
}
#runtypetables_sampled = lapply(rtt, function(v) sample_maxpoints(v, all_points)[1])
tmp_frame <- Reduce(cbind, runtypetables_sampled)
statframe <- data.frame(rowMeans(tmp_frame),apply(tmp_frame, 1, sd),apply(tmp_frame, 1, min),apply(tmp_frame, 1, max), apply(tmp_frame, 1, median))
names(statframe) <- c(bn, sprintf("%s_sd",bn), sprintf("%s_min",bn), sprintf("%s_max",bn), sprintf("%s_med",bn))
#statframe[sprintf("%s_times",bn)] = all_points
round(statframe)
#all_maxlines = c(all_maxlines, list(round(statframe)))
}
one_frame<-data.frame(all_maxlines)
one_frame[length(one_frame)+1] <- all_points/(3600 * 1000)
names(one_frame)[length(one_frame)] <- 'time'
typenames = names(one_frame)[which(names(one_frame) != 'time')]
typenames = typenames[which(!endsWith(typenames, "_sd"))]
typenames = typenames[which(!endsWith(typenames, "_med"))]
ylow=min(one_frame[typenames])
yhigh=max(one_frame[typenames],worst_case)
typenames = typenames[which(!endsWith(typenames, "_min"))]
typenames = typenames[which(!endsWith(typenames, "_max"))]
ml2lines <- function(ml,lim) {
lines = NULL
last = 0
for (i in seq_len(dim(ml)[1])) {
if (!CONTINUE_LINE_TO_END && lim<ml[i,2]) {
break
}
lines = rbind(lines, cbind(X=last, Y=ml[i,1]))
lines = rbind(lines, cbind(X=ml[i,2], Y=ml[i,1]))
last = ml[i,2]
}
return(lines)
}
plotting <- function(selection, filename, MY_COLORS_) {
# filter out names of iters and sd cols
typenames = names(one_frame)[which(names(one_frame) != 'times')]
typenames = typenames[which(!endsWith(typenames, "_sd"))]
typenames = typenames[which(!endsWith(typenames, "_med"))]
typenames = typenames[which(!endsWith(typenames, "_min"))]
typenames = typenames[which(!endsWith(typenames, "_max"))]
typenames = selection[which(selection %in% typenames)]
if (length(typenames) == 0) {return()}
h_ = 500
w_ = h_*4/3
if (SAVE_FILE) {png(file=sprintf("%s%s_%s.png",outputpath,target,filename), width=w_, height=h_)}
par(mar=c(4,4,1,1))
par(oma=c(0,0,0,0))
plot(c(1,max(one_frame['time'])),c(ylow,yhigh), col='white', xlab="Time [h]", ylab="WORT [insn]", pch='.')
for (t in seq_len(length(typenames))) {
#proj = one_frame[seq(1, dim(one_frame)[1], by=max(1, length(one_frame[[1]])/(10*w_))),]
#points(proj[c('iters',typenames[t])], col=MY_COLORS_[t], pch='.')
avglines = ml2lines(one_frame[c(typenames[t],'time')],all_max_points[typenames[t]])
#lines(avglines, col=MY_COLORS_[t])
medlines = ml2lines(one_frame[c(sprintf("%s_med",typenames[t]),'time')],all_max_points[typenames[t]])
lines(medlines, col=MY_COLORS_[t], lty='solid')
milines = NULL
malines = NULL
milines = ml2lines(one_frame[c(sprintf("%s_min",typenames[t]),'time')],all_max_points[typenames[t]])
malines = ml2lines(one_frame[c(sprintf("%s_max",typenames[t]),'time')],all_max_points[typenames[t]])
if (exists("RIBBON") && ( RIBBON=='max' )) {
#lines(milines, col=MY_COLORS_[t], lty='dashed')
lines(malines, col=MY_COLORS_[t], lty='dashed')
#points(proj[c('iters',sprintf("%s_min",typenames[t]))], col=MY_COLORS_[t], pch='.')
#points(proj[c('iters',sprintf("%s_max",typenames[t]))], col=MY_COLORS_[t], pch='.')
}
if (exists("RIBBON") && RIBBON != '') {
for (i in seq_len(dim(avglines)[1]-1)) {
if (RIBBON=='both') {
# draw boxes
x_l <- milines[i,][['X']]
x_r <- milines[i+1,][['X']]
y_l <- milines[i,][['Y']]
y_h <- malines[i,][['Y']]
rect(x_l, y_l, x_r, y_h, col=alpha(MY_COLORS_[t], alpha=0.1), lwd=0)
}
if (FALSE && RIBBON=='span') {
# draw boxes
x_l <- milines[i,][['X']]
x_r <- milines[i+1,][['X']]
y_l <- milines[i,][['Y']]
y_h <- malines[i,][['Y']]
rect(x_l, y_l, x_r, y_h, col=alpha(MY_COLORS_[t], alpha=0.1), lwd=0)
}
#if (FALSE && RIBBON=='both' || RIBBON=='sd') {
# # draw sd
# x_l <- avglines[i,][['X']]
# x_r <- avglines[i+1,][['X']]
# y_l <- avglines[i,][['Y']]-one_frame[ceiling(i/2),][[sprintf("%s_sd",typenames[t])]]
# y_h <- avglines[i,][['Y']]+one_frame[ceiling(i/2),][[sprintf("%s_sd",typenames[t])]]
# if (x_r != x_l) {
# rect(x_l, y_l, x_r, y_h, col=alpha(MY_COLORS_[t], alpha=0.1), lwd=0)
# }
#}
#sd_ <- row[sprintf("%s_sd",typenames[t])][[1]]
#min_ <- row[sprintf("%s_min",typenames[t])][[1]]
#max_ <- row[sprintf("%s_max",typenames[t])][[1]]
#if (exists("RIBBON")) {
# switch (RIBBON,
# 'sd' = arrows(x_, y_-sd_, x_, y_+sd_, length=0, angle=90, code=3, col=alpha(MY_COLORS_[t], alpha=0.03)),
# 'both' = arrows(x_, y_-sd_, x_, y_+sd_, length=0, angle=90, code=3, col=alpha(MY_COLORS_[t], alpha=0.05)),
# 'span' = #arrows(x_, min_, x_, max_, length=0, angle=90, code=3, col=alpha(MY_COLORS_[t], alpha=0.03))
# )
#}
##arrows(x_, y_-sd_, x_, y_+sd_, length=0.05, angle=90, code=3, col=alpha(MY_COLORS[t], alpha=0.1))
}
}
}
leglines=typenames
if (DRAW_WC) {
lines(c(0,length(one_frame[[1]])),y=c(worst_case,worst_case), lty='dotted')
leglines=c(typenames, 'worst observed')
}
legend(LEGEND_POS, legend=leglines,#"topleft"
col=c(MY_COLORS_[1:length(typenames)],"black"),
lty=c(rep("solid",length(typenames)),"dotted"))
if (SAVE_FILE) {dev.off()}
}
stopCluster(cl)
par(mar=c(3.8,3.8,0,0))
par(oma=c(0,0,0,0))
#RIBBON='both'
#MY_SELECTION = c('state_int','generation100_int')
#MY_SELECTION = c('state_int','frafl_int')
if (exists("MY_SELECTION")) {
plotting(MY_SELECTION, 'custom', MY_COLORS[c(1,2)])
} else {
# MY_SELECTION=c('state', 'afl', 'random', 'feedlongest', 'feedgeneration', 'feedgeneration10')
#MY_SELECTION=c('state_int', 'afl_int', 'random_int', 'feedlongest_int', 'feedgeneration_int', 'feedgeneration10_int')
#MY_SELECTION=c('state', 'frAFL', 'statenohash', 'feedgeneration10')
#MY_SELECTION=c('state_int', 'frAFL_int', 'statenohash_int', 'feedgeneration10_int')
MY_SELECTION=typenames
RIBBON='both'
for (i in seq_len(length(MY_SELECTION))) {
n <- MY_SELECTION[i]
plotting(c(n), n, c(MY_COLORS[i]))
}
RIBBON='max'
plotting(MY_SELECTION,'all', MY_COLORS)
}
for (t in seq_len(length(typenames))) {
li = one_frame[dim(one_frame)[1],]
pear = (li[[typenames[[t]]]]-li[[sprintf("%s_med",typenames[[t]])]])/li[[sprintf("%s_sd",typenames[[t]])]]
print(sprintf("%s pearson: %g",typenames[[t]],pear))
}

View File

@ -1,24 +0,0 @@
kernel,main_function,input_symbol,input_size,return_function
mpeg2,mpeg2_main,mpeg2_oldorgframe,90112,mpeg2_return
audiobeam,audiobeam_main,audiobeam_input,11520,audiobeam_return
epic,epic_main,epic_image,4096,epic_return
dijkstra,dijkstra_main,dijkstra_AdjMatrix,10000,dijkstra_return
fft,fft_main,fft_twidtable,2046,fft_return
bsort,bsort_main,bsort_Array,400,bsort_return
insertsort,insertsort_main,insertsort_a,400,insertsort_return
g723_enc,g723_enc_main,g723_enc_INPUT,1024,g723_enc_return
rijndael_dec,rijndael_dec_main,rijndael_dec_data,32768,rijndael_dec_return
rijndael_enc,rijndael_enc_main,rijndael_enc_data,31369,rijndael_enc_return
huff_dec,huff_dec_main,huff_dec_encoded,419,huff_dec_return
huff_enc,huff_enc_main,huff_enc_plaintext,600,huff_enc_return
gsm_enc,gsm_enc_main,gsm_enc_pcmdata,6400,gsm_enc_return
tmr,main,FUZZ_INPUT,32,trigger_Qemu_break
tacle_rtos,prvStage0,FUZZ_INPUT,604,trigger_Qemu_break
lift,main_lift,FUZZ_INPUT,100,trigger_Qemu_break
waters,main_waters,FUZZ_INPUT,4096,trigger_Qemu_break
watersv2,main_waters,FUZZ_INPUT,4096,trigger_Qemu_break
waters_int,main_waters,FUZZ_INPUT,4096,trigger_Qemu_break
watersv2_int,main_waters,FUZZ_INPUT,4096,trigger_Qemu_break
micro_branchless,main_branchless,FUZZ_INPUT,4,trigger_Qemu_break
micro_int,main_int,FUZZ_INPUT,16,trigger_Qemu_break
micro_longint,main_micro_longint,FUZZ_INPUT,16,trigger_Qemu_break

View File

@ -1,2 +0,0 @@
#!/bin/sh
arm-none-eabi-gcc -ggdb -ffreestanding -nostartfiles -lgcc -T mps2_m3.ld -mcpu=cortex-m3 main.c startup.c -o example.elf

View File

@ -1,38 +0,0 @@
int BREAKPOINT() {
for (;;)
{
}
}
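/* Harness: sort Data in descending order with a quadratic selection-sort-style
 * loop. The extra j-- step whenever the swapped-in value is <= 100 makes the
 * runtime strongly input-dependent, and the final BREAKPOINT() call gives the
 * fuzzer a known address to stop at. */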
int LLVMFuzzerTestOneInput(unsigned int* Data, unsigned int Size) {
//if (Data[3] == 0) {while(1){}} // cause a timeout
for (int i=0; i<Size; i++) {
// if (Data[i] > 0xFFd0 && Data[i] < 0xFFFF) {return 1;} // cause qemu to crash
for (int j=i+1; j<Size; j++) {
if (Data[j] == 0) {continue;}
if (Data[j]>Data[i]) {
int tmp = Data[i];
Data[i]=Data[j];
Data[j]=tmp;
if (Data[i] <= 100) {j--;}
}
}
}
return BREAKPOINT();
}
unsigned int FUZZ_INPUT[] = {
101,201,700,230,860,
234,980,200,340,678,
230,134,900,236,900,
123,800,123,658,607,
246,804,567,568,207,
407,246,678,457,892,
834,456,878,246,699,
854,234,844,290,125,
324,560,852,928,910,
790,853,345,234,586,
};
int main() {
LLVMFuzzerTestOneInput(FUZZ_INPUT, 50);
}

View File

@ -1,143 +0,0 @@
/*
* FreeRTOS V202112.00
* Copyright (C) 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the "Software"), to deal in
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
* the Software, and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
* FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* https://www.FreeRTOS.org
* https://github.com/FreeRTOS
*
*/
MEMORY
{
RAM (xrw) : ORIGIN = 0x00000000, LENGTH = 4M
/* Originally */
/* FLASH (xr) : ORIGIN = 0x00000000, LENGTH = 4M */
/* RAM (xrw) : ORIGIN = 0x20000000, LENGTH = 4M */
}
ENTRY(Reset_Handler)
_Min_Heap_Size = 0x300000 ; /* Required amount of heap. */
_Min_Stack_Size = 0x4000 ; /* Required amount of stack. */
M_VECTOR_RAM_SIZE = (16 + 48) * 4;
_estack = ORIGIN(RAM) + LENGTH(RAM);
SECTIONS
{
.isr_vector :
{
__vector_table = .;
KEEP(*(.isr_vector))
. = ALIGN(4);
} > RAM /* FLASH */
.text :
{
. = ALIGN(4);
*(.text*)
KEEP (*(.init))
KEEP (*(.fini))
KEEP(*(.eh_frame))
*(.rodata*)
. = ALIGN(4);
_etext = .;
} > RAM /* FLASH */
.ARM.extab :
{
. = ALIGN(4);
*(.ARM.extab* .gnu.linkonce.armextab.*)
. = ALIGN(4);
} >RAM /* FLASH */
.ARM :
{
. = ALIGN(4);
__exidx_start = .;
*(.ARM.exidx* .gnu.linkonce.armexidx.*)
__exidx_end = .;
. = ALIGN(4);
} >RAM /* FLASH */
.interrupts_ram :
{
. = ALIGN(4);
__VECTOR_RAM__ = .;
__interrupts_ram_start__ = .;
. += M_VECTOR_RAM_SIZE;
. = ALIGN(4);
__interrupts_ram_end = .;
} > RAM
_sidata = LOADADDR(.data);
.data : /* AT ( _sidata ) */
{
. = ALIGN(4);
_sdata = .;
*(.data*)
. = ALIGN(4);
_edata = .;
} > RAM /* RAM AT > FLASH */
.uninitialized (NOLOAD):
{
. = ALIGN(32);
__uninitialized_start = .;
*(.uninitialized)
KEEP(*(.keep.uninitialized))
. = ALIGN(32);
__uninitialized_end = .;
} > RAM
.bss :
{
. = ALIGN(4);
_sbss = .;
__bss_start__ = _sbss;
*(.bss*)
*(COMMON)
. = ALIGN(4);
_ebss = .;
__bss_end__ = _ebss;
} >RAM
.heap :
{
. = ALIGN(8);
PROVIDE ( end = . );
PROVIDE ( _end = . );
_heap_bottom = .;
. = . + _Min_Heap_Size;
_heap_top = .;
. = . + _Min_Stack_Size;
. = ALIGN(8);
} >RAM
/* Set stack top to end of RAM, and stack limit move down by
* size of stack_dummy section */
__StackTop = ORIGIN(RAM) + LENGTH(RAM);
__StackLimit = __StackTop - _Min_Stack_Size;
PROVIDE(__stack = __StackTop);
/* Check if data + heap + stack exceeds RAM limit */
ASSERT(__StackLimit >= _heap_top, "region RAM overflowed with stack")
}

View File

@ -1,114 +0,0 @@
/*
* FreeRTOS V202112.00
* Copyright (C) 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the "Software"), to deal in
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
* the Software, and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
* FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* https://www.FreeRTOS.org
* https://github.com/FreeRTOS
*
*/
typedef unsigned int uint32_t;
extern int main();
extern uint32_t _estack, _sidata, _sdata, _edata, _sbss, _ebss;
/* Prevent optimization so gcc does not replace code with memcpy */
__attribute__( ( optimize( "O0" ) ) )
__attribute__( ( naked ) )
void Reset_Handler( void )
{
/* set stack pointer */
__asm volatile ( "ldr r0, =_estack" );
__asm volatile ( "mov sp, r0" );
/* copy .data section from flash to RAM */
// Not needed for this example, see linker script
// for( uint32_t * src = &_sidata, * dest = &_sdata; dest < &_edata; )
// {
// *dest++ = *src++;
// }
/* zero out .bss section */
for( uint32_t * dest = &_sbss; dest < &_ebss; )
{
*dest++ = 0;
}
/* jump to board initialisation */
void _start( void );
_start();
}
const uint32_t * isr_vector[] __attribute__( ( section( ".isr_vector" ) ) ) =
{
( uint32_t * ) &_estack,
( uint32_t * ) &Reset_Handler, /* Reset -15 */
0, /* NMI_Handler -14 */
0, /* HardFault_Handler -13 */
0, /* MemManage_Handler -12 */
0, /* BusFault_Handler -11 */
0, /* UsageFault_Handler -10 */
0, /* reserved */
0, /* reserved */
0, /* reserved */
0, /* reserved -6 */
0, /* SVC_Handler -5 */
0, /* DebugMon_Handler -4 */
0, /* reserved */
0, /* PendSV handler -2 */
0, /* SysTick_Handler -1 */
0, /* uart0 receive 0 */
0, /* uart0 transmit */
0, /* uart1 receive */
0, /* uart1 transmit */
0, /* uart 2 receive */
0, /* uart 2 transmit */
0, /* GPIO 0 combined interrupt */
0, /* GPIO 2 combined interrupt */
0, /* Timer 0 */
0, /* Timer 1 */
0, /* Dial Timer */
0, /* SPI0 SPI1 */
0, /* uart overflow 1, 2,3 */
0, /* Ethernet 13 */
};
__attribute__( ( naked ) ) void exit(__attribute__((unused)) int status )
{
/* Force qemu to exit using ARM Semihosting */
__asm volatile (
"mov r1, r0\n"
"cmp r1, #0\n"
"bne .notclean\n"
"ldr r1, =0x20026\n" /* ADP_Stopped_ApplicationExit, a clean exit */
".notclean:\n"
"movs r0, #0x18\n" /* SYS_EXIT */
"bkpt 0xab\n"
"end: b end\n"
);
}
void _start( void )
{
main( );
exit( 0 );
}

View File

@ -1,25 +0,0 @@
#!/usr/bin/env bash
parent_path=$( cd "$(dirname "${BASH_SOURCE[0]}")" ; pwd -P )
cd "$parent_path"
[ -n "$1" -a "$1" != "+" -a -z "$KERNEL" ] && export KERNEL="$1"
[ -n "$2" -a "$2" != "+" -a -z "$FUZZ_MAIN" ] && export FUZZ_MAIN="$2"
[ -n "$3" -a "$3" != "+" -a -z "$FUZZ_INPUT" ] && export FUZZ_INPUT="$3"
[ -n "$4" -a "$4" != "+" -a -z "$FUZZ_INPUT_LEN" ] && export FUZZ_INPUT_LEN="$4"
[ -n "$5" -a "$5" != "+" -a -z "$BREAKPOINT" ] && export BREAKPOINT="$5"
[ -n "$6" -a "$6" != "+" -a -z "$FUZZ_ITERS" ] && export FUZZ_ITERS="$6"
[ -n "$7" -a "$7" != "+" -a -z "$TIME_DUMP" ] && export TIME_DUMP="$7"
[ -n "$8" -a "$8" != "+" -a -z "$CASE_DUMP" ] && export CASE_DUMP="$8"
[ -n "$9" -a "$9" != "+" -a -z "$DO_SHOWMAP" ] && export DO_SHOWMAP="$9"
[ -n "${10}" -a "${10}" != "+" -a -z "$SHOWMAP_TEXTINPUT" ] && export SHOWMAP_TEXTINPUT="${10}"
[ -n "${11}" -a "${11}" != "+" -a -z "$TRACE_DUMP" ] && export TRACE_DUMP="${11}"
[ -z "$FUZZER" ] && export FUZZER=target/debug/fret
set +e
$FUZZER -icount shift=4,align=off,sleep=off -machine mps2-an385 -monitor null -kernel $KERNEL -serial null -nographic -S -semihosting --semihosting-config enable=on,target=native -snapshot -drive if=none,format=qcow2,file=dummy.qcow2
exitcode=$?
if [ "$exitcode" = "101" ]
then
exit 101
else
exit 0
fi

View File

@ -1,344 +0,0 @@
use hashbrown::{hash_map::Entry, HashMap};
use libafl::{
bolts::{
current_nanos,
rands::StdRand,
tuples::{tuple_list},
},
executors::{ExitKind},
fuzzer::{StdFuzzer},
inputs::{BytesInput, HasTargetBytes},
observers::{Observer,VariableMapObserver},
state::{StdState, HasNamedMetadata},
Error,
observers::ObserversTuple, prelude::UsesInput, impl_serdeany,
};
use serde::{Deserialize, Serialize};
use std::{cell::UnsafeCell, cmp::max, env, fs::OpenOptions, io::Write, time::Instant};
use libafl::bolts::tuples::Named;
use libafl_qemu::{
emu,
emu::Emulator,
executor::QemuExecutor,
helper::{QemuHelper, QemuHelperTuple, QemuInstrumentationFilter},
};
use libafl::events::EventFirer;
use libafl::state::HasClientPerfMonitor;
use libafl::inputs::Input;
use libafl::feedbacks::Feedback;
use libafl::SerdeAny;
use libafl::state::HasMetadata;
use libafl::corpus::testcase::Testcase;
use core::{fmt::Debug, time::Duration};
// use libafl::feedbacks::FeedbackState;
// use libafl::state::HasFeedbackStates;
use libafl::bolts::tuples::MatchName;
use std::time::{SystemTime, UNIX_EPOCH};
pub static mut FUZZ_START_TIMESTAMP : SystemTime = UNIX_EPOCH;
//========== Metadata
#[derive(Debug, SerdeAny, Serialize, Deserialize)]
pub struct QemuIcountMetadata {
runtime: u64,
}
/// Metadata for [`QemuClockIncreaseFeedback`]
#[derive(Debug, Serialize, Deserialize, SerdeAny)]
pub struct MaxIcountMetadata {
pub max_icount_seen: u64,
pub name: String,
}
// impl FeedbackState for MaxIcountMetadata
// {
// fn reset(&mut self) -> Result<(), Error> {
// self.max_icount_seen = 0;
// Ok(())
// }
// }
impl Named for MaxIcountMetadata
{
#[inline]
fn name(&self) -> &str {
self.name.as_str()
}
}
impl MaxIcountMetadata
{
/// Create new `MaxIcountMetadata`
#[must_use]
pub fn new(name: &'static str) -> Self {
Self {
max_icount_seen: 0,
name: name.to_string(),
}
}
}
impl Default for MaxIcountMetadata {
fn default() -> Self {
Self::new("MaxClock")
}
}
/// A piece of metadata tracking all icounts
#[derive(Debug, SerdeAny, Serialize, Deserialize)]
pub struct IcHist (pub Vec<(u64, u128)>, pub (u64,u128));
//========== Observer
/// A simple observer that just tracks the runtime of the target.
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct QemuClockObserver {
name: String,
start_tick: u64,
end_tick: u64,
}
impl QemuClockObserver {
/// Creates a new [`QemuClockObserver`] with the given name.
#[must_use]
pub fn new(name: &'static str) -> Self {
Self {
name: name.to_string(),
start_tick: 0,
end_tick: 0,
}
}
/// Gets the runtime for the last execution of this target.
#[must_use]
pub fn last_runtime(&self) -> u64 {
self.end_tick - self.start_tick
}
}
impl<S> Observer<S> for QemuClockObserver
where
S: UsesInput + HasMetadata,
{
fn pre_exec(&mut self, _state: &mut S, _input: &S::Input) -> Result<(), Error> {
// Only remember the pre-run ticks if persistent mode is used
#[cfg(not(feature = "snapshot_restore"))]
unsafe {
self.start_tick=emu::icount_get_raw();
self.end_tick=self.start_tick;
}
// unsafe {
// println!("clock pre {}",emu::icount_get_raw());
// }
Ok(())
}
fn post_exec(&mut self, _state: &mut S, _input: &S::Input, _exit_kind: &ExitKind) -> Result<(), Error> {
unsafe { self.end_tick = emu::icount_get_raw() };
// println!("clock post {}", self.end_tick);
// println!("Number of Ticks: {} <- {} {}",self.end_tick - self.start_tick, self.end_tick, self.start_tick);
let metadata =_state.metadata_mut();
let hist = metadata.get_mut::<IcHist>();
let timestamp = SystemTime::now().duration_since(unsafe {FUZZ_START_TIMESTAMP}).unwrap().as_millis();
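// Record (icount delta, wall-clock ms since fuzzing start) in the history;
// once 100 entries accumulate they are flushed to TIME_DUMP, or dropped if no dump file is set.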
match hist {
None => {
metadata.insert(IcHist(vec![(self.end_tick - self.start_tick, timestamp)],
(self.end_tick - self.start_tick, timestamp)));
}
Some(v) => {
v.0.push((self.end_tick - self.start_tick, timestamp));
if (v.1.0 < self.end_tick-self.start_tick) {
v.1 = (self.end_tick - self.start_tick, timestamp);
}
if v.0.len() >= 100 {
if let Ok(td) = env::var("TIME_DUMP") {
let mut file = OpenOptions::new()
.read(true)
.write(true)
.create(true)
.append(true)
.open(td).expect("Could not open timedump");
let newv : Vec<(u64, u128)> = Vec::with_capacity(100);
for i in std::mem::replace(&mut v.0, newv).into_iter() {
writeln!(file, "{},{}", i.0, i.1).expect("Write to dump failed");
}
} else {
// If we don't write out values we don't need to remember them at all
v.0.clear();
}
}
}
}
Ok(())
}
}
impl Named for QemuClockObserver {
#[inline]
fn name(&self) -> &str {
&self.name
}
}
impl Default for QemuClockObserver {
fn default() -> Self {
Self {
name: String::from("clock"),
start_tick: 0,
end_tick: 0,
}
}
}
//========== Feedback
/// Nop feedback that annotates the execution time of a new testcase.
/// On its own this feedback never reports a testcase as interesting (use it with an OR).
/// The execution time is taken from the given [`QemuClockObserver`].
#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct ClockTimeFeedback {
exec_time: Option<Duration>,
name: String,
}
impl<S> Feedback<S> for ClockTimeFeedback
where
S: UsesInput + HasClientPerfMonitor + HasMetadata,
{
#[allow(clippy::wrong_self_convention)]
fn is_interesting<EM, OT>(
&mut self,
_state: &mut S,
_manager: &mut EM,
_input: &S::Input,
observers: &OT,
_exit_kind: &ExitKind,
) -> Result<bool, Error>
where
EM: EventFirer<State = S>,
OT: ObserversTuple<S>,
{
// TODO Replace with match_name_type when stable
let observer = observers.match_name::<QemuClockObserver>(self.name()).unwrap();
self.exec_time = Some(Duration::from_nanos(observer.last_runtime() << 4)); // Assume a somewhat realistic clock multiplier; the exact value does not matter
Ok(false)
}
/// Append to the testcase the generated metadata in case of a new corpus item
#[inline]
fn append_metadata(
&mut self,
_state: &mut S,
testcase: &mut Testcase<S::Input>,
) -> Result<(), Error> {
*testcase.exec_time_mut() = self.exec_time;
self.exec_time = None;
Ok(())
}
/// Discard the stored metadata in case that the testcase is not added to the corpus
#[inline]
fn discard_metadata(&mut self, _state: &mut S, _input: &S::Input) -> Result<(), Error> {
self.exec_time = None;
Ok(())
}
}
impl Named for ClockTimeFeedback {
#[inline]
fn name(&self) -> &str {
self.name.as_str()
}
}
impl ClockTimeFeedback {
/// Creates a new [`ClockTimeFeedback`], deciding if the value of the [`QemuClockObserver`] with the given `name` is interesting.
#[must_use]
pub fn new(name: &'static str) -> Self {
Self {
exec_time: None,
name: name.to_string(),
}
}
/// Creates a new [`ClockTimeFeedback`], deciding if the value of the given [`QemuClockObserver`] is interesting.
#[must_use]
pub fn new_with_observer(observer: &QemuClockObserver) -> Self {
Self {
exec_time: None,
name: observer.name().to_string(),
}
}
}
/// A [`Feedback`] rewarding increasing the execution cycles on Qemu.
#[derive(Debug)]
pub struct QemuClockIncreaseFeedback {
name: String,
}
impl<S> Feedback<S> for QemuClockIncreaseFeedback
where
S: UsesInput + HasNamedMetadata + HasClientPerfMonitor + Debug,
{
fn is_interesting<EM, OT>(
&mut self,
state: &mut S,
_manager: &mut EM,
_input: &S::Input,
_observers: &OT,
_exit_kind: &ExitKind,
) -> Result<bool, Error>
where
EM: EventFirer<State = S>,
OT: ObserversTuple<S>,
{
let observer = _observers.match_name::<QemuClockObserver>("clock")
.expect("QemuClockObserver not found");
let clock_state = state
.named_metadata_mut()
.get_mut::<MaxIcountMetadata>(&self.name)
.unwrap();
if observer.last_runtime() > clock_state.max_icount_seen {
// println!("Clock improving {}",observer.last_runtime());
clock_state.max_icount_seen = observer.last_runtime();
return Ok(true);
}
Ok(false)
}
/// Append to the testcase the generated metadata in case of a new corpus item
#[inline]
fn append_metadata(&mut self, _state: &mut S, testcase: &mut Testcase<S::Input>) -> Result<(), Error> {
// testcase.metadata_mut().insert(QemuIcountMetadata{runtime: self.last_runtime});
Ok(())
}
/// Discard the stored metadata in case that the testcase is not added to the corpus
#[inline]
fn discard_metadata(&mut self, _state: &mut S, _input: &S::Input) -> Result<(), Error> {
Ok(())
}
}
impl Named for QemuClockIncreaseFeedback {
#[inline]
fn name(&self) -> &str {
&self.name
}
}
impl QemuClockIncreaseFeedback {
/// Creates a new [`QemuClockIncreaseFeedback`]
#[must_use]
pub fn new(name: &'static str) -> Self {
Self {name: String::from(name)}
}
}
impl Default for QemuClockIncreaseFeedback {
fn default() -> Self {
Self::new("MaxClock")
}
}

View File

@ -1,715 +0,0 @@
//! A fuzzer using qemu in systemmode for binary-only coverage of kernels
//!
use core::time::Duration;
use std::{env, path::PathBuf, process::{self, abort}, io::{Read, Write}, fs::{self, OpenOptions}, cmp::{min, max}, mem::transmute_copy, collections::btree_map::Range};
use libafl::{
bolts::{
core_affinity::Cores,
current_nanos,
launcher::Launcher,
rands::StdRand,
shmem::{ShMemProvider, StdShMemProvider},
tuples::tuple_list,
AsSlice,
},
corpus::{Corpus, InMemoryCorpus, OnDiskCorpus},
events::EventConfig,
executors::{ExitKind, TimeoutExecutor},
feedback_or,
feedback_or_fast,
feedbacks::{CrashFeedback, MaxMapFeedback, TimeoutFeedback},
fuzzer::{Fuzzer, StdFuzzer},
inputs::{BytesInput, HasTargetBytes},
monitors::MultiMonitor,
observers::{VariableMapObserver},
schedulers::{IndexesLenTimeMinimizerScheduler, QueueScheduler},
state::{HasCorpus, StdState, HasMetadata, HasNamedMetadata},
Error,
prelude::{SimpleMonitor, SimpleEventManager, AsMutSlice, RandBytesGenerator, Generator, SimpleRestartingEventManager, HasBytesVec, minimizer::TopRatedsMetadata, havoc_mutations, StdScheduledMutator, HitcountsMapObserver}, Evaluator, stages::StdMutationalStage,
};
use libafl_qemu::{
edges, edges::QemuEdgeCoverageHelper, elf::EasyElf, emu::Emulator, GuestPhysAddr, QemuExecutor,
QemuHooks, Regs, QemuInstrumentationFilter, GuestAddr,
emu::libafl_qemu_set_native_breakpoint, emu::libafl_qemu_remove_native_breakpoint,
};
use rand::{SeedableRng, StdRng, Rng};
use crate::{
clock::{QemuClockObserver, ClockTimeFeedback, QemuClockIncreaseFeedback, IcHist, FUZZ_START_TIMESTAMP},
qemustate::QemuStateRestoreHelper,
systemstate::{helpers::QemuSystemStateHelper, observers::QemuSystemStateObserver, feedbacks::{DumpSystraceFeedback, NovelSystemStateFeedback}, graph::{SysMapFeedback, SysGraphFeedbackState, GraphMaximizerCorpusScheduler}, schedulers::{LongestTraceScheduler, GenerationScheduler}}, worst::{TimeMaximizerCorpusScheduler, ExecTimeIncFeedback, TimeStateMaximizerCorpusScheduler, AlwaysTrueFeedback},
mutational::MyStateStage,
mutational::{MINIMUM_INTER_ARRIVAL_TIME},
};
use std::time::{SystemTime, UNIX_EPOCH};
pub static mut RNG_SEED: u64 = 1;
pub static mut LIMIT : u32 = u32::MAX;
pub const MAX_NUM_INTERRUPT: usize = 32;
pub const DO_NUM_INTERRUPT: usize = 32;
pub static mut MAX_INPUT_SIZE: usize = 32;
/// Read ELF program headers to resolve physical load addresses.
fn virt2phys(vaddr: GuestPhysAddr, tab: &EasyElf) -> GuestPhysAddr {
let ret;
for i in &tab.goblin().program_headers {
if i.vm_range().contains(&vaddr.try_into().unwrap()) {
ret = vaddr - TryInto::<GuestPhysAddr>::try_into(i.p_vaddr).unwrap()
+ TryInto::<GuestPhysAddr>::try_into(i.p_paddr).unwrap();
return ret - (ret % 2);
}
}
return vaddr;
}
extern "C" {
static mut libafl_interrupt_offsets : [u32; 32];
static mut libafl_num_interrupts : usize;
}
pub fn fuzz() {
unsafe {FUZZ_START_TIMESTAMP = SystemTime::now();}
let mut starttime = std::time::Instant::now();
if let Ok(s) = env::var("FUZZ_SIZE") {
str::parse::<usize>(&s).expect("FUZZ_SIZE was not a number");
};
// Hardcoded parameters
let timeout = Duration::from_secs(10);
let broker_port = 1337;
let cores = Cores::from_cmdline("1").unwrap();
let corpus_dirs = [PathBuf::from("./corpus")];
let objective_dir = PathBuf::from("./crashes");
let mut elf_buffer = Vec::new();
let elf = EasyElf::from_file(
env::var("KERNEL").expect("KERNEL env not set"),
&mut elf_buffer,
)
.unwrap();
// the main address where the fuzzer starts
// if this is set for FreeRTOS, it influences where the data has to be written,
// since the startup routine copies the data segment to its virtual address
let main_addr = elf
.resolve_symbol(&env::var("FUZZ_MAIN").unwrap_or_else(|_| "FUZZ_MAIN".to_owned()), 0);
if let Some(main_addr) = main_addr {
println!("main address = {:#x}", main_addr);
}
let input_addr = elf
.resolve_symbol(
&env::var("FUZZ_INPUT").unwrap_or_else(|_| "FUZZ_INPUT".to_owned()),
0,
)
.expect("Symbol or env FUZZ_INPUT not found") as GuestPhysAddr;
let input_addr = virt2phys(input_addr,&elf) as GuestPhysAddr;
println!("FUZZ_INPUT @ {:#x}", input_addr);
let test_length_ptr = elf
.resolve_symbol("FUZZ_LENGTH", 0).map(|x| x as GuestPhysAddr);
let test_length_ptr = Option::map_or(test_length_ptr, None, |x| Some(virt2phys(x,&elf)));
let input_counter_ptr = elf
.resolve_symbol(&env::var("FUZZ_POINTER").unwrap_or_else(|_| "FUZZ_POINTER".to_owned()), 0)
.map(|x| x as GuestPhysAddr);
let input_counter_ptr = Option::map_or(input_counter_ptr, None, |x| Some(virt2phys(x,&elf)));
#[cfg(feature = "systemstate")]
let curr_tcb_pointer = elf // loads to the address specified in elf, without respecting program headers
.resolve_symbol("pxCurrentTCB", 0)
.expect("Symbol pxCurrentTCBC not found");
// let curr_tcb_pointer = virt2phys(curr_tcb_pointer,&elf);
#[cfg(feature = "systemstate")]
println!("TCB pointer at {:#x}", curr_tcb_pointer);
#[cfg(feature = "systemstate")]
let task_queue_addr = elf
.resolve_symbol("pxReadyTasksLists", 0)
.expect("Symbol pxReadyTasksLists not found");
// let task_queue_addr = virt2phys(task_queue_addr,&elf.goblin());
#[cfg(feature = "systemstate")]
println!("Task Queue at {:#x}", task_queue_addr);
#[cfg(feature = "systemstate")]
let svh = elf
.resolve_symbol("xPortPendSVHandler", 0)
.expect("Symbol xPortPendSVHandler not found");
// let svh=virt2phys(svh, &elf);
// let svh = elf
// .resolve_symbol("vPortEnterCritical", 0)
// .expect("Symbol vPortEnterCritical not found");
#[cfg(feature = "systemstate")]
let app_start = elf
.resolve_symbol("__APP_CODE_START__", 0)
.expect("Symbol __APP_CODE_START__ not found");
#[cfg(feature = "systemstate")]
let app_end = elf
.resolve_symbol("__APP_CODE_END__", 0)
.expect("Symbol __APP_CODE_END__ not found");
#[cfg(feature = "systemstate")]
let app_range = app_start..app_end;
#[cfg(feature = "systemstate")]
dbg!(app_range.clone());
let breakpoint = elf
.resolve_symbol(
&env::var("BREAKPOINT").unwrap_or_else(|_| "BREAKPOINT".to_owned()),
0,
)
.expect("Symbol or env BREAKPOINT not found");
println!("Breakpoint address = {:#x}", breakpoint);
unsafe {
libafl_num_interrupts = 0;
}
if let Ok(input_len) = env::var("FUZZ_INPUT_LEN") {
unsafe {MAX_INPUT_SIZE = str::parse::<usize>(&input_len).expect("FUZZ_INPUT_LEN was not a number");}
}
unsafe {dbg!(MAX_INPUT_SIZE);}
if let Ok(seed) = env::var("SEED_RANDOM") {
unsafe {RNG_SEED = str::parse::<u64>(&seed).expect("SEED_RANDOM must be an integer.");}
}
let mut run_client = |state: Option<_>, mut mgr, _core_id| {
// Initialize QEMU
let args: Vec<String> = env::args().collect();
let env: Vec<(String, String)> = env::vars().collect();
let emu = Emulator::new(&args, &env);
if let Some(main_addr) = main_addr {
unsafe {
libafl_qemu_set_native_breakpoint(main_addr);
emu.run();
libafl_qemu_remove_native_breakpoint(main_addr);
}
}
unsafe { libafl_qemu_set_native_breakpoint(breakpoint); }// BREAKPOINT
// The wrapped harness function, calling out to the LLVM-style harness
let mut harness = |input: &BytesInput| {
let target = input.target_bytes();
let mut buf = target.as_slice();
let mut len = buf.len();
unsafe {
#[cfg(feature = "fuzz_int")]
{
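// Interpret the leading bytes of the input as up to DO_NUM_INTERRUPT little-endian
// u32 interrupt arrival times (reduced modulo LIMIT); the remainder is used as the
// regular fuzz input written to FUZZ_INPUT.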
let mut start_tick : u32 = 0;
for i in 0..DO_NUM_INTERRUPT {
let mut t : [u8; 4] = [0,0,0,0];
if len > (i+1)*4 {
for j in 0 as usize..4 as usize {
t[j]=buf[i*4+j];
}
if i == 0 || true {
unsafe {start_tick = u32::from_le_bytes(t) % LIMIT;}
} else {
start_tick = u32::saturating_add(start_tick,max(MINIMUM_INTER_ARRIVAL_TIME,u32::from_le_bytes(t)));
}
libafl_interrupt_offsets[i] = start_tick;
libafl_num_interrupts = i+1;
}
}
if buf.len() > libafl_num_interrupts*4 {
buf = &buf[libafl_num_interrupts*4..];
len = buf.len();
}
// println!("Load: {:?}", libafl_interrupt_offsets[0..libafl_num_interrupts].to_vec());
}
if len > MAX_INPUT_SIZE {
buf = &buf[0..MAX_INPUT_SIZE];
len = MAX_INPUT_SIZE;
}
emu.write_phys_mem(input_addr, buf);
if let Some(s) = test_length_ptr {
emu.write_phys_mem(s as u64, &len.to_le_bytes())
}
emu.run();
// If the execution stops at any point other than the designated breakpoint (e.g. a breakpoint on a panic method) we consider it a crash
let mut pcs = (0..emu.num_cpus())
.map(|i| emu.cpu_from_index(i))
.map(|cpu| -> Result<u32, String> { cpu.read_reg(Regs::Pc) });
match pcs
.find(|pc| (breakpoint..breakpoint + 5).contains(pc.as_ref().unwrap_or(&0)))
{
Some(_) => ExitKind::Ok,
None => ExitKind::Crash,
}
}
};
// Create an observation channel using the coverage map
let edges = unsafe { &mut edges::EDGES_MAP };
let edges_counter = unsafe { &mut edges::MAX_EDGES_NUM };
let edges_observer = VariableMapObserver::new("edges", edges, edges_counter);
#[cfg(feature = "observer_hitcounts")]
let edges_observer = HitcountsMapObserver::new(edges_observer);
// Create an observation channel to keep track of the execution time
let clock_time_observer = QemuClockObserver::new("clocktime");
let systemstate_observer = QemuSystemStateObserver::new();
// Feedback to rate the interestingness of an input
// This one is composed by two Feedbacks in OR
let mut feedback = feedback_or!(
// Time feedback, this one does not need a feedback state
ClockTimeFeedback::new_with_observer(&clock_time_observer)
);
#[cfg(feature = "feed_genetic")]
let mut feedback = feedback_or!(
feedback,
AlwaysTrueFeedback::new()
);
#[cfg(feature = "feed_afl")]
let mut feedback = feedback_or!(
feedback,
// New maximization map feedback linked to the edges observer and the feedback state
MaxMapFeedback::new_tracking(&edges_observer, true, true)
);
#[cfg(feature = "feed_longest")]
let mut feedback = feedback_or!(
// afl feedback needs to be activated first for MapIndexesMetadata
feedback,
// Feedback to reward any input which increases the execution time
ExecTimeIncFeedback::new()
);
#[cfg(all(feature = "systemstate",not(any(feature = "feed_systemgraph",feature = "feed_systemtrace"))))]
let mut feedback = feedback_or!(
feedback,
DumpSystraceFeedback::with_dump(env::var("TRACE_DUMP").ok().map(PathBuf::from))
);
#[cfg(feature = "feed_systemtrace")]
let mut feedback = feedback_or!(
feedback,
// AlwaysTrueFeedback::new(),
NovelSystemStateFeedback::default()
);
#[cfg(feature = "feed_systemgraph")]
let mut feedback = feedback_or!(
feedback,
SysMapFeedback::default()
);
// A feedback to choose if an input is a solution or not
let mut objective = feedback_or_fast!(CrashFeedback::new(), TimeoutFeedback::new());
// If not restarting, create a State from scratch
let mut state = state.unwrap_or_else(|| {
StdState::new(
// RNG
unsafe {StdRand::with_seed(RNG_SEED) },
// Corpus that will be evolved, we keep it in memory for performance
InMemoryCorpus::new(),
// Corpus in which we store solutions (crashes in this example),
// on disk so the user can get them after stopping the fuzzer
OnDiskCorpus::new(objective_dir.clone()).unwrap(),
// States of the feedbacks.
// The feedbacks can report the data that should persist in the State.
&mut feedback,
// Same for objective feedbacks
&mut objective,
)
.unwrap()
});
// A minimization+queue policy to get testcasess from the corpus
#[cfg(not(any(feature = "feed_afl",feature = "feed_systemgraph",feature = "feed_systemtrace", feature = "feed_genetic")))]
let scheduler = QueueScheduler::new();
#[cfg(all(feature = "feed_afl",not(any(feature = "feed_systemgraph",feature = "feed_systemtrace"))))]
let scheduler = TimeMaximizerCorpusScheduler::new(QueueScheduler::new());
#[cfg(feature = "feed_systemtrace")]
let scheduler = LongestTraceScheduler::new(TimeStateMaximizerCorpusScheduler::new(QueueScheduler::new()));
#[cfg(feature = "feed_systemgraph")]
let scheduler = GraphMaximizerCorpusScheduler::new(QueueScheduler::new());
#[cfg(feature = "feed_genetic")]
let scheduler = GenerationScheduler::new();
// A fuzzer with feedbacks and a corpus scheduler
let mut fuzzer = StdFuzzer::new(scheduler, feedback, objective);
#[cfg(not(feature = "systemstate"))]
let qhelpers = tuple_list!(
QemuEdgeCoverageHelper::default(),
QemuStateRestoreHelper::new()
);
#[cfg(feature = "systemstate")]
let qhelpers = tuple_list!(
QemuEdgeCoverageHelper::default(),
QemuStateRestoreHelper::new(),
QemuSystemStateHelper::new(svh,curr_tcb_pointer,task_queue_addr,input_counter_ptr,app_range.clone())
);
let mut hooks = QemuHooks::new(&emu,qhelpers);
#[cfg(not(feature = "systemstate"))]
let observer_list = tuple_list!(edges_observer, clock_time_observer);
#[cfg(feature = "systemstate")]
let observer_list = tuple_list!(edges_observer, clock_time_observer, systemstate_observer);
// Create a QEMU in-process executor
let executor = QemuExecutor::new(
&mut hooks,
&mut harness,
observer_list,
&mut fuzzer,
&mut state,
&mut mgr,
)
.expect("Failed to create QemuExecutor");
// Wrap the executor to keep track of the timeout
let mut executor = TimeoutExecutor::new(executor, timeout);
let mutations = havoc_mutations();
// Set up a havoc mutator with a mutational stage
let mutator = StdScheduledMutator::new(mutations);
// #[cfg(not(all(feature = "feed_systemtrace", feature = "fuzz_int")))]
// let mut stages = tuple_list!(StdMutationalStage::new(mutator));
// #[cfg(all(feature = "feed_systemtrace", feature = "fuzz_int"))]
#[cfg(feature = "fuzz_int")]
let mut stages = tuple_list!(StdMutationalStage::new(mutator),MyStateStage::new());
#[cfg(not(feature = "fuzz_int"))]
let mut stages = tuple_list!(StdMutationalStage::new(mutator));
if env::var("DO_SHOWMAP").is_ok() {
let s = &env::var("DO_SHOWMAP").unwrap();
let show_input = if s=="-" {
let mut buf = Vec::<u8>::new();
std::io::stdin().read_to_end(&mut buf).expect("Could not read Stdin");
buf
} else if s=="$" {
env::var("SHOWMAP_TEXTINPUT").expect("SHOWMAP_TEXTINPUT not set").as_bytes().to_owned()
} else {
fs::read(s).expect("Input file for DO_SHOWMAP can not be read")
};
fuzzer.evaluate_input(&mut state, &mut executor, &mut mgr, BytesInput::new(show_input))
.unwrap();
if let Ok(td) = env::var("TIME_DUMP") {
let mut file = OpenOptions::new()
.read(true)
.write(true)
.create(true)
.append(true)
.open(td).expect("Could not open timedump");
if let Some(ichist) = state.metadata_mut().get_mut::<IcHist>() {
for i in ichist.0.drain(..) {
writeln!(file, "{},{}", i.0, i.1).expect("Write to dump failed");
}
}
}
} else {
if let Ok(_) = env::var("SEED_RANDOM") {
unsafe {
let mut rng = StdRng::seed_from_u64(RNG_SEED);
for i in 0..100 {
let inp = BytesInput::new(vec![rng.gen::<u8>(); MAX_INPUT_SIZE]);
fuzzer.evaluate_input(&mut state, &mut executor, &mut mgr, inp).unwrap();
}
}
}
else if let Ok(sf) = env::var("SEED_DIR") {
state
.load_initial_inputs(&mut fuzzer, &mut executor, &mut mgr, &[PathBuf::from(&sf)])
.unwrap_or_else(|_| {
println!("Failed to load initial corpus at {:?}", &corpus_dirs);
process::exit(0);
});
println!("We imported {} inputs from seedfile.", state.corpus().count());
} else if state.corpus().count() < 1 {
state
.load_initial_inputs(&mut fuzzer, &mut executor, &mut mgr, &corpus_dirs)
.unwrap_or_else(|_| {
println!("Failed to load initial corpus at {:?}", &corpus_dirs);
process::exit(0);
});
println!("We imported {} inputs from disk.", state.corpus().count());
}
match env::var("FUZZ_ITERS") {
Err(_) => {
fuzzer
.fuzz_loop(&mut stages, &mut executor, &mut state, &mut mgr)
.unwrap();
},
Ok(t) => {
println!("Iterations {}",t);
let num = str::parse::<u64>(&t).expect("FUZZ_ITERS was not a number");
if let Ok(s) = env::var("FUZZ_RANDOM") { unsafe {
if s.contains("watersv2_int") {
println!("V2");
LIMIT=7000000;
} else {
println!("V1");
LIMIT=5000000;
}
println!("Random Fuzzing, ignore corpus");
// let mut generator = RandBytesGenerator::new(MAX_INPUT_SIZE);
let target_duration = Duration::from_secs(num);
let start_time = std::time::Instant::now();
let mut rng = StdRng::seed_from_u64(RNG_SEED);
while start_time.elapsed() < target_duration {
// let inp = generator.generate(&mut state).unwrap();
// libafl's generator is too slow
let inp = BytesInput::new(vec![rng.gen::<u8>(); MAX_INPUT_SIZE]);
fuzzer.evaluate_input(&mut state, &mut executor, &mut mgr, inp).unwrap();
}
}} else {
// fuzzer
// .fuzz_loop_for_duration(&mut stages, &mut executor, &mut state, &mut mgr, Duration::from_secs(num))
// .unwrap();
fuzzer
.fuzz_loop_until(&mut stages, &mut executor, &mut state, &mut mgr, starttime.checked_add(Duration::from_secs(num)).unwrap())
.unwrap();
#[cfg(feature = "run_until_saturation")]
{
{
let mut dumper = |marker : String| {
if let Ok(td) = env::var("TIME_DUMP") {
let mut file = OpenOptions::new()
.read(true)
.write(true)
.create(true)
.append(true)
.open(td).expect("Could not open timedump");
if let Some(ichist) = state.metadata_mut().get_mut::<IcHist>() {
for i in ichist.0.drain(..) {
writeln!(file, "{},{}", i.0, i.1).expect("Write to dump failed");
}
}
}
if let Ok(td) = env::var("CASE_DUMP") {
println!("Dumping worst case to {:?}", td);
let corpus = state.corpus();
let mut worst = Duration::new(0,0);
let mut worst_input = None;
for i in 0..corpus.count() {
let tc = corpus.get(i).expect("Could not get element from corpus").borrow();
if worst < tc.exec_time().expect("Testcase missing duration") {
worst_input = Some(tc.input().as_ref().unwrap().bytes().to_owned());
worst = tc.exec_time().expect("Testcase missing duration");
}
}
match worst_input {
Some(wi) => {
// let cd = format!("{}.case",&td);
let mut cd = td.clone();
cd.push_str(&marker);
fs::write(&cd,wi).expect("Failed to write worst corpus element");
},
None => (),
}
#[cfg(feature = "feed_systemgraph")]
{
let mut gd = String::from(&td);
gd.push_str(&format!(".graph{}", marker));
if let Some(md) = state.named_metadata_mut().get_mut::<SysGraphFeedbackState>("SysMap") {
fs::write(&gd,ron::to_string(&md).expect("Failed to serialize graph")).expect("Failed to write graph");
}
}
{
let mut gd = String::from(&td);
if let Some(md) = state.metadata_mut().get_mut::<TopRatedsMetadata>() {
let mut uniq: Vec<usize> = md.map.values().map(|x| x.clone()).collect();
uniq.sort();
uniq.dedup();
gd.push_str(&format!(".{}.toprated{}", uniq.len(), marker));
fs::write(&gd,ron::to_string(&md.map).expect("Failed to serialize metadata")).expect("Failed to write graph");
}
}
}
};
dumper(format!(".iter_{}",t));
}
println!("Start running until saturation");
let mut last = state.metadata().get::<IcHist>().unwrap().1;
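// Keep fuzzing in 30-second slices until the best observed icount has not
// improved for three hours (10800 s) of wall-clock time.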
while SystemTime::now().duration_since(unsafe {FUZZ_START_TIMESTAMP}).unwrap().as_millis() < last.1 + Duration::from_secs(10800).as_millis() {
starttime=starttime.checked_add(Duration::from_secs(30)).unwrap();
fuzzer
.fuzz_loop_until(&mut stages, &mut executor, &mut state, &mut mgr, starttime)
.unwrap();
let after = state.metadata().get::<IcHist>().unwrap().1;
if after.0 > last.0 {
last=after;
}
if let Ok(td) = env::var("CASE_DUMP") {
println!("Dumping worst case to {:?}", td);
let corpus = state.corpus();
let mut worst = Duration::new(0,0);
let mut worst_input = None;
for i in 0..corpus.count() {
let tc = corpus.get(i).expect("Could not get element from corpus").borrow();
if worst < tc.exec_time().expect("Testcase missing duration") {
worst_input = Some(tc.input().as_ref().unwrap().bytes().to_owned());
worst = tc.exec_time().expect("Testcase missing duration");
}
}
match worst_input {
Some(wi) => {
// let cd = format!("{}.case",&td);
let cd = td.clone();
fs::write(&cd,wi).expect("Failed to write worst corpus element");
},
None => (),
}
#[cfg(feature = "feed_systemgraph")]
{
let mut gd = String::from(&td);
gd.push_str(".graph" );
if let Some(md) = state.named_metadata_mut().get_mut::<SysGraphFeedbackState>("SysMap") {
fs::write(&gd,ron::to_string(&md).expect("Failed to serialize graph")).expect("Failed to write graph");
}
}
{
let mut gd = String::from(&td);
if let Some(md) = state.metadata_mut().get_mut::<TopRatedsMetadata>() {
let mut uniq: Vec<usize> = md.map.values().map(|x| x.clone()).collect();
uniq.sort();
uniq.dedup();
gd.push_str(&format!(".{}.toprated", uniq.len()));
fs::write(&gd,ron::to_string(&md.map).expect("Failed to serialize metadata")).expect("Failed to write graph");
}
}
}
}
}
}
if let Ok(td) = env::var("TIME_DUMP") {
let mut file = OpenOptions::new()
.read(true)
.write(true)
.create(true)
.append(true)
.open(td).expect("Could not open timedump");
if let Some(ichist) = state.metadata_mut().get_mut::<IcHist>() {
for i in ichist.0.drain(..) {
writeln!(file, "{},{}", i.0, i.1).expect("Write to dump failed");
}
}
}
if let Ok(td) = env::var("CASE_DUMP") {
println!("Dumping worst case to {:?}", td);
let corpus = state.corpus();
let mut worst = Duration::new(0,0);
let mut worst_input = None;
for i in 0..corpus.count() {
let tc = corpus.get(i).expect("Could not get element from corpus").borrow();
if worst < tc.exec_time().expect("Testcase missing duration") {
worst_input = Some(tc.input().as_ref().unwrap().bytes().to_owned());
worst = tc.exec_time().expect("Testcase missing duration");
}
}
match worst_input {
Some(wi) => {
// let cd = format!("{}.case",&td);
let cd = td.clone();
fs::write(&cd,wi).expect("Failed to write worst corpus element");
},
None => (),
}
#[cfg(feature = "feed_systemgraph")]
{
let mut gd = String::from(&td);
gd.push_str(".graph");
if let Some(md) = state.named_metadata_mut().get_mut::<SysGraphFeedbackState>("SysMap") {
fs::write(&gd,ron::to_string(&md).expect("Failed to serialize graph")).expect("Failed to write graph");
}
}
{
let mut gd = String::from(&td);
if let Some(md) = state.metadata_mut().get_mut::<TopRatedsMetadata>() {
let mut uniq: Vec<usize> = md.map.values().map(|x| x.clone()).collect();
uniq.sort();
uniq.dedup();
gd.push_str(&format!(".{}.toprated", uniq.len()));
fs::write(&gd,ron::to_string(&md.map).expect("Failed to serialize metadata")).expect("Failed to write graph");
}
}
}
},
}
}
#[cfg(not(feature = "singlecore"))]
return Ok(());
};
// Special case where no fuzzing happens, but standard input is dumped
if let Ok(input_dump) = env::var("DUMP_SEED") {
// Initialize QEMU
let args: Vec<String> = env::args().collect();
let env: Vec<(String, String)> = env::vars().collect();
let emu = Emulator::new(&args, &env);
if let Some(main_addr) = main_addr {
unsafe { libafl_qemu_set_native_breakpoint(main_addr); }// BREAKPOINT
}
unsafe {
emu.run();
let mut buf = [0u8].repeat(MAX_INPUT_SIZE);
emu.read_phys_mem(input_addr, buf.as_mut_slice());
let dir = env::var("SEED_DIR").map_or("./corpus".to_string(), |x| x);
let filename = if input_dump == "" {"input"} else {&input_dump};
println!("Dumping input to: {}/{}",&dir,filename);
fs::write(format!("{}/{}",&dir,filename), buf).expect("could not write input dump");
}
return
}
#[cfg(feature = "singlecore")]
{
let monitor = SimpleMonitor::new(|s| println!("{}", s));
#[cfg(not(feature = "restarting"))]
{
let mgr = SimpleEventManager::new(monitor);
run_client(None, mgr, 0);
}
#[cfg(feature = "restarting")]
{
let mut shmem_provider = StdShMemProvider::new().unwrap();
let (state, mut mgr) = match SimpleRestartingEventManager::launch(monitor, &mut shmem_provider)
{
// The restarting state will spawn the same process again as child, then restarted it each time it crashes.
Ok(res) => res,
Err(err) => match err {
Error::ShuttingDown => {
return;
}
_ => {
panic!("Failed to setup the restarter: {}", err);
}
},
};
run_client(state, mgr, 0);
}
}
// else -> multicore
#[cfg(not(feature = "singlecore"))]
{
// The shared memory allocator
let shmem_provider = StdShMemProvider::new().expect("Failed to init shared memory");
// The stats reporter for the broker
let monitor = MultiMonitor::new(|s| println!("{}", s));
// Build and run a Launcher
match Launcher::builder()
.shmem_provider(shmem_provider)
.broker_port(broker_port)
.configuration(EventConfig::from_build_id())
.monitor(monitor)
.run_client(&mut run_client)
.cores(&cores)
// .stdout_file(Some("/dev/null"))
.build()
.launch()
{
Ok(()) => (),
Err(Error::ShuttingDown) => println!("Fuzzing stopped by user. Good bye."),
Err(err) => panic!("Failed to run launcher: {:?}", err),
}
}
}

View File

@ -1,13 +0,0 @@
#![feature(is_sorted)]
#[cfg(target_os = "linux")]
mod fuzzer;
#[cfg(target_os = "linux")]
mod clock;
#[cfg(target_os = "linux")]
mod qemustate;
#[cfg(target_os = "linux")]
pub mod systemstate;
#[cfg(target_os = "linux")]
mod mutational;
#[cfg(target_os = "linux")]
mod worst;

View File

@ -1,24 +0,0 @@
#![feature(is_sorted)]
//! A libfuzzer-like fuzzer using qemu for binary-only coverage
#[cfg(target_os = "linux")]
mod fuzzer;
#[cfg(target_os = "linux")]
mod clock;
#[cfg(target_os = "linux")]
mod qemustate;
#[cfg(target_os = "linux")]
mod systemstate;
#[cfg(target_os = "linux")]
mod worst;
#[cfg(target_os = "linux")]
mod mutational;
#[cfg(target_os = "linux")]
pub fn main() {
fuzzer::fuzz();
}
#[cfg(not(target_os = "linux"))]
pub fn main() {
panic!("qemu-user and libafl_qemu is only supported on linux!");
}

View File

@ -1,240 +0,0 @@
//! The [`MutationalStage`] is the default stage used during fuzzing.
//! For the current input, it will perform a range of random mutations, and then run them in the executor.
use core::marker::PhantomData;
use std::cmp::{max, min};
use libafl::{
bolts::rands::Rand,
corpus::{Corpus, self},
fuzzer::Evaluator,
mark_feature_time,
stages::{Stage},
start_timer,
state::{HasClientPerfMonitor, HasCorpus, HasRand, UsesState, HasMetadata},
Error, prelude::{HasBytesVec, UsesInput, new_hash_feedback, StdRand, RandomSeed, MutationResult, Mutator},
};
use crate::{systemstate::{FreeRTOSSystemStateMetadata, RefinedFreeRTOSSystemState}, fuzzer::DO_NUM_INTERRUPT, clock::IcHist};
pub const MINIMUM_INTER_ARRIVAL_TIME : u32 = 700 * 1000 * (1 << 4);
//======================= Custom mutator
/// A custom stage that mutates the interrupt arrival times encoded in the input, guided by the last observed system-state trace
#[derive(Clone, Debug, Default)]
pub struct MyStateStage<E, EM, Z> {
#[allow(clippy::type_complexity)]
phantom: PhantomData<(E, EM, Z)>,
}
impl<E, EM, Z> MyStateStage<E, EM, Z>
where
E: UsesState<State = Z::State>,
EM: UsesState<State = Z::State>,
Z: Evaluator<E, EM>,
Z::State: HasClientPerfMonitor + HasCorpus + HasRand,
{
pub fn new() -> Self {
Self { phantom: PhantomData }
}
}
impl<E, EM, Z> Stage<E, EM, Z> for MyStateStage<E, EM, Z>
where
E: UsesState<State = Z::State>,
EM: UsesState<State = Z::State>,
Z: Evaluator<E, EM>,
Z::State: HasClientPerfMonitor + HasCorpus + HasRand + HasMetadata,
<Z::State as UsesInput>::Input: HasBytesVec
{
fn perform(
&mut self,
fuzzer: &mut Z,
executor: &mut E,
state: &mut Self::State,
manager: &mut EM,
corpus_idx: usize,
) -> Result<(), Error> {
let mut _input = state
.corpus()
.get(corpus_idx)?
.borrow_mut().clone();
let mut newinput = _input.input_mut().as_mut().unwrap().clone();
// let mut tmpinput = _input.input_mut().as_mut().unwrap().clone();
let mut do_rerun = false;
{
// need our own random generator because of borrowing rules
let mut myrand = StdRand::new();
let mut target_bytes : Vec<u8> = vec![];
{
let input = _input.input_mut().as_ref().unwrap();
let tmp = &mut state.rand_mut();
myrand.set_seed(tmp.next());
target_bytes = input.bytes().to_vec();
}
// produce a slice of absolute interrupt times
let mut interrupt_offsets : [u32; 32] = [0u32; 32];
let mut num_interrupts : usize = 0;
{
let mut start_tick : u32 = 0;
for i in 0..DO_NUM_INTERRUPT {
let mut t : [u8; 4] = [0,0,0,0];
if target_bytes.len() > (i+1)*4 {
for j in 0 as usize..4 as usize {
t[j]=target_bytes[i*4+j];
}
if i == 0 || true {
start_tick = u32::from_le_bytes(t);
} else {
start_tick = u32::saturating_add(start_tick,max(MINIMUM_INTER_ARRIVAL_TIME,u32::from_le_bytes(t)));
}
interrupt_offsets[i] = start_tick;
num_interrupts = i+1;
}
}
}
interrupt_offsets.sort();
// println!("Vor Mutator: {:?}", interrupt_offsets[0..num_interrupts].to_vec());
// let num_i = min(target_bytes.len() / 4, DO_NUM_INTERRUPT);
let mut suffix = target_bytes.split_off(4 * num_interrupts);
let mut prefix : Vec<[u8; 4]> = vec![];
// let mut suffix : Vec<u8> = vec![];
#[cfg(feature = "feed_systemtrace")]
{
let tmp = _input.metadata().get::<FreeRTOSSystemStateMetadata>();
if tmp.is_some() {
let trace = tmp.expect("FreeRTOSSystemStateMetadata not found");
// calculate hits and identify snippets
let mut last_m = false;
let mut marks : Vec<(&RefinedFreeRTOSSystemState, usize, usize)>= vec![]; // 1: got interrupted, 2: interrupt handler
for i in 0..trace.inner.len() {
let curr = &trace.inner[i];
let m = interrupt_offsets[0..num_interrupts].iter().any(|x| (curr.start_tick..curr.end_tick).contains(&(*x as u64)));
if m {
marks.push((curr, i, 1));
// println!("1: {}",curr.current_task.task_name);
} else if last_m {
marks.push((curr, i, 2));
// println!("2: {}",curr.current_task.task_name);
} else {
marks.push((curr, i, 0));
}
last_m = m;
}
for i in 0..num_interrupts {
// bounds based on minimum inter-arrival time
let mut lb = 0;
let mut ub : u32 = marks[marks.len()-1].0.end_tick.try_into().expect("ticks > u32");
if i > 0 {
lb = u32::saturating_add(interrupt_offsets[i-1],MINIMUM_INTER_ARRIVAL_TIME);
}
if i < num_interrupts-1 {
ub = u32::saturating_sub(interrupt_offsets[i+1],MINIMUM_INTER_ARRIVAL_TIME);
}
// get old hit and handler
let old_hit = marks.iter().filter(
|x| x.0.start_tick < (interrupt_offsets[i] as u64) && (interrupt_offsets[i] as u64) < x.0.end_tick
).next();
let old_handler = match old_hit {
Some(s) => if s.1 < num_interrupts-1 && s.1 < marks.len()-1 {
Some(marks[s.1+1])
} else {None},
None => None
};
// find reachable alternatives
let alternatives : Vec<_> = marks.iter().filter(|x|
x.2 != 2 &&
(
x.0.start_tick < (lb as u64) && (lb as u64) < x.0.end_tick
|| x.0.start_tick < (ub as u64) && (ub as u64) < x.0.end_tick )
).collect();
// in cases there are no alternatives
if alternatives.len() == 0 {
if old_hit.is_none() {
// choose something random
let untouched : Vec<_> = marks.iter().filter(
|x| x.2 == 0
).collect();
if untouched.len() > 0 {
let tmp = interrupt_offsets[i];
let choice = myrand.choose(untouched);
interrupt_offsets[i] = myrand.between(choice.0.start_tick, choice.0.end_tick)
.try_into().expect("tick > u32");
do_rerun = true;
}
// println!("no alternatives, choose random i: {} {} -> {}",i,tmp,interrupt_offsets[i]);
continue;
} else {
// do nothing
// println!("no alternatives, do nothing i: {} {}",i,interrupt_offsets[i]);
continue;
}
}
let replacement = myrand.choose(alternatives);
if (old_hit.map_or(false, |x| x == replacement)) {
// use the old value
// println!("chose old value, do nothing i: {} {}",i,interrupt_offsets[i]);
continue;
} else {
let extra = if (old_hit.map_or(false, |x| x.1 < replacement.1)) {
// move futher back, respect old_handler
old_handler.map_or(0, |x| x.0.end_tick - x.0.start_tick)
} else { 0 };
let tmp = interrupt_offsets[i];
interrupt_offsets[i] = (myrand.between(replacement.0.start_tick,
replacement.0.end_tick) + extra).try_into().expect("ticks > u32");
// println!("chose new alternative, i: {} {} -> {}",i,tmp, interrupt_offsets[i]);
do_rerun = true;
}
}
let mut numbers : Vec<u32> = interrupt_offsets[0..num_interrupts].to_vec();
numbers.sort();
// println!("Mutator: {:?}", numbers);
let mut start : u32 = 0;
// for i in 0..numbers.len() {
// let tmp = numbers[i];
// numbers[i] = numbers[i]-start;
// start = tmp;
// }
for i in 0..numbers.len() {
prefix.push(u32::to_le_bytes(numbers[i]));
}
}
}
#[cfg(not(feature = "feed_systemtrace"))]
{
let metadata = state.metadata();
let hist = metadata.get::<IcHist>().unwrap();
let maxtick : u64 = hist.1.0;
// let maxtick : u64 = (_input.exec_time().expect("No duration found").as_nanos() >> 4).try_into().unwrap();
let mut numbers : Vec<u32> = vec![];
for i in 0..num_interrupts {
prefix.push(u32::to_le_bytes(myrand.between(0, min(maxtick, u32::MAX as u64)).try_into().expect("ticks > u32")));
}
}
let mut n : Vec<u8> = vec![];
n = [prefix.concat(), suffix].concat();
newinput.bytes_mut().clear();
newinput.bytes_mut().append(&mut n);
}
// InterruptShifterMutator::mutate(&mut mymut, state, &mut input, 0)?;
if do_rerun {
let (_, corpus_idx) = fuzzer.evaluate_input(state, executor, manager, newinput)?;
}
Ok(())
}
}
impl<E, EM, Z> UsesState for MyStateStage<E, EM, Z>
where
E: UsesState<State = Z::State>,
EM: UsesState<State = Z::State>,
Z: Evaluator<E, EM>,
Z::State: HasClientPerfMonitor + HasCorpus + HasRand,
{
type State = Z::State;
}

View File

@ -1,96 +0,0 @@
use libafl::prelude::UsesInput;
use libafl_qemu::CPUArchState;
use libafl_qemu::Emulator;
use libafl_qemu::FastSnapshot;
use libafl_qemu::QemuExecutor;
use libafl_qemu::QemuHelper;
use libafl_qemu::QemuHelperTuple;
use libafl::{executors::ExitKind, inputs::Input, observers::ObserversTuple, state::HasMetadata};
use libafl_qemu::QemuHooks;
use libafl_qemu::{
emu,
};
// TODO be thread-safe maybe with https://amanieu.github.io/thread_local-rs/thread_local/index.html
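/// Restores the emulator to a clean state before every execution, either via QEMU
/// (fast) snapshots or by saving and restoring the raw CPU states, depending on the
/// enabled features.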
#[derive(Debug)]
pub struct QemuStateRestoreHelper {
has_snapshot: bool,
use_snapshot: bool,
saved_cpu_states: Vec<CPUArchState>,
fastsnap: Option<FastSnapshot>
}
impl QemuStateRestoreHelper {
#[must_use]
pub fn new() -> Self {
Self {
has_snapshot: false,
use_snapshot: true,
saved_cpu_states: vec![],
fastsnap: None
}
}
}
impl Default for QemuStateRestoreHelper {
fn default() -> Self {
Self::new()
}
}
impl<S> QemuHelper<S> for QemuStateRestoreHelper
where
S: UsesInput,
{
const HOOKS_DO_SIDE_EFFECTS: bool = true;
fn init_hooks<QT>(&self, _hooks: &QemuHooks<'_, QT, S>)
where
QT: QemuHelperTuple<S>,
{
}
fn first_exec<QT>(&self, _hooks: &QemuHooks<'_, QT, S>)
where
QT: QemuHelperTuple<S>,
{
}
fn post_exec(&mut self, emulator: &Emulator, _input: &S::Input) {
// unsafe { println!("snapshot post {}",emu::icount_get_raw()) };
}
fn pre_exec(&mut self, emulator: &Emulator, _input: &S::Input) {
// only restore in pre-exec, to preserve the post-execution state for inspection
#[cfg(feature = "snapshot_restore")]
{
#[cfg(feature = "snapshot_fast")]
match self.fastsnap {
Some(s) => emulator.restore_fast_snapshot(s),
None => {self.fastsnap = Some(emulator.create_fast_snapshot(true));},
}
#[cfg(not(feature = "snapshot_fast"))]
if !self.has_snapshot {
emulator.save_snapshot("Start", true);
self.has_snapshot = true;
}
else
{
emulator.load_snapshot("Start", true);
}
}
#[cfg(not(feature = "snapshot_restore"))]
if !self.has_snapshot {
self.saved_cpu_states = (0..emulator.num_cpus())
.map(|i| emulator.cpu_from_index(i).save_state())
.collect();
self.has_snapshot = true;
} else {
for (i, s) in self.saved_cpu_states.iter().enumerate() {
emulator.cpu_from_index(i).restore_state(s);
}
}
// unsafe { println!("snapshot pre {}",emu::icount_get_raw()) };
}
}

View File

@ -1,299 +0,0 @@
use libafl::SerdeAny;
use libafl::bolts::ownedref::OwnedSlice;
use libafl::inputs::BytesInput;
use libafl::prelude::UsesInput;
use libafl::state::HasNamedMetadata;
use std::path::PathBuf;
use crate::clock::QemuClockObserver;
use libafl::corpus::Testcase;
use libafl::bolts::tuples::MatchName;
use std::collections::hash_map::DefaultHasher;
use std::hash::Hasher;
use std::hash::Hash;
use libafl::events::EventFirer;
use libafl::state::HasClientPerfMonitor;
use libafl::feedbacks::Feedback;
use libafl::bolts::tuples::Named;
use libafl::Error;
use hashbrown::HashMap;
use libafl::{executors::ExitKind, inputs::Input, observers::ObserversTuple, state::HasMetadata};
use serde::{Deserialize, Serialize};
use super::RefinedFreeRTOSSystemState;
use super::FreeRTOSSystemStateMetadata;
use super::observers::QemuSystemStateObserver;
use petgraph::prelude::DiGraph;
use petgraph::graph::NodeIndex;
use petgraph::Direction;
use std::cmp::Ordering;
//============================= Feedback
/// Shared Metadata for a systemstateFeedback
#[derive(Debug, Serialize, Deserialize, SerdeAny, Clone, Default)]
pub struct SystemStateFeedbackState
{
known_traces: HashMap<u64,(u64,u64,usize)>, // encounters,ticks,length
longest: Vec<RefinedFreeRTOSSystemState>,
}
impl Named for SystemStateFeedbackState
{
#[inline]
fn name(&self) -> &str {
"systemstate"
}
}
// impl FeedbackState for systemstateFeedbackState
// {
// fn reset(&mut self) -> Result<(), Error> {
// self.longest.clear();
// self.known_traces.clear();
// Ok(())
// }
// }
/// A Feedback reporting novel System-State Transitions. Depends on [`QemuSystemStateObserver`]
#[derive(Serialize, Deserialize, Clone, Debug, Default)]
pub struct NovelSystemStateFeedback
{
last_trace: Option<Vec<RefinedFreeRTOSSystemState>>,
// known_traces: HashMap<u64,(u64,usize)>,
}
impl<S> Feedback<S> for NovelSystemStateFeedback
where
S: UsesInput + HasClientPerfMonitor + HasNamedMetadata,
{
fn is_interesting<EM, OT>(
&mut self,
state: &mut S,
manager: &mut EM,
input: &S::Input,
observers: &OT,
exit_kind: &ExitKind,
) -> Result<bool, Error>
where
EM: EventFirer<State = S>,
OT: ObserversTuple<S>
{
let observer = observers.match_name::<QemuSystemStateObserver>("systemstate")
.expect("QemuSystemStateObserver not found");
let clock_observer = observers.match_name::<QemuClockObserver>("clocktime") //TODO not fixed
.expect("QemuClockObserver not found");
let feedbackstate = match state
.named_metadata_mut()
.get_mut::<SystemStateFeedbackState>("systemstate") {
Some(s) => s,
None => {
let n=SystemStateFeedbackState::default();
state.named_metadata_mut().insert(n, "systemstate");
state.named_metadata_mut().get_mut::<SystemStateFeedbackState>("systemstate").unwrap()
}
};
// let feedbackstate = state
// .feedback_states_mut()
// .match_name_mut::<systemstateFeedbackState>("systemstate")
// .unwrap();
// Do Stuff
let mut hasher = DefaultHasher::new();
observer.last_run.hash(&mut hasher);
let somehash = hasher.finish();
let mut is_novel = false;
let mut takes_longer = false;
match feedbackstate.known_traces.get_mut(&somehash) {
None => {
is_novel = true;
feedbackstate.known_traces.insert(somehash,(1,clock_observer.last_runtime(),observer.last_run.len()));
}
Some(s) => {
s.0+=1;
if s.1 < clock_observer.last_runtime() {
s.1 = clock_observer.last_runtime();
takes_longer = true;
}
}
}
if observer.last_run.len() > feedbackstate.longest.len() {
feedbackstate.longest=observer.last_run.clone();
}
self.last_trace = Some(observer.last_run.clone());
// if (!is_novel) { println!("not novel") };
Ok(is_novel | takes_longer)
}
/// Append to the testcase the generated metadata in case of a new corpus item
#[inline]
fn append_metadata(&mut self, _state: &mut S, testcase: &mut Testcase<S::Input>) -> Result<(), Error> {
let a = self.last_trace.take();
match a {
Some(s) => testcase.metadata_mut().insert(FreeRTOSSystemStateMetadata::new(s)),
None => (),
}
Ok(())
}
/// Discard the stored metadata in case that the testcase is not added to the corpus
#[inline]
fn discard_metadata(&mut self, _state: &mut S, _input: &S::Input) -> Result<(), Error> {
self.last_trace = None;
Ok(())
}
}
impl Named for NovelSystemStateFeedback
{
#[inline]
fn name(&self) -> &str {
"systemstate"
}
}
//=============================
pub fn match_traces(target: &Vec<RefinedFreeRTOSSystemState>, last: &Vec<RefinedFreeRTOSSystemState>) -> bool {
let mut ret = true;
if target.len() > last.len() {return false;}
for i in 0..target.len() {
ret &= target[i].current_task.task_name==last[i].current_task.task_name;
}
ret
}
pub fn match_traces_name(target: &Vec<String>, last: &Vec<RefinedFreeRTOSSystemState>) -> bool {
let mut ret = true;
if target.len() > last.len() {return false;}
for i in 0..target.len() {
ret &= target[i]==last[i].current_task.task_name;
}
ret
}
/// A Feedback reporting whether the run hit a given sequence of task names. Depends on [`QemuSystemStateObserver`]
#[derive(Serialize, Deserialize, Clone, Debug, Default)]
pub struct HitSystemStateFeedback
{
target: Option<Vec<String>>,
}
impl<S> Feedback<S> for HitSystemStateFeedback
where
S: UsesInput + HasClientPerfMonitor,
{
fn is_interesting<EM, OT>(
&mut self,
state: &mut S,
manager: &mut EM,
input: &S::Input,
observers: &OT,
exit_kind: &ExitKind,
) -> Result<bool, Error>
where
EM: EventFirer<State = S>,
OT: ObserversTuple<S>
{
let observer = observers.match_name::<QemuSystemStateObserver>("systemstate")
.expect("QemuSystemStateObserver not found");
// Do Stuff
match &self.target {
Some(s) => {
// #[cfg(debug_assertions)] eprintln!("Hit systemstate Feedback trigger");
Ok(match_traces_name(s, &observer.last_run))
},
None => Ok(false),
}
}
}
impl Named for HitSystemStateFeedback
{
#[inline]
fn name(&self) -> &str {
"hit_systemstate"
}
}
impl HitSystemStateFeedback {
pub fn new(target: Option<Vec<RefinedFreeRTOSSystemState>>) -> Self {
Self {target: target.map(|x| x.into_iter().map(|y| y.current_task.task_name).collect())}
}
}
//=========================== Debugging Feedback
/// A [`Feedback`] meant to dump the system-traces for debugging. Depends on [`QemuSystemStateObserver`]
#[derive(Debug)]
pub struct DumpSystraceFeedback
{
dumpfile: Option<PathBuf>,
dump_metadata: bool,
last_trace: Option<Vec<RefinedFreeRTOSSystemState>>,
}
impl<S> Feedback<S> for DumpSystraceFeedback
where
S: UsesInput + HasClientPerfMonitor,
{
fn is_interesting<EM, OT>(
&mut self,
state: &mut S,
manager: &mut EM,
input: &S::Input,
observers: &OT,
exit_kind: &ExitKind,
) -> Result<bool, Error>
where
EM: EventFirer<State = S>,
OT: ObserversTuple<S>
{
let observer = observers.match_name::<QemuSystemStateObserver>("systemstate")
.expect("QemuSystemStateObserver not found");
let names : Vec<String> = observer.last_run.iter().map(|x| x.current_task.task_name.clone()).collect();
match &self.dumpfile {
Some(s) => {
std::fs::write(s,ron::to_string(&observer.last_run).expect("Error serializing hashmap")).expect("Can not dump to file");
self.dumpfile = None
},
None => if !self.dump_metadata {println!("{:?}\n{:?}",observer.last_run,names);}
};
if self.dump_metadata {self.last_trace=Some(observer.last_run.clone());}
Ok(!self.dump_metadata)
}
/// Append to the testcase the generated metadata in case of a new corpus item
#[inline]
fn append_metadata(&mut self, _state: &mut S, testcase: &mut Testcase<S::Input>) -> Result<(), Error> {
if !self.dump_metadata {return Ok(());}
let a = self.last_trace.take();
match a {
Some(s) => testcase.metadata_mut().insert(FreeRTOSSystemStateMetadata::new(s)),
None => (),
}
Ok(())
}
/// Discard the stored metadata in case that the testcase is not added to the corpus
#[inline]
fn discard_metadata(&mut self, _state: &mut S, _input: &S::Input) -> Result<(), Error> {
self.last_trace = None;
Ok(())
}
}
impl Named for DumpSystraceFeedback
{
#[inline]
fn name(&self) -> &str {
"Dumpsystemstate"
}
}
impl DumpSystraceFeedback
{
/// Creates a new [`DumpSystraceFeedback`]
#[must_use]
pub fn new() -> Self {
Self {dumpfile: None, dump_metadata: false, last_trace: None}
}
pub fn with_dump(dumpfile: Option<PathBuf>) -> Self {
Self {dumpfile: dumpfile, dump_metadata: false, last_trace: None}
}
pub fn metadata_only() -> Self {
Self {dumpfile: None, dump_metadata: true, last_trace: None}
}
}
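// Hypothetical sketch (illustrative only): the three ways the debugging feedback
// above can be constructed. The output path is a placeholder, not a real file.
#[allow(dead_code)]
fn example_dump_feedback_setups() {
    let _print_only = DumpSystraceFeedback::new();                                    // print each trace to stdout
    let _to_file = DumpSystraceFeedback::with_dump(Some(PathBuf::from("trace.ron"))); // serialize the next trace to a RON file
    let _metadata = DumpSystraceFeedback::metadata_only();                            // only attach traces as testcase metadata
}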

View File

@ -1,122 +0,0 @@
#![allow(non_camel_case_types,non_snake_case,non_upper_case_globals,deref_nullptr)]
use serde::{Deserialize, Serialize};
// Manual Types
use libafl_qemu::Emulator;
/*========== Start of generated Code =============*/
pub type char_ptr = ::std::os::raw::c_uint;
pub type ListItem_t_ptr = ::std::os::raw::c_uint;
pub type StackType_t_ptr = ::std::os::raw::c_uint;
pub type void_ptr = ::std::os::raw::c_uint;
pub type tskTaskControlBlock_ptr = ::std::os::raw::c_uint;
pub type xLIST_ptr = ::std::os::raw::c_uint;
pub type xLIST_ITEM_ptr = ::std::os::raw::c_uint;
/* automatically generated by rust-bindgen 0.59.2 */
pub type __uint8_t = ::std::os::raw::c_uchar;
pub type __uint16_t = ::std::os::raw::c_ushort;
pub type __uint32_t = ::std::os::raw::c_uint;
pub type StackType_t = u32;
pub type UBaseType_t = ::std::os::raw::c_uint;
pub type TickType_t = u32;
#[repr(C)]
#[derive(Debug, Copy, Clone, Default, Serialize, Deserialize)]
pub struct xLIST_ITEM {
pub xItemValue: TickType_t,
pub pxNext: xLIST_ITEM_ptr,
pub pxPrevious: xLIST_ITEM_ptr,
pub pvOwner: void_ptr,
pub pvContainer: xLIST_ptr,
}
pub type ListItem_t = xLIST_ITEM;
#[repr(C)]
#[derive(Debug, Copy, Clone, Default, Serialize, Deserialize)]
pub struct xMINI_LIST_ITEM {
pub xItemValue: TickType_t,
pub pxNext: xLIST_ITEM_ptr,
pub pxPrevious: xLIST_ITEM_ptr,
}
pub type MiniListItem_t = xMINI_LIST_ITEM;
#[repr(C)]
#[derive(Debug, Copy, Clone, Default, Serialize, Deserialize)]
pub struct xLIST {
pub uxNumberOfItems: UBaseType_t,
pub pxIndex: ListItem_t_ptr,
pub xListEnd: MiniListItem_t,
}
pub type List_t = xLIST;
pub type TaskHandle_t = tskTaskControlBlock_ptr;
pub const eTaskState_eRunning: eTaskState = 0;
pub const eTaskState_eReady: eTaskState = 1;
pub const eTaskState_eBlocked: eTaskState = 2;
pub const eTaskState_eSuspended: eTaskState = 3;
pub const eTaskState_eDeleted: eTaskState = 4;
pub const eTaskState_eInvalid: eTaskState = 5;
pub type eTaskState = ::std::os::raw::c_uint;
#[repr(C)]
#[derive(Debug, Copy, Clone, Default, Serialize, Deserialize)]
pub struct xTASK_STATUS {
pub xHandle: TaskHandle_t,
pub pcTaskName: char_ptr,
pub xTaskNumber: UBaseType_t,
pub eCurrentState: eTaskState,
pub uxCurrentPriority: UBaseType_t,
pub uxBasePriority: UBaseType_t,
pub ulRunTimeCounter: u32,
pub pxStackBase: StackType_t_ptr,
pub usStackHighWaterMark: u16,
}
pub type TaskStatus_t = xTASK_STATUS;
#[repr(C)]
#[derive(Debug, Copy, Clone, Default, Serialize, Deserialize)]
pub struct tskTaskControlBlock {
pub pxTopOfStack: StackType_t_ptr,
pub xStateListItem: ListItem_t,
pub xEventListItem: ListItem_t,
pub uxPriority: UBaseType_t,
pub pxStack: StackType_t_ptr,
pub pcTaskName: [::std::os::raw::c_char; 10usize],
pub uxBasePriority: UBaseType_t,
pub uxMutexesHeld: UBaseType_t,
pub ulNotifiedValue: [u32; 1usize],
pub ucNotifyState: [u8; 1usize],
pub ucStaticallyAllocated: u8,
pub ucDelayAborted: u8,
}
pub type tskTCB = tskTaskControlBlock;
pub type TCB_t = tskTCB;
/*========== End of generated Code =============*/
pub trait emu_lookup {
fn lookup(emu: &Emulator, addr: ::std::os::raw::c_uint) -> Self;
}
#[derive(Debug, Copy, Clone, Serialize, Deserialize)]
pub enum rtos_struct {
TCB_struct(TCB_t),
List_struct(List_t),
List_Item_struct(ListItem_t),
List_MiniItem_struct(MiniListItem_t),
}
#[macro_export]
macro_rules! impl_emu_lookup {
($struct_name:ident) => {
impl $crate::systemstate::freertos::emu_lookup for $struct_name {
fn lookup(emu: &Emulator, addr: ::std::os::raw::c_uint) -> $struct_name {
let mut tmp : [u8; std::mem::size_of::<$struct_name>()] = [0u8; std::mem::size_of::<$struct_name>()];
unsafe {
emu.read_mem(addr.into(), &mut tmp);
std::mem::transmute::<[u8; std::mem::size_of::<$struct_name>()], $struct_name>(tmp)
}
}
}
};
}
impl_emu_lookup!(TCB_t);
impl_emu_lookup!(List_t);
impl_emu_lookup!(ListItem_t);
impl_emu_lookup!(MiniListItem_t);
impl_emu_lookup!(void_ptr);
impl_emu_lookup!(TaskStatus_t);
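// Hypothetical sketch (illustrative only): reading the currently running TCB
// through the generated bindings above. `px_current_tcb_addr` stands for the
// guest address of FreeRTOS' pxCurrentTCB pointer and is an assumption here.
#[allow(dead_code)]
fn read_current_tcb(emu: &Emulator, px_current_tcb_addr: u32) -> Option<TCB_t> {
    // First dereference the pointer variable itself ...
    let tcb_ptr: void_ptr = emu_lookup::lookup(emu, px_current_tcb_addr);
    if tcb_ptr == 0 {
        return None;
    }
    // ... then read the TCB_t it points to from guest memory.
    Some(emu_lookup::lookup(emu, tcb_ptr))
}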

View File

@ -1,604 +0,0 @@
use libafl::SerdeAny;
// Feedbacks organizing SystemStates as a graph
use libafl::inputs::HasBytesVec;
use libafl::bolts::rands::RandomSeed;
use libafl::bolts::rands::StdRand;
use libafl::mutators::Mutator;
use libafl::mutators::MutationResult;
use libafl::prelude::HasTargetBytes;
use libafl::prelude::UsesInput;
use libafl::state::HasNamedMetadata;
use libafl::state::UsesState;
use core::marker::PhantomData;
use libafl::state::HasCorpus;
use libafl::state::HasSolutions;
use libafl::state::HasRand;
use crate::worst::MaxExecsLenFavFactor;
use libafl::schedulers::MinimizerScheduler;
use libafl::bolts::HasRefCnt;
use libafl::bolts::AsSlice;
use libafl::bolts::ownedref::OwnedSlice;
use libafl::inputs::BytesInput;
use std::path::PathBuf;
use crate::clock::QemuClockObserver;
use libafl::corpus::Testcase;
use libafl::bolts::tuples::MatchName;
use std::collections::hash_map::DefaultHasher;
use std::hash::Hasher;
use std::hash::Hash;
use libafl::events::EventFirer;
use libafl::state::HasClientPerfMonitor;
use libafl::feedbacks::Feedback;
use libafl::bolts::tuples::Named;
use libafl::Error;
use hashbrown::HashMap;
use libafl::{executors::ExitKind, inputs::Input, observers::ObserversTuple, state::HasMetadata};
use serde::{Deserialize, Serialize};
use super::RefinedFreeRTOSSystemState;
use super::FreeRTOSSystemStateMetadata;
use super::observers::QemuSystemStateObserver;
use petgraph::prelude::DiGraph;
use petgraph::graph::NodeIndex;
use petgraph::Direction;
use std::cmp::Ordering;
use libafl::bolts::rands::Rand;
//============================= Data Structures
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Default)]
pub struct VariantTuple
{
pub start_tick: u64,
pub end_tick: u64,
input_counter: u32,
pub input: Vec<u8>, // in the end any kind of input are bytes, regardless of type and lifetime
}
impl VariantTuple {
fn from(other: &RefinedFreeRTOSSystemState,input: Vec<u8>) -> Self {
VariantTuple{
start_tick: other.start_tick,
end_tick: other.end_tick,
input_counter: other.input_counter,
input: input,
}
}
}
#[derive(Serialize, Deserialize, Clone, Debug, Default)]
pub struct SysGraphNode
{
base: RefinedFreeRTOSSystemState,
pub variants: Vec<VariantTuple>,
}
impl SysGraphNode {
fn from(base: RefinedFreeRTOSSystemState, input: Vec<u8>) -> Self {
SysGraphNode{variants: vec![VariantTuple::from(&base, input)], base:base }
}
/// unites the variants of this value with another, draining the other if the bases are equal
fn unite(&mut self, other: &mut SysGraphNode) -> bool {
if self!=other {return false;}
self.variants.append(&mut other.variants);
self.variants.dedup();
return true;
}
/// Add a variant from a [`RefinedFreeRTOSSystemState`]
fn unite_raw(&mut self, other: &RefinedFreeRTOSSystemState, input: &Vec<u8>) -> bool {
if &self.base!=other {return false;}
self.variants.push(VariantTuple::from(other, input.clone()));
self.variants.dedup();
return true;
}
/// Add a variant from a [`RefinedFreeRTOSSystemState`], if it is interesting
fn unite_interesting(&mut self, other: &RefinedFreeRTOSSystemState, input: &Vec<u8>) -> bool {
if &self.base!=other {return false;}
let interesting =
self.variants.iter().all(|x| x.end_tick-x.start_tick<other.end_tick-other.start_tick) || // longest variant
self.variants.iter().all(|x| x.end_tick-x.start_tick>other.end_tick-other.start_tick) || // shortest variant
self.variants.iter().all(|x| x.input_counter>other.input_counter) || // longest input
self.variants.iter().all(|x| x.input_counter<other.input_counter); // shortest input
if interesting {
let var = VariantTuple::from(other, input.clone());
self.variants.push(var);
}
return interesting;
}
pub fn get_taskname(&self) -> &str {
&self.base.current_task.task_name
}
pub fn get_input_counts(&self) -> Vec<u32> {
self.variants.iter().map(|x| x.input_counter).collect()
}
}
impl PartialEq for SysGraphNode {
fn eq(&self, other: &SysGraphNode) -> bool {
self.base==other.base
}
}
// Wrapper around Vec<NodeIndex> to attach as Metadata
#[derive(Debug, Default, Serialize, Deserialize, Clone)]
pub struct SysGraphMetadata {
pub inner: Vec<NodeIndex>,
indices: Vec<usize>,
tcref: isize,
}
impl SysGraphMetadata {
pub fn new(inner: Vec<NodeIndex>) -> Self{
Self {indices: inner.iter().map(|x| x.index()).collect(), inner: inner, tcref: 0}
}
}
impl AsSlice for SysGraphMetadata {
/// Convert the trace to a slice of graph node indices
fn as_slice(&self) -> &[usize] {
self.indices.as_slice()
}
type Entry = usize;
}
impl HasRefCnt for SysGraphMetadata {
fn refcnt(&self) -> isize {
self.tcref
}
fn refcnt_mut(&mut self) -> &mut isize {
&mut self.tcref
}
}
libafl::impl_serdeany!(SysGraphMetadata);
pub type GraphMaximizerCorpusScheduler<CS> =
MinimizerScheduler<CS, MaxExecsLenFavFactor<<CS as UsesState>::State>,SysGraphMetadata>;
//============================= Graph Feedback
/// Feedback state holding the accumulated system-state graph
#[derive(Serialize, Deserialize, Clone, Debug, Default, SerdeAny)]
pub struct SysGraphFeedbackState
{
pub graph: DiGraph<SysGraphNode, ()>,
entrypoint: NodeIndex,
exit: NodeIndex,
name: String,
}
impl SysGraphFeedbackState
{
pub fn new() -> Self {
let mut graph = DiGraph::<SysGraphNode, ()>::new();
let mut entry = SysGraphNode::default();
entry.base.current_task.task_name="Start".to_string();
let mut exit = SysGraphNode::default();
exit.base.current_task.task_name="End".to_string();
let entry = graph.add_node(entry);
let exit = graph.add_node(exit);
Self {graph: graph, entrypoint: entry, exit: exit, name: String::from("SysMap")}
}
fn insert(&mut self, list: Vec<RefinedFreeRTOSSystemState>, input: &Vec<u8>) {
let mut current_index = self.entrypoint;
for n in list {
let mut done = false;
for i in self.graph.neighbors_directed(current_index, Direction::Outgoing) {
if n == self.graph[i].base {
done = true;
current_index = i;
break;
}
}
if !done {
let j = self.graph.add_node(SysGraphNode::from(n,input.clone()));
self.graph.add_edge(current_index, j, ());
current_index = j;
}
}
}
/// Try adding a system-state path from a [`Vec<RefinedFreeRTOSSystemState>`]; returns whether the path was interesting, plus the visited node indices
fn update(&mut self, list: &Vec<RefinedFreeRTOSSystemState>, input: &Vec<u8>) -> (bool, Vec<NodeIndex>) {
let mut current_index = self.entrypoint;
let mut novel = false;
let mut trace : Vec<NodeIndex> = vec![current_index];
for n in list {
let mut matching : Option<NodeIndex> = None;
for i in self.graph.neighbors_directed(current_index, Direction::Outgoing) {
let tmp = &self.graph[i];
if n == &tmp.base {
matching = Some(i);
current_index = i;
break;
}
}
match matching {
None => {
novel = true;
let j = self.graph.add_node(SysGraphNode::from(n.clone(),input.clone()));
self.graph.add_edge(current_index, j, ());
current_index = j;
},
Some(i) => {
novel |= self.graph[i].unite_interesting(&n, input);
}
}
trace.push(current_index);
}
self.graph.update_edge(current_index, self.exit, ()); // every path ends in the exit node
return (novel, trace);
}
}
impl Named for SysGraphFeedbackState
{
#[inline]
fn name(&self) -> &str {
&self.name
}
}
impl SysGraphFeedbackState
{
fn reset(&mut self) -> Result<(), Error> {
self.graph.clear();
let mut entry = SysGraphNode::default();
entry.base.current_task.task_name="Start".to_string();
let mut exit = SysGraphNode::default();
exit.base.current_task.task_name="End".to_string();
self.entrypoint = self.graph.add_node(entry);
self.exit = self.graph.add_node(exit);
Ok(())
}
}
/// A Feedback reporting novel System-State transitions recorded into the [`SysGraphFeedbackState`] graph. Depends on [`QemuSystemStateObserver`]
#[derive(Serialize, Deserialize, Clone, Debug, Default)]
pub struct SysMapFeedback
{
name: String,
last_trace: Option<Vec<NodeIndex>>,
}
impl SysMapFeedback {
pub fn new() -> Self {
Self {name: String::from("SysMapFeedback"), last_trace: None }
}
}
impl<S> Feedback<S> for SysMapFeedback
where
S: UsesInput + HasClientPerfMonitor + HasNamedMetadata,
S::Input: HasTargetBytes,
{
#[allow(clippy::wrong_self_convention)]
fn is_interesting<EM, OT>(
&mut self,
state: &mut S,
_manager: &mut EM,
_input: &S::Input,
observers: &OT,
_exit_kind: &ExitKind,
) -> Result<bool, Error>
where
EM: EventFirer<State = S>,
OT: ObserversTuple<S>,
{
let observer = observers.match_name::<QemuSystemStateObserver>("systemstate")
.expect("QemuSystemStateObserver not found");
let feedbackstate = match state
.named_metadata_mut()
.get_mut::<SysGraphFeedbackState>("SysMap") {
Some(s) => s,
None => {
let n=SysGraphFeedbackState::default();
state.named_metadata_mut().insert(n, "SysMap");
state.named_metadata_mut().get_mut::<SysGraphFeedbackState>("SysMap").unwrap()
}
};
let ret = feedbackstate.update(&observer.last_run, &observer.last_input);
self.last_trace = Some(ret.1);
Ok(ret.0)
}
/// Append to the testcase the generated metadata in case of a new corpus item
#[inline]
fn append_metadata(&mut self, _state: &mut S, testcase: &mut Testcase<S::Input>) -> Result<(), Error> {
let a = self.last_trace.take();
match a {
Some(s) => testcase.metadata_mut().insert(SysGraphMetadata::new(s)),
None => (),
}
Ok(())
}
/// Discard the stored metadata in case that the testcase is not added to the corpus
#[inline]
fn discard_metadata(&mut self, _state: &mut S, _input: &S::Input) -> Result<(), Error> {
self.last_trace = None;
Ok(())
}
}
impl Named for SysMapFeedback
{
#[inline]
fn name(&self) -> &str {
&self.name
}
}
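// Hypothetical sketch (illustrative only): inspecting the accumulated
// system-state graph from the fuzzer state after a campaign. Assumes the
// feedback above has run, so a "SysMap" entry exists in the named metadata.
#[allow(dead_code)]
fn dump_sysmap_tasknames<S: HasNamedMetadata>(state: &S) {
    if let Some(fs) = state.named_metadata().get::<SysGraphFeedbackState>("SysMap") {
        for idx in fs.graph.node_indices() {
            let node = &fs.graph[idx];
            println!("{} ({} variants)", node.get_taskname(), node.variants.len());
        }
    }
}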
//============================= Mutators
//=============================== Snippets
// pub struct RandGraphSnippetMutator<I, S>
// where
// I: Input + HasBytesVec,
// S: HasRand + HasMetadata + HasCorpus<I> + HasSolutions<I>,
// {
// phantom: PhantomData<(I, S)>,
// }
// impl<I, S> RandGraphSnippetMutator<I, S>
// where
// I: Input + HasBytesVec,
// S: HasRand + HasMetadata + HasCorpus<I> + HasSolutions<I>,
// {
// pub fn new() -> Self {
// RandGraphSnippetMutator{phantom: PhantomData}
// }
// }
// impl<I, S> Mutator<I, S> for RandGraphSnippetMutator<I, S>
// where
// I: Input + HasBytesVec,
// S: HasRand + HasMetadata + HasCorpus<I> + HasSolutions<I>,
// {
// fn mutate(
// &mut self,
// state: &mut S,
// input: &mut I,
// _stage_idx: i32
// ) -> Result<MutationResult, Error>
// {
// // need our own random generator, because borrowing rules
// let mut myrand = StdRand::new();
// let tmp = &mut state.rand_mut();
// myrand.set_seed(tmp.next());
// drop(tmp);
// let feedbackstate = state
// .feedback_states()
// .match_name::<SysGraphFeedbackState>("SysMap")
// .unwrap();
// let g = &feedbackstate.graph;
// let tmp = state.metadata().get::<SysGraphMetadata>();
// if tmp.is_none() { // if there are no metadata it was probably not interesting anyways
// return Ok(MutationResult::Skipped);
// }
// let trace =tmp.expect("SysGraphMetadata not found");
// // follow the path, extract snippets from last reads, find common snippets.
// // those are likely key parts. choose random parts from other sibling traces
// let sibling_inputs : Vec<&Vec<u8>>= g[*trace.inner.last().unwrap()].variants.iter().map(|x| &x.input).collect();
// let mut snippet_collector = vec![];
// let mut per_input_counters = HashMap::<&Vec<u8>,usize>::new(); // ugly workaround to track multiple inputs
// for t in &trace.inner {
// let node = &g[*t];
// let mut per_node_snippets = HashMap::<&Vec<u8>,&[u8]>::new();
// for v in &node.variants {
// match per_input_counters.get_mut(&v.input) {
// None => {
// if sibling_inputs.iter().any(|x| *x==&v.input) { // only collect info about sibling inputs from target
// per_input_counters.insert(&v.input, v.input_counter.try_into().unwrap());
// }
// },
// Some(x) => {
// let x_u = *x;
// if x_u<v.input_counter as usize {
// *x=v.input_counter as usize;
// per_node_snippets.insert(&v.input,&v.input[x_u..v.input_counter as usize]);
// }
// }
// }
// }
// snippet_collector.push(per_node_snippets);
// }
// let mut new_input : Vec<u8> = vec![];
// for c in snippet_collector {
// new_input.extend_from_slice(myrand.choose(c).1);
// }
// for i in new_input.iter().enumerate() {
// input.bytes_mut()[i.0]=*i.1;
// }
// Ok(MutationResult::Mutated)
// }
// fn post_exec(
// &mut self,
// _state: &mut S,
// _stage_idx: i32,
// _corpus_idx: Option<usize>
// ) -> Result<(), Error> {
// Ok(())
// }
// }
// impl<I, S> Named for RandGraphSnippetMutator<I, S>
// where
// I: Input + HasBytesVec,
// S: HasRand + HasMetadata + HasCorpus<I> + HasSolutions<I>,
// {
// fn name(&self) -> &str {
// "RandGraphSnippetMutator"
// }
// }
// //=============================== Snippets
// pub struct RandInputSnippetMutator<I, S>
// where
// I: Input + HasBytesVec,
// S: HasRand + HasMetadata + HasCorpus<I> + HasSolutions<I>,
// {
// phantom: PhantomData<(I, S)>,
// }
// impl<I, S> RandInputSnippetMutator<I, S>
// where
// I: Input + HasBytesVec,
// S: HasRand + HasMetadata + HasCorpus<I> + HasSolutions<I>,
// {
// pub fn new() -> Self {
// RandInputSnippetMutator{phantom: PhantomData}
// }
// }
// impl<I, S> Mutator<I, S> for RandInputSnippetMutator<I, S>
// where
// I: Input + HasBytesVec,
// S: HasRand + HasMetadata + HasCorpus<I> + HasSolutions<I>,
// {
// fn mutate(
// &mut self,
// state: &mut S,
// input: &mut I,
// _stage_idx: i32
// ) -> Result<MutationResult, Error>
// {
// // need our own random generator, because borrowing rules
// let mut myrand = StdRand::new();
// let tmp = &mut state.rand_mut();
// myrand.set_seed(tmp.next());
// drop(tmp);
// let feedbackstate = state
// .feedback_states()
// .match_name::<SysGraphFeedbackState>("SysMap")
// .unwrap();
// let g = &feedbackstate.graph;
// let tmp = state.metadata().get::<SysGraphMetadata>();
// if tmp.is_none() { // if there are no metadata it was probably not interesting anyways
// return Ok(MutationResult::Skipped);
// }
// let trace = tmp.expect("SysGraphMetadata not found");
// let mut collection : Vec<Vec<u8>> = Vec::new();
// let mut current_pointer : usize = 0;
// for t in &trace.inner {
// let node = &g[*t];
// for v in &node.variants {
// if v.input == input.bytes() {
// if v.input_counter > current_pointer.try_into().unwrap() {
// collection.push(v.input[current_pointer..v.input_counter as usize].to_owned());
// current_pointer = v.input_counter as usize;
// }
// break;
// }
// }
// }
// let index_to_mutate = myrand.below(collection.len() as u64) as usize;
// for i in 0..collection[index_to_mutate].len() {
// collection[index_to_mutate][i] = myrand.below(0xFF) as u8;
// }
// for i in collection.concat().iter().enumerate() {
// input.bytes_mut()[i.0]=*i.1;
// }
// Ok(MutationResult::Mutated)
// }
// fn post_exec(
// &mut self,
// _state: &mut S,
// _stage_idx: i32,
// _corpus_idx: Option<usize>
// ) -> Result<(), Error> {
// Ok(())
// }
// }
// impl<I, S> Named for RandInputSnippetMutator<I, S>
// where
// I: Input + HasBytesVec,
// S: HasRand + HasMetadata + HasCorpus<I> + HasSolutions<I>,
// {
// fn name(&self) -> &str {
// "RandInputSnippetMutator"
// }
// }
// //=============================== Suffix
// pub struct RandGraphSuffixMutator<I, S>
// where
// I: Input + HasBytesVec,
// S: HasRand + HasMetadata + HasCorpus<I> + HasSolutions<I>,
// {
// phantom: PhantomData<(I, S)>,
// }
// impl<I, S> RandGraphSuffixMutator<I, S>
// where
// I: Input + HasBytesVec,
// S: HasRand + HasMetadata + HasCorpus<I> + HasSolutions<I>,
// {
// pub fn new() -> Self {
// RandGraphSuffixMutator{phantom: PhantomData}
// }
// }
// impl<I, S> Mutator<I, S> for RandGraphSuffixMutator<I, S>
// where
// I: Input + HasBytesVec,
// S: HasRand + HasMetadata + HasCorpus<I> + HasSolutions<I>,
// {
// fn mutate(
// &mut self,
// state: &mut S,
// input: &mut I,
// _stage_idx: i32
// ) -> Result<MutationResult, Error>
// {
// // need our own random generator, because borrowing rules
// let mut myrand = StdRand::new();
// let tmp = &mut state.rand_mut();
// myrand.set_seed(tmp.next());
// drop(tmp);
// let feedbackstate = state
// .feedback_states()
// .match_name::<SysGraphFeedbackState>("SysMap")
// .unwrap();
// let g = &feedbackstate.graph;
// let tmp = state.metadata().get::<SysGraphMetadata>();
// if tmp.is_none() { // if there are no metadata it was probably not interesting anyways
// return Ok(MutationResult::Skipped);
// }
// let trace =tmp.expect("SysGraphMetadata not found");
// // follow the path, extract snippets from last reads, find common snippets.
// // those are likely key parts. choose random parts from other sibling traces
// let inp_c_end = g[*trace.inner.last().unwrap()].base.input_counter;
// let mut num_to_reverse = myrand.below(trace.inner.len().try_into().unwrap());
// for t in trace.inner.iter().rev() {
// let int_c_prefix = g[*t].base.input_counter;
// if int_c_prefix < inp_c_end {
// num_to_reverse-=1;
// if num_to_reverse<=0 {
// let mut new_input=input.bytes()[..(int_c_prefix as usize)].to_vec();
// let mut ext : Vec<u8> = (int_c_prefix..inp_c_end).map(|_| myrand.next().to_le_bytes()).flatten().collect();
// new_input.append(&mut ext);
// for i in new_input.iter().enumerate() {
// if input.bytes_mut().len()>i.0 {
// input.bytes_mut()[i.0]=*i.1;
// }
// else { break };
// }
// break;
// }
// }
// }
// Ok(MutationResult::Mutated)
// }
// fn post_exec(
// &mut self,
// _state: &mut S,
// _stage_idx: i32,
// _corpus_idx: Option<usize>
// ) -> Result<(), Error> {
// Ok(())
// }
// }
// impl<I, S> Named for RandGraphSuffixMutator<I, S>
// where
// I: Input + HasBytesVec,
// S: HasRand + HasMetadata + HasCorpus<I> + HasSolutions<I>,
// {
// fn name(&self) -> &str {
// "RandGraphSuffixMutator"
// }
// }

View File

@ -1,209 +0,0 @@
use std::cell::UnsafeCell;
use std::io::Write;
use std::ops::Range;
use libafl::prelude::UsesInput;
use libafl_qemu::Emulator;
use libafl_qemu::GuestAddr;
use libafl_qemu::QemuHooks;
use libafl_qemu::edges::QemuEdgesMapMetadata;
use libafl_qemu::emu;
use libafl_qemu::hooks;
use crate::systemstate::RawFreeRTOSSystemState;
use crate::systemstate::CURRENT_SYSTEMSTATE_VEC;
use crate::systemstate::NUM_PRIOS;
use super::freertos::TCB_t;
use super::freertos::rtos_struct::List_Item_struct;
use super::freertos::rtos_struct::*;
use super::freertos;
use libafl_qemu::{
helper::{QemuHelper, QemuHelperTuple},
// edges::SAVED_JUMP,
};
//============================= Struct definitions
pub static mut INTR_OFFSET : Option<u64> = None;
pub static mut INTR_DONE : bool = true;
// only used when inputs are injected
pub static mut NEXT_INPUT : Vec<u8> = Vec::new();
//============================= Qemu Helper
/// A QEMU helper which reads FreeRTOS-specific structs from QEMU whenever certain syscalls occur; it can also inject inputs
#[derive(Debug)]
pub struct QemuSystemStateHelper {
kerneladdr: u32,
tcb_addr: u32,
ready_queues: u32,
input_counter: Option<u64>,
app_range: Range<u32>,
}
impl QemuSystemStateHelper {
#[must_use]
pub fn new(
kerneladdr: u32,
tcb_addr: u32,
ready_queues: u32,
input_counter: Option<u64>,
app_range: Range<u32>,
) -> Self {
QemuSystemStateHelper {
kerneladdr,
tcb_addr: tcb_addr,
ready_queues: ready_queues,
input_counter: input_counter,
app_range,
}
}
}
impl<S> QemuHelper<S> for QemuSystemStateHelper
where
S: UsesInput,
{
fn first_exec<QT>(&self, _hooks: &QemuHooks<'_, QT, S>)
where
QT: QemuHelperTuple<S>,
{
_hooks.instruction(self.kerneladdr, exec_syscall_hook::<QT, S>, false);
#[cfg(feature = "trace_abbs")]
_hooks.jmps(Some(gen_jmp_is_syscall::<QT, S>), Some(trace_api_call::<QT, S>));
}
// TODO: refactor duplicate code
fn pre_exec(&mut self, _emulator: &Emulator, _input: &S::Input) {
unsafe {
CURRENT_SYSTEMSTATE_VEC.clear();
let p = LAST_API_CALL.with(|x| x.get());
*p = None;
}
}
fn post_exec(&mut self, emulator: &Emulator, _input: &S::Input) {
trigger_collection(emulator, self)
}
}
#[inline]
fn trigger_collection(emulator: &Emulator, h: &QemuSystemStateHelper) {
let listbytes : u32 = u32::try_from(std::mem::size_of::<freertos::List_t>()).unwrap();
let mut systemstate = RawFreeRTOSSystemState::default();
unsafe {
// TODO: investigate why can_do_io is not set sometimes, as this is just a workaround
let c = emulator.cpu_from_index(0);
let can_do_io = (*c.raw_ptr()).can_do_io;
(*c.raw_ptr()).can_do_io = 1;
systemstate.qemu_tick = emu::icount_get_raw();
(*c.raw_ptr()).can_do_io = can_do_io;
}
let mut buf : [u8; 4] = [0,0,0,0];
match h.input_counter {
Some(s) => unsafe { emulator.read_phys_mem(s, &mut buf); },
None => (),
};
systemstate.input_counter = u32::from_le_bytes(buf);
let curr_tcb_addr : freertos::void_ptr = freertos::emu_lookup::lookup(emulator, h.tcb_addr);
if curr_tcb_addr == 0 {
return;
};
systemstate.current_tcb = freertos::emu_lookup::lookup(emulator,curr_tcb_addr);
unsafe {
LAST_API_CALL.with(|x|
match *x.get() {
Some(s) => {
systemstate.last_pc = Some(s.0 as u64);
},
None => (),
}
);
}
// println!("{:?}",std::str::from_utf8(&current_tcb.pcTaskName));
for i in 0..NUM_PRIOS {
let target : u32 = listbytes*u32::try_from(i).unwrap()+h.ready_queues;
systemstate.prio_ready_lists[i] = freertos::emu_lookup::lookup(emulator, target);
// println!("List at {}: {:?}",target, systemstate.prio_ready_lists[i]);
let mut next_index = systemstate.prio_ready_lists[i].pxIndex;
for _j in 0..systemstate.prio_ready_lists[i].uxNumberOfItems {
// always jump over the xListEnd marker
if (target..target+listbytes).contains(&next_index) {
let next_item : freertos::MiniListItem_t = freertos::emu_lookup::lookup(emulator, next_index);
let new_next_index=next_item.pxNext;
systemstate.dumping_ground.insert(next_index,List_MiniItem_struct(next_item));
next_index = new_next_index;
}
let next_item : freertos::ListItem_t = freertos::emu_lookup::lookup(emulator, next_index);
// println!("Item at {}: {:?}",next_index,next_item);
assert_eq!(next_item.pvContainer,target);
let new_next_index=next_item.pxNext;
let next_tcb : TCB_t= freertos::emu_lookup::lookup(emulator,next_item.pvOwner);
// println!("TCB at {}: {:?}",next_item.pvOwner,next_tcb);
systemstate.dumping_ground.insert(next_item.pvOwner,TCB_struct(next_tcb.clone()));
systemstate.dumping_ground.insert(next_index,List_Item_struct(next_item));
next_index=new_next_index;
}
// Handle edge case where the end marker was not included yet
if (target..target+listbytes).contains(&next_index) {
let next_item : freertos::MiniListItem_t = freertos::emu_lookup::lookup(emulator, next_index);
systemstate.dumping_ground.insert(next_index,List_MiniItem_struct(next_item));
}
}
unsafe { CURRENT_SYSTEMSTATE_VEC.push(systemstate); }
}
pub fn exec_syscall_hook<QT, S>(
hooks: &mut QemuHooks<'_, QT, S>,
_state: Option<&mut S>,
_pc: u32,
)
where
S: UsesInput,
QT: QemuHelperTuple<S>,
{
let emulator = hooks.emulator();
let h = hooks.helpers().match_first_type::<QemuSystemStateHelper>().expect("QemuSystemStateHelper not found in helper tuple");
trigger_collection(emulator, h);
}
thread_local!(static LAST_API_CALL : UnsafeCell<Option<(GuestAddr,GuestAddr)>> = UnsafeCell::new(None));
pub fn gen_jmp_is_syscall<QT, S>(
hooks: &mut QemuHooks<'_, QT, S>,
_state: Option<&mut S>,
src: GuestAddr,
dest: GuestAddr,
) -> Option<u64>
where
S: UsesInput,
QT: QemuHelperTuple<S>,
{
if let Some(h) = hooks.helpers().match_first_type::<QemuSystemStateHelper>() {
if h.app_range.contains(&src) && !h.app_range.contains(&dest) {
// println!("New jmp {:x} {:x}", src, dest);
return Some(1);
}
}
return None;
}
pub fn trace_api_call<QT, S>(
_hooks: &mut QemuHooks<'_, QT, S>,
_state: Option<&mut S>,
src: GuestAddr, dest: GuestAddr, id: u64
)
where
S: UsesInput,
QT: QemuHelperTuple<S>,
{
unsafe {
let p = LAST_API_CALL.with(|x| x.get());
*p = Some((src,dest));
// print!("*");
}
}
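// Hypothetical sketch (illustrative only): constructing the helper for a target
// whose FreeRTOS symbols were resolved beforehand (e.g. from the ELF). All
// addresses below are placeholders, not real symbol values.
#[allow(dead_code)]
fn example_helper() -> QemuSystemStateHelper {
    QemuSystemStateHelper::new(
        0x0800_1234,              // kerneladdr: kernel instruction hooked for state collection
        0x2000_0010,              // tcb_addr: guest address of pxCurrentTCB
        0x2000_0100,              // ready_queues: guest address of pxReadyTasksLists
        None,                     // input_counter: no input-counter symbol in this sketch
        0x0800_8000..0x0802_0000, // app_range: address range of the application code
    )
}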

View File

@ -1,167 +0,0 @@
//! systemstate refers to the state of a FreeRTOS fuzzing target
use std::collections::hash_map::DefaultHasher;
use libafl::bolts::HasRefCnt;
use libafl::bolts::AsSlice;
use std::hash::Hasher;
use std::hash::Hash;
use hashbrown::HashMap;
use serde::{Deserialize, Serialize};
use freertos::TCB_t;
pub mod freertos;
pub mod helpers;
pub mod observers;
pub mod feedbacks;
pub mod graph;
pub mod schedulers;
// #[cfg(feature = "fuzz_interrupt")]
// pub const IRQ_INPUT_BYTES_NUMBER : u32 = 2; // Offset for interrupt bytes
// #[cfg(not(feature = "fuzz_interrupt"))]
// pub const IRQ_INPUT_BYTES_NUMBER : u32 = 0; // Offset for interrupt bytes
// pub const IRQ_INPUT_OFFSET : u32 = 347780; // Tick offset for app code start
// Constants
const NUM_PRIOS: usize = 5;
//============================= Struct definitions
/// Raw info Dump from Qemu
#[derive(Debug, Default, Serialize, Deserialize)]
pub struct RawFreeRTOSSystemState {
qemu_tick: u64,
current_tcb: TCB_t,
prio_ready_lists: [freertos::List_t; NUM_PRIOS],
dumping_ground: HashMap<u32,freertos::rtos_struct>,
input_counter: u32,
last_pc: Option<u64>,
}
/// List of system state dumps from QemuHelpers
static mut CURRENT_SYSTEMSTATE_VEC: Vec<RawFreeRTOSSystemState> = vec![];
/// A reduced version of freertos::TCB_t
#[derive(Debug, Default, Serialize, Deserialize, Clone, PartialEq)]
pub struct RefinedTCB {
pub task_name: String,
pub priority: u32,
pub base_priority: u32,
mutexes_held: u32,
notify_value: u32,
notify_state: u8,
}
impl Hash for RefinedTCB {
fn hash<H: Hasher>(&self, state: &mut H) {
self.task_name.hash(state);
self.priority.hash(state);
self.mutexes_held.hash(state);
#[cfg(not(feature = "no_hash_state"))]
self.notify_state.hash(state);
// self.notify_value.hash(state);
}
}
impl RefinedTCB {
pub fn from_tcb(input: &TCB_t) -> Self {
unsafe {
let tmp = std::mem::transmute::<[i8; 10],[u8; 10]>(input.pcTaskName);
let name : String = std::str::from_utf8(&tmp).expect("TCB name was not utf8").chars().filter(|x| *x != '\0').collect::<String>();
Self {
task_name: name,
priority: input.uxPriority,
base_priority: input.uxBasePriority,
mutexes_held: input.uxMutexesHeld,
notify_value: input.ulNotifiedValue[0],
notify_state: input.ucNotifyState[0],
}
}
}
pub fn from_tcb_owned(input: TCB_t) -> Self {
unsafe {
let tmp = std::mem::transmute::<[i8; 10],[u8; 10]>(input.pcTaskName);
let name : String = std::str::from_utf8(&tmp).expect("TCB name was not utf8").chars().filter(|x| *x != '\0').collect::<String>();
Self {
task_name: name,
priority: input.uxPriority,
base_priority: input.uxBasePriority,
mutexes_held: input.uxMutexesHeld,
notify_value: input.ulNotifiedValue[0],
notify_state: input.ucNotifyState[0],
}
}
}
}
/// Refined information about the states an execution transitioned between
#[derive(Debug, Default, Serialize, Deserialize, Clone)]
pub struct RefinedFreeRTOSSystemState {
pub start_tick: u64,
pub end_tick: u64,
last_pc: Option<u64>,
input_counter: u32,
pub current_task: RefinedTCB,
ready_list_after: Vec<RefinedTCB>,
}
impl PartialEq for RefinedFreeRTOSSystemState {
fn eq(&self, other: &Self) -> bool {
self.current_task == other.current_task && self.ready_list_after == other.ready_list_after &&
self.last_pc == other.last_pc
}
}
impl Hash for RefinedFreeRTOSSystemState {
fn hash<H: Hasher>(&self, state: &mut H) {
self.current_task.hash(state);
self.ready_list_after.hash(state);
// self.last_pc.hash(state);
}
}
impl RefinedFreeRTOSSystemState {
fn get_time(&self) -> u64 {
self.end_tick-self.start_tick
}
}
// Wrapper around Vec<RefinedFreeRTOSSystemState> to attach as Metadata
#[derive(Debug, Default, Serialize, Deserialize, Clone)]
pub struct FreeRTOSSystemStateMetadata {
pub inner: Vec<RefinedFreeRTOSSystemState>,
trace_length: usize,
indices: Vec<usize>, // Hashed enumeration of States
tcref: isize,
}
impl FreeRTOSSystemStateMetadata {
pub fn new(inner: Vec<RefinedFreeRTOSSystemState>) -> Self{
let tmp = inner.iter().enumerate().map(|x| compute_hash(x) as usize).collect();
Self {trace_length: inner.len(), inner: inner, indices: tmp, tcref: 0}
}
}
pub fn compute_hash<T>(obj: T) -> u64
where
T: Hash
{
let mut s = DefaultHasher::new();
obj.hash(&mut s);
s.finish()
}
impl AsSlice for FreeRTOSSystemStateMetadata {
/// Convert the slice of system-states to a slice of hashes over enumerated states
fn as_slice(&self) -> &[usize] {
self.indices.as_slice()
}
type Entry = usize;
}
impl HasRefCnt for FreeRTOSSystemStateMetadata {
fn refcnt(&self) -> isize {
self.tcref
}
fn refcnt_mut(&mut self) -> &mut isize {
&mut self.tcref
}
}
libafl::impl_serdeany!(FreeRTOSSystemStateMetadata);

View File

@ -1,133 +0,0 @@
// use crate::systemstate::IRQ_INPUT_BYTES_NUMBER;
use libafl::prelude::{ExitKind, AsSlice};
use libafl::{inputs::HasTargetBytes, prelude::UsesInput};
use libafl::bolts::HasLen;
use libafl::bolts::tuples::Named;
use libafl::Error;
use libafl::observers::Observer;
use hashbrown::HashMap;
use serde::{Deserialize, Serialize};
use super::{
CURRENT_SYSTEMSTATE_VEC,
RawFreeRTOSSystemState,
RefinedTCB,
RefinedFreeRTOSSystemState,
freertos::{List_t, TCB_t, rtos_struct, rtos_struct::*},
};
//============================= Observer
/// The QemuSystemStateObserver retrieves the system state
/// that gets updated by the target.
#[derive(Serialize, Deserialize, Debug, Default)]
#[allow(clippy::unsafe_derive_deserialize)]
pub struct QemuSystemStateObserver
{
pub last_run: Vec<RefinedFreeRTOSSystemState>,
pub last_input: Vec<u8>,
name: String,
}
impl<S> Observer<S> for QemuSystemStateObserver
where
S: UsesInput,
S::Input : HasTargetBytes,
{
#[inline]
fn pre_exec(&mut self, _state: &mut S, _input: &S::Input) -> Result<(), Error> {
unsafe {CURRENT_SYSTEMSTATE_VEC.clear(); }
Ok(())
}
#[inline]
fn post_exec(&mut self, _state: &mut S, _input: &S::Input, _exit_kind: &ExitKind) -> Result<(), Error> {
unsafe {self.last_run = refine_system_states(&mut CURRENT_SYSTEMSTATE_VEC);}
self.last_input=_input.target_bytes().as_slice().to_owned();
Ok(())
}
}
impl Named for QemuSystemStateObserver
{
#[inline]
fn name(&self) -> &str {
self.name.as_str()
}
}
impl HasLen for QemuSystemStateObserver
{
#[inline]
fn len(&self) -> usize {
self.last_run.len()
}
}
impl QemuSystemStateObserver {
pub fn new() -> Self {
Self{last_run: vec![], last_input: vec![], name: "systemstate".to_string()}
}
}
//============================= Parsing helpers
/// Parse a List_t containing TCB_t into Vec<TCB_t> from cache. Consumes the elements from cache
fn tcb_list_to_vec_cached(list: List_t, dump: &mut HashMap<u32,rtos_struct>) -> Vec<TCB_t>
{
let mut ret : Vec<TCB_t> = Vec::new();
if list.uxNumberOfItems == 0 {return ret;}
let last_list_item = match dump.remove(&list.pxIndex).expect("List_t entry was not in Hashmap") {
List_Item_struct(li) => li,
List_MiniItem_struct(mli) => match dump.remove(&mli.pxNext).expect("MiniListItem pointer invalid") {
List_Item_struct(li) => li,
_ => panic!("MiniListItem of a non empty List does not point to ListItem"),
},
_ => panic!("List_t entry was not a ListItem"),
};
let mut next_index = last_list_item.pxNext;
let last_tcb = match dump.remove(&last_list_item.pvOwner).expect("ListItem Owner not in Hashmap") {
TCB_struct(t) => t,
_ => panic!("List content does not equal type"),
};
for _ in 0..list.uxNumberOfItems-1 {
let next_list_item = match dump.remove(&next_index).expect("List_t entry was not in Hashmap") {
List_Item_struct(li) => li,
List_MiniItem_struct(mli) => match dump.remove(&mli.pxNext).expect("MiniListItem pointer invalid") {
List_Item_struct(li) => li,
_ => panic!("MiniListItem of a non empty List does not point to ListItem"),
},
_ => panic!("List_t entry was not a ListItem"),
};
match dump.remove(&next_list_item.pvOwner).expect("ListItem Owner not in Hashmap") {
TCB_struct(t) => {ret.push(t)},
_ => panic!("List content does not equal type"),
}
next_index=next_list_item.pxNext;
}
ret.push(last_tcb);
ret
}
/// Drains a List of raw SystemStates to produce a refined trace
fn refine_system_states(input: &mut Vec<RawFreeRTOSSystemState>) -> Vec<RefinedFreeRTOSSystemState> {
let mut ret = Vec::<RefinedFreeRTOSSystemState>::new();
let mut start_tick : u64 = 0;
for mut i in input.drain(..) {
let mut collector = Vec::<RefinedTCB>::new();
for j in i.prio_ready_lists.into_iter().rev() {
let mut tmp = tcb_list_to_vec_cached(j,&mut i.dumping_ground).iter().map(|x| RefinedTCB::from_tcb(x)).collect();
collector.append(&mut tmp);
}
ret.push(RefinedFreeRTOSSystemState {
current_task: RefinedTCB::from_tcb_owned(i.current_tcb),
start_tick: start_tick,
end_tick: i.qemu_tick,
ready_list_after: collector,
input_counter: i.input_counter,//+IRQ_INPUT_BYTES_NUMBER,
last_pc: i.last_pc,
});
start_tick=i.qemu_tick;
}
return ret;
}
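// Hypothetical sketch (illustrative only): printing the task schedule captured
// by an observer after a run. `observer` is assumed to be the
// QemuSystemStateObserver that was registered with the executor.
#[allow(dead_code)]
fn example_print_schedule(observer: &QemuSystemStateObserver) {
    for s in &observer.last_run {
        println!("{}: ticks {}..{}", s.current_task.task_name, s.start_tick, s.end_tick);
    }
}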

View File

@ -1,267 +0,0 @@
//! Corpus schedulers that feed the fuzzer with testcases from a subset of the total
//! corpus, selected by system-state trace length or in a generational fashion.
use core::{marker::PhantomData};
use std::{cmp::{max, min}, mem::swap, borrow::BorrowMut};
use serde::{Deserialize, Serialize};
use libafl::{
bolts::{rands::Rand, serdeany::SerdeAny, AsSlice, HasRefCnt},
corpus::{Corpus, Testcase},
inputs::UsesInput,
schedulers::{Scheduler, TestcaseScore, minimizer::DEFAULT_SKIP_NON_FAVORED_PROB },
state::{HasCorpus, HasMetadata, HasRand, UsesState, State},
Error, SerdeAny, prelude::HasLen,
};
use crate::worst::MaxTimeFavFactor;
use super::FreeRTOSSystemStateMetadata;
/// State metadata tracking the longest system-state trace seen so far
#[derive(Debug, Serialize, Deserialize, SerdeAny, Default)]
pub struct LongestTracesMetadata {
/// Length of the longest observed trace
pub max_trace_length: usize,
}
impl LongestTracesMetadata {
fn new(l : usize) -> Self {
Self {max_trace_length: l}
}
}
/// A [`Scheduler`] wrapper that prefers [`Testcase`]`s` with long system-state traces,
/// probabilistically skipping entries whose trace is short relative to the longest
/// trace seen so far, and otherwise delegating to the wrapped base scheduler
#[derive(Debug, Clone)]
pub struct LongestTraceScheduler<CS> {
base: CS,
skip_non_favored_prob: u64,
}
impl<CS> UsesState for LongestTraceScheduler<CS>
where
CS: UsesState,
{
type State = CS::State;
}
impl<CS> Scheduler for LongestTraceScheduler<CS>
where
CS: Scheduler,
CS::State: HasCorpus + HasMetadata + HasRand,
{
/// Called when an entry is added to the corpus; updates the maximum trace length metadata
fn on_add(&self, state: &mut CS::State, idx: usize) -> Result<(), Error> {
let l = state.corpus()
.get(idx)?
.borrow()
.metadata()
.get::<FreeRTOSSystemStateMetadata>().map_or(0, |x| x.trace_length);
self.get_update_trace_length(state,l);
self.base.on_add(state, idx)
}
/// Replaces the testcase at the given idx
fn on_replace(
&self,
state: &mut CS::State,
idx: usize,
testcase: &Testcase<<CS::State as UsesInput>::Input>,
) -> Result<(), Error> {
let l = state.corpus()
.get(idx)?
.borrow()
.metadata()
.get::<FreeRTOSSystemStateMetadata>().map_or(0, |x| x.trace_length);
self.get_update_trace_length(state, l);
self.base.on_replace(state, idx, testcase)
}
/// Called when an entry is removed from the corpus
fn on_remove(
&self,
state: &mut CS::State,
idx: usize,
testcase: &Option<Testcase<<CS::State as UsesInput>::Input>>,
) -> Result<(), Error> {
self.base.on_remove(state, idx, testcase)?;
Ok(())
}
/// Gets the next entry
fn next(&self, state: &mut CS::State) -> Result<usize, Error> {
let mut idx = self.base.next(state)?;
while {
let l = state.corpus()
.get(idx)?
.borrow()
.metadata()
.get::<FreeRTOSSystemStateMetadata>().map_or(0, |x| x.trace_length);
let m = self.get_update_trace_length(state,l);
state.rand_mut().below(m) > l as u64
} && state.rand_mut().below(100) < self.skip_non_favored_prob
{
idx = self.base.next(state)?;
}
Ok(idx)
}
}
impl<CS> LongestTraceScheduler<CS>
where
CS: Scheduler,
CS::State: HasCorpus + HasMetadata + HasRand,
{
pub fn get_update_trace_length(&self, state: &mut CS::State, par: usize) -> u64 {
// Create the longest-trace metadata if it does not exist yet, otherwise update it and return the maximum
if let Some(td) = state.metadata_mut().get_mut::<LongestTracesMetadata>() {
let m = max(td.max_trace_length, par);
td.max_trace_length = m;
m as u64
} else {
state.add_metadata(LongestTracesMetadata::new(par));
par as u64
}
}
pub fn new(base: CS) -> Self {
Self {
base,
skip_non_favored_prob: DEFAULT_SKIP_NON_FAVORED_PROB,
}
}
}
//==========================================================================================
/// State metadata holding the current and next generation of testcases
#[derive(Debug, Serialize, Deserialize, SerdeAny, Default)]
pub struct GeneticMetadata {
pub current_gen: Vec<(usize, f64)>,
pub current_cursor: usize,
pub next_gen: Vec<(usize, f64)>,
pub gen: usize
}
impl GeneticMetadata {
fn new(current_gen: Vec<(usize, f64)>, next_gen: Vec<(usize, f64)>) -> Self {
Self {current_gen, current_cursor: 0, next_gen, gen: 0}
}
}
#[derive(Debug, Clone)]
pub struct GenerationScheduler<S> {
phantom: PhantomData<S>,
gen_size: usize,
}
impl<S> UsesState for GenerationScheduler<S>
where
S: UsesInput,
{
type State = S;
}
impl<S> Scheduler for GenerationScheduler<S>
where
S: HasCorpus + HasMetadata,
S::Input: HasLen,
{
/// Get the next element in the current generation;
/// if the current generation is exhausted, swap in the next one, sort it by FavFactor, keep the top k and return the first
fn next(&self, state: &mut Self::State) -> Result<usize, Error> {
let mut to_remove : Vec<(usize, f64)> = vec![];
let mut to_return : usize = 0;
let c = state.corpus().count();
let gm = state.metadata_mut().get_mut::<GeneticMetadata>().expect("GeneticMetadata not found");
// println!("index: {} curr: {:?} next: {:?} gen: {} corp: {}", gm.current_cursor, gm.current_gen.len(), gm.next_gen.len(), gm.gen,
// c);
match gm.current_gen.get(gm.current_cursor) {
Some(c) => {
gm.current_cursor+=1;
// println!("normal next: {}", (*c).0);
return Ok((*c).0)
},
None => {
swap(&mut to_remove, &mut gm.current_gen);
swap(&mut gm.next_gen, &mut gm.current_gen);
gm.current_gen.sort_by(|a, b| a.1.partial_cmp(&b.1).unwrap());
// gm.current_gen.reverse();
if gm.current_gen.len() == 0 {panic!("Corpus is empty");}
let d : Vec<(usize, f64)> = gm.current_gen.drain(min(gm.current_gen.len(), self.gen_size)..).collect();
to_remove.extend(d);
// move all indices to the left, since all other indices will be deleted
gm.current_gen.sort_by(|a,b| a.0.cmp(&(*b).0)); // in order of the corpus index
for i in 0..gm.current_gen.len() {
gm.current_gen[i] = (i, gm.current_gen[i].1);
}
to_return = gm.current_gen.get(0).unwrap().0;
gm.current_cursor=1;
gm.gen+=1;
}
};
// removing these elements will move all indices left by to_remove.len()
to_remove.sort_by(|x,y| x.0.cmp(&(*y).0));
to_remove.reverse();
for i in to_remove {
state.corpus_mut().remove(i.0).unwrap();
}
// println!("switch next: {to_return}");
return Ok(to_return);
}
/// Add the new input to the next generation
fn on_add(
&self,
state: &mut Self::State,
idx: usize
) -> Result<(), Error> {
// println!("On Add {idx}");
let mut tc = state.corpus_mut().get(idx).unwrap().borrow_mut().clone();
let ff = MaxTimeFavFactor::compute(&mut tc, state).unwrap();
if let Some(gm) = state.metadata_mut().get_mut::<GeneticMetadata>() {
gm.next_gen.push((idx,ff));
} else {
state.add_metadata(GeneticMetadata::new(vec![], vec![(idx,ff)]));
}
Ok(())
}
fn on_replace(
&self,
_state: &mut Self::State,
_idx: usize,
_prev: &Testcase<<Self::State as UsesInput>::Input>
) -> Result<(), Error> {
// println!("On Replace {_idx}");
Ok(())
}
fn on_remove(
&self,
state: &mut Self::State,
idx: usize,
_testcase: &Option<Testcase<<Self::State as UsesInput>::Input>>
) -> Result<(), Error> {
// println!("On Remove {idx}");
if let Some(gm) = state.metadata_mut().get_mut::<GeneticMetadata>() {
gm.next_gen = gm.next_gen.drain(..).into_iter().filter(|x| (*x).0 != idx).collect::<Vec<(usize, f64)>>();
gm.current_gen = gm.current_gen.drain(..).into_iter().filter(|x| (*x).0 != idx).collect::<Vec<(usize, f64)>>();
} else {
state.add_metadata(GeneticMetadata::new(vec![], vec![]));
}
Ok(())
}
}
impl<S> GenerationScheduler<S>
{
pub fn new() -> Self {
Self {
phantom: PhantomData,
gen_size: 100,
}
}
}
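// Hypothetical sketch (illustrative only): wrapping an arbitrary base scheduler
// so that testcases with short system-state traces are probabilistically
// skipped. The base scheduler is an assumption and comes from elsewhere
// (e.g. a plain queue scheduler).
#[allow(dead_code)]
fn wrap_with_longest_trace<CS>(base: CS) -> LongestTraceScheduler<CS>
where
    CS: Scheduler,
    CS::State: HasCorpus + HasMetadata + HasRand,
{
    LongestTraceScheduler::new(base)
}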

View File

@ -1,381 +0,0 @@
use core::fmt::Debug;
use core::cmp::Ordering::{Greater,Less,Equal};
use libafl::inputs::BytesInput;
use libafl::inputs::HasTargetBytes;
use libafl::feedbacks::MapIndexesMetadata;
use libafl::corpus::Testcase;
use libafl::prelude::{UsesInput, AsSlice};
use core::marker::PhantomData;
use libafl::schedulers::{MinimizerScheduler, TestcaseScore};
use std::path::PathBuf;
use std::fs;
use hashbrown::{HashMap};
use libafl::observers::ObserversTuple;
use libafl::executors::ExitKind;
use libafl::events::EventFirer;
use libafl::state::{HasClientPerfMonitor, HasCorpus, UsesState};
use libafl::inputs::Input;
use libafl::feedbacks::Feedback;
use libafl::state::HasMetadata;
use libafl_qemu::edges::QemuEdgesMapMetadata;
use libafl::observers::MapObserver;
use serde::{Deserialize, Serialize};
use std::cmp;
use libafl::{
bolts::{
tuples::Named,
HasLen,
},
observers::Observer,
Error,
};
use crate::clock::QemuClockObserver;
use crate::systemstate::FreeRTOSSystemStateMetadata;
//=========================== Scheduler
pub type TimeMaximizerCorpusScheduler<CS> =
MinimizerScheduler<CS, MaxTimeFavFactor<<CS as UsesState>::State>, MapIndexesMetadata>;
/// Scores a testcase by its negated execution time.
/// This favors slow, long-running testcases.
#[derive(Debug, Clone)]
pub struct MaxTimeFavFactor<S>
where
S: HasCorpus + HasMetadata,
S::Input: HasLen,
{
phantom: PhantomData<S>,
}
impl<S> TestcaseScore<S> for MaxTimeFavFactor<S>
where
S: HasCorpus + HasMetadata,
S::Input: HasLen,
{
fn compute(entry: &mut Testcase<<S as UsesInput>::Input>, state: &S) -> Result<f64, Error> {
// TODO maybe enforce entry.exec_time().is_some()
let et = entry.exec_time().expect("testcase.exec_time is needed for scheduler");
let tns : i64 = et.as_nanos().try_into().expect("failed to convert time");
Ok(-tns as f64)
}
}
pub type LenTimeMaximizerCorpusScheduler<CS> =
MinimizerScheduler<CS, MaxExecsLenFavFactor<<CS as UsesState>::State>, MapIndexesMetadata>;
pub type TimeStateMaximizerCorpusScheduler<CS> =
MinimizerScheduler<CS, MaxTimeFavFactor<<CS as UsesState>::State>, FreeRTOSSystemStateMetadata>;
/// Multiplies the testcase's executions per hour with its length.
/// Used to rank testcases in the minimizer-style schedulers above.
#[derive(Debug, Clone)]
pub struct MaxExecsLenFavFactor<S>
where
S: HasCorpus + HasMetadata,
S::Input: HasLen,
{
phantom: PhantomData<S>,
}
impl<S> TestcaseScore<S> for MaxExecsLenFavFactor<S>
where
S: HasCorpus + HasMetadata,
S::Input: HasLen,
{
fn compute(entry: &mut Testcase<S::Input>, state: &S) -> Result<f64, Error> {
let execs_per_hour = (3600.0/entry.exec_time().expect("testcase.exec_time is needed for scheduler").as_secs_f64());
let execs_times_length_per_hour = execs_per_hour*entry.cached_len()? as f64;
Ok(execs_times_length_per_hour)
}
}
//===================================================================
/// A Feedback reporting whether the first 32 input bytes are in non-increasing order.
#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct SortedFeedback {
}
impl<S> Feedback<S> for SortedFeedback
where
S: UsesInput + HasClientPerfMonitor,
S::Input: HasTargetBytes,
{
#[allow(clippy::wrong_self_convention)]
fn is_interesting<EM, OT>(
&mut self,
_state: &mut S,
_manager: &mut EM,
_input: &S::Input,
observers: &OT,
_exit_kind: &ExitKind,
) -> Result<bool, Error>
where
EM: EventFirer<State = S>,
OT: ObserversTuple<S>,
{
let t = _input.target_bytes();
let tmp = t.as_slice();
if tmp.len()<32 {return Ok(false);}
let tmp = Vec::<u8>::from(&tmp[0..32]);
// tmp.reverse();
if tmp.is_sorted_by(|a,b| match a.partial_cmp(b).unwrap_or(Less) {
Less => Some(Greater),
Equal => Some(Greater),
Greater => Some(Less),
}) {return Ok(true)};
return Ok(false);
}
}
impl Named for SortedFeedback {
#[inline]
fn name(&self) -> &str {
"Sorted"
}
}
impl SortedFeedback {
/// Creates a new [`SortedFeedback`]
#[must_use]
pub fn new() -> Self {
Self {}
}
}
impl Default for SortedFeedback {
fn default() -> Self {
Self::new()
}
}
//===================================================================
/// A Feedback which expects a certain minimum execution time
#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct ExecTimeReachedFeedback
{
target_time: u64,
}
impl<S> Feedback<S> for ExecTimeReachedFeedback
where
S: UsesInput + HasClientPerfMonitor,
{
#[allow(clippy::wrong_self_convention)]
fn is_interesting<EM, OT>(
&mut self,
_state: &mut S,
_manager: &mut EM,
_input: &S::Input,
observers: &OT,
_exit_kind: &ExitKind,
) -> Result<bool, Error>
where
EM: EventFirer<State = S>,
OT: ObserversTuple<S>,
{
let observer = observers.match_name::<QemuClockObserver>("clock")
.expect("QemuClockObserver not found");
Ok(observer.last_runtime() >= self.target_time)
}
}
impl Named for ExecTimeReachedFeedback
{
#[inline]
fn name(&self) -> &str {
"ExecTimeReachedFeedback"
}
}
impl ExecTimeReachedFeedback
where
{
/// Creates a new [`ExecTimeReachedFeedback`]
#[must_use]
pub fn new(target_time : u64) -> Self {
Self {target_time: target_time}
}
}
pub static mut EXEC_TIME_COLLECTION : Vec<u32> = Vec::new();
/// A Noop Feedback which records a list of all execution times
#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct ExecTimeCollectorFeedback
{
}
impl<S> Feedback<S> for ExecTimeCollectorFeedback
where
S: UsesInput + HasClientPerfMonitor,
{
#[allow(clippy::wrong_self_convention)]
fn is_interesting<EM, OT>(
&mut self,
_state: &mut S,
_manager: &mut EM,
_input: &S::Input,
observers: &OT,
_exit_kind: &ExitKind,
) -> Result<bool, Error>
where
EM: EventFirer<State = S>,
OT: ObserversTuple<S>,
{
let observer = observers.match_name::<QemuClockObserver>("clock")
.expect("QemuClockObserver not found");
unsafe { EXEC_TIME_COLLECTION.push(observer.last_runtime().try_into().unwrap()); }
Ok(false)
}
}
impl Named for ExecTimeCollectorFeedback
{
#[inline]
fn name(&self) -> &str {
"ExecTimeCollectorFeedback"
}
}
impl ExecTimeCollectorFeedback
where
{
/// Creates a new [`ExecTimeCollectorFeedback`]
#[must_use]
pub fn new() -> Self {
Self {}
}
}
/// Shared metadata for an [`ExecTimeCollectorFeedback`]
#[derive(Serialize, Deserialize, Clone, Debug, Default)]
pub struct ExecTimeCollectorFeedbackState
{
collection: Vec<u32>,
}
impl Named for ExecTimeCollectorFeedbackState
{
#[inline]
fn name(&self) -> &str {
"ExecTimeCollectorFeedbackState"
}
}
//===================================================================
/// A Feedback which reports inputs whose execution time exceeds the longest runtime seen so far
#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct ExecTimeIncFeedback
{
longest_time: u64,
last_is_longest: bool
}
impl<S> Feedback<S> for ExecTimeIncFeedback
where
S: UsesInput + HasClientPerfMonitor,
{
#[allow(clippy::wrong_self_convention)]
fn is_interesting<EM, OT>(
&mut self,
_state: &mut S,
_manager: &mut EM,
_input: &S::Input,
observers: &OT,
_exit_kind: &ExitKind,
) -> Result<bool, Error>
where
EM: EventFirer<State = S>,
OT: ObserversTuple<S>,
{
let observer = observers.match_name::<QemuClockObserver>("clocktime")
.expect("QemuClockObserver not found");
if observer.last_runtime() > self.longest_time {
self.longest_time = observer.last_runtime();
self.last_is_longest = true;
Ok(true)
} else {
self.last_is_longest = false;
Ok(false)
}
}
fn append_metadata(
&mut self,
_state: &mut S,
testcase: &mut Testcase<<S as UsesInput>::Input>,
) -> Result<(), Error> {
#[cfg(feature = "feed_afl")]
if self.last_is_longest {
let mim : Option<&mut MapIndexesMetadata>= testcase.metadata_mut().get_mut();
// pretend that the longest input alone exercises some non-existing edge, to keep it relevant
mim.unwrap().list.push(usize::MAX);
};
Ok(())
}
}
impl Named for ExecTimeIncFeedback
{
#[inline]
fn name(&self) -> &str {
"ExecTimeReachedFeedback"
}
}
impl ExecTimeIncFeedback
where
{
/// Creates a new [`ExecTimeIncFeedback`]
#[must_use]
pub fn new() -> Self {
Self {longest_time: 0, last_is_longest: false}
}
}
/// A Feedback which reports every input as interesting
#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct AlwaysTrueFeedback
{
}
impl<S> Feedback<S> for AlwaysTrueFeedback
where
S: UsesInput + HasClientPerfMonitor,
{
#[allow(clippy::wrong_self_convention)]
fn is_interesting<EM, OT>(
&mut self,
_state: &mut S,
_manager: &mut EM,
_input: &S::Input,
_observers: &OT,
_exit_kind: &ExitKind,
) -> Result<bool, Error>
where
EM: EventFirer<State = S>,
OT: ObserversTuple<S>,
{
Ok(true)
}
}
impl Named for AlwaysTrueFeedback
{
#[inline]
fn name(&self) -> &str {
"AlwaysTrueFeedback"
}
}
impl AlwaysTrueFeedback
{
/// Creates a new [`AlwaysTrueFeedback`]
#[must_use]
pub fn new() -> Self {
Self {}
}
}
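`ExecTimeCollectorFeedback` only appends each run's clock value to the global `EXEC_TIME_COLLECTION` vector; draining and summarising that vector is left to the surrounding fuzzer. As a rough, self-contained sketch of such a post-processing step (the statistics and output format below are assumptions for illustration, not part of this file):
```
// Summarise the values that ExecTimeCollectorFeedback pushes into EXEC_TIME_COLLECTION.
fn summarize_exec_times(times: &[u32]) -> Option<(u32, u32, f64)> {
    let min = *times.iter().min()?;
    let max = *times.iter().max()?;
    let mean = times.iter().map(|&t| f64::from(t)).sum::<f64>() / times.len() as f64;
    Some((min, max, mean))
}

fn main() {
    // In the fuzzer these values would be read from EXEC_TIME_COLLECTION after a run.
    let times = vec![120_u32, 450, 90, 450, 1024];
    if let Some((min, max, mean)) = summarize_exec_times(&times) {
        println!("runs: {}, min: {min}, max: {max}, mean: {mean:.1}", times.len());
    }
}
```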

View File

@ -6,7 +6,6 @@ edition = "2021"
[features] [features]
default = ["std"] default = ["std"]
tui = []
std = [] std = []
[profile.dev] [profile.dev]

View File

@ -1,70 +0,0 @@
from pylibafl import libafl
# LIBRARY WRAPPER
def map_observer_wrapper(map_observer):
if type(map_observer).__name__ == "OwnedMapObserverI32":
return libafl.MapObserverI32.new_from_owned(map_observer)
def executor_wrapper(executor):
if type(executor).__name__ == "OwnedInProcessExecutorI32":
return libafl.ExecutorI32.new_from_inprocess(executor)
def monitor_wrapper(monitor):
if type(monitor).__name__ == "SimpleMonitor":
return libafl.Monitor.new_from_simple(monitor)
def event_manager_wrapper(event_manager):
if type(event_manager).__name__ == "SimpleEventManager":
return libafl.EventManagerI32.new_from_simple(event_manager)
def corpus_wrapper(corpus):
if type(corpus).__name__ == "InMemoryCorpus":
return libafl.Corpus.new_from_in_memory(corpus)
if type(corpus).__name__ == "OnDiskCorpus":
return libafl.Corpus.new_from_on_disk(corpus)
def rand_wrapper(rand):
if type(rand).__name__ == "StdRand":
return libafl.Rand.new_from_std(rand)
def stage_wrapper(stage):
if type(stage).__name__ == "StdScheduledHavocMutationsStageI32":
return libafl.StageI32.new_from_std_scheduled(stage)
# CODE WRITTEN BY USER
def harness(inp):
if len(inp.hex()) >= 2 and inp.hex()[:2] == '61':
raise Exception("NOOOOOO =)")
map_observer = libafl.OwnedMapObserverI32("signals", [0] * 16)
feedback_state = libafl.MapFeedbackStateI32.with_observer(map_observer_wrapper(map_observer))
feedback = libafl.MaxMapFeedbackI32(feedback_state, map_observer_wrapper(map_observer))
state = libafl.StdStateI32(
rand_wrapper(libafl.StdRand.with_current_nanos()),
corpus_wrapper(libafl.InMemoryCorpus()),
corpus_wrapper(libafl.OnDiskCorpus("./crashes")),
feedback_state
)
monitor = libafl.SimpleMonitor()
mgr = libafl.SimpleEventManager(monitor_wrapper(monitor))
fuzzer = libafl.StdFuzzerI32(feedback)
executor = libafl.OwnedInProcessExecutorI32(harness, map_observer_wrapper(map_observer), fuzzer, state, event_manager_wrapper(mgr))
generator = libafl.RandPrintablesGeneratorI32(32)
state.generate_initial_inputs(fuzzer, executor_wrapper(executor), generator, event_manager_wrapper(mgr), 8)
stage = libafl.StdScheduledHavocMutationsStageI32.new_from_scheduled_havoc_mutations()
stage_tuple_list = libafl.StagesOwnedListI32(stage_wrapper(stage))
fuzzer.fuzz_loop(executor_wrapper(executor), state, event_manager_wrapper(mgr), stage_tuple_list)

View File

@ -1,23 +1,20 @@
use std::path::PathBuf; use std::path::PathBuf;
#[cfg(windows)] #[cfg(windows)]
use std::ptr::write_volatile; use std::ptr::write_volatile;
#[cfg(feature = "tui")]
use libafl::monitors::tui::TuiMonitor;
#[cfg(not(feature = "tui"))]
use libafl::monitors::SimpleMonitor;
use libafl::{ use libafl::{
bolts::{current_nanos, rands::StdRand, tuples::tuple_list, AsSlice}, bolts::{current_nanos, rands::StdRand, tuples::tuple_list},
corpus::{InMemoryCorpus, OnDiskCorpus}, corpus::{InMemoryCorpus, OnDiskCorpus, QueueCorpusScheduler},
events::SimpleEventManager, events::SimpleEventManager,
executors::{inprocess::InProcessExecutor, ExitKind}, executors::{inprocess::InProcessExecutor, ExitKind},
feedbacks::{CrashFeedback, MaxMapFeedback}, feedbacks::{CrashFeedback, MapFeedbackState, MaxMapFeedback},
fuzzer::{Fuzzer, StdFuzzer}, fuzzer::{Fuzzer, StdFuzzer},
generators::RandPrintablesGenerator, generators::RandPrintablesGenerator,
inputs::{BytesInput, HasTargetBytes}, inputs::{BytesInput, HasTargetBytes},
monitors::SimpleMonitor,
mutators::scheduled::{havoc_mutations, StdScheduledMutator}, mutators::scheduled::{havoc_mutations, StdScheduledMutator},
observers::StdMapObserver, observers::StdMapObserver,
schedulers::QueueScheduler,
stages::mutational::StdMutationalStage, stages::mutational::StdMutationalStage,
state::StdState, state::StdState,
}; };
@ -43,7 +40,7 @@ pub fn main() {
signals_set(2); signals_set(2);
if buf.len() > 2 && buf[2] == b'c' { if buf.len() > 2 && buf[2] == b'c' {
#[cfg(unix)] #[cfg(unix)]
panic!("Artificial bug triggered =)"); panic!("=(");
// panic!() raises a STATUS_STACK_BUFFER_OVERRUN exception which cannot be caught by the exception handler. // panic!() raises a STATUS_STACK_BUFFER_OVERRUN exception which cannot be caught by the exception handler.
// Here we make it raise STATUS_ACCESS_VIOLATION instead. // Here we make it raise STATUS_ACCESS_VIOLATION instead.
@ -60,14 +57,16 @@ pub fn main() {
}; };
// Create an observation channel using the signals map // Create an observation channel using the signals map
let observer = let observer = StdMapObserver::new("signals", unsafe { &mut SIGNALS });
unsafe { StdMapObserver::new_from_ptr("signals", SIGNALS.as_mut_ptr(), SIGNALS.len()) };
// The state of the edges feedback.
let feedback_state = MapFeedbackState::with_observer(&observer);
// Feedback to rate the interestingness of an input // Feedback to rate the interestingness of an input
let mut feedback = MaxMapFeedback::new(&observer); let feedback = MaxMapFeedback::new(&feedback_state, &observer);
// A feedback to choose if an input is a solution or not // A feedback to choose if an input is a solution or not
let mut objective = CrashFeedback::new(); let objective = CrashFeedback::new();
// create a State from scratch // create a State from scratch
let mut state = StdState::new( let mut state = StdState::new(
@ -79,25 +78,19 @@ pub fn main() {
// on disk so the user can get them after stopping the fuzzer // on disk so the user can get them after stopping the fuzzer
OnDiskCorpus::new(PathBuf::from("./crashes")).unwrap(), OnDiskCorpus::new(PathBuf::from("./crashes")).unwrap(),
// States of the feedbacks. // States of the feedbacks.
// The feedbacks can report the data that should persist in the State. // They are the data related to the feedbacks that you want to persist in the State.
&mut feedback, tuple_list!(feedback_state),
// Same for objective feedbacks );
&mut objective,
)
.unwrap();
// The Monitor trait define how the fuzzer stats are displayed to the user // The Monitor trait define how the fuzzer stats are displayed to the user
#[cfg(not(feature = "tui"))]
let mon = SimpleMonitor::new(|s| println!("{}", s)); let mon = SimpleMonitor::new(|s| println!("{}", s));
#[cfg(feature = "tui")]
let mon = TuiMonitor::new(String::from("Baby Fuzzer"), false);
// The event manager handle the various events generated during the fuzzing loop // The event manager handle the various events generated during the fuzzing loop
// such as the notification of the addition of a new item to the corpus // such as the notification of the addition of a new item to the corpus
let mut mgr = SimpleEventManager::new(mon); let mut mgr = SimpleEventManager::new(mon);
// A queue policy to get testcasess from the corpus // A queue policy to get testcasess from the corpus
let scheduler = QueueScheduler::new(); let scheduler = QueueCorpusScheduler::new();
// A fuzzer with feedbacks and a corpus scheduler // A fuzzer with feedbacks and a corpus scheduler
let mut fuzzer = StdFuzzer::new(scheduler, feedback, objective); let mut fuzzer = StdFuzzer::new(scheduler, feedback, objective);

View File

@ -1,6 +1,6 @@
[package] [package]
name = "baby_fuzzer_gramatron" name = "baby_fuzzer"
version = "0.8.2" version = "0.7.1"
authors = ["Andrea Fioraldi <andreafioraldi@gmail.com>", "Dominik Maier <domenukk@gmail.com>"] authors = ["Andrea Fioraldi <andreafioraldi@gmail.com>", "Dominik Maier <domenukk@gmail.com>"]
edition = "2021" edition = "2021"

View File

@ -1,15 +1,8 @@
# Baby Gramatron

This fuzzer shows how to implement grammar-aware fuzzing. [Gramatron](https://github.com/HexHive/Gramatron) uses grammar automatons in conjunction with aggressive mutation operators to synthesize complex bug triggers. `auto.json` records the grammar automaton of php, which corresponds to `libafl::generators::Automaton` and is serialized into `auto.postcard`. `libafl::generators::gramatron` generates valid grammar sequences from the `Automaton` and passes them into the `harness`, which simply prints the original input.

When you use `cargo run`, you may see output such as:
```
b=mlhs_node.isz(c,c, )
d=false.keyword__FILE__(c,b,a,b)
a=select.Jan(d)
a=first.literal( )
b=[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,nil].DomainError(c)
next a
b=Oo.gsub(a,d,b)
d=0.hex( )
```
(On the "0.7.1" side this README is the generic Baby fuzzer text: a minimalistic example of how to create a libafl-based fuzzer that runs on a single core until a crash occurs and then exits; the tested program is a simple Rust function without any instrumentation, so for real fuzzing you will want to add coverage or other feedback.)
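To make the idea of walking a grammar automaton concrete, here is a toy, self-contained sketch. The `ToyAutomaton` type, its transition table, and the LCG-based choice are invented for illustration only; they are not the `libafl::generators::Automaton` used by this example.
```
/// Toy finite-state "grammar automaton": each state owns a list of
/// (emitted token, next state) edges. Illustration only, not libafl code.
struct ToyAutomaton {
    edges: Vec<Vec<(&'static str, usize)>>, // edges[state] = [(token, next_state)]
    final_state: usize,
}

impl ToyAutomaton {
    /// Walk the automaton with a simple LCG-driven choice, concatenating tokens.
    fn generate(&self, mut seed: u64) -> String {
        let mut state = 0;
        let mut out = String::new();
        while state != self.final_state {
            let choices = &self.edges[state];
            seed = seed.wrapping_mul(6364136223846793005).wrapping_add(1); // LCG step
            let (tok, next) = choices[(seed >> 33) as usize % choices.len()];
            out.push_str(tok);
            state = next;
        }
        out
    }
}

fn main() {
    // Grammar: S -> "a" A ; A -> "b" A | "c"
    let automaton = ToyAutomaton {
        edges: vec![
            vec![("a", 1)],           // state 0
            vec![("b", 1), ("c", 2)], // state 1
            vec![],                   // state 2 (final)
        ],
        final_state: 2,
    };
    println!("{}", automaton.generate(42));
}
```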

View File

@ -1,17 +1,19 @@
#[cfg(windows)] use std::io::Read;
use std::ptr::write_volatile;
use std::{ use std::{
fs, fs,
io::{BufReader, Read}, io::BufReader,
path::{Path, PathBuf}, path::{Path, PathBuf},
}; };
#[cfg(windows)]
use std::ptr::write_volatile;
use libafl::{ use libafl::{
bolts::{current_nanos, rands::StdRand, tuples::tuple_list}, bolts::{current_nanos, rands::StdRand, tuples::tuple_list},
corpus::{InMemoryCorpus, OnDiskCorpus}, corpus::{InMemoryCorpus, OnDiskCorpus, QueueCorpusScheduler},
events::SimpleEventManager, events::SimpleEventManager,
executors::{inprocess::InProcessExecutor, ExitKind}, executors::{inprocess::InProcessExecutor, ExitKind},
feedbacks::{CrashFeedback, MaxMapFeedback}, feedbacks::{CrashFeedback, MapFeedbackState, MaxMapFeedback},
fuzzer::{Fuzzer, StdFuzzer}, fuzzer::{Fuzzer, StdFuzzer},
generators::{Automaton, GramatronGenerator}, generators::{Automaton, GramatronGenerator},
inputs::GramatronInput, inputs::GramatronInput,
@ -21,7 +23,6 @@ use libafl::{
StdScheduledMutator, StdScheduledMutator,
}, },
observers::StdMapObserver, observers::StdMapObserver,
schedulers::QueueScheduler,
stages::mutational::StdMutationalStage, stages::mutational::StdMutationalStage,
state::StdState, state::StdState,
}; };
@ -59,11 +60,14 @@ pub fn main() {
// Create an observation channel using the signals map // Create an observation channel using the signals map
let observer = StdMapObserver::new("signals", unsafe { &mut SIGNALS }); let observer = StdMapObserver::new("signals", unsafe { &mut SIGNALS });
// The state of the edges feedback.
let feedback_state = MapFeedbackState::with_observer(&observer);
// Feedback to rate the interestingness of an input // Feedback to rate the interestingness of an input
let mut feedback = MaxMapFeedback::new(&observer); let feedback = MaxMapFeedback::new(&feedback_state, &observer);
// A feedback to choose if an input is a solution or not // A feedback to choose if an input is a solution or not
let mut objective = CrashFeedback::new(); let objective = CrashFeedback::new();
// create a State from scratch // create a State from scratch
let mut state = StdState::new( let mut state = StdState::new(
@ -75,12 +79,9 @@ pub fn main() {
// on disk so the user can get them after stopping the fuzzer // on disk so the user can get them after stopping the fuzzer
OnDiskCorpus::new(PathBuf::from("./crashes")).unwrap(), OnDiskCorpus::new(PathBuf::from("./crashes")).unwrap(),
// States of the feedbacks. // States of the feedbacks.
// The feedbacks can report the data that should persist in the State. // They are the data related to the feedbacks that you want to persist in the State.
&mut feedback, tuple_list!(feedback_state),
// Same for objective feedbacks );
&mut objective,
)
.unwrap();
// The Monitor trait define how the fuzzer stats are reported to the user // The Monitor trait define how the fuzzer stats are reported to the user
let monitor = SimpleMonitor::new(|s| println!("{}", s)); let monitor = SimpleMonitor::new(|s| println!("{}", s));
@ -90,7 +91,7 @@ pub fn main() {
let mut mgr = SimpleEventManager::new(monitor); let mut mgr = SimpleEventManager::new(monitor);
// A queue policy to get testcasess from the corpus // A queue policy to get testcasess from the corpus
let scheduler = QueueScheduler::new(); let scheduler = QueueCorpusScheduler::new();
// A fuzzer with feedbacks and a corpus scheduler // A fuzzer with feedbacks and a corpus scheduler
let mut fuzzer = StdFuzzer::new(scheduler, feedback, objective); let mut fuzzer = StdFuzzer::new(scheduler, feedback, objective);
@ -143,7 +144,7 @@ pub fn main() {
.expect("Failed to generate the initial corpus"); .expect("Failed to generate the initial corpus");
// Setup a mutational stage with a basic bytes mutator // Setup a mutational stage with a basic bytes mutator
let mutator = StdScheduledMutator::with_max_stack_pow( let mutator = StdScheduledMutator::with_max_iterations(
tuple_list!( tuple_list!(
GramatronRandomMutator::new(&generator), GramatronRandomMutator::new(&generator),
GramatronRandomMutator::new(&generator), GramatronRandomMutator::new(&generator),

View File

@ -1 +0,0 @@
libpng-*

View File

@ -1,22 +0,0 @@
[package]
name = "baby_fuzzer_grimoire"
version = "0.8.2"
authors = ["Andrea Fioraldi <andreafioraldi@gmail.com>", "Dominik Maier <domenukk@gmail.com>"]
edition = "2021"
[features]
default = ["std"]
std = []
[profile.dev]
panic = "abort"
[profile.release]
panic = "abort"
lto = true
codegen-units = 1
opt-level = 3
debug = true
[dependencies]
libafl = { path = "../../libafl/" }

View File

@ -1,7 +0,0 @@
# Baby Grimoire fuzzer
This fuzzer shows how to implement the [Grimoire fuzzer](https://www.usenix.org/system/files/sec19-blazytko.pdf), a fully automated coverage-guided fuzzer that works without any form of human interaction or pre-configuration. `libafl::mutators::grimoire` provides four mutators:
`GrimoireExtensionMutator`, `GrimoireRecursiveReplacementMutator`,
`GrimoireStringReplacementMutator`, and `GrimoireRandomDeleteMutator`.
The fuzzer treats all files in `./corpus` as initial inputs. Inputs are mutated both by `mutator` (havoc_mutations) and by `grimoire_mutator`. The `harness` first checks whether the `input` contains the substring `fn` or `pippopippo` and then prints the input as mutated by `grimoire_mutator`.
> **_NOTE:_** This harness is not designed to crash, so `cargo run` will not terminate.
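As a rough illustration of the string-replacement idea behind mutators such as `GrimoireStringReplacementMutator`, here is a toy stand-in; it is not the libafl implementation, which operates on generalized inputs with gap markers and is considerably more involved.
```
// Toy byte-level token replacement: replace every occurrence of `from` with `to`.
fn replace_all(input: &[u8], from: &[u8], to: &[u8]) -> Vec<u8> {
    let mut out = Vec::with_capacity(input.len());
    let mut i = 0;
    while i < input.len() {
        if !from.is_empty() && input[i..].starts_with(from) {
            out.extend_from_slice(to);
            i += from.len();
        } else {
            out.push(input[i]);
            i += 1;
        }
    }
    out
}

fn main() {
    let mutated = replace_all(b"fn pippo(v) { return v; }", b"pippo", b"pluto");
    println!("{}", String::from_utf8_lossy(&mutated));
}
```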

View File

@ -1,4 +0,0 @@
fn pippo(v) { return "hello world " + v; }
var a = 666;
name = "scozzo" + a;
pippo(name);

View File

@ -1,173 +0,0 @@
#[cfg(windows)]
use std::ptr::write_volatile;
use std::{fs, io::Read, path::PathBuf};
use libafl::{
bolts::{current_nanos, rands::StdRand, tuples::tuple_list, AsSlice},
corpus::{InMemoryCorpus, OnDiskCorpus},
events::SimpleEventManager,
executors::{inprocess::InProcessExecutor, ExitKind},
feedbacks::{CrashFeedback, MaxMapFeedback},
fuzzer::{Evaluator, Fuzzer, StdFuzzer},
inputs::{GeneralizedInput, HasTargetBytes},
monitors::SimpleMonitor,
mutators::{
havoc_mutations, scheduled::StdScheduledMutator, GrimoireExtensionMutator,
GrimoireRandomDeleteMutator, GrimoireRecursiveReplacementMutator,
GrimoireStringReplacementMutator, Tokens,
},
observers::StdMapObserver,
schedulers::QueueScheduler,
stages::{mutational::StdMutationalStage, GeneralizationStage},
state::{HasMetadata, StdState},
};
/// Coverage map with explicit assignments due to the lack of instrumentation
static mut SIGNALS: [u8; 16] = [0; 16];
/// Assign a signal to the signals map
fn signals_set(idx: usize) {
unsafe { SIGNALS[idx] = 1 };
}
fn is_sub<T: PartialEq>(mut haystack: &[T], needle: &[T]) -> bool {
if needle.is_empty() {
return true;
}
while !haystack.is_empty() {
if haystack.starts_with(needle) {
return true;
}
haystack = &haystack[1..];
}
false
}
#[allow(clippy::similar_names)]
pub fn main() {
let mut initial_inputs = vec![];
for entry in fs::read_dir("./corpus").unwrap() {
let path = entry.unwrap().path();
let attr = fs::metadata(&path);
if attr.is_err() {
continue;
}
let attr = attr.unwrap();
if attr.is_file() && attr.len() > 0 {
println!("Loading file {:?} ...", &path);
let mut file = fs::File::open(path).expect("no file found");
let mut buffer = vec![];
file.read_to_end(&mut buffer).expect("buffer overflow");
let input = GeneralizedInput::new(buffer);
initial_inputs.push(input);
}
}
// The closure that we want to fuzz
let mut harness = |input: &GeneralizedInput| {
let target_bytes = input.target_bytes();
let bytes = target_bytes.as_slice();
if is_sub(bytes, "fn".as_bytes()) {
signals_set(2);
}
if is_sub(bytes, "pippopippo".as_bytes()) {
signals_set(3);
}
unsafe {
if input.grimoire_mutated {
// println!(">>> {:?}", input.generalized());
println!(">>> {:?}", std::str::from_utf8_unchecked(bytes));
}
}
signals_set(1);
ExitKind::Ok
};
// Create an observation channel using the signals map
let observer = StdMapObserver::new("signals", unsafe { &mut SIGNALS });
// Feedback to rate the interestingness of an input
let mut feedback = MaxMapFeedback::new_tracking(&observer, false, true);
// A feedback to choose if an input is a solution or not
let mut objective = CrashFeedback::new();
// create a State from scratch
let mut state = StdState::new(
// RNG
StdRand::with_seed(current_nanos()),
// Corpus that will be evolved, we keep it in memory for performance
InMemoryCorpus::new(),
// Corpus in which we store solutions (crashes in this example),
// on disk so the user can get them after stopping the fuzzer
OnDiskCorpus::new(PathBuf::from("./crashes")).unwrap(),
// States of the feedbacks.
// The feedbacks can report the data that should persist in the State.
&mut feedback,
// Same for objective feedbacks
&mut objective,
)
.unwrap();
if state.metadata().get::<Tokens>().is_none() {
state.add_metadata(Tokens::from([b"FOO".to_vec(), b"BAR".to_vec()]));
}
// The Monitor trait define how the fuzzer stats are reported to the user
let monitor = SimpleMonitor::new(|s| println!("{}", s));
// The event manager handle the various events generated during the fuzzing loop
// such as the notification of the addition of a new item to the corpus
let mut mgr = SimpleEventManager::new(monitor);
// A queue policy to get testcasess from the corpus
let scheduler = QueueScheduler::new();
// A fuzzer with feedbacks and a corpus scheduler
let mut fuzzer = StdFuzzer::new(scheduler, feedback, objective);
let generalization = GeneralizationStage::new(&observer);
// Create the executor for an in-process function with just one observer
let mut executor = InProcessExecutor::new(
&mut harness,
tuple_list!(observer),
&mut fuzzer,
&mut state,
&mut mgr,
)
.expect("Failed to create the Executor");
// Setup a mutational stage with a basic bytes mutator
let mutator = StdScheduledMutator::with_max_stack_pow(havoc_mutations(), 2);
let grimoire_mutator = StdScheduledMutator::with_max_stack_pow(
tuple_list!(
GrimoireExtensionMutator::new(),
GrimoireRecursiveReplacementMutator::new(),
GrimoireStringReplacementMutator::new(),
// give more probability to avoid large inputs
GrimoireRandomDeleteMutator::new(),
GrimoireRandomDeleteMutator::new(),
),
3,
);
let mut stages = tuple_list!(
generalization,
StdMutationalStage::new(mutator),
StdMutationalStage::new(grimoire_mutator)
);
for input in initial_inputs {
fuzzer
.evaluate_input(&mut state, &mut executor, &mut mgr, input)
.unwrap();
}
fuzzer
.fuzz_loop(&mut stages, &mut executor, &mut state, &mut mgr)
.expect("Error in the fuzzing loop");
}

View File

@ -1,3 +0,0 @@
corpus
minimized
solutions

View File

@ -1,23 +0,0 @@
[package]
name = "baby_fuzzer_minimizing"
version = "0.8.2"
authors = ["Andrea Fioraldi <andreafioraldi@gmail.com>", "Dominik Maier <domenukk@gmail.com>", "Addison Crump <research@addisoncrump.info>"]
edition = "2021"
[features]
default = ["std"]
tui = []
std = []
[profile.dev]
panic = "abort"
[profile.release]
panic = "abort"
lto = true
codegen-units = 1
opt-level = 3
debug = true
[dependencies]
libafl = { path = "../../libafl/", features = ["prelude"] }

View File

@ -1,9 +0,0 @@
# Baby fuzzer
This is a minimalistic example of how to create a libafl-based fuzzer which leverages minimisation.
The fuzzer steps until a crash occurs, minimising each corpus entry as it is discovered. Then, once a
solution is found, it attempts to minimise that as well.
The tested program is a simple Rust function without any instrumentation.
For real fuzzing, you will want to add some form of coverage or other feedback. A toy sketch of the minimisation idea follows below.
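A greedy tail-trimming minimiser captures the core idea: keep shrinking the input while the crash predicate still holds. The closure-based crash check below is an invented stand-in; the real `StdTMinMutationalStage` is far more general.
```
// Greedy tail-trimming minimiser: keep removing the last byte as long as the
// input still satisfies `crashes`. Purely illustrative; not libafl code.
fn minimize(mut input: Vec<u8>, crashes: impl Fn(&[u8]) -> bool) -> Vec<u8> {
    assert!(crashes(&input), "input must reproduce the crash to begin with");
    while input.len() > 1 && crashes(&input[..input.len() - 1]) {
        input.pop();
    }
    input
}

fn main() {
    // Stand-in crash predicate: the "bug" triggers whenever the input starts with "abc".
    let crashes = |buf: &[u8]| buf.starts_with(b"abc");
    let minimized = minimize(b"abcdefgh".to_vec(), crashes);
    assert_eq!(minimized, b"abc");
    println!("{}", String::from_utf8_lossy(&minimized));
}
```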

View File

@ -1,142 +0,0 @@
use std::path::PathBuf;
#[cfg(windows)]
use std::ptr::write_volatile;
use libafl::prelude::*;
/// Coverage map with explicit assignments due to the lack of instrumentation
static mut SIGNALS: [u8; 16] = [0; 16];
/// Assign a signal to the signals map
fn signals_set(idx: usize) {
unsafe { SIGNALS[idx] = 1 };
}
#[allow(clippy::similar_names)]
pub fn main() -> Result<(), Error> {
// The closure that we want to fuzz
let mut harness = |input: &BytesInput| {
let target = input.target_bytes();
let buf = target.as_slice();
signals_set(0);
if !buf.is_empty() && buf[0] == b'a' {
signals_set(1);
if buf.len() > 1 && buf[1] == b'b' {
signals_set(2);
if buf.len() > 2 && buf[2] == b'c' {
return ExitKind::Crash;
}
}
}
ExitKind::Ok
};
// Create an observation channel using the signals map
let observer =
unsafe { StdMapObserver::new_from_ptr("signals", SIGNALS.as_mut_ptr(), SIGNALS.len()) };
let factory = MapEqualityFactory::new_from_observer(&observer);
// Feedback to rate the interestingness of an input
let mut feedback = MaxMapFeedback::new(&observer);
// A feedback to choose if an input is a solution or not
let mut objective = CrashFeedback::new();
// The Monitor trait define how the fuzzer stats are displayed to the user
let mon = SimpleMonitor::new(|s| println!("{}", s));
let mut mgr = SimpleEventManager::new(mon);
let corpus_dir = PathBuf::from("./corpus");
let solution_dir = PathBuf::from("./solutions");
// create a State from scratch
let mut state = StdState::new(
// RNG
StdRand::with_seed(current_nanos()),
// Corpus that will be evolved, we keep it in memory for performance
OnDiskCorpus::new(&corpus_dir).unwrap(),
// Corpus in which we store solutions (crashes in this example),
// on disk so the user can get them after stopping the fuzzer
OnDiskCorpus::new(&solution_dir).unwrap(),
// States of the feedbacks.
// The feedbacks can report the data that should persist in the State.
&mut feedback,
// Same for objective feedbacks
&mut objective,
)
.unwrap();
// A queue policy to get testcasess from the corpus
let scheduler = QueueScheduler::new();
// A fuzzer with feedbacks and a corpus scheduler
let mut fuzzer = StdFuzzer::new(scheduler, feedback, objective);
// Create the executor for an in-process function with just one observer
let mut executor = InProcessExecutor::new(
&mut harness,
tuple_list!(observer),
&mut fuzzer,
&mut state,
&mut mgr,
)
.expect("Failed to create the Executor");
// Generator of printable bytearrays of max size 32
let mut generator = RandPrintablesGenerator::new(32);
// Generate 8 initial inputs
state
.generate_initial_inputs(&mut fuzzer, &mut executor, &mut generator, &mut mgr, 8)
.expect("Failed to generate the initial corpus");
// Setup a mutational stage with a basic bytes mutator
let mutator = StdScheduledMutator::new(havoc_mutations());
let minimizer = StdScheduledMutator::new(havoc_mutations());
let mut stages = tuple_list!(
StdMutationalStage::new(mutator),
StdTMinMutationalStage::new(minimizer, factory, 128)
);
while state.solutions().is_empty() {
fuzzer.fuzz_one(&mut stages, &mut executor, &mut state, &mut mgr)?;
}
let minimized_dir = PathBuf::from("./minimized");
let mut state = StdState::new(
StdRand::with_seed(current_nanos()),
OnDiskCorpus::new(&minimized_dir).unwrap(),
InMemoryCorpus::new(),
&mut (),
&mut (),
)
.unwrap();
// The Monitor trait define how the fuzzer stats are displayed to the user
let mon = SimpleMonitor::new(|s| println!("{}", s));
let mut mgr = SimpleEventManager::new(mon);
let minimizer = StdScheduledMutator::new(havoc_mutations());
let mut stages = tuple_list!(StdTMinMutationalStage::new(
minimizer,
CrashFeedbackFactory::default(),
1 << 10
));
let scheduler = QueueScheduler::new();
// A fuzzer with feedbacks and a corpus scheduler
let mut fuzzer = StdFuzzer::new(scheduler, (), ());
// Create the executor for an in-process function with just one observer
let mut executor = InProcessExecutor::new(&mut harness, (), &mut fuzzer, &mut state, &mut mgr)?;
state.load_initial_inputs_forced(&mut fuzzer, &mut executor, &mut mgr, &[solution_dir])?;
stages.perform_all(&mut fuzzer, &mut executor, &mut state, &mut mgr, 0)?;
Ok(())
}

View File

@ -1,6 +1,6 @@
[package] [package]
name = "baby_fuzzer_nautilus" name = "baby_fuzzer"
version = "0.8.2" version = "0.7.1"
authors = ["Andrea Fioraldi <andreafioraldi@gmail.com>", "Dominik Maier <domenukk@gmail.com>"] authors = ["Andrea Fioraldi <andreafioraldi@gmail.com>", "Dominik Maier <domenukk@gmail.com>"]
edition = "2018" edition = "2018"

View File

@ -1,9 +1,8 @@
## Baby Nautilus fuzzer

[Nautilus](https://www.ndss-symposium.org/ndss-paper/nautilus-fishing-for-deep-bugs-with-grammars/) is a coverage-guided and grammar-based fuzzer. It reads mruby's context-free grammar from `grammar.json` and then uses the corresponding feedback, generator, and mutators to fuzz.

`libafl::mutators::nautilus` contains:
```
NautilusInput, NautilusContext
NautilusChunksMetadata, NautilusFeedback
NautilusGenerator
NautilusRandomMutator, NautilusRecursionMutator, NautilusSpliceMutator
```
(On the "0.7.1" side this README is the generic Baby fuzzer text: a minimalistic example of how to create a libafl-based fuzzer that runs on a single core until a crash occurs and then exits; the tested program is a simple Rust function without any instrumentation, so for real fuzzing you will want to add coverage or other feedback.)

View File

@ -1,14 +1,17 @@
use std::path::PathBuf; use std::path::PathBuf;
#[cfg(windows)] #[cfg(windows)]
use std::ptr::write_volatile; use std::ptr::write_volatile;
use libafl::{ use libafl::{
bolts::{current_nanos, rands::StdRand, tuples::tuple_list}, bolts::{current_nanos, rands::StdRand, tuples::tuple_list},
corpus::{InMemoryCorpus, OnDiskCorpus}, corpus::{InMemoryCorpus, OnDiskCorpus, QueueCorpusScheduler},
events::SimpleEventManager, events::SimpleEventManager,
executors::{inprocess::InProcessExecutor, ExitKind}, executors::{inprocess::InProcessExecutor, ExitKind},
feedback_or, feedback_or,
feedbacks::{CrashFeedback, MaxMapFeedback, NautilusChunksMetadata, NautilusFeedback}, feedbacks::{
CrashFeedback, MapFeedbackState, MaxMapFeedback, NautilusChunksMetadata, NautilusFeedback,
},
fuzzer::{Fuzzer, StdFuzzer}, fuzzer::{Fuzzer, StdFuzzer},
generators::{NautilusContext, NautilusGenerator}, generators::{NautilusContext, NautilusGenerator},
inputs::NautilusInput, inputs::NautilusInput,
@ -17,7 +20,6 @@ use libafl::{
NautilusRandomMutator, NautilusRecursionMutator, NautilusSpliceMutator, StdScheduledMutator, NautilusRandomMutator, NautilusRecursionMutator, NautilusSpliceMutator, StdScheduledMutator,
}, },
observers::StdMapObserver, observers::StdMapObserver,
schedulers::QueueScheduler,
stages::mutational::StdMutationalStage, stages::mutational::StdMutationalStage,
state::{HasMetadata, StdState}, state::{HasMetadata, StdState},
}; };
@ -48,14 +50,17 @@ pub fn main() {
// Create an observation channel using the signals map // Create an observation channel using the signals map
let observer = StdMapObserver::new("signals", unsafe { &mut SIGNALS }); let observer = StdMapObserver::new("signals", unsafe { &mut SIGNALS });
// The state of the edges feedback.
let feedback_state = MapFeedbackState::with_observer(&observer);
// Feedback to rate the interestingness of an input // Feedback to rate the interestingness of an input
let mut feedback = feedback_or!( let feedback = feedback_or!(
MaxMapFeedback::new(&observer), MaxMapFeedback::new(&feedback_state, &observer),
NautilusFeedback::new(&context) NautilusFeedback::new(&context)
); );
// A feedback to choose if an input is a solution or not // A feedback to choose if an input is a solution or not
let mut objective = CrashFeedback::new(); let objective = CrashFeedback::new();
// create a State from scratch // create a State from scratch
let mut state = StdState::new( let mut state = StdState::new(
@ -67,12 +72,9 @@ pub fn main() {
// on disk so the user can get them after stopping the fuzzer // on disk so the user can get them after stopping the fuzzer
OnDiskCorpus::new(PathBuf::from("./crashes")).unwrap(), OnDiskCorpus::new(PathBuf::from("./crashes")).unwrap(),
// States of the feedbacks. // States of the feedbacks.
// The feedbacks can report the data that should persist in the State. // They are the data related to the feedbacks that you want to persist in the State.
&mut feedback, tuple_list!(feedback_state),
// Same for objective feedbacks );
&mut objective,
)
.unwrap();
if state.metadata().get::<NautilusChunksMetadata>().is_none() { if state.metadata().get::<NautilusChunksMetadata>().is_none() {
state.add_metadata(NautilusChunksMetadata::new("/tmp/".into())); state.add_metadata(NautilusChunksMetadata::new("/tmp/".into()));
@ -86,7 +88,7 @@ pub fn main() {
let mut mgr = SimpleEventManager::new(monitor); let mut mgr = SimpleEventManager::new(monitor);
// A queue policy to get testcasess from the corpus // A queue policy to get testcasess from the corpus
let scheduler = QueueScheduler::new(); let scheduler = QueueCorpusScheduler::new();
// A fuzzer with feedbacks and a corpus scheduler // A fuzzer with feedbacks and a corpus scheduler
let mut fuzzer = StdFuzzer::new(scheduler, feedback, objective); let mut fuzzer = StdFuzzer::new(scheduler, feedback, objective);
@ -138,7 +140,7 @@ pub fn main() {
.expect("Failed to generate the initial corpus"); .expect("Failed to generate the initial corpus");
// Setup a mutational stage with a basic bytes mutator // Setup a mutational stage with a basic bytes mutator
let mutator = StdScheduledMutator::with_max_stack_pow( let mutator = StdScheduledMutator::with_max_iterations(
tuple_list!( tuple_list!(
NautilusRandomMutator::new(&context), NautilusRandomMutator::new(&context),
NautilusRandomMutator::new(&context), NautilusRandomMutator::new(&context),

View File

@ -1 +0,0 @@
libpng-*

View File

@ -1,40 +0,0 @@
[package]
name = "baby_fuzzer_swap_differential"
version = "0.7.1"
authors = ["Addison Crump <research@addisoncrump.info>"]
edition = "2021"
default-run = "fuzzer_sd"
[features]
tui = []
multimap = []
[profile.dev]
panic = "abort"
[profile.release]
panic = "abort"
lto = true
codegen-units = 1
opt-level = 3
debug = true
[build-dependencies]
anyhow = "1"
bindgen = "0.61"
cc = "1.0"
[dependencies]
libafl = { path = "../../libafl" }
libafl_targets = { path = "../../libafl_targets", features = ["sancov_pcguard_hitcounts", "libfuzzer", "sancov_cmplog", "pointer_maps"] }
mimalloc = { version = "*", default-features = false }
libafl_cc = { path = "../../libafl_cc/" }
[[bin]]
name = "fuzzer_sd"
path = "src/main.rs"
[[bin]]
name = "libafl_cc"
path = "src/bin/libafl_cc.rs"

View File

@ -1,45 +0,0 @@
# Variables
[env]
FUZZER_NAME='fuzzer_sd'
CARGO_TARGET_DIR = { value = "target", condition = { env_not_set = ["CARGO_TARGET_DIR"] } }
LIBAFL_CC = '${CARGO_TARGET_DIR}/release/libafl_cc'
FUZZER = '${CARGO_TARGET_DIR}/release/${FUZZER_NAME}'
PROJECT_DIR = { script = ["pwd"] }
# Compilers
[tasks.cc]
command = "cargo"
args = ["build" , "--release", "--bin", "libafl_cc"]
# Harness
[tasks.fuzzer]
command = "cargo"
args = ["build" , "--release", "--bin", "${FUZZER_NAME}"]
dependencies = [ "cc" ]
# Run the fuzzer
[tasks.run]
command = "${CARGO_TARGET_DIR}/release/${FUZZER_NAME}"
dependencies = [ "fuzzer" ]
# Test
[tasks.test]
linux_alias = "test_unix"
mac_alias = "test_unix"
windows_alias = "unsupported"
[tasks.test_unix]
script_runner = "@shell"
script='''
timeout 10s ${CARGO_TARGET_DIR}/release/${FUZZER_NAME}
'''
dependencies = [ "fuzzer" ]
# Clean up
[tasks.clean]
# Disable default `clean` definition
clear = true
script_runner="@shell"
script='''
cargo clean
'''

View File

@ -1,11 +0,0 @@
# Baby fuzzer (swap differential)
This is a minimalistic example of how to create a libafl-based differential fuzzer which swaps out the AFL coverage map during
execution so that both maps can be measured.
It runs on a single core until an input is discovered that both programs accept.
The tested programs are provided in `first.c` and `second.c`.
You can execute this fuzzer with `cargo make run`. If you prefer to do so manually, you can simply run
`cargo build --release --bin libafl_cc` followed by `cargo run --release --bin fuzzer_sd`. A toy sketch of the map-swapping idea follows below.
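The map-swapping trick can be pictured with a tiny self-contained sketch: a single instrumentation callback writes into whichever of two maps is currently selected, and the driver flips the selection before running each target. The names and layout below are invented for illustration; the real mechanism is `DifferentialAFLMapSwapObserver` together with the `pointer_maps` feature.
```
use std::cell::RefCell;

thread_local! {
    // Two small coverage maps and an index selecting which one is "active".
    static MAPS: RefCell<[[u8; 8]; 2]> = RefCell::new([[0; 8]; 2]);
    static CURRENT: RefCell<usize> = RefCell::new(0);
}

// Instrumentation stand-in: record an edge hit in whichever map is selected.
fn coverage_hit(edge: usize) {
    let idx = CURRENT.with(|c| *c.borrow());
    MAPS.with(|m| m.borrow_mut()[idx][edge] = 1);
}

// Two "targets" sharing the same instrumentation callback.
fn first_target(input: &[u8]) {
    if input.first() == Some(&b'a') {
        coverage_hit(1);
    }
}

fn second_target(input: &[u8]) {
    if input.first() == Some(&b'a') {
        coverage_hit(2);
    }
}

fn main() {
    let input = b"abc";
    CURRENT.with(|c| *c.borrow_mut() = 0); // swap in the first map
    first_target(input);
    CURRENT.with(|c| *c.borrow_mut() = 1); // swap in the second map
    second_target(input);
    MAPS.with(|m| println!("{:?}", m.borrow()));
}
```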

View File

@ -1,45 +0,0 @@
use std::{env, path::PathBuf, str::FromStr};
fn main() -> anyhow::Result<()> {
if env::var("CARGO_BIN_NAME").map_or(true, |v| v != "libafl_cc") {
println!("cargo:rerun-if-changed=./first.h");
println!("cargo:rerun-if-changed=./first.c");
println!("cargo:rerun-if-changed=./second.h");
println!("cargo:rerun-if-changed=./second.c");
println!("cargo:rerun-if-changed=./common.c");
// Configure and generate bindings.
let bindings = bindgen::builder()
.header("first.h")
.header("second.h")
.parse_callbacks(Box::new(bindgen::CargoCallbacks))
.generate()?;
// Write the generated bindings to an output file.
let out_path = PathBuf::from(env::var("OUT_DIR").unwrap());
bindings
.write_to_file(out_path.join("bindings.rs"))
.expect("Couldn't write bindings!");
let compiler = env::var("CARGO_TARGET_DIR")
.map_or(PathBuf::from_str("target").unwrap(), |v| {
PathBuf::from_str(&v).unwrap()
})
.join("release/libafl_cc");
println!("cargo:rerun-if-changed={}", compiler.to_str().unwrap());
if !compiler.try_exists().unwrap_or(false) {
println!("cargo:warning=Can't find libafl_cc; assuming that we're building it.");
} else {
cc::Build::new()
.compiler(compiler)
.file("first.c")
.file("second.c")
.file("common.c")
.compile("diff-target");
println!("cargo:rustc-link-lib=diff-target");
}
}
Ok(())
}

View File

@ -1,12 +0,0 @@
#include "common.h"
bool both_require(const uint8_t *bytes, size_t len) {
if (len >= 1 && bytes[0] == 'a') {
if (len >= 2 && bytes[1] == 'b') {
if (len >= 3 && bytes[2] == 'c') {
return ACCEPT;
}
}
}
return REJECT;
}

View File

@ -1,13 +0,0 @@
#ifndef LIBAFL_COMMON_H
#define LIBAFL_COMMON_H
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#define ACCEPT true
#define REJECT false
bool both_require(const uint8_t *bytes, size_t len);
#endif // LIBAFL_COMMON_H

View File

@ -1,10 +0,0 @@
#include "first.h"
bool inspect_first(const uint8_t *bytes, size_t len) {
if (both_require(bytes, len)) {
if (len >= 4 && bytes[3] == 'd') {
return ACCEPT;
}
}
return REJECT;
}

View File

@ -1,8 +0,0 @@
#ifndef LIBAFL_FIRST_H
#define LIBAFL_FIRST_H
#include "common.h"
bool inspect_first(const uint8_t *bytes, size_t len);
#endif // LIBAFL_FIRST_H

View File

@ -1,10 +0,0 @@
#include "second.h"
bool inspect_second(const uint8_t *bytes, size_t len) {
if (both_require(bytes, len)) {
if (len >= 5 && bytes[4] == 'e') {
return ACCEPT;
}
}
return REJECT;
}

View File

@ -1,8 +0,0 @@
#ifndef LIBAFL_SECOND_H
#define LIBAFL_SECOND_H
#include "common.h"
bool inspect_second(const uint8_t *bytes, size_t len);
#endif // LIBAFL_SECOND_H

View File

@ -1,35 +0,0 @@
use std::env;
use libafl_cc::{ClangWrapper, CompilerWrapper};
pub fn main() {
let args: Vec<String> = env::args().collect();
if args.len() > 1 {
let mut dir = env::current_exe().unwrap();
let wrapper_name = dir.file_name().unwrap().to_str().unwrap();
let is_cpp = match wrapper_name[wrapper_name.len()-2..].to_lowercase().as_str() {
"cc" => false,
"++" | "pp" | "xx" => true,
_ => panic!("Could not figure out if c or c++ wrapper was called. Expected {:?} to end with c or cxx", dir),
};
dir.pop();
let mut cc = ClangWrapper::new();
if let Some(code) = cc
.cpp(is_cpp)
// silence the compiler wrapper output, needed for some configure scripts.
.silence(true)
.parse_args(&args)
.expect("Failed to parse the command line")
.add_arg("-fsanitize-coverage=trace-pc-guard")
.run()
.expect("Failed to run the wrapped compiler")
{
std::process::exit(code);
}
} else {
panic!("LibAFL CC: No Arguments given");
}
}

View File

@ -1,246 +0,0 @@
#[cfg(windows)]
use std::ptr::write_volatile;
use std::{
alloc::{alloc_zeroed, Layout},
path::PathBuf,
};
#[cfg(feature = "tui")]
use libafl::monitors::tui::TuiMonitor;
#[cfg(not(feature = "tui"))]
use libafl::monitors::SimpleMonitor;
use libafl::{
bolts::{current_nanos, rands::StdRand, tuples::tuple_list, AsSlice},
corpus::{Corpus, InMemoryCorpus, OnDiskCorpus},
events::SimpleEventManager,
executors::{inprocess::InProcessExecutor, DiffExecutor, ExitKind},
feedbacks::{CrashFeedback, MaxMapFeedback},
fuzzer::{Fuzzer, StdFuzzer},
generators::RandPrintablesGenerator,
inputs::{BytesInput, HasTargetBytes},
mutators::scheduled::{havoc_mutations, StdScheduledMutator},
observers::StdMapObserver,
schedulers::QueueScheduler,
stages::mutational::StdMutationalStage,
state::{HasSolutions, StdState},
};
use libafl_targets::{DifferentialAFLMapSwapObserver, MAX_EDGES_NUM};
use mimalloc::MiMalloc;
#[global_allocator]
static GLOBAL: MiMalloc = MiMalloc;
// bindings to the functions defined in the target
mod bindings {
#![allow(non_camel_case_types)]
#![allow(non_upper_case_globals)]
#![allow(unused)]
include!(concat!(env!("OUT_DIR"), "/bindings.rs"));
}
use bindings::{inspect_first, inspect_second};
#[cfg(feature = "multimap")]
mod multimap {
pub use libafl::observers::{HitcountsIterableMapObserver, MultiMapObserver};
pub static mut FIRST_EDGES: &'static mut [u8] = &mut [];
pub static mut SECOND_EDGES: &'static mut [u8] = &mut [];
pub static mut COMBINED_EDGES: [&'static mut [u8]; 2] = [&mut [], &mut []];
}
#[cfg(feature = "multimap")]
use multimap::*;
#[cfg(not(feature = "multimap"))]
mod slicemap {
pub use libafl::observers::HitcountsMapObserver;
pub static mut EDGES: &'static mut [u8] = &mut [];
}
#[cfg(not(feature = "multimap"))]
use slicemap::*;
#[allow(clippy::similar_names)]
pub fn main() {
// The closure that we want to fuzz
let mut first_harness = |input: &BytesInput| {
let target = input.target_bytes();
let buf = target.as_slice();
if unsafe { inspect_first(buf.as_ptr(), buf.len()) } {
ExitKind::Crash
} else {
ExitKind::Ok
}
};
let mut second_harness = |input: &BytesInput| {
let target = input.target_bytes();
let buf = target.as_slice();
if unsafe { inspect_second(buf.as_ptr(), buf.len()) } {
ExitKind::Crash
} else {
ExitKind::Ok
}
};
#[cfg(feature = "multimap")]
let (first_map_observer, second_map_observer, map_swapper, map_observer) = {
// initialize the maps
unsafe {
let layout = Layout::from_size_align(MAX_EDGES_NUM, 64).unwrap();
FIRST_EDGES = core::slice::from_raw_parts_mut(alloc_zeroed(layout), MAX_EDGES_NUM);
SECOND_EDGES = core::slice::from_raw_parts_mut(alloc_zeroed(layout), MAX_EDGES_NUM);
COMBINED_EDGES = [&mut FIRST_EDGES, &mut SECOND_EDGES];
}
// create the base maps used to observe the different executors from two independent maps
let mut first_map_observer = StdMapObserver::new("first-edges", unsafe { FIRST_EDGES });
let mut second_map_observer = StdMapObserver::new("second-edges", unsafe { SECOND_EDGES });
// create a map swapper so that we can replace the coverage map pointer (requires feature pointer_maps!)
let map_swapper =
DifferentialAFLMapSwapObserver::new(&mut first_map_observer, &mut second_map_observer);
// create a combined map observer, e.g. for calibration
// we use MultiMapObserver::differential to indicate that we want to use the observer in
// differential mode
let map_observer = HitcountsIterableMapObserver::new(MultiMapObserver::differential(
"combined-edges",
unsafe { &mut COMBINED_EDGES },
));
(
first_map_observer,
second_map_observer,
map_swapper,
map_observer,
)
};
#[cfg(not(feature = "multimap"))]
let (first_map_observer, second_map_observer, map_swapper, map_observer) = {
// initialize the map
unsafe {
let layout = Layout::from_size_align(MAX_EDGES_NUM * 2, 64).unwrap();
EDGES = core::slice::from_raw_parts_mut(alloc_zeroed(layout), MAX_EDGES_NUM * 2);
}
// create the base maps used to observe the different executors by splitting a slice
let mut first_map_observer =
StdMapObserver::new("first-edges", unsafe { &mut EDGES[..MAX_EDGES_NUM] });
let mut second_map_observer =
StdMapObserver::new("second-edges", unsafe { &mut EDGES[MAX_EDGES_NUM..] });
// create a map swapper so that we can replace the coverage map pointer (requires feature pointer_maps!)
let map_swapper =
DifferentialAFLMapSwapObserver::new(&mut first_map_observer, &mut second_map_observer);
// create a combined map observer, e.g. for calibration
// we use StdMapObserver::differential to indicate that we want to use the observer in
// differential mode
let map_observer =
HitcountsMapObserver::new(StdMapObserver::differential("combined-edges", unsafe {
EDGES
}));
(
first_map_observer,
second_map_observer,
map_swapper,
map_observer,
)
};
// Feedback to rate the interestingness of an input
let mut feedback = MaxMapFeedback::new(&map_observer);
// A feedback to choose if an input is a solution or not
// Crash here means "both crashed", which is our objective
let mut objective = CrashFeedback::new();
// create a State from scratch
let mut state = StdState::new(
// RNG
StdRand::with_seed(current_nanos()),
// Corpus that will be evolved, we keep it in memory for performance
InMemoryCorpus::new(),
// Corpus in which we store solutions (crashes in this example),
// on disk so the user can get them after stopping the fuzzer
OnDiskCorpus::new(PathBuf::from("./crashes")).unwrap(),
// States of the feedbacks.
// The feedbacks can report the data that should persist in the State.
&mut feedback,
// Same for objective feedbacks
&mut objective,
)
.unwrap();
// The Monitor trait define how the fuzzer stats are displayed to the user
#[cfg(not(feature = "tui"))]
let mon = SimpleMonitor::new(|s| println!("{}", s));
#[cfg(feature = "tui")]
let mon = TuiMonitor::new(String::from("Baby Fuzzer"), false);
// The event manager handle the various events generated during the fuzzing loop
// such as the notification of the addition of a new item to the corpus
let mut mgr = SimpleEventManager::new(mon);
// A queue policy to get testcases from the corpus
let scheduler = QueueScheduler::new();
// A fuzzer with feedbacks and a corpus scheduler
let mut fuzzer = StdFuzzer::new(scheduler, feedback, objective);
// Create the executor for an in-process function with just one observer
let first_executor = InProcessExecutor::new(
&mut first_harness,
tuple_list!(first_map_observer),
&mut fuzzer,
&mut state,
&mut mgr,
)
.expect("Failed to create the first executor");
let second_executor = InProcessExecutor::new(
&mut second_harness,
tuple_list!(second_map_observer),
&mut fuzzer,
&mut state,
&mut mgr,
)
.expect("Failed to create the second executor");
// create the differential executor, providing both the map swapper (which will ensure the
// instrumentation picks the correct map to write to) and the map observer (which provides the
// combined feedback)
let mut differential_executor = DiffExecutor::new(
first_executor,
second_executor,
tuple_list!(map_swapper, map_observer),
);
// Generator of printable bytearrays of max size 32
let mut generator = RandPrintablesGenerator::new(32);
// Generate 8 initial inputs
state
.generate_initial_inputs(
&mut fuzzer,
&mut differential_executor,
&mut generator,
&mut mgr,
8,
)
.expect("Failed to generate the initial corpus");
// Setup a mutational stage with a basic bytes mutator
let mutator = StdScheduledMutator::new(havoc_mutations());
let mut stages = tuple_list!(StdMutationalStage::new(mutator));
while state.solutions().is_empty() {
fuzzer
.fuzz_one(
&mut stages,
&mut differential_executor,
&mut state,
&mut mgr,
)
.expect("Error in the fuzzing loop");
}
}

View File

@ -1,6 +1,6 @@
[package] [package]
name = "baby_fuzzer_tokens" name = "baby_fuzzer"
version = "0.8.2" version = "0.7.1"
authors = ["Andrea Fioraldi <andreafioraldi@gmail.com>", "Dominik Maier <domenukk@gmail.com>"] authors = ["Andrea Fioraldi <andreafioraldi@gmail.com>", "Dominik Maier <domenukk@gmail.com>"]
edition = "2021" edition = "2021"

Some files were not shown because too many files have changed in this diff.