Compare commits


86 Commits

SHA1 Message Date
798aa2ceb9 remove dead code 2023-06-02 10:00:13 +02:00
183ff32beb igonre archives 2023-06-02 08:32:23 +02:00
89979b64d9 eval script wrangeling 2023-05-27 13:19:19 +02:00
35da9fdf24 HACK: interrupt limit for random fuzzing 2023-05-25 08:40:43 +02:00
1bca346b39 plot enpoints 2023-05-25 08:39:47 +02:00
8b90886299 paralellize plots 2023-05-23 12:06:14 +02:00
1bd7d853ac update plot script 2023-05-11 12:56:12 +02:00
253048e534 tweak time outputs 2023-05-10 09:25:22 +02:00
52cc00fedc add run_until_saturation 2023-05-08 18:23:32 +02:00
eec998c426 update snakefile 2023-05-04 11:47:56 +02:00
a328ddfd5f fix empty iterator crash, restart 2023-05-02 09:41:53 +02:00
6a042da5c1 set up configurations 2023-04-28 13:11:48 +02:00
2e20a22dc6 add missing use 2023-04-27 13:36:01 +02:00
bbc83ef6be randomize interrupts until wort 2023-04-24 15:33:03 +02:00
48466ac2d7 Test: remove pc from hash 2023-04-24 12:52:29 +02:00
ad8cecdba4 Test: hash notification states 2023-04-24 12:51:09 +02:00
c2afc0186e allow plotting from remote mount 2023-04-24 11:16:10 +02:00
4df67db479 update snakefile 2023-04-24 11:12:38 +02:00
402eff7b47 small fixes 2023-04-21 17:22:22 +02:00
a8a6c175c8 WIP: add simple interrupt time randomizer 2023-04-21 17:11:18 +02:00
8a79e12f91 update target_symbols 2023-04-21 14:12:04 +02:00
a3e38b6abb skip unchanged interrupts 2023-04-20 16:50:23 +02:00
eb04325f09 fix staeg setup 2023-04-20 16:32:19 +02:00
cfb8fa2b32 fix use 2023-04-20 16:04:45 +02:00
2889e9bf61 WIP: move interrupt mutation to new stage 2023-04-20 15:50:22 +02:00
960764cf85 wip: interrupt placement 2023-04-17 17:33:21 +02:00
e6816cc2de add interrupt mutator 2023-04-17 09:50:18 +02:00
f3180a35cc plot min and max lines 2023-03-23 13:20:23 +01:00
54312b2577 plot lines instead of points 2023-03-22 16:10:19 +01:00
6d920fd962 fixes 2023-03-21 16:58:44 +01:00
281979ecd8 revert changes 2023-03-21 16:39:21 +01:00
c628afaa81 add generation based genetic testing 2023-03-21 16:34:05 +01:00
c548c6bc09 snakefile: dump cases, fix random fuzzing 2023-03-17 11:15:55 +01:00
6e8769907d add a new scheduler for systemtraces 2023-03-16 16:13:16 +01:00
bf639e42fa fix snakefile, symbols 2023-03-14 17:08:05 +01:00
a05ff97d0c seed rng from SEED_RANDOM 2023-03-13 14:45:21 +01:00
f09034b7fe determinism fixes, scheduler precision, restarts 2023-03-13 14:43:58 +01:00
d118eeacbd switch to native breakpoints 2023-03-13 12:19:24 +01:00
57fc441118 fix interrupt config 2023-03-09 17:21:26 +01:00
10b5fe8a74 fix rng seed 2023-03-09 10:53:40 +01:00
7f987b037d configure restarting manager 2023-03-09 10:16:08 +01:00
58be280a62 add micro_longint 2023-03-03 12:30:36 +01:00
3c586f5047 fuzz multiple interrupts 2023-03-02 15:30:53 +01:00
9336b932d0 rework plotting 2023-02-28 17:01:04 +01:00
e0f73778e2 add interrupt fuzzing 2023-02-27 10:39:52 +01:00
e5ac5ba825 dump time for showmap 2023-02-24 12:25:08 +01:00
2acf3ef301 add plotting to snakefile 2023-02-24 12:07:53 +01:00
28bac2a850 add feed_longest to record random cases 2023-02-23 22:33:13 +01:00
41586dd8b1 plotting: respect types 2023-02-23 22:28:25 +01:00
7420aabeeb change feedback order 2023-02-20 12:28:39 +01:00
d118ff0056 fix build 2023-02-19 19:25:43 +01:00
dfe4f713b9 fix feedbacks 2023-02-19 18:38:31 +01:00
f7a05d2a7c benchmark using snakemake 2023-02-16 22:56:43 +01:00
2593bdf42f trace_abbs and dump path 2023-02-15 09:17:48 +01:00
8c8ab7c44e add graph feedback 2023-02-10 13:46:07 +01:00
9cadc5d61c update input sizes, dump worstcase, benchmarking 2023-02-07 14:59:21 +01:00
594554eca0 remove address translations, extend plots 2023-01-26 14:03:18 +01:00
267309b954 add hists to plot script 2023-01-26 09:47:12 +01:00
35435fbd97 speed up random generation 2023-01-25 16:14:17 +01:00
8fcc54bbdd write out times over time 2023-01-25 14:55:04 +01:00
1f538f9834 add sytemstate sceduler, fuzz until time 2023-01-25 12:59:17 +01:00
ba01f600ee re-add system state fuzzing 2023-01-24 09:11:45 +01:00
2cb479581d add virtual edge to longest runs 2023-01-19 10:33:13 +01:00
1fbf948478 do not force generated inputs 2023-01-17 10:26:27 +01:00
6e1d5695e3 debug stuff 2023-01-17 10:18:24 +01:00
8d31196614 random seeds, better plots 2023-01-17 10:01:15 +01:00
4c90144db5 add more benchmarks 2023-01-13 16:05:43 +01:00
eeaf7eb43f exectime increase feedback 2023-01-11 16:09:06 +01:00
68c4887dad rename bin, allow random fuzzing 2023-01-09 13:53:32 +01:00
7ca2d43f3d benchmark with duration 2023-01-09 12:39:51 +01:00
9f97852e4a add benchmark scripts 2023-01-09 12:39:35 +01:00
f4e1990387 add systemstate feature and dump times 2023-01-05 17:34:53 +01:00
d936234976 fix multicore build 2023-01-05 13:35:51 +01:00
795fbff61a ignore artifacts 2023-01-05 13:31:33 +01:00
6a9df35e28 minimal changes 2023-01-05 13:30:24 +01:00
9b9fbc3677 add interrupt injection 2023-01-03 20:09:45 +01:00
decae09931 input length and read input pointer 2022-12-23 15:32:20 +01:00
b812e994a6 draft: add graph feedback 2022-12-19 18:14:52 +01:00
4587f442d0 add TimeMaximizerCorpusScheduler 2022-12-19 17:44:58 +01:00
c748fecbe2 add last api callsite to system state 2022-12-19 13:13:38 +01:00
7595d25192 libafl_qemu: add jmp instrumentation 2022-12-19 13:12:37 +01:00
79bca99cc7 WIP: add systemstate tracking 2022-12-15 15:23:07 +01:00
b07f7ccbca add arguments 2022-12-12 17:41:33 +01:00
e3f38edd0a get time from ClockTimeFeedback 2022-12-12 15:30:05 +01:00
6ad55e3b29 fixup 2022-12-12 15:16:45 +01:00
f7ee38ebb2 WIP: port fret 2022-12-12 14:58:28 +01:00
905 changed files with 42636 additions and 115204 deletions


@@ -6,27 +6,22 @@
 "context": "..",
 // Update the 'dockerFile' property if you aren't using the standard 'Dockerfile' filename.
 "dockerFile": "../Dockerfile",
-"customizations": {
-"vscode": {
+// Set *default* container specific settings.json values on container create.
+"settings": {},
 // Add the IDs of extensions you want installed when the container is created.
-"extensions": ["matklad.rust-analyzer", "microsoft.Docker"],
-// Set *default* container specific settings.json values on container create.
-"settings": {
-"rust-analyzer.cargo.noDefaultFeatures": true
-}
-}
-},
+"extensions": [
+"matklad.rust-analyzer"
+],
 // Use 'forwardPorts' to make a list of ports inside the container available locally.
 // "forwardPorts": [],
 // Uncomment the next line to run commands after the container is created - for example installing curl.
-// Install development components that shouldn't be in the main Dockerfile
-"postCreateCommand": "rustup component add --toolchain nightly rustfmt clippy llvm-tools-preview && cargo install --locked cargo-make",
+// "postCreateCommand": "apt-get update && apt-get install -y curl",
 // Uncomment when using a ptrace-based debugger like C++, Go, and Rust
 "runArgs": [
 "--cap-add=SYS_PTRACE",
 "--security-opt",
 "seccomp=unconfined"
-]
+],
 // Uncomment to use the Docker CLI from inside the container. See https://aka.ms/vscode-remote/samples/docker-from-docker.
 // "mounts": [ "source=/var/run/docker.sock,target=/var/run/docker.sock,type=bind" ],
 // Uncomment to connect as a non-root user if you've added one. See https://aka.ms/vscode-remote/containers/non-root.


@@ -13,8 +13,6 @@ Thank you for making LibAFL better!
**Describe the bug** **Describe the bug**
A clear and concise description of what the bug is. A clear and concise description of what the bug is.
If you want to present the backtrace, don't forget to run with `errors_backtrace` feature and log from `RUST_LOG`
In addition, please tell us what is your fuzzer's Cargo.toml
**To Reproduce** **To Reproduce**
Steps to reproduce the behavior: Steps to reproduce the behavior:


@@ -1,6 +0,0 @@
version: 2
updates:
- package-ecosystem: "cargo"
directory: "/"
schedule:
interval: "daily"


@@ -2,37 +2,25 @@ name: build and test
on: on:
push: push:
branches: [ main, "pr/**" ] branches: [ main ]
pull_request: pull_request:
branches: [ main ] branches: [ main ]
workflow_dispatch:
merge_group:
env: env:
CARGO_TERM_COLOR: always CARGO_TERM_COLOR: always
CARGO_NET_GIT_FETCH_WITH_CLI: true
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
jobs: jobs:
common: common:
strategy: strategy:
fail-fast: false
matrix: matrix:
os: [ ubuntu-latest, windows-latest, macOS-latest ] os: [ubuntu-latest, windows-latest, macOS-latest]
runs-on: ${{ matrix.os }} runs-on: ${{ matrix.os }}
steps: steps:
- name: Install mimetype
if: runner.os == 'Linux'
run: sudo apt-get install libfile-mimeinfo-perl
- uses: actions-rs/toolchain@v1 - uses: actions-rs/toolchain@v1
with: with:
profile: minimal profile: minimal
toolchain: nightly toolchain: nightly
override: true override: true
- name: Install mimetype
if: runner.os == 'Linux'
run: sudo apt-get install libfile-mimeinfo-perl
- name: install mdbook - name: install mdbook
uses: baptiste0928/cargo-install@v1.3.0 uses: baptiste0928/cargo-install@v1.3.0
with: with:
@@ -43,17 +31,16 @@ jobs:
crate: mdbook-linkcheck crate: mdbook-linkcheck
- uses: actions/checkout@v3 - uses: actions/checkout@v3
- uses: Swatinem/rust-cache@v2 - uses: Swatinem/rust-cache@v2
with: { shared-key: "ubuntu" } - name: Install mimetype
if: runner.os == 'Linux' if: runner.os == 'Linux'
- uses: Swatinem/rust-cache@v2 run: sudo apt-get install libfile-mimeinfo-perl
if: runner.os != 'Linux'
- name: Check for binary blobs - name: Check for binary blobs
if: runner.os == 'Linux' if: runner.os == 'Linux'
run: ./scripts/check_for_blobs.sh run: ./scripts/check_for_blobs.sh
- name: default nightly
run: rustup default nightly
- name: Build libafl debug - name: Build libafl debug
run: cargo build -p libafl run: cargo build -p libafl
- name: Build the book
run: cd docs && mdbook build
- name: Test the book - name: Test the book
# TODO: fix books test fail with updated windows-rs # TODO: fix books test fail with updated windows-rs
if: runner.os != 'Windows' if: runner.os != 'Windows'
@@ -62,485 +49,244 @@
run: cargo test run: cargo test
- name: Test libafl no_std - name: Test libafl no_std
run: cd libafl && cargo test --no-default-features run: cd libafl && cargo test --no-default-features
- name: Test libafl_bolts no_std no_alloc
run: cd libafl_bolts && cargo test --no-default-features
- name: Test libafl_targets no_std - name: Test libafl_targets no_std
run: cd libafl_targets && cargo test --no-default-features run: cd libafl_targets && cargo test --no-default-features
llvm-tester:
runs-on: ubuntu-22.04
continue-on-error: true
strategy:
matrix:
llvm-version: [ "16", "17" ] # Add 18 when KyleMayes/install-llvm-action enables it
steps:
- name: Remove Dotnet & Haskell
run: rm -rf /usr/share/dotnet && rm -rf /opt/ghc
- uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: stable
- uses: actions/checkout@v3
- uses: Swatinem/rust-cache@v2
with: { shared-key: "llvm-tester" }
- name: Install LLVM and Clang
uses: KyleMayes/install-llvm-action@v2
with:
version: "${{matrix.llvm-version}}"
- name: Build and test with llvm-${{ matrix.llvm-version }}
run: pwd && ls & cd libafl_cc && cargo build --release
ubuntu-doc-build:
runs-on: ubuntu-22.04
steps:
- uses: actions/checkout@v3
- uses: ./.github/workflows/ubuntu-prepare
- uses: Swatinem/rust-cache@v2
# ---- doc check ----
- name: Build Docs
run: RUSTFLAGS="--cfg docsrs" cargo +nightly doc --all-features --no-deps
ubuntu-doc-test:
runs-on: ubuntu-22.04
steps:
- uses: actions/checkout@v3
- uses: ./.github/workflows/ubuntu-prepare
- uses: Swatinem/rust-cache@v2
# ---- doc check ----
- name: Test Docs
run: RUSTFLAGS="--cfg docsrs" cargo +nightly test --doc --all-features
ubuntu-miri:
runs-on: ubuntu-22.04
needs: ubuntu
steps:
- uses: actions/checkout@v3
- uses: ./.github/workflows/ubuntu-prepare
- uses: Swatinem/rust-cache@v2
- name: Add nightly clippy
run: rustup toolchain install nightly --component miri --allow-downgrade
# --- miri undefined behavior test --
- name: Run miri tests
run: RUST_BACKTRACE=1 MIRIFLAGS="-Zmiri-disable-isolation" cargo +nightly miri test
ubuntu: ubuntu:
runs-on: ubuntu-22.04 runs-on: ubuntu-22.04
steps: steps:
- name: Remove Dotnet & Haskell - name: Remove Dotnet & Haskell
run: rm -rf /usr/share/dotnet && rm -rf /opt/ghc run: rm -rf /usr/share/dotnet && rm -rf /opt/ghc
- uses: actions-rs/toolchain@v1 - uses: actions-rs/toolchain@v1
with: with:
profile: minimal profile: minimal
toolchain: stable toolchain: stable
- name: Remove existing clang and LLVM - name: set mold linker as default linker
run: sudo apt purge llvm* clang* lld* lldb* opt* uses: rui314/setup-mold@v1
- name: Install and cache deps - name: Install and cache deps
run: sudo apt update && sudo apt install ninja-build shellcheck libgtk-3-dev gcc-arm-linux-gnueabi g++-arm-linux-gnueabi libslirp-dev libz3-dev uses: awalsh128/cache-apt-pkgs-action@v1.1.0
- name: Add nightly clippy with:
run: rustup toolchain install nightly --component clippy --component miri --allow-downgrade packages: llvm llvm-dev clang ninja-build clang-format-13 shellcheck libgtk-3-dev gcc-arm-linux-gnueabi g++-arm-linux-gnueabi libslirp-dev
- uses: actions/checkout@v3 - name: get clang version
- uses: Swatinem/rust-cache@v2 run: command -v llvm-config && clang -v
with: { shared-key: "ubuntu" } - name: Install cargo-hack
- name: Install LLVM and Clang run: curl -LsSf https://github.com/taiki-e/cargo-hack/releases/latest/download/cargo-hack-x86_64-unknown-linux-gnu.tar.gz | tar xzf - -C ~/.cargo/bin
uses: KyleMayes/install-llvm-action@v2 - name: Add nightly rustfmt and clippy
with: run: rustup toolchain install nightly --component rustfmt --component clippy --allow-downgrade
directory: ${{ runner.temp }}/llvm - uses: actions/checkout@v3
version: 17 - uses: Swatinem/rust-cache@v2
# pcguard edges and pcguard hitcounts are not compatible and we need to build them seperately
- name: Check pcguard edges
run: cargo check --features=sancov_pcguard_edges
- name: run shellcheck
run: shellcheck ./scripts/*.sh
# ---- build normal and examples ----
- name: Run a normal build
run: cargo build --verbose
- name: Build examples
run: cargo build --examples --verbose
ubuntu-clippy: # ---- format check ----
runs-on: ubuntu-22.04 # pcguard edges and pcguard hitcounts are not compatible and we need to build them seperately
steps: - name: Check pcguard edges
- name: Remove Dotnet & Haskell run: cargo check --features=sancov_pcguard_edges
run: rm -rf /usr/share/dotnet && rm -rf /opt/ghc - name: Format
- uses: actions-rs/toolchain@v1 run: cargo fmt -- --check
with: - name: Run clang-format style check for C/C++ programs.
profile: minimal run: clang-format-13 -n -Werror --style=file $(find . -type f \( -name '*.cpp' -o -iname '*.hpp' -o -name '*.cc' -o -name '*.cxx' -o -name '*.cc' -o -name '*.h' \) | grep -v '/target/' | grep -v 'libpng-1\.6\.37' | grep -v 'stb_image\.h' | grep -v 'dlmalloc\.c' | grep -v 'QEMU-Nyx')
toolchain: stable - name: run shellcheck
run: shellcheck ./scripts/*.sh
- name: Run clippy
run: ./scripts/clippy.sh
- name: Install and cache deps # ---- doc check ----
run: sudo apt update && sudo apt install ninja-build shellcheck libgtk-3-dev gcc-arm-linux-gnueabi g++-arm-linux-gnueabi libslirp-dev libz3-dev - name: Build Docs
- name: Add nightly clippy run: cargo doc
run: rustup toolchain install nightly --component clippy --allow-downgrade && rustup default nightly - name: Test Docs
- uses: actions/checkout@v3 run: cargo +nightly test --doc --all-features
- uses: Swatinem/rust-cache@v2
with: { shared-key: "ubuntu" }
- name: Install LLVM and Clang
uses: KyleMayes/install-llvm-action@v2
with:
directory: ${{ runner.temp }}/llvm
version: 17
- name: Run clippy
run: ./scripts/clippy.sh
# --- test embedding the libafl_libfuzzer_runtime library
# Fix me plz
# - name: Test Build libafl_libfuzzer with embed
# run: cargo +nightly test --features=embed-runtime --manifest-path libafl_libfuzzer/Cargo.toml
ubuntu-check: # ---- build and feature check ----
runs-on: ubuntu-22.04 - name: Run a normal build
needs: ubuntu run: cargo build --verbose
strategy: # cargo-hack's --feature-powerset would be nice here but libafl has a too many knobs
matrix: - name: Check each feature
instance_idx: [ "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12", "13", "14", "15", "16", "17" ] # Skipping `python` as it has to be built with the `maturin` tool
steps: # `agpl`, `nautilus` require nightly
- uses: actions/checkout@v3 # `sancov_pcguard_edges` is tested seperately
- uses: ./.github/workflows/ubuntu-prepare run: cargo hack check --each-feature --clean-per-run --exclude-features=prelude,agpl,nautilus,python,sancov_pcguard_edges,arm,aarch64,i386,be,systemmode --no-dev-deps
- uses: Swatinem/rust-cache@v2 - name: Check nightly features
with: { shared-key: "ubuntu" } run: cargo +nightly check --features=agpl && cargo +nightly check --features=nautilus
# ---- build and feature check ---- - name: Build examples
# cargo-hack's --feature-powerset would be nice here but libafl has a too many knobs run: cargo build --examples --verbose
- name: Check each feature
# Skipping `python` as it has to be built with the `maturin` tool
# `sancov_pcguard_edges` is tested seperatelyc
run: python3 ./scripts/parallellize_cargo_check.py ${{ matrix.instance_idx }}
ubuntu-concolic: ubuntu-concolic:
runs-on: ubuntu-latest runs-on: ubuntu-latest
needs: ubuntu
steps: steps:
- uses: actions-rs/toolchain@v1 - uses: actions-rs/toolchain@v1
with: with:
profile: minimal profile: minimal
toolchain: stable toolchain: stable
- uses: actions/checkout@v3 - uses: actions/checkout@v3
- uses: Swatinem/rust-cache@v2 - uses: Swatinem/rust-cache@v2
with: { shared-key: "ubuntu" } - name: Install smoke test deps
- name: Install smoke test deps run: sudo ./libafl_concolic/test/smoke_test_ubuntu_deps.sh
run: sudo ./libafl_concolic/test/smoke_test_ubuntu_deps.sh - name: Run smoke test
- name: Run smoke test run: ./libafl_concolic/test/smoke_test.sh
run: ./libafl_concolic/test/smoke_test.sh
python-bindings: bindings:
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- uses: actions-rs/toolchain@v1 - uses: actions-rs/toolchain@v1
with: with:
profile: minimal profile: minimal
toolchain: stable toolchain: stable
- name: Remove existing clang and LLVM - name: set mold linker as default linker
run: sudo apt purge llvm* clang* uses: rui314/setup-mold@v1
- name: Install LLVM and Clang - name: Install deps
uses: KyleMayes/install-llvm-action@v2 run: sudo apt-get install -y llvm llvm-dev clang ninja-build python3-dev python3-pip python3-venv
with: - name: Install maturin
directory: ${{ runner.temp }}/llvm run: python3 -m pip install maturin
version: 17 - uses: actions/checkout@v3
- name: Install deps - uses: Swatinem/rust-cache@v2
run: sudo apt-get install -y ninja-build python3-dev python3-pip python3-venv libz3-dev - name: Run a maturin build
- name: Install maturin run: cd ./bindings/pylibafl && maturin build
run: python3 -m pip install maturin
- uses: actions/checkout@v3
- uses: Swatinem/rust-cache@v2
- name: Run a maturin build
run: export LLVM_CONFIG=llvm-config-16 && cd ./bindings/pylibafl && python3 -m venv .env && . .env/bin/activate && pip install --upgrade --force-reinstall . && ./test.sh
- name: Run python test
run: . ./bindings/pylibafl/.env/bin/activate # && cd ./fuzzers/python_qemu/ && python3 fuzzer.py 2>&1 | grep "Bye"
cargo-fmt:
runs-on: ubuntu-latest
steps:
- uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: nightly
override: true
components: rustfmt
- uses: actions/checkout@v3
- name: Remove existing clang and LLVM
run: sudo apt purge llvm* clang*
- name: Install LLVM and Clang
uses: KyleMayes/install-llvm-action@v2
with:
directory: ${{ runner.temp }}/llvm
version: 17
- name: Format Check
run: ./scripts/fmt_all.sh check
fuzzers-preflight:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- name: Fuzzer in CI Check
run: ./scripts/check_tested_fuzzers.sh
fuzzers: fuzzers:
needs:
- ubuntu
- fuzzers-preflight
strategy: strategy:
matrix: matrix:
os: [ ubuntu-latest ] os: [ubuntu-latest, macos-latest]
fuzzer:
- ./fuzzers/cargo_fuzz
- ./fuzzers/fuzzbench_fork_qemu
- ./fuzzers/libfuzzer_stb_image_sugar
- ./fuzzers/nyx_libxml2_standalone
- ./fuzzers/baby_fuzzer_gramatron
- ./fuzzers/tinyinst_simple
- ./fuzzers/baby_fuzzer_with_forkexecutor
- ./fuzzers/baby_no_std
- ./fuzzers/baby_fuzzer_swap_differential
- ./fuzzers/baby_fuzzer_grimoire
- ./fuzzers/baby_fuzzer
- ./fuzzers/libfuzzer_libpng_launcher
- ./fuzzers/libfuzzer_libpng_accounting
- ./fuzzers/forkserver_libafl_cc
- ./fuzzers/libfuzzer_libpng_tcp_manager
- ./fuzzers/backtrace_baby_fuzzers
- ./fuzzers/fuzzbench_qemu
- ./fuzzers/nyx_libxml2_parallel
- ./fuzzers/frida_gdiplus
- ./fuzzers/libfuzzer_stb_image_concolic
- ./fuzzers/nautilus_sync
- ./fuzzers/push_harness
- ./fuzzers/libfuzzer_libpng_centralized
- ./fuzzers/baby_fuzzer_nautilus
- ./fuzzers/fuzzbench_text
- ./fuzzers/libfuzzer_libpng_cmin
- ./fuzzers/forkserver_simple
- ./fuzzers/baby_fuzzer_unicode
- ./fuzzers/libfuzzer_libpng_norestart
- ./fuzzers/baby_fuzzer_multi
- ./fuzzers/libafl_atheris
- ./fuzzers/frida_libpng
- ./fuzzers/fuzzbench_ctx
- ./fuzzers/fuzzbench_forkserver_cmplog
- ./fuzzers/push_stage_harness
- ./fuzzers/libfuzzer_libmozjpeg
- ./fuzzers/libfuzzer_libpng_aflpp_ui
- ./fuzzers/libfuzzer_libpng
- ./fuzzers/baby_fuzzer_wasm
- ./fuzzers/fuzzbench
- ./fuzzers/libfuzzer_stb_image
- ./fuzzers/fuzzbench_forkserver
# - ./fuzzers/libfuzzer_windows_asan
# - ./fuzzers/dynamic_analysis
- ./fuzzers/baby_fuzzer_minimizing
- ./fuzzers/frida_executable_libpng
- ./fuzzers/tutorial
- ./fuzzers/baby_fuzzer_tokens
- ./fuzzers/backtrace_baby_fuzzers/rust_code_with_inprocess_executor
- ./fuzzers/backtrace_baby_fuzzers/c_code_with_fork_executor
- ./fuzzers/backtrace_baby_fuzzers/command_executor
- ./fuzzers/backtrace_baby_fuzzers/forkserver_executor
- ./fuzzers/backtrace_baby_fuzzers/c_code_with_inprocess_executor
- ./fuzzers/backtrace_baby_fuzzers/rust_code_with_fork_executor
runs-on: ${{ matrix.os }} runs-on: ${{ matrix.os }}
steps: steps:
- uses: actions/checkout@v3 - uses: actions-rs/toolchain@v1
- uses: ./.github/workflows/fuzzer-tester-prepare with:
- name: Build and run example fuzzers (Linux) profile: minimal
if: runner.os == 'Linux' toolchain: stable
shell: bash - name: set mold linker as default linker
run: RUN_ON_CI=1 LLVM_CONFIG=llvm-config ./scripts/test_fuzzer.sh ${{ matrix.fuzzer }} if: runner.os == 'Linux' # mold only support linux until now
uses: rui314/setup-mold@v1
changes: - name: Add nightly rustfmt and clippy
runs-on: ubuntu-latest run: rustup toolchain install nightly --component rustfmt --component clippy --allow-downgrade
permissions: - name: Add no_std toolchain
pull-requests: read run: rustup toolchain install nightly-x86_64-unknown-linux-gnu ; rustup component add rust-src --toolchain nightly-x86_64-unknown-linux-gnu
outputs: - name: Install python
qemu: ${{ steps.filter.outputs.qemu }} if: runner.os == 'macOS'
steps: run: brew install --force-bottle --overwrite python@3.11
- uses: actions/checkout@v3 - uses: lyricwulf/abc@v1
- uses: dorny/paths-filter@v3 with:
id: filter # todo: remove afl++-clang when nyx support samcov_pcguard
with: linux: llvm llvm-dev clang nasm ninja-build gcc-arm-linux-gnueabi g++-arm-linux-gnueabi libgtk-3-dev afl++-clang pax-utils
filters: | # update bash for macos to support `declare -A` command`
qemu: macos: llvm libpng nasm coreutils z3 bash
- 'libafl_qemu/**' - name: pip install
- 'fuzzers/*qemu*/**' run: python3 -m pip install msgpack jinja2
# Note that nproc needs to have coreutils installed on macOS, so the order of CI commands matters.
fuzzers-qemu: - name: enable mult-thread for `make`
needs: changes run: export MAKEFLAGS="-j$(expr $(nproc) \+ 1)"
if: ${{ needs.changes.outputs.qemu == 'true' }} - name: install cargo-make
strategy: uses: baptiste0928/cargo-install@v1.3.0
matrix: with:
os: [ubuntu-latest] crate: cargo-make
fuzzer: - uses: actions/checkout@v3
- ./fuzzers/qemu_cmin with:
- ./fuzzers/qemu_systemmode submodules: true # recursively checkout submodules
- ./fuzzers/qemu_coverage - uses: Swatinem/rust-cache@v2
- ./fuzzers/qemu_launcher - name: Build and run example fuzzers (Linux)
if: runner.os == 'Linux'
runs-on: [ self-hosted, qemu ] run: ./scripts/test_all_fuzzers.sh
container: registry.gitlab.com/qemu-project/qemu/qemu/ubuntu2204:latest - name: Build and run example fuzzers (macOS)
steps: if: runner.os == 'macOS' # use bash v4
- uses: actions/checkout@v3 run: /usr/local/bin/bash ./scripts/test_all_fuzzers.sh
- uses: ./.github/workflows/qemu-fuzzer-tester-prepare
- name: Build and run example QEMU fuzzers (Linux)
if: runner.os == 'Linux'
shell: bash
run: RUN_ON_CI=1 LLVM_CONFIG=llvm-config ./scripts/test_fuzzer.sh ${{ matrix.fuzzer }}
nostd-build: nostd-build:
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- uses: actions-rs/toolchain@v1 - uses: actions-rs/toolchain@v1
with: with:
profile: minimal profile: minimal
toolchain: nightly toolchain: nightly
override: true override: true
components: rust-src components: rustfmt, clippy, rust-src
- uses: actions/checkout@v3 - uses: actions/checkout@v3
- uses: Swatinem/rust-cache@v2 - uses: Swatinem/rust-cache@v2
- name: Add targets - name: Add targets
run: rustup target add arm-linux-androideabi && rustup target add thumbv6m-none-eabi run: rustup target add arm-linux-androideabi && rustup target add thumbv6m-none-eabi
- name: Build aarch64-unknown-none - name: Build aarch64-unknown-none
run: cd ./fuzzers/baby_no_std && cargo +nightly build -Zbuild-std=core,alloc --target aarch64-unknown-none -v --release && cd ../.. run: cd ./fuzzers/baby_no_std && cargo +nightly build -Zbuild-std=core,alloc --target aarch64-unknown-none -v --release && cd ../..
- name: run x86_64 until panic! - name: run x86_64 until panic!
run: cd ./fuzzers/baby_no_std && cargo +nightly run || test $? -ne 0 || exit 1 run: cd ./fuzzers/baby_no_std && cargo +nightly run || test $? -ne 0 || exit 1
- name: no_std tests - name: no_std tests
run: cd ./libafl && cargo test --no-default-features run: cd ./libafl && cargo test --no-default-features
- name: libafl armv6m-none-eabi (32 bit no_std) clippy
nostd-clippy: run: cd ./libafl && cargo clippy --target thumbv6m-none-eabi --no-default-features
runs-on: ubuntu-latest
steps:
- uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: nightly
override: true
components: clippy, rust-src
- uses: actions/checkout@v3
- uses: Swatinem/rust-cache@v2
- name: Add targets
run: rustup target add arm-linux-androideabi && rustup target add thumbv6m-none-eabi
- name: libafl armv6m-none-eabi (32 bit no_std) clippy
run: cd ./libafl && cargo clippy --target thumbv6m-none-eabi --no-default-features
- name: Build no_std no_alloc bolts
run: cd ./libafl_bolts && cargo +nightly build -Zbuild-std=core --target aarch64-unknown-none --no-default-features -v --release && cd ../
build-docker: build-docker:
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- uses: actions/checkout@v3 - uses: actions/checkout@v3
- name: Build docker - name: Build docker
run: docker build -t libafl . run: docker build -t libafl .
windows-frida-libpng: windows:
runs-on: windows-latest runs-on: windows-latest
needs:
- common
steps: steps:
- uses: actions/checkout@v3 - uses: actions-rs/toolchain@v1
- uses: ./.github/workflows/windows-tester-prepare with:
- name: Build fuzzers/frida_libpng profile: minimal
run: cd fuzzers/frida_libpng/ && cargo make test toolchain: stable
- uses: actions/checkout@v3
windows-frida-libfuzzer-stb-image: - uses: Swatinem/rust-cache@v2
runs-on: windows-latest - name: Windows Build
needs: run: cargo build --verbose
- common - name: Run clippy
steps: uses: actions-rs/cargo@v1
- uses: actions/checkout@v3 with:
- uses: ./.github/workflows/windows-tester-prepare command: clippy
- name: Build fuzzers/libfuzzer_stb_image - name: Build docs
run: cd fuzzers/libfuzzer_stb_image && cargo build --release run: cargo doc
- name: Set LIBCLANG_PATH
windows-frida-gdiplus: run: echo "LIBCLANG_PATH=$((gcm clang).source -replace "clang.exe")" >> $env:GITHUB_ENV
runs-on: windows-latest - name: install cargo-make
needs: run: cargo install --force cargo-make
- common - uses: ilammy/msvc-dev-cmd@v1
steps: - name: Build fuzzers/frida_libpng
- uses: actions/checkout@v3 run: cd fuzzers/frida_libpng/ && cargo make test
- uses: ./.github/workflows/windows-tester-prepare - name: Build fuzzers/frida_gdiplus
- name: Build fuzzers/frida_gdiplus run: cd fuzzers/frida_gdiplus/ && cargo make test
run: cd fuzzers/frida_gdiplus/ && cargo make test && cargo make test_cmplog
windows-tinyinst-simple:
runs-on: windows-latest
needs:
- common
steps:
- uses: actions/checkout@v3
- uses: ./.github/workflows/windows-tester-prepare
- name: install cxx bridge
run: cargo install cxxbridge-cmd
- name: Build fuzzers/tinyinst_simple
run: cd fuzzers/tinyinst_simple/ && cargo make test
windows-clippy:
runs-on: windows-latest
needs:
- common
steps:
- uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: stable
- uses: actions/checkout@v3
- uses: ./.github/workflows/windows-tester-prepare
- uses: Swatinem/rust-cache@v2
- name: Run real clippy, not the fake one
shell: pwsh
run: .\scripts\clippy.ps1
macos: macos:
runs-on: macOS-latest runs-on: macOS-latest
steps: steps:
- uses: actions-rs/toolchain@v1 - uses: actions-rs/toolchain@v1
with: with:
profile: minimal profile: minimal
toolchain: stable toolchain: stable
- name: Add nightly clippy - name: Add nightly rustfmt and clippy
run: rustup toolchain install nightly --component clippy --allow-downgrade && rustup default nightly run: rustup toolchain install nightly --component rustfmt --component clippy --allow-downgrade
- name: Install deps - name: Install deps
run: brew install z3 gtk+3 run: brew install z3 gtk+3
- name: Install cxxbridge - uses: actions/checkout@v3
run: cargo install cxxbridge-cmd - uses: Swatinem/rust-cache@v2
- uses: actions/checkout@v3 - name: MacOS Build
- uses: Swatinem/rust-cache@v2 run: cargo build --verbose
- name: MacOS Build - name: Run clippy
run: cargo build --verbose run: ./scripts/clippy.sh
- name: Increase map sizes - name: Increase map sizes
run: ./scripts/shmem_limits_macos.sh run: ./scripts/shmem_limits_macos.sh
- name: Run Tests - name: Run Tests
run: cargo test run: cargo test
- name: Clippy
run: cargo +nightly clippy --tests --all --exclude libafl_nyx --exclude symcc_runtime --exclude runtime_test
ios: other_targets:
runs-on: macOS-latest runs-on: macOS-latest
steps: steps:
- uses: actions-rs/toolchain@v1 - uses: actions-rs/toolchain@v1
with: with:
profile: minimal profile: minimal
toolchain: stable toolchain: stable
- name: install ios - uses: nttld/setup-ndk@v1
run: rustup target add aarch64-apple-ios with:
- uses: actions/checkout@v3 ndk-version: r21e
- uses: Swatinem/rust-cache@v2 - name: install ios
- name: Build iOS run: rustup target add aarch64-apple-ios
run: cargo build --target aarch64-apple-ios && cd libafl_frida && cargo build --target aarch64-apple-ios && cd .. - name: install android
run: rustup target add aarch64-linux-android
android: - name: install cargo ndk
runs-on: ubuntu-22.04 run: cargo install cargo-ndk
steps: - uses: actions/checkout@v3
- uses: actions-rs/toolchain@v1 - uses: Swatinem/rust-cache@v2
with: - name: Build iOS
profile: minimal run: cargo build --target aarch64-apple-ios
toolchain: stable - name: Build Android
- uses: nttld/setup-ndk@v1 run: cargo ndk -t arm64-v8a build --release
with:
ndk-version: r25b
- name: install android
run: rustup target add aarch64-linux-android
- name: install cargo ndk
run: cargo install cargo-ndk
- uses: actions/checkout@v3
- uses: Swatinem/rust-cache@v2
- name: Build Android
run: cd libafl && cargo ndk -t arm64-v8a build --release
#run: cargo build --target aarch64-linux-android #run: cargo build --target aarch64-linux-android
# TODO: Figure out how to properly build stuff with clang # TODO: Figure out how to properly build stuff with clang
#- name: Add clang path to $PATH env #- name: Add clang path to $PATH env
@@ -552,34 +298,34 @@ jobs:
# run: C:\Rust\.cargo\bin\cargo.exe test --verbose # run: C:\Rust\.cargo\bin\cargo.exe test --verbose
freebsd: freebsd:
runs-on: ubuntu-22.04 runs-on: macos-12
name: Simple build in FreeBSD name: Simple build in FreeBSD
steps: steps:
- uses: actions/checkout@v3 - uses: actions/checkout@v3
- name: Test in FreeBSD - name: Test in FreeBSD
id: test id: test
uses: vmactions/freebsd-vm@v1 uses: vmactions/freebsd-vm@v0
with: with:
usesh: true usesh: true
sync: rsync sync: rsync
copyback: false copyback: false
mem: 2048 mem: 2048
release: 13.2 release: 13.1
prepare: | prepare: |
pkg install -y curl bash sudo llvm16 pkg install -y curl bash sudo llvm14
curl https://sh.rustup.rs -sSf | sh -s -- -y curl https://sh.rustup.rs -sSf | sh -s -- -y
run: | run: |
freebsd-version freebsd-version
. "$HOME/.cargo/env" . "$HOME/.cargo/env"
rustup toolchain install nightly rustup toolchain install nightly
export LLVM_CONFIG=/usr/local/bin/llvm-config16 export LLVM_CONFIG=/usr/local/bin/llvm-config14
pwd pwd
ls -lah ls -lah
echo "local/bin" echo "local/bin"
ls -lah /usr/local/bin/ ls -lah /usr/local/bin/
which llvm-config which llvm-config
chmod +x ./scripts/clippy.sh chmod +x ./scripts/clippy.sh
bash ./scripts/shmem_limits_fbsd.sh bash ./scripts/shmem_limits_fbsd.sh
bash ./scripts/clippy.sh bash ./scripts/clippy.sh
cargo test cargo test


@@ -1,65 +0,0 @@
name: Setup Rust Environment
description: Sets up the Rust environment for the CI workflow
runs:
using: composite
steps:
- uses: actions/checkout@v3
with:
submodules: true
fetch-depth: 0
- uses: Swatinem/rust-cache@v2
with: { shared-key: "${{ runner.os }}-shared-fuzzer-cache" }
- uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: stable
- name: Add stable clippy
shell: bash
run: rustup toolchain install stable --component clippy --allow-downgrade
- name: Add nightly clippy
shell: bash
run: rustup toolchain install nightly --component clippy --allow-downgrade
- name: Add no_std toolchain
shell: bash
run: rustup toolchain install nightly-x86_64-unknown-linux-gnu ; rustup component add rust-src --toolchain nightly-x86_64-unknown-linux-gnu
- name: Add wasm target
shell: bash
run: rustup target add wasm32-unknown-unknown
- name: Remove obsolete llvm (Linux)
if: runner.os == 'Linux'
shell: bash
run: sudo apt purge -y llvm* clang*
- name: Install LLVM and Clang
uses: KyleMayes/install-llvm-action@v2
with:
directory: ${{ runner.temp }}/llvm
version: 17
- name: Install deps
shell: bash
run: sudo apt update && sudo apt install -y nasm nlohmann-json3-dev ninja-build gcc-arm-linux-gnueabi g++-arm-linux-gnueabi gcc-aarch64-linux-gnu g++-aarch64-linux-gnu gcc-mipsel-linux-gnu g++-mipsel-linux-gnu gcc-powerpc-linux-gnu g++-powerpc-linux-gnu libc6-dev-i386-cross libc6-dev libc6-dev-i386 lib32gcc-11-dev lib32stdc++-11-dev libgtk-3-dev pax-utils libz3-dev
- name: pip install
shell: bash
run: python3 -m pip install msgpack jinja2 find_libpython
- name: enable mult-thread for `make`
shell: bash
run: export MAKEFLAGS="-j$(expr $(nproc) \+ 1)"
- name: install cargo-make
uses: baptiste0928/cargo-install@v1.3.0
with:
crate: cargo-make
- name: install wasm-pack
uses: baptiste0928/cargo-install@v1.3.0
with:
crate: wasm-pack
- name: install cxxbridge-cmd
uses: baptiste0928/cargo-install@v1.3.0
with:
crate: cxxbridge-cmd
- name: install chrome
uses: browser-actions/setup-chrome@v1
with:
chrome-version: stable
- name: Symlink Headers
if: runner.os == 'Linux'
shell: bash
run: sudo ln -s /usr/include/asm-generic /usr/include/asm


@@ -1,47 +0,0 @@
name: Setup QEMU Fuzzers environment
description: Sets up the QEMU fuzzers environment
runs:
using: composite
steps:
- uses: actions/checkout@v3
with:
submodules: true
fetch-depth: 0
- name: Install deps
shell: bash
run: apt update && apt install -y nasm ninja-build libc6-dev libgtk-3-dev pax-utils libz3-dev wget qemu-utils libsqlite3-dev gcc-arm-none-eabi sudo gcc g++ build-essential gcc-arm-linux-gnueabi g++-arm-linux-gnueabi
- uses: Swatinem/rust-cache@v2
with: { shared-key: "${{ runner.os }}-shared-fuzzer-cache" }
- uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: stable
- name: Add stable clippy
shell: bash
run: rustup toolchain install stable --component clippy --allow-downgrade
- name: Add nightly clippy
shell: bash
run: rustup toolchain install nightly --component clippy --allow-downgrade
- name: Remove obsolete llvm (Linux)
if: runner.os == 'Linux'
shell: bash
run: sudo apt purge -y llvm* clang*
- name: Install LLVM and Clang
uses: KyleMayes/install-llvm-action@v2
with:
directory: ${{ runner.temp }}/llvm
version: 17
- name: pip install
shell: bash
run: python3 -m pip install msgpack jinja2 find_libpython
- name: enable mult-thread for `make`
shell: bash
run: export MAKEFLAGS="-j$(expr $(nproc) \+ 1)"
- name: install cargo-make
uses: baptiste0928/cargo-install@v1.3.0
with:
crate: cargo-make
- name: Symlink Headers
if: runner.os == 'Linux'
shell: bash
run: sudo ln -s /usr/include/asm-generic /usr/include/asm


@@ -1,27 +0,0 @@
name: Setup Rust Environment
description: Sets up the Rust environment for the CI workflow
runs:
using: composite
steps:
- uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: stable
components: llvm-tools
- name: Remove existing clang and LLVM
shell: bash
run: sudo apt purge llvm* clang*
- name: Install and cache deps
shell: bash
run: sudo apt update && sudo apt install ninja-build clang-format shellcheck libgtk-3-dev gcc-arm-linux-gnueabi g++-arm-linux-gnueabi libslirp-dev libz3-dev
- name: Install cargo-hack
shell: bash
run: curl -LsSf https://github.com/taiki-e/cargo-hack/releases/latest/download/cargo-hack-x86_64-unknown-linux-gnu.tar.gz | tar xzf - -C ~/.cargo/bin
- name: Add nightly
shell: bash
run: rustup toolchain install nightly --allow-downgrade
- name: Install LLVM and Clang
uses: KyleMayes/install-llvm-action@v2
with:
directory: ${{ runner.temp }}/llvm
version: 17


@@ -1,21 +0,0 @@
name: Setup Rust Environment
description: Sets up the Rust environment for the CI workflow
runs:
using: composite
steps:
- uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: stable
- uses: actions/checkout@v3
- uses: Swatinem/rust-cache@v2
- name: Build docs
shell: pwsh
run: cargo doc
- uses: ilammy/msvc-dev-cmd@v1
- name: Set LIBCLANG_PATH
shell: pwsh
run: echo "LIBCLANG_PATH=$((gcm clang).source -replace "clang.exe")" >> $env:GITHUB_ENV
- name: install cargo-make
shell: pwsh
run: cargo install --force cargo-make

.gitignore (vendored), 21 changed lines

@@ -21,8 +21,6 @@ vendor
*.obj *.obj
.cur_input .cur_input
.cur_input_*
cur_input
.venv .venv
crashes crashes
@@ -34,8 +32,6 @@ perf.data.old
.vscode .vscode
test.dict test.dict
.idea/
# Ignore all built fuzzers # Ignore all built fuzzers
fuzzer_* fuzzer_*
AFLplusplus AFLplusplus
@@ -50,7 +46,6 @@ a
forkserver_test forkserver_test
__pycache__ __pycache__
*.lafl_lock *.lafl_lock
*.metadata
*atomic_file_testfile* *atomic_file_testfile*
**/libxml2 **/libxml2
@@ -59,19 +54,3 @@ __pycache__
libafl_nyx/QEMU-Nyx libafl_nyx/QEMU-Nyx
libafl_nyx/packer libafl_nyx/packer
.z3-trace
# No gdb history
.gdb_history
# No llvm IR
*.ll
*.tar.gz
# common harness names
harness
program
fuzzer
fuzzer_libpng*
forkserver_simple

.gitmodules (vendored, new file), 3 changed lines

@@ -0,0 +1,3 @@
[submodule "libafl_concolic/symcc_runtime/symcc"]
path = libafl_concolic/symcc_runtime/symcc
url = https://github.com/AFLplusplus/symcc.git
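The submodule above pins SymCC under libafl_concolic/symcc_runtime/symcc. As a side note that is standard git usage rather than something taken from this repository's own documentation, a checkout that needs it would typically be fetched like this (`<repo-url>` is a placeholder for this branch's clone URL):

```sh
# In an existing checkout of this branch, fetch the symcc submodule:
git submodule update --init --recursive

# Or clone with submodules from the start:
git clone --recursive <repo-url>
```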


@@ -1,9 +0,0 @@
# See https://pre-commit.com for more information
# See https://pre-commit.com/hooks.html for more hooks
repos:
- repo: local
hooks:
- id: fmt
name: fmt
entry: scripts/fmt_all.sh check
language: script


@@ -1,18 +0,0 @@
# How to Contribute to LibAFL
For bugs, feel free to open issues or contact us directly. Thank you for your support. <3
## Pull Request guideline
Even though we will gladly assist you in finishing up your PR, try to
- keep all the crates compiling with *stable* rust (hide the eventual non-stable code under [`cfg`s](https://github.com/AFLplusplus/LibAFL/blob/main/libafl/build.rs#L26))
- run `cargo +nightly fmt` on your code before pushing
- check the output of `cargo clippy --all` or `./clippy.sh`
- run `cargo build --no-default-features` to check for `no_std` compatibility (and possibly add `#[cfg(feature = "std")]`) to hide parts of your code.
Some of the parts in this list may be hard, don't be afraid to open a PR if you cannot fix them by yourself, so we can help.
### Pre-commit hooks
Some of these checks can be performed automatically during commit using [pre-commit](https://pre-commit.com/).
Once the package is installed, simply run `pre-commit install` to enable the hooks, the checks will run automatically before the commit becomes effective.
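Taken together, the checklist above amounts to a short local routine. A minimal sketch, using only the commands already named in this guideline and assuming a nightly toolchain and pre-commit are installed:

```sh
# Format, lint, and check no_std compatibility before opening a PR.
cargo +nightly fmt
cargo clippy --all                 # or run the repository's clippy.sh helper
cargo build --no-default-features

# Optionally let pre-commit run the fmt check on every commit.
pre-commit install
```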


@@ -1,49 +1,42 @@
[workspace] [workspace]
resolver = "2"
members = [ members = [
"libafl", "libafl",
"libafl_bolts", "libafl_derive",
"libafl_cc", "libafl_cc",
"libafl_targets",
"libafl_frida",
"libafl_qemu",
"libafl_tinyinst",
"libafl_sugar",
"libafl_nyx",
"libafl_concolic/symcc_runtime", "libafl_concolic/symcc_runtime",
"libafl_concolic/symcc_libafl", "libafl_concolic/symcc_libafl",
"libafl_concolic/test/dump_constraints", "libafl_concolic/test/dump_constraints",
"libafl_concolic/test/runtime_test", "libafl_concolic/test/runtime_test",
"libafl_derive",
"libafl_frida",
"libafl_libfuzzer",
"libafl_nyx",
"libafl_qemu",
"libafl_sugar",
"libafl_targets",
"libafl_tinyinst",
"utils/build_and_test_fuzzers",
"utils/deexit", "utils/deexit",
"utils/libafl_benches",
"utils/gramatron/construct_automata", "utils/gramatron/construct_automata",
"utils/libafl_benches",
] ]
default-members = [ default-members = [
"libafl", "libafl",
"libafl_bolts",
"libafl_cc",
"libafl_derive", "libafl_derive",
"libafl_cc",
"libafl_targets", "libafl_targets",
] ]
exclude = [ exclude = [
"bindings",
"fuzzers", "fuzzers",
"libafl_qemu/libafl_qemu_build", "bindings",
"libafl_qemu/libafl_qemu_sys",
"utils/noaslr",
"utils/gdb_qemu",
"utils/libafl_fmt",
"scripts", "scripts",
"libafl_qemu/libafl_qemu_build",
"libafl_qemu/libafl_qemu_sys"
] ]
[workspace.package] [workspace.package]
version = "0.13.0" version = "0.8.2"
[profile.release] [profile.release]
lto = true lto = true
codegen-units = 1 codegen-units = 1
opt-level = 3 opt-level = 3
debug = true debug = true


@@ -1,10 +1,10 @@
# syntax=docker/dockerfile:1.2 # syntax=docker/dockerfile:1.2
FROM rust:1.76.0 AS libafl FROM rust:bullseye AS libafl
LABEL "maintainer"="afl++ team <afl@aflplus.plus>" LABEL "maintainer"="afl++ team <afl@aflplus.plus>"
LABEL "about"="LibAFL Docker image" LABEL "about"="LibAFL Docker image"
# install sccache to cache subsequent builds of dependencies # install sccache to cache subsequent builds of dependencies
RUN cargo install --locked sccache RUN cargo install sccache
ENV HOME=/root ENV HOME=/root
ENV SCCACHE_CACHE_SIZE="1G" ENV SCCACHE_CACHE_SIZE="1G"
@@ -16,18 +16,10 @@ RUN sh -c 'echo set encoding=utf-8 > /root/.vimrc' \
mkdir ~/.cargo && \ mkdir ~/.cargo && \
echo "[build]\nrustc-wrapper = \"${RUSTC_WRAPPER}\"" >> ~/.cargo/config echo "[build]\nrustc-wrapper = \"${RUSTC_WRAPPER}\"" >> ~/.cargo/config
RUN rustup default nightly
RUN rustup component add rustfmt clippy RUN rustup component add rustfmt clippy
# Install clang 18, common build tools # Install clang 11, common build tools
ENV LLVM_VERSION=18 RUN apt update && apt install -y build-essential gdb git wget clang clang-tools libc++-11-dev libc++abi-11-dev llvm
RUN apt update && apt install -y build-essential gdb git wget python3-venv ninja-build lsb-release software-properties-common gnupg cmake
# Workaround until https://github.com/llvm/llvm-project/issues/62475 is resolved
RUN set -ex &&\
echo "deb http://apt.llvm.org/bookworm/ llvm-toolchain-bookworm-${LLVM_VERSION} main" > /etc/apt/sources.list.d/apt.llvm.org.list &&\
wget -qO- https://apt.llvm.org/llvm-snapshot.gpg.key | tee /etc/apt/trusted.gpg.d/apt.llvm.org.asc &&\
apt update &&\
apt-get install -y clang-${LLVM_VERSION} lldb-${LLVM_VERSION} lld-${LLVM_VERSION} clangd-${LLVM_VERSION} clang-tidy-${LLVM_VERSION} clang-format-${LLVM_VERSION} clang-tools-${LLVM_VERSION} llvm-${LLVM_VERSION}-dev lld-${LLVM_VERSION} lldb-${LLVM_VERSION} llvm-${LLVM_VERSION}-tools libomp-${LLVM_VERSION}-dev libc++-${LLVM_VERSION}-dev libc++abi-${LLVM_VERSION}-dev libclang-common-${LLVM_VERSION}-dev libclang-${LLVM_VERSION}-dev libclang-cpp${LLVM_VERSION}-dev libunwind-${LLVM_VERSION}-dev libclang-rt-${LLVM_VERSION}-dev libpolly-${LLVM_VERSION}-dev
# Copy a dummy.rs and Cargo.toml first, so that dependencies are cached # Copy a dummy.rs and Cargo.toml first, so that dependencies are cached
WORKDIR /libafl WORKDIR /libafl
@@ -36,24 +28,21 @@ COPY Cargo.toml README.md ./
COPY libafl_derive/Cargo.toml libafl_derive/Cargo.toml COPY libafl_derive/Cargo.toml libafl_derive/Cargo.toml
COPY scripts/dummy.rs libafl_derive/src/lib.rs COPY scripts/dummy.rs libafl_derive/src/lib.rs
COPY libafl/Cargo.toml libafl/build.rs libafl/README.md libafl/ COPY libafl/Cargo.toml libafl/build.rs libafl/
COPY libafl/examples libafl/examples
COPY scripts/dummy.rs libafl/src/lib.rs COPY scripts/dummy.rs libafl/src/lib.rs
COPY libafl_bolts/Cargo.toml libafl_bolts/build.rs libafl_bolts/README.md libafl_bolts/
COPY libafl_bolts/examples libafl_bolts/examples
COPY scripts/dummy.rs libafl_bolts/src/lib.rs
COPY libafl_frida/Cargo.toml libafl_frida/build.rs libafl_frida/ COPY libafl_frida/Cargo.toml libafl_frida/build.rs libafl_frida/
COPY scripts/dummy.rs libafl_frida/src/lib.rs COPY scripts/dummy.rs libafl_frida/src/lib.rs
COPY libafl_frida/src/gettls.c libafl_frida/src/gettls.c COPY libafl_frida/src/gettls.c libafl_frida/src/gettls.c
COPY libafl_qemu/Cargo.toml libafl_qemu/build.rs libafl_qemu/build_linux.rs libafl_qemu/ COPY libafl_qemu/Cargo.toml libafl_qemu/build.rs libafl_qemu/
COPY scripts/dummy.rs libafl_qemu/src/lib.rs COPY scripts/dummy.rs libafl_qemu/src/lib.rs
COPY libafl_qemu/libafl_qemu_build/Cargo.toml libafl_qemu/libafl_qemu_build/ COPY libafl_qemu/libafl_qemu_build/Cargo.toml libafl_qemu/libafl_qemu_build/
COPY scripts/dummy.rs libafl_qemu/libafl_qemu_build/src/lib.rs COPY scripts/dummy.rs libafl_qemu/libafl_qemu_build/src/lib.rs
COPY libafl_qemu/libafl_qemu_sys/Cargo.toml libafl_qemu/libafl_qemu_sys/build.rs libafl_qemu/libafl_qemu_sys/build_linux.rs libafl_qemu/libafl_qemu_sys/ COPY libafl_qemu/libafl_qemu_sys/Cargo.toml libafl_qemu/libafl_qemu_sys/build.rs libafl_qemu/libafl_qemu_sys/
COPY scripts/dummy.rs libafl_qemu/libafl_qemu_sys/src/lib.rs COPY scripts/dummy.rs libafl_qemu/libafl_qemu_sys/src/lib.rs
COPY libafl_sugar/Cargo.toml libafl_sugar/ COPY libafl_sugar/Cargo.toml libafl_sugar/
@@ -80,16 +69,12 @@ COPY scripts/dummy.rs libafl_concolic/symcc_runtime/src/lib.rs
COPY libafl_concolic/symcc_libafl/Cargo.toml libafl_concolic/symcc_libafl/ COPY libafl_concolic/symcc_libafl/Cargo.toml libafl_concolic/symcc_libafl/
COPY scripts/dummy.rs libafl_concolic/symcc_libafl/src/lib.rs COPY scripts/dummy.rs libafl_concolic/symcc_libafl/src/lib.rs
COPY libafl_nyx/Cargo.toml libafl_nyx/build.rs libafl_nyx/build_nyx_support.sh libafl_nyx/ COPY libafl_nyx/Cargo.toml libafl_nyx/build.rs libafl_nyx/
COPY scripts/dummy.rs libafl_nyx/src/lib.rs COPY scripts/dummy.rs libafl_nyx/src/lib.rs
COPY libafl_tinyinst/Cargo.toml libafl_tinyinst/ COPY libafl_tinyinst/Cargo.toml libafl_tinyinst/
COPY scripts/dummy.rs libafl_tinyinst/src/lib.rs COPY scripts/dummy.rs libafl_tinyinst/src/lib.rs
# avoid pulling in the runtime, as this is quite an expensive build, until later
COPY libafl_libfuzzer/Cargo.toml libafl_libfuzzer/
COPY scripts/dummy.rs libafl_libfuzzer/src/lib.rs
COPY utils utils COPY utils utils
RUN cargo build && cargo build --release RUN cargo build && cargo build --release
@@ -110,8 +95,6 @@ COPY libafl_cc/src libafl_cc/src
RUN touch libafl_cc/src/lib.rs RUN touch libafl_cc/src/lib.rs
COPY libafl_derive/src libafl_derive/src COPY libafl_derive/src libafl_derive/src
RUN touch libafl_derive/src/lib.rs RUN touch libafl_derive/src/lib.rs
COPY libafl_bolts/src libafl_bolts/src
RUN touch libafl_bolts/src/lib.rs
COPY libafl/src libafl/src COPY libafl/src libafl/src
RUN touch libafl/src/lib.rs RUN touch libafl/src/lib.rs
COPY libafl_targets/src libafl_targets/src COPY libafl_targets/src libafl_targets/src
@@ -121,8 +104,6 @@ RUN touch libafl_qemu/libafl_qemu_build/src/lib.rs
COPY libafl_qemu/libafl_qemu_build/src libafl_qemu/libafl_qemu_build/src COPY libafl_qemu/libafl_qemu_build/src libafl_qemu/libafl_qemu_build/src
RUN touch libafl_qemu/libafl_qemu_sys/src/lib.rs RUN touch libafl_qemu/libafl_qemu_sys/src/lib.rs
COPY libafl_qemu/libafl_qemu_sys/src libafl_qemu/libafl_qemu_sys/src COPY libafl_qemu/libafl_qemu_sys/src libafl_qemu/libafl_qemu_sys/src
COPY libafl_qemu/runtime libafl_qemu/runtime
COPY libafl_qemu/libqasan libafl_qemu/libqasan
RUN touch libafl_qemu/src/lib.rs RUN touch libafl_qemu/src/lib.rs
COPY libafl_qemu/src libafl_qemu/src COPY libafl_qemu/src libafl_qemu/src
RUN touch libafl_frida/src/lib.rs RUN touch libafl_frida/src/lib.rs
@@ -131,16 +112,11 @@ COPY libafl_concolic/symcc_runtime libafl_concolic/symcc_runtime
COPY libafl_concolic/test libafl_concolic/test COPY libafl_concolic/test libafl_concolic/test
COPY libafl_nyx/src libafl_nyx/src COPY libafl_nyx/src libafl_nyx/src
RUN touch libafl_nyx/src/lib.rs RUN touch libafl_nyx/src/lib.rs
COPY libafl_libfuzzer/src libafl_libfuzzer/src
COPY libafl_libfuzzer/libafl_libfuzzer_runtime libafl_libfuzzer/libafl_libfuzzer_runtime
COPY libafl_libfuzzer/build.rs libafl_libfuzzer/build.rs
RUN touch libafl_libfuzzer/src/lib.rs
RUN cargo build && cargo build --release RUN cargo build && cargo build --release
# Copy fuzzers over # Copy fuzzers over
COPY fuzzers fuzzers COPY fuzzers fuzzers
# RUN ./scripts/test_fuzzer.sh --no-fmt # RUN ./scripts/test_all_fuzzers.sh --no-fmt
ENTRYPOINT [ "/bin/bash", "-c" ] ENTRYPOINT [ "/bin/bash" ]
CMD ["/bin/bash"]


@@ -1,6 +1,6 @@
# LibAFL, the fuzzer library. # LibAFL, the fuzzer library.
<img align="right" src="https://raw.githubusercontent.com/AFLplusplus/Website/main/static/libafl_logo.svg" alt="LibAFL logo" width="250" heigh="250"> <img align="right" src="https://github.com/AFLplusplus/Website/raw/master/static/logo_256x256.png" alt="AFL++ Logo">
Advanced Fuzzing Library - Slot your own fuzzers together and extend their features using Rust. Advanced Fuzzing Library - Slot your own fuzzers together and extend their features using Rust.
@@ -10,7 +10,6 @@ LibAFL is written and maintained by
* [Dominik Maier](https://twitter.com/domenuk) <dominik@aflplus.plus> * [Dominik Maier](https://twitter.com/domenuk) <dominik@aflplus.plus>
* [s1341](https://twitter.com/srubenst1341) <github@shmarya.net> * [s1341](https://twitter.com/srubenst1341) <github@shmarya.net>
* [Dongjia Zhang](https://github.com/tokatoka) <toka@aflplus.plus> * [Dongjia Zhang](https://github.com/tokatoka) <toka@aflplus.plus>
* [Addison Crump](https://github.com/addisoncrump) <me@addisoncrump.info>
## Why LibAFL? ## Why LibAFL?
@@ -34,7 +33,7 @@ LibAFL offers integrations with popular instrumentation frameworks. At the momen
+ SanitizerCoverage, in [libafl_targets](./libafl_targets) + SanitizerCoverage, in [libafl_targets](./libafl_targets)
+ Frida, in [libafl_frida](./libafl_frida) + Frida, in [libafl_frida](./libafl_frida)
+ QEMU user-mode and system mode, including hooks for emulation, in [libafl_qemu](./libafl_qemu) + QEMU user-mode, in [libafl_qemu](./libafl_qemu)
+ TinyInst, in [libafl_tinyinst](./libafl_tinyinst) by [elbiazo](https://github.com/elbiazo) + TinyInst, in [libafl_tinyinst](./libafl_tinyinst) by [elbiazo](https://github.com/elbiazo)
## Getting started ## Getting started
@@ -45,39 +44,36 @@ We highly recommend *not* to use e.g. your Linux distribition package as this is
Rust directly, instructions can be found [here](https://www.rust-lang.org/tools/install). Rust directly, instructions can be found [here](https://www.rust-lang.org/tools/install).
- LLVM tools - LLVM tools
The LLVM tools (including clang, clang++) are needed (newer than LLVM 15.0.0 up to LLVM 18.1.3) The LLVM tools are needed (newer than LLVM 11.0.0 but older than LLVM 15.0.0)
If you are using Debian/Ubuntu, again, we highly recommmend that you install the package from [here](https://apt.llvm.org/)
(In `libafl_concolic`, we only support LLVM version newer than 18)
- Cargo-make - Cargo-make
We use cargo-make to build the fuzzers in `fuzzers/` directory. You can install it with We use cargo-make to build the fuzzers in `fuzzers/` directory. You can install it with
```sh ```
cargo install cargo-make cargo install cargo-make
``` ```
2. Clone the LibAFL repository with 2. Clone the LibAFL repository with
```sh ```
git clone https://github.com/AFLplusplus/LibAFL git clone https://github.com/AFLplusplus/LibAFL
``` ```
3. Build the library using 3. Build the library using
```sh ```
cargo build --release cargo build --release
``` ```
4. Build the API documentation with 4. Build the API documentation with
```sh ```
cargo doc cargo doc
``` ```
5. Browse the LibAFL book (WIP!) with (requires [mdbook](https://rust-lang.github.io/mdBook/index.html)) 5. Browse the LibAFL book (WIP!) with (requires [mdbook](https://github.com/rust-lang/mdBook))
```sh ```
cd docs && mdbook serve cd docs && mdbook serve
``` ```
@@ -85,11 +81,9 @@ We collect all example fuzzers in [`./fuzzers`](./fuzzers/).
Be sure to read their documentation (and source), this is *the natural way to get started!* Be sure to read their documentation (and source), this is *the natural way to get started!*
You can run each example fuzzer with You can run each example fuzzer with
```
```sh
cargo make run cargo make run
``` ```
as long as the fuzzer directory has `Makefile.toml` file. as long as the fuzzer directory has `Makefile.toml` file.
The best-tested fuzzer is [`./fuzzers/libfuzzer_libpng`](./fuzzers/libfuzzer_libpng), a multicore libfuzzer-like fuzzer using LibAFL for a libpng harness. The best-tested fuzzer is [`./fuzzers/libfuzzer_libpng`](./fuzzers/libfuzzer_libpng), a multicore libfuzzer-like fuzzer using LibAFL for a libpng harness.
@@ -112,11 +106,17 @@ The best-tested fuzzer is [`./fuzzers/libfuzzer_libpng`](./fuzzers/libfuzzer_lib
+ Blogpost on binary-only fuzzing lib libaf_qemu, [Hacking TMNF - Fuzzing the game server](https://blog.bricked.tech/posts/tmnf/part1/), by [RickdeJager](https://github.com/RickdeJager). + Blogpost on binary-only fuzzing lib libaf_qemu, [Hacking TMNF - Fuzzing the game server](https://blog.bricked.tech/posts/tmnf/part1/), by [RickdeJager](https://github.com/RickdeJager).
+ [A LibAFL Introductory Workshop](https://www.atredis.com/blog/2023/12/4/a-libafl-introductory-workshop), by [Jordan Whitehead](https://github.com/jordan9001)
## Contributing ## Contributing
Please check out [CONTRIBUTING.md](CONTRIBUTING.md) for the contributing guideline. For bugs, feel free to open issues or contact us directly. Thank you for your support. <3
Even though we will gladly assist you in finishing up your PR, try to
- keep all the crates compiling with *stable* rust (hide the eventual non-stable code under [`cfg`s](https://github.com/AFLplusplus/LibAFL/blob/main/libafl/build.rs#L26))
- run `cargo fmt` on your code before pushing
- check the output of `cargo clippy --all` or `./clippy.sh`
- run `cargo build --no-default-features` to check for `no_std` compatibility (and possibly add `#[cfg(feature = "std")]`) to hide parts of your code.
Some of the parts in this list may be hard, don't be afraid to open a PR if you cannot fix them by yourself, so we can help.
## Cite ## Cite
@@ -149,3 +149,11 @@ Unless you explicitly state otherwise, any contribution intentionally submitted
for inclusion in this crate by you, as defined in the Apache-2.0 license, shall
be dual licensed as above, without any additional terms or conditions.
</sub>
<br>
<sub>
Dependencies under more restrictive licenses, such as GPL or AGPL, can be enabled
using the respective feature in each crate when it is present, such as the
'agpl' feature of the libafl crate.
</sub>

View File

@ -1,17 +0,0 @@
# Bugs found by `libafl` and `libafl_libfuzzer`
* pdf-rs
* <https://github.com/pdf-rs/pdf/issues/183>
* <https://github.com/pdf-rs/pdf/issues/184>
* <https://github.com/pdf-rs/pdf/issues/185>
* <https://github.com/pdf-rs/pdf/issues/186>
* <https://github.com/pdf-rs/pdf/issues/187>
* <https://github.com/pdf-rs/pdf/issues/189>
* nu-shell
* https://github.com/nushell/nushell/issues/10365
* https://github.com/nushell/nushell/issues/9417
* exrs
* https://github.com/johannesvollmer/exrs/pull/221
* pcre2
* https://github.com/PCRE2Project/pcre2/issues/275

View File

@ -1 +0,0 @@
dist/

View File

@ -1,16 +1,13 @@
[package] [package]
name = "pylibafl" name = "pylibafl"
version = "0.13.0" version = "0.8.2"
edition = "2021" edition = "2021"
[dependencies] [dependencies]
pyo3 = { version = "0.18.3", features = ["extension-module"] } pyo3 = { version = "0.17", features = ["extension-module"] }
pyo3-log = "0.8.1" libafl_qemu = { path = "../../libafl_qemu", version = "0.8.2", features = ["python"] }
libafl_sugar = { path = "../../libafl_sugar", version = "0.13.0", features = ["python"] } libafl_sugar = { path = "../../libafl_sugar", version = "0.8.2", features = ["python"] }
libafl_bolts = { path = "../../libafl_bolts", version = "0.13.0", features = ["python"] } libafl = { path = "../../libafl", version = "0.8.2", features = ["python"] }
[target.'cfg(target_os = "linux")'.dependencies]
libafl_qemu = { path = "../../libafl_qemu", version = "0.13.0", features = ["python"] }
[build-dependencies] [build-dependencies]
pyo3-build-config = { version = "0.17" } pyo3-build-config = { version = "0.17" }

View File

@ -1,26 +0,0 @@
[build-system]
requires = ["maturin[patchelf]>=0.14.10,<0.15"]
build-backend = "maturin"
[project]
name = "PyLibAFL"
version = "0.10.1"
description = "Advanced Fuzzing Library for Python"
readme = "README.md"
requires-python = ">=3.8"
license = {text = "Apache-2.0"}
classifiers = [
"License :: OSI Approved :: Apache Software License",
"License :: OSI Approved :: MIT License",
"Programming Language :: Rust",
"Topic :: Security",
]
[project.urls]
repository = "https://github.com/AFLplusplus/LibAFL.git"
[tool.maturin]
bindings = "pylibafl"
manifest-path = "Cargo.toml"
python-source = "python"
all-features = true

View File

@ -1,33 +1,121 @@
use pyo3::prelude::*; use libafl;
#[cfg(target_os = "linux")]
use libafl_qemu;
use libafl_sugar;
use pyo3::{prelude::*, types::PyDict};
const LIBAFL_CODE: &str = r#"
class BaseObserver:
def flush(self):
pass
def pre_exec(self, state, input):
pass
def post_exec(self, state, input, exit_kind):
pass
def pre_exec_child(self, state, input):
pass
def post_exec_child(self, state, input, exit_kind):
pass
def name(self):
return type(self).__name__
def as_observer(self):
return Observer.new_py(self)
class BaseFeedback:
def init_state(self, state):
pass
def is_interesting(self, state, mgr, input, observers, exit_kind) -> bool:
return False
def append_metadata(self, state, testcase):
pass
def discard_metadata(self, state, input):
pass
def name(self):
return type(self).__name__
def as_feedback(self):
return Feedback.new_py(self)
class BaseExecutor:
def observers(self) -> ObserversTuple:
raise NotImplementedError('Implement this yourself')
def run_target(self, fuzzer, state, mgr, input) -> ExitKind:
raise NotImplementedError('Implement this yourself')
def as_executor(self):
return Executor.new_py(self)
class BaseStage:
def perform(self, fuzzer, executor, state, manager, corpus_idx):
pass
def as_stage(self):
return Stage.new_py(self)
class BaseMutator:
def mutate(self, state, input, stage_idx):
pass
def post_exec(self, state, stage_idx, corpus_idx):
pass
def as_mutator(self):
return Mutator.new_py(self)
class FnStage(BaseStage):
def __init__(self, fn):
self.fn = fn
def __call__(self, fuzzer, executor, state, manager, corpus_idx):
self.fn(fuzzer, executor, state, manager, corpus_idx)
def perform(self, fuzzer, executor, state, manager, corpus_idx):
self.fn(fuzzer, executor, state, manager, corpus_idx)
def feedback_not(a):
return NotFeedback(a).as_feedback()
def feedback_and(a, b):
return EagerAndFeedback(a, b).as_feedback()
def feedback_and_fast(a, b):
return FastAndFeedback(a, b).as_feedback()
def feedback_or(a, b):
return EagerOrFeedback(a, b).as_feedback()
def feedback_or_fast(a, b):
return FastOrFeedback(a, b).as_feedback()
"#;
/// Setup python modules for `libafl_qemu` and `libafl_sugar`.
///
/// # Errors
/// Returns error if python libafl setup failed.
#[pymodule] #[pymodule]
#[pyo3(name = "pylibafl")] #[pyo3(name = "pylibafl")]
pub fn python_module(py: Python, m: &PyModule) -> PyResult<()> { pub fn python_module(py: Python, m: &PyModule) -> PyResult<()> {
pyo3_log::init();
let modules = py.import("sys")?.getattr("modules")?; let modules = py.import("sys")?.getattr("modules")?;
let sugar_module = PyModule::new(py, "sugar")?; let sugar_module = PyModule::new(py, "sugar")?;
libafl_sugar::python_module(py, sugar_module)?; libafl_sugar::python_module(py, sugar_module)?;
m.add_submodule(sugar_module)?; m.add_submodule(sugar_module)?;
modules.set_item("pylibafl.sugar", sugar_module)?; modules.set_item("pylibafl.sugar", sugar_module)?;
#[cfg(target_os = "linux")] #[cfg(target_os = "linux")]
{ let qemu_module = PyModule::new(py, "qemu")?;
let qemu_module = PyModule::new(py, "qemu")?; #[cfg(target_os = "linux")]
libafl_qemu::python_module(py, qemu_module)?; libafl_qemu::python_module(py, qemu_module)?;
m.add_submodule(qemu_module)?; #[cfg(target_os = "linux")]
modules.set_item("pylibafl.qemu", qemu_module)?; m.add_submodule(qemu_module)?;
#[cfg(target_os = "linux")]
modules.set_item("pylibafl.qemu", qemu_module)?;
let libafl_module = PyModule::new(py, "libafl")?;
libafl::pybind::python_module(py, libafl_module)?;
libafl_module.add("__builtins__", py.import("builtins")?)?;
let locals = PyDict::new(py);
py.run(LIBAFL_CODE, Some(libafl_module.dict()), Some(locals))?;
for (key, val) in locals.iter() {
libafl_module.add(key.extract::<&str>()?, val)?;
} }
let bolts_module = PyModule::new(py, "libafl_bolts")?; m.add_submodule(libafl_module)?;
libafl_bolts::pybind::python_module(py, bolts_module)?;
m.add_submodule(bolts_module)?; modules.set_item("pylibafl.libafl", libafl_module)?;
modules.set_item("pylibafl.libafl_bolts", bolts_module)?;
Ok(()) Ok(())
} }

View File

@ -1,7 +1,94 @@
import pylibafl.sugar as sugar from pylibafl.libafl import *
import ctypes import ctypes
import platform
print("Starting to fuzz from python!")
fuzzer = sugar.InMemoryBytesCoverageSugar(input_dirs=["./in"], output_dir="out", broker_port=1337, cores=[0,1]) class FooObserver(BaseObserver):
fuzzer.run(lambda b: print("foo")) def __init__(self):
self.n = 0
def name(self):
return "Foo"
def pre_exec(self, state, input):
if self.n % 10000 == 0:
print("FOO!", self.n, input)
self.n += 1
class FooFeedback(BaseFeedback):
def is_interesting(self, state, mgr, input, observers, exit_kind):
ob = observers.match_name("Foo").unwrap_py()
return ob.n % 10000 == 0
class FooExecutor(BaseExecutor):
def __init__(self, harness, observers: ObserversTuple):
self.h = harness
self.o = observers
def observers(self):
return self.o
def run_target(self, fuzzer, state, mgr, input) -> ExitKind:
return (self.h)(input)
libc = ctypes.cdll.LoadLibrary("libc.so.6")
area_ptr = libc.calloc(1, 4096)
observer = StdMapObserverI8("mymap", area_ptr, 4096)
m = observer.as_map_observer()
observers = ObserversTuple(
[observer.as_map_observer().as_observer(), FooObserver().as_observer()]
)
feedback = feedback_or(MaxMapFeedbackI8(m).as_feedback(), FooFeedback().as_feedback())
objective = feedback_and_fast(
CrashFeedback().as_feedback(), MaxMapFeedbackI8(m).as_feedback()
)
fuzzer = StdFuzzer(feedback, objective)
rand = StdRand.with_current_nanos()
state = StdState(
rand.as_rand(),
InMemoryCorpus().as_corpus(),
InMemoryCorpus().as_corpus(),
feedback,
objective,
)
monitor = SimpleMonitor(lambda s: print(s))
mgr = SimpleEventManager(monitor.as_monitor())
def harness(buf) -> ExitKind:
# print(buf)
m[0] = 1
if len(buf) > 0 and buf[0] == ord("a"):
m[1] = 1
if len(buf) > 1 and buf[1] == ord("b"):
m[2] = 1
if len(buf) > 2 and buf[2] == ord("c"):
m[3] = 1
return ExitKind.crash()
return ExitKind.ok()
# executor = InProcessExecutor(harness, observers, fuzzer, state, mgr.as_manager())
executor = FooExecutor(harness, observers)
stage = StdMutationalStage(StdHavocMutator().as_mutator())
stage_tuple_list = StagesTuple([stage.as_stage()])
fuzzer.add_input(state, executor.as_executor(), mgr.as_manager(), b"\0\0")
fuzzer.fuzz_loop(executor.as_executor(), state, mgr.as_manager(), stage_tuple_list)

View File

@ -1,14 +0,0 @@
#!/usr/bin/env bash
mkdir in || true
echo "a" > ./in/a
timeout 10 python3 ./test.py
export exit_code=$?
if [ $exit_code -eq 124 ]; then
# 124 = timeout happened. All good.
exit 0
else
exit $exit_code
fi

docs/.gitignore vendored
View File

@ -1,2 +1 @@
book
!listings/**/*

View File

@ -1,9 +0,0 @@
[package]
name = "baby_fuzzer_listing_01"
version = "0.1.0"
authors = ["Your Name <you@example.com>"]
edition = "2018"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]

View File

@ -1,3 +0,0 @@
fn main() {
println!("Hello, world!");
}

View File

@ -1,21 +0,0 @@
[package]
name = "baby_fuzzer_listing_02"
version = "0.1.0"
authors = ["Your Name <you@example.com>"]
edition = "2018"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
libafl = { path = "path/to/libafl/" }
libafl_bolts = { path = "path/to/libafl_bolts/" }
[profile.dev]
panic = "abort"
[profile.release]
panic = "abort"
lto = true
codegen-units = 1
opt-level = 3
debug = true

View File

@ -1,3 +0,0 @@
fn main() {
println!("Hello, world!");
}

View File

@ -1,24 +0,0 @@
[package]
name = "baby_fuzzer_listing_03"
version = "0.1.0"
authors = ["Your Name <you@example.com>"]
edition = "2018"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
libafl = { path = "path/to/libafl/" }
libafl_bolts = { path = "path/to/libafl_bolts/" }
[profile.dev]
panic = "abort"
[profile.release]
panic = "abort"
lto = true
codegen-units = 1
opt-level = 3
debug = true
[features]
panic = []

View File

@ -1,26 +0,0 @@
extern crate libafl;
extern crate libafl_bolts;
use libafl::{
executors::ExitKind,
inputs::{BytesInput, HasTargetBytes},
};
use libafl_bolts::AsSlice;
fn main() {
let mut harness = |input: &BytesInput| {
let target = input.target_bytes();
let buf = target.as_slice();
if buf.len() > 0 && buf[0] == 'a' as u8 {
if buf.len() > 1 && buf[1] == 'b' as u8 {
if buf.len() > 2 && buf[2] == 'c' as u8 {
panic!("=)");
}
}
}
ExitKind::Ok
};
// To test the panic:
let input = BytesInput::new(Vec::from("abc"));
#[cfg(feature = "panic")]
harness(&input);
}

View File

@ -1,24 +0,0 @@
[package]
name = "baby_fuzzer_listing_04"
version = "0.1.0"
authors = ["Your Name <you@example.com>"]
edition = "2018"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
libafl = { path = "path/to/libafl/" }
libafl_bolts = { path = "path/to/libafl_bolts/" }
[profile.dev]
panic = "abort"
[profile.release]
panic = "abort"
lto = true
codegen-units = 1
opt-level = 3
debug = true
[features]
panic = []

View File

@ -1,87 +0,0 @@
/* ANCHOR: use */
extern crate libafl;
extern crate libafl_bolts;
use std::path::PathBuf;
use libafl::{
corpus::{InMemoryCorpus, OnDiskCorpus},
events::SimpleEventManager,
executors::{inprocess::InProcessExecutor, ExitKind},
fuzzer::StdFuzzer,
generators::RandPrintablesGenerator,
inputs::{BytesInput, HasTargetBytes},
monitors::SimpleMonitor,
schedulers::QueueScheduler,
state::StdState,
};
use libafl_bolts::{rands::StdRand, tuples::tuple_list, AsSlice};
/* ANCHOR_END: use */
fn main() {
let mut harness = |input: &BytesInput| {
let target = input.target_bytes();
let buf = target.as_slice();
if buf.len() > 0 && buf[0] == 'a' as u8 {
if buf.len() > 1 && buf[1] == 'b' as u8 {
if buf.len() > 2 && buf[2] == 'c' as u8 {
panic!("=)");
}
}
}
ExitKind::Ok
};
// To test the panic:
let input = BytesInput::new(Vec::from("abc"));
#[cfg(feature = "panic")]
harness(&input);
/* ANCHOR: state */
// create a State from scratch
let mut state = StdState::new(
// RNG
StdRand::new(),
// Corpus that will be evolved, we keep it in memory for performance
InMemoryCorpus::new(),
// Corpus in which we store solutions (crashes in this example),
// on disk so the user can get them after stopping the fuzzer
OnDiskCorpus::new(PathBuf::from("./crashes")).unwrap(),
&mut (),
&mut (),
)
.unwrap();
/* ANCHOR_END: state */
/* ANCHOR: event_manager */
// The Monitor trait defines how the fuzzer stats are displayed to the user
let mon = SimpleMonitor::new(|s| println!("{s}"));
// The event manager handles the various events generated during the fuzzing loop
// such as the notification of the addition of a new item to the corpus
let mut mgr = SimpleEventManager::new(mon);
/* ANCHOR_END: event_manager */
/* ANCHOR: scheduler_fuzzer */
// A queue policy to get testcases from the corpus
let scheduler = QueueScheduler::new();
// A fuzzer with feedbacks and a corpus scheduler
let mut fuzzer = StdFuzzer::new(scheduler, (), ());
/* ANCHOR_END: scheduler_fuzzer */
/* ANCHOR: executor */
// Create the executor for an in-process function
let mut executor = InProcessExecutor::new(&mut harness, (), &mut fuzzer, &mut state, &mut mgr)
.expect("Failed to create the Executor");
/* ANCHOR_END: executor */
/* ANCHOR: generator */
// Generator of printable bytearrays of max size 32
let mut generator = RandPrintablesGenerator::new(32);
// Generate 8 initial inputs
state
.generate_initial_inputs(&mut fuzzer, &mut executor, &mut generator, &mut mgr, 8)
.expect("Failed to generate the initial corpus");
/* ANCHOR_END: generator */
}

View File

@ -1,23 +0,0 @@
[package]
name = "baby_fuzzer_listing_05"
version = "0.1.0"
authors = ["Your Name <you@example.com>"]
edition = "2018"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
libafl = { path = "path/to/libafl/" }
[profile.dev]
panic = "abort"
[profile.release]
panic = "abort"
lto = true
codegen-units = 1
opt-level = 3
debug = true
[features]
panic = []

View File

@ -1,116 +0,0 @@
/* ANCHOR: use */
extern crate libafl;
extern crate libafl_bolts;
use libafl::{
corpus::{InMemoryCorpus, OnDiskCorpus},
events::SimpleEventManager,
executors::{inprocess::InProcessExecutor, ExitKind},
feedbacks::{CrashFeedback, MaxMapFeedback},
fuzzer::StdFuzzer,
generators::RandPrintablesGenerator,
inputs::{BytesInput, HasTargetBytes},
monitors::SimpleMonitor,
observers::StdMapObserver,
schedulers::QueueScheduler,
state::StdState,
};
use libafl_bolts::{rands::StdRand, tuples::tuple_list, AsSlice};
use std::path::PathBuf;
/* ANCHOR_END: use */
/* ANCHOR: signals */
// Coverage map with explicit assignments due to the lack of instrumentation
static mut SIGNALS: [u8; 16] = [0; 16];
fn signals_set(idx: usize) {
unsafe { SIGNALS[idx] = 1 };
}
fn main() {
// The closure that we want to fuzz
let mut harness = |input: &BytesInput| {
let target = input.target_bytes();
let buf = target.as_slice();
signals_set(0); // set SIGNALS[0]
if buf.len() > 0 && buf[0] == 'a' as u8 {
signals_set(1); // set SIGNALS[1]
if buf.len() > 1 && buf[1] == 'b' as u8 {
signals_set(2); // set SIGNALS[2]
if buf.len() > 2 && buf[2] == 'c' as u8 {
panic!("=)");
}
}
}
ExitKind::Ok
};
/* ANCHOR_END: signals */
// To test the panic:
let input = BytesInput::new(Vec::from("abc"));
#[cfg(feature = "panic")]
harness(&input);
/* ANCHOR: observer */
// Create an observation channel using the signals map
let observer = unsafe { StdMapObserver::new("signals", &mut SIGNALS) };
/* ANCHOR_END: observer */
/* ANCHOR: state_with_feedback_and_objective */
// Feedback to rate the interestingness of an input
let mut feedback = MaxMapFeedback::new(&observer);
// A feedback to choose if an input is a solution or not
let mut objective = CrashFeedback::new();
// create a State from scratch
let mut state = StdState::new(
// RNG
StdRand::new(),
// Corpus that will be evolved, we keep it in memory for performance
InMemoryCorpus::new(),
// Corpus in which we store solutions (crashes in this example),
// on disk so the user can get them after stopping the fuzzer
OnDiskCorpus::new(PathBuf::from("./crashes")).unwrap(),
&mut feedback,
&mut objective,
)
.unwrap();
/* ANCHOR_END: state_with_feedback_and_objective */
// The Monitor trait defines how the fuzzer stats are displayed to the user
let mon = SimpleMonitor::new(|s| println!("{s}"));
// The event manager handles the various events generated during the fuzzing loop
// such as the notification of the addition of a new item to the corpus
let mut mgr = SimpleEventManager::new(mon);
// A queue policy to get testcases from the corpus
let scheduler = QueueScheduler::new();
/* ANCHOR: state_with_feedback_and_objective */
// A fuzzer with feedbacks and a corpus scheduler
let mut fuzzer = StdFuzzer::new(scheduler, feedback, objective);
/* ANCHOR_END: state_with_feedback_and_objective */
/* ANCHOR: executor_with_observer */
// Create the executor for an in-process function with just one observer
let mut executor = InProcessExecutor::new(
&mut harness,
tuple_list!(observer),
&mut fuzzer,
&mut state,
&mut mgr,
)
.expect("Failed to create the Executor");
/* ANCHOR_END: executor_with_observer */
// Generator of printable bytearrays of max size 32
let mut generator = RandPrintablesGenerator::new(32);
// Generate 8 initial inputs
state
.generate_initial_inputs(&mut fuzzer, &mut executor, &mut generator, &mut mgr, 8)
.expect("Failed to generate the initial corpus");
/* ANCHOR: signals */
}
/* ANCHOR_END: signals */

View File

@ -1,24 +0,0 @@
[package]
name = "baby_fuzzer_listing_06"
version = "0.1.0"
authors = ["Your Name <you@example.com>"]
edition = "2018"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
libafl = { path = "path/to/libafl/" }
libafl_bolts = { path = "path/to/libafl_bolts/" }
[profile.dev]
panic = "abort"
[profile.release]
panic = "abort"
lto = true
codegen-units = 1
opt-level = 3
debug = true
[features]
panic = []

View File

@ -1,116 +0,0 @@
/* ANCHOR: use */
extern crate libafl;
extern crate libafl_bolts;
use libafl::{
corpus::{InMemoryCorpus, OnDiskCorpus},
events::SimpleEventManager,
executors::{inprocess::InProcessExecutor, ExitKind},
feedbacks::{CrashFeedback, MaxMapFeedback},
fuzzer::{Fuzzer, StdFuzzer},
generators::RandPrintablesGenerator,
inputs::{BytesInput, HasTargetBytes},
monitors::SimpleMonitor,
mutators::scheduled::{havoc_mutations, StdScheduledMutator},
observers::StdMapObserver,
schedulers::QueueScheduler,
stages::mutational::StdMutationalStage,
state::StdState,
};
use libafl_bolts::{rands::StdRand, tuples::tuple_list, AsSlice};
use std::path::PathBuf;
/* ANCHOR_END: use */
// Coverage map with explicit assignments due to the lack of instrumentation
static mut SIGNALS: [u8; 16] = [0; 16];
fn signals_set(idx: usize) {
unsafe { SIGNALS[idx] = 1 };
}
fn main() {
// The closure that we want to fuzz
let mut harness = |input: &BytesInput| {
let target = input.target_bytes();
let buf = target.as_slice();
signals_set(0); // set SIGNALS[0]
if buf.len() > 0 && buf[0] == 'a' as u8 {
signals_set(1); // set SIGNALS[1]
if buf.len() > 1 && buf[1] == 'b' as u8 {
signals_set(2); // set SIGNALS[2]
if buf.len() > 2 && buf[2] == 'c' as u8 {
panic!("=)");
}
}
}
ExitKind::Ok
};
// To test the panic:
let input = BytesInput::new(Vec::from("abc"));
#[cfg(feature = "panic")]
harness(&input);
// Create an observation channel using the signals map
let observer = unsafe { StdMapObserver::new("signals", &mut SIGNALS) };
// Feedback to rate the interestingness of an input
let mut feedback = MaxMapFeedback::new(&observer);
// A feedback to choose if an input is a solution or not
let mut objective = CrashFeedback::new();
// create a State from scratch
let mut state = StdState::new(
// RNG
StdRand::new(),
// Corpus that will be evolved, we keep it in memory for performance
InMemoryCorpus::new(),
// Corpus in which we store solutions (crashes in this example),
// on disk so the user can get them after stopping the fuzzer
OnDiskCorpus::new(PathBuf::from("./crashes")).unwrap(),
&mut feedback,
&mut objective,
)
.unwrap();
// The Monitor trait defines how the fuzzer stats are displayed to the user
let mon = SimpleMonitor::new(|s| println!("{s}"));
// The event manager handles the various events generated during the fuzzing loop
// such as the notification of the addition of a new item to the corpus
let mut mgr = SimpleEventManager::new(mon);
// A queue policy to get testcases from the corpus
let scheduler = QueueScheduler::new();
// A fuzzer with feedbacks and a corpus scheduler
let mut fuzzer = StdFuzzer::new(scheduler, feedback, objective);
// Create the executor for an in-process function with just one observer
let mut executor = InProcessExecutor::new(
&mut harness,
tuple_list!(observer),
&mut fuzzer,
&mut state,
&mut mgr,
)
.expect("Failed to create the Executor");
// Generator of printable bytearrays of max size 32
let mut generator = RandPrintablesGenerator::new(32);
// Generate 8 initial inputs
state
.generate_initial_inputs(&mut fuzzer, &mut executor, &mut generator, &mut mgr, 8)
.expect("Failed to generate the initial corpus");
/* ANCHOR: mutational_stage */
// Setup a mutational stage with a basic bytes mutator
let mutator = StdScheduledMutator::new(havoc_mutations());
let mut stages = tuple_list!(StdMutationalStage::new(mutator));
fuzzer
.fuzz_loop(&mut stages, &mut executor, &mut state, &mut mgr)
.expect("Error in the fuzzing loop");
/* ANCHOR_END: mutational_stage */
}

View File

@ -1,32 +0,0 @@
# General debugging tips
This file answers some common questions that arise when you are writing a fuzzer using LibAFL.
## Q. My fuzzer crashed but the stack trace is useless.
You can enable the `errors_backtrace` feature of the `libafl` crate. With this feature enabled, the stack trace is meaningful.
## Q. I started the fuzzer but the corpus count is 0.
Unless the initial corpus is loaded with the `load_initial_inputs_forced` function, we only store the interesting inputs, that is, the inputs that triggered the feedback. So this usually means that your input was not interesting, or your target was simply not properly implemented.
Either way, what you can do is attach to the executable with gdb and set a breakpoint at where the new edges should be reported. If no instrumentation code is executed, then the problem is in the instrumentation. If the instrumentation code is hit but your input is still not considered interesting, then the problem could be that you are not passing the observer/feedback correctly to the fuzzer.
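If you know your seeds are valid and simply want them all in the corpus regardless of feedback, forced loading looks roughly like this (a sketch; the argument order is assumed to mirror the other corpus-loading helpers):
```
state
    .load_initial_inputs_forced(&mut fuzzer, &mut executor, &mut mgr, &[PathBuf::from("./corpus")])
    .expect("Failed to load the initial corpus");
```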
## Q. I started the fuzzer but the coverage is 0.
This could mean two things. Perhaps your target was not properly instrumented, or you are not using the correct observer/feedback.
In this case, again, what you usually should do is run the fuzzer under gdb, set a breakpoint where the coverage is recorded (e.g. `__sanitizer_cov_trace_pc_guard`), and validate that the target is giving the feedback to the fuzzer.
## Q. I started the fuzzer but there's no output.
First, verify that your stdout and stderr are not redirected to `/dev/null`. If you do get logs, then it should fall into one of the previous two cases: either the fuzzer crashed because you didn't have the initial seeds, or the coverage feedback is not working.
## Q. My fuzzer is slow.
Try running the fuzzer with the `introspection` feature of the `libafl` crate. This will show how much time is spent on each module of your fuzzer. Also, you might be using the wrong size for the coverage map. If you see `2621440` as the size of the coverage map, you are doing it wrong. One possible mistake is the misuse of `libafl_targets::coverage::EDGES_MAP`:
```
let map = StdMapObserver::from_mut_ptr("edges", EDGES_MAP.as_mut_ptr(), EDGES_MAP.len());
```
You should *never* use `EDGES_MAP`'s full length, as this is just the allocated size of the coverage map. Consider using something smaller, or our default value `libafl_targets::LIBAFL_EDGES_MAP_SIZE_IN_USE`.
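A sketch of the corrected call, reusing the snippet above with the in-use size constant named in the previous sentence:
```
// Only observe the portion of the coverage map that is actually in use.
let map = StdMapObserver::from_mut_ptr(
    "edges",
    EDGES_MAP.as_mut_ptr(),
    libafl_targets::LIBAFL_EDGES_MAP_SIZE_IN_USE,
);
```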
## Q. I still have problems with my fuzzer.
Finally, if you really have no idea what is going on, run your fuzzer with logging enabled (you can use `env_logger`, `SimpleStdoutLogger`, or `SimpleStderrLogger` from `libafl_bolts`; `fuzzbench_text` has an example showing how to use it). Don't forget to enable stdout and stderr, and then you can open an issue or ask us in Discord.
## Q. My fuzzer died of "Storing state in crashed fuzzer instance did not work".
If the exit code is zero, then this is because either your harness exited, or you are using `fuzz_loop_for` and forgot to call `mgr.on_restart` at the end of the fuzzer. In the first case, you should patch your harness not to exit (or use `utils/deexit`).
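A minimal sketch of the `fuzz_loop_for` case (assuming a fuzzer assembled as in the examples; exact signatures may differ between versions):
```
// Run for a fixed number of iterations, then tell the event manager we are
// about to exit, so the state is stored for the next (restarted) instance.
fuzzer.fuzz_loop_for(&mut stages, &mut executor, &mut state, &mut mgr, 10_000)?;
mgr.on_restart(&mut state)?;
```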
## Q. I can't leave the TUI screen
Type `q` to leave the TUI.

View File

@ -25,7 +25,6 @@
- [Architecture](./design/architecture.md)
- [Metadata](./design/metadata.md)
- [Migrating from LibAFL <0.9 to 0.9](./design/migration-0.9.md)
- [Migrating from LibAFL <0.11 to 0.11](./design/migration-0.11.md)
- [Message Passing](./message_passing/message_passing.md)
- [Spawning Instances](./message_passing/spawn_instances.md)

View File

@ -1,8 +1,7 @@
# Concolic Tracing and Hybrid Fuzzing
LibAFL has support for concolic tracing based on the [SymCC](https://github.com/eurecom-s3/symcc) instrumenting compiler.
For those uninitiated, the following text attempts to describe concolic tracing from the ground up using an example.
Then, we'll go through the relationship of SymCC and LibAFL concolic tracing.
Finally, we'll walk through building a basic hybrid fuzzer using LibAFL.
@ -93,18 +92,18 @@ In hybrid fuzzing, we combine this tracing + solving approach with more traditio
The concolic tracing support in LibAFL is implemented using SymCC.
SymCC is a compiler plugin for clang that can be used as a drop-in replacement for a normal C or C++ compiler.
SymCC will instrument the compiled code with callbacks into a runtime that can be supplied by the user.
These callbacks allow the runtime to construct a trace that is similar to the previous example.
### SymCC and its Runtimes
SymCC ships with 2 runtimes:
* A 'simple' runtime that attempts to negate and analytically solve any branch conditions it comes across using [Z3](https://github.com/Z3Prover/z3/wiki) and
* A [QSym](https://github.com/sslab-gatech/qsym)-based runtime, which does a bit more filtering on the expressions and also solves them using Z3.
The integration with LibAFL, however, requires you to **BYORT** (_bring your own runtime_) using the [`symcc_runtime`](https://docs.rs/symcc_runtime/0.1/symcc_runtime) crate.
This crate allows you to easily build a custom runtime out of the built-in building blocks or create entirely new runtimes with full flexibility.
Check out the `symcc_runtime` docs for more information on how to build your own runtime.
### SymQEMU
@ -124,7 +123,7 @@ There are three main steps involved with building a hybrid fuzzer using LibAFL:
3. building the fuzzer.
Note that the order of these steps is important.
For example, we need to have a runtime ready before we can do instrumentation with SymCC.
### Building a Runtime
@ -135,12 +134,10 @@ Check out the [example hybrid fuzzer's runtime](https://github.com/AFLplusplus/L
### Instrumentation
There are two main instrumentation methods to make use of concolic tracing in LibAFL:
* Using a **compile-time** instrumented target with **SymCC**.
This only works when the source is available for the target and the target is reasonably easy to build using the SymCC compiler wrapper.
* Using **SymQEMU** to dynamically instrument the target at **runtime**.
This avoids building a separate instrumented target with concolic tracing instrumentation and so does not require source code.
It should be noted, however, that the 'quality' of the generated expressions can be significantly worse, and SymQEMU generally produces significantly more, and significantly more convoluted, expressions than SymCC.
Therefore, it is recommended to use SymCC over SymQEMU when possible.
@ -161,25 +158,25 @@ Make sure you satisfy the [build requirements](https://github.com/eurecom-s3/sym
Build SymQEMU according to its [build instructions](https://github.com/eurecom-s3/symqemu#readme).
By default, SymQEMU looks for the runtime in a sibling directory.
Since we don't have a runtime there, we need to explicitly set the `--symcc-build` argument of the `configure` script to the path of your runtime.
### Building the Fuzzer
No matter the instrumentation method, the interface between the fuzzer and the instrumented target should now be consistent.
The only difference between using SymCC and SymQEMU should be the binary that represents the target:
In the case of SymCC it will be the binary that was built with instrumentation, and with SymQEMU it will be the emulator binary (e.g. `x86_64-linux-user/symqemu-x86_64`), followed by your uninstrumented target binary and its arguments.
You can use the [`CommandExecutor`](https://docs.rs/libafl/latest/libafl/executors/command/struct.CommandExecutor.html) to execute your target ([example](https://github.com/AFLplusplus/LibAFL/blob/main/fuzzers/libfuzzer_stb_image_concolic/fuzzer/src/main.rs#L244)).
When configuring the command, make sure you pass the `SYMCC_INPUT_FILE` environment variable (set to the input file path), if your target reads input from a file (instead of standard input).
#### Serialization and Solving
While it is perfectly possible to build a custom runtime that also performs the solving step of hybrid fuzzing in the context of the target process, the intended use of the LibAFL concolic tracing support is to serialize the (filtered and pre-processed) branch conditions using the [`TracingRuntime`](https://docs.rs/symcc_runtime/0.1/symcc_runtime/tracing/struct.TracingRuntime.html).
This serialized representation can be deserialized in the fuzzer process for solving using a [`ConcolicObserver`](https://docs.rs/libafl/latest/libafl/observers/concolic/struct.ConcolicObserver.html) wrapped in a [`ConcolicTracingStage`](https://docs.rs/libafl/latest/libafl/stages/concolic/struct.ConcolicTracingStage.html), which will attach a [`ConcolicMetadata`](https://docs.rs/libafl/latest/libafl/observers/concolic/struct.ConcolicMetadata.html) to every [`TestCase`](https://docs.rs/libafl/latest/libafl/corpus/testcase/struct.Testcase.html).
The `ConcolicMetadata` can be used to replay the concolic trace and to solve the conditions using an SMT-Solver.
Most use-cases involving concolic tracing, however, will need to define some policy around which branches they want to solve.
The [`SimpleConcolicMutationalStage`](https://docs.rs/libafl/latest/libafl/stages/concolic/struct.SimpleConcolicMutationalStage.html) can be used for testing purposes.
It will attempt to solve all branches, like the original simple backend from SymCC, using Z3.
### Example

View File

@ -17,7 +17,7 @@ If you are on Windows, you'll need to install llvm tools.
LibAFL uses Frida's [__Stalker__](https://frida.re/docs/stalker/) to trace the execution of your program and instrument your harness.
Thus, you have to compile your harness to a dynamic library. Frida instruments your PUT after dynamically loading it.
In our `frida_libpng` example, we load the dynamic library and find the symbol to harness as follows:
```rust,ignore
let lib = libloading::Library::new(module_name).unwrap();
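// Hypothetical continuation (not shown in this diff hunk): resolve the harness
// symbol from the loaded library. The symbol name and function signature below
// are illustrative assumptions, not taken from the example.
let func: libloading::Symbol<unsafe extern "C" fn(data: *const u8, size: usize) -> i32> =
    unsafe { lib.get(b"LLVMFuzzerTestOneInput") }.unwrap();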
@ -28,9 +28,9 @@ In our `frida_libpng` example, we load the dynamic library and find the symbol t
## `FridaInstrumentationHelper` and Runtimes
To use functionalities that Frida offers, we'll first need to obtain a `Gum` object by `Gum::obtain()`.
In LibAFL, we use the `FridaInstrumentationHelper` struct to manage frida-related state. `FridaInstrumentationHelper` is a key component that sets up the [__Transformer__](https://frida.re/docs/stalker/#transformer) that is used to generate the instrumented code. It also initializes the `Runtimes` that offer various instrumentations.
We have `CoverageRuntime` that can track the edge coverage, `AsanRuntime` for address sanitizer, `DrCovRuntime` that uses [__DrCov__](https://dynamorio.org/page_drcov.html) for coverage collection (to be imported in coverage tools like Lighthouse, bncov, dragondance, ...), and `CmpLogRuntime` for cmplog instrumentation.
All of these runtimes can be slotted into `FridaInstrumentationHelper` at build time.
@ -53,12 +53,12 @@ Combined with any `Runtime` you'd like to use, you can initialize the `FridaInst
## Running the Fuzzer
After setting up the `FridaInstrumentationHelper`, you can obtain the pointer to the coverage map by calling `map_mut_ptr()`.
```rust,ignore
let edges_observer = HitcountsMapObserver::new(StdMapObserver::from_mut_ptr(
    "edges",
    frida_helper.map_mut_ptr().unwrap(),
    MAP_SIZE,
));
```
@ -73,7 +73,7 @@ You can then link this observer to `FridaInProcessExecutor` as follows:
tuple_list!(
    edges_observer,
    time_observer,
    AsanErrorsObserver::from_static_asan_errors()
),
&mut fuzzer,
&mut state,
@ -83,5 +83,5 @@ You can then link this observer to `FridaInProcessExecutor` as follows:
);
```
And finally you can run the fuzzer.
See the `frida_` examples in [`./fuzzers`](https://github.com/AFLplusplus/LibAFL/tree/main/fuzzers/) for more information and, for Linux or full-system fuzzing, play around with `libafl_qemu`, another binary-only tracer.

View File

@ -1,6 +1,6 @@
# Using LibAFL in `no_std` environments
It is possible to use LibAFL in `no_std` environments, e.g. on custom platforms like microcontrollers, kernels, hypervisors, and more.
You can simply add LibAFL to your `Cargo.toml` file:
@ -16,7 +16,7 @@ cargo build --no-default-features --target aarch64-unknown-none
## Use custom timing
The minimum amount of support LibAFL needs for a `no_std` environment is a monotonically increasing timestamp.
For this, anywhere in your project you need to implement the `external_current_millis` function, which returns the current time in milliseconds.
```c

View File

@ -2,12 +2,12 @@
NYX supports both source-based and binary-only fuzzing.
Currently, `libafl_nyx` only supports [afl++](https://github.com/AFLplusplus/AFLplusplus)'s instrumentation type. To install it, you can use `sudo apt install aflplusplus`. Or compile from the source:
```bash
git clone https://github.com/AFLplusplus/AFLplusplus
cd AFLplusplus
make all # this will not compile afl's additional extensions
```
Then you should compile the target with the afl++ compiler wrapper:
@ -20,9 +20,9 @@ export CXX=afl-clang-fast++
make
```
For binary-only fuzzing, Nyx uses Intel PT (Intel® Processor Trace). You can find the list of supported CPUs at <https://www.intel.com/content/www/us/en/support/articles/000056730/processors.html>.
## Preparing the Nyx working directory
This step is used to pack the target into Nyx's kernel. Don't worry, we have a template shell script in our [example](https://github.com/AFLplusplus/LibAFL/blob/main/fuzzers/nyx_libxml2_parallel/setup_libxml2.sh):
@ -49,7 +49,7 @@ python3 ./packer/packer/nyx_config_gen.py /tmp/nyx_libxml2/ Kernel || exit
## Standalone fuzzing
In the [example fuzzer](https://github.com/AFLplusplus/LibAFL/blob/main/fuzzers/nyx_libxml2_standalone/src/main.rs) you first need to run `./setup_libxml2.sh`. It will prepare your target and create your nyx work directory in `/tmp/libxml2`. After that, you can start to write your code.
First, to create `Nyxhelper`:
@ -57,21 +57,22 @@ First, to create `Nyxhelper`:
let share_dir = Path::new("/tmp/nyx_libxml2/");
let cpu_id = 0; // use first cpu
let parallel_mode = false; // close parallel_mode
let mut helper = NyxHelper::new(share_dir, cpu_id, true, parallel_mode, None).unwrap(); // we don't need to set the last parameter in standalone mode, we just use None here
```
Then, fetch `trace_bits`, create an observer and the `NyxExecutor`:
```rust,ignore
let observer = unsafe { StdMapObserver::from_mut_ptr("trace", helper.trace_bits, helper.map_size) };
let mut executor = NyxExecutor::new(&mut helper, tuple_list!(observer)).unwrap();
```
Finally, use them normally and pass them into `fuzzer.fuzz_loop(&mut stages, &mut executor, &mut state, &mut mgr)` to start fuzzing.
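For orientation, a rough sketch of the remaining pieces around `fuzz_loop`, reusing the component names from the baby fuzzer chapter (not taken from the Nyx example itself):
```rust,ignore
// Rough sketch: `feedback`/`objective` are assumed to have been built from the
// observer *before* it was moved into the executor above.
let scheduler = QueueScheduler::new();
let mut fuzzer = StdFuzzer::new(scheduler, feedback, objective);
let mutator = StdScheduledMutator::new(havoc_mutations());
let mut stages = tuple_list!(StdMutationalStage::new(mutator));
fuzzer
    .fuzz_loop(&mut stages, &mut executor, &mut state, &mut mgr)
    .expect("Error in the fuzzing loop");
```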
## Parallel fuzzing
In the [example fuzzer](https://github.com/AFLplusplus/LibAFL/blob/main/fuzzers/nyx_libxml2_parallel/src/main.rs) you first need to run `./setup_libxml2.sh` as described before.
Parallel fuzzing relies on [`Launcher`](../message_passing/spawn_instances.md), so the spawn logic should be written in the scope of the anonymous function `run_client`:
@ -90,7 +91,7 @@ let mut helper = NyxHelper::new(
    cpu_id, // current cpu id
    true, // open snap_mode
    parallel_mode, // open parallel mode
    Some(parent_cpu_id.id as u32), // the cpu-id of the main instance; there is only one main instance, other instances are treated as secondaries
)
.unwrap();
```
@ -98,11 +99,13 @@ let mut helper = NyxHelper::new(
Then you can fetch the trace_bits and create an observer and `NyxExecutor`:
```rust,ignore
let observer = unsafe { StdMapObserver::from_mut_ptr("trace", helper.trace_bits, helper.map_size) };
let mut executor = NyxExecutor::new(&mut helper, tuple_list!(observer)).unwrap();
```
Finally, open a `Launcher` as usual to start fuzzing:
```rust,ignore
match Launcher::builder()
@ -118,6 +121,6 @@ match Launcher::builder()
{
    Ok(()) => (),
    Err(Error::ShuttingDown) => println!("Fuzzing stopped by user. Good bye."),
    Err(err) => panic!("Failed to run launcher: {err:?}"),
}
```

View File

@ -17,7 +17,7 @@ You can find a complete version of this tutorial as an example fuzzer in [`fuzze
We use cargo to create a new Rust project with LibAFL as a dependency.
```console
$ cargo new baby_fuzzer
$ cd baby_fuzzer
```
@ -25,11 +25,18 @@ $ cd baby_fuzzer
The generated `Cargo.toml` looks like the following:
```toml
{{#include ../../listings/baby_fuzzer/listing-01/Cargo.toml}}
[package]
name = "baby_fuzzer"
version = "0.1.0"
authors = ["Your Name <you@example.com>"]
edition = "2018"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
```
In order to use LibAFL we must add it as a dependency by adding `libafl = { path = "path/to/libafl/" }` under `[dependencies]`.
That path actually needs to point to the `libafl` directory within the cloned repo, not the root of the repo itself.
You can use the LibAFL version from [crates.io](https://crates.io/crates/libafl) if you want; in this case, you have to use `libafl = "*"` to get the latest version (or set it to the current version).
As we are going to fuzz Rust code, we want a panic to not simply cause the program to exit, but to raise an `abort` that can then be caught by the fuzzer.
@ -40,9 +47,27 @@ Alongside this setting, we add some optimization flags for the compilation, when
The final `Cargo.toml` should look similar to the following:
```toml
{{#include ../../listings/baby_fuzzer/listing-02/Cargo.toml}}
[package]
name = "baby_fuzzer"
version = "0.1.0"
authors = ["Your Name <you@example.com>"]
edition = "2018"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
libafl = { path = "path/to/libafl/" }
[profile.dev]
panic = "abort"
[profile.release]
panic = "abort"
lto = true
codegen-units = 1
opt-level = 3
debug = true
```
## The function under test
@ -51,32 +76,52 @@ To start, we create the closure that we want to fuzz. It takes a buffer as input
`ExitKind` is used to inform the fuzzer about the harness' exit status.
```rust
{{#rustdoc_include ../../listings/baby_fuzzer/listing-03/src/main.rs}}
extern crate libafl;
use libafl::{
    bolts::AsSlice,
    inputs::{BytesInput, HasTargetBytes},
    executors::ExitKind,
};

fn main() {
    let mut harness = |input: &BytesInput| {
        let target = input.target_bytes();
        let buf = target.as_slice();
        if buf.len() > 0 && buf[0] == 'a' as u8 {
            if buf.len() > 1 && buf[1] == 'b' as u8 {
                if buf.len() > 2 && buf[2] == 'c' as u8 {
                    panic!("=)");
                }
            }
        }
        ExitKind::Ok
    };
    // To test the panic:
    let input = BytesInput::new(Vec::from("abc"));
    #[cfg(feature = "panic")]
    harness(&input);
}
```
To test the crash manually, you can add a feature in `Cargo.toml` that enables the call that triggers the panic:
```toml
{{#include ../../listings/baby_fuzzer/listing-03/Cargo.toml:23:25}}
```
And then run the program with that feature activated:
```console
$ cargo run -F panic
```
And you should see the program crash as expected.
## Generating and running some tests
One of the main components that a LibAFL-based fuzzer uses is the State, a container of the data that will evolve during the fuzzing process.
It includes all state, such as the Corpus of inputs, the current RNG state, and potential Metadata for the testcases and run.
In our `main` we create a basic State instance like the following:
```rust
{{#rustdoc_include ../../listings/baby_fuzzer/listing-04/src/main.rs:state}}
// create a State from scratch
let mut state = StdState::new(
    // RNG
    StdRand::with_seed(current_nanos()),
    // Corpus that will be evolved, we keep it in memory for performance
    InMemoryCorpus::new(),
    // Corpus in which we store solutions (crashes in this example),
    // on disk so the user can get them after stopping the fuzzer
    OnDiskCorpus::new(PathBuf::from("./crashes")).unwrap(),
    &mut (),
    &mut (),
).unwrap();
```
- The first parameter is a random number generator, that is part of the fuzzer state, in this case, we use the default one `StdRand`, but you can choose a different one. We seed it with the current nanoseconds.
@ -84,26 +129,43 @@ In our `main` we create a basic State instance like the following:
To avoid a type annotation error, you can use `InMemoryCorpus::<BytesInput>::new()` to replace `InMemoryCorpus::new()`. If not, the type annotation will be automatically inferred when adding the `executor`.
- The third parameter is another Corpus that stores the "solution" testcases for the fuzzer. For our purpose, the solution is the input that triggers the panic. In this case, we want to store it to disk under the `crashes` directory, so we can inspect it.
- The last two parameters are feedback and objective, we will discuss them later.
Another required component is the **EventManager**. It handles some events such as the addition of a testcase to the corpus during the fuzzing process. For our purpose, we use the simplest one that just displays the information about these events to the user using a `Monitor` instance.
```rust
{{#rustdoc_include ../../listings/baby_fuzzer/listing-04/src/main.rs:event_manager}}
// The Monitor trait defines how the fuzzer stats are displayed to the user
let mon = SimpleMonitor::new(|s| println!("{}", s));
// The event manager handles the various events generated during the fuzzing loop
// such as the notification of the addition of a new item to the corpus
let mut mgr = SimpleEventManager::new(mon);
```
In addition, we have the **Fuzzer**, an entity that contains some actions that alter the State. One of these actions is the scheduling of the testcases to the fuzzer using a **Scheduler**.
We create it as `QueueScheduler`, a scheduler that serves testcases to the fuzzer in a FIFO fashion.
```rust ```rust,ignore
{{#rustdoc_include ../../listings/baby_fuzzer/listing-04/src/main.rs:scheduler_fuzzer}} // A queue policy to get testcasess from the corpus
let scheduler = QueueScheduler::new();
// A fuzzer with feedbacks and a corpus scheduler
let mut fuzzer = StdFuzzer::new(scheduler, (), ());
``` ```
Last but not least, we need an **Executor**, the entity responsible for running our program under test. In this example, we want to run the harness function in-process (without forking off a child, for example), so we use the `InProcessExecutor`.

```rust,ignore
// Create the executor for an in-process function
let mut executor = InProcessExecutor::new(
    &mut harness,
    (),
    &mut fuzzer,
    &mut state,
    &mut mgr,
)
.expect("Failed to create the Executor");
```

It takes a reference to the harness, the state, and the event manager. We will discuss the second parameter later.
Now we have the 4 major entities ready for running our tests, but we still cannot generate testcases.
For this purpose, we use a **Generator**, `RandPrintablesGenerator`, which generates a string of printable bytes.

```rust,ignore
use libafl::generators::RandPrintablesGenerator;

// Generator of printable bytearrays of max size 32
let mut generator = RandPrintablesGenerator::new(32);

// Generate 8 initial inputs
state
    .generate_initial_inputs(&mut fuzzer, &mut executor, &mut generator, &mut mgr, 8)
    .expect("Failed to generate the initial corpus");
```
Now you can prepend the necessary `use` directives to your main.rs and compile the fuzzer.

```rust
extern crate libafl;

use std::path::PathBuf;
use libafl::{
    bolts::{AsSlice, current_nanos, rands::StdRand},
    corpus::{InMemoryCorpus, OnDiskCorpus},
    events::SimpleEventManager,
    executors::{inprocess::InProcessExecutor, ExitKind},
    fuzzer::StdFuzzer,
    generators::RandPrintablesGenerator,
    inputs::{BytesInput, HasTargetBytes},
    monitors::SimpleMonitor,
    schedulers::QueueScheduler,
    state::StdState,
};
```
When running, you should see something similar to:

```console
$ cargo run
Finished dev [unoptimized + debuginfo] target(s) in 0.04s
Running `target/debug/baby_fuzzer`
```
Now we want to turn our simple fuzzer into a feedback-based one and increase the chance of generating inputs that reach the panic.
An **Observer** can record information about properties of a fuzzing run and feed it back to the fuzzer. We use the `StdMapObserver`, the default observer that uses a map to keep track of covered elements. In our fuzzer, each condition is mapped to an entry of such a map.
We represent such a map as a `static mut` variable.
As we don't rely on any instrumentation engine, we have to manually track the satisfied conditions by calling `signals_set` in our harness:
```rust
extern crate libafl;
use libafl::{
    bolts::AsSlice,
    inputs::{BytesInput, HasTargetBytes},
    executors::ExitKind,
};

// Coverage map with explicit assignments due to the lack of instrumentation
static mut SIGNALS: [u8; 16] = [0; 16];

fn signals_set(idx: usize) {
    unsafe { SIGNALS[idx] = 1 };
}

// The closure that we want to fuzz
let mut harness = |input: &BytesInput| {
    let target = input.target_bytes();
    let buf = target.as_slice();
    signals_set(0); // set SIGNALS[0]
    if buf.len() > 0 && buf[0] == b'a' {
        signals_set(1); // set SIGNALS[1]
        if buf.len() > 1 && buf[1] == b'b' {
            signals_set(2); // set SIGNALS[2]
            if buf.len() > 2 && buf[2] == b'c' {
                panic!("=)");
            }
        }
    }
    ExitKind::Ok
};
```
The observer can be created directly from the `SIGNALS` map, in the following way:

```rust,ignore
// Create an observation channel using the signals map
let observer = StdMapObserver::new("signals", unsafe { &mut SIGNALS });
```
The observers are usually kept in the corresponding executor, as they keep track of information that is valid for just one run. We then have to modify our `InProcessExecutor` creation to include the observer as follows:

```rust,ignore
// Create the executor for an in-process function with just one observer
let mut executor = InProcessExecutor::new(
    &mut harness,
    tuple_list!(observer),
    &mut fuzzer,
    &mut state,
    &mut mgr,
)
.expect("Failed to create the Executor");
```
Now that the fuzzer can observe which condition is satisfied, we need a way to rate an input as interesting (i.e. worth adding to the corpus) based on this observation. Here comes the notion of Feedback.
We use `MaxMapFeedback`, a feedback that implements a novelty search over the map of the MapObserver. Basically, if there is a value in the observer's map that is greater than the maximum value registered so far for the same entry, it rates the input as interesting and updates its state.

**Objective Feedback** is another kind of Feedback, one that decides if an input is a "solution". An input rated interesting by the objective is saved to the solutions (`./crashes` in our case) rather than to the corpus. We use `CrashFeedback` to tell the fuzzer that an input causing the program to crash is a solution for us.

We need to update our State creation to include the feedback and the objective, and pass both to the Fuzzer as well:
```rust,ignore
extern crate libafl;
use libafl::{
    bolts::{current_nanos, rands::StdRand, tuples::tuple_list},
    corpus::{InMemoryCorpus, OnDiskCorpus},
    feedbacks::{MaxMapFeedback, CrashFeedback},
    fuzzer::StdFuzzer,
    state::StdState,
    observers::StdMapObserver,
};

// Feedback to rate the interestingness of an input
let mut feedback = MaxMapFeedback::new(&observer);

// A feedback to choose if an input is a solution or not
let mut objective = CrashFeedback::new();

// create a State from scratch
let mut state = StdState::new(
    // RNG
    StdRand::with_seed(current_nanos()),
    // Corpus that will be evolved, we keep it in memory for performance
    InMemoryCorpus::new(),
    // Corpus in which we store solutions (crashes in this example),
    // on disk so the user can get them after stopping the fuzzer
    OnDiskCorpus::new(PathBuf::from("./crashes")).unwrap(),
    &mut feedback,
    &mut objective,
).unwrap();

// ...

// A fuzzer with feedbacks and a corpus scheduler
let mut fuzzer = StdFuzzer::new(scheduler, feedback, objective);
```
## The actual fuzzing

Now, we can run the program, but the outcome is not so different from the previous one, as the random generator does not take into account what we save as interesting in the corpus. To do that, we need to plug in a Mutator.

**Stages** perform actions on individual inputs, taken from the corpus.
For instance, the `MutationalStage` executes the harness several times in a row, every time with mutated inputs.
As the last step, we create a MutationalStage that uses a mutator inspired by the havoc mutator of AFL.

```rust,ignore
use libafl::{
    mutators::scheduled::{havoc_mutations, StdScheduledMutator},
    stages::mutational::StdMutationalStage,
    fuzzer::Fuzzer,
};

// ...

// Setup a mutational stage with a basic bytes mutator
let mutator = StdScheduledMutator::new(havoc_mutations());
let mut stages = tuple_list!(StdMutationalStage::new(mutator));

fuzzer
    .fuzz_loop(&mut stages, &mut executor, &mut state, &mut mgr)
    .expect("Error in the fuzzing loop");
```
`fuzz_loop` will request a testcase from the fuzzer for each iteration, using the scheduler, and then it will invoke the stage.

After adding this code, we have a proper fuzzer that can run and find the input that panics the function in less than a second.

```console
$ cargo run
Compiling baby_fuzzer v0.1.0 (/home/andrea/Desktop/baby_fuzzer)
Finished dev [unoptimized + debuginfo] target(s) in 1.56s
```
Examples can be found under `./fuzzer`.
| baby_fuzzer_nautilus | [nautilus](https://www.ndss-symposium.org/wp-content/uploads/2019/02/ndss2019_04A-3_Aschermann_paper.pdf) is a **coverage guided, grammar based** fuzzer |
| baby_fuzzer_tokens | basic **token level** fuzzer with token level mutations |
| baby_fuzzer_with_forkexecutor | example for **InProcessForkExecutor** |
| baby_no_std | a minimalistic example how to create a libafl based fuzzer that works on **`no_std`** environments like TEEs, Kernels or on bare metal |
The Corpus is where testcases are stored. We define a Testcase as an Input and a set of related metadata.
A Corpus can store testcases in different ways, for example on disk, or in memory, or it can implement a cache to speed up on-disk storage.

Usually, a testcase is added to the Corpus when it is considered as interesting, but a Corpus is also used to store testcases that fulfill an objective (like crashing the program under test, for instance).

Related to the Corpus is the way in which the next testcase (the one the fuzzer will ask for) is retrieved from the Corpus. The taxonomy for this handling in LibAFL is the Scheduler, the entity representing the policy used to pop testcases from the Corpus, in a FIFO fashion for instance.

Speaking about the code, [`Corpus`](https://docs.rs/libafl/latest/libafl/corpus/trait.Corpus.html) and [`Scheduler`](https://docs.rs/libafl/latest/libafl/schedulers/trait.Scheduler.html) are traits.
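As a rough sketch of how these two entities are used together (the types below are the LibAFL types named above, but constructor and method signatures vary between versions, so treat this as illustrative only):

```rust,ignore
use libafl::{
    corpus::{Corpus, InMemoryCorpus, Testcase},
    inputs::BytesInput,
    schedulers::QueueScheduler,
};

// A Corpus stores Testcases (Input + metadata), here kept in memory
let mut corpus = InMemoryCorpus::<BytesInput>::new();
corpus
    .add(Testcase::new(BytesInput::new(b"seed".to_vec())))
    .unwrap();

// A Scheduler decides which stored testcase is fuzzed next, FIFO in this case
let scheduler = QueueScheduler::new();
```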
So the Executor is, for instance, responsible for informing the program about the input that the fuzzer wants to run.
In our model, it can also hold a set of Observers connected with each execution.

In Rust, we bind this concept to the [`Executor`](https://docs.rs/libafl/latest/libafl/executors/trait.Executor.html) trait. A structure implementing this trait must also implement [`HasObservers`](https://docs.rs/libafl/latest/libafl/executors/trait.HasObservers.html) if it wants to hold a set of Observers.

By default, we implement some commonly used Executors such as the [`InProcessExecutor`](https://docs.rs/libafl/latest/libafl/executors/inprocess/type.InProcessExecutor.html), in which the target is a harness function providing in-process crash detection. Another Executor is the [`ForkserverExecutor`](https://docs.rs/libafl/latest/libafl/executors/forkserver/struct.ForkserverExecutor.html) that implements an AFL-like mechanism to spawn child processes to fuzz.
A common pattern when creating an Executor is wrapping an existing one; for instance, [`TimeoutExecutor`](https://docs.rs/libafl/0.6.1/libafl/executors/timeout/struct.TimeoutExecutor.html) wraps an executor and installs a timeout callback before calling the original run function of the wrapped executor.
## InProcessExecutor

Let's begin with the base case: the `InProcessExecutor`.

When you want to execute the harness as fast as possible, you will most probably want to use this `InProcessExecutor`.

One thing to note here: when your harness is likely to have heap corruption bugs, you want to use another allocator so that a corrupted heap does not affect the fuzzer itself (for example, we adopt MiMalloc in some of our fuzzers). Alternatively, you can compile your harness with Address Sanitizer to make sure you can catch these heap bugs.
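As a small sketch of that allocator swap (assuming the `mimalloc` crate has been added to your Cargo.toml; this is not required by LibAFL itself):

```rust,ignore
// Route the fuzzer's own allocations through MiMalloc, so heap corruption
// caused by the target is less likely to break the fuzzer process itself.
use mimalloc::MiMalloc;

#[global_allocator]
static GLOBAL: MiMalloc = MiMalloc;
```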
## ForkserverExecutor

Next, we'll take a look at the `ForkserverExecutor`. In this case, it is `afl-cc` (from AFL/AFLplusplus) that compiles the harness code, and therefore, we can't use `EDGES_MAP` anymore. Fortunately we have [_a way_](https://github.com/AFLplusplus/AFLplusplus/blob/2e15661f184c77ac1fbb6f868c894e946cbb7f17/instrumentation/afl-compiler-rt.o.c#L270) to tell the forkserver which map to record the coverage in.

As you can see from the forkserver example,
```rust,ignore
let mut shmem = StdShMemProvider::new().unwrap().new_shmem(MAP_SIZE).unwrap();
// let the forkserver know the shmid
shmem.write_to_env("__AFL_SHM_ID").unwrap();
let mut shmem_buf = shmem.as_slice_mut();
```
Here we make a shared memory region, `shmem`, and write its ID to the environment variable `__AFL_SHM_ID`. The instrumented binary, or the forkserver, then finds this shared memory region (from the aforementioned env var) and records its coverage there. On your fuzzer side, you can pass this shmem map to your `Observer` to obtain coverage feedback combined with any `Feedback`.
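A sketch of that last step (constructor signatures differ slightly between LibAFL versions, so adapt as needed):

```rust,ignore
// Wrap the shared-memory buffer in a map observer, then derive a coverage
// feedback from it; the forkserver writes edge hits into this buffer.
let edges_observer = StdMapObserver::new("shared_mem", shmem_buf);
let mut feedback = MaxMapFeedback::new(&edges_observer);
```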
Finally, we'll talk about the `InProcessForkExecutor`.

`InProcessForkExecutor` differs from `InProcessExecutor` in only one way: it forks before running the harness, and that's it.

But why do we want to do so? Well, under some circumstances, you may find your harness pretty unstable, or your harness wreaks havoc on global state. In this case, you want to fork before executing, so the harness runs in a child process and doesn't break things.

However, we have to take care of the shared memory; it's the child process that runs the harness code and writes the coverage to the map.
On your fuzzer side, you can allocate a shared memory region and make the `EDGES_PTR` point to it:
```rust,ignore
let mut shmem;
unsafe {
    shmem = StdShMemProvider::new().unwrap().new_shmem(EDGES_MAP_SIZE_IN_USE).unwrap();
}
let shmem_buf = shmem.as_slice_mut();
unsafe {
    EDGES_PTR = shmem_buf.as_ptr();
}
```
The concept of "interestingness" is abstract, but typically it is related to a novelty search (i.e. interesting inputs are those that reach a previously unseen part of the program).
As an example, given an Observer that reports all the sizes of memory allocations, a maximization Feedback can be used to maximize these sizes to spot pathological inputs in terms of memory consumption.

In terms of code, the library offers the [`Feedback`](https://docs.rs/libafl/latest/libafl/feedbacks/trait.Feedback.html) trait.
It is used to implement functors that, given the state of the observers from the last execution, tell if the execution was interesting.
So to speak, it reduces the observations to a boolean result of [`is_interesting`](https://docs.rs/libafl/latest/libafl/feedbacks/trait.Feedback.html#tymethod.is_interesting) - or not.
For this, a `Feedback` can store anything it wants to persist in the fuzzer's state.
This might be, for instance, the cumulative map of all edges seen so far, in the case of a feedback based on edge coverage.
This can be achieved by adding `Metadata` in [`init_state`](https://docs.rs/libafl/latest/libafl/feedbacks/trait.Feedback.html#method.init_state) and accessing it later in `is_interesting`.
`Feedback` can also add custom metadata to a newly created [`Testcase`](https://docs.rs/libafl/latest/libafl/corpus/testcase/struct.Testcase.html) using [`append_metadata`](https://docs.rs/libafl/latest/libafl/feedbacks/trait.Feedback.html#method.append_metadata).
Multiple Feedbacks can be combined into a boolean expression, considering for instance an execution as interesting if it triggers new code paths or executes in less time compared to the average execution time, using [`feedback_or`](https://docs.rs/libafl/latest/libafl/macro.feedback_or.html).

On top, logic operators like `feedback_or` and `feedback_and` have a `_fast` variant (e.g. `feedback_or_fast`) where the second feedback will not be evaluated if the first operand already answers the `interestingness` question, so as to save precious performance.
Using `feedback_and_fast` in combination with [`ConstFeedback`](https://docs.rs/libafl/latest/libafl/feedbacks/enum.ConstFeedback.html#method.new), certain feedbacks can be disabled dynamically.
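A short sketch of these combinators (the observers are assumed to exist already, and the exact constructor names may differ between LibAFL versions):

```rust,ignore
// Combine a coverage feedback with a time feedback: the result is
// interesting if either operand reports the execution as interesting.
let mut feedback = feedback_or!(
    MaxMapFeedback::new(&edges_observer),
    TimeFeedback::new(&time_observer)
);

// Dynamically disabled feedback: the constant `false` makes the whole
// expression uninteresting, and `_fast` skips evaluating the second operand.
let mut disabled = feedback_and_fast!(
    ConstFeedback::new(false),
    MaxMapFeedback::new(&edges_observer)
);
```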
## Objectives

While feedbacks are commonly used to decide if an [`Input`](https://docs.rs/libafl/latest/libafl/inputs/trait.Input.html) should be kept for future mutations, they serve a double purpose as so-called `Objective Feedbacks`.
In this case, the `interestingness` of a feedback indicates if an `Objective` has been hit.
Commonly, these objectives would be a crash or a timeout, but they can also be used to detect if specific parts of the program have been reached, for sanitization, or a differential fuzzing success.
Objectives use the same trait as a normal [`Feedback`](https://docs.rs/libafl/latest/libafl/feedbacks/trait.Feedback.html) and the implementations can be used interchangeably.
The only difference is that `interesting` Objectives won't be mutated further, and are counted as `Solutions`, the result of a successful fuzzing campaign.
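A typical objective, sketched with the feedbacks mentioned above (again, exact macro and constructor names may vary by version):

```rust,ignore
// An input is a solution if it crashes or times out; the `_fast` variant
// skips the timeout check once a crash has already been detected.
let mut objective = feedback_or_fast!(CrashFeedback::new(), TimeoutFeedback::new());
```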
Typically, a random generator is used to generate random inputs.

Generators are traditionally less used in Feedback-driven Fuzzing, but there are exceptions, like Nautilus, which uses a Grammar generator to create the initial corpus and a sub-tree Generator as a mutation of its grammar Mutator.

In the code, [`Generator`](https://docs.rs/libafl/latest/libafl/generators/trait.Generator.html) is a trait.
In our model of an abstract fuzzer, we define the Input as the internal representation of the program input.
In the straightforward case, the input of the program is a byte array, and in fuzzers such as AFL we store and manipulate exactly these byte arrays.

But that is not always the case. A program can expect inputs that are not linear byte arrays (e.g. a sequence of syscalls forming a use case or protocol), and the fuzzer does not necessarily represent the Input in the same way that the program consumes it.

In the case of a grammar fuzzer for instance, the Input is generally an Abstract Syntax Tree, because it is a data structure that can be easily manipulated while maintaining its validity, but the program expects a byte array as input, so just before the execution, the tree is serialized to a sequence of bytes.

In the Rust code, an [`Input`](https://docs.rs/libafl/latest/libafl/inputs/trait.Input.html) is a trait that can be implemented only by structures that are serializable and have only owned data as fields.

While most fuzzers use a normal `BytesInput`, more advanced ones use inputs that include special inputs for grammar fuzzing ([GramatronInput](https://docs.rs/libafl/latest/libafl/inputs/gramatron/struct.GramatronInput.html) or `NautilusInput` on Rust nightly), as well as the token-level [EncodedInput](https://docs.rs/libafl/latest/libafl/inputs/encoded/struct.EncodedInput.html).
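To illustrate the idea of a non-byte-array input, here is a minimal sketch of a custom Input type; `SyscallSequenceInput` is a hypothetical name, and the exact methods required by the `Input` trait differ between LibAFL versions:

```rust,ignore
use libafl::inputs::Input;
use serde::{Deserialize, Serialize};

// A sequence of syscall numbers instead of a flat byte array;
// only owned, serializable data is allowed as fields.
#[derive(Clone, Debug, Hash, Serialize, Deserialize)]
pub struct SyscallSequenceInput {
    pub calls: Vec<u32>,
}

impl Input for SyscallSequenceInput {
    // Name used when the input is written to disk (signature may vary by version)
    fn generate_name(&self, idx: usize) -> String {
        format!("syscalls-{idx}")
    }
}
```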
# Mutator

The Mutator is an entity that takes one or more Inputs and generates a new instance of Input derived from its inputs.

Mutators can be composed, and they are generally linked to a specific Input type.

There can be, for instance, a Mutator that applies more than a single type of mutation to the input. Consider a generic Mutator for a byte stream: bit flip is just one of the possible mutations, but not the only one; there is also, for instance, the random replacement of a byte or the copy of a chunk.

In LibAFL, [`Mutator`](https://docs.rs/libafl/latest/libafl/mutators/trait.Mutator.html) is a trait.
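A brief sketch of such composition, using a few of the byte-level mutations shipped with LibAFL (module paths and the location of `tuple_list!` vary between versions):

```rust,ignore
use libafl::mutators::{
    mutations::{BitFlipMutator, ByteRandMutator, BytesCopyMutator},
    scheduled::StdScheduledMutator,
};

// A scheduled mutator that picks one of these mutations at random per step:
// a bit flip, a random byte replacement, or the copy of a chunk.
let mutator = StdScheduledMutator::new(tuple_list!(
    BitFlipMutator::new(),
    ByteRandMutator::new(),
    BytesCopyMutator::new(),
));
```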
An Observer is an entity that provides information observed during the execution of the program under test.
The information contained in the Observer is not preserved across executions, but it may be serialized and passed on to other nodes if an `Input` is considered `interesting` and added to the `Corpus`.

As an example, the coverage map, filled during the execution to report the executed edges, used by fuzzers such as AFL and HonggFuzz, can be considered an observation. Another `Observer` can collect the time spent executing a run, the program output, or a more advanced observation, like the maximum stack depth at runtime.
This information is an observation of a dynamic property of the program.

In terms of code, in the library this entity is described by the [`Observer`](https://docs.rs/libafl/latest/libafl/observers/trait.Observer.html) trait.

In addition to holding the volatile data connected with the last execution of the target, the structures implementing this trait can define some execution hooks that are executed before and after each fuzz case. In these hooks, the observer can modify the fuzzer's state.
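As a small sketch, two observers that appear throughout this book, a coverage map and an execution-time observer (the map variable and exact constructors depend on your setup and LibAFL version):

```rust,ignore
// Observe a static coverage map (as in the baby_fuzzer example) and the run time.
let map_observer = StdMapObserver::new("signals", unsafe { &mut SIGNALS });
let time_observer = TimeObserver::new("time");
```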
# Stage

A Stage is an entity that operates on a single Input received from the Corpus.

For instance, a Mutational Stage, given an input of the corpus, applies a Mutator and executes the generated input one or more times. How many times this has to be done can be scheduled; AFL, for instance, uses a performance score of the input to choose how many times the havoc mutator should be invoked. This can also depend on other parameters, for instance the length of the input if we want to just apply a sequential bitflip, or it can be a fixed value.

A stage can also be an analysis stage, for instance the Colorization stage of Redqueen that aims to introduce more entropy in a testcase, or the Trimming stage of AFL that aims to reduce the size of a testcase.

There are several stages in the LibAFL codebase implementing the [`Stage`](https://docs.rs/libafl/latest/libafl/stages/trait.Stage.html) trait.
The LibAFL code reuse mechanism is based on components, rather than sub-classes.
Thinking about similar fuzzers, you can observe that most of the time the data structures that are modified are the ones related to testcases and the fuzzer global state.

Besides the entities previously described, we introduce the [`Testcase`](https://docs.rs/libafl/latest/libafl/corpus/testcase/struct.Testcase.html) and [`State`](https://docs.rs/libafl/latest/libafl/state/struct.StdState.html) entities. The Testcase is a container for an Input stored in the Corpus and its metadata (so, in the implementation, the Corpus stores Testcases) and the State contains all the metadata that are evolved while running the fuzzer, Corpus included.

The State, in the implementation, contains only owned objects that are serializable, and it is serializable itself. Some fuzzers may want to serialize their state when pausing, or, when doing in-process fuzzing, serialize on crash and deserialize in the new process to continue fuzzing with all the metadata preserved.

Additionally, we group the entities that are "actions", like the `CorpusScheduler` and the `Feedbacks`, in a common place, the [`Fuzzer`](https://docs.rs/libafl/latest/libafl/fuzzer/struct.StdFuzzer.html).
A metadata in LibAFL is a self-contained structure that holds associated data to the State or to a Testcase.
In terms of code, a metadata can be defined as a Rust struct registered in the SerdeAny register.

```rust
# extern crate libafl_bolts;
# extern crate serde;
use libafl_bolts::SerdeAny;
use serde::{Serialize, Deserialize};

#[derive(Debug, Serialize, Deserialize, SerdeAny)]
pub struct MyMetadata {
    // ... the fields holding the associated data ...
}
```
The struct must be static, so it cannot hold references to borrowed objects.

As an alternative to `derive(SerdeAny)`, which is a proc-macro in `libafl_derive`, the user can use `libafl_bolts::impl_serdeany!(MyMetadata);`.

## Usage

Metadata objects are primarily intended to be used inside [`SerdeAnyMap`](https://docs.rs/libafl_bolts/latest/libafl_bolts/serdeany/serdeany_registry/struct.SerdeAnyMap.html) and [`NamedSerdeAnyMap`](https://docs.rs/libafl_bolts/latest/libafl_bolts/serdeany/serdeany_registry/struct.NamedSerdeAnyMap.html).

With these maps, the user can retrieve instances by type (and name). Internally, the instances are stored as SerdeAny trait objects.

Structs that want to have a set of metadata must implement the [`HasMetadata`](https://docs.rs/libafl/latest/libafl/state/trait.HasMetadata.html) trait.

By default, Testcase and State implement it and each hold a SerdeAnyMap.
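A short usage sketch with the `MyMetadata` struct from above (method names follow recent LibAFL versions and may differ in older ones):

```rust,ignore
// Attach an instance of MyMetadata to the fuzzer State...
state.add_metadata(MyMetadata { /* ... */ });

// ...and retrieve it later by type through the underlying SerdeAnyMap.
if let Some(meta) = state.metadata_map().get::<MyMetadata>() {
    // use the stored metadata here
}
```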
# Migrating from LibAFL <0.11 to 0.11
We moved the old `libafl::bolts` module to its own crate called `libafl_bolts`.
For this reason, imports for types from LibAFL bolts have changed in version 0.11; everything else should remain the same.
## Reasons for This Change
With this change, we can now use a lot of LibAFL's low-level features in projects that are unrelated to fuzzing, or just completely different from LibAFL.
Some cross-platform things in bolts include
* SerdeAnyMap: a map that stores and retrieves elements by type and is serializable and deserializable
* ShMem: A cross-platform (Windows, Linux, Android, MacOS) shared memory implementation
* LLMP: A fast, lock-free IPC mechanism via SharedMap
* Core_affinity: A maintained version of `core_affinity` that can be used to get core information and bind processes to cores
* Rands: Fast random number generators for fuzzing (like [RomuRand](http://www.romu-random.org/))
* MiniBSOD: get and print information about the current process state including important registers.
* Tuples: Haskell-like compile-time tuple lists
* Os: OS specific stuff like signal handling, windows exception handling, pipes, and helpers for `fork`
## What changed
You will need to move all `libafl::bolts::` imports to `libafl_bolts::` and add the crate dependency in your Cargo.toml (and specify feature flags there).
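For example, an import changes roughly like this (sketch):

```rust,ignore
// Before 0.11: bolts types lived inside the `libafl` crate
use libafl::bolts::rands::StdRand;

// From 0.11 on: the same types come from the separate `libafl_bolts` crate,
// added as its own dependency in Cargo.toml
use libafl_bolts::rands::StdRand;
```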
As the only exception, the `libafl::bolts::launcher::Launcher` has moved to `libafl::events::launcher::Launcher`, since it has fuzzer and `EventManager` specific code.
If you are using `prelude`, you may also need to add `libafl_bolts::prelude`.
That's it.
Enjoy using `libafl_bolts` in other projects.
# Migrating from <0.12 to 0.12
We deleted `TimeoutExecutor` and `TimeoutForkserverExecutor` and made it mandatory for `InProcessExecutor` and `ForkserverExecutor` to have a timeout. Now `InProcessExecutor` and `ForkserverExecutor` have a default timeout of 5 seconds.
## Reason for This Change

In 99% of cases, it is advised to have a timeout for the fuzzer. This is because we do not want the fuzzer to get stuck forever just because the target has hit a path that results in an infinite loop.
## What changed
You do not have to wrap the executor with `TimeoutExecutor` anymore. You can just use `InProcessExecutor::new()` to instantiate the executor with the default timeout, or use `InProcessExecutor::timeout(duration)` to start the executor with a customized timeout duration.
The executor is constrained to `EM` and `Z`, with each of their respective states being constrained to `E`'s state. It
is no longer necessary to explicitly define a generic for the input type, the state type, or the generic type, as these
are all present as associated types for `E`. Additionally, we don't even need to specify any details about the observers
(`OT` in the previous version) as the type does not need to be constrained and is not shared by other types.
See `fuzzers/` for examples of these changes.
If you implemented a Mutator, Executor, State, or another kind of component, you must update your implementation. The
main changes to the API are in the use of "Uses*" for associated types.

In many scenarios, Input, Observer, and State generics have been moved into traits with associated types (namely,
"UsesInput", "UsesObservers", and "UsesState"). These traits are required for many existing traits now and are very
straightforward to implement. In a majority of cases, you will have generics on your custom implementation or a fixed
type to implement this with. Thankfully, Rust will let you know when you need to implement this type.
After 0.9, all `Corpus` implementations are required to implement `UsesInput`. Also, `Corpus` no longer has a generic for
the input type (as it is now provided by the `UsesInput` impl). The migrated implementation is shown below:

```rust,ignore
// ...
```
Now, `Corpus` cannot accidentally be implemented for a type other than the one that
is fixed to the associated type for `UsesInput`.

A more complex example of migration can be found in the "Reasons for this change" section of this document.
## Observer Changes
Additionally, we changed the Observer API, as the API in 0.8 led to undefined behavior.
At the same time, we used the change to simplify the common case: creating a `StdMapObserver`
from `libafl_targets`' `EDGES_MAP`.
In the future, instead of using:
```rust,ignore
let edges = unsafe { &mut EDGES_MAP[0..EDGES_MAP_SIZE_IN_USE] };
let edges_observer = StdMapObserver::new("edges", edges);
```
creating the edges observer is as simple as using the new `std_edges_map_observer` function.
```rust,ignore
let edges_observer = unsafe { std_edges_map_observer("edges") };
```
Alternatively, `StdMapObserver::new` will still work, but now the whole method is marked as `unsafe`.
The reason is that the caller has to make sure `EDGES_MAP` (or other maps) are not moved or freed in memory,
for the lifetime of the `MapObserver`.
This means that the buffer should either be `static` or `Pin`.
LibAFL, as most Rust projects, can be built using `cargo` from the root directory of the project:

```sh
$ cargo build --release
```
Note that the `--release` flag is optional for development, but you need to add it to do fuzzing at a decent speed.
Slowdowns of 10x or more are not uncommon for Debug builds.

The LibAFL repository is composed of multiple crates.
## Crate List

For LibAFL, each crate has its self-contained purpose, and the user may not need to use all of them in their project.
Following the naming convention of the folders in the project's root, they are:

### [`libafl`](https://github.com/AFLplusplus/LibAFL/tree/main/libafl)
You can choose the features by using `features = ["feature1", "feature2", ...]` in your `Cargo.toml`.

Out of this list, by default, `std`, `derive`, and `rand_trait` are already set.
You can choose to disable them by setting `default-features = false` in your `Cargo.toml`.
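For instance, a dependency entry selecting features explicitly could look roughly like this (feature names taken from the defaults listed above; pick the ones your fuzzer needs):

```toml
[dependencies]
libafl = { version = "*", default-features = false, features = ["std", "derive", "rand_trait"] }
```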
### [`libafl_bolts`](https://github.com/AFLplusplus/LibAFL/tree/main/libafl_bolts)
The `libafl_bolts` crate is a minimal tool shed filled with useful low-level rust features, not necessarily related to fuzzers.
In it, you'll find highlights like:
- `core_affinity` to bind the current process to cores
- `SerdeAnyMap` a map that can store typed values in a serializable fashion
- `minibsod` to dump the current process state
- `LLMP`, "low level message passing", a lock-free IPC mechanism
- `Rand`, different fast (non-cryptographically secure) RNG implementations like RomuRand
- `ShMem`, a platform-independent shared memory implementation
- `Tuples`, a compile-time tuple implementation
... and much more.
### [`libafl_sugar`](https://github.com/AFLplusplus/LibAFL/tree/main/libafl_sugar)
The sugar crate abstracts away most of the complexity of LibAFL's API.
Instead of high flexibility, it aims to be high-level and easy-to-use.
It is not as flexible as stitching your fuzzer together from each individual component, but allows you to build a fuzzer with minimal lines of code.
To see it in action, take a look at the [`libfuzzer_stb_image_sugar` example fuzzer](https://github.com/AFLplusplus/LibAFL/tree/main/fuzzers/libfuzzer_stb_image_sugar).
### [`libafl_derive`](https://github.com/AFLplusplus/LibAFL/tree/main/libafl_derive)

This is a proc-macro crate paired with the `libafl` crate.

At the moment, it just exposes the `derive(SerdeAny)` macro that can be used to define Metadata structs; see the section about [Metadata](../design/metadata.md) for details.
### [`libafl_targets`](https://github.com/AFLplusplus/LibAFL/tree/main/libafl_targets)

This crate exposes code to interact with, and to instrument, targets.

Features are enabled and disabled at compile time using feature flags.
Currently, the supported flags are:

- `pcguard_edges` defines the SanitizerCoverage trace-pc-guard hooks to track the executed edges in a map.
- `pcguard_hitcounts` defines the SanitizerCoverage trace-pc-guard hooks to track the executed edges with the hitcounts (like AFL) in a map.
- `libfuzzer` exposes a compatibility layer with libFuzzer style harnesses.
- `value_profile` defines the SanitizerCoverage trace-cmp hooks to track the matching bits of each comparison in a map.
### [`libafl_cc`](https://github.com/AFLplusplus/LibAFL/tree/main/libafl_cc)

This is a library that provides utils to wrap compilers and create source-level fuzzers.

At the moment, only the Clang compiler is supported.
To understand it deeper, look through the tutorials and examples.
### [`libafl_frida`](https://github.com/AFLplusplus/LibAFL/tree/main/libafl_frida)

This library bridges LibAFL with Frida as an instrumentation backend.
With this crate, you can instrument targets on Linux/macOS/Windows/Android for coverage collection.
Additionally, it supports CmpLog and AddressSanitizer instrumentation and runtimes for aarch64.
See further information, as well as usage instructions, [later in the book](../advanced_features/frida.md).
### [`libafl_qemu`](https://github.com/AFLplusplus/LibAFL/tree/main/libafl_qemu)

This library bridges LibAFL with QEMU user-mode to fuzz ELF cross-platform binaries.

It works on Linux and can collect edge coverage without collisions!
It also supports a wide range of hooks and instrumentation options.
### [`libafl_nyx`](https://github.com/AFLplusplus/LibAFL/tree/main/libafl_nyx)

[Nyx](https://nyx-fuzz.com/) is a KVM-based snapshot fuzzer. `libafl_nyx` adds these capabilities to LibAFL. There is a specific section explaining the usage of libafl_nyx [later in the book](../advanced_features/nyx.md).

### [`libafl_concolic`](https://github.com/AFLplusplus/LibAFL/tree/main/libafl_concolic)

Concolic fuzzing is the combination of fuzzing and a symbolic execution engine.
This can reach greater depth than normal fuzzing, and is exposed in this crate.
The first step is to download LibAFL and all dependencies that are not automatically installed with Rust.

> previous command. Additionally, PowerShell-specific examples will use `>`
> rather than `$`.
While technically you do not need to install LibAFL and can use the version from crates.io directly, we do recommend downloading or cloning the GitHub version.
This gets you the example fuzzers, additional utilities, and the latest patches.
The easiest way to do this is to use `git`.
```sh
$ git clone https://github.com/AFLplusplus/LibAFL.git
```
Alternatively, on a UNIX-like machine, you can download a compressed archive and extract it with:

```sh
$ wget https://github.com/AFLplusplus/LibAFL/archive/main.tar.gz
$ tar xvf main.tar.gz
$ rm main.tar.gz
$ ls LibAFL-main # this is the extracted folder
```
## Clang installation
One of the external dependencies of LibAFL is the Clang C/C++ compiler.
While most of the code is written in pure Rust, we still need a C compiler because stable Rust does not yet support features that some parts of LibAFL may need, such as weak linking and LLVM builtins linking.
For these parts, we use C to expose the missing functionalities to our Rust codebase.
In addition, if you want to perform source-level fuzz testing of C/C++ applications,

View File

@ -4,10 +4,10 @@ Fuzzers are important tools for security researchers and developers alike.
A wide range of state-of-the-art tools like [AFL++](https://github.com/AFLplusplus/AFLplusplus), [libFuzzer](https://llvm.org/docs/LibFuzzer.html) or [honggfuzz](https://github.com/google/honggfuzz) are available to users. They do their job in a very effective way, finding thousands of bugs.
From the perspective of a power user, however, these tools are limited.
Their designs do not treat extensibility as a first-class citizen.
Usually, a fuzzer developer can choose to either fork one of these existing tools or to create a new fuzzer from scratch.
In any case, researchers end up with tons of fuzzers, all of which are incompatible with each other.
Their outstanding features cannot just be combined for new projects.
By reinventing the wheel over and over, we may completely miss out on features that are complex to reimplement.
To tackle this issue, we created LibAFL, a library that is _not just another fuzzer_, but a collection of reusable pieces for individual fuzzers.
@ -24,11 +24,11 @@ Some highlight features currently include:
This means it does not require a specific OS-dependent runtime to function.
Define an allocator and a way to map pages, and you are good to inject LibAFL into obscure targets like embedded devices, hypervisors, or maybe even WebAssembly?
- `adaptable`: Given years of experience fine-tuning *AFLplusplus* and our academic fuzzing background, we could incorporate recent fuzzing trends into LibAFL's design and make it future-proof.
To give an example, as opposed to old-school fuzzers, a `BytesInput` is just one of the potential forms of inputs:
feel free to use and mutate an Abstract Syntax Tree instead, for structured fuzzing.
- `scalable`: As part of LibAFL, we developed `Low Level Message Passing`, `LLMP` for short, which allows LibAFL to scale almost linearly over cores. That is, if you choose to use this feature - it is your fuzzer, after all.
Scaling to multiple machines over TCP is also possible, using LLMP's `broker2broker` feature.
- `fast`: We do everything we can at compile time so that the runtime overhead is as minimal as it can get.
- `bring your own target`: We support binary-only modes, like (full-system) QEMU-Mode and Frida-Mode with ASan and CmpLog, as well as multiple compilation passes for source-based instrumentation.
Of course, we also support custom instrumentation, as you can see in the Python example based on Google's Atheris.
- `usable`: This one is on you to decide. Dig right in!

View File

@ -1,11 +1,11 @@
# The LibAFL Fuzzing Library
<img align="right" src="https://raw.githubusercontent.com/AFLplusplus/Website/main/static/libafl_logo.svg" alt="LibAFL Logo" style="width: 256px; height: auto">
*by Andrea Fioraldi and Dominik Maier*
Welcome to LibAFL, the Advanced Fuzzing Library.
This book shall be a gentle introduction to the library.
This version of the LibAFL book is coupled with the release 1.0 beta of the library.

View File

@ -3,7 +3,7 @@
Configurations for individual fuzzer nodes are relevant for multi-node fuzzing.
This chapter describes how to run nodes with different configurations
in one fuzzing cluster.
This allows, for example, a node compiled with ASan to know that it needs to rerun new testcases from a node without ASan, while a node with the same binary/configuration does not.
Fuzzers with the same configuration can exchange Observers for new testcases and reuse them without rerunning the input.
A different configuration indicates that only the raw input can be exchanged; it must be rerun on the other node to capture relevant observations.
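As a minimal sketch of how a configuration is declared in code (assuming the `EventConfig` type from `libafl::events`; the configuration names here are made up), each node tags its events with a name that encodes its build:
```rust,ignore
use libafl::events::EventConfig;

// Hypothetical: ASan builds announce a different configuration than plain builds,
// so receiving nodes know whether they can reuse Observers or must rerun the input.
let config = if cfg!(feature = "asan") {
    EventConfig::from_name("asan")
} else {
    EventConfig::from_name("plain")
};
// The configuration is then handed to the event manager, e.g. via
// `Launcher::builder().configuration(config)` as described in the Launcher chapter.
```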

View File

@ -1,8 +1,8 @@
# Message Passing
LibAFL offers a standard mechanism for message passing between processes and machines with a low overhead.
We use message passing to inform the other connected clients/fuzzers/nodes about new testcases, metadata, and statistics about the current run.
Depending on individual needs, LibAFL can also write testcase contents to disk, while still using events to notify other fuzzers, using the `CachedOnDiskCorpus` or similar.
In our tests, message passing scales very well to share new testcases and metadata between multiple running fuzzer instances for multi-core fuzzing.
Specifically, it scales _a lot_ better than using memory locks on a shared corpus, and _a lot_ better than sharing the testcases via the filesystem, as AFL traditionally does.
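For illustration, a corpus that keeps testcase contents on disk while events are still passed in memory could be set up like this (a sketch only; the constructor arguments, in particular the cache size, are assumptions):
```rust,ignore
use std::path::PathBuf;
use libafl::{corpus::CachedOnDiskCorpus, inputs::BytesInput};

// Hypothetical: testcases are stored under ./corpus with up to 4096 of them
// cached in memory; other nodes are still notified about them via events.
let corpus = CachedOnDiskCorpus::<BytesInput>::new(PathBuf::from("./corpus"), 4096)?;
```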
@ -12,7 +12,7 @@ The `EventManager` interface is used to send Events over the wire using `Low Lev
## Low Level Message Passing (LLMP)
LibAFL comes with a reasonably lock-free message passing mechanism that scales well across cores and, using its _broker2broker_ mechanism, even to connected machines via TCP.
Most example fuzzers use this mechanism, and it is the best `EventManager` if you want to fuzz on more than a single core.
In the following, we will describe the inner workings of `LLMP`.
@ -28,12 +28,12 @@ Shared maps, called shared memory for the sake of not colliding with Rust's `map
Each client, usually a fuzzer trying to share stats and new testcases, maps an outgoing `ShMem` map.
With very few exceptions, only this client writes to this map; therefore, we do not run into race conditions and can live without locks.
The broker reads from all clients' `ShMem` maps.
It periodically checks all incoming client maps and then forwards new messages to its outgoing broadcast-`ShMem`, mapped by all connected clients.
To send new messages, a client places a new message at the end of their shared memory and then updates a static field to notify the broker.
Once the outgoing map is full, the sender allocates a new `ShMem` using the respective `ShMemProvider`.
It then sends the information needed to map the newly-allocated page in connected processes to the old page, using an end of page (`EOP`) message.
Once the receiver maps the new page, it flags it as safe for unmapping by the sending process (to avoid race conditions if we have more than a single EOP in a short time), and then continues to read from the new `ShMem`.
The schema for clients' maps to the broker is as follows:
@ -54,10 +54,10 @@ After the broker received a new message from clientN, (`clientN_out->current_id
The clients periodically, for example after finishing `n` mutations, check for new incoming messages by checking if (`current_broadcast_map->current_id != last_message->message_id`).
While the broker uses the same EOP mechanism to map new `ShMem`s for its outgoing map, it never unmaps old pages.
These additional memory resources serve a good purpose: by keeping all broadcast pages around, we make sure that new clients can join a fuzzing campaign at a later point in time.
They just need to re-read all broadcasted messages from start to finish.
So the outgoing messages flow like this over the outgoing broadcast `ShMem`:
```text
[broker]
@ -78,7 +78,7 @@ They are the default if using LibAFL's `Launcher`.
If you should want to use `LLMP` in its raw form, without any `LibAFL` abstractions, take a look at the `llmp_test` example in [./libafl/examples](https://github.com/AFLplusplus/LibAFL/blob/main/libafl/examples/llmp_test/main.rs).
You can run the example using `cargo run --example llmp_test` with the appropriate modes, as indicated by its help output.
First, you will have to create a broker using `LlmpBroker::new()`.
Then, create some `LlmpClient`s in other threads and register them with the main thread using `LlmpBroker::register_client`.
Finally, call `LlmpBroker::loop_forever()`.
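A rough sketch of that flow (not the literal `llmp_test` code; the `StdShMemProvider` setup and the exact argument lists of `new()`, `register_client()`, and `loop_forever()` are assumptions and differ between LibAFL versions):
```rust,ignore
use libafl_bolts::{
    llmp::{LlmpBroker, LlmpClient},
    shmem::{ShMemProvider, StdShMemProvider},
};

// Hypothetical: the broker owns the broadcast map and forwards client messages.
let mut broker = LlmpBroker::new(StdShMemProvider::new()?)?;

// In other threads or processes, create clients backed by the same kind of
// shared memory and register them with the broker:
// let client = LlmpClient::new(StdShMemProvider::new()?, ...)?;
// broker.register_client(...);

// Finally, let the broker run and forward messages until it is shut down.
// broker.loop_forever(...);
```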
### B2B: Connecting Fuzzers via TCP

View File

@ -4,18 +4,18 @@ Multiple fuzzer instances can be spawned using different ways.
## Manually, via a TCP port
The straightforward way to do multi-threading is to use the [`LlmpRestartingEventManager`](https://docs.rs/libafl/latest/libafl/events/llmp/struct.LlmpRestartingEventManager.html), specifically to use [`setup_restarting_mgr_std`](https://docs.rs/libafl/latest/libafl/events/llmp/fn.setup_restarting_mgr_std.html).
It abstracts away all the pesky details about restarts on crashes (for in-memory fuzzers) and multi-threading.
With it, every instance you launch manually tries to connect to a TCP port on the local machine.
If the port is not yet bound, this instance becomes the broker, binding itself to the port to await new clients.
If the port is already bound, the EventManager will try to connect to it.
The instance becomes a client and can now communicate with all other nodes.
Launching nodes manually has the benefit that you can have multiple nodes with different configurations, such as clients fuzzing with and without `ASan`.
While it's called "restarting" manager, it uses `fork` on Unix-like operating systems as an optimization and only actually restarts from scratch on Windows.
## Automated, with Launcher
@ -23,7 +23,7 @@ While it's called "restarting" manager, it uses `fork` on Unix-like operating sy
The Launcher is the lazy way to do multiprocessing.
You can use the Launcher builder to create a fuzzer that spawns multiple nodes with one click, all using restarting event managers and the same configuration.
To use the Launcher, you first need to write a closure `let mut run_client = |state: Option<_>, mut mgr, _core_id| {}`, which uses its three parameters to create an individual fuzzer. Then you can specify the `shmem_provider`, `broker_port`, `monitor`, `cores`, and other options through `Launcher::builder()`:
```rust,ignore
Launcher::builder()
@ -42,17 +42,13 @@ To use launcher, first you need to write an anonymous function `let mut run_clie
This first starts a broker, then spawns `n` clients, according to the value passed to `cores`.
The value is a string indicating the cores to bind to, for example, `0,2,5` or `0-3`.
For each client, `run_client` will be called.
If the launcher uses `fork`, it will hide child output, unless the settings indicate otherwise, or the `LIBAFL_DEBUG_OUTPUT` env variable is set.
On Windows, the Launcher will restart each client, while on Unix-likes, it will use `fork`.
Advanced use-cases:
1. To connect multiple nodes together via TCP, you can use the `remote_broker_addr`. This requires the `llmp_bind_public` compile-time feature for `LibAFL`.
2. To use multiple launchers for individual configurations, you can set `spawn_broker` to `false` on all instances but one.
3. Launcher will not select the cores automatically, so you need to specify the `cores` that you want.
4. On `Unix`, you can choose between a forking and a non-forking version of Launcher by setting the `fork` feature in LibAFL. Some targets may not like forking, but it is faster than restarting processes from scratch. Windows will never fork.
5. For simple debugging, first set the `LIBAFL_DEBUG_OUTPUT` env variable to see if a child process printed anything.
6. For further debugging of fuzzer failures, it may make sense to replace `Launcher` temporarily with a [`SimpleEventManager`](https://docs.rs/libafl/latest/libafl/events/simple/struct.SimpleEventManager.html#method.new) and call your harness fn (`run_client(None, mgr, 0);`) directly, as sketched below, so that fuzzing runs in the same thread and is easier to debug, before moving back to `Launcher` after the bugfix.
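A minimal sketch of that debugging swap (assuming a `SimpleMonitor` for output; `run_client` is the closure from your own fuzzer, so its exact parameter types are whatever you defined):
```rust,ignore
use libafl::{events::SimpleEventManager, monitors::SimpleMonitor};

// Hypothetical: run the harness single-threaded instead of via Launcher,
// so panics and log output appear directly in the current terminal.
let monitor = SimpleMonitor::new(|s| println!("{s}"));
let mgr = SimpleEventManager::new(monitor);
run_client(None, mgr, 0);
```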
For more examples, you can check out `qemu_launcher` and `libfuzzer_libpng_launcher` in [`./fuzzers/`](https://github.com/AFLplusplus/LibAFL/tree/main/fuzzers).

View File

@ -2,4 +2,4 @@
In this chapter, we will build a custom fuzzer using the [Lain](https://github.com/microsoft/lain) mutator in Rust.
This tutorial will introduce you to writing extensions to LibAFL, like Feedbacks and Testcase metadata.
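As a taste of what such an extension looks like (a sketch only; `PacketLenMetadata` is a made-up name, and the `impl_serdeany!` macro path assumes the `libafl_bolts` crate layout), custom testcase metadata is usually just a small serializable struct registered with LibAFL's `SerdeAny` machinery:
```rust,ignore
use libafl_bolts::impl_serdeany;
use serde::{Deserialize, Serialize};

// Hypothetical metadata that a custom Feedback could attach to interesting testcases.
#[derive(Debug, Serialize, Deserialize)]
pub struct PacketLenMetadata {
    pub length: u64,
}
impl_serdeany!(PacketLenMetadata);
```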

View File

@ -1,58 +1,30 @@
[package] [package]
name = "fret" name = "fret"
version = "0.8.2" version = "0.8.2"
authors = ["Alwin Berger <alwin.berger@tu-dortmund.de>"] authors = ["Andrea Fioraldi <andreafioraldi@gmail.com>", "Dominik Maier <domenukk@gmail.com>"]
edition = "2021" edition = "2021"
[features] [features]
default = ["std", "snapshot_fast", "restarting", "do_hash_notify_state", "config_stg", "fuzz_int", "shortcut", "trace_job_response_times" ] default = ["std", "snapshot_restore", "singlecore", "restarting", "feed_systemtrace", "fuzz_int" ]
std = [] std = []
# Exec environemnt basics
snapshot_restore = [] snapshot_restore = []
snapshot_fast = [ "snapshot_restore" ] snapshot_fast = [ "snapshot_restore" ]
singlecore = [] singlecore = []
restarting = ['singlecore'] restarting = ['singlecore']
run_until_saturation = [] trace_abbs = []
fuzz_int = [] systemstate = []
shortcut = [] feed_systemgraph = [ "systemstate" ]
# information capture feed_systemtrace = [ "systemstate" ]
observe_edges = [] # observe cfg edges
observe_hitcounts = [ "observe_edges" ] # reduces edge granularity
observe_systemstate = []
do_hash_notify_state = []
trace_job_response_times = [ "trace_stg" ]
trace_stg = [ "observe_systemstate" ]
trace_reads = [ "trace_stg", "trace_job_response_times" ]
# feedbacks
feed_stg = [ "trace_stg", "observe_systemstate" ]
feed_stg_edge = [ "feed_stg"]
feed_stg_pathhash = [ "feed_stg"]
feed_stg_abbhash = [ "feed_stg"]
feed_stg_aggregatehash = [ "feed_stg"]
mutate_stg = [ "observe_systemstate", "trace_reads" ]
feed_longest = [ ] feed_longest = [ ]
feed_afl = [ "observe_edges" ] feed_afl = [ ]
feed_genetic = [] feed_genetic = [ ]
fuzz_int = [ ]
gensize_1 = [ ] gensize_1 = [ ]
gensize_10 = [ ] gensize_10 = [ ]
gensize_100 = [ ] gensize_100 = [ ]
gensize_1000 = [ ] observer_hitcounts = []
# schedulers no_hash_state = []
sched_genetic = [] run_until_saturation = []
sched_afl = []
sched_stg = []
sched_stg_edge = ['sched_stg'] # every edge in the stg
sched_stg_pathhash = ['sched_stg'] # every path in the stg
sched_stg_abbhash = ['sched_stg'] # every path of abbs
sched_stg_aggregatehash = ['sched_stg'] # every aggregated path (order independent)
# overall_configs
config_genetic = ["gensize_100","feed_genetic","sched_genetic","trace_stg"]
config_afl = ["feed_afl","sched_afl","trace_stg"]
config_frafl = ["feed_afl","sched_afl","feed_longest","trace_stg"]
config_stg = ["feed_stg_aggregatehash","sched_stg_aggregatehash","mutate_stg"]
# config_stg_aggregate = ["feed_stg_aggregatehash","sched_stg_aggregatehash","mutate_stg"]
config_stg_abbpath = ["feed_stg_abbhash","sched_stg_abbhash","mutate_stg"]
config_stg_edge = ["feed_stg_edge","sched_stg_edge","mutate_stg"]
[profile.release] [profile.release]
lto = true lto = true
@ -60,17 +32,10 @@ codegen-units = 1
debug = true debug = true
[dependencies] [dependencies]
libafl = { path = "../../libafl/", features = ["multipart_inputs"] } libafl = { path = "../../libafl/" }
libafl_bolts = { path = "../../libafl_bolts/" }
libafl_qemu = { path = "../../libafl_qemu/", features = ["arm", "systemmode"] } libafl_qemu = { path = "../../libafl_qemu/", features = ["arm", "systemmode"] }
serde = { version = "1.0", default-features = false, features = ["alloc"] } # serialization lib serde = { version = "1.0", default-features = false, features = ["alloc"] } # serialization lib
serde_json = { version = "1.0", default-features = false, features = ["alloc"] } hashbrown = { version = "0.12", features = ["serde", "ahash-compile-time-rng"] } # A faster hashmap, nostd compatible
hashbrown = { version = "0.14.0", features = ["serde"] } # A faster hashmap, nostd compatible petgraph = { version="0.6.0", features = ["serde-1"] }
petgraph = { version="0.6.5", features = ["serde-1"] }
ron = "0.7" # write serialized data - including hashmaps ron = "0.7" # write serialized data - including hashmaps
rand = "0.5" rand = "0.5"
clap = { version = "4.4.11", features = ["derive"] }
csv = "1.3.0"
log = "0.4"
simple_moving_average = "1.0.2"
itertools = "0.13.0"

View File

@ -1,187 +1,218 @@
import csv import csv
import os import os
def_flags="--release --no-default-features --features std,snapshot_fast,restarting,do_hash_notify_state,fuzz_int,trace_job_response_times" def_flags="--no-default-features --features std,snapshot_restore,singlecore,restarting,run_until_saturation"
remote="remote/" remote="timedump_253048_1873f6_all/"
RUNTIME=86400 RUNTIME=10
NUM_ITERS=12 TARGET_REPS_A=2
TARGET_REPS_B=2
rule build_default: NUM_NODES=2
input: REP_PER_NODE_A=int(TARGET_REPS_A/NUM_NODES)
"../Cargo.toml", REP_PER_NODE_B=int(TARGET_REPS_B/NUM_NODES)
"../src" NODE_ID= 0 if os.getenv('NODE_ID') == None else int(os.environ['NODE_ID'])
output: MY_RANGE_A=range(NODE_ID*REP_PER_NODE_A,(NODE_ID+1)*REP_PER_NODE_A)
directory("bins/target_default") MY_RANGE_B=range(NODE_ID*REP_PER_NODE_B,(NODE_ID+1)*REP_PER_NODE_B)
shell:
"cargo build --target-dir {output} {def_flags}"
rule build_showmap: rule build_showmap:
input:
"bins/target_default"
output: output:
directory("bins/target_showmap") directory("bins/target_showmap")
shell: shell:
"cp -r -a --reflink=auto {input} {output} && cargo build --target-dir {output} {def_flags},config_stg" "cargo build --target-dir {output} {def_flags},systemstate"
rule build_random: rule build_random:
input:
"bins/target_default"
output: output:
directory("bins/target_random") directory("bins/target_random")
shell: shell:
"cp -r -a --reflink=auto {input} {output} && cargo build --target-dir {output} {def_flags},feed_longest" "cargo build --target-dir {output} {def_flags},feed_longest"
rule build_feedlongest:
output:
directory("bins/target_feedlongest")
shell:
"cargo build --target-dir {output} {def_flags},feed_longest"
rule build_frafl: rule build_frafl:
input:
"bins/target_default"
output: output:
directory("bins/target_frafl") directory("bins/target_frafl")
shell: shell:
"cp -r -a --reflink=auto {input} {output} && cargo build --target-dir {output} {def_flags},config_frafl,feed_longest" "cargo build --target-dir {output} {def_flags},feed_afl,feed_longest"
rule build_afl: rule build_afl:
input:
"bins/target_default"
output: output:
directory("bins/target_afl") directory("bins/target_afl")
shell: shell:
"cp -r -a --reflink=auto {input} {output} && cargo build --target-dir {output} {def_flags},config_afl" "cargo build --target-dir {output} {def_flags},feed_afl,observer_hitcounts"
rule build_stg: rule build_state:
input:
"bins/target_default"
output: output:
directory("bins/target_stg") directory("bins/target_state")
shell: shell:
"cp -r -a --reflink=auto {input} {output} && cargo build --target-dir {output} {def_flags},config_stg" "cargo build --target-dir {output} {def_flags},feed_systemtrace"
rule build_stg_abbpath: rule build_nohashstate:
input:
"bins/target_default"
output: output:
directory("bins/target_stg_abbpath") directory("bins/target_nohashstate")
shell: shell:
"cp -r -a --reflink=auto {input} {output} && cargo build --target-dir {output} {def_flags},config_stg_abbpath" "cargo build --target-dir {output} {def_flags},feed_systemtrace,no_hash_state"
rule build_stg_edge: rule build_graph:
input:
"bins/target_default"
output: output:
directory("bins/target_stg_edge") directory("bins/target_graph")
shell: shell:
"cp -r -a --reflink=auto {input} {output} && cargo build --target-dir {output} {def_flags},config_stg_edge" "cargo build --target-dir {output} {def_flags},feed_systemgraph"
rule build_showmap_int:
output:
directory("bins/target_showmap_int")
shell:
"cargo build --target-dir {output} {def_flags},systemstate,fuzz_int"
rule build_random_int:
output:
directory("bins/target_random_int")
shell:
"cargo build --target-dir {output} {def_flags},feed_longest,fuzz_int"
rule build_state_int:
output:
directory("bins/target_state_int")
shell:
"cargo build --target-dir {output} {def_flags},feed_systemtrace,fuzz_int"
rule build_nohashstate_int:
output:
directory("bins/target_nohashstate_int")
shell:
"cargo build --target-dir {output} {def_flags},feed_systemtrace,fuzz_int,no_hash_state"
rule build_frafl_int:
output:
directory("bins/target_frafl_int")
shell:
"cargo build --target-dir {output} {def_flags},feed_afl,feed_longest,fuzz_int"
rule build_afl_int:
output:
directory("bins/target_afl_int")
shell:
"cargo build --target-dir {output} {def_flags},feed_afl,fuzz_int,observer_hitcounts"
rule build_feedlongest_int:
output:
directory("bins/target_feedlongest_int")
shell:
"cargo build --target-dir {output} {def_flags},feed_longest,fuzz_int"
rule build_feedgeneration1: rule build_feedgeneration1:
input:
"bins/target_default"
output: output:
directory("bins/target_feedgeneration1") directory("bins/target_feedgeneration1")
shell: shell:
"cp -r -a --reflink=auto {input} {output} && cargo build --target-dir {output} {def_flags},feed_genetic,gensize_1" "cargo build --target-dir {output} {def_flags},feed_genetic,gensize_1"
rule build_feedgeneration1_int:
output:
directory("bins/target_feedgeneration1_int")
shell:
"cargo build --target-dir {output} {def_flags},feed_genetic,fuzz_int,gensize_1"
rule build_feedgeneration10: rule build_feedgeneration10:
input:
"bins/target_default"
output: output:
directory("bins/target_feedgeneration10") directory("bins/target_feedgeneration10")
shell: shell:
"cp -r -a --reflink=auto {input} {output} && cargo build --target-dir {output} {def_flags},feed_genetic,gensize_10" "cargo build --target-dir {output} {def_flags},feed_genetic,gensize_10"
rule build_feedgeneration10_int:
output:
directory("bins/target_feedgeneration10_int")
shell:
"cargo build --target-dir {output} {def_flags},feed_genetic,fuzz_int,gensize_10"
rule build_feedgeneration100: rule build_feedgeneration100:
input:
"bins/target_default"
output: output:
directory("bins/target_feedgeneration100") directory("bins/target_feedgeneration100")
shell: shell:
"cp -r -a --reflink=auto {input} {output} && cargo build --target-dir {output} {def_flags},config_genetic,gensize_100" "cargo build --target-dir {output} {def_flags},feed_genetic,gensize_100"
rule build_genetic100: rule build_feedgeneration100_int:
input:
"bins/target_default"
output: output:
directory("bins/target_genetic100") directory("bins/target_feedgeneration100_int")
shell: shell:
"cp -r -a --reflink=auto {input} {output} && cargo build --target-dir {output} {def_flags},config_genetic,mutate_stg,gensize_100" "cargo build --target-dir {output} {def_flags},feed_genetic,fuzz_int,gensize_100"
rule build_feedgeneration1000:
input:
"bins/target_default"
output:
directory("bins/target_feedgeneration1000")
shell:
"cp -r -a --reflink=auto {input} {output} && cargo build --target-dir {output} {def_flags},config_genetic,gensize_1000"
rule build_genetic1000:
input:
"bins/target_default"
output:
directory("bins/target_genetic1000")
shell:
"cp -r -a --reflink=auto {input} {output} && cargo build --target-dir {output} {def_flags},config_genetic,mutate_stg,gensize_1000"
rule run_bench: rule run_bench:
input: input:
"build/{target}.elf", "build/{target}.elf",
"bins/target_{fuzzer}" "bins/target_{fuzzer}"
output: output:
multiext("timedump/{fuzzer}/{target}#{num}", ".time", ".log") # , ".case" multiext("timedump/{fuzzer}/{target}.{num}", "", ".log") # , ".case"
run: run:
with open('target_symbols.csv') as csvfile: with open('target_symbols.csv') as csvfile:
reader = csv.DictReader(csvfile) reader = csv.DictReader(csvfile)
line = next((x for x in reader if x['\ufeffkernel']==wildcards.target), None) line = next((x for x in reader if x['kernel']==wildcards.target), None)
if line == None: if line == None:
return False return False
kernel=line['\ufeffkernel'] kernel=line['kernel']
fuzz_main=line['main_function'] fuzz_main=line['main_function']
fuzz_input=line['input_symbol'] fuzz_input=line['input_symbol']
fuzz_len=line['input_size'] fuzz_len=line['input_size']
bkp=line['return_function'] bkp=line['return_function']
select_task=line['select_task'] script="""
mkdir -p $(dirname {output[0]})
export KERNEL=$(pwd)/{input[0]}
export FUZZ_MAIN={fuzz_main}
export FUZZ_INPUT={fuzz_input}
export FUZZ_INPUT_LEN={fuzz_len}
export BREAKPOINT={bkp}
export SEED_RANDOM={wildcards.num}
export TIME_DUMP=$(pwd)/{output[0]}
export CASE_DUMP=$(pwd)/{output[0]}.case
export TRACE_DUMP=$(pwd)/{output[0]}.trace
export FUZZ_ITERS={RUNTIME}
export FUZZER=$(pwd)/{input[1]}/debug/fret
set +e
../fuzzer.sh > {output[1]} 2>&1
exit 0
"""
if wildcards.fuzzer.find('random') >= 0: if wildcards.fuzzer.find('random') >= 0:
script=""" script="export FUZZ_RANDOM={output[1]}\n"+script
export RUST_BACKTRACE=1
mkdir -p $(dirname {output[0]})
set +e
echo $(pwd)/{input[1]}/release/fret -n $(pwd)/timedump/{wildcards.fuzzer}/{wildcards.target}#{wildcards.num} -s {select_task} -t -a -g -k {input[0]} -c ./target_symbols.csv fuzz --random -t {RUNTIME} -s {wildcards.num}
$(pwd)/{input[1]}/release/fret -n $(pwd)/timedump/{wildcards.fuzzer}/{wildcards.target}#{wildcards.num} -s {select_task} -t -a -g -k {input[0]} -c ./target_symbols.csv fuzz --random -t {RUNTIME} -s {wildcards.num} > {output[1]} 2>&1
exit 0
"""
else:
script="""
export RUST_BACKTRACE=1
mkdir -p $(dirname {output[0]})
set +e
echo $(pwd)/{input[1]}/release/fret -n $(pwd)/timedump/{wildcards.fuzzer}/{wildcards.target}#{wildcards.num} -s {select_task} -t -a -g -k {input[0]} -c ./target_symbols.csv fuzz -t {RUNTIME} -s {wildcards.num}
$(pwd)/{input[1]}/release/fret -n $(pwd)/timedump/{wildcards.fuzzer}/{wildcards.target}#{wildcards.num} -s {select_task} -t -a -g -k {input[0]} -c ./target_symbols.csv fuzz -t {RUNTIME} -s {wildcards.num} > {output[1]} 2>&1
exit 0
"""
shell(script) shell(script)
rule run_showmap: rule run_showmap:
input: input:
"{remote}build/{target}.elf", "{remote}build/{target}.elf",
"bins/target_showmap", "bins/target_showmap",
"{remote}timedump/{fuzzer}/{target}#{num}.case" "bins/target_showmap_int",
"{remote}timedump/{fuzzer}/{target}.{num}.case"
output: output:
"{remote}timedump/{fuzzer}/{target}#{num}_case.trace.ron", "{remote}timedump/{fuzzer}/{target}.{num}.trace.ron",
"{remote}timedump/{fuzzer}/{target}#{num}_case.time", "{remote}timedump/{fuzzer}/{target}.{num}.case.time",
run: run:
with open('target_symbols.csv') as csvfile: with open('target_symbols.csv') as csvfile:
reader = csv.DictReader(csvfile) reader = csv.DictReader(csvfile)
line = next((x for x in reader if x['\ufeffkernel']==wildcards.target), None) line = next((x for x in reader if x['kernel']==wildcards.target), None)
if line == None: if line == None:
return False return False
kernel=line['\ufeffkernel'] kernel=line['kernel']
fuzz_main=line['main_function'] fuzz_main=line['main_function']
fuzz_input=line['input_symbol'] fuzz_input=line['input_symbol']
fuzz_len=line['input_size'] fuzz_len=line['input_size']
bkp=line['return_function'] bkp=line['return_function']
select_task=line['select_task'] script=""
script=""" if wildcards.fuzzer.find('_int') > -1:
export FUZZER=$(pwd)/{input[1]}/release/fret script="export FUZZER=$(pwd)/{input[2]}/debug/fret\n"
else:
script="export FUZZER=$(pwd)/{input[1]}/debug/fret\n"
script+="""
mkdir -p $(dirname {output}) mkdir -p $(dirname {output})
export KERNEL=$(pwd)/{input[0]}
export FUZZ_MAIN={fuzz_main}
export FUZZ_INPUT={fuzz_input}
export FUZZ_INPUT_LEN={fuzz_len}
export BREAKPOINT={bkp}
export TRACE_DUMP=$(pwd)/{output[0]}
export DO_SHOWMAP=$(pwd)/{input[3]}
export TIME_DUMP=$(pwd)/{output[1]}
set +e set +e
echo $FUZZER -n $(pwd)/{remote}/timedump/{wildcards.fuzzer}/{wildcards.target}#{wildcards.num}_case -s {select_task} -t -a -r -g -k {input[0]} -c ./target_symbols.csv showmap -i {input[2]} ../fuzzer.sh
$FUZZER -n $(pwd)/{remote}/timedump/{wildcards.fuzzer}/{wildcards.target}#{wildcards.num}_case -s {select_task} -t -a -r -g -k {input[0]} -c ./target_symbols.csv showmap -i {input[2]}
exit 0 exit 0
""" """
if wildcards.fuzzer.find('random') >= 0: if wildcards.fuzzer.find('random') >= 0:
@ -190,78 +221,61 @@ rule run_showmap:
rule tarnsform_trace: rule tarnsform_trace:
input: input:
"{remote}timedump/{fuzzer}/{target}#{num}_case.trace.ron", "{remote}timedump/{fuzzer}/{target}.{num}.trace.ron"
output: output:
"{remote}timedump/{fuzzer}/{target}#{num}_case.jobs.csv", "{remote}timedump/{fuzzer}/{target}.{num}.trace.csv"
"{remote}timedump/{fuzzer}/{target}#{num}_case.resp.csv", shell:
"{remote}timedump/{fuzzer}/{target}#{num}_case.abbs.csv" "$(pwd)/../../../../state2gantt/target/debug/state2gantt {input} > {output[0]}"
run:
with open('target_symbols.csv') as csvfile:
reader = csv.DictReader(csvfile)
line = next((x for x in reader if x['\ufeffkernel']==wildcards.target), None)
if line == None:
return False
kernel=line['\ufeffkernel']
fuzz_main=line['main_function']
fuzz_input=line['input_symbol']
fuzz_len=line['input_size']
bkp=line['return_function']
select_task=line['select_task']
script="""
echo $(pwd)/../../../../state2gantt/target/debug/state2gantt -i {input} -a {output[0]} -r {output[1]} -p {output[2]} -t {select_task}
$(pwd)/../../../../state2gantt/target/debug/state2gantt -i {input} -a {output[0]} -r {output[1]} -p {output[2]} -t {select_task}
"""
shell(script)
rule trace2gantt: rule trace2gantt:
input: input:
"{remote}timedump/{fuzzer}/{target}#{num}_case.jobs.csv", "{remote}timedump/{fuzzer}/{target}.{num}.trace.csv"
"{remote}timedump/{fuzzer}/{target}#{num}_case.resp.csv"
output: output:
"{remote}timedump/{fuzzer}/{target}#{num}_case.jobs.html", "{remote}timedump/{fuzzer}/{target}.{num}.trace.csv.png"
shell: shell:
"Rscript $(pwd)/../../../../state2gantt/plot_response.r {input[0]} {input[1]} html" "Rscript --vanilla $(pwd)/../../../../state2gantt/gantt.R {input}"
rule quicktest: rule all_main:
input: input:
expand("timedump/{fuzzer}/{target}{variant}#{num}.time", fuzzer=['feedgeneration100', 'stg'], target=['release', 'waters', 'copter'], variant=['_full'], num=range(0,int( NUM_ITERS ))), expand("timedump/{fuzzer}/{target}.{num}", fuzzer=['random','afl','feedgeneration10','state'], target=['waters','watersv2'],num=range(0,3))
expand("timedump/{fuzzer}/{target}{variant}#{num}.time", fuzzer=['feedgeneration100', 'stg'], target=['waters'], variant=['_bytes', '_int'], num=range(0,int( NUM_ITERS ))),
#expand("timedump/{fuzzer}/{target}{variant}#{num}.time", fuzzer=['genetic100', 'frafl'], target=['release', 'waters', 'copter'], variant=['_full'], num=range(0,int( NUM_ITERS ))),
#expand("timedump/{fuzzer}/{target}{variant}#{num}.time", fuzzer=['genetic100', 'frafl'], target=['waters'], variant=['_bytes', '_int'], num=range(0,int( NUM_ITERS ))),
expand("timedump/{fuzzer}/{target}{variant}#{num}.time", fuzzer=['random'], target=['release', 'waters', 'copter'], variant=['_full'], num=range(0,int( 1 ))),
expand("timedump/{fuzzer}/{target}{variant}#{num}.time", fuzzer=['random'], target=['waters'], variant=['_bytes', '_int'], num=range(0,int( 1 ))),
rule critical_set: rule all_main_int:
input: input:
expand("timedump/{fuzzer}/{target}{variant}#{num}.time", fuzzer=['feedgeneration100', 'stg'], target=['release', 'waters', 'copter'], variant=['_seq_full'], num=range(0,int( 10 ))), expand("timedump/{fuzzer}/{target}.{num}", fuzzer=['random_int','afl_int','feedgeneration10_int','state_int'], target=['waters_int','watersv2_int'],num=range(0,4))
expand("timedump/{fuzzer}/{target}{variant}#{num}.time", fuzzer=['random'], target=['release', 'waters', 'copter'], variant=['_seq_full'], num=range(0,int( 1 ))),
rule extended_set: rule all_compare_feedgeneration:
input: input:
expand("timedump/{fuzzer}/{target}{variant}#{num}.time", fuzzer=['feedgeneration100', 'stg'], target=['release', 'waters', 'copter'], variant=['_seq_full'], num=range(0,int( 10 ))), expand("timedump/{fuzzer}/{target}.{num}", fuzzer=['feedgeneration1','feedgeneration10','feedgeneration100'], target=['waters_int','watersv2'],num=range(0,10))
expand("timedump/{fuzzer}/{target}{variant}#{num}.time", fuzzer=['feedgeneration100', 'stg'], target=['waters'], variant=['_seq_int','_seq_bytes'], num=range(0,int( 10 ))),
expand("timedump/{fuzzer}/{target}{variant}#{num}.time", fuzzer=['feedgeneration100', 'stg'], target=['copter'], variant=['_seq_bytes'], num=range(0,int( 10 ))),
expand("timedump/{fuzzer}/{target}{variant}#{num}.time", fuzzer=['random'], target=['copter', 'release', 'waters'], variant=['_seq_full'], num=range(0,int( 1 ))),
expand("timedump/{fuzzer}/{target}{variant}#{num}.time", fuzzer=['random'], target=['copter', 'waters'], variant=['_seq_full','_seq_int','_seq_bytes'], num=range(0,int( 1 ))),
rule emergency_copter: rule all_compare_feedgeneration_int:
input: input:
expand("timedump/{fuzzer}/{target}{variant}#{num}.time", fuzzer=['feedgeneration100', 'stg', 'frafl'], target=['copter'], variant=['_seq_stateless_full'], num=range(0,int( 10 ))), expand("timedump/{fuzzer}/{target}.{num}", fuzzer=['feedgeneration1_int','feedgeneration10_int','feedgeneration100_int'], target=['waters_int','watersv2_int'],num=range(0,10))
expand("timedump/{fuzzer}/{target}{variant}#{num}.time", fuzzer=['feedgeneration100', 'stg', 'frafl'], target=['copter'], variant=['_seq_full'], num=range(0,int( 10 ))),
expand("timedump/{fuzzer}/{target}{variant}#{num}.time", fuzzer=['random'], target=['copter'], variant=['_seq_full'], num=range(0,int( 10 ))),
expand("timedump/{fuzzer}/{target}{variant}#{num}.time", fuzzer=['random'], target=['copter'], variant=['_seq_stateless_full'], num=range(0,int( 10 ))),
rule full_set: rule all_compare_afl:
input: input:
expand("timedump/{fuzzer}/{target}{variant}#{num}.time", fuzzer=[ 'frafl'], target=['release', 'waters', 'copter'], variant=['_seq_full'], num=range(0,int( 10 ))), expand("timedump/{fuzzer}/{target}.{num}", fuzzer=['afl','frafl','feedlongest'], target=['waters','watersv2'],num=range(0,10))
expand("timedump/{fuzzer}/{target}{variant}#{num}.time", fuzzer=['frafl'], target=['release', 'waters'], variant=['_seq_int'], num=range(0,int( 10 ))),
expand("timedump/{fuzzer}/{target}{variant}#{num}.time", fuzzer=['frafl'], target=['waters', 'copter'], variant=['_seq_bytes'], num=range(0,int( 10 ))), rule all_compare_afl_int:
expand("timedump/{fuzzer}/{target}{variant}#{num}.time", fuzzer=['frafl'], target=['copter'], variant=['_seq_int'], num=range(0,int( 10 ))), input:
expand("timedump/{fuzzer}/{target}{variant}#{num}.time", fuzzer=['afl'], target=['release', 'waters', 'copter'], variant=['_seq_full'], num=range(0,int( 10 ))), expand("timedump/{fuzzer}/{target}.{num}", fuzzer=['afl_int','frafl_int','feedlongest_int'], target=['waters_int','watersv2_int'],num=range(0,10))
expand("timedump/{fuzzer}/{target}{variant}#{num}.time", fuzzer=['afl'], target=['copter'], variant=['_seq_int'], num=range(0,int( 10 ))),
expand("timedump/{fuzzer}/{target}{variant}#{num}.time", fuzzer=['afl'], target=['copter'], variant=['_seq_bytes'], num=range(0,int( 8 ))), rule all_images:
#expand("timedump/{fuzzer}/{target}{variant}#{num}.time", fuzzer=['feedgeneration100', 'stg', 'random', 'frafl'], target=['release'], variant=['_seq_bytes'], num=range(0,int( 10 ))), input:
expand("{remote}timedump/{fuzzer}/{target}.{num}.trace.csv.png",remote=remote, fuzzer=['afl','feedgeneration10','state'], target=['waters','watersv2'],num=range(0,3))
rule all_images_int:
input:
expand("{remote}timedump/{fuzzer}/{target}.{num}.trace.csv.png",remote=remote, fuzzer=['afl_int','feedgeneration10_int','state_int'], target=['waters_int','watersv2_int'],num=range(0,3))
rule clusterfuzz:
input:
expand("timedump/{fuzzer}/{target}.{num}", fuzzer=['random','afl','feedgeneration10','state'], target=['waters','watersv2'],num=MY_RANGE_A),
expand("timedump/{fuzzer}/{target}.{num}", fuzzer=['random_int','afl_int','feedgeneration10_int','state_int'], target=['waters_int','watersv2_int'],num=MY_RANGE_A),
expand("timedump/{fuzzer}/{target}.{num}", fuzzer=['feedgeneration1','feedgeneration10','feedgeneration100'], target=['waters_int','watersv2'],num=MY_RANGE_B),
expand("timedump/{fuzzer}/{target}.{num}", fuzzer=['feedgeneration1_int','feedgeneration10_int','feedgeneration100_int'], target=['waters_int','watersv2_int'],num=MY_RANGE_B),
expand("timedump/{fuzzer}/{target}.{num}", fuzzer=['afl','frafl','feedlongest'], target=['waters','watersv2'],num=MY_RANGE_B),
expand("timedump/{fuzzer}/{target}.{num}", fuzzer=['afl_int','frafl_int','feedlongest_int'], target=['waters_int','watersv2_int'],num=MY_RANGE_B),
rule all_bins: rule all_bins:
input: input:
expand("bins/target_{target}",target=['random','frafl','stg','feedgeneration100','feedgeneration1000','genetic100','genetic1000']) expand("bins/target_{target}{flag}",target=['random','afl','frafl','state','feedgeneration100'],flag=['','_int'])

View File

@ -1,15 +0,0 @@
def_flags="--no-default-features --features std,snapshot_fast,restarting,do_hash_notify_state,trace_job_response_times,fuzz_int"
set -e
cargo build --target-dir ./bins/target_showmap ${def_flags},config_stg
cargo build --target-dir ./bins/target_random ${def_flags},feed_longest
cargo build --target-dir ./bins/target_frafl ${def_flags},config_frafl,feed_longest
cargo build --target-dir ./bins/target_afl ${def_flags},config_afl,observe_hitcounts
cargo build --target-dir ./bins/target_stg ${def_flags},config_stg
cargo build --target-dir ./bins/target_stgpath ${def_flags},feed_stg_abbhash,sched_stg_abbhash,mutate_stg
cargo build --target-dir ./bins/target_feedgeneration1 ${def_flags},feed_genetic,gensize_1
cargo build --target-dir ./bins/target_feedgeneration10 ${def_flags},feed_genetic,gensize_10
cargo build --target-dir ./bins/target_feedgeneration100 ${def_flags},feed_genetic,gensize_100
cargo build --target-dir ./bins/target_feedgeneration1000 ${def_flags},feed_genetic,gensize_1000
cargo build --target-dir ./bins/target_genetic100 ${def_flags},feed_genetic,mutate_stg,gensize_100
cargo build --target-dir ./bins/target_genetic1000 ${def_flags},feed_genetic,mutate_stg,gensize_1000

View File

@ -1,53 +0,0 @@
# Sequential inputs!
export PARTITION_INPUT=0
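# build <DEMO_FLAG> <suffix>: rebuild the FreeRTOS MPS2 demo selected by $1 with the
# current FUZZ_INT_ACTIVATION/FUZZ_BYTES settings and copy the resulting ELF to
# build/<demo-name><suffix>.elf (demo name = lowercased first word of $1).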
build () {
make -C ../../../../FreeRTOS/FreeRTOS/Demo/CORTEX_M3_MPS2_QEMU_GCC clean && make -C ../../../../FreeRTOS/FreeRTOS/Demo/CORTEX_M3_MPS2_QEMU_GCC $1=1 FUZZ_INT_ACTIVATION=$FUZZ_INT_ACTIVATION FUZZ_BYTES=$FUZZ_BYTES
cp ../../../../FreeRTOS/FreeRTOS/Demo/CORTEX_M3_MPS2_QEMU_GCC/build/RTOSDemo.axf build/$(echo $1 | cut -d_ -f1 | tr '[:upper:]' '[:lower:]')$2.elf
}
export DELETE_RNG_STATE=1
# Only bytes
export FUZZ_INT_ACTIVATION=0 FUZZ_BYTES=1 SUFFIX="_seq_bytes"
build WATERS_DEMO $SUFFIX
build RELEASE_DEMO $SUFFIX
build INTERACT_DEMO $SUFFIX
# Only interrupts
export FUZZ_INT_ACTIVATION=1 FUZZ_BYTES=0 SUFFIX="_seq_int"
build WATERS_DEMO $SUFFIX
build RELEASE_DEMO $SUFFIX
build INTERACT_DEMO $SUFFIX
# Full
export FUZZ_INT_ACTIVATION=1 FUZZ_BYTES=1 SUFFIX="_seq_full"
build WATERS_DEMO $SUFFIX
build RELEASE_DEMO $SUFFIX
build INTERACT_DEMO $SUFFIX
# Don't keep rng states
export DELETE_RNG_STATE=1
export FUZZ_INT_ACTIVATION=0 FUZZ_BYTES=1 SUFFIX="_seq_stateless_bytes"
build COPTER_DEMO $SUFFIX
export FUZZ_INT_ACTIVATION=1 FUZZ_BYTES=0 SUFFIX="_seq_stateless_int"
build COPTER_DEMO $SUFFIX
export FUZZ_INT_ACTIVATION=1 FUZZ_BYTES=1 SUFFIX="_seq_stateless_full"
build COPTER_DEMO $SUFFIX
# Keep rng states
export DELETE_RNG_STATE=0
export FUZZ_INT_ACTIVATION=0 FUZZ_BYTES=1 SUFFIX="_seq_bytes"
build COPTER_DEMO $SUFFIX
export FUZZ_INT_ACTIVATION=1 FUZZ_BYTES=0 SUFFIX="_seq_int"
build COPTER_DEMO $SUFFIX
export FUZZ_INT_ACTIVATION=1 FUZZ_BYTES=1 SUFFIX="_seq_full"
build COPTER_DEMO $SUFFIX

View File

@ -1,8 +0,0 @@
#!/usr/bin/env bash
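# For every "<target>#*.log" fuzzer log under the given directory ($1, target $2),
# print the last reported "run time ... corpus" line and the last
# "interesting corpus elements" line.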
find $1 -type 'f' -iname "${2}#*.log" | while IFS="" read -r p || [ -n "$p" ]
do
LINE=$(tail -n 100 $p | grep -io "run time: .* corpus: [0-9]*" | tail -n 1)
echo $p: $LINE
LINE=$(grep -i "interesting corpus elements" $p | tail -n 1)
echo $p: $LINE
done

View File

@ -1,30 +0,0 @@
plot () {
[ ! -f ~/code/FRET/LibAFL/fuzzers/FRET/benchmark/remote/${1}${2}_all.png ] && Rscript plot_multi.r remote ${1}${2} ~/code/FRET/LibAFL/fuzzers/FRET/benchmark/remote
}
# Only bytes
export SUFFIX="_seq_bytes"
plot waters $SUFFIX
plot release $SUFFIX
plot copter $SUFFIX
plot interact $SUFFIX
# Only interrupts
export SUFFIX="_seq_int"
plot waters $SUFFIX
plot release $SUFFIX
plot copter $SUFFIX
plot interact $SUFFIX
# Full
export SUFFIX="_seq_full"
plot waters $SUFFIX
plot release $SUFFIX
plot copter $SUFFIX
plot interact $SUFFIX

View File

@ -1,20 +0,0 @@
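# get_max_nodecount <target>: among all remote/timedump/**/<target>*.stgsize files,
# print the last line (final node count) of the largest one, together with its path.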
get_max_nodecount () {
rm -f sizecomp && for sizefile in remote/timedump/**/$1*.stgsize;do echo "$(tail -n 1 $sizefile),${sizefile}" >> sizecomp; done; sort -n sizecomp | tail -n 1
}
get_largest_files () {
T=$(get_max_nodecount $1)
echo $T | cut -d',' -f6
}
perform () {
T=$(get_max_nodecount $1)
echo $T | cut -d',' -f6
echo $T | cut -d',' -f6 | xargs -I {} ./plot_stgsize.r {}
mv "$(echo $T | cut -d',' -f6 | xargs -I {} basename -s .stgsize {})_nodes.png" $1_nodes.png
}
perform copter
perform release
perform waters
./plot_stgsize_multi.r $(get_largest_files copter) $(get_largest_files release) $(get_largest_files waters)

View File

@ -1,28 +0,0 @@
#!/usr/bin/env bash
declare -a PLOTS
COUNT=0
while IFS="" read -r p || [ -n "$p" ];
do
if [[ -z "$p" ]]; then
continue
fi
N="$(dirname "$p")/$(basename -s .case "$p")"
T="${N}_case.trace.ron"
P="${N}_case"
H="${N}_case.jobs.html"
echo "$COUNT $p -> $H"
IFS=" "
# PLOTS+=("$H")
PLOTS[$COUNT]="$H"
COUNT=$((COUNT+1))
# if [ ! -f "$T" ]; then
# snakemake -c1 "$T"
# fi
# if [ ! -f "$P.html" ]; then
# ~/code/FRET/state2gantt/driver.sh "$T"
# fi
done < <(find ./remote/timedump -maxdepth 2 -type 'f' -iregex '.*\.case')
# echo "${PLOTS[@]}"
snakemake -c 6 --keep-incomplete "${PLOTS[@]}"

View File

@ -11,11 +11,8 @@ registerDoParallel(cl)
args = commandArgs(trailingOnly=TRUE) args = commandArgs(trailingOnly=TRUE)
if (length(args)==0) { if (length(args)==0) {
runtype="remote" runtype="timedump_253048_1873f6_all/timedump"
#target="waters" target="waters_int"
target="watersv2"
#target="waters_int"
#target="watersv2_int"
outputpath="~/code/FRET/LibAFL/fuzzers/FRET/benchmark/" outputpath="~/code/FRET/LibAFL/fuzzers/FRET/benchmark/"
#MY_SELECTION <- c('state', 'afl', 'graph', 'random') #MY_SELECTION <- c('state', 'afl', 'graph', 'random')
SAVE_FILE=TRUE SAVE_FILE=TRUE
@ -23,15 +20,10 @@ if (length(args)==0) {
runtype=args[1] runtype=args[1]
target=args[2] target=args[2]
outputpath=args[3] outputpath=args[3]
#MY_SELECTION <- args[4:length(args)] MY_SELECTION <- args[4:length(args)]
#if (length(MY_SELECTION) == 0)
# MY_SELECTION<-NULL
SAVE_FILE=TRUE SAVE_FILE=TRUE
print(runtype)
print(target)
print(outputpath)
} }
worst_cases <- list(waters=0, waters_int=0, tmr=405669, micro_longint=0, gen3=0) worst_cases <- list(waters=0, waters_int=0, tmr=405669, micro_longint=0)
worst_case <- worst_cases[[target]] worst_case <- worst_cases[[target]]
if (is.null(worst_case)) { if (is.null(worst_case)) {
worst_case = 0 worst_case = 0
@ -41,12 +33,12 @@ if (is.null(worst_case)) {
MY_COLORS <- c("green", "blue", "red", "magenta", "orange", "cyan", "pink", "gray", "orange", "black", "yellow","brown") MY_COLORS <- c("green", "blue", "red", "magenta", "orange", "cyan", "pink", "gray", "orange", "black", "yellow","brown")
BENCHDIR=sprintf("~/code/FRET/LibAFL/fuzzers/FRET/benchmark/%s",runtype) BENCHDIR=sprintf("~/code/FRET/LibAFL/fuzzers/FRET/benchmark/%s",runtype)
BASENAMES=Filter(function(x) x!="" && substr(x,1,1)!='.',list.dirs(BENCHDIR,full.names=FALSE)) BASENAMES=Filter(function(x) x!="" && substr(x,1,1)!='.',list.dirs(BENCHDIR,full.names=FALSE))
PATTERNS="%s#[0-9]*.time$" PATTERNS="%s.[0-9]*$"
#RIBBON='sd' #RIBBON='sd'
#RIBBON='span' #RIBBON='span'
RIBBON='both' RIBBON='both'
DRAW_WC = worst_case > 0 DRAW_WC = worst_case > 0
LEGEND_POS="bottomright" LEGEND_POS="topright"
#LEGEND_POS="bottomright" #LEGEND_POS="bottomright"
CONTINUE_LINE_TO_END=FALSE CONTINUE_LINE_TO_END=FALSE
@ -226,11 +218,11 @@ if (length(typenames) == 0) {return()}
h_ = 500 h_ = 500
w_ = h_*4/3 w_ = h_*4/3
if (SAVE_FILE) {png(file=sprintf("%s/%s_%s.png",outputpath,target,filename), width=w_, height=h_)} if (SAVE_FILE) {png(file=sprintf("%s%s_%s.png",outputpath,target,filename), width=w_, height=h_)}
par(mar=c(4,4,1,1)) par(mar=c(4,4,1,1))
par(oma=c(0,0,0,0)) par(oma=c(0,0,0,0))
plot(c(0,max(one_frame['time'])),c(ylow,yhigh), col='white', xlab="Time [h]", ylab="WORT [insn]", pch='.') plot(c(1,max(one_frame['time'])),c(ylow,yhigh), col='white', xlab="Time [h]", ylab="WORT [insn]", pch='.')
for (t in seq_len(length(typenames))) { for (t in seq_len(length(typenames))) {
#proj = one_frame[seq(1, dim(one_frame)[1], by=max(1, length(one_frame[[1]])/(10*w_))),] #proj = one_frame[seq(1, dim(one_frame)[1], by=max(1, length(one_frame[[1]])/(10*w_))),]
@ -296,7 +288,7 @@ if (DRAW_WC) {
lines(c(0,length(one_frame[[1]])),y=c(worst_case,worst_case), lty='dotted') lines(c(0,length(one_frame[[1]])),y=c(worst_case,worst_case), lty='dotted')
leglines=c(typenames, 'worst observed') leglines=c(typenames, 'worst observed')
} }
legend(LEGEND_POS, legend=leglines,#"bottomright", legend(LEGEND_POS, legend=leglines,#"topleft"
col=c(MY_COLORS_[1:length(typenames)],"black"), col=c(MY_COLORS_[1:length(typenames)],"black"),
lty=c(rep("solid",length(typenames)),"dotted")) lty=c(rep("solid",length(typenames)),"dotted"))
@ -309,7 +301,7 @@ par(oma=c(0,0,0,0))
#RIBBON='both' #RIBBON='both'
#MY_SELECTION = c('state_int','generation100_int') #MY_SELECTION = c('state_int','generation100_int')
#MY_SELECTION = c('state','frafl') #MY_SELECTION = c('state_int','frafl_int')
if (exists("MY_SELECTION")) { if (exists("MY_SELECTION")) {
plotting(MY_SELECTION, 'custom', MY_COLORS[c(1,2)]) plotting(MY_SELECTION, 'custom', MY_COLORS[c(1,2)])

View File

@ -1,23 +0,0 @@
#!/usr/bin/env Rscript
# Load necessary libraries
library(ggplot2)
# Define the function to load CSV and plot
plot_stgsize <- function(file_path) {
print(file_path)
# Read the CSV file without headers
data <- read.csv(file_path, header = FALSE)
data['V5'] <- data['V5']/(3600*1000)
# Plot the line chart
p <- ggplot(data, aes(x = V5, y = V2)) +
geom_line() +
labs(x = "runtime [h]", y = "# of nodes") + #, title = "Number of nodes over time.") +
theme_minimal()
output_file <- sub("\\.stgsize$", paste0("_nodes.png"), file_path)
ggsave(basename(output_file), plot = p + theme_bw(base_size = 10), width = 3.5, height = 2, dpi = 300, units = "in", device = "png")
}
args <- commandArgs(trailingOnly = TRUE)
plot_stgsize(args[1])

View File

@ -1,33 +0,0 @@
#!/usr/bin/env Rscript
library(ggplot2)
# Function to plot multiple files
plot_multiple_files <- function(file_paths) {
all_data <- data.frame()
for (file_path in file_paths) {
# Read the CSV file without headers
data <- read.csv(file_path, header = FALSE)
data['V5'] <- data['V5']/(3600*1000)
# Extract the name for the line
target <- sub("_.*", "", basename(file_path))
data$target <- target
# Combine data
all_data <- rbind(all_data, data)
}
# Plot the line chart
p <- ggplot(all_data, aes(x = V5, y = V2, color = target)) +
geom_line() +
labs(x = "runtime [h]", y = "# of nodes") +
theme_minimal()
# Save the plot
ggsave("stg_node_sizes.png", plot = p + theme_bw(base_size = 10), width = 4, height = 2.5, dpi = 300, units = "in", device = "png")
}
# Example usage
file_paths <- commandArgs(trailingOnly = TRUE)
plot_multiple_files(file_paths)


@ -1,17 +1,24 @@
- kernel,main_function,input_symbol,input_size,return_function,select_task,interrupts
- interact_full,main_interact,FUZZ_INPUT,4096,trigger_Qemu_break,NONE,0#1000
- interact_int,main_interact,FUZZ_INPUT,4096,trigger_Qemu_break,NONE,0#1000
- interact_bytes,main_interact,FUZZ_INPUT,4096,trigger_Qemu_break,NONE,
- waters_seq_full,main_waters,FUZZ_INPUT,4096,trigger_Qemu_break,1129,0#1000
- waters_seq_int,main_waters,FUZZ_INPUT,4096,trigger_Qemu_break,1129,0#1000
- waters_seq_bytes,main_waters,FUZZ_INPUT,4096,trigger_Qemu_break,1129,
- waters_seq_full_seq,main_waters,FUZZ_INPUT,4096,trigger_Qemu_break,1129,0#1000
- release_seq_full,main_release,FUZZ_INPUT,4096,trigger_Qemu_break,T3,0#10000;1#5000;2#2000;3#3000
- release_seq_int,main_release,FUZZ_INPUT,4096,trigger_Qemu_break,T3,0#10000;1#5000;2#2000;3#3000
- release_seq_bytes,main_release,FUZZ_INPUT,4096,trigger_Qemu_break,T3,
- copter_seq_full,main_osek,FUZZ_INPUT,4096,trigger_Qemu_break,FC,0#5000
- copter_seq_int,main_osek,FUZZ_INPUT,4096,trigger_Qemu_break,FC,0#5000
- copter_seq_bytes,main_osek,FUZZ_INPUT,4096,trigger_Qemu_break,SPAttitud,
- copter_seq_stateless_full,main_osek,FUZZ_INPUT,4096,trigger_Qemu_break,FC,0#5000
- copter_seq_stateless_int,main_osek,FUZZ_INPUT,4096,trigger_Qemu_break,FC,0#5000
- copter_seq_stateless_bytes,main_osek,FUZZ_INPUT,4096,trigger_Qemu_break,SPAttitud,
+ kernel,main_function,input_symbol,input_size,return_function
+ mpeg2,mpeg2_main,mpeg2_oldorgframe,90112,mpeg2_return
+ audiobeam,audiobeam_main,audiobeam_input,11520,audiobeam_return
+ epic,epic_main,epic_image,4096,epic_return
+ dijkstra,dijkstra_main,dijkstra_AdjMatrix,10000,dijkstra_return
+ fft,fft_main,fft_twidtable,2046,fft_return
+ bsort,bsort_main,bsort_Array,400,bsort_return
+ insertsort,insertsort_main,insertsort_a,400,insertsort_return
+ g723_enc,g723_enc_main,g723_enc_INPUT,1024,g723_enc_return
+ rijndael_dec,rijndael_dec_main,rijndael_dec_data,32768,rijndael_dec_return
+ rijndael_enc,rijndael_enc_main,rijndael_enc_data,31369,rijndael_enc_return
+ huff_dec,huff_dec_main,huff_dec_encoded,419,huff_dec_return
+ huff_enc,huff_enc_main,huff_enc_plaintext,600,huff_enc_return
+ gsm_enc,gsm_enc_main,gsm_enc_pcmdata,6400,gsm_enc_return
+ tmr,main,FUZZ_INPUT,32,trigger_Qemu_break
+ tacle_rtos,prvStage0,FUZZ_INPUT,604,trigger_Qemu_break
+ lift,main_lift,FUZZ_INPUT,100,trigger_Qemu_break
+ waters,main_waters,FUZZ_INPUT,4096,trigger_Qemu_break
+ watersv2,main_waters,FUZZ_INPUT,4096,trigger_Qemu_break
+ waters_int,main_waters,FUZZ_INPUT,4096,trigger_Qemu_break
+ watersv2_int,main_waters,FUZZ_INPUT,4096,trigger_Qemu_break
+ micro_branchless,main_branchless,FUZZ_INPUT,4,trigger_Qemu_break
+ micro_int,main_int,FUZZ_INPUT,16,trigger_Qemu_break
+ micro_longint,main_micro_longint,FUZZ_INPUT,16,trigger_Qemu_break
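Note on the '-' side above: the interrupts column holds semicolon-separated index#ticks pairs (e.g. 0#10000;1#5000;2#2000;3#3000), which get_interrupt_config further below splits on ';' and '#'. A minimal, self-contained sketch of that decoding, assuming the first field is an interrupt index and the second a tick value:

// Illustrative sketch only: decodes an `interrupts` cell such as
// "0#10000;1#5000;2#2000;3#3000" into (index, ticks) pairs, mirroring the
// split-on-';' / split-on-'#' logic of get_interrupt_config shown later.
fn parse_interrupt_spec(spec: &str) -> Vec<(usize, u32)> {
    spec.split(';')
        .filter(|s| !s.is_empty())
        .map(|s| {
            let (idx, ticks) = s.split_once('#').expect("expected index#ticks");
            (idx.parse().expect("bad index"), ticks.parse().expect("bad ticks"))
        })
        .collect()
}

fn main() {
    assert_eq!(
        parse_interrupt_spec("0#10000;1#5000;2#2000;3#3000"),
        vec![(0, 10000), (1, 5000), (2, 2000), (3, 3000)]
    );
}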
fuzzers/FRET/fuzzer.sh Executable file

@ -0,0 +1,25 @@
#!/usr/bin/env bash
parent_path=$( cd "$(dirname "${BASH_SOURCE[0]}")" ; pwd -P )
cd "$parent_path"
[ -n "$1" -a "$1" != "+" -a -z "$KERNEL" ] && export KERNEL="$1"
[ -n "$2" -a "$2" != "+" -a -z "$FUZZ_MAIN" ] && export FUZZ_MAIN="$2"
[ -n "$3" -a "$3" != "+" -a -z "$FUZZ_INPUT" ] && export FUZZ_INPUT="$3"
[ -n "$4" -a "$4" != "+" -a -z "$FUZZ_INPUT_LEN" ] && export FUZZ_INPUT_LEN="$4"
[ -n "$5" -a "$5" != "+" -a -z "$BREAKPOINT" ] && export BREAKPOINT="$5"
[ -n "$6" -a "$6" != "+" -a -z "$FUZZ_ITERS" ] && export FUZZ_ITERS="$6"
[ -n "$7" -a "$7" != "+" -a -z "$TIME_DUMP" ] && export TIME_DUMP="$7"
[ -n "$8" -a "$8" != "+" -a -z "$CASE_DUMP" ] && export CASE_DUMP="$8"
[ -n "$9" -a "$9" != "+" -a -z "$DO_SHOWMAP" ] && export DO_SHOWMAP="$9"
[ -n "${10}" -a "${10}" != "+" -a -z "$SHOWMAP_TEXTINPUT" ] && export SHOWMAP_TEXTINPUT="${10}"
[ -n "${11}" -a "${11}" != "+" -a -z "$TRACE_DUMP" ] && export TRACE_DUMP="${11}"
[ -z "$FUZZER" ] && export FUZZER=target/debug/fret
set +e
$FUZZER -icount shift=4,align=off,sleep=off -machine mps2-an385 -monitor null -kernel $KERNEL -serial null -nographic -S -semihosting --semihosting-config enable=on,target=native -snapshot -drive if=none,format=qcow2,file=dummy.qcow2
exitcode=$?  # capture the fuzzer's exit status for the check below
if [ "$exitcode" = "101" ]
then
exit 101
else
exit 0
fi


@ -1,115 +0,0 @@
use clap::{Parser, Subcommand};
use std::path::PathBuf;
// Argument parsing ================================================================================
#[derive(Parser)]
#[command(author, version, about, long_about = None)]
pub struct Cli {
/// Kernel Image
#[arg(short, long, value_name = "FILE")]
pub kernel: PathBuf,
/// Sets a custom config file
#[arg(short, long, value_name = "FILE")]
pub config: PathBuf,
/// Sets the prefix of dumped files
#[arg(short='n', long, value_name = "FILENAME")]
pub dump_name: Option<PathBuf>,
/// do time dumps
#[arg(short='t', long)]
pub dump_times: bool,
/// do worst-case dumps
#[arg(short='a', long)]
pub dump_cases: bool,
/// do trace dumps (if supported)
#[arg(short='r', long)]
pub dump_traces: bool,
/// do graph dumps (if supported)
#[arg(short='g', long)]
pub dump_graph: bool,
/// select a task for measurements
#[arg(short='s', long)]
pub select_task: Option<String>,
#[command(subcommand)]
pub command: Commands,
}
#[derive(Subcommand,Clone)]
pub enum Commands {
/// run a single input
Showmap {
/// take this input
#[arg(short, long)]
input: PathBuf,
},
/// start fuzzing campaign
Fuzz {
/// disable heuristic
#[arg(short, long)]
random: bool,
/// seed for randomness
#[arg(short, long)]
seed: Option<u64>,
/// runtime in seconds
#[arg(short, long)]
time: Option<u64>,
}
}
pub fn set_env_from_config(kernel : &PathBuf, path : &PathBuf) {
let is_csv = path.as_path().extension().map_or(false, |x| x=="csv");
if !is_csv {
let lines = std::fs::read_to_string(path).expect("Config file not found");
let lines = lines.lines().filter(
|x| x.len()>0
);
for l in lines {
let pair = l.split_once('=').expect("Non VAR=VAL line in config");
std::env::set_var(pair.0, pair.1);
}
} else {
let mut reader = csv::Reader::from_path(path).expect("CSV read from config failed");
let p = kernel.as_path();
let stem = p.file_stem().expect("Kernel filename error").to_str().unwrap();
for r in reader.records() {
let rec = r.expect("CSV entry error");
if stem == &rec[0] {
std::env::set_var("FUZZ_MAIN", &rec[1]);
std::env::set_var("FUZZ_INPUT", &rec[2]);
std::env::set_var("FUZZ_INPUT_LEN", &rec[3]);
std::env::set_var("BREAKPOINT", &rec[4]);
break;
}
}
}
}
pub fn get_interrupt_config(kernel : &PathBuf, path : &PathBuf) -> Vec<(usize,u32)>{
let is_csv = path.as_path().extension().map_or(false, |x| x=="csv");
if !is_csv {
panic!("Interrupt config must be inside a CSV file");
} else {
let mut reader = csv::Reader::from_path(path).expect("CSV read from config failed");
let p = kernel.as_path();
let stem = p.file_stem().expect("Kernel filename error").to_str().unwrap();
for r in reader.records() {
let rec = r.expect("CSV entry error");
if stem == &rec[0] {
let ret = rec[6].split(';').filter(|x| x != &"").map(|x| {
let pair = x.split_once('#').expect("Interrupt config error");
(pair.0.parse().expect("Interrupt config error"), pair.1.parse().expect("Interrupt config error"))
}).collect();
println!("Interrupt config {:?}", ret);
return ret;
}
}
}
return Vec::new();
}
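For illustration, the plain-text branch of set_env_from_config above treats every non-empty line of the config file as VAR=VAL and exports it into the environment. A minimal sketch of just that behaviour, detached from the CLI (the variable values here are hypothetical examples):

use std::env;

// Sketch of the non-CSV config path: each non-empty line is split once on
// '=' and exported, matching the "Non VAR=VAL line in config" rule above.
fn apply_env_config(contents: &str) {
    for line in contents.lines().filter(|l| !l.is_empty()) {
        let (var, val) = line.split_once('=').expect("Non VAR=VAL line in config");
        env::set_var(var, val);
    }
}

fn main() {
    apply_env_config("FUZZ_MAIN=main_waters\nFUZZ_INPUT_LEN=4096");
    assert_eq!(env::var("FUZZ_MAIN").unwrap(), "main_waters");
    assert_eq!(env::var("FUZZ_INPUT_LEN").unwrap(), "4096");
}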


@ -1,49 +1,43 @@
use hashbrown::HashMap; use hashbrown::{hash_map::Entry, HashMap};
use libafl_bolts::Named;
use libafl::{ use libafl::{
executors::ExitKind, bolts::{
observers::Observer, current_nanos,
rands::StdRand,
tuples::{tuple_list},
},
executors::{ExitKind},
fuzzer::{StdFuzzer},
inputs::{BytesInput, HasTargetBytes},
observers::{Observer,VariableMapObserver},
state::{StdState, HasNamedMetadata},
Error, Error,
common::HasNamedMetadata, observers::ObserversTuple, prelude::UsesInput, impl_serdeany,
observers::ObserversTuple, prelude::UsesInput,
}; };
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use std::{fs::OpenOptions, io::Write}; use std::{cell::UnsafeCell, cmp::max, env, fs::OpenOptions, io::Write, time::Instant};
use libafl::bolts::tuples::Named;
use libafl_qemu::{
emu,
emu::Emulator,
executor::QemuExecutor,
helper::{QemuHelper, QemuHelperTuple, QemuInstrumentationFilter},
};
use libafl::events::EventFirer; use libafl::events::EventFirer;
use libafl::state::MaybeHasClientPerfMonitor; use libafl::state::HasClientPerfMonitor;
use libafl::prelude::State; use libafl::inputs::Input;
use libafl::feedbacks::Feedback; use libafl::feedbacks::Feedback;
use libafl::SerdeAny; use libafl::SerdeAny;
use libafl::common::HasMetadata; use libafl::state::HasMetadata;
use libafl::corpus::testcase::Testcase; use libafl::corpus::testcase::Testcase;
use core::{fmt::Debug, time::Duration}; use core::{fmt::Debug, time::Duration};
// use libafl::feedbacks::FeedbackState;
// use libafl::state::HasFeedbackStates;
use libafl::bolts::tuples::MatchName;
use std::time::{SystemTime, UNIX_EPOCH}; use std::time::{SystemTime, UNIX_EPOCH};
use std::path::PathBuf;
use std::borrow::Cow;
use crate::systemstate::observers::QemuSystemStateObserver;
pub static mut FUZZ_START_TIMESTAMP : SystemTime = UNIX_EPOCH; pub static mut FUZZ_START_TIMESTAMP : SystemTime = UNIX_EPOCH;
pub const QEMU_ICOUNT_SHIFT : u32 = 5;
pub const QEMU_ISNS_PER_SEC : u32 = u32::pow(10, 9) / u32::pow(2, QEMU_ICOUNT_SHIFT);
pub const QEMU_ISNS_PER_USEC : u32 = QEMU_ISNS_PER_SEC / 1000000;
pub const _QEMU_NS_PER_ISN : u32 = 1 << QEMU_ICOUNT_SHIFT;
pub const _TARGET_SYSCLK_FREQ : u32 = 25 * 1000 * 1000;
pub const _TARGET_MHZ_PER_MIPS : f32 = _TARGET_SYSCLK_FREQ as f32 / QEMU_ISNS_PER_SEC as f32;
pub const _TARGET_MIPS_PER_MHZ : f32 = QEMU_ISNS_PER_SEC as f32 / _TARGET_SYSCLK_FREQ as f32;
pub const _TARGET_SYSCLK_PER_QEMU_SEC : u32 = (_TARGET_SYSCLK_FREQ as f32 * _TARGET_MIPS_PER_MHZ) as u32;
pub const _QEMU_SYSCLK_PER_TARGET_SEC : u32 = (_TARGET_SYSCLK_FREQ as f32 * _TARGET_MHZ_PER_MIPS) as u32;
pub fn tick_to_time(ticks: u64) -> Duration {
Duration::from_nanos((ticks << QEMU_ICOUNT_SHIFT) as u64)
}
pub fn tick_to_ms(ticks: u64) -> f32 {
(Duration::from_nanos(ticks << QEMU_ICOUNT_SHIFT).as_micros() as f32/10.0).round()/100.0
}
//========== Metadata //========== Metadata
#[derive(Debug, SerdeAny, Serialize, Deserialize)] #[derive(Debug, SerdeAny, Serialize, Deserialize)]
pub struct QemuIcountMetadata { pub struct QemuIcountMetadata {
@ -54,7 +48,7 @@ pub struct QemuIcountMetadata {
#[derive(Debug, Serialize, Deserialize, SerdeAny)] #[derive(Debug, Serialize, Deserialize, SerdeAny)]
pub struct MaxIcountMetadata { pub struct MaxIcountMetadata {
pub max_icount_seen: u64, pub max_icount_seen: u64,
pub name: Cow<'static, str>, pub name: String,
} }
// impl FeedbackState for MaxIcountMetadata // impl FeedbackState for MaxIcountMetadata
@ -68,8 +62,8 @@ pub struct MaxIcountMetadata {
impl Named for MaxIcountMetadata impl Named for MaxIcountMetadata
{ {
#[inline] #[inline]
fn name(&self) -> &Cow<'static, str> { fn name(&self) -> &str {
&self.name self.name.as_str()
} }
} }
@ -80,7 +74,7 @@ impl MaxIcountMetadata
pub fn new(name: &'static str) -> Self { pub fn new(name: &'static str) -> Self {
Self { Self {
max_icount_seen: 0, max_icount_seen: 0,
name: Cow::from(name), name: name.to_string(),
} }
} }
} }
@ -100,21 +94,19 @@ pub struct IcHist (pub Vec<(u64, u128)>, pub (u64,u128));
/// A simple observer, just overlooking the runtime of the target. /// A simple observer, just overlooking the runtime of the target.
#[derive(Serialize, Deserialize, Debug, Clone)] #[derive(Serialize, Deserialize, Debug, Clone)]
pub struct QemuClockObserver { pub struct QemuClockObserver {
name: Cow<'static, str>, name: String,
start_tick: u64, start_tick: u64,
end_tick: u64, end_tick: u64,
dump_path: Option<PathBuf>
} }
impl QemuClockObserver { impl QemuClockObserver {
/// Creates a new [`QemuClockObserver`] with the given name. /// Creates a new [`QemuClockObserver`] with the given name.
#[must_use] #[must_use]
pub fn new(name: &'static str, dump_path: Option<PathBuf>) -> Self { pub fn new(name: &'static str) -> Self {
Self { Self {
name: Cow::from(name), name: name.to_string(),
start_tick: 0, start_tick: 0,
end_tick: 0, end_tick: 0,
dump_path
} }
} }
@ -143,42 +135,37 @@ where
} }
fn post_exec(&mut self, _state: &mut S, _input: &S::Input, _exit_kind: &ExitKind) -> Result<(), Error> { fn post_exec(&mut self, _state: &mut S, _input: &S::Input, _exit_kind: &ExitKind) -> Result<(), Error> {
unsafe { self.end_tick = libafl_qemu::sys::icount_get_raw() }; unsafe { self.end_tick = emu::icount_get_raw() };
if let Some(td) = &self.dump_path { // println!("clock post {}", self.end_tick);
// println!("clock post {}", self.end_tick); // println!("Number of Ticks: {} <- {} {}",self.end_tick - self.start_tick, self.end_tick, self.start_tick);
// println!("Number of Ticks: {} <- {} {}",self.end_tick - self.start_tick, self.end_tick, self.start_tick); let metadata =_state.metadata_mut();
let metadata =_state.metadata_map_mut(); let hist = metadata.get_mut::<IcHist>();
let hist = metadata.get_mut::<IcHist>(); let timestamp = SystemTime::now().duration_since(unsafe {FUZZ_START_TIMESTAMP}).unwrap().as_millis();
let timestamp = SystemTime::now().duration_since(unsafe {FUZZ_START_TIMESTAMP}).unwrap().as_millis(); match hist {
match hist { None => {
Option::None => { metadata.insert(IcHist(vec![(self.end_tick - self.start_tick, timestamp)],
#[cfg(not(feature="trace_job_response_times"))] (self.end_tick - self.start_tick, timestamp)));
{ }
metadata.insert(IcHist(vec![(self.end_tick - self.start_tick, timestamp)], Some(v) => {
(self.end_tick - self.start_tick, timestamp))); v.0.push((self.end_tick - self.start_tick, timestamp));
} if (v.1.0 < self.end_tick-self.start_tick) {
#[cfg(feature="trace_job_response_times")] v.1 = (self.end_tick - self.start_tick, timestamp);
metadata.insert(IcHist(vec![],(0,timestamp)));
} }
Some(v) => { if v.0.len() >= 100 {
#[cfg(not(feature="trace_job_response_times"))] if let Ok(td) = env::var("TIME_DUMP") {
{
v.0.push((self.end_tick - self.start_tick, timestamp));
if v.1.0 < self.end_tick-self.start_tick {
v.1 = (self.end_tick - self.start_tick, timestamp);
}
}
if v.0.len() >= 100 {
let mut file = OpenOptions::new() let mut file = OpenOptions::new()
.read(true) .read(true)
.write(true) .write(true)
.create(true) .create(true)
.append(true) .append(true)
.open(td).expect("Could not open timedump"); .open(td).expect("Could not open timedump");
let newv : Vec<(u64, u128)> = Vec::with_capacity(110); let newv : Vec<(u64, u128)> = Vec::with_capacity(100);
for i in std::mem::replace(&mut v.0, newv).into_iter() { for i in std::mem::replace(&mut v.0, newv).into_iter() {
writeln!(file, "{},{}", i.0, i.1).expect("Write to dump failed"); writeln!(file, "{},{}", i.0, i.1).expect("Write to dump failed");
} }
} else {
// If we don't write out values we don't need to remember them at all
v.0.clear();
} }
} }
} }
@ -189,7 +176,7 @@ where
impl Named for QemuClockObserver { impl Named for QemuClockObserver {
#[inline] #[inline]
fn name(&self) -> &Cow<'static, str> { fn name(&self) -> &str {
&self.name &self.name
} }
} }
@ -197,10 +184,9 @@ impl Named for QemuClockObserver {
impl Default for QemuClockObserver { impl Default for QemuClockObserver {
fn default() -> Self { fn default() -> Self {
Self { Self {
name: Cow::from(String::from("clock")), name: String::from("clock"),
start_tick: 0, start_tick: 0,
end_tick: 0, end_tick: 0,
dump_path: None
} }
} }
} }
@ -212,14 +198,12 @@ impl Default for QemuClockObserver {
#[derive(Serialize, Deserialize, Clone, Debug)] #[derive(Serialize, Deserialize, Clone, Debug)]
pub struct ClockTimeFeedback { pub struct ClockTimeFeedback {
exec_time: Option<Duration>, exec_time: Option<Duration>,
select_task: Option<String>, name: String,
name: Cow<'static, str>,
} }
impl<S> Feedback<S> for ClockTimeFeedback impl<S> Feedback<S> for ClockTimeFeedback
where where
S: State + UsesInput + MaybeHasClientPerfMonitor + HasMetadata, S: UsesInput + HasClientPerfMonitor + HasMetadata,
<S as UsesInput>::Input: Default
{ {
#[allow(clippy::wrong_self_convention)] #[allow(clippy::wrong_self_convention)]
fn is_interesting<EM, OT>( fn is_interesting<EM, OT>(
@ -234,27 +218,17 @@ where
EM: EventFirer<State = S>, EM: EventFirer<State = S>,
OT: ObserversTuple<S>, OT: ObserversTuple<S>,
{ {
#[cfg(feature="trace_job_response_times")]
{
if self.select_task.is_some() {
let observer = observers.match_name::<QemuSystemStateObserver<S::Input>>("systemstate").unwrap();
self.exec_time = Some(Duration::from_nanos(observer.last_runtime()));
return Ok(false)
}
}
// TODO Replace with match_name_type when stable // TODO Replace with match_name_type when stable
let observer = observers.match_name::<QemuClockObserver>(self.name()).unwrap(); let observer = observers.match_name::<QemuClockObserver>(self.name()).unwrap();
self.exec_time = Some(Duration::from_nanos(observer.last_runtime())); self.exec_time = Some(Duration::from_nanos(observer.last_runtime() << 4)); // Assume a somewhat realistic multiplier of clock, it does not matter
Ok(false) Ok(false)
} }
/// Append to the testcase the generated metadata in case of a new corpus item /// Append to the testcase the generated metadata in case of a new corpus item
#[inline] #[inline]
fn append_metadata<EM, OT>( fn append_metadata(
&mut self, &mut self,
_state: &mut S, _state: &mut S,
_manager: &mut EM,
_observers: &OT,
testcase: &mut Testcase<S::Input>, testcase: &mut Testcase<S::Input>,
) -> Result<(), Error> { ) -> Result<(), Error> {
*testcase.exec_time_mut() = self.exec_time; *testcase.exec_time_mut() = self.exec_time;
@ -272,29 +246,27 @@ where
impl Named for ClockTimeFeedback { impl Named for ClockTimeFeedback {
#[inline] #[inline]
fn name(&self) -> &Cow<'static, str> { fn name(&self) -> &str {
&self.name self.name.as_str()
} }
} }
impl ClockTimeFeedback { impl ClockTimeFeedback {
/// Creates a new [`ClockFeedback`], deciding if the value of a [`QemuClockObserver`] with the given `name` of a run is interesting. /// Creates a new [`ClockFeedback`], deciding if the value of a [`QemuClockObserver`] with the given `name` of a run is interesting.
#[must_use] #[must_use]
pub fn new(name: &'static str, select_task: Option<String>) -> Self { pub fn new(name: &'static str) -> Self {
Self { Self {
exec_time: None, exec_time: None,
select_task: select_task, name: name.to_string(),
name: Cow::from(name.to_string()),
} }
} }
/// Creates a new [`ClockFeedback`], deciding if the given [`QemuClockObserver`] value of a run is interesting. /// Creates a new [`ClockFeedback`], deciding if the given [`QemuClockObserver`] value of a run is interesting.
#[must_use] #[must_use]
pub fn new_with_observer(observer: &QemuClockObserver, select_task: &Option<String>) -> Self { pub fn new_with_observer(observer: &QemuClockObserver) -> Self {
Self { Self {
exec_time: None, exec_time: None,
select_task: select_task.clone(), name: observer.name().to_string(),
name: observer.name().clone(),
} }
} }
} }
@ -302,12 +274,12 @@ impl ClockTimeFeedback {
/// A [`Feedback`] rewarding increasing the execution cycles on Qemu. /// A [`Feedback`] rewarding increasing the execution cycles on Qemu.
#[derive(Debug)] #[derive(Debug)]
pub struct QemuClockIncreaseFeedback { pub struct QemuClockIncreaseFeedback {
name: Cow<'static, str>, name: String,
} }
impl<S> Feedback<S> for QemuClockIncreaseFeedback impl<S> Feedback<S> for QemuClockIncreaseFeedback
where where
S: State + UsesInput + HasNamedMetadata + MaybeHasClientPerfMonitor + Debug, S: UsesInput + HasNamedMetadata + HasClientPerfMonitor + Debug,
{ {
fn is_interesting<EM, OT>( fn is_interesting<EM, OT>(
&mut self, &mut self,
@ -324,7 +296,7 @@ where
let observer = _observers.match_name::<QemuClockObserver>("clock") let observer = _observers.match_name::<QemuClockObserver>("clock")
.expect("QemuClockObserver not found"); .expect("QemuClockObserver not found");
let clock_state = state let clock_state = state
.named_metadata_map_mut() .named_metadata_mut()
.get_mut::<MaxIcountMetadata>(&self.name) .get_mut::<MaxIcountMetadata>(&self.name)
.unwrap(); .unwrap();
if observer.last_runtime() > clock_state.max_icount_seen { if observer.last_runtime() > clock_state.max_icount_seen {
@ -337,7 +309,7 @@ where
/// Append to the testcase the generated metadata in case of a new corpus item /// Append to the testcase the generated metadata in case of a new corpus item
#[inline] #[inline]
fn append_metadata<EM, OT>(&mut self, _state: &mut S, _manager: &mut EM, _observers: &OT, _testcase: &mut Testcase<S::Input>) -> Result<(), Error> { fn append_metadata(&mut self, _state: &mut S, testcase: &mut Testcase<S::Input>) -> Result<(), Error> {
// testcase.metadata_mut().insert(QemuIcountMetadata{runtime: self.last_runtime}); // testcase.metadata_mut().insert(QemuIcountMetadata{runtime: self.last_runtime});
Ok(()) Ok(())
} }
@ -352,7 +324,7 @@ where
impl Named for QemuClockIncreaseFeedback { impl Named for QemuClockIncreaseFeedback {
#[inline] #[inline]
fn name(&self) -> &Cow<'static, str> { fn name(&self) -> &str {
&self.name &self.name
} }
} }
@ -361,7 +333,7 @@ impl QemuClockIncreaseFeedback {
/// Creates a new [`HitFeedback`] /// Creates a new [`HitFeedback`]
#[must_use] #[must_use]
pub fn new(name: &'static str) -> Self { pub fn new(name: &'static str) -> Self {
Self {name: Cow::from(String::from(name))} Self {name: String::from(name)}
} }
} }
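As a sanity check of the icount arithmetic above: one side of this diff defines QEMU_ICOUNT_SHIFT = 5, so each guest instruction advances the virtual clock by 2^5 = 32 ns, i.e. roughly 31.25 million instructions per guest second (fuzzer.sh elsewhere uses shift=4, i.e. 16 ns per instruction). A small sketch reproducing only the constants and tick_to_time visible in the diff:

use std::time::Duration;

// Mirrors the tick/time helpers from one side of the diff above; the asserts
// are only a sanity check of the shift-based conversion.
const QEMU_ICOUNT_SHIFT: u32 = 5;
const QEMU_ISNS_PER_SEC: u32 = u32::pow(10, 9) / u32::pow(2, QEMU_ICOUNT_SHIFT);

fn tick_to_time(ticks: u64) -> Duration {
    Duration::from_nanos(ticks << QEMU_ICOUNT_SHIFT)
}

fn main() {
    assert_eq!(QEMU_ISNS_PER_SEC, 31_250_000);
    // one million instructions correspond to 32 ms of guest time
    assert_eq!(tick_to_time(1_000_000), Duration::from_millis(32));
}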

File diff suppressed because it is too large

View File

@ -1,8 +1,13 @@
+ #![feature(is_sorted)]
#[cfg(target_os = "linux")]
mod fuzzer;
#[cfg(target_os = "linux")]
- pub mod time;
+ mod clock;
+ #[cfg(target_os = "linux")]
+ mod qemustate;
#[cfg(target_os = "linux")]
pub mod systemstate;
#[cfg(target_os = "linux")]
- mod cli;
+ mod mutational;
+ #[cfg(target_os = "linux")]
+ mod worst;


@ -1,12 +1,17 @@
+ #![feature(is_sorted)]
//! A libfuzzer-like fuzzer using qemu for binary-only coverage
#[cfg(target_os = "linux")]
mod fuzzer;
#[cfg(target_os = "linux")]
- mod time;
+ mod clock;
+ #[cfg(target_os = "linux")]
+ mod qemustate;
#[cfg(target_os = "linux")]
mod systemstate;
#[cfg(target_os = "linux")]
- mod cli;
+ mod worst;
+ #[cfg(target_os = "linux")]
+ mod mutational;
#[cfg(target_os = "linux")]
pub fn main() {


@ -0,0 +1,240 @@
//! The [`MutationalStage`] is the default stage used during fuzzing.
//! For the current input, it will perform a range of random mutations, and then run them in the executor.
use core::marker::PhantomData;
use std::cmp::{max, min};
use libafl::{
bolts::rands::Rand,
corpus::{Corpus, self},
fuzzer::Evaluator,
mark_feature_time,
stages::{Stage},
start_timer,
state::{HasClientPerfMonitor, HasCorpus, HasRand, UsesState, HasMetadata},
Error, prelude::{HasBytesVec, UsesInput, new_hash_feedback, StdRand, RandomSeed, MutationResult, Mutator},
};
use crate::{systemstate::{FreeRTOSSystemStateMetadata, RefinedFreeRTOSSystemState}, fuzzer::DO_NUM_INTERRUPT, clock::IcHist};
pub const MINIMUM_INTER_ARRIVAL_TIME : u32 = 700 * 1000 * (1 << 4);
//======================= Custom mutator
/// The default mutational stage
#[derive(Clone, Debug, Default)]
pub struct MyStateStage<E, EM, Z> {
#[allow(clippy::type_complexity)]
phantom: PhantomData<(E, EM, Z)>,
}
impl<E, EM, Z> MyStateStage<E, EM, Z>
where
E: UsesState<State = Z::State>,
EM: UsesState<State = Z::State>,
Z: Evaluator<E, EM>,
Z::State: HasClientPerfMonitor + HasCorpus + HasRand,
{
pub fn new() -> Self {
Self { phantom: PhantomData }
}
}
impl<E, EM, Z> Stage<E, EM, Z> for MyStateStage<E, EM, Z>
where
E: UsesState<State = Z::State>,
EM: UsesState<State = Z::State>,
Z: Evaluator<E, EM>,
Z::State: HasClientPerfMonitor + HasCorpus + HasRand + HasMetadata,
<Z::State as UsesInput>::Input: HasBytesVec
{
fn perform(
&mut self,
fuzzer: &mut Z,
executor: &mut E,
state: &mut Self::State,
manager: &mut EM,
corpus_idx: usize,
) -> Result<(), Error> {
let mut _input = state
.corpus()
.get(corpus_idx)?
.borrow_mut().clone();
let mut newinput = _input.input_mut().as_mut().unwrap().clone();
// let mut tmpinput = _input.input_mut().as_mut().unwrap().clone();
let mut do_rerun = false;
{
// need our own random generator, because of borrowing rules
let mut myrand = StdRand::new();
let mut target_bytes : Vec<u8> = vec![];
{
let input = _input.input_mut().as_ref().unwrap();
let tmp = &mut state.rand_mut();
myrand.set_seed(tmp.next());
target_bytes = input.bytes().to_vec();
}
// produce a slice of absolute interrupt times
let mut interrupt_offsets : [u32; 32] = [0u32; 32];
let mut num_interrupts : usize = 0;
{
let mut start_tick : u32 = 0;
for i in 0..DO_NUM_INTERRUPT {
let mut t : [u8; 4] = [0,0,0,0];
if target_bytes.len() > (i+1)*4 {
for j in 0 as usize..4 as usize {
t[j]=target_bytes[i*4+j];
}
if i == 0 || true {
start_tick = u32::from_le_bytes(t);
} else {
start_tick = u32::saturating_add(start_tick,max(MINIMUM_INTER_ARRIVAL_TIME,u32::from_le_bytes(t)));
}
interrupt_offsets[i] = start_tick;
num_interrupts = i+1;
}
}
}
interrupt_offsets.sort();
// println!("Vor Mutator: {:?}", interrupt_offsets[0..num_interrupts].to_vec());
// let num_i = min(target_bytes.len() / 4, DO_NUM_INTERRUPT);
let mut suffix = target_bytes.split_off(4 * num_interrupts);
let mut prefix : Vec<[u8; 4]> = vec![];
// let mut suffix : Vec<u8> = vec![];
#[cfg(feature = "feed_systemtrace")]
{
let tmp = _input.metadata().get::<FreeRTOSSystemStateMetadata>();
if tmp.is_some() {
let trace = tmp.expect("FreeRTOSSystemStateMetadata not found");
// calculate hits and identify snippets
let mut last_m = false;
let mut marks : Vec<(&RefinedFreeRTOSSystemState, usize, usize)>= vec![]; // 1: got interrupted, 2: interrupt handler
for i in 0..trace.inner.len() {
let curr = &trace.inner[i];
let m = interrupt_offsets[0..num_interrupts].iter().any(|x| (curr.start_tick..curr.end_tick).contains(&(*x as u64)));
if m {
marks.push((curr, i, 1));
// println!("1: {}",curr.current_task.task_name);
} else if last_m {
marks.push((curr, i, 2));
// println!("2: {}",curr.current_task.task_name);
} else {
marks.push((curr, i, 0));
}
last_m = m;
}
for i in 0..num_interrupts {
// bounds based on minimum inter-arrival time
let mut lb = 0;
let mut ub : u32 = marks[marks.len()-1].0.end_tick.try_into().expect("ticks > u32");
if i > 0 {
lb = u32::saturating_add(interrupt_offsets[i-1],MINIMUM_INTER_ARRIVAL_TIME);
}
if i < num_interrupts-1 {
ub = u32::saturating_sub(interrupt_offsets[i+1],MINIMUM_INTER_ARRIVAL_TIME);
}
// get old hit and handler
let old_hit = marks.iter().filter(
|x| x.0.start_tick < (interrupt_offsets[i] as u64) && (interrupt_offsets[i] as u64) < x.0.end_tick
).next();
let old_handler = match old_hit {
Some(s) => if s.1 < num_interrupts-1 && s.1 < marks.len()-1 {
Some(marks[s.1+1])
} else {None},
None => None
};
// find reachable alternatives
let alternatives : Vec<_> = marks.iter().filter(|x|
x.2 != 2 &&
(
x.0.start_tick < (lb as u64) && (lb as u64) < x.0.end_tick
|| x.0.start_tick < (ub as u64) && (ub as u64) < x.0.end_tick )
).collect();
// in case there are no alternatives
if alternatives.len() == 0 {
if old_hit.is_none() {
// choose something random
let untouched : Vec<_> = marks.iter().filter(
|x| x.2 == 0
).collect();
if untouched.len() > 0 {
let tmp = interrupt_offsets[i];
let choice = myrand.choose(untouched);
interrupt_offsets[i] = myrand.between(choice.0.start_tick, choice.0.end_tick)
.try_into().expect("tick > u32");
do_rerun = true;
}
// println!("no alternatives, choose random i: {} {} -> {}",i,tmp,interrupt_offsets[i]);
continue;
} else {
// do nothing
// println!("no alternatives, do nothing i: {} {}",i,interrupt_offsets[i]);
continue;
}
}
let replacement = myrand.choose(alternatives);
if (old_hit.map_or(false, |x| x == replacement)) {
// use the old value
// println!("chose old value, do nothing i: {} {}",i,interrupt_offsets[i]);
continue;
} else {
let extra = if (old_hit.map_or(false, |x| x.1 < replacement.1)) {
// move further back, respect old_handler
old_handler.map_or(0, |x| x.0.end_tick - x.0.start_tick)
} else { 0 };
let tmp = interrupt_offsets[i];
interrupt_offsets[i] = (myrand.between(replacement.0.start_tick,
replacement.0.end_tick) + extra).try_into().expect("ticks > u32");
// println!("chose new alternative, i: {} {} -> {}",i,tmp, interrupt_offsets[i]);
do_rerun = true;
}
}
let mut numbers : Vec<u32> = interrupt_offsets[0..num_interrupts].to_vec();
numbers.sort();
// println!("Mutator: {:?}", numbers);
let mut start : u32 = 0;
// for i in 0..numbers.len() {
// let tmp = numbers[i];
// numbers[i] = numbers[i]-start;
// start = tmp;
// }
for i in 0..numbers.len() {
prefix.push(u32::to_le_bytes(numbers[i]));
}
}
}
#[cfg(not(feature = "feed_systemtrace"))]
{
let metadata = state.metadata();
let hist = metadata.get::<IcHist>().unwrap();
let maxtick : u64 = hist.1.0;
// let maxtick : u64 = (_input.exec_time().expect("No duration found").as_nanos() >> 4).try_into().unwrap();
let mut numbers : Vec<u32> = vec![];
for i in 0..num_interrupts {
prefix.push(u32::to_le_bytes(myrand.between(0, min(maxtick, u32::MAX as u64)).try_into().expect("ticks > u32")));
}
}
let mut n : Vec<u8> = vec![];
n = [prefix.concat(), suffix].concat();
newinput.bytes_mut().clear();
newinput.bytes_mut().append(&mut n);
}
// InterruptShifterMutator::mutate(&mut mymut, state, &mut input, 0)?;
if do_rerun {
let (_, corpus_idx) = fuzzer.evaluate_input(state, executor, manager, newinput)?;
}
Ok(())
}
}
impl<E, EM, Z> UsesState for MyStateStage<E, EM, Z>
where
E: UsesState<State = Z::State>,
EM: UsesState<State = Z::State>,
Z: Evaluator<E, EM>,
Z::State: HasClientPerfMonitor + HasCorpus + HasRand,
{
type State = Z::State;
}
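For orientation: MyStateStage above reads the first DO_NUM_INTERRUPT * 4 bytes of an input as little-endian u32 interrupt ticks (and, given the `i == 0 || true` shortcut, treats each value as an absolute tick rather than an inter-arrival delta), mutates only that prefix, and re-evaluates the input. A minimal decoding sketch of that prefix layout; the cap of 32 mirrors the fixed [u32; 32] buffer above, the real bound is DO_NUM_INTERRUPT from fuzzer.rs, and the example bytes are arbitrary:

// Sketch of the input prefix used by the stage above: each 4-byte chunk is a
// little-endian u32 interrupt tick; decoding stops at the same length bound.
fn decode_interrupt_offsets(input: &[u8], max_interrupts: usize) -> Vec<u32> {
    let mut offsets = Vec::new();
    for i in 0..max_interrupts {
        if input.len() <= (i + 1) * 4 {
            break; // same bound check as `target_bytes.len() > (i+1)*4` above
        }
        let chunk: [u8; 4] = input[i * 4..(i + 1) * 4].try_into().unwrap();
        offsets.push(u32::from_le_bytes(chunk));
    }
    offsets.sort_unstable();
    offsets
}

fn main() {
    // two encoded ticks (10000 and 1000, little-endian) followed by a payload byte
    let input = [16u8, 39, 0, 0, 232, 3, 0, 0, 0xAA];
    assert_eq!(decode_interrupt_offsets(&input, 32), vec![1000, 10000]);
}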


@ -1,19 +1,23 @@
use libafl::prelude::UsesInput; use libafl::prelude::UsesInput;
use libafl_qemu::sys::CPUArchState; use libafl_qemu::CPUArchState;
use libafl_qemu::FastSnapshotPtr; use libafl_qemu::Emulator;
use libafl_qemu::FastSnapshot;
use libafl_qemu::QemuExecutor;
use libafl_qemu::QemuHelper; use libafl_qemu::QemuHelper;
use libafl_qemu::QemuHelperTuple; use libafl_qemu::QemuHelperTuple;
use libafl::executors::ExitKind; use libafl::{executors::ExitKind, inputs::Input, observers::ObserversTuple, state::HasMetadata};
use libafl_qemu::QemuHooks; use libafl_qemu::QemuHooks;
use libafl_qemu::{
emu,
};
// TODO be thread-safe maybe with https://amanieu.github.io/thread_local-rs/thread_local/index.html // TODO be thread-safe maybe with https://amanieu.github.io/thread_local-rs/thread_local/index.html
#[derive(Debug)] #[derive(Debug)]
pub struct QemuStateRestoreHelper { pub struct QemuStateRestoreHelper {
#[allow(unused)]
has_snapshot: bool, has_snapshot: bool,
#[allow(unused)] use_snapshot: bool,
saved_cpu_states: Vec<CPUArchState>, saved_cpu_states: Vec<CPUArchState>,
fastsnap: Option<FastSnapshotPtr> fastsnap: Option<FastSnapshot>
} }
impl QemuStateRestoreHelper { impl QemuStateRestoreHelper {
@ -21,16 +25,11 @@ impl QemuStateRestoreHelper {
pub fn new() -> Self { pub fn new() -> Self {
Self { Self {
has_snapshot: false, has_snapshot: false,
use_snapshot: true,
saved_cpu_states: vec![], saved_cpu_states: vec![],
fastsnap: None fastsnap: None
} }
} }
#[allow(unused)]
pub fn with_fast(fastsnap: Option<FastSnapshotPtr>) -> Self {
let mut r = Self::new();
r.fastsnap = fastsnap;
r
}
} }
impl Default for QemuStateRestoreHelper { impl Default for QemuStateRestoreHelper {
@ -45,29 +44,29 @@ where
{ {
const HOOKS_DO_SIDE_EFFECTS: bool = true; const HOOKS_DO_SIDE_EFFECTS: bool = true;
fn init_hooks<QT>(&self, _hooks: &QemuHooks<QT, S>) fn init_hooks<QT>(&self, _hooks: &QemuHooks<'_, QT, S>)
where where
QT: QemuHelperTuple<S>, QT: QemuHelperTuple<S>,
{ {
} }
fn first_exec<QT>(&self, _hooks: &QemuHooks<QT, S>) fn first_exec<QT>(&self, _hooks: &QemuHooks<'_, QT, S>)
where where
QT: QemuHelperTuple<S>, QT: QemuHelperTuple<S>,
{ {
} }
fn post_exec<OT>(&mut self, _emulator: libafl_qemu::Qemu, _input: &S::Input, _observers: &mut OT, _exit_kind: &mut ExitKind) { fn post_exec(&mut self, emulator: &Emulator, _input: &S::Input) {
// unsafe { println!("snapshot post {}",emu::icount_get_raw()) }; // unsafe { println!("snapshot post {}",emu::icount_get_raw()) };
} }
fn pre_exec(&mut self, emulator: libafl_qemu::Qemu, _input: &S::Input) { fn pre_exec(&mut self, emulator: &Emulator, _input: &S::Input) {
// only restore in pre-exec, to preserve the post-execution state for inspection // only restore in pre-exec, to preserve the post-execution state for inspection
#[cfg(feature = "snapshot_restore")] #[cfg(feature = "snapshot_restore")]
{ {
#[cfg(feature = "snapshot_fast")] #[cfg(feature = "snapshot_fast")]
match self.fastsnap { match self.fastsnap {
Some(s) => unsafe { emulator.restore_fast_snapshot(s) }, Some(s) => emulator.restore_fast_snapshot(s),
None => {self.fastsnap = Some(emulator.create_fast_snapshot(true));}, None => {self.fastsnap = Some(emulator.create_fast_snapshot(true));},
} }
#[cfg(not(feature = "snapshot_fast"))] #[cfg(not(feature = "snapshot_fast"))]


@ -1,6 +0,0 @@
# System-state heuristics
## Information flow
- ``fuzzer.rs`` resolves symbols and creates ``api_ranges`` and ``isr_ranges``
- ``helpers::QemuSystemStateHelper`` captures a series of ``RawFreeRTOSSystemState``
- ``observers::QemuSystemStateObserver`` divides this into ``ReducedFreeRTOSSystemState`` and ``ExecInterval``, the first contains the raw states and the second contains information about the flow between states
- ``stg::StgFeedback`` builds an stg from the intervals


@ -1,98 +1,95 @@
use libafl::SerdeAny; use libafl::SerdeAny;
use libafl::bolts::ownedref::OwnedSlice;
use libafl::inputs::BytesInput;
use libafl::prelude::UsesInput; use libafl::prelude::UsesInput;
use libafl::common::HasNamedMetadata; use libafl::state::HasNamedMetadata;
use std::path::PathBuf; use std::path::PathBuf;
use crate::time::clock::QemuClockObserver; use crate::clock::QemuClockObserver;
use libafl::corpus::Testcase; use libafl::corpus::Testcase;
use libafl::bolts::tuples::MatchName;
use std::collections::hash_map::DefaultHasher; use std::collections::hash_map::DefaultHasher;
use std::hash::Hasher; use std::hash::Hasher;
use std::hash::Hash; use std::hash::Hash;
use libafl::events::EventFirer; use libafl::events::EventFirer;
use libafl::state::MaybeHasClientPerfMonitor; use libafl::state::HasClientPerfMonitor;
use libafl::prelude::State;
use libafl::feedbacks::Feedback; use libafl::feedbacks::Feedback;
use libafl_bolts::Named; use libafl::bolts::tuples::Named;
use libafl::Error; use libafl::Error;
use hashbrown::HashMap; use hashbrown::HashMap;
use libafl::{executors::ExitKind, observers::ObserversTuple, common::HasMetadata}; use libafl::{executors::ExitKind, inputs::Input, observers::ObserversTuple, state::HasMetadata};
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use super::ExecInterval; use super::RefinedFreeRTOSSystemState;
use super::ReducedFreeRTOSSystemState;
use super::FreeRTOSSystemStateMetadata; use super::FreeRTOSSystemStateMetadata;
use super::observers::QemuSystemStateObserver; use super::observers::QemuSystemStateObserver;
use std::borrow::Cow; use petgraph::prelude::DiGraph;
use petgraph::graph::NodeIndex;
use petgraph::Direction;
use std::cmp::Ordering;
//============================= Feedback //============================= Feedback
/// Shared Metadata for a systemstateFeedback /// Shared Metadata for a systemstateFeedback
#[derive(Debug, Serialize, Deserialize, SerdeAny, Clone)] #[derive(Debug, Serialize, Deserialize, SerdeAny, Clone, Default)]
pub struct SystemStateFeedbackState pub struct SystemStateFeedbackState
{ {
name: Cow<'static, str>,
known_traces: HashMap<u64,(u64,u64,usize)>, // encounters,ticks,length known_traces: HashMap<u64,(u64,u64,usize)>, // encounters,ticks,length
longest: Vec<ReducedFreeRTOSSystemState>, longest: Vec<RefinedFreeRTOSSystemState>,
} }
impl Named for SystemStateFeedbackState impl Named for SystemStateFeedbackState
{ {
#[inline] #[inline]
fn name(&self) -> &Cow<'static, str> { fn name(&self) -> &str {
&self.name "systemstate"
}
}
impl Default for SystemStateFeedbackState
{
fn default() -> Self {
Self {name: Cow::from("systemstate".to_string()), known_traces: HashMap::new(), longest: Vec::new() }
} }
} }
// impl FeedbackState for systemstateFeedbackState
// {
// fn reset(&mut self) -> Result<(), Error> {
// self.longest.clear();
// self.known_traces.clear();
// Ok(())
// }
// }
/// A Feedback reporting novel System-State Transitions. Depends on [`QemuSystemStateObserver`] /// A Feedback reporting novel System-State Transitions. Depends on [`QemuSystemStateObserver`]
#[derive(Serialize, Deserialize, Clone, Debug)] #[derive(Serialize, Deserialize, Clone, Debug, Default)]
pub struct NovelSystemStateFeedback pub struct NovelSystemStateFeedback
{ {
name: Cow<'static, str>, last_trace: Option<Vec<RefinedFreeRTOSSystemState>>,
last_trace: Option<Vec<ReducedFreeRTOSSystemState>>,
// known_traces: HashMap<u64,(u64,usize)>, // known_traces: HashMap<u64,(u64,usize)>,
} }
impl<S> Feedback<S> for NovelSystemStateFeedback impl<S> Feedback<S> for NovelSystemStateFeedback
where where
S: State + UsesInput + MaybeHasClientPerfMonitor + HasNamedMetadata, S: UsesInput + HasClientPerfMonitor + HasNamedMetadata,
S::Input: Default,
{ {
fn is_interesting<EM, OT>( fn is_interesting<EM, OT>(
&mut self, &mut self,
state: &mut S, state: &mut S,
_manager: &mut EM, manager: &mut EM,
_input: &S::Input, input: &S::Input,
observers: &OT, observers: &OT,
_exit_kind: &ExitKind, exit_kind: &ExitKind,
) -> Result<bool, Error> ) -> Result<bool, Error>
where where
EM: EventFirer<State = S>, EM: EventFirer<State = S>,
OT: ObserversTuple<S>, OT: ObserversTuple<S>
S::Input: Default
{ {
let observer : &QemuSystemStateObserver<S::Input> = observers.match_name::<QemuSystemStateObserver<S::Input>>("systemstate") let observer = observers.match_name::<QemuSystemStateObserver>("systemstate")
.expect("QemuSystemStateObserver not found"); .expect("QemuSystemStateObserver not found");
let clock_observer = observers.match_name::<QemuClockObserver>("clocktime") //TODO not fixed let clock_observer = observers.match_name::<QemuClockObserver>("clocktime") //TODO not fixed
.expect("QemuClockObserver not found"); .expect("QemuClockObserver not found");
let feedbackstate = match state let feedbackstate = match state
.named_metadata_map_mut() .named_metadata_mut()
.get_mut::<SystemStateFeedbackState>("systemstate") { .get_mut::<SystemStateFeedbackState>("systemstate") {
Some(s) => s, Some(s) => s,
Option::None => { None => {
let n=SystemStateFeedbackState::default(); let n=SystemStateFeedbackState::default();
state.named_metadata_map_mut().insert("systemstate",n); state.named_metadata_mut().insert(n, "systemstate");
state.named_metadata_map_mut().get_mut::<SystemStateFeedbackState>("systemstate").unwrap() state.named_metadata_mut().get_mut::<SystemStateFeedbackState>("systemstate").unwrap()
} }
}; };
#[cfg(feature = "trace_job_response_times")]
let last_runtime = observer.last_runtime();
#[cfg(not(feature = "trace_job_response_times"))]
let last_runtime = clock_observer.last_runtime();
// let feedbackstate = state // let feedbackstate = state
// .feedback_states_mut() // .feedback_states_mut()
// .match_name_mut::<systemstateFeedbackState>("systemstate") // .match_name_mut::<systemstateFeedbackState>("systemstate")
@ -104,14 +101,14 @@ where
let mut is_novel = false; let mut is_novel = false;
let mut takes_longer = false; let mut takes_longer = false;
match feedbackstate.known_traces.get_mut(&somehash) { match feedbackstate.known_traces.get_mut(&somehash) {
Option::None => { None => {
is_novel = true; is_novel = true;
feedbackstate.known_traces.insert(somehash,(1,last_runtime,observer.last_run.len())); feedbackstate.known_traces.insert(somehash,(1,clock_observer.last_runtime(),observer.last_run.len()));
} }
Some(s) => { Some(s) => {
s.0+=1; s.0+=1;
if s.1 < last_runtime { if s.1 < clock_observer.last_runtime() {
s.1 = last_runtime; s.1 = clock_observer.last_runtime();
takes_longer = true; takes_longer = true;
} }
} }
@ -126,11 +123,11 @@ where
/// Append to the testcase the generated metadata in case of a new corpus item /// Append to the testcase the generated metadata in case of a new corpus item
#[inline] #[inline]
fn append_metadata<EM, OT>(&mut self, _state: &mut S, _manager: &mut EM, _observers: &OT, testcase: &mut Testcase<S::Input>) -> Result<(), Error> { fn append_metadata(&mut self, _state: &mut S, testcase: &mut Testcase<S::Input>) -> Result<(), Error> {
let a = self.last_trace.take(); let a = self.last_trace.take();
match a { match a {
Some(s) => testcase.metadata_map_mut().insert(FreeRTOSSystemStateMetadata::new(s)), Some(s) => testcase.metadata_mut().insert(FreeRTOSSystemStateMetadata::new(s)),
Option::None => (), None => (),
} }
Ok(()) Ok(())
} }
@ -146,92 +143,127 @@ where
impl Named for NovelSystemStateFeedback impl Named for NovelSystemStateFeedback
{ {
#[inline] #[inline]
fn name(&self) -> &Cow<'static, str> { fn name(&self) -> &str {
&self.name "systemstate"
} }
} }
impl Default for NovelSystemStateFeedback //=============================
{
fn default() -> Self { pub fn match_traces(target: &Vec<RefinedFreeRTOSSystemState>, last: &Vec<RefinedFreeRTOSSystemState>) -> bool {
Self {name: Cow::from("NovelSystemStateFeedback".to_string()), last_trace: None } let mut ret = true;
if target.len() > last.len() {return false;}
for i in 0..target.len() {
ret &= target[i].current_task.task_name==last[i].current_task.task_name;
} }
ret
}
pub fn match_traces_name(target: &Vec<String>, last: &Vec<RefinedFreeRTOSSystemState>) -> bool {
let mut ret = true;
if target.len() > last.len() {return false;}
for i in 0..target.len() {
ret &= target[i]==last[i].current_task.task_name;
}
ret
} }
//=========================== Debugging Feedback /// A Feedback reporting novel System-State Transitions. Depends on [`QemuSystemStateObserver`]
/// A [`Feedback`] meant to dump the system-traces for debugging. Depends on [`QemuSystemStateObserver`] #[derive(Serialize, Deserialize, Clone, Debug, Default)]
#[derive(Debug)] pub struct HitSystemStateFeedback
pub struct DumpSystraceFeedback
{ {
name: Cow<'static, str>, target: Option<Vec<String>>,
dumpfile: Option<PathBuf>,
dump_metadata: bool,
last_states: Option<HashMap<u64, ReducedFreeRTOSSystemState>>,
last_trace: Option<Vec<ExecInterval>>,
} }
impl<S> Feedback<S> for DumpSystraceFeedback impl<S> Feedback<S> for HitSystemStateFeedback
where where
S: State + UsesInput + MaybeHasClientPerfMonitor, S: UsesInput + HasClientPerfMonitor,
{ {
fn is_interesting<EM, OT>( fn is_interesting<EM, OT>(
&mut self, &mut self,
_state: &mut S, state: &mut S,
_manager: &mut EM, manager: &mut EM,
_input: &S::Input, input: &S::Input,
observers: &OT, observers: &OT,
_exit_kind: &ExitKind, exit_kind: &ExitKind,
) -> Result<bool, Error> ) -> Result<bool, Error>
where where
EM: EventFirer<State = S>, EM: EventFirer<State = S>,
OT: ObserversTuple<S> OT: ObserversTuple<S>
{ {
if self.dumpfile.is_none() {return Ok(false)}; let observer = observers.match_name::<QemuSystemStateObserver>("systemstate")
let observer = observers.match_name::<QemuSystemStateObserver<S::Input>>("systemstate") .expect("QemuSystemStateObserver not found");
// Do Stuff
match &self.target {
Some(s) => {
// #[cfg(debug_assertions)] eprintln!("Hit systemstate Feedback trigger");
Ok(match_traces_name(s, &observer.last_run))
},
None => Ok(false),
}
}
}
impl Named for HitSystemStateFeedback
{
#[inline]
fn name(&self) -> &str {
"hit_systemstate"
}
}
impl HitSystemStateFeedback {
pub fn new(target: Option<Vec<RefinedFreeRTOSSystemState>>) -> Self {
Self {target: target.map(|x| x.into_iter().map(|y| y.current_task.task_name).collect())}
}
}
//=========================== Debugging Feedback
/// A [`Feedback`] meant to dump the system-traces for debugging. Depends on [`QemuSystemStateObserver`]
#[derive(Debug)]
pub struct DumpSystraceFeedback
{
dumpfile: Option<PathBuf>,
dump_metadata: bool,
last_trace: Option<Vec<RefinedFreeRTOSSystemState>>,
}
impl<S> Feedback<S> for DumpSystraceFeedback
where
S: UsesInput + HasClientPerfMonitor,
{
fn is_interesting<EM, OT>(
&mut self,
state: &mut S,
manager: &mut EM,
input: &S::Input,
observers: &OT,
exit_kind: &ExitKind,
) -> Result<bool, Error>
where
EM: EventFirer<State = S>,
OT: ObserversTuple<S>
{
let observer = observers.match_name::<QemuSystemStateObserver>("systemstate")
.expect("QemuSystemStateObserver not found"); .expect("QemuSystemStateObserver not found");
let names : Vec<String> = observer.last_run.iter().map(|x| x.current_task.task_name.clone()).collect(); let names : Vec<String> = observer.last_run.iter().map(|x| x.current_task.task_name.clone()).collect();
match &self.dumpfile { match &self.dumpfile {
Some(s) => { Some(s) => {
let per_task_metadata = if let Some(worst_instance) = observer.job_instances.iter().filter(|x| Some(&x.name) == observer.select_task.as_ref()).max_by(|a,b| (a.response-a.release).cmp(&(b.response-b.release))) { std::fs::write(s,ron::to_string(&observer.last_run).expect("Error serializing hashmap")).expect("Can not dump to file");
// extract computation time spent in each task and abb
let t : Vec<_> = observer.last_trace.iter().filter(|x| x.start_tick < worst_instance.response && x.end_tick > worst_instance.release ).cloned().collect();
// task_name -> addr -> (count, time)
let mut ret : HashMap<String, HashMap<u32, (usize, usize, u64)>> = HashMap::new();
let mut t2 = t.clone();
t2.sort_by_key(|x| x.get_task_name_unchecked());
t2.chunk_by_mut(|x,y| x.get_task_name_unchecked() == y.get_task_name_unchecked()).for_each(|x| {
x.sort_by_key(|y| y.abb.as_ref().unwrap().start);
x.chunk_by(|y,z| y.abb.as_ref().unwrap().start == z.abb.as_ref().unwrap().start).for_each(|y| {
match ret.get_mut(&y[0].get_task_name_unchecked()) {
Option::None => {
ret.insert(y[0].get_task_name_unchecked(), HashMap::from([(y[0].abb.as_ref().unwrap().start, (y.len(), y.iter().filter(|x| x.is_abb_end()).count(), y.iter().map(|z| z.get_exec_time()).sum::<_>()))]));
}
Some(x) => {
x.insert(y[0].abb.as_ref().unwrap().start, (y.len(), y.iter().filter(|x| x.is_abb_end()).count(), y.iter().map(|z| z.get_exec_time()).sum()));
}
}
});
});
// dbg!(&ret);
ret
} else {HashMap::new()};
std::fs::write(s,ron::to_string(&(&observer.last_trace,&observer.last_states,&observer.job_instances,per_task_metadata)).expect("Error serializing hashmap")).expect("Can not dump to file");
self.dumpfile = None self.dumpfile = None
}, },
Option::None => if self.dump_metadata {println!("{:?}\n{:?}",observer.last_run,names);} None => if !self.dump_metadata {println!("{:?}\n{:?}",observer.last_run,names);}
}; };
// if self.dump_metadata {self.last_trace=Some(observer.last_trace.clone());} if self.dump_metadata {self.last_trace=Some(observer.last_run.clone());}
Ok(false) Ok(!self.dump_metadata)
} }
/// Append to the testcase the generated metadata in case of a new corpus item /// Append to the testcase the generated metadata in case of a new corpus item
#[inline] #[inline]
fn append_metadata<EM, OT>(&mut self, _state: &mut S, _manager: &mut EM, _observers: &OT, _testcase: &mut Testcase<S::Input>) -> Result<(), Error> { fn append_metadata(&mut self, _state: &mut S, testcase: &mut Testcase<S::Input>) -> Result<(), Error> {
if !self.dump_metadata {return Ok(());} if !self.dump_metadata {return Ok(());}
// let a = self.last_trace.take(); let a = self.last_trace.take();
// match a { match a {
// Some(s) => testcase.metadata_map_mut().insert(FreeRTOSSystemStateMetadata::new(s)), Some(s) => testcase.metadata_mut().insert(FreeRTOSSystemStateMetadata::new(s)),
// None => (), None => (),
// } }
Ok(()) Ok(())
} }
@ -246,96 +278,22 @@ where
impl Named for DumpSystraceFeedback impl Named for DumpSystraceFeedback
{ {
#[inline] #[inline]
fn name(&self) -> &Cow<'static, str> { fn name(&self) -> &str {
&self.name "Dumpsystemstate"
} }
} }
impl DumpSystraceFeedback impl DumpSystraceFeedback
{ {
/// Creates a new [`DumpSystraceFeedback`] /// Creates a new [`DumpSystraceFeedback`]
#[allow(unused)]
pub fn new() -> Self {
Self {name: Cow::from("Dumpsystemstate".to_string()), dumpfile: None, dump_metadata: false, last_trace: None, last_states: None }
}
#[allow(unused)]
pub fn with_dump(dumpfile: Option<PathBuf>) -> Self {
Self {name: Cow::from("Dumpsystemstate".to_string()), dumpfile: dumpfile, dump_metadata: false, last_trace: None, last_states: None}
}
#[allow(unused)]
pub fn metadata_only() -> Self {
Self {name: Cow::from("Dumpsystemstate".to_string()), dumpfile: None, dump_metadata: true, last_trace: None, last_states: None}
}
}
#[derive(Debug, Default)]
pub struct SystraceErrorFeedback
{
name: Cow<'static, str>,
dump_case: bool,
max_reports: Option<usize>,
}
impl<S> Feedback<S> for SystraceErrorFeedback
where
S: State + UsesInput + MaybeHasClientPerfMonitor,
{
fn is_interesting<EM, OT>(
&mut self,
_state: &mut S,
_manager: &mut EM,
_input: &S::Input,
observers: &OT,
_exit_kind: &ExitKind,
) -> Result<bool, Error>
where
EM: EventFirer<State = S>,
OT: ObserversTuple<S>
{
#[cfg(feature = "trace_stg")]
{
let observer = observers.match_name::<QemuSystemStateObserver<S::Input>>("systemstate")
.expect("QemuSystemStateObserver not found");
let is_err = (!observer.success || observer.do_report);
if let Some(m) = self.max_reports {
if m <= 0 {return Ok(false);}
if is_err {
self.max_reports = Some(m-1);
}
}
return Ok(self.dump_case&&is_err);
}
#[cfg(not(feature = "trace_stg"))]
{
return Ok(false);
}
}
/// Append to the testcase the generated metadata in case of a new corpus item
#[inline]
fn append_metadata<EM, OT>(&mut self, _state: &mut S, _manager: &mut EM, _observers: &OT, _testcase: &mut Testcase<S::Input>) -> Result<(), Error> {
Ok(())
}
/// Discard the stored metadata in case that the testcase is not added to the corpus
#[inline]
fn discard_metadata(&mut self, _state: &mut S, _input: &S::Input) -> Result<(), Error> {
Ok(())
}
}
impl Named for SystraceErrorFeedback
{
#[inline]
fn name(&self) -> &Cow<'static, str> {
&self.name
}
}
impl SystraceErrorFeedback
{
#[must_use] #[must_use]
pub fn new(dump_case: bool, max_reports: Option<usize>) -> Self { pub fn new() -> Self {
Self {name: Cow::from(String::from("SystraceErrorFeedback")), dump_case, max_reports} Self {dumpfile: None, dump_metadata: false, last_trace: None}
}
pub fn with_dump(dumpfile: Option<PathBuf>) -> Self {
Self {dumpfile: dumpfile, dump_metadata: false, last_trace: None}
}
pub fn metadata_only() -> Self {
Self {dumpfile: None, dump_metadata: true, last_trace: None}
} }
} }
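For reference, NovelSystemStateFeedback above keys known_traces by a hash of the observed state trace and stores (encounters, longest runtime, trace length); a run is reported as interesting when the hash is new or the stored runtime grows. A stripped-down sketch of that bookkeeping, detached from LibAFL and using hypothetical values:

use std::collections::HashMap;

// Minimal model of the known_traces bookkeeping: per trace hash keep
// (encounters, longest runtime seen, trace length) and report an input as
// interesting when the hash is new or the runtime increased.
#[derive(Default)]
struct TraceNovelty {
    known: HashMap<u64, (u64, u64, usize)>,
}

impl TraceNovelty {
    fn is_interesting(&mut self, trace_hash: u64, runtime: u64, trace_len: usize) -> bool {
        match self.known.get_mut(&trace_hash) {
            None => {
                self.known.insert(trace_hash, (1, runtime, trace_len));
                true // novel trace
            }
            Some(entry) => {
                entry.0 += 1;
                if entry.1 < runtime {
                    entry.1 = runtime;
                    true // same trace, but it now takes longer
                } else {
                    false
                }
            }
        }
    }
}

fn main() {
    let mut f = TraceNovelty::default();
    assert!(f.is_interesting(0xabc, 100, 3));
    assert!(!f.is_interesting(0xabc, 90, 3));
    assert!(f.is_interesting(0xabc, 120, 3));
}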


@ -1,7 +1,7 @@
- #![allow(non_camel_case_types,non_snake_case,non_upper_case_globals,deref_nullptr,unused)]
+ #![allow(non_camel_case_types,non_snake_case,non_upper_case_globals,deref_nullptr)]
use serde::{Deserialize, Serialize};
// Manual Types
- use libafl_qemu::Qemu;
+ use libafl_qemu::Emulator;
/*========== Start of generated Code =============*/
pub type char_ptr = ::std::os::raw::c_uint;
@ -88,7 +88,7 @@ pub type TCB_t = tskTCB;
/*========== End of generated Code =============*/
pub trait emu_lookup {
- fn lookup(emu: &Qemu, addr: ::std::os::raw::c_uint) -> Self;
+ fn lookup(emu: &Emulator, addr: ::std::os::raw::c_uint) -> Self;
}
@ -104,7 +104,7 @@ pub enum rtos_struct {
macro_rules! impl_emu_lookup {
($struct_name:ident) => {
impl $crate::systemstate::freertos::emu_lookup for $struct_name {
- fn lookup(emu: &Qemu, addr: ::std::os::raw::c_uint) -> $struct_name {
+ fn lookup(emu: &Emulator, addr: ::std::os::raw::c_uint) -> $struct_name {
let mut tmp : [u8; std::mem::size_of::<$struct_name>()] = [0u8; std::mem::size_of::<$struct_name>()];
unsafe {
emu.read_mem(addr.into(), &mut tmp);


@ -0,0 +1,604 @@
use libafl::SerdeAny;
/// Feedbacks organizing SystemStates as a graph
use libafl::inputs::HasBytesVec;
use libafl::bolts::rands::RandomSeed;
use libafl::bolts::rands::StdRand;
use libafl::mutators::Mutator;
use libafl::mutators::MutationResult;
use libafl::prelude::HasTargetBytes;
use libafl::prelude::UsesInput;
use libafl::state::HasNamedMetadata;
use libafl::state::UsesState;
use core::marker::PhantomData;
use libafl::state::HasCorpus;
use libafl::state::HasSolutions;
use libafl::state::HasRand;
use crate::worst::MaxExecsLenFavFactor;
use libafl::schedulers::MinimizerScheduler;
use libafl::bolts::HasRefCnt;
use libafl::bolts::AsSlice;
use libafl::bolts::ownedref::OwnedSlice;
use libafl::inputs::BytesInput;
use std::path::PathBuf;
use crate::clock::QemuClockObserver;
use libafl::corpus::Testcase;
use libafl::bolts::tuples::MatchName;
use std::collections::hash_map::DefaultHasher;
use std::hash::Hasher;
use std::hash::Hash;
use libafl::events::EventFirer;
use libafl::state::HasClientPerfMonitor;
use libafl::feedbacks::Feedback;
use libafl::bolts::tuples::Named;
use libafl::Error;
use hashbrown::HashMap;
use libafl::{executors::ExitKind, inputs::Input, observers::ObserversTuple, state::HasMetadata};
use serde::{Deserialize, Serialize};
use super::RefinedFreeRTOSSystemState;
use super::FreeRTOSSystemStateMetadata;
use super::observers::QemuSystemStateObserver;
use petgraph::prelude::DiGraph;
use petgraph::graph::NodeIndex;
use petgraph::Direction;
use std::cmp::Ordering;
use libafl::bolts::rands::Rand;
//============================= Data Structures
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Default)]
pub struct VariantTuple
{
pub start_tick: u64,
pub end_tick: u64,
input_counter: u32,
pub input: Vec<u8>, // in the end any kind of input is just bytes, regardless of type and lifetime
}
impl VariantTuple {
fn from(other: &RefinedFreeRTOSSystemState,input: Vec<u8>) -> Self {
VariantTuple{
start_tick: other.start_tick,
end_tick: other.end_tick,
input_counter: other.input_counter,
input: input,
}
}
}
#[derive(Serialize, Deserialize, Clone, Debug, Default)]
pub struct SysGraphNode
{
base: RefinedFreeRTOSSystemState,
pub variants: Vec<VariantTuple>,
}
impl SysGraphNode {
fn from(base: RefinedFreeRTOSSystemState, input: Vec<u8>) -> Self {
SysGraphNode{variants: vec![VariantTuple::from(&base, input)], base:base }
}
/// unites the variants of this value with another, draining the other if the bases are equal
fn unite(&mut self, other: &mut SysGraphNode) -> bool {
if self!=other {return false;}
self.variants.append(&mut other.variants);
self.variants.dedup();
return true;
}
/// add a Variant from a [`RefinedFreeRTOSSystemState`]
fn unite_raw(&mut self, other: &RefinedFreeRTOSSystemState, input: &Vec<u8>) -> bool {
if &self.base!=other {return false;}
self.variants.push(VariantTuple::from(other, input.clone()));
self.variants.dedup();
return true;
}
/// add a Variant from a [`RefinedFreeRTOSSystemState`], if it's interesting
fn unite_interesting(&mut self, other: &RefinedFreeRTOSSystemState, input: &Vec<u8>) -> bool {
if &self.base!=other {return false;}
let interesting =
self.variants.iter().all(|x| x.end_tick-x.start_tick<other.end_tick-other.start_tick) || // longest variant
self.variants.iter().all(|x| x.end_tick-x.start_tick>other.end_tick-other.start_tick) || // shortest variant
self.variants.iter().all(|x| x.input_counter>other.input_counter) || // longest input
self.variants.iter().all(|x| x.input_counter<other.input_counter); // shortest input
if interesting {
let var = VariantTuple::from(other, input.clone());
self.variants.push(var);
}
return interesting;
}
pub fn get_taskname(&self) -> &str {
&self.base.current_task.task_name
}
pub fn get_input_counts(&self) -> Vec<u32> {
self.variants.iter().map(|x| x.input_counter).collect()
}
}
impl PartialEq for SysGraphNode {
fn eq(&self, other: &SysGraphNode) -> bool {
self.base==other.base
}
}
// Wrapper around a Vec<NodeIndex> trace through the graph, attached to testcases as Metadata
#[derive(Debug, Default, Serialize, Deserialize, Clone)]
pub struct SysGraphMetadata {
pub inner: Vec<NodeIndex>,
indices: Vec<usize>,
tcref: isize,
}
impl SysGraphMetadata {
pub fn new(inner: Vec<NodeIndex>) -> Self{
Self {indices: inner.iter().map(|x| x.index()).collect(), inner: inner, tcref: 0}
}
}
impl AsSlice for SysGraphMetadata {
/// Convert the trace of graph nodes to a slice of their node indices
fn as_slice(&self) -> &[usize] {
self.indices.as_slice()
}
type Entry = usize;
}
impl HasRefCnt for SysGraphMetadata {
fn refcnt(&self) -> isize {
self.tcref
}
fn refcnt_mut(&mut self) -> &mut isize {
&mut self.tcref
}
}
libafl::impl_serdeany!(SysGraphMetadata);
pub type GraphMaximizerCorpusScheduler<CS> =
MinimizerScheduler<CS, MaxExecsLenFavFactor<<CS as UsesState>::State>,SysGraphMetadata>;
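// For orientation (editorial sketch, not part of this change set): the alias is meant to wrap an
// inner corpus scheduler, e.g. LibAFL's QueueScheduler, assuming MinimizerScheduler::new(base)
// is available in the LibAFL version used here:
//     let scheduler: GraphMaximizerCorpusScheduler<_> = MinimizerScheduler::new(QueueScheduler::new());
// It then keeps one favored entry (per MaxExecsLenFavFactor) for every node index recorded in a
// testcase's SysGraphMetadata, analogous to map-based corpus minimization.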
//============================= Graph Feedback
/// Improved System State Graph
#[derive(Serialize, Deserialize, Clone, Debug, Default, SerdeAny)]
pub struct SysGraphFeedbackState
{
pub graph: DiGraph<SysGraphNode, ()>,
entrypoint: NodeIndex,
exit: NodeIndex,
name: String,
}
impl SysGraphFeedbackState
{
pub fn new() -> Self {
let mut graph = DiGraph::<SysGraphNode, ()>::new();
let mut entry = SysGraphNode::default();
entry.base.current_task.task_name="Start".to_string();
let mut exit = SysGraphNode::default();
exit.base.current_task.task_name="End".to_string();
let entry = graph.add_node(entry);
let exit = graph.add_node(exit);
Self {graph: graph, entrypoint: entry, exit: exit, name: String::from("SysMap")}
}
fn insert(&mut self, list: Vec<RefinedFreeRTOSSystemState>, input: &Vec<u8>) {
let mut current_index = self.entrypoint;
for n in list {
let mut done = false;
for i in self.graph.neighbors_directed(current_index, Direction::Outgoing) {
if n == self.graph[i].base {
done = true;
current_index = i;
break;
}
}
if !done {
let j = self.graph.add_node(SysGraphNode::from(n,input.clone()));
self.graph.add_edge(current_index, j, ());
current_index = j;
}
}
}
/// Try adding a system state path from a [Vec<RefinedFreeRTOSSystemState>], return true if the path was interesting
fn update(&mut self, list: &Vec<RefinedFreeRTOSSystemState>, input: &Vec<u8>) -> (bool, Vec<NodeIndex>) {
let mut current_index = self.entrypoint;
let mut novel = false;
let mut trace : Vec<NodeIndex> = vec![current_index];
for n in list {
let mut matching : Option<NodeIndex> = None;
for i in self.graph.neighbors_directed(current_index, Direction::Outgoing) {
let tmp = &self.graph[i];
if n == &tmp.base {
matching = Some(i);
current_index = i;
break;
}
}
match matching {
None => {
novel = true;
let j = self.graph.add_node(SysGraphNode::from(n.clone(),input.clone()));
self.graph.add_edge(current_index, j, ());
current_index = j;
},
Some(i) => {
novel |= self.graph[i].unite_interesting(&n, input);
}
}
trace.push(current_index);
}
self.graph.update_edge(current_index, self.exit, ()); // every path ends in the exit node
return (novel, trace);
}
}
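// Editorial sketch of how the graph state is driven (in-module only, since `update` is private);
// the trace and input below are placeholders rather than values from this change set:
let mut fbstate = SysGraphFeedbackState::new();
let trace: Vec<RefinedFreeRTOSSystemState> = Vec::new(); // would normally come from QemuSystemStateObserver
let input: Vec<u8> = vec![0u8; 16];                      // raw input bytes kept for bookkeeping
let (novel, node_path) = fbstate.update(&trace, &input);
// `novel` flags the run as interesting; `node_path` is what SysMapFeedback later stores as SysGraphMetadata.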
impl Named for SysGraphFeedbackState
{
#[inline]
fn name(&self) -> &str {
&self.name
}
}
impl SysGraphFeedbackState
{
fn reset(&mut self) -> Result<(), Error> {
self.graph.clear();
let mut entry = SysGraphNode::default();
entry.base.current_task.task_name="Start".to_string();
let mut exit = SysGraphNode::default();
exit.base.current_task.task_name="End".to_string();
self.entrypoint = self.graph.add_node(entry);
self.exit = self.graph.add_node(exit);
Ok(())
}
}
/// A Feedback reporting novel System-State Transitions. Depends on [`QemuSystemStateObserver`]
#[derive(Serialize, Deserialize, Clone, Debug, Default)]
pub struct SysMapFeedback
{
name: String,
last_trace: Option<Vec<NodeIndex>>,
}
impl SysMapFeedback {
pub fn new() -> Self {
Self {name: String::from("SysMapFeedback"), last_trace: None }
}
}
impl<S> Feedback<S> for SysMapFeedback
where
S: UsesInput + HasClientPerfMonitor + HasNamedMetadata,
S::Input: HasTargetBytes,
{
#[allow(clippy::wrong_self_convention)]
fn is_interesting<EM, OT>(
&mut self,
state: &mut S,
_manager: &mut EM,
_input: &S::Input,
observers: &OT,
_exit_kind: &ExitKind,
) -> Result<bool, Error>
where
EM: EventFirer<State = S>,
OT: ObserversTuple<S>,
{
let observer = observers.match_name::<QemuSystemStateObserver>("systemstate")
.expect("QemuSystemStateObserver not found");
let feedbackstate = match state
.named_metadata_mut()
.get_mut::<SysGraphFeedbackState>("SysMap") {
Some(s) => s,
None => {
let n=SysGraphFeedbackState::default();
state.named_metadata_mut().insert(n, "SysMap");
state.named_metadata_mut().get_mut::<SysGraphFeedbackState>("SysMap").unwrap()
}
};
let ret = feedbackstate.update(&observer.last_run, &observer.last_input);
self.last_trace = Some(ret.1);
Ok(ret.0)
}
/// Append to the testcase the generated metadata in case of a new corpus item
#[inline]
fn append_metadata(&mut self, _state: &mut S, testcase: &mut Testcase<S::Input>) -> Result<(), Error> {
let a = self.last_trace.take();
match a {
Some(s) => testcase.metadata_mut().insert(SysGraphMetadata::new(s)),
None => (),
}
Ok(())
}
/// Discard the stored metadata in case that the testcase is not added to the corpus
#[inline]
fn discard_metadata(&mut self, _state: &mut S, _input: &S::Input) -> Result<(), Error> {
self.last_trace = None;
Ok(())
}
}
impl Named for SysMapFeedback
{
#[inline]
fn name(&self) -> &str {
&self.name
}
}
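// Editorial sketch of where SysMapFeedback might sit when composing feedbacks; `feedback_or!` and
// CrashFeedback are standard LibAFL pieces, but the concrete fuzzer setup is not shown in this diff:
use libafl::{feedback_or, feedbacks::CrashFeedback};
let feedback = feedback_or!(SysMapFeedback::new(), CrashFeedback::new());
// Any run that adds a new node, edge or interesting variant to the graph is kept in the corpus.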
//============================= Mutators
//=============================== Snippets
// pub struct RandGraphSnippetMutator<I, S>
// where
// I: Input + HasBytesVec,
// S: HasRand + HasMetadata + HasCorpus<I> + HasSolutions<I>,
// {
// phantom: PhantomData<(I, S)>,
// }
// impl<I, S> RandGraphSnippetMutator<I, S>
// where
// I: Input + HasBytesVec,
// S: HasRand + HasMetadata + HasCorpus<I> + HasSolutions<I>,
// {
// pub fn new() -> Self {
// RandGraphSnippetMutator{phantom: PhantomData}
// }
// }
// impl<I, S> Mutator<I, S> for RandGraphSnippetMutator<I, S>
// where
// I: Input + HasBytesVec,
// S: HasRand + HasMetadata + HasCorpus<I> + HasSolutions<I>,
// {
// fn mutate(
// &mut self,
// state: &mut S,
// input: &mut I,
// _stage_idx: i32
// ) -> Result<MutationResult, Error>
// {
// // need our own random generator, because borrowing rules
// let mut myrand = StdRand::new();
// let tmp = &mut state.rand_mut();
// myrand.set_seed(tmp.next());
// drop(tmp);
// let feedbackstate = state
// .feedback_states()
// .match_name::<SysGraphFeedbackState>("SysMap")
// .unwrap();
// let g = &feedbackstate.graph;
// let tmp = state.metadata().get::<SysGraphMetadata>();
// if tmp.is_none() { // if there are no metadata it was probably not interesting anyways
// return Ok(MutationResult::Skipped);
// }
// let trace =tmp.expect("SysGraphMetadata not found");
// // follow the path, extract snippets from last reads, find common snippets.
// // those are likely key parts. choose random parts from other sibling traces
// let sibling_inputs : Vec<&Vec<u8>>= g[*trace.inner.last().unwrap()].variants.iter().map(|x| &x.input).collect();
// let mut snippet_collector = vec![];
// let mut per_input_counters = HashMap::<&Vec<u8>,usize>::new(); // ugly workaround to track multiple inputs
// for t in &trace.inner {
// let node = &g[*t];
// let mut per_node_snippets = HashMap::<&Vec<u8>,&[u8]>::new();
// for v in &node.variants {
// match per_input_counters.get_mut(&v.input) {
// None => {
// if sibling_inputs.iter().any(|x| *x==&v.input) { // only collect info about sibling inputs from target
// per_input_counters.insert(&v.input, v.input_counter.try_into().unwrap());
// }
// },
// Some(x) => {
// let x_u = *x;
// if x_u<v.input_counter as usize {
// *x=v.input_counter as usize;
// per_node_snippets.insert(&v.input,&v.input[x_u..v.input_counter as usize]);
// }
// }
// }
// }
// snippet_collector.push(per_node_snippets);
// }
// let mut new_input : Vec<u8> = vec![];
// for c in snippet_collector {
// new_input.extend_from_slice(myrand.choose(c).1);
// }
// for i in new_input.iter().enumerate() {
// input.bytes_mut()[i.0]=*i.1;
// }
// Ok(MutationResult::Mutated)
// }
// fn post_exec(
// &mut self,
// _state: &mut S,
// _stage_idx: i32,
// _corpus_idx: Option<usize>
// ) -> Result<(), Error> {
// Ok(())
// }
// }
// impl<I, S> Named for RandGraphSnippetMutator<I, S>
// where
// I: Input + HasBytesVec,
// S: HasRand + HasMetadata + HasCorpus<I> + HasSolutions<I>,
// {
// fn name(&self) -> &str {
// "RandGraphSnippetMutator"
// }
// }
// //=============================== Snippets
// pub struct RandInputSnippetMutator<I, S>
// where
// I: Input + HasBytesVec,
// S: HasRand + HasMetadata + HasCorpus<I> + HasSolutions<I>,
// {
// phantom: PhantomData<(I, S)>,
// }
// impl<I, S> RandInputSnippetMutator<I, S>
// where
// I: Input + HasBytesVec,
// S: HasRand + HasMetadata + HasCorpus<I> + HasSolutions<I>,
// {
// pub fn new() -> Self {
// RandInputSnippetMutator{phantom: PhantomData}
// }
// }
// impl<I, S> Mutator<I, S> for RandInputSnippetMutator<I, S>
// where
// I: Input + HasBytesVec,
// S: HasRand + HasMetadata + HasCorpus<I> + HasSolutions<I>,
// {
// fn mutate(
// &mut self,
// state: &mut S,
// input: &mut I,
// _stage_idx: i32
// ) -> Result<MutationResult, Error>
// {
// // need our own random generator, because borrowing rules
// let mut myrand = StdRand::new();
// let tmp = &mut state.rand_mut();
// myrand.set_seed(tmp.next());
// drop(tmp);
// let feedbackstate = state
// .feedback_states()
// .match_name::<SysGraphFeedbackState>("SysMap")
// .unwrap();
// let g = &feedbackstate.graph;
// let tmp = state.metadata().get::<SysGraphMetadata>();
// if tmp.is_none() { // if there are no metadata it was probably not interesting anyways
// return Ok(MutationResult::Skipped);
// }
// let trace = tmp.expect("SysGraphMetadata not found");
// let mut collection : Vec<Vec<u8>> = Vec::new();
// let mut current_pointer : usize = 0;
// for t in &trace.inner {
// let node = &g[*t];
// for v in &node.variants {
// if v.input == input.bytes() {
// if v.input_counter > current_pointer.try_into().unwrap() {
// collection.push(v.input[current_pointer..v.input_counter as usize].to_owned());
// current_pointer = v.input_counter as usize;
// }
// break;
// }
// }
// }
// let index_to_mutate = myrand.below(collection.len() as u64) as usize;
// for i in 0..collection[index_to_mutate].len() {
// collection[index_to_mutate][i] = myrand.below(0xFF) as u8;
// }
// for i in collection.concat().iter().enumerate() {
// input.bytes_mut()[i.0]=*i.1;
// }
// Ok(MutationResult::Mutated)
// }
// fn post_exec(
// &mut self,
// _state: &mut S,
// _stage_idx: i32,
// _corpus_idx: Option<usize>
// ) -> Result<(), Error> {
// Ok(())
// }
// }
// impl<I, S> Named for RandInputSnippetMutator<I, S>
// where
// I: Input + HasBytesVec,
// S: HasRand + HasMetadata + HasCorpus<I> + HasSolutions<I>,
// {
// fn name(&self) -> &str {
// "RandInputSnippetMutator"
// }
// }
// //=============================== Suffix
// pub struct RandGraphSuffixMutator<I, S>
// where
// I: Input + HasBytesVec,
// S: HasRand + HasMetadata + HasCorpus<I> + HasSolutions<I>,
// {
// phantom: PhantomData<(I, S)>,
// }
// impl<I, S> RandGraphSuffixMutator<I, S>
// where
// I: Input + HasBytesVec,
// S: HasRand + HasMetadata + HasCorpus<I> + HasSolutions<I>,
// {
// pub fn new() -> Self {
// RandGraphSuffixMutator{phantom: PhantomData}
// }
// }
// impl<I, S> Mutator<I, S> for RandGraphSuffixMutator<I, S>
// where
// I: Input + HasBytesVec,
// S: HasRand + HasMetadata + HasCorpus<I> + HasSolutions<I>,
// {
// fn mutate(
// &mut self,
// state: &mut S,
// input: &mut I,
// _stage_idx: i32
// ) -> Result<MutationResult, Error>
// {
// // need our own random generator, because borrowing rules
// let mut myrand = StdRand::new();
// let tmp = &mut state.rand_mut();
// myrand.set_seed(tmp.next());
// drop(tmp);
// let feedbackstate = state
// .feedback_states()
// .match_name::<SysGraphFeedbackState>("SysMap")
// .unwrap();
// let g = &feedbackstate.graph;
// let tmp = state.metadata().get::<SysGraphMetadata>();
// if tmp.is_none() { // if there are no metadata it was probably not interesting anyways
// return Ok(MutationResult::Skipped);
// }
// let trace =tmp.expect("SysGraphMetadata not found");
// // follow the path, extract snippets from last reads, find common snippets.
// // those are likely key parts. choose random parts from other sibling traces
// let inp_c_end = g[*trace.inner.last().unwrap()].base.input_counter;
// let mut num_to_reverse = myrand.below(trace.inner.len().try_into().unwrap());
// for t in trace.inner.iter().rev() {
// let int_c_prefix = g[*t].base.input_counter;
// if int_c_prefix < inp_c_end {
// num_to_reverse-=1;
// if num_to_reverse<=0 {
// let mut new_input=input.bytes()[..(int_c_prefix as usize)].to_vec();
// let mut ext : Vec<u8> = (int_c_prefix..inp_c_end).map(|_| myrand.next().to_le_bytes()).flatten().collect();
// new_input.append(&mut ext);
// for i in new_input.iter().enumerate() {
// if input.bytes_mut().len()>i.0 {
// input.bytes_mut()[i.0]=*i.1;
// }
// else { break };
// }
// break;
// }
// }
// }
// Ok(MutationResult::Mutated)
// }
// fn post_exec(
// &mut self,
// _state: &mut S,
// _stage_idx: i32,
// _corpus_idx: Option<usize>
// ) -> Result<(), Error> {
// Ok(())
// }
// }
// impl<I, S> Named for RandGraphSuffixMutator<I, S>
// where
// I: Input + HasBytesVec,
// S: HasRand + HasMetadata + HasCorpus<I> + HasSolutions<I>,
// {
// fn name(&self) -> &str {
// "RandGraphSuffixMutator"
// }
// }

View File

@ -1,157 +1,61 @@
use std::cell::UnsafeCell;
use std::io::Write;
use std::ops::Range; use std::ops::Range;
use hashbrown::HashMap;
use hashbrown::HashSet;
use libafl::prelude::ExitKind;
use libafl::prelude::UsesInput; use libafl::prelude::UsesInput;
use libafl_qemu::elf::EasyElf; use libafl_qemu::Emulator;
use libafl_qemu::read_user_reg_unchecked;
use libafl_qemu::GuestAddr; use libafl_qemu::GuestAddr;
use libafl_qemu::GuestPhysAddr;
use libafl_qemu::QemuHooks; use libafl_qemu::QemuHooks;
use libafl_qemu::Hook; use libafl_qemu::edges::QemuEdgesMapMetadata;
use libafl_qemu::helpers::{QemuHelper, QemuHelperTuple}; use libafl_qemu::emu;
use libafl_qemu::sys::TCGTemp; use libafl_qemu::hooks;
use libafl_qemu::qemu::MemAccessInfo;
use crate::systemstate::RawFreeRTOSSystemState; use crate::systemstate::RawFreeRTOSSystemState;
use crate::systemstate::CURRENT_SYSTEMSTATE_VEC; use crate::systemstate::CURRENT_SYSTEMSTATE_VEC;
use crate::systemstate::NUM_PRIOS; use crate::systemstate::NUM_PRIOS;
use super::freertos::void_ptr;
use super::freertos::TCB_t; use super::freertos::TCB_t;
use super::freertos::rtos_struct::List_Item_struct; use super::freertos::rtos_struct::List_Item_struct;
use super::freertos::rtos_struct::*; use super::freertos::rtos_struct::*;
use super::freertos; use super::freertos;
use super::CaptureEvent;
use libafl_qemu::{
helper::{QemuHelper, QemuHelperTuple},
// edges::SAVED_JUMP,
};
//============================= Struct definitions
//============================= API symbols pub static mut INTR_OFFSET : Option<u64> = None;
pub static mut INTR_DONE : bool = true;
pub const ISR_SYMBOLS : &'static [&'static str] = &[ // only used when inputs are injected
// ISRs pub static mut NEXT_INPUT : Vec<u8> = Vec::new();
"Reset_Handler","Default_Handler","Default_Handler2","Default_Handler3","Default_Handler4","Default_Handler5","Default_Handler6","vPortSVCHandler","xPortPendSVHandler","xPortSysTickHandler","ISR_0_Handler", "ISR_1_Handler", "ISR_2_Handler", "ISR_3_Handler", "ISR_4_Handler", "ISR_5_Handler", "ISR_6_Handler", "ISR_7_Handler", "ISR_8_Handler", "ISR_9_Handler", "ISR_10_Handler", "ISR_11_Handler", "ISR_12_Handler", "ISR_13_Handler"
];
pub const USR_ISR_SYMBOLS : &'static [&'static str] = &[
"ISR_0_Handler", "ISR_1_Handler", "ISR_2_Handler", "ISR_3_Handler", "ISR_4_Handler", "ISR_5_Handler", "ISR_6_Handler", "ISR_7_Handler", "ISR_8_Handler", "ISR_9_Handler", "ISR_10_Handler", "ISR_11_Handler", "ISR_12_Handler", "ISR_13_Handler"
];
/// Read ELF program headers to resolve physical load addresses.
fn virt2phys(vaddr: GuestPhysAddr, tab: &EasyElf) -> GuestPhysAddr {
for i in &tab.goblin().program_headers {
if i.vm_range().contains(&vaddr.try_into().unwrap()) {
let ret = vaddr - TryInto::<GuestPhysAddr>::try_into(i.p_vaddr).unwrap()
+ TryInto::<GuestPhysAddr>::try_into(i.p_paddr).unwrap();
return ret - (ret % 2);
}
}
return vaddr;
}
/// Lookup a symbol in the ELF file, optionally resolve segment offsets
pub fn load_symbol(elf : &EasyElf, symbol : &str, do_translation : bool) -> GuestAddr {
try_load_symbol(elf, symbol, do_translation).expect(&format!("Symbol {} not found", symbol))
}
pub fn try_load_symbol(elf : &EasyElf, symbol : &str, do_translation : bool) -> Option<GuestAddr> {
let ret = elf
.resolve_symbol(symbol, 0);
if do_translation {
Option::map_or(ret, None, |x| Some(virt2phys(x as GuestPhysAddr,&elf) as GuestAddr))
} else {ret}
}
/// Try looking up the address range of a function in the ELF file
pub fn get_function_range(elf: &EasyElf, symbol: &str) -> Option<std::ops::Range<GuestAddr>> {
let gob = elf.goblin();
let mut funcs : Vec<_> = gob.syms.iter().filter(|x| x.is_function()).collect();
funcs.sort_unstable_by(|x,y| x.st_value.cmp(&y.st_value));
for sym in &gob.syms {
if let Some(sym_name) = gob.strtab.get_at(sym.st_name) {
if sym_name == symbol {
if sym.st_value == 0 {
return None;
} else {
//#[cfg(cpu_target = "arm")]
// Required because of arm interworking addresses aka bit(0) for thumb mode
let addr = (sym.st_value as GuestAddr) & !(0x1 as GuestAddr);
//#[cfg(not(cpu_target = "arm"))]
//let addr = sym.st_value as GuestAddr;
// look for first function after addr
let sym_end = funcs.iter().find(|x| x.st_value > sym.st_value);
if let Some(sym_end) = sym_end {
// println!("{} {:#x}..{} {:#x}", gob.strtab.get_at(sym.st_name).unwrap_or(""),addr, gob.strtab.get_at(sym_end.st_name).unwrap_or(""),sym_end.st_value & !0x1);
return Some(addr..((sym_end.st_value & !0x1) as GuestAddr));
}
return None;
};
}
}
}
return None;
}
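// Editorial sketch of how these ELF helpers are typically driven; the file name and symbol names
// below are placeholders, not values taken from this repository:
let mut elf_buffer = Vec::new();
let elf = EasyElf::from_file("build/target.elf", &mut elf_buffer).unwrap();
let tcb_addr = load_symbol(&elf, "pxCurrentTCB", true);       // address translated via virt2phys
let pendsv = get_function_range(&elf, "xPortPendSVHandler");  // Some(start..end) if resolvable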
//============================= Qemu Helper //============================= Qemu Helper
/// A Qemu helper which reads FreeRTOS-specific structs from Qemu whenever certain syscalls occur, and also injects inputs /// A Qemu helper which reads FreeRTOS-specific structs from Qemu whenever certain syscalls occur, and also injects inputs
#[derive(Debug)] #[derive(Debug)]
pub struct QemuSystemStateHelper { pub struct QemuSystemStateHelper {
// Address of API functions kerneladdr: u32,
api_fn_addrs: HashMap<GuestAddr, String>, tcb_addr: u32,
api_fn_ranges: Vec<(String, std::ops::Range<GuestAddr>)>, ready_queues: u32,
// Address of interrupt routines input_counter: Option<u64>,
isr_addrs: HashMap<GuestAddr, String>, app_range: Range<u32>,
isr_ranges: Vec<(String, std::ops::Range<GuestAddr>)>,
input_mem: Range<GuestAddr>,
tcb_addr: GuestAddr,
ready_queues: GuestAddr,
delay_queue: GuestAddr,
delay_queue_overflow: GuestAddr,
scheduler_lock_addr: GuestAddr,
scheduler_running_addr: GuestAddr,
critical_addr: GuestAddr,
input_counter: Option<GuestAddr>,
app_range: Range<GuestAddr>,
job_done_addrs: GuestAddr,
} }
impl QemuSystemStateHelper { impl QemuSystemStateHelper {
#[must_use] #[must_use]
pub fn new( pub fn new(
api_fn_addrs: HashMap<GuestAddr, String>, kerneladdr: u32,
api_fn_ranges: Vec<(String, std::ops::Range<GuestAddr>)>, tcb_addr: u32,
isr_addrs: HashMap<GuestAddr, String>, ready_queues: u32,
isr_ranges: Vec<(String, std::ops::Range<GuestAddr>)>, input_counter: Option<u64>,
input_mem: Range<GuestAddr>, app_range: Range<u32>,
tcb_addr: GuestAddr,
ready_queues: GuestAddr,
delay_queue: GuestAddr,
delay_queue_overflow: GuestAddr,
scheduler_lock_addr: GuestAddr,
scheduler_running_addr: GuestAddr,
critical_addr: GuestAddr,
input_counter: Option<GuestAddr>,
app_range: Range<GuestAddr>,
job_done_addrs: GuestAddr,
) -> Self { ) -> Self {
QemuSystemStateHelper { QemuSystemStateHelper {
api_fn_addrs, kerneladdr,
api_fn_ranges,
isr_addrs,
isr_ranges,
input_mem,
tcb_addr: tcb_addr, tcb_addr: tcb_addr,
ready_queues: ready_queues, ready_queues: ready_queues,
delay_queue,
delay_queue_overflow,
scheduler_lock_addr,
scheduler_running_addr,
critical_addr,
input_counter: input_counter, input_counter: input_counter,
app_range, app_range,
job_done_addrs,
} }
} }
} }
@ -160,229 +64,117 @@ impl<S> QemuHelper<S> for QemuSystemStateHelper
where where
S: UsesInput, S: UsesInput,
{ {
fn first_exec<QT>(&self, _hooks: &QemuHooks<QT, S>) fn first_exec<QT>(&self, _hooks: &QemuHooks<'_, QT, S>)
where where
QT: QemuHelperTuple<S>, QT: QemuHelperTuple<S>,
{ {
// for wp in self.api_fn_addrs.keys() { _hooks.instruction(self.kerneladdr, exec_syscall_hook::<QT, S>, false);
// _hooks.instruction(*wp, Hook::Function(exec_syscall_hook::<QT, S>), false); #[cfg(feature = "trace_abbs")]
// } _hooks.jmps(Some(gen_jmp_is_syscall::<QT, S>), Some(trace_api_call::<QT, S>));
for wp in self.isr_addrs.keys() {
_hooks.instruction(*wp, Hook::Function(exec_isr_hook::<QT, S>), false);
}
_hooks.jmps(Hook::Function(gen_jmp_is_syscall::<QT, S>), Hook::Function(trace_jmp::<QT, S>));
#[cfg(feature = "trace_job_response_times")]
_hooks.instruction(self.job_done_addrs, Hook::Function(job_done_hook::<QT, S>), false);
#[cfg(feature = "trace_reads")]
_hooks.reads(Hook::Function(gen_read_is_input::<QT, S>), Hook::Empty,Hook::Empty,Hook::Empty,Hook::Empty,Hook::Function(trace_reads::<QT, S>));
unsafe { INPUT_MEM = self.input_mem.clone() };
} }
// TODO: refactor duplicate code // TODO: refactor duplicate code
fn pre_exec(&mut self, _emulator: libafl_qemu::Qemu, _input: &S::Input) { fn pre_exec(&mut self, _emulator: &Emulator, _input: &S::Input) {
unsafe { unsafe {
CURRENT_SYSTEMSTATE_VEC.clear(); CURRENT_SYSTEMSTATE_VEC.clear();
JOBS_DONE.clear(); let p = LAST_API_CALL.with(|x| x.get());
*p = None;
} }
} }
fn post_exec<OT>(&mut self, emulator: libafl_qemu::Qemu, _input: &S::Input, _observers: &mut OT, _exit_kind: &mut ExitKind) { fn post_exec(&mut self, emulator: &Emulator, _input: &S::Input) {
trigger_collection(&emulator,(0, 0), CaptureEvent::End, self); trigger_collection(emulator, self)
unsafe {
let c = emulator.cpu_from_index(0);
let pc = c.read_reg::<i32, u32>(15).unwrap();
if CURRENT_SYSTEMSTATE_VEC.len() == 0 {return;}
CURRENT_SYSTEMSTATE_VEC[CURRENT_SYSTEMSTATE_VEC.len()-1].edge = (pc,0);
CURRENT_SYSTEMSTATE_VEC[CURRENT_SYSTEMSTATE_VEC.len()-1].capture_point = (CaptureEvent::End,"Breakpoint".to_string());
}
// Find the first ISREnd of vPortSVCHandler and drop anything before
unsafe {
let mut index = 0;
while index < CURRENT_SYSTEMSTATE_VEC.len() {
if CaptureEvent::ISREnd == CURRENT_SYSTEMSTATE_VEC[index].capture_point.0 && CURRENT_SYSTEMSTATE_VEC[index].capture_point.1 == "xPortPendSVHandler" {
break;
}
index += 1;
}
CURRENT_SYSTEMSTATE_VEC.drain(..index);
}
} }
} }
fn read_freertos_list(systemstate : &mut RawFreeRTOSSystemState, emulator: &libafl_qemu::Qemu, target: GuestAddr) -> (freertos::List_t, bool) {
let read : freertos::List_t = freertos::emu_lookup::lookup(emulator, target);
let listbytes : GuestAddr = GuestAddr::try_from(std::mem::size_of::<freertos::List_t>()).unwrap();
let mut next_index = read.pxIndex;
for _j in 0..read.uxNumberOfItems {
// always jump over the xListEnd marker
if (target..target+listbytes).contains(&next_index) {
let next_item : freertos::MiniListItem_t = freertos::emu_lookup::lookup(emulator, next_index);
let new_next_index=next_item.pxNext;
systemstate.dumping_ground.insert(next_index,List_MiniItem_struct(next_item));
next_index = new_next_index;
}
let next_item : freertos::ListItem_t = freertos::emu_lookup::lookup(emulator, next_index);
// println!("Item at {}: {:?}",next_index,next_item);
if next_item.pvContainer != target {
// the list is being modified, abort by setting the list empty
eprintln!("Warning: attempted to read a list that is being modified");
let mut read=read;
read.uxNumberOfItems = 0;
return (read, false);
}
// assert_eq!(next_item.pvContainer,target);
let new_next_index=next_item.pxNext;
let next_tcb : TCB_t= freertos::emu_lookup::lookup(emulator,next_item.pvOwner);
// println!("TCB at {}: {:?}",next_item.pvOwner,next_tcb);
systemstate.dumping_ground.insert(next_item.pvOwner,TCB_struct(next_tcb.clone()));
systemstate.dumping_ground.insert(next_index,List_Item_struct(next_item));
next_index=new_next_index;
}
// Handle edge case where the end marker was not included yet
if (target..target+listbytes).contains(&next_index) {
let next_item : freertos::MiniListItem_t = freertos::emu_lookup::lookup(emulator, next_index);
systemstate.dumping_ground.insert(next_index,List_MiniItem_struct(next_item));
}
return (read, true);
}
#[inline] #[inline]
fn trigger_collection(emulator: &libafl_qemu::Qemu, edge: (GuestAddr, GuestAddr), event: CaptureEvent, h: &QemuSystemStateHelper) { fn trigger_collection(emulator: &Emulator, h: &QemuSystemStateHelper) {
let listbytes : GuestAddr = GuestAddr::try_from(std::mem::size_of::<freertos::List_t>()).unwrap(); let listbytes : u32 = u32::try_from(std::mem::size_of::<freertos::List_t>()).unwrap();
let mut systemstate = RawFreeRTOSSystemState::default(); let mut systemstate = RawFreeRTOSSystemState::default();
unsafe {
match event { // TODO: investigate why can_do_io is not set sometimes, as this is just a workaround
CaptureEvent::APIStart => { let c = emulator.cpu_from_index(0);
let s = h.api_fn_addrs.get(&edge.1).unwrap(); let can_do_io = (*c.raw_ptr()).can_do_io;
systemstate.capture_point=(CaptureEvent::APIStart, s.to_string()); (*c.raw_ptr()).can_do_io = 1;
}, systemstate.qemu_tick = emu::icount_get_raw();
CaptureEvent::APIEnd => { (*c.raw_ptr()).can_do_io = can_do_io;
let s = h.api_fn_addrs.get(&edge.0).unwrap();
systemstate.capture_point=(CaptureEvent::APIEnd, s.to_string());
},
CaptureEvent::ISRStart => {
let s = h.isr_addrs.get(&edge.1).unwrap();
systemstate.capture_point=(CaptureEvent::ISRStart, s.to_string());
},
CaptureEvent::ISREnd => {
let s = h.isr_addrs.get(&edge.0).unwrap();
systemstate.capture_point=(CaptureEvent::ISREnd, s.to_string());
},
CaptureEvent::End => {systemstate.capture_point=(CaptureEvent::End, "".to_string());},
CaptureEvent::Undefined => (),
} }
if systemstate.capture_point.0 == CaptureEvent::Undefined {
// println!("Not found: {:#x} {:#x}", edge.0.unwrap_or(0), edge.1.unwrap_or(0));
}
systemstate.edge = ((edge.0),(edge.1));
systemstate.qemu_tick = get_icount(emulator);
let mut buf : [u8; 4] = [0,0,0,0]; let mut buf : [u8; 4] = [0,0,0,0];
match h.input_counter { match h.input_counter {
Some(s) => unsafe { emulator.read_mem(s, &mut buf); }, Some(s) => unsafe { emulator.read_phys_mem(s, &mut buf); },
Option::None => (), None => (),
}; };
systemstate.input_counter = GuestAddr::from_le_bytes(buf); systemstate.input_counter = u32::from_le_bytes(buf);
let curr_tcb_addr : freertos::void_ptr = freertos::emu_lookup::lookup(emulator, h.tcb_addr); let curr_tcb_addr : freertos::void_ptr = freertos::emu_lookup::lookup(emulator, h.tcb_addr);
if curr_tcb_addr == 0 { if curr_tcb_addr == 0 {
return; return;
}; };
// println!("{:?}",std::str::from_utf8(&current_tcb.pcTaskName));
let critical : void_ptr = freertos::emu_lookup::lookup(emulator, h.critical_addr);
let suspended : void_ptr = freertos::emu_lookup::lookup(emulator, h.scheduler_lock_addr);
let _running : void_ptr = freertos::emu_lookup::lookup(emulator, h.scheduler_running_addr);
systemstate.current_tcb = freertos::emu_lookup::lookup(emulator,curr_tcb_addr); systemstate.current_tcb = freertos::emu_lookup::lookup(emulator,curr_tcb_addr);
// During ISRs it is only safe to extract structs if they are not currently being modified
if systemstate.capture_point.0==CaptureEvent::APIStart || systemstate.capture_point.0==CaptureEvent::APIEnd || (critical == 0 && suspended == 0 ) {
// Extract delay list
let mut target : GuestAddr = h.delay_queue;
target = freertos::emu_lookup::lookup(emulator, target);
let _temp = read_freertos_list(&mut systemstate, emulator, target);
systemstate.delay_list = _temp.0;
systemstate.read_invalid |= !_temp.1;
// Extract delay list overflow unsafe {
let mut target : GuestAddr = h.delay_queue_overflow; LAST_API_CALL.with(|x|
target = freertos::emu_lookup::lookup(emulator, target); match *x.get() {
let _temp = read_freertos_list(&mut systemstate, emulator, target); Some(s) => {
systemstate.delay_list_overflow = _temp.0; systemstate.last_pc = Some(s.0 as u64);
systemstate.read_invalid |= !_temp.1; },
None => (),
// Extract suspended tasks (infinite wait), seems broken, always appears to be modified unsafe {
// let mut target : GuestAddr = h.suspended_queue; );
// target = freertos::emu_lookup::lookup(emulator, target);
// systemstate.suspended_list = read_freertos_list(&mut systemstate, emulator, target);
// Extract priority lists
for i in 0..NUM_PRIOS {
let target : GuestAddr = listbytes*GuestAddr::try_from(i).unwrap()+h.ready_queues;
let _temp = read_freertos_list(&mut systemstate, emulator, target);
systemstate.prio_ready_lists[i] = _temp.0;
systemstate.read_invalid |= !_temp.1;
}
} else {
systemstate.read_invalid = true;
} }
systemstate.mem_reads = unsafe { MEM_READ.take().unwrap_or_default() }; // println!("{:?}",std::str::from_utf8(&current_tcb.pcTaskName));
for i in 0..NUM_PRIOS {
let target : u32 = listbytes*u32::try_from(i).unwrap()+h.ready_queues;
systemstate.prio_ready_lists[i] = freertos::emu_lookup::lookup(emulator, target);
// println!("List at {}: {:?}",target, systemstate.prio_ready_lists[i]);
let mut next_index = systemstate.prio_ready_lists[i].pxIndex;
for _j in 0..systemstate.prio_ready_lists[i].uxNumberOfItems {
// always jump over the xListEnd marker
if (target..target+listbytes).contains(&next_index) {
let next_item : freertos::MiniListItem_t = freertos::emu_lookup::lookup(emulator, next_index);
let new_next_index=next_item.pxNext;
systemstate.dumping_ground.insert(next_index,List_MiniItem_struct(next_item));
next_index = new_next_index;
}
let next_item : freertos::ListItem_t = freertos::emu_lookup::lookup(emulator, next_index);
// println!("Item at {}: {:?}",next_index,next_item);
assert_eq!(next_item.pvContainer,target);
let new_next_index=next_item.pxNext;
let next_tcb : TCB_t= freertos::emu_lookup::lookup(emulator,next_item.pvOwner);
// println!("TCB at {}: {:?}",next_item.pvOwner,next_tcb);
systemstate.dumping_ground.insert(next_item.pvOwner,TCB_struct(next_tcb.clone()));
systemstate.dumping_ground.insert(next_index,List_Item_struct(next_item));
next_index=new_next_index;
}
// Handle edge case where the end marker was not included yet
if (target..target+listbytes).contains(&next_index) {
let next_item : freertos::MiniListItem_t = freertos::emu_lookup::lookup(emulator, next_index);
systemstate.dumping_ground.insert(next_index,List_MiniItem_struct(next_item));
}
}
unsafe { CURRENT_SYSTEMSTATE_VEC.push(systemstate); } unsafe { CURRENT_SYSTEMSTATE_VEC.push(systemstate); }
} }
//============================= Trace job response times pub fn exec_syscall_hook<QT, S>(
hooks: &mut QemuHooks<'_, QT, S>,
pub static mut JOBS_DONE : Vec<(u64, String)> = vec![];
pub fn job_done_hook<QT, S>(
hooks: &mut QemuHooks<QT, S>,
_state: Option<&mut S>, _state: Option<&mut S>,
_pc: GuestAddr, _pc: u32,
) )
where where
S: UsesInput, S: UsesInput,
QT: QemuHelperTuple<S>, QT: QemuHelperTuple<S>,
{ {
let emulator = hooks.qemu(); let emulator = hooks.emulator();
let h = hooks.helpers().match_first_type::<QemuSystemStateHelper>().expect("QemuSystemHelper not found in helper tuple"); let h = hooks.helpers().match_first_type::<QemuSystemStateHelper>().expect("QemuSystemHelper not found in helper tuple");
let curr_tcb_addr : freertos::void_ptr = freertos::emu_lookup::lookup(emulator, h.tcb_addr); trigger_collection(emulator, h);
if curr_tcb_addr == 0 {
return;
};
let current_tcb : TCB_t = freertos::emu_lookup::lookup(emulator,curr_tcb_addr);
let tmp = unsafe {std::mem::transmute::<[i8; 10],[u8; 10]>(current_tcb.pcTaskName)};
let name : String = std::str::from_utf8(&tmp).expect("TCB name was not utf8").chars().filter(|x| *x != '\0').collect::<String>();
unsafe { JOBS_DONE.push((get_icount(emulator), name)); }
} }
//============================= Trace interrupt service routines thread_local!(static LAST_API_CALL : UnsafeCell<Option<(GuestAddr,GuestAddr)>> = UnsafeCell::new(None));
pub fn exec_isr_hook<QT, S>(
hooks: &mut QemuHooks<QT, S>,
_state: Option<&mut S>,
pc: GuestAddr,
)
where
S: UsesInput,
QT: QemuHelperTuple<S>,
{
let emulator = hooks.qemu();
let h = hooks.helpers().match_first_type::<QemuSystemStateHelper>().expect("QemuSystemHelper not found in helper tuple");
let src = read_rec_return_stackframe(emulator, 0xfffffffc);
trigger_collection(emulator, (src, pc), CaptureEvent::ISRStart, h);
// println!("Exec ISR Call {:#x} {:#x} {}", src, pc, get_icount(emulator));
}
//============================= Trace syscalls and returns
pub fn gen_jmp_is_syscall<QT, S>( pub fn gen_jmp_is_syscall<QT, S>(
hooks: &mut QemuHooks<QT, S>, hooks: &mut QemuHooks<'_, QT, S>,
_state: Option<&mut S>, _state: Option<&mut S>,
src: GuestAddr, src: GuestAddr,
dest: GuestAddr, dest: GuestAddr,
@ -392,152 +184,26 @@ where
QT: QemuHelperTuple<S>, QT: QemuHelperTuple<S>,
{ {
if let Some(h) = hooks.helpers().match_first_type::<QemuSystemStateHelper>() { if let Some(h) = hooks.helpers().match_first_type::<QemuSystemStateHelper>() {
if h.app_range.contains(&src) && !h.app_range.contains(&dest) && in_any_range(&h.isr_ranges,src).is_none() { if h.app_range.contains(&src) && !h.app_range.contains(&dest) {
if let Some(_) = in_any_range(&h.api_fn_ranges,dest) { // println!("New jmp {:x} {:x}", src, dest);
// println!("New jmp {:x} {:x}", src, dest);
// println!("API Call Edge {:x} {:x}", src, dest);
return Some(1);
// TODO: trigger collection right here
// otherwise there can be a race condition: if LAST_API_CALL is set before the API starts and the interrupt handler calls an API function, the callsite of that API call will be misidentified
}
} else if dest == 0 { // !h.app_range.contains(&src) &&
if let Some(_) = in_any_range(&h.api_fn_ranges, src) {
// println!("API Return Edge {:#x}", src);
return Some(2);
}
if let Some(_) = in_any_range(&h.isr_ranges, src) {
// println!("ISR Return Edge {:#x}", src);
return Some(3);
}
}
}
return None;
}
pub fn trace_jmp<QT, S>(
hooks: &mut QemuHooks<QT, S>,
_state: Option<&mut S>,
src: GuestAddr, mut dest: GuestAddr, id: u64
)
where
S: UsesInput,
QT: QemuHelperTuple<S>,
{
let h = hooks.helpers().match_first_type::<QemuSystemStateHelper>().expect("QemuSystemHelper not found in helper tuple");
let emulator = hooks.qemu();
if id == 1 { // API call
trigger_collection(emulator, (src, dest), CaptureEvent::APIStart, h);
// println!("Exec API Call {:#x} {:#x} {}", src, dest, get_icount(emulator));
} else if id == 2 { // API return
// Ignore returns to other APIs or ISRs. We only account for the first call depth of API calls from user space.
if in_any_range(&h.api_fn_ranges, dest).is_none() && in_any_range(&h.isr_ranges, dest).is_none() {
let mut edge = (0, 0);
edge.0=in_any_range(&h.api_fn_ranges, src).unwrap().start;
edge.1=dest;
trigger_collection(emulator, edge, CaptureEvent::APIEnd, h);
// println!("Exec API Return Edge {:#x} {:#x} {}", src, dest, get_icount(emulator));
}
} else if id == 3 { // ISR return
dest = read_rec_return_stackframe(emulator, dest);
let mut edge = (0, 0);
edge.0=in_any_range(&h.isr_ranges, src).unwrap().start;
edge.1=dest;
trigger_collection(emulator, edge, CaptureEvent::ISREnd, h);
// println!("Exec ISR Return Edge {:#x} {:#x} {}", src, dest, get_icount(emulator));
}
}
//============================= Read Hooks
#[allow(unused)]
pub fn gen_read_is_input<QT, S>(
hooks: &mut QemuHooks<QT, S>,
_state: Option<&mut S>,
pc: GuestAddr,
_addr: *mut TCGTemp,
_info: MemAccessInfo,
) -> Option<u64>
where
S: UsesInput,
QT: QemuHelperTuple<S>,
{
if let Some(h) = hooks.helpers().match_first_type::<QemuSystemStateHelper>() {
if h.app_range.contains(&pc) {
// println!("gen_read {:x}", pc);
return Some(1); return Some(1);
} }
} }
return None; return None;
} }
static mut INPUT_MEM : Range<GuestAddr> = 0..0; pub fn trace_api_call<QT, S>(
static mut MEM_READ : Option<Vec<(GuestAddr, u8)>> = None; _hooks: &mut QemuHooks<'_, QT, S>,
#[allow(unused)]
pub fn trace_reads<QT, S>(
hooks: &mut QemuHooks<QT, S>,
_state: Option<&mut S>, _state: Option<&mut S>,
_id: u64, src: GuestAddr, dest: GuestAddr, id: u64
addr: GuestAddr,
_size: usize
) )
where where
S: UsesInput, S: UsesInput,
QT: QemuHelperTuple<S>, QT: QemuHelperTuple<S>,
{ {
if unsafe { INPUT_MEM.contains(&addr) } {
let emulator = hooks.qemu();
let mut buf : [u8; 1] = [0];
unsafe {emulator.read_mem(addr, &mut buf);}
if unsafe { MEM_READ.is_none() } {
unsafe { MEM_READ = Some(Vec::from([(addr, buf[0])])) };
} else {
unsafe { MEM_READ.as_mut().unwrap().push((addr, buf[0])) };
}
// println!("exec_read {:x} {}", addr, size);
}
}
//============================= Utility functions
fn get_icount(emulator : &libafl_qemu::Qemu) -> u64 {
unsafe { unsafe {
// TODO: investigate why can_do_io is not set sometimes, as this is just a workaround let p = LAST_API_CALL.with(|x| x.get());
let c = emulator.cpu_from_index(0); *p = Some((src,dest));
let can_do_io = (*c.raw_ptr()).neg.can_do_io; // print!("*");
(*c.raw_ptr()).neg.can_do_io = true;
let r = libafl_qemu::sys::icount_get_raw();
(*c.raw_ptr()).neg.can_do_io = can_do_io;
r
} }
} }
fn read_rec_return_stackframe(emu : &libafl_qemu::Qemu, lr : GuestAddr) -> GuestAddr {
let lr_ = lr & u32::MAX-1;
if lr_ == 0xfffffffc || lr_ == 0xFFFFFFF8 || lr_ == 0xFFFFFFF0 {
unsafe {
// if 0xFFFFFFF0/1 0xFFFFFFF8/9 -> "main stack" MSP
let mut buf = [0u8; 4];
let sp : GuestAddr = if lr_ == 0xfffffffc || lr_ == 0xFFFFFFF0 { // PSP
read_user_reg_unchecked(emu) as u32
} else {
emu.read_reg(13).unwrap()
};
let ret_pc = sp+0x18; // https://developer.arm.com/documentation/dui0552/a/the-cortex-m3-processor/exception-model/exception-entry-and-return
emu.read_mem(ret_pc, buf.as_mut_slice());
return u32::from_le_bytes(buf);
// elseif 0xfffffffc/d
}} else {
return lr;
};
}
pub fn in_any_range<'a>(ranges: &'a Vec<(String, Range<u32>)>, addr : GuestAddr) -> Option<&'a std::ops::Range<GuestAddr>> {
for (_,r) in ranges {
if r.contains(&addr) {return Some(r);}
}
return None;
}

View File

@ -1,14 +1,11 @@
//! systemstate refers to the State of a FreeRTOS fuzzing target //! systemstate refers to the State of a FreeRTOS fuzzing target
use std::collections::hash_map::DefaultHasher; use std::collections::hash_map::DefaultHasher;
use std::fmt; use libafl::bolts::HasRefCnt;
use hashbrown::HashSet; use libafl::bolts::AsSlice;
use libafl_bolts::HasRefCnt;
use libafl_qemu::GuestAddr;
use std::hash::Hasher; use std::hash::Hasher;
use std::hash::Hash; use std::hash::Hash;
use hashbrown::HashMap; use hashbrown::HashMap;
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use itertools::Itertools;
use freertos::TCB_t; use freertos::TCB_t;
@ -16,85 +13,49 @@ pub mod freertos;
pub mod helpers; pub mod helpers;
pub mod observers; pub mod observers;
pub mod feedbacks; pub mod feedbacks;
pub mod graph;
pub mod schedulers; pub mod schedulers;
pub mod stg;
pub mod mutational; // #[cfg(feature = "fuzz_interrupt")]
pub mod report; // pub const IRQ_INPUT_BYTES_NUMBER : u32 = 2; // Offset for interrupt bytes
// #[cfg(not(feature = "fuzz_interrupt"))]
// pub const IRQ_INPUT_BYTES_NUMBER : u32 = 0; // Offset for interrupt bytes
// pub const IRQ_INPUT_OFFSET : u32 = 347780; // Tick offset for app code start
// Constants // Constants
const NUM_PRIOS: usize = 15; const NUM_PRIOS: usize = 5;
//============================= Struct definitions //============================= Struct definitions
#[derive(Debug, Default, Serialize, Deserialize, Clone, Copy, PartialEq, Eq, Hash)]
pub enum CaptureEvent {
APIStart, /// src,dst
APIEnd, /// src,dst
ISRStart, /// _,dst
ISREnd, /// src,_
End, /// src,_
#[default]
Undefined,
}
/*
Hierarchy of tracing data:
- RawFreeRTOSSystemState: Raw data from Qemu, represents a particular instant
- ReducedFreeRTOSSystemState: Generalized state of the system, without execution context
- ExecInterval: Some interval of execution between instants
- AtomicBasicBlock: A single-entry multiple-exit region between API calls. May be referenced in multiple intervals.
- JobInstance: A single execution of a task, records the place and input read
- TaskJob: Generalized Job instance, records the worst inputs seen so far
*/
// ============================= State info
/// Raw info Dump from Qemu /// Raw info Dump from Qemu
#[derive(Debug, Default)] #[derive(Debug, Default, Serialize, Deserialize)]
pub struct RawFreeRTOSSystemState { pub struct RawFreeRTOSSystemState {
qemu_tick: u64, qemu_tick: u64,
current_tcb: TCB_t, current_tcb: TCB_t,
prio_ready_lists: [freertos::List_t; NUM_PRIOS], prio_ready_lists: [freertos::List_t; NUM_PRIOS],
delay_list: freertos::List_t,
delay_list_overflow: freertos::List_t,
dumping_ground: HashMap<u32,freertos::rtos_struct>, dumping_ground: HashMap<u32,freertos::rtos_struct>,
read_invalid: bool,
input_counter: u32, input_counter: u32,
edge: (GuestAddr,GuestAddr), last_pc: Option<u64>,
capture_point: (CaptureEvent,String),
mem_reads: Vec<(u32, u8)>
} }
/// List of system state dumps from QemuHelpers /// List of system state dumps from QemuHelpers
static mut CURRENT_SYSTEMSTATE_VEC: Vec<RawFreeRTOSSystemState> = vec![]; static mut CURRENT_SYSTEMSTATE_VEC: Vec<RawFreeRTOSSystemState> = vec![];
/// A reduced version of freertos::TCB_t /// A reduced version of freertos::TCB_t
#[derive(Debug, Default, Serialize, Deserialize, Clone)] #[derive(Debug, Default, Serialize, Deserialize, Clone, PartialEq)]
pub struct RefinedTCB { pub struct RefinedTCB {
pub task_name: String, pub task_name: String,
pub priority: u32, pub priority: u32,
pub base_priority: u32, pub base_priority: u32,
mutexes_held: u32, mutexes_held: u32,
// notify_value: u32, notify_value: u32,
notify_state: u8, notify_state: u8,
} }
impl PartialEq for RefinedTCB {
fn eq(&self, other: &Self) -> bool {
let ret = self.task_name == other.task_name &&
self.priority == other.priority &&
self.base_priority == other.base_priority;
#[cfg(feature = "do_hash_notify_state")]
let ret = ret && self.notify_state == other.notify_state;
ret
}
}
impl Hash for RefinedTCB { impl Hash for RefinedTCB {
fn hash<H: Hasher>(&self, state: &mut H) { fn hash<H: Hasher>(&self, state: &mut H) {
self.task_name.hash(state); self.task_name.hash(state);
self.priority.hash(state); self.priority.hash(state);
self.mutexes_held.hash(state); self.mutexes_held.hash(state);
#[cfg(feature = "do_hash_notify_state")] #[cfg(not(feature = "no_hash_state"))]
self.notify_state.hash(state); self.notify_state.hash(state);
// self.notify_value.hash(state); // self.notify_value.hash(state);
} }
@ -110,7 +71,7 @@ impl RefinedTCB {
priority: input.uxPriority, priority: input.uxPriority,
base_priority: input.uxBasePriority, base_priority: input.uxBasePriority,
mutexes_held: input.uxMutexesHeld, mutexes_held: input.uxMutexesHeld,
// notify_value: input.ulNotifiedValue[0], notify_value: input.ulNotifiedValue[0],
notify_state: input.ucNotifyState[0], notify_state: input.ucNotifyState[0],
} }
} }
@ -124,379 +85,53 @@ impl RefinedTCB {
priority: input.uxPriority, priority: input.uxPriority,
base_priority: input.uxBasePriority, base_priority: input.uxBasePriority,
mutexes_held: input.uxMutexesHeld, mutexes_held: input.uxMutexesHeld,
// notify_value: input.ulNotifiedValue[0], notify_value: input.ulNotifiedValue[0],
notify_state: input.ucNotifyState[0], notify_state: input.ucNotifyState[0],
} }
} }
} }
} }
/// Reduced information about a system's state, without any execution context /// Refined information about the states an execution transitioned between
#[derive(Debug, Default, Serialize, Deserialize, Clone)] #[derive(Debug, Default, Serialize, Deserialize, Clone)]
pub struct ReducedFreeRTOSSystemState { pub struct RefinedFreeRTOSSystemState {
// pub tick: u64, pub start_tick: u64,
pub end_tick: u64,
last_pc: Option<u64>,
input_counter: u32,
pub current_task: RefinedTCB, pub current_task: RefinedTCB,
ready_list_after: Vec<RefinedTCB>, ready_list_after: Vec<RefinedTCB>,
delay_list_after: Vec<RefinedTCB>,
read_invalid: bool,
// edge: (Option<GuestAddr>,Option<GuestAddr>),
// pub capture_point: (CaptureEvent,String),
// input_counter: u32
} }
impl PartialEq for ReducedFreeRTOSSystemState { impl PartialEq for RefinedFreeRTOSSystemState {
fn eq(&self, other: &Self) -> bool { fn eq(&self, other: &Self) -> bool {
self.current_task == other.current_task && self.ready_list_after == other.ready_list_after && self.current_task == other.current_task && self.ready_list_after == other.ready_list_after &&
self.delay_list_after == other.delay_list_after && self.read_invalid == other.read_invalid self.last_pc == other.last_pc
// && self.edge == other.edge
// && self.capture_point == other.capture_point
} }
} }
impl Hash for ReducedFreeRTOSSystemState { impl Hash for RefinedFreeRTOSSystemState {
fn hash<H: Hasher>(&self, state: &mut H) { fn hash<H: Hasher>(&self, state: &mut H) {
self.current_task.hash(state); self.current_task.hash(state);
self.ready_list_after.hash(state); self.ready_list_after.hash(state);
self.delay_list_after.hash(state); // self.last_pc.hash(state);
self.read_invalid.hash(state);
} }
} }
impl ReducedFreeRTOSSystemState { impl RefinedFreeRTOSSystemState {
// fn get_tick(&self) -> u64 { fn get_time(&self) -> u64 {
// self.tick self.end_tick-self.start_tick
// }
pub fn print_lists(&self) -> String {
let mut ret = String::from("+");
for j in self.ready_list_after.iter() {
ret.push_str(format!(" {}", j.task_name).as_str());
}
ret.push_str("\n-");
for j in self.delay_list_after.iter() {
ret.push_str(format!(" {}", j.task_name).as_str());
}
ret
}
pub fn get_hash(&self) -> u64 {
let mut h = DefaultHasher::new();
self.hash(&mut h);
h.finish()
} }
} }
impl fmt::Display for ReducedFreeRTOSSystemState {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let ready = self.ready_list_after.iter().map(|x| x.task_name.clone()).collect::<Vec<_>>().join(" ");
let delay = self.delay_list_after.iter().map(|x| x.task_name.clone()).collect::<Vec<_>>().join(" ");
write!(f, "Valid: {} | Current: {} | Ready: {} | Delay: {}", u32::from(!self.read_invalid), self.current_task.task_name, ready, delay)
}
}
// ============================= Interval info
// #[derive(Debug, Default, Serialize, Deserialize, Clone, PartialEq, Eq, Hash)]
// pub enum ExecLevel {
// APP = 0,
// API = 1,
// ISR = 2,
// }
#[derive(Debug, Default, Serialize, Deserialize, Clone, PartialEq, Eq, Hash)]
pub struct ExecInterval {
pub start_tick: u64,
pub end_tick: u64,
pub start_state: u64,
pub end_state: u64,
pub start_capture: (CaptureEvent, String),
pub end_capture: (CaptureEvent, String),
pub level: u8,
tick_spend_preempted: u64,
pub abb: Option<AtomicBasicBlock>
}
impl ExecInterval {
pub fn get_exec_time(&self) -> u64 {
self.end_tick-self.start_tick-self.tick_spend_preempted
}
pub fn is_valid(&self) -> bool {
self.start_tick != 0 || self.end_tick != 0
}
pub fn invaildate(&mut self) {
self.start_tick = 0;
self.end_tick = 0;
}
/// Attach this interval to the later one, keeping a record of the time spent preempted
pub fn try_unite_with_later_interval(&mut self, later_interval : &mut Self) -> bool {
if self.end_state!=later_interval.start_state || self.abb!=later_interval.abb || !self.is_valid() || !later_interval.is_valid() {
return false;
}
// assert_eq!(self.end_state, later_interval.start_state);
// assert_eq!(self.abb, later_interval.abb);
later_interval.tick_spend_preempted += self.tick_spend_preempted + (later_interval.start_tick-self.end_tick);
later_interval.start_tick = self.start_tick;
later_interval.start_state = self.start_state;
self.invaildate();
return true;
}
pub fn get_hash_index(&self) -> (u64, u64) {
return (self.start_state, self.abb.as_ref().expect("ABB not set").get_hash())
}
pub fn get_task_name(&self) -> Option<String> {
self.abb.as_ref().map(|x| x.instance_name.clone()).flatten()
}
pub fn get_task_name_unchecked(&self) -> String {
self.get_task_name().unwrap_or_else(|| "unknown".to_string())
}
pub fn is_abb_end(&self) -> bool {
match self.end_capture.0 {
CaptureEvent::APIStart | CaptureEvent::APIEnd | CaptureEvent::ISREnd | CaptureEvent::End => true,
_ => false
}
}
}
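// Editorial sketch of the merge semantics (in-module, since some fields are private); the ticks
// and state ids are invented for illustration:
let mut first  = ExecInterval { start_tick: 100, end_tick: 200, start_state: 1, end_state: 2, ..Default::default() };
let mut second = ExecInterval { start_tick: 250, end_tick: 300, start_state: 2, end_state: 3, ..Default::default() };
// Same ABB (both None here) and matching states, so the two halves are stitched together:
assert!(first.try_unite_with_later_interval(&mut second));
assert_eq!(second.get_exec_time(), 150); // 300 - 100, minus the 50 ticks spent preempted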
// ============================= Atomic Basic Block
/// A single-entry multiple-exit region between API calls. May be referenced in multiple intervals.
#[derive(Default, Serialize, Deserialize, Clone)]
pub struct AtomicBasicBlock {
start: GuestAddr,
ends: HashSet<GuestAddr>,
level: u8,
instance_id: usize,
instance_name: Option<String>,
}
impl PartialEq for AtomicBasicBlock {
fn eq(&self, other: &Self) -> bool {
self.start == other.start && self.ends == other.ends && self.level == other.level && self.instance_name == other.instance_name
}
}
impl Eq for AtomicBasicBlock {}
impl Hash for AtomicBasicBlock {
fn hash<H: Hasher>(&self, state: &mut H) {
// Use a combination of the start address and the set of ending addresses to compute the hash value
self.start.hash(state);
let mut keys : Vec<_> = self.ends.iter().collect();
keys.sort();
self.level.hash(state);
self.instance_name.hash(state);
keys.hash(state);
}
}
impl fmt::Display for AtomicBasicBlock {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let mut ends_str = String::new();
for end in &self.ends {
ends_str.push_str(&format!("{:#x}, ", end));
}
write!(f, "ABB {} {{ level: {}, start: {:#x}, ends: [{}]}}", &self.instance_name.as_ref().unwrap_or(&"".to_string()), self.level, self.start, ends_str.trim().trim_matches(','))
}
}
impl fmt::Debug for AtomicBasicBlock {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let mut ends_str = String::new();
for end in &self.ends {
ends_str.push_str(&format!("{:#x}, ", end));
}
write!(f, "ABB {} {{ level: {}, start: 0x{:#x}, ends: [{}]}}", &self.instance_name.as_ref().unwrap_or(&"".to_string()), self.level, self.start, ends_str.trim().trim_matches(','))
}
}
impl PartialOrd for AtomicBasicBlock {
fn partial_cmp(&self, other: &AtomicBasicBlock) -> Option<std::cmp::Ordering> {
Some(self.cmp(other))
}
}
impl Ord for AtomicBasicBlock {
fn cmp(&self, other: &AtomicBasicBlock) -> std::cmp::Ordering {
if self.start.cmp(&other.start) == std::cmp::Ordering::Equal {
if self.level.cmp(&other.level) != std::cmp::Ordering::Equal {
return self.level.cmp(&other.level);
}
// If the start addresses are equal, compare by 'ends'
let end1 = if self.ends.len() == 1 { *self.ends.iter().next().unwrap() as u64 } else {
let mut temp = self.ends.iter().collect::<Vec<_>>().into_iter().collect::<Vec<&GuestAddr>>();
temp.sort_unstable();
let mut h = DefaultHasher::new();
temp.hash(&mut h);
h.finish()
};
let end2 = if other.ends.len() == 1 { *other.ends.iter().next().unwrap() as u64 } else {
let mut temp = other.ends.iter().collect::<Vec<_>>().into_iter().collect::<Vec<&GuestAddr>>();
temp.sort_unstable();
let mut h = DefaultHasher::new();
temp.hash(&mut h);
h.finish()
};
end1.cmp(&end2)
} else {
// If the start addresses are not equal, compare by 'start'
self.start.cmp(&other.start)
}
}
}
impl AtomicBasicBlock {
pub fn get_hash(&self) -> u64 {
let mut s = DefaultHasher::new();
self.hash(&mut s);
s.finish()
}
pub fn instance_eq(&self, other: &Self) -> bool {
self == other && self.instance_id == other.instance_id
}
}
fn get_task_names(trace: &Vec<ReducedFreeRTOSSystemState>) -> HashSet<String> {
let mut ret: HashSet<_, _> = HashSet::new();
for state in trace {
ret.insert(state.current_task.task_name.to_string());
}
ret
}
libafl_bolts::impl_serdeany!(AtomicBasicBlock);
// ============================= Job instances
#[derive(Debug, Default, Serialize, Deserialize, Clone)]
pub struct JobInstance {
pub name: String,
pub mem_reads: Vec<(u32, u8)>,
pub release: u64,
pub response: u64,
pub exec_ticks: u64,
pub ticks_per_abb: Vec<u64>,
pub abbs: Vec<AtomicBasicBlock>,
hash_cache: u64
}
impl PartialEq for JobInstance {
fn eq(&self, other: &Self) -> bool {
self.abbs == other.abbs
}
}
impl Eq for JobInstance {}
impl Hash for JobInstance {
fn hash<H: Hasher>(&self, state: &mut H) {
self.abbs.hash(state);
}
}
impl JobInstance {
pub fn get_hash(&mut self) -> u64 {
if self.hash_cache == 0 {
let mut s = DefaultHasher::new();
self.hash(&mut s);
self.hash_cache = s.finish();
}
self.hash_cache
}
pub fn get_hash_cached(&self) -> u64 {
if self.hash_cache == 0 {
let mut s = DefaultHasher::new();
self.hash(&mut s);
s.finish()
} else {
self.hash_cache
}
}
}
// ============================= Generalized job instances
#[derive(Debug, Default, Serialize, Deserialize, Clone)]
pub struct TaskJob {
pub name: String,
pub worst_bytes: Vec<u8>,
pub woet_ticks: u64,
pub woet_per_abb: Vec<u64>,
pub abbs: Vec<AtomicBasicBlock>,
hash_cache: u64
}
impl PartialEq for TaskJob {
fn eq(&self, other: &Self) -> bool {
self.abbs == other.abbs
}
}
impl Eq for TaskJob {}
impl Hash for TaskJob {
fn hash<H: Hasher>(&self, state: &mut H) {
self.abbs.hash(state);
}
}
impl TaskJob {
pub fn get_hash(&mut self) -> u64 {
if self.hash_cache == 0 {
let mut s = DefaultHasher::new();
self.hash(&mut s);
self.hash_cache = s.finish();
}
self.hash_cache
}
pub fn get_hash_cached(&self) -> u64 {
if self.hash_cache == 0 {
let mut s = DefaultHasher::new();
self.hash(&mut s);
s.finish()
} else {
self.hash_cache
}
}
pub fn try_update(&mut self, other: &JobInstance) -> bool {
assert_eq!(self.get_hash(), other.get_hash_cached());
let mut ret = false;
if other.exec_ticks > self.woet_ticks {
self.woet_ticks = other.exec_ticks;
self.woet_per_abb = other.ticks_per_abb.clone();
self.worst_bytes = other.mem_reads.iter().sorted_by(|a,b| a.0.cmp(&b.0)).map(|x| x.1).collect();
ret = true;
}
ret
}
pub fn from_instance(input: &JobInstance) -> Self {
let c = input.get_hash_cached();
Self {
name: input.name.clone(),
worst_bytes: input.mem_reads.iter().map(|x| x.1.clone()).collect(),
woet_ticks: input.exec_ticks,
woet_per_abb: input.ticks_per_abb.clone(),
abbs: input.abbs.clone(),
hash_cache: c
}
}
pub fn map_bytes_onto(&self, input: &JobInstance, offset: Option<u32>) -> Vec<(u32,u8)> {
if input.mem_reads.len() == 0 {return vec![];}
let ret = input.mem_reads.iter().take(self.worst_bytes.len()).enumerate().filter_map(|(idx,(addr,oldbyte))| if self.worst_bytes[idx]!=*oldbyte {Some((*addr-offset.unwrap_or_default(), self.worst_bytes[idx]))} else {None}).collect();
// eprintln!("Mapped: {:?}", ret);
ret
}
}
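// Editorial sketch of the intended flow between JobInstance and TaskJob (in-module, since
// `hash_cache` is private); the task name, tick counts and read addresses are made up:
let first_run  = JobInstance { name: "TaskA".into(), exec_ticks: 100, mem_reads: vec![(0x2000_0000, 1u8)], ..Default::default() };
let second_run = JobInstance { name: "TaskA".into(), exec_ticks: 150, mem_reads: vec![(0x2000_0000, 7u8)], ..Default::default() };
let mut generalized = TaskJob::from_instance(&first_run);
if generalized.try_update(&second_run) {
    // second_run took longer, so its reads become the worst-case bytes; map them back onto
    // another instance's read addresses to steer the next input towards the worst case:
    let patch = generalized.map_bytes_onto(&first_run, None);
    assert_eq!(patch, vec![(0x2000_0000, 7u8)]);
}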
// ============================= Per testcase metadata
// Wrapper around Vec<ReducedFreeRTOSSystemState> to attach as Metadata
#[derive(Debug, Default, Serialize, Deserialize, Clone)]
pub struct FreeRTOSSystemStateMetadata {
pub inner: Vec<ReducedFreeRTOSSystemState>,
// TODO: Add abbs and memory reads
trace_length: usize,
indices: Vec<usize>, // Hashed enumeration of States
tcref: isize,
}
impl FreeRTOSSystemStateMetadata {
pub fn new(inner: Vec<ReducedFreeRTOSSystemState>) -> Self {
let tmp = inner.iter().enumerate().map(|x| compute_hash(x) as usize).collect();
Self {trace_length: inner.len(), inner: inner, indices: tmp, tcref: 0}
}
@@ -510,14 +145,14 @@ where
s.finish()
}
// impl AsSlice for FreeRTOSSystemStateMetadata {
// /// Convert the slice of system-states to a slice of hashes over enumerated states
// fn as_slice(&self) -> &[usize] {
// self.indices.as_slice()
// }
// type Entry = usize;
// }
impl HasRefCnt for FreeRTOSSystemStateMetadata {
fn refcnt(&self) -> isize {
@@ -529,4 +164,4 @@ impl HasRefCnt for FreeRTOSSystemStateMetadata {
}
}
libafl_bolts::impl_serdeany!(FreeRTOSSystemStateMetadata);

@@ -1,599 +0,0 @@
//! Stages that mutate interrupt arrival times and worst-case input snippets.
//! For the current input, they derive new candidate inputs and run them in the executor.
use core::marker::PhantomData;
use std::cmp::{max, min};
use hashbrown::HashMap;
use libafl_bolts::{rands::{
random_seed, Rand, StdRand
}, Named};
use libafl::{
common::{HasMetadata, HasNamedMetadata}, corpus::{self, Corpus}, events::{Event, EventFirer, EventProcessor, LogSeverity}, fuzzer::Evaluator, inputs::{HasMutatorBytes, HasTargetBytes, Input, MultipartInput}, mark_feature_time, prelude::{new_hash_feedback, AggregatorOps, CorpusId, MutationResult, Mutator, UserStats, UserStatsValue, UsesInput}, stages::Stage, start_timer, state::{HasCorpus, HasRand, MaybeHasClientPerfMonitor, UsesState}, Error
};
use libafl::prelude::State;
use petgraph::{graph::NodeIndex, graph::{self, DiGraph}};
use crate::{time::clock::{IcHist, QEMU_ISNS_PER_USEC}, fuzzer::{DO_NUM_INTERRUPT, FIRST_INT, MAX_NUM_INTERRUPT}, systemstate::{stg::{STGFeedbackState, STGNodeMetadata}, CaptureEvent, ExecInterval, FreeRTOSSystemStateMetadata, ReducedFreeRTOSSystemState}};
use libafl::state::HasCurrentTestcase;
use std::borrow::Cow;
use simple_moving_average::SMA;
use super::{stg::{STGEdge, STGNode}, JobInstance};
// pub static mut MINIMUM_INTER_ARRIVAL_TIME : u32 = 1000 /*us*/ * QEMU_ISNS_PER_USEC;
// one isn per 2**4 ns
// virtual insn/sec 62500000 = 1/16 GHz
// 1ms = 62500 insn
// 1us = 62.5 insn
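// Example: with 62.5 instructions per microsecond, a 500 us minimum inter-arrival time
// corresponds to 500 * 62.5 = 31_250 instructions, and a 1 ms period to 62_500 instructions.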
pub fn input_bytes_to_interrupt_times(buf: &[u8], config: (usize,u32)) -> Vec<u32> {
let len = buf.len();
let mut start_tick;
let mut ret = Vec::with_capacity(min(DO_NUM_INTERRUPT, len/4));
for i in 0..DO_NUM_INTERRUPT {
let mut buf4b : [u8; 4] = [0,0,0,0];
if len >= (i+1)*4 {
for j in 0usize..4usize {
buf4b[j]=buf[i*4+j];
}
start_tick = u32::from_le_bytes(buf4b);
if start_tick < FIRST_INT {start_tick=0;}
ret.push(start_tick);
} else {break;}
}
ret.sort_unstable();
// obey the minimum inter arrival time while maintaining the sort
for i in 0..ret.len() {
if ret[i]==0 {continue;}
for j in i+1..ret.len() {
if ret[j]-ret[i] < config.1 as u32 * QEMU_ISNS_PER_USEC {
// ret[j] = u32::saturating_add(ret[i],config.1 * QEMU_ISNS_PER_USEC);
ret[j] = 0; // remove the interrupt
ret.sort_unstable();
} else {break;}
}
}
ret
}
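// A minimal illustrative test sketch (not part of the original file): it only checks shape
// properties of the decoder, since DO_NUM_INTERRUPT and FIRST_INT are configuration constants
// whose concrete values are not assumed here. The byte values and the config tuple are hypothetical.
#[cfg(test)]
mod interrupt_time_decoding_sketch {
use super::*;
#[test]
fn decoded_times_are_sorted_and_bounded() {
// two 4-byte little-endian values: 10_000 and 1_000 ticks
let bytes = [0x10u8, 0x27, 0x00, 0x00, 0xE8, 0x03, 0x00, 0x00];
let times = input_bytes_to_interrupt_times(&bytes, (0usize, 500u32));
// at most one decoded time per 4 input bytes
assert!(times.len() <= 2);
// the decoder always returns the times in ascending order
assert!(times.windows(2).all(|w| w[0] <= w[1]));
}
}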
pub fn interrupt_times_to_input_bytes(interrupt_times: &[u32]) -> Vec<u8> {
let mut ret = Vec::with_capacity(interrupt_times.len()*4);
for i in interrupt_times {
ret.extend(u32::to_le_bytes(*i));
}
ret
}
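// For example, encoding [100, 200] yields the little-endian bytes [100, 0, 0, 0, 200, 0, 0, 0];
// input_bytes_to_interrupt_times reverses this as long as the times respect FIRST_INT and the
// configured minimum inter-arrival spacing.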
//======================= Custom mutator
fn is_interrupt_handler(graph: &DiGraph<STGNode, STGEdge>, node: NodeIndex) -> bool {
graph.edges_directed(node, petgraph::Direction::Incoming).any(|x| x.weight().event == CaptureEvent::ISRStart)
}
fn has_interrupt_handler_non_systick(graph: &DiGraph<STGNode, STGEdge>, node: NodeIndex) -> bool {
graph.edges_directed(node, petgraph::Direction::Outgoing).any(|x| x.weight().event == CaptureEvent::ISRStart && x.weight().name!="xPortSysTickHandler")
}
fn is_candidate_for_new_branches(graph: &DiGraph<STGNode, STGEdge>, node: NodeIndex) -> bool {
!has_interrupt_handler_non_systick(graph, node) && !is_interrupt_handler(graph, node)
}
// TODO: this could be much more efficient if the graph stored state snapshots and input progress were tracked
/// Tries to place an additional interrupt inside an execution interval whose STG node has no
/// non-SysTick interrupt edge yet; returns the adjusted interrupt times if such a spot was found.
pub fn try_force_new_branches(interrupt_ticks : &[u32], fbs: &STGFeedbackState, meta: &STGNodeMetadata, config: (usize, u32)) -> Option<Vec<u32>> {
let mut new = false;
let mut new_interrupt_times = Vec::new();
for (num,&interrupt_time) in interrupt_ticks.iter().enumerate() {
let lower_bound = if num==0 {FIRST_INT} else {interrupt_ticks[num-1].saturating_add(config.1 * QEMU_ISNS_PER_USEC)};
let next = if interrupt_ticks.len()>num+1 {interrupt_ticks[num+1]} else {u32::MAX};
for exec_interval in meta.intervals().iter().filter(|x| x.start_tick >= lower_bound as u64 && x.start_tick < next as u64) {
if !(exec_interval.start_capture.0==CaptureEvent::ISRStart) { // shortcut to skip interrupt handlers without node lookup
let node_index = fbs.state_abb_hash_index.get(&exec_interval.get_hash_index()).unwrap();
if !has_interrupt_handler_non_systick(&fbs.graph, node_index.clone()) {
let new_time = exec_interval.start_tick.saturating_add((exec_interval.end_tick-exec_interval.start_tick)/4);
new_interrupt_times.push(new_time.try_into().expect("ticks > u32"));
if (new_time + config.1 as u64) < next as u64 { // the new interrupt is not too close to the next one
new_interrupt_times.extend(interrupt_ticks.iter().skip(num).cloned());
} else { // the new interrupt is too close to the next one, skip the next one
new_interrupt_times.extend(interrupt_ticks.iter().skip(num+1).cloned());
}
new=true;
break;
}
}
}
if new {break;}
new_interrupt_times.push(interrupt_time);
}
if new {Some(new_interrupt_times)} else {None}
}
/// A stage that shifts and inserts interrupt arrival times for the current testcase and re-runs it
#[derive(Clone, Debug)]
pub struct InterruptShiftStage<E, EM, Z> {
#[allow(clippy::type_complexity)]
phantom: PhantomData<(E, EM, Z)>,
interrup_config: Vec<(usize,u32)>,
success: simple_moving_average::SingleSumSMA<f32, f32, 50>
}
impl<E, EM, Z> InterruptShiftStage<E, EM, Z>
where
E: UsesState<State = Z::State>,
EM: UsesState<State = Z::State>,
Z: Evaluator<E, EM>,
Z::State: MaybeHasClientPerfMonitor + HasCorpus + HasRand,
{
pub fn new(config : &Vec<(usize,u32)>) -> Self {
Self { phantom: PhantomData, interrup_config: config.clone(), success: simple_moving_average::SingleSumSMA::from_zero(1.0) }
}
}
static mut num_stage_execs : u64 = 0;
static mut sum_reruns : u64 = 0;
static mut sum_interesting_reruns : u64 = 0;
impl<E, EM, Z, I> InterruptShiftStage<E, EM, Z>
where
E: UsesState<State = Z::State>,
EM: UsesState<State = Z::State>,
EM: EventFirer,
Z: Evaluator<E, EM>,
Z::State: MaybeHasClientPerfMonitor + HasCorpus + HasRand + HasMetadata + HasNamedMetadata,
<Z::State as UsesInput>::Input: Input,
Z::State: UsesInput<Input = MultipartInput<I>>,
I: HasMutatorBytes + Default
{
fn report_stats(&self, state: &mut <InterruptShiftStage<E, EM, Z> as UsesState>::State, manager: &mut EM) {
unsafe {
let _ = manager.fire(
state,
Event::UpdateUserStats {
name: Cow::from("InterruptShiftStage"),
value: UserStats::new(
UserStatsValue::String(Cow::from(format!("{} -> {}/{} {:.1}% ", num_stage_execs, sum_interesting_reruns, sum_reruns, sum_interesting_reruns as f32 * 100.0 / sum_reruns as f32))),
AggregatorOps::None,
),
phantom: PhantomData,
},
);
}
}
}
impl<E, EM, Z, I> Stage<E, EM, Z> for InterruptShiftStage<E, EM, Z>
where
E: UsesState<State = Z::State>,
EM: UsesState<State = Z::State>,
EM: EventFirer,
Z: Evaluator<E, EM>,
Z::State: MaybeHasClientPerfMonitor + HasCorpus + HasRand + HasMetadata + HasNamedMetadata,
<Z::State as UsesInput>::Input: Input,
Z::State: UsesInput<Input = MultipartInput<I>>,
I: HasMutatorBytes + Default
{
fn perform(
&mut self,
fuzzer: &mut Z,
executor: &mut E,
state: &mut Self::State,
manager: &mut EM
) -> Result<(), Error> {
if self.interrup_config.len() == 0 {return Ok(());} // configuration implies no interrupts
let mut myrand = StdRand::new();
myrand.set_seed(state.rand_mut().next());
unsafe {num_stage_execs+=1;}
let mut rerun_count = 0; // count how many times we rerun the executor
let mut interesting_rerun_count = 0; // count how many reruns were interesting
// Try many times to find a mutation that is not already in the corpus
let loopbound = max(1, (self.success.get_average()*100.0) as usize);
for _ in 0..loopbound {
// Choose which isr to mutate
let interrup_config = match myrand.choose(&self.interrup_config) {
Some(s) => s,
Option::None => {
self.report_stats(state, manager);
return Ok(())
}
};
let name = format!("isr_{}_times", interrup_config.0);
// manager.log(state, LogSeverity::Info, format!("Mutation {}/{}", loopbound, loopcount))?;
let curr_case = state.current_testcase()?;
let curr_input = curr_case.input().as_ref().unwrap();
let mut new_input = curr_input.clone();
let new_interrupt_part : &mut I = if new_input.parts_by_name(&name).next().is_some() {
new_input.parts_by_name_mut(&name).next().unwrap()
} else {
new_input.add_part(String::from(&name), I::default()); new_input.parts_by_name_mut(&name).next().unwrap()
}.1;
let old_interrupt_times = input_bytes_to_interrupt_times(new_interrupt_part.bytes(), *interrup_config);
let mut new_interrupt_times = Vec::with_capacity(MAX_NUM_INTERRUPT);
let mut do_rerun = false;
// if state.rand_mut().between(1, 100) <= 50 // only attempt the mutation half of the time
{
#[cfg(feature = "mutate_stg")]
{
let metadata = state.metadata_map();
let maxtick = {metadata.get::<IcHist>().unwrap().1.0};
drop(new_interrupt_part.drain(..).collect::<Vec<u8>>());
{
let choice = myrand.between(1,100);
if choice <= 25 || *old_interrupt_times.get(0).unwrap_or(&u32::MAX) as u64 > maxtick { // ~25% of the time: fully randomize all interrupts
do_rerun = true;
let hist = metadata.get::<IcHist>().unwrap();
let maxtick : u64 = hist.1.0;
// let maxtick : u64 = (_input.exec_time().expect("No duration found").as_nanos() >> 4).try_into().unwrap();
for _ in 0..myrand.between(0,min(MAX_NUM_INTERRUPT, (maxtick as usize * 3) / (interrup_config.1 as usize * QEMU_ISNS_PER_USEC as usize * 2))) {
new_interrupt_times.push(myrand.between(0, min(maxtick, u32::MAX as u64) as usize).try_into().expect("ticks > u32"));
}
}
else if choice <= 75 { // ~50% of cases: try to force a new STG branch
let feedbackstate = match state
.named_metadata_map()
.get::<STGFeedbackState>("stgfeedbackstate") {
Some(s) => s,
Option::None => {
panic!("STGfeedbackstate not visible")
}
};
if let Some(meta) = curr_case.metadata_map().get::<STGNodeMetadata>() {
if let Some(t) = try_force_new_branches(&old_interrupt_times, feedbackstate, meta, *interrup_config) {
do_rerun = true;
new_interrupt_times=t;
}
}
// let tmp = current_case.metadata_map().get::<STGNodeMetadata>();
// if tmp.is_some() {
// let trace = tmp.expect("STGNodeMetadata not found");
// let mut node_indices = vec![];
// for i in (0..trace.intervals.len()).into_iter() {
// if let Some(abb) = &trace.intervals[i].abb {
// if let Some(idx) = feedbackstate.state_abb_hash_index.get(&(trace.intervals[i].start_state,abb.get_hash())) {
// node_indices.push(Some(idx));
// continue;
// }
// }
// node_indices.push(None);
// }
// // let mut marks : HashMap<u32, usize>= HashMap::new(); // interrupt -> block hit
// // for i in 0..trace.intervals.len() {
// // let curr = &trace.intervals[i];
// // let m = interrupt_offsets[0..num_interrupts].iter().filter(|x| (curr.start_tick..curr.end_tick).contains(&((**x) as u64)));
// // for k in m {
// // marks.insert(*k,i);
// // }
// // }
// // walk backwards trough the trace and try moving the interrupt to a block that does not have an outgoing interrupt edge or ist already hit by a predecessor
// for i in (0..num_interrupts).rev() {
// let mut lb = FIRST_INT;
// let mut ub : u32 = trace.intervals[trace.intervals.len()-1].end_tick.try_into().expect("ticks > u32");
// if i > 0 {
// lb = u32::saturating_add(interrupt_offsets[i-1],unsafe{MINIMUM_INTER_ARRIVAL_TIME});
// }
// if i < num_interrupts-1 {
// ub = u32::saturating_sub(interrupt_offsets[i+1],unsafe{MINIMUM_INTER_ARRIVAL_TIME});
// }
// let alternatives : Vec<_> = (0..trace.intervals.len()).filter(|x|
// node_indices[*x].is_some() &&
// (trace.intervals[*x].start_tick < (lb as u64) && (lb as u64) < trace.intervals[*x].end_tick
// || trace.intervals[*x].start_tick > (lb as u64) && trace.intervals[*x].start_tick < (ub as u64))
// ).collect();
// let not_yet_hit : Vec<_> = alternatives.iter().filter(
// |x| feedbackstate.graph.edges_directed(*node_indices[**x].unwrap(), petgraph::Direction::Outgoing).any(|y| y.weight().event != CaptureEvent::ISRStart)).collect();
// if not_yet_hit.len() > 0 {
// let replacement = &trace.intervals[*myrand.choose(not_yet_hit).unwrap()];
// interrupt_offsets[i] = (myrand.between(replacement.start_tick as usize,
// replacement.end_tick as usize)).try_into().expect("ticks > u32");
// // println!("chose new alternative, i: {} {} -> {}",i,tmp, interrupt_offsets[i]);
// do_rerun = true;
// break;
// }
// }
// }
}
else { // old version of the alternative search
new_interrupt_times = old_interrupt_times.clone();
let tmp = curr_case.metadata_map().get::<STGNodeMetadata>();
if tmp.is_some() {
let trace = tmp.expect("STGNodeMetadata not found");
// calculate hits and identify snippets
let mut last_m = false;
let mut marks : Vec<(&ExecInterval, usize, usize)>= vec![]; // 1: got interrupted, 2: interrupt handler
for i in 0..trace.intervals().len() {
let curr = &trace.intervals()[i];
let m = old_interrupt_times.iter().any(|x| (curr.start_tick..curr.end_tick).contains(&(*x as u64)));
if m {
marks.push((curr, i, 1));
// println!("1: {}",curr.current_task.0.task_name);
} else if last_m {
marks.push((curr, i, 2));
// println!("2: {}",curr.current_task.0.task_name);
} else {
marks.push((curr, i, 0));
}
last_m = m;
}
for i in 0..old_interrupt_times.len() {
// bounds based on minimum inter-arrival time
let mut lb = FIRST_INT;
let mut ub : u32 = trace.intervals()[trace.intervals().len()-1].end_tick.try_into().expect("ticks > u32");
if i > 0 {
// use the new times, because changes to preceding timings are not accounted for yet
lb = u32::saturating_add(new_interrupt_times[i-1], interrup_config.1 * QEMU_ISNS_PER_USEC);
}
if i < old_interrupt_times.len()-1 {
ub = u32::saturating_sub(new_interrupt_times[i+1], interrup_config.1 * QEMU_ISNS_PER_USEC);
}
// get old hit and handler
let old_hit = marks.iter().filter(
|x| x.0.start_tick < (old_interrupt_times[i] as u64) && (old_interrupt_times[i] as u64) < x.0.end_tick
).next();
let old_handler = match old_hit {
Some(s) => if s.1 < old_interrupt_times.len()-1 && s.1 < marks.len()-1 {
Some(marks[s.1+1])
} else {None},
None => None
};
// find reachable alternatives
let alternatives : Vec<_> = marks.iter().filter(|x|
x.2 != 2 &&
(
x.0.start_tick < (lb as u64) && (lb as u64) < x.0.end_tick
|| x.0.start_tick > (lb as u64) && x.0.start_tick < (ub as u64))
).collect();
// in cases there are no alternatives
if alternatives.len() == 0 {
if old_hit.is_none() {
// choose something random
let untouched : Vec<_> = marks.iter().filter(
|x| x.2 == 0
).collect();
if untouched.len() > 0 {
let tmp = old_interrupt_times[i];
let choice = myrand.choose(untouched).unwrap();
new_interrupt_times[i] = myrand.between(choice.0.start_tick as usize, choice.0.end_tick as usize)
.try_into().expect("tick > u32");
do_rerun = true;
}
// println!("no alternatives, choose random i: {} {} -> {}",i,tmp,interrupt_offsets[i]);
continue;
} else {
// do nothing
// println!("no alternatives, do nothing i: {} {}",i,interrupt_offsets[i]);
continue;
}
}
let replacement = myrand.choose(alternatives).unwrap();
if (old_hit.map_or(false, |x| x == replacement)) {
// use the old value
// println!("chose old value, do nothing i: {} {}",i,interrupt_offsets[i]);
continue;
} else {
let extra = if (old_hit.map_or(false, |x| x.1 < replacement.1)) {
// move further back, respect old_handler
old_handler.map_or(0, |x| x.0.end_tick - x.0.start_tick)
} else { 0 };
// let tmp = new_interrupt_times[i];
new_interrupt_times[i] = (myrand.between(replacement.0.start_tick as usize,
replacement.0.end_tick as usize) + extra as usize).try_into().expect("ticks > u32");
// println!("chose new alternative, i: {} {} -> {}",i,tmp, interrupt_offsets[i]);
do_rerun = true;
}
}
// println!("Mutator: {:?}", numbers);
// let mut start : u32 = 0;
// for i in 0..numbers.len() {
// let tmp = numbers[i];
// numbers[i] = numbers[i]-start;
// start = tmp;
// }
new_interrupt_part.extend(&interrupt_times_to_input_bytes(&new_interrupt_times));
}
}
}
}
#[cfg(not(feature = "trace_stg"))]
{
if myrand.between(1,100) <= 25 { // we have no hint if interrupt times will change anything
do_rerun = true;
let metadata = state.metadata_map();
let maxtick = {metadata.get::<IcHist>().unwrap().1.0};
new_interrupt_times = Vec::with_capacity(MAX_NUM_INTERRUPT);
for i in 0..myrand.between(0,min(MAX_NUM_INTERRUPT, (maxtick as usize * 3) / (interrup_config.1 as usize * QEMU_ISNS_PER_USEC as usize * 2))) {
new_interrupt_times.push(myrand.between(0, min(maxtick, u32::MAX as u64) as usize).try_into().expect("ticks > u32"));
}
}
}
new_interrupt_part.extend(&interrupt_times_to_input_bytes(&new_interrupt_times));
}
drop(curr_case);
if do_rerun {
rerun_count+=1;
let (_, corpus_idx) = fuzzer.evaluate_input(state, executor, manager, new_input)?;
if corpus_idx.is_some() { unsafe{interesting_rerun_count+=1;}} else
if corpus_idx.is_none() && loopbound<=0 { break;}
} else {if loopbound<=0 {break;}}
}
unsafe {
sum_reruns+=rerun_count;
sum_interesting_reruns+=interesting_rerun_count;
if rerun_count>0 {self.success.add_sample(interesting_rerun_count as f32 / rerun_count as f32);}
}
self.report_stats(state, manager);
Ok(())
}
fn restart_progress_should_run(&mut self, state: &mut Self::State) -> Result<bool, Error> {
Ok(true)
}
fn clear_restart_progress(&mut self, state: &mut Self::State) -> Result<(), Error> {
Ok(())
}
}
impl<E, EM, Z> UsesState for InterruptShiftStage<E, EM, Z>
where
E: UsesState<State = Z::State>,
EM: UsesState<State = Z::State>,
Z: Evaluator<E, EM>,
Z::State: MaybeHasClientPerfMonitor + HasCorpus + HasRand,
{
type State = Z::State;
}
pub fn try_worst_snippets(bytes : &[u8], fbs: &STGFeedbackState, meta: &STGNodeMetadata) -> Option<Vec<u8>> {
let mut new = false;
let mut ret = Vec::new();
for (num,interval) in meta.intervals().iter().enumerate() {
todo!();
}
if new {Some(ret)} else {None}
}
static mut num_snippet_stage_execs : u64 = 0;
static mut num_snippet_rerun : u64 = 0;
static mut num_snippet_success : u64 = 0;
/// A stage that patches the current input with the recorded worst-case bytes per job snippet and re-runs it
#[derive(Clone, Debug, Default)]
pub struct STGSnippetStage<E, EM, Z> {
#[allow(clippy::type_complexity)]
phantom: PhantomData<(E, EM, Z)>,
input_addr: u32
}
impl<E, EM, Z> STGSnippetStage<E, EM, Z>
where
E: UsesState<State = Z::State>,
EM: UsesState<State = Z::State>,
Z: Evaluator<E, EM>,
Z::State: MaybeHasClientPerfMonitor + HasCorpus + HasRand,
{
pub fn new(input_addr: u32) -> Self {
Self { phantom: PhantomData, input_addr }
}
}
impl<E, EM, Z, I> STGSnippetStage<E, EM, Z>
where
E: UsesState<State = Z::State>,
EM: UsesState<State = Z::State>,
EM: EventFirer,
Z: Evaluator<E, EM>,
Z::State: MaybeHasClientPerfMonitor + HasCorpus + HasRand + HasMetadata + HasNamedMetadata,
<Z::State as UsesInput>::Input: Input,
Z::State: UsesInput<Input = MultipartInput<I>>,
I: HasMutatorBytes + Default
{
fn report_stats(&self, state: &mut <STGSnippetStage<E, EM, Z> as UsesState>::State, manager: &mut EM) {
unsafe {
let _ = manager.fire(
state,
Event::UpdateUserStats {
name: Cow::from("STGSnippetStage"),
value: UserStats::new(
UserStatsValue::String(Cow::from(format!("{} -> {}/{} {:.1}% ", num_snippet_stage_execs, num_snippet_success, num_snippet_rerun, num_snippet_success as f32 * 100.0 / num_snippet_rerun as f32))),
AggregatorOps::None,
),
phantom: PhantomData,
},
);
}
}
}
impl<E, EM, Z, I> Stage<E, EM, Z> for STGSnippetStage<E, EM, Z>
where
E: UsesState<State = Z::State>,
EM: UsesState<State = Z::State>,
EM: EventFirer,
Z: Evaluator<E, EM>,
Z::State: MaybeHasClientPerfMonitor + HasCorpus + HasRand + HasMetadata + HasNamedMetadata,
<Z::State as UsesInput>::Input: Input,
Z::State: UsesInput<Input = MultipartInput<I>>,
I: HasMutatorBytes + Default
{
fn perform(
&mut self,
fuzzer: &mut Z,
executor: &mut E,
state: &mut Self::State,
manager: &mut EM
) -> Result<(), Error> {
let mut myrand = StdRand::new();
myrand.set_seed(state.rand_mut().next());
let mut do_rerun = false;
let current_case = state.current_testcase()?;
let old_input = current_case.input().as_ref().unwrap();
let mut new_input = old_input.clone();
let new_bytes = new_input.parts_by_name_mut("bytes").next().expect("bytes not found in multipart input").1.bytes_mut();
// dbg!(current_case.metadata_map());
// eprintln!("Run mutator {}", current_case.metadata_map().get::<STGNodeMetadata>().is_some());
if let Some(meta) = current_case.metadata_map().get::<STGNodeMetadata>() {
let feedbackstate = match state
.named_metadata_map()
.get::<STGFeedbackState>("stgfeedbackstate") {
Some(s) => s,
Option::None => {
panic!("STGfeedbackstate not visible")
}
};
// Maximize all snippets
// dbg!(meta.jobs().len());
for jobinst in meta.jobs().iter() {
match feedbackstate.worst_task_jobs.get(&jobinst.get_hash_cached()) {
Some(worst) => {
let new = worst.map_bytes_onto(jobinst, Some(self.input_addr));
do_rerun |= new.len() > 0;
for (addr, byte) in new {
if (addr as usize) < new_bytes.len() {
new_bytes[addr as usize] = byte;
}
}
},
Option::None => {}
}
}
}
drop(current_case);
unsafe {num_snippet_stage_execs+=1;}
if do_rerun {
unsafe {num_snippet_rerun+=1;}
let (_, corpus_idx) = fuzzer.evaluate_input(state, executor, manager, new_input)?;
if corpus_idx.is_some() { unsafe{num_snippet_success+=1};}
}
self.report_stats(state, manager);
Ok(())
}
fn restart_progress_should_run(&mut self, state: &mut Self::State) -> Result<bool, Error> {
Ok(true)
}
fn clear_restart_progress(&mut self, state: &mut Self::State) -> Result<(), Error> {
Ok(())
}
}
impl<E, EM, Z> UsesState for STGSnippetStage<E, EM, Z>
where
E: UsesState<State = Z::State>,
EM: UsesState<State = Z::State>,
Z: Evaluator<E, EM>,
Z::State: MaybeHasClientPerfMonitor + HasCorpus + HasRand,
{
type State = Z::State;
}

@@ -1,58 +1,38 @@
use libafl::prelude::ExitKind;
use libafl::prelude::UsesInput;
use libafl::HasMetadata;
use libafl_bolts::HasLen;
use libafl_bolts::Named;
use libafl::Error;
use libafl::observers::Observer;
use serde::{Deserialize, Serialize};
use hashbrown::{HashMap, HashSet};
use crate::systemstate::CaptureEvent;
use crate::time::clock::IcHist;
use crate::time::clock::FUZZ_START_TIMESTAMP;
use std::time::SystemTime;
use std::rc::Rc;
use std::cell::RefCell;
use std::collections::VecDeque;
use std::borrow::Cow;
use super::helpers::USR_ISR_SYMBOLS;
use super::JobInstance;
use super::{ AtomicBasicBlock, ExecInterval};
use super::{
CURRENT_SYSTEMSTATE_VEC,
RawFreeRTOSSystemState,
RefinedTCB,
ReducedFreeRTOSSystemState,
freertos::{List_t, TCB_t, rtos_struct, rtos_struct::*},
helpers::JOBS_DONE,
};
//============================= Observer
/// The QemuSystemStateObserver retrieves the system state
/// that will get updated by the target.
#[derive(Serialize, Deserialize, Debug)]
#[allow(clippy::unsafe_derive_deserialize)]
pub struct QemuSystemStateObserver<I>
{
pub last_run: Vec<ReducedFreeRTOSSystemState>,
pub last_states: HashMap<u64, ReducedFreeRTOSSystemState>,
pub last_trace: Vec<ExecInterval>,
pub last_reads: Vec<Vec<(u32, u8)>>,
pub last_input: I,
pub job_instances: Vec<JobInstance>,
pub do_report: bool,
pub worst_job_instances: HashMap<String, JobInstance>,
pub select_task: Option<String>,
pub success: bool,
name: Cow<'static, str>,
}
impl<S> Observer<S> for QemuSystemStateObserver<S::Input>
where
S: UsesInput + HasMetadata,
S::Input: Default
{
#[inline]
fn pre_exec(&mut self, _state: &mut S, _input: &S::Input) -> Result<(), Error> {
@@ -62,91 +42,21 @@ where
#[inline]
fn post_exec(&mut self, _state: &mut S, _input: &S::Input, _exit_kind: &ExitKind) -> Result<(), Error> {
// unsafe {self.last_run = invalidate_ineffective_isr(refine_system_states(&mut CURRENT_SYSTEMSTATE_VEC));}
unsafe {
let temp = refine_system_states(CURRENT_SYSTEMSTATE_VEC.split_off(0));
// fix_broken_trace(&mut temp.1);
self.last_run = temp.0.clone();
// println!("{:?}",temp);
let temp = states2intervals(temp.0, temp.1);
self.last_trace = temp.0;
self.last_reads = temp.1;
self.last_states = temp.2;
self.success = temp.3;
#[cfg(feature="trace_job_response_times")]
{
let metadata =_state.metadata_map_mut();
let releases = get_releases(&self.last_trace, &self.last_states);
// println!("Releases: {:?}",&releases);
let jobs_done = JOBS_DONE.split_off(0);
let (job_instances, do_report) = get_release_response_pairs(&releases, &jobs_done);
self.do_report = do_report;
let job_instances = job_instances.into_iter().map(|x| {
let intervals = self.last_trace.iter().enumerate().filter(|y| y.1.start_tick <= x.1 && y.1.end_tick >= x.0 && x.2 == y.1.get_task_name_unchecked()).map(|(idx,x)| (x, &self.last_reads[idx])).collect::<Vec<_>>();
let (abbs, rest) : (Vec<_>, Vec<_>) = intervals.chunk_by(|a,b| a.0.abb.as_ref().unwrap().instance_eq(b.0.abb.as_ref().unwrap())).into_iter().map(|intervals| (intervals[0].0.abb.as_ref().unwrap().clone(), (intervals.iter().fold(0, |sum, x| sum+x.0.get_exec_time()), intervals.iter().fold(Vec::new(), |mut sum, x| {sum.extend(x.1.iter()); sum})))).unzip();
let (ticks_per_abb, mem_reads) : (Vec<_>, Vec<_>) = rest.into_iter().unzip();
JobInstance {
name: x.2.clone(),
mem_reads: mem_reads.into_iter().flatten().collect(), // TODO: add read values
release: x.0,
response: x.1,
exec_ticks: ticks_per_abb.iter().sum(),
ticks_per_abb: ticks_per_abb,
abbs: abbs,
hash_cache: 0
}
}).collect::<Vec<_>>();
// println!("Instances: {:?}",&job_instances);
self.job_instances = job_instances;
let observer = &self;
let mut worst_case_per_task : HashMap<String, JobInstance> = HashMap::new();
observer.job_instances.iter().for_each(|x| {
if worst_case_per_task.get(&x.name).is_some() {
let old = worst_case_per_task.get_mut(&x.name).unwrap();
if x.exec_ticks > old.exec_ticks {
old.exec_ticks=x.exec_ticks;
}
} else {
worst_case_per_task.insert(x.name.clone(), x.clone());
}
});
self.worst_job_instances = worst_case_per_task;
// copy-paste form clock observer
{
let hist = metadata.get_mut::<IcHist>();
let timestamp = SystemTime::now().duration_since(unsafe {FUZZ_START_TIMESTAMP}).unwrap().as_millis();
match hist {
Option::None => {
metadata.insert(IcHist(vec![(self.last_runtime(), timestamp)],
(self.last_runtime(), timestamp)));
}
Some(v) => {
v.0.push((self.last_runtime(), timestamp));
if v.1.0 < self.last_runtime() {
v.1 = (self.last_runtime(), timestamp);
}
}
}
}
}
}
// let abbs = extract_abbs_from_trace(&self.last_run);
// println!("{:?}",abbs);
// let abbs = trace_to_state_abb(&self.last_run);
// println!("{:?}",abbs);
self.last_input=_input.clone();
Ok(())
}
}
impl<I> Named for QemuSystemStateObserver<I>
{
fn name(&self) -> &Cow<'static, str> {
&self.name
}
}
impl<I> HasLen for QemuSystemStateObserver<I>
{
#[inline]
fn len(&self) -> usize {
@@ -154,20 +64,11 @@ impl<I> HasLen for QemuSystemStateObserver<I>
}
}
impl<I> QemuSystemStateObserver<I>
where I: Default {
pub fn new(select_task: &Option<String>) -> Self {
Self{last_run: vec![], last_trace: vec![], last_reads: vec![], last_input: I::default(), worst_job_instances: HashMap::new(), do_report: false, select_task: select_task.clone(), name: Cow::from("systemstate".to_string()), last_states: HashMap::new(), success: false, job_instances: vec![]}
}
pub fn last_runtime(&self) -> u64 {
self.select_task.as_ref().map(|x| self.worst_job_instances.get(x).map(|y| y.response-y.release).unwrap_or(0).clone()).unwrap_or(unsafe{libafl_qemu::sys::icount_get_raw()})
}
}
impl<I> Default for QemuSystemStateObserver<I>
where I: Default {
fn default() -> Self {
Self::new(&None)
}
}
//============================= Parsing helpers
@@ -208,481 +109,25 @@ fn tcb_list_to_vec_cached(list: List_t, dump: &mut HashMap<u32,rtos_struct>) ->
ret.push(last_tcb);
ret
}
/// Drains a List of raw SystemStates to produce a refined trace
/// returns:
/// - a Vec of ReducedFreeRTOSSystemStates
/// - a Vec of metadata tuples (qemu_tick, capture_event, capture_name, edge, mem_reads)
fn refine_system_states(mut input: Vec<RawFreeRTOSSystemState>) -> (Vec<ReducedFreeRTOSSystemState>, Vec<(u64, CaptureEvent, String, (u32, u32), Vec<(u32, u8)>)>) {
let mut ret = (Vec::<_>::new(), Vec::<_>::new());
for mut i in input.drain(..) {
let cur = RefinedTCB::from_tcb_owned(i.current_tcb);
// println!("Refine: {} {:?} {:?} {:x}-{:x}", cur.task_name, i.capture_point.0, i.capture_point.1.to_string(), i.edge.0, i.edge.1);
// collect ready list
let mut collector = Vec::<RefinedTCB>::new();
for j in i.prio_ready_lists.into_iter().rev() {
let mut tmp = tcb_list_to_vec_cached(j,&mut i.dumping_ground).iter().map(|x| RefinedTCB::from_tcb(x)).collect();
collector.append(&mut tmp);
}
// collect delay list
let mut delay_list : Vec::<RefinedTCB> = tcb_list_to_vec_cached(i.delay_list, &mut i.dumping_ground).iter().map(|x| RefinedTCB::from_tcb(x)).collect();
let mut delay_list_overflow : Vec::<RefinedTCB> = tcb_list_to_vec_cached(i.delay_list_overflow, &mut i.dumping_ground).iter().map(|x| RefinedTCB::from_tcb(x)).collect();
delay_list.append(&mut delay_list_overflow);
delay_list.sort_by(|a,b| a.task_name.cmp(&b.task_name));
ret.0.push(ReducedFreeRTOSSystemState {
current_task: cur,
ready_list_after: collector,
delay_list_after: delay_list,
read_invalid: i.read_invalid,
// input_counter: i.input_counter,//+IRQ_INPUT_BYTES_NUMBER,
});
ret.1.push((i.qemu_tick, i.capture_point.0, i.capture_point.1.to_string(), i.edge, i.mem_reads));
} }
return ret;
}
// Find all task release times.
fn get_releases(trace: &Vec<ExecInterval>, states: &HashMap<u64, ReducedFreeRTOSSystemState>) -> Vec<(u64, String)> {
let mut ret = Vec::new();
let mut initial_released = false;
for (_n, i) in trace.iter().enumerate() {
// The first release starts from xPortPendSVHandler
if !initial_released && i.start_capture.0 == CaptureEvent::ISREnd && i.start_capture.1 == "xPortPendSVHandler" {
let start_state = states.get(&i.start_state).expect("State not found");
initial_released = true;
start_state.ready_list_after.iter().for_each(|x| {
ret.push((i.start_tick, x.task_name.clone()));
});
continue;
}
// A timed release is a SysTick (or user ISR) block that moves a task from the delay list to the ready list.
if i.start_capture.0 == CaptureEvent::ISRStart && ( i.start_capture.1 == "xPortSysTickHandler" || USR_ISR_SYMBOLS.contains(&i.start_capture.1.as_str()) ) {
// detect race-conditions, get start and end state from the nearest valid intervals
if states.get(&i.start_state).map(|x| x.read_invalid).unwrap_or(true) {
let mut start_index=None;
for n in 1.._n {
if let Some(interval_start) = trace.get(_n-n) {
let start_state = states.get(&interval_start.start_state).unwrap();
if !start_state.read_invalid {
start_index = Some(_n-n);
break;
}
} else {break;}
};
let mut end_index=None;
for n in (_n+1)..trace.len() {
if let Some(interval_end) = trace.get(n) {
let end_state = states.get(&interval_end.end_state).unwrap();
if !end_state.read_invalid {
end_index = Some(n);
break;
}
} else {break;}
};
if let Some(Some(start_state)) = start_index.map(|x| states.get(&trace[x].start_state)) {
if let Some(Some(end_state)) = end_index.map(|x| states.get(&trace[x].end_state)) {
end_state.ready_list_after.iter().for_each(|x| {
if x.task_name != end_state.current_task.task_name && x.task_name != start_state.current_task.task_name && !start_state.ready_list_after.iter().any(|y| x.task_name == y.task_name) {
ret.push((i.end_tick, x.task_name.clone()));
}
});
}
}
} else
// canonical case, userspace -> isr -> userspace
if i.end_capture.0 == CaptureEvent::ISREnd {
let start_state = states.get(&i.start_state).expect("State not found");
let end_state = states.get(&i.end_state).expect("State not found");
end_state.ready_list_after.iter().for_each(|x| {
if x.task_name != end_state.current_task.task_name && x.task_name != start_state.current_task.task_name && !start_state.ready_list_after.iter().any(|y| x.task_name == y.task_name) {
ret.push((i.end_tick, x.task_name.clone()));
}
});
// start_state.delay_list_after.iter().for_each(|x| {
// if !end_state.delay_list_after.iter().any(|y| x.task_name == y.task_name) {
// ret.push((i.end_tick, x.task_name.clone()));
// }
// });
} else if i.end_capture.0 == CaptureEvent::ISRStart {
// Nested interrupts. Fast-forward to the end of the original interrupt, or the first valid state thereafter
// TODO: this may cause the same release to be registered multiple times
let mut isr_has_ended = false;
let start_state = states.get(&i.start_state).expect("State not found");
for n in (_n+1)..trace.len() {
if let Some(interval_end) = trace.get(n) {
if interval_end.end_capture.1 == i.start_capture.1 || isr_has_ended {
let end_state = states.get(&interval_end.end_state).unwrap();
isr_has_ended = true;
if !end_state.read_invalid {
end_state.ready_list_after.iter().for_each(|x| {
if x.task_name != end_state.current_task.task_name && x.task_name != start_state.current_task.task_name && !start_state.ready_list_after.iter().any(|y| x.task_name == y.task_name) {
ret.push((i.end_tick, x.task_name.clone()));
}
});
break;
}
}
} else {break;}
};
// if let Some(interval_end) = trace.get(_n+2) {
// if interval_end.start_capture.0 == CaptureEvent::ISREnd && interval_end.end_capture.0 == CaptureEvent::ISREnd && interval_end.end_capture.1 == i.start_capture.1 {
// let start_state = states.get(&i.start_state).expect("State not found");
// let end_state = states.get(&interval_end.end_state).expect("State not found");
// end_state.ready_list_after.iter().for_each(|x| {
// if x.task_name != end_state.current_task.task_name && x.task_name != start_state.current_task.task_name && !start_state.ready_list_after.iter().any(|y| x.task_name == y.task_name) {
// ret.push((i.end_tick, x.task_name.clone()));
// }
// });
// }
// }
}
}
// Release driven by an API call. This produces a lot of false positives, as a job may block multiple times per instance. Despite this, aperiodic jobs cannot be modeled otherwise. If we assume the first release is the real one, we can filter out the rest.
if i.start_capture.0 == CaptureEvent::APIStart {
let api_start_state = states.get(&i.start_state).expect("State not found");
let api_end_state = {
let mut end_index = _n;
for n in (_n)..trace.len() {
if trace[n].end_capture.0 == CaptureEvent::APIEnd || trace[n].end_capture.0 == CaptureEvent::End {
end_index = n;
break;
} else if n > _n && trace[n].level == 0 { // API Start -> ISR Start+End -> APP Continue
end_index = n-1; // any return to a regular app block is a fair point of comparison for the ready list, because scheduling has been performed
break;
}
};
states.get(&trace[end_index].end_state).expect("State not found")
};
api_end_state.ready_list_after.iter().for_each(|x| {
if x.task_name != api_start_state.current_task.task_name && !api_start_state.ready_list_after.iter().any(|y| x.task_name == y.task_name) {
ret.push((i.end_tick, x.task_name.clone()));
// eprintln!("Task {} released by API call at {:.1}ms", x.task_name, crate::time::clock::tick_to_time(i.end_tick).as_micros() as f32/1000.0);
}
});
}
}
ret
} }
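/// Pair up release events with response events per task name.
/// Returns (release_tick, response_tick, task_name) triples plus a flag that marks traces where
/// the pairing looked suspicious (e.g. a response arriving before its own release was observed).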
fn get_release_response_pairs(rel: &Vec<(u64, String)>, resp: &Vec<(u64, String)>) -> (Vec<(u64, u64, String)>, bool) {
let mut maybe_error = false;
let mut ret = Vec::new();
let mut ready : HashMap<&String, u64> = HashMap::new();
let mut last_response : HashMap<&String, u64> = HashMap::new();
let mut r = rel.iter().peekable();
let mut d = resp.iter().peekable();
loop {
while let Some(peek_rel) = r.peek() {
// Fill releases as soon as possible
if !ready.contains_key(&peek_rel.1) {
ready.insert(&peek_rel.1, peek_rel.0);
r.next();
} else {
if let Some(peek_resp) = d.peek() {
if peek_resp.0 > peek_rel.0 { // multiple releases before response
// It is unclear which release is real
// maybe_error = true;
// eprintln!("Task {} released multiple times before response ({:.1}ms and {:.1}ms)", peek_rel.1, crate::time::clock::tick_to_time(ready[&peek_rel.1]).as_micros()/1000, crate::time::clock::tick_to_time(peek_rel.0).as_micros()/1000);
// ready.insert(&peek_rel.1, peek_rel.0);
r.next();
} else {
// releases have overtaken responses, wait until the ready list clears up a bit
break;
}
} else {
// no more responses
break;
}
}
}
if let Some(next_resp) = d.next() {
if ready.contains_key(&next_resp.1) {
if ready[&next_resp.1] >= next_resp.0 {
if let Some(lr) = last_response.get(&next_resp.1) {
if u128::abs_diff(crate::time::clock::tick_to_time(next_resp.0).as_micros(), crate::time::clock::tick_to_time(*lr).as_micros()) > 500 { // tolerate pending notifications for 500us
maybe_error = true;
// eprintln!("Task {} response at {:.1}ms before next release at {:.1}ms. Fallback to last response at {:.1}ms.", next_resp.1, crate::time::clock::tick_to_time(next_resp.0).as_micros() as f32/1000.0, crate::time::clock::tick_to_time(ready[&next_resp.1]).as_micros() as f32/1000.0, crate::time::clock::tick_to_time(*lr).as_micros() as f32/1000.0);
}
// Sometimes a task is released immediately after a response. This might not be detected.
// Assume that the release occurred with the last response
ret.push((*lr, next_resp.0, next_resp.1.clone()));
last_response.insert(&next_resp.1, next_resp.0);
} else {
maybe_error = true;
// eprintln!("Task {} released after response", next_resp.1);
}
} else {
// assert!(peek_resp.0 >= ready[&peek_resp.1]);
last_response.insert(&next_resp.1, next_resp.0);
ret.push((ready[&next_resp.1], next_resp.0, next_resp.1.clone()));
ready.remove(&next_resp.1);
}
} else {
if let Some(lr) = last_response.get(&next_resp.1) {
if u128::abs_diff(crate::time::clock::tick_to_time(next_resp.0).as_micros(), crate::time::clock::tick_to_time(*lr).as_micros()) > 1000 { // tolerate pending notifications for 1ms
// maybe_error = true;
// eprintln!("Task {} response at {:.1}ms not found in ready list. Fallback to last response at {:.1}ms.", next_resp.1, crate::time::clock::tick_to_time(next_resp.0).as_micros() as f32/1000.0, crate::time::clock::tick_to_time(*lr).as_micros() as f32/1000.0);
}
// Sometimes a task is released immediately after a response (e.g. pending notification). This might not be detected.
// Assume that the release occurred with the last response
ret.push((*lr, next_resp.0, next_resp.1.clone()));
last_response.insert(&next_resp.1, next_resp.0);
} else {
maybe_error = true;
// eprintln!("Task {} response at {:.1}ms not found in ready list", next_resp.1, crate::time::clock::tick_to_time(next_resp.0).as_micros() as f32/1000.0);
}
}
} else {
// TODO: should remaining released tasks be counted as finished?
return (ret,maybe_error);
}
}
}
/// Transform the states and metadata into a list of ExecIntervals, along with a HashMap of states, a list of HashSets marking memory reads and a bool indicating success
/// returns:
/// - a Vec of ExecIntervals
/// - a Vec of HashSets marking memory reads during these intervals
/// - a HashMap of ReducedFreeRTOSSystemStates by hash
/// - a bool indicating success
fn states2intervals(trace: Vec<ReducedFreeRTOSSystemState>, meta: Vec<(u64, CaptureEvent, String, (u32, u32), Vec<(u32, u8)>)>) -> (Vec<ExecInterval>, Vec<Vec<(u32, u8)>>, HashMap<u64, ReducedFreeRTOSSystemState>, bool) {
if trace.len() == 0 {return (Vec::new(), Vec::new(), HashMap::new(), true);}
let mut isr_stack : VecDeque<u8> = VecDeque::from([]); // 2+ = ISR, 1 = systemcall, 0 = APP. Trace starts with an ISREnd and executes the app
let mut level_of_task : HashMap<&str, u8> = HashMap::new();
let mut ret: Vec<ExecInterval> = vec![];
let mut reads: Vec<Vec<(u32, u8)>> = vec![];
let mut edges: Vec<(u32, u32)> = vec![];
let mut last_hash : u64 = trace[0].get_hash();
let mut table : HashMap<u64, ReducedFreeRTOSSystemState> = HashMap::new();
table.insert(last_hash, trace[0].clone());
for i in 0..trace.len()-1 {
let curr_name = trace[i].current_task.task_name.as_str();
// let mut interval_name = curr_name; // Name of the interval, either the task name or the isr/api function name
let level = match meta[i].1 {
CaptureEvent::APIEnd => { // API end always exits towards the app
if !level_of_task.contains_key(curr_name) {
level_of_task.insert(curr_name, 0);
}
*level_of_task.get_mut(curr_name).unwrap()=0;
0
},
CaptureEvent::APIStart => { // API start can only be called in the app
if !level_of_task.contains_key(curr_name) { // Should not happen, apps start from an ISR End. Some input exhibited this behavior for unknown reasons
level_of_task.insert(curr_name, 0);
}
*level_of_task.get_mut(curr_name).unwrap()=1;
// interval_name = &meta[i].2;
1
},
CaptureEvent::ISREnd => {
// special case where the next block is an app start
if !level_of_task.contains_key(curr_name) {
level_of_task.insert(curr_name, 0);
}
// nested isr, TODO: Test level > 2
if isr_stack.len() > 1 {
// interval_name = ""; // We can't know which isr is running
isr_stack.pop_back().unwrap();
*isr_stack.back().unwrap()
} else {
isr_stack.pop_back();
// possibly go back to an api call that is still running for this task
if level_of_task.get(curr_name).unwrap() == &1 {
// interval_name = ""; // We can't know which api is running
}
*level_of_task.get(curr_name).unwrap()
}
},
CaptureEvent::ISRStart => {
// special case for isrs which do not capture their end
// if meta[i].2 == "ISR_0_Handler" {
// &2
// } else {
// regular case
// interval_name = &meta[i].2;
if isr_stack.len() > 0 {
let l = *isr_stack.back().unwrap();
isr_stack.push_back(l+1);
l+1
} else {
isr_stack.push_back(2);
2
}
// }
}
_ => 100
};
// if trace[i].2 == CaptureEvent::End {break;}
let next_hash=trace[i+1].get_hash();
if !table.contains_key(&next_hash) {
table.insert(next_hash, trace[i+1].clone());
}
ret.push(ExecInterval{
start_tick: meta[i].0,
end_tick: meta[i+1].0,
start_state: last_hash,
end_state: next_hash,
start_capture: (meta[i].1, meta[i].2.clone()),
end_capture: (meta[i+1].1, meta[i+1].2.clone()),
level: level,
tick_spend_preempted: 0,
abb: None
});
reads.push(meta[i+1].4.clone());
last_hash = next_hash;
edges.push((meta[i].3.1, meta[i+1].3.0));
}
let t = add_abb_info(&mut ret, &table, &edges);
(ret, reads, table, t)
}
/// Marks which abbs were executed at each interval
fn add_abb_info(trace: &mut Vec<ExecInterval>, table: &HashMap<u64, ReducedFreeRTOSSystemState>, edges: &Vec<(u32, u32)>) -> bool {
let mut id_count = 0;
let mut ret = true;
let mut task_has_started : HashSet<String> = HashSet::new();
let mut wip_abb_trace : Vec<Rc<RefCell<AtomicBasicBlock>>> = vec![];
// let mut open_abb_at_this_task_or_level : HashMap<(u8,&str),usize> = HashMap::new();
let mut open_abb_at_this_ret_addr_and_task : HashMap<(u32,&str),usize> = HashMap::new();
for i in 0..trace.len() {
let curr_name = &table[&trace[i].start_state].current_task.task_name;
// let last : Option<&usize> = last_abb_start_of_task.get(&curr_name);
// let open_abb = open_abb_at_this_task_or_level.get(&(trace[i].level, if trace[i].level<2 {&curr_name} else {""})).to_owned(); // apps/apis are differentiated by task name, isrs by nested level
let open_abb = open_abb_at_this_ret_addr_and_task.get(&(edges[i].0, if trace[i].level<2 {&curr_name} else {""})).to_owned(); // apps/apis are differentiated by task name, isrs by nested level
// println!("Edge {:x}-{:x}", edges[i].0.unwrap_or(0xffff), edges[i].1.unwrap_or(0xffff));
match trace[i].start_capture.0 {
// generic api abb start
CaptureEvent::APIStart => {
// assert_eq!(open_abb, None);
ret &= open_abb.is_none();
open_abb_at_this_ret_addr_and_task.insert((edges[i].1, if trace[i].level<2 {&curr_name} else {""}), i);
wip_abb_trace.push(Rc::new(RefCell::new(AtomicBasicBlock{start: edges[i].0, ends: HashSet::new(), level: if trace[i].level<2 {trace[i].level} else {2}, instance_id: id_count, instance_name: Some(trace[i].start_capture.1.clone())})));
id_count+=1;
},
// generic isr abb start
CaptureEvent::ISRStart => {
// assert_eq!(open_abb, None);
ret &= open_abb.is_none();
open_abb_at_this_ret_addr_and_task.insert((edges[i].1, if trace[i].level<2 {&curr_name} else {""}), i);
wip_abb_trace.push(Rc::new(RefCell::new(AtomicBasicBlock{start: edges[i].0, ends: HashSet::new(), level: if trace[i].level<2 {trace[i].level} else {2}, instance_id: id_count, instance_name: Some(trace[i].start_capture.1.clone())})));
id_count+=1;
},
// generic app abb start
CaptureEvent::APIEnd => {
// assert_eq!(open_abb, None);
ret &= open_abb.is_none();
open_abb_at_this_ret_addr_and_task.insert((edges[i].1, if trace[i].level<2 {&curr_name} else {""}), i);
wip_abb_trace.push(Rc::new(RefCell::new(AtomicBasicBlock{start: edges[i].0, ends: HashSet::new(), level: if trace[i].level<2 {trace[i].level} else {2}, instance_id: id_count, instance_name: if trace[i].level<2 {Some(curr_name.clone())} else {None}})));
id_count+=1;
},
// generic continued blocks
CaptureEvent::ISREnd => {
// special case app abb start
if trace[i].start_capture.1=="xPortPendSVHandler" && !task_has_started.contains(curr_name) {
// assert_eq!(open_abb, None);
ret &= open_abb.is_none();
wip_abb_trace.push(Rc::new(RefCell::new(AtomicBasicBlock{start: 0, ends: HashSet::new(), level: if trace[i].level<2 {trace[i].level} else {2}, instance_id: id_count, instance_name: Some(curr_name.clone())})));
id_count+=1;
open_abb_at_this_ret_addr_and_task.insert((edges[i].1, if trace[i].level<2 {&curr_name} else {""}), i);
task_has_started.insert(curr_name.clone());
} else {
if let Some(last) = open_abb_at_this_ret_addr_and_task.get(&(edges[i].0, if trace[i].level<2 {&curr_name} else {""})) {
let last = last.clone(); // required to drop immutable reference
wip_abb_trace.push(wip_abb_trace[last].clone());
// if the abb is interrupted again, it will need to continue at edge[i].1
open_abb_at_this_ret_addr_and_task.remove(&(edges[i].0, if trace[i].level<2 {&curr_name} else {""}));
open_abb_at_this_ret_addr_and_task.insert((edges[i].1, if trace[i].level<2 {&curr_name} else {""}), last); // order matters!
} else {
// panic!();
// println!("Continued block with no start {} {} {:?} {:?} {:x}-{:x} {} {}", curr_name, trace[i].start_tick, trace[i].start_capture, trace[i].end_capture, edges[i].0, edges[i].1, task_has_started.contains(curr_name),trace[i].level);
// println!("{:x?}", open_abb_at_this_ret_addr_and_task);
ret = false;
wip_abb_trace.push(Rc::new(RefCell::new(AtomicBasicBlock{start: edges[i].1, ends: HashSet::new(), level: if trace[i].level<2 {trace[i].level} else {2}, instance_id: id_count, instance_name: if trace[i].level<1 {Some(curr_name.clone())} else {None}})));
id_count+=1;
}
}
},
_ => panic!("Undefined block start")
}
match trace[i].end_capture.0 {
// generic app abb end
CaptureEvent::APIStart => {
let _t = &wip_abb_trace[i];
RefCell::borrow_mut(&*wip_abb_trace[i]).ends.insert(edges[i].1);
open_abb_at_this_ret_addr_and_task.remove(&(edges[i].1, if trace[i].level<2 {&curr_name} else {""}));
},
// generic api abb end
CaptureEvent::APIEnd => {
RefCell::borrow_mut(&*wip_abb_trace[i]).ends.insert(edges[i].1);
open_abb_at_this_ret_addr_and_task.remove(&(edges[i].1, if trace[i].level<2 {&curr_name} else {""}));
},
// generic isr abb end
CaptureEvent::ISREnd => {
RefCell::borrow_mut(&*wip_abb_trace[i]).ends.insert(edges[i].1);
open_abb_at_this_ret_addr_and_task.remove(&(edges[i].1, if trace[i].level<2 {&curr_name} else {""}));
},
// end anything
CaptureEvent::End => {
RefCell::borrow_mut(&*wip_abb_trace[i]).ends.insert(edges[i].1);
open_abb_at_this_ret_addr_and_task.remove(&(edges[i].1, if trace[i].level<2 {&curr_name} else {""}));
},
CaptureEvent::ISRStart => (),
_ => panic!("Undefined block end")
}
// println!("{} {} {:x}-{:x} {:x}-{:x} {:?} {:?} {}",curr_name, trace[i].level, edges[i].0, edges[i].1, ((*wip_abb_trace[i])).borrow().start, ((*wip_abb_trace[i])).borrow().ends.iter().next().unwrap_or(&0xffff), trace[i].start_capture, trace[i].end_capture, trace[i].start_tick);
// println!("{:x?}", open_abb_at_this_ret_addr_and_task);
}
// drop(open_abb_at_this_task_or_level);
for i in 0..trace.len() {
trace[i].abb = Some((*wip_abb_trace[i]).borrow().clone());
}
return ret;
}
// /// restore the isr/api begin/end invariant
// fn fix_broken_trace(meta: &mut Vec<(u64, CaptureEvent, String, (Option<u32>, Option<u32>))>) {
// for i in meta.iter_mut() {
// if i.1 == CaptureEvent::APIStart && i.2.ends_with("FromISR") {
// i.1 = CaptureEvent::ISREnd;
// i.2 = "ISR_0_Handler".to_string();
// }
// }
// }
// /// invalidate subsequent intervals of equal states where an ISREnd follows an ISRStart. If the interrupt had no effect on the system, we are not interested.
// fn invalidate_ineffective_isr(trace: &mut Vec<ExecInterval>) {
// let mut i = 0;
// while i < trace.len() - 1 {
// if trace[i].is_valid() &&
// matches!(trace[i].start_capture.0, CaptureEvent::ISRStart) && matches!(trace[i].end_capture.0, CaptureEvent::ISREnd) &&
// trace[i].start_capture.1 == trace[i].end_capture.1 && trace[i].start_state == trace[i].end_state
// {
// trace[i].invaildate();
// }
// }
// }
// /// merge a sequence of intervals of the same state+abb. jump over all invalid blocks.
// fn merge_subsequent_abbs(trace: &mut Vec<ExecInterval>) {
// let mut i = 1;
// let mut lst_valid=0;
// while i < trace.len() - 1 {
// if trace[i].is_valid() {
// let mut temp = trace[i].clone();
// trace[lst_valid].try_unite_with_later_interval(&mut temp);
// trace[i] = temp;
// lst_valid = i;
// }
// }
// }

@@ -1,185 +0,0 @@
//! Stage to compute/report scheduler stats and prune the corpus
use core::{marker::PhantomData, time::Duration};
use libafl_bolts::current_time;
use itertools::Itertools;
use libafl::{
corpus::{Corpus, HasCurrentCorpusId}, events::EventFirer, prelude::{minimizer::TopRatedsMetadata, RemovableScheduler}, schedulers::minimizer::IsFavoredMetadata, stages::Stage, state::{HasCorpus, HasImported, UsesState}, Error, HasMetadata, HasScheduler
};
use libafl::{
events::Event,
monitors::{AggregatorOps, UserStats, UserStatsValue},
};
use std::borrow::Cow;
use serde_json::json;
use libafl::prelude::mutational::MUTATION_STAGE_ITER;
use libafl::prelude::mutational::MUTATION_STAGE_RETRY;
use libafl::prelude::mutational::MUTATION_STAGE_SUCCESS;
/// The [`SchedulerStatsStage`] is a simple stage that computes and reports some scheduler stats.
#[derive(Debug, Clone)]
pub struct SchedulerStatsStage<E, EM, Z> {
last_report_time: Duration,
// the interval that we report all stats
stats_report_interval: Duration,
phantom: PhantomData<(E, EM, Z)>,
}
impl<E, EM, Z> UsesState for SchedulerStatsStage<E, EM, Z>
where
E: UsesState,
{
type State = E::State;
}
impl<E, EM, Z> Stage<E, EM, Z> for SchedulerStatsStage<E, EM, Z>
where
E: UsesState,
EM: EventFirer<State = Self::State>,
Z: UsesState<State = Self::State> + HasScheduler,
<Z as HasScheduler>::Scheduler: RemovableScheduler,
Self::State: HasImported + HasCorpus + HasMetadata,
{
fn perform(
&mut self,
fuzzer: &mut Z,
_executor: &mut E,
state: &mut <Self as UsesState>::State,
_manager: &mut EM,
) -> Result<(), Error> {
// let Some(corpus_idx) = state.current_corpus_id()? else {
// return Err(Error::illegal_state(
// "state is not currently processing a corpus index",
// ));
// };
// let corpus_size = state.corpus().count();
let cur = current_time();
if cur.checked_sub(self.last_report_time).unwrap_or_default() > self.stats_report_interval {
if let Some(meta) = state.metadata_map().get::<TopRatedsMetadata>() {
let kc = meta.map.keys().count();
let mut v : Vec<_> = meta.map.values().cloned().collect();
v.sort_unstable();
v.dedup();
let vc = v.len();
#[cfg(feature = "std")]
{
let json = json!({
"relevant":vc,
"objects":kc,
});
_manager.fire(
state,
Event::UpdateUserStats {
name: Cow::from("Minimizer"),
value: UserStats::new(
UserStatsValue::String(Cow::from(json.to_string())),
AggregatorOps::None,
),
phantom: PhantomData,
},
)?;
}
#[cfg(not(feature = "std"))]
log::info!(
"relevant: {}, objects: {}",
vc,
kc
);
self.last_report_time = cur;
// Experimental pruning
#[cfg(any(feature = "sched_stg",feature = "sched_afl"))]
{
const MULTI: usize = 10;
const PRUNE_THRESHOLD: usize = 20;
const PRUNE_MAX_KEEP: usize = 1000;
const PRUNE_MIN_KEEP: usize = 100;
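// Prune once the corpus exceeds PRUNE_MAX_KEEP entries (or PRUNE_THRESHOLD times the number of
// top-rated entries): drop the shortest-running testcases that are neither favored, currently
// scheduled, nor top-rated, until roughly MULTI times the top-rated count (at least
// PRUNE_MIN_KEEP) remain.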
let cc = state.corpus().count();
let to_keep = usize::max(vc*MULTI, PRUNE_MIN_KEEP);
let activate = cc > PRUNE_MAX_KEEP || cc > usize::max(vc*PRUNE_THRESHOLD, PRUNE_MIN_KEEP*2);
if activate {
println!("Pruning corpus, keeping {} / {}", to_keep, cc);
let corpus = state.corpus_mut();
let currid = corpus.current();
let ids : Vec<_> = corpus.ids().filter_map(|x| {
let tc = corpus.get(x).unwrap().borrow();
let md = tc.metadata_map();
if vc < PRUNE_MAX_KEEP && (md.get::<IsFavoredMetadata>().is_some() || &Some(x) == currid || v.contains(&&x)) {
None
} else {
Some((x, tc.exec_time().clone()))
}
}).sorted_by_key(|x| x.1).take(usize::saturating_sub(corpus.count(),to_keep)).sorted_by_key(|x| x.0).unique().rev().collect();
for (cid, _) in ids {
let c = state.corpus_mut().remove(cid).unwrap();
fuzzer
.scheduler_mut()
.on_remove(state, cid, &Some(c))?;
}
}
}
#[cfg(feature = "std")]
unsafe {
let _ = _manager.fire(
state,
Event::UpdateUserStats {
name: Cow::from("StdMutationalStage"),
value: UserStats::new(
UserStatsValue::String(Cow::from(format!("{} -> {}/{} {:.1}% ", MUTATION_STAGE_ITER, MUTATION_STAGE_SUCCESS, MUTATION_STAGE_RETRY, MUTATION_STAGE_SUCCESS as f32 * 100.0 / MUTATION_STAGE_RETRY as f32))),
AggregatorOps::None,
),
phantom: PhantomData,
},
);
}
}
}
Ok(())
}
#[inline]
fn restart_progress_should_run(&mut self, _state: &mut <Self as UsesState>::State) -> Result<bool, Error> {
// Not running the target, so we won't crash/timeout and, hence, don't need to restore anything
Ok(true)
}
#[inline]
fn clear_restart_progress(&mut self, _state: &mut <Self as UsesState>::State) -> Result<(), Error> {
// Not running the target, so we won't crash/timeout and, hence, don't need to restore anything
Ok(())
}
}
impl<E, EM, Z> SchedulerStatsStage<E, EM, Z> {
/// Create a new instance of the [`SchedulerStatsStage`]
#[must_use]
pub fn new(interval: Duration) -> Self {
Self {
stats_report_interval: interval,
..Default::default()
}
}
}
impl<E, EM, Z> Default for SchedulerStatsStage<E, EM, Z> {
/// The default instance of the [`SchedulerStatsStage`], reporting every 3 seconds
#[must_use]
fn default() -> Self {
Self {
last_report_time: current_time(),
stats_report_interval: Duration::from_secs(3),
phantom: PhantomData,
}
}
}
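For orientation, the "Minimizer" user stat fired above reduces to counting how many distinct favored corpus entries ("relevant") cover all minimizer keys ("objects"). A standalone sketch of that computation, using a plain HashMap as a hypothetical stand-in for TopRatedsMetadata::map:

use std::collections::HashMap;

fn main() {
    // hypothetical stand-in for TopRatedsMetadata.map: minimizer key -> favored corpus id
    let map: HashMap<u64, usize> = HashMap::from([(1, 7), (2, 7), (3, 9), (4, 11)]);
    let kc = map.keys().count(); // "objects": number of covered keys
    let mut v: Vec<_> = map.values().cloned().collect();
    v.sort_unstable();
    v.dedup();
    let vc = v.len(); // "relevant": distinct favored corpus entries
    println!("relevant: {vc}, objects: {kc}"); // prints: relevant: 3, objects: 4
}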

View File

@ -1,20 +1,24 @@
//! The Minimizer schedulers are a family of corpus schedulers that feed the fuzzer //! The Minimizer schedulers are a family of corpus schedulers that feed the fuzzer
//! with testcases only from a subset of the total corpus. //! with testcases only from a subset of the total corpus.
use core::marker::PhantomData; use core::{marker::PhantomData};
use std::{cmp::{max, min}, mem::swap}; use std::{cmp::{max, min}, mem::swap, borrow::BorrowMut};
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use libafl_bolts::{rands::Rand, AsIter, HasLen};
use libafl::{ use libafl::{
common::HasMetadata, corpus::{Corpus, Testcase}, inputs::UsesInput, prelude::{CanTrack, CorpusId, RemovableScheduler}, schedulers::{minimizer::DEFAULT_SKIP_NON_FAVORED_PROB, Scheduler, TestcaseScore }, state::{HasCorpus, HasRand, State, UsesState}, Error, SerdeAny bolts::{rands::Rand, serdeany::SerdeAny, AsSlice, HasRefCnt},
corpus::{Corpus, Testcase},
inputs::UsesInput,
schedulers::{Scheduler, TestcaseScore, minimizer::DEFAULT_SKIP_NON_FAVORED_PROB },
state::{HasCorpus, HasMetadata, HasRand, UsesState, State},
Error, SerdeAny, prelude::HasLen,
}; };
use crate::time::worst::MaxTimeFavFactor; use crate::worst::MaxTimeFavFactor;
use super::{stg::STGNodeMetadata, FreeRTOSSystemStateMetadata}; use super::FreeRTOSSystemStateMetadata;
/// A state metadata holding a map of favoreds testcases for each map entry /// A state metadata holding a map of favoreds testcases for each map entry
#[derive(Debug, Serialize, Deserialize, SerdeAny, Default)] #[derive(Debug, Serialize, Deserialize, SerdeAny, Default)]
@ -35,7 +39,7 @@ impl LongestTracesMetadata {
#[derive(Debug, Clone)] #[derive(Debug, Clone)]
pub struct LongestTraceScheduler<CS> { pub struct LongestTraceScheduler<CS> {
base: CS, base: CS,
skip_non_favored_prob: f64, skip_non_favored_prob: u64,
} }
impl<CS> UsesState for LongestTraceScheduler<CS> impl<CS> UsesState for LongestTraceScheduler<CS>
@ -51,55 +55,55 @@ where
CS::State: HasCorpus + HasMetadata + HasRand, CS::State: HasCorpus + HasMetadata + HasRand,
{ {
/// Add an entry to the corpus and return its index /// Add an entry to the corpus and return its index
fn on_add(&mut self, state: &mut CS::State, idx: CorpusId) -> Result<(), Error> { fn on_add(&self, state: &mut CS::State, idx: usize) -> Result<(), Error> {
let l = state.corpus() let l = state.corpus()
.get(idx)? .get(idx)?
.borrow() .borrow()
.metadata_map() .metadata()
.get::<FreeRTOSSystemStateMetadata>().map_or(0, |x| x.trace_length); .get::<FreeRTOSSystemStateMetadata>().map_or(0, |x| x.trace_length);
self.get_update_trace_length(state,l); self.get_update_trace_length(state,l);
self.base.on_add(state, idx) self.base.on_add(state, idx)
} }
/// Replaces the testcase at the given idx /// Replaces the testcase at the given idx
// fn on_replace( fn on_replace(
// &mut self, &self,
// state: &mut CS::State, state: &mut CS::State,
// idx: CorpusId, idx: usize,
// testcase: &Testcase<<CS::State as UsesInput>::Input>, testcase: &Testcase<<CS::State as UsesInput>::Input>,
// ) -> Result<(), Error> { ) -> Result<(), Error> {
// let l = state.corpus() let l = state.corpus()
// .get(idx)? .get(idx)?
// .borrow() .borrow()
// .metadata() .metadata()
// .get::<FreeRTOSSystemStateMetadata>().map_or(0, |x| x.trace_length); .get::<FreeRTOSSystemStateMetadata>().map_or(0, |x| x.trace_length);
// self.get_update_trace_length(state, l); self.get_update_trace_length(state, l);
// self.base.on_replace(state, idx, testcase) self.base.on_replace(state, idx, testcase)
// } }
/// Removes an entry from the corpus, returning M if M was present. /// Removes an entry from the corpus, returning M if M was present.
// fn on_remove( fn on_remove(
// &self, &self,
// state: &mut CS::State, state: &mut CS::State,
// idx: usize, idx: usize,
// testcase: &Option<Testcase<<CS::State as UsesInput>::Input>>, testcase: &Option<Testcase<<CS::State as UsesInput>::Input>>,
// ) -> Result<(), Error> { ) -> Result<(), Error> {
// self.base.on_remove(state, idx, testcase)?; self.base.on_remove(state, idx, testcase)?;
// Ok(()) Ok(())
// } }
/// Gets the next entry /// Gets the next entry
fn next(&mut self, state: &mut CS::State) -> Result<CorpusId, Error> { fn next(&self, state: &mut CS::State) -> Result<usize, Error> {
let mut idx = self.base.next(state)?; let mut idx = self.base.next(state)?;
while { while {
let l = state.corpus() let l = state.corpus()
.get(idx)? .get(idx)?
.borrow() .borrow()
.metadata_map() .metadata()
.get::<STGNodeMetadata>().map_or(0, |x| x.nodes().len()); .get::<FreeRTOSSystemStateMetadata>().map_or(0, |x| x.trace_length);
let m = self.get_update_trace_length(state,l); let m = self.get_update_trace_length(state,l);
state.rand_mut().below(m as usize) > l state.rand_mut().below(m) > l as u64
} && state.rand_mut().coinflip(self.skip_non_favored_prob) } && state.rand_mut().below(100) < self.skip_non_favored_prob
{ {
idx = self.base.next(state)?; idx = self.base.next(state)?;
} }
@ -114,7 +118,7 @@ where
{ {
pub fn get_update_trace_length(&self, state: &mut CS::State, par: usize) -> u64 { pub fn get_update_trace_length(&self, state: &mut CS::State, par: usize) -> u64 {
// Create a new top rated meta if not existing // Create a new top rated meta if not existing
if let Some(td) = state.metadata_map_mut().get_mut::<LongestTracesMetadata>() { if let Some(td) = state.metadata_mut().get_mut::<LongestTracesMetadata>() {
let m = max(td.max_trace_length, par); let m = max(td.max_trace_length, par);
td.max_trace_length = m; td.max_trace_length = m;
m as u64 m as u64
@ -123,7 +127,6 @@ where
par as u64 par as u64
} }
} }
#[allow(unused)]
pub fn new(base: CS) -> Self { pub fn new(base: CS) -> Self {
Self { Self {
base, base,
@ -157,33 +160,32 @@ pub struct GenerationScheduler<S> {
impl<S> UsesState for GenerationScheduler<S> impl<S> UsesState for GenerationScheduler<S>
where where
S: State + UsesInput, S: UsesInput,
{ {
type State = S; type State = S;
} }
impl<S> Scheduler for GenerationScheduler<S> impl<S> Scheduler for GenerationScheduler<S>
where where
S: State + HasCorpus + HasMetadata, S: HasCorpus + HasMetadata,
S::Input: HasLen,
{ {
/// get first element in current gen, /// get first element in current gen,
/// if current_gen is empty, swap lists, sort by FavFactor, take top k and return first /// if current_gen is empty, swap lists, sort by FavFactor, take top k and return first
fn next(&mut self, state: &mut Self::State) -> Result<CorpusId, Error> { fn next(&self, state: &mut Self::State) -> Result<usize, Error> {
let mut to_remove : Vec<(usize, f64)> = vec![]; let mut to_remove : Vec<(usize, f64)> = vec![];
let mut _to_return : usize = 0; let mut to_return : usize = 0;
let corpus_len = state.corpus().count(); let c = state.corpus().count();
let mut _current_len = 0; let gm = state.metadata_mut().get_mut::<GeneticMetadata>().expect("Corpus Scheduler empty");
let gm = state.metadata_map_mut().get_mut::<GeneticMetadata>().expect("Corpus Scheduler empty");
// println!("index: {} curr: {:?} next: {:?} gen: {} corp: {}", gm.current_cursor, gm.current_gen.len(), gm.next_gen.len(), gm.gen, // println!("index: {} curr: {:?} next: {:?} gen: {} corp: {}", gm.current_cursor, gm.current_gen.len(), gm.next_gen.len(), gm.gen,
// c); // c);
match gm.current_gen.get(gm.current_cursor) { match gm.current_gen.get(gm.current_cursor) {
Some(c) => { Some(c) => {
_current_len = gm.current_gen.len();
gm.current_cursor+=1; gm.current_cursor+=1;
// println!("normal next: {}", (*c).0); // println!("normal next: {}", (*c).0);
return Ok((*c).0.into()) return Ok((*c).0)
}, },
Option::None => { None => {
swap(&mut to_remove, &mut gm.current_gen); swap(&mut to_remove, &mut gm.current_gen);
swap(&mut gm.next_gen, &mut gm.current_gen); swap(&mut gm.next_gen, &mut gm.current_gen);
gm.current_gen.sort_by(|a, b| a.1.partial_cmp(&b.1).unwrap()); gm.current_gen.sort_by(|a, b| a.1.partial_cmp(&b.1).unwrap());
@ -193,114 +195,73 @@ where
to_remove.extend(d); to_remove.extend(d);
// move all indices to the left, since all other indices will be deleted // move all indices to the left, since all other indices will be deleted
gm.current_gen.sort_by(|a,b| a.0.cmp(&(*b).0)); // in order of the corpus index gm.current_gen.sort_by(|a,b| a.0.cmp(&(*b).0)); // in order of the corpus index
// for i in 0..gm.current_gen.len() { for i in 0..gm.current_gen.len() {
// gm.current_gen[i] = (i, gm.current_gen[i].1); gm.current_gen[i] = (i, gm.current_gen[i].1);
// } }
_to_return = gm.current_gen.get(0).unwrap().0; to_return = gm.current_gen.get(0).unwrap().0;
// assert_eq!(to_return, 0);
gm.current_cursor=1; gm.current_cursor=1;
gm.gen+=1; gm.gen+=1;
_current_len = gm.current_gen.len();
} }
}; };
// removing these elements will move all indices left by to_remove.len() // removing these elements will move all indices left by to_remove.len()
// to_remove.sort_by(|x,y| x.0.cmp(&(*y).0)); to_remove.sort_by(|x,y| x.0.cmp(&(*y).0));
// to_remove.reverse(); to_remove.reverse();
let cm = state.corpus_mut();
assert_eq!(corpus_len-to_remove.len(), _current_len);
assert_ne!(_current_len,0);
for i in to_remove { for i in to_remove {
cm.remove(i.0.into()).unwrap(); state.corpus_mut().remove(i.0).unwrap();
} }
assert_eq!(cm.get(_to_return.into()).is_ok(),true);
// println!("switch next: {to_return}"); // println!("switch next: {to_return}");
return Ok(_to_return.into()); return Ok(to_return);
} }
/// Add the new input to the next generation /// Add the new input to the next generation
fn on_add( fn on_add(
&mut self, &self,
state: &mut Self::State, state: &mut Self::State,
idx: CorpusId idx: usize
) -> Result<(), Error> { ) -> Result<(), Error> {
// println!("On Add {idx}"); // println!("On Add {idx}");
let mut tc = state.corpus_mut().get(idx).expect("Newly added testcase not found by index").borrow_mut().clone(); let mut tc = state.corpus_mut().get(idx).unwrap().borrow_mut().clone();
let ff = MaxTimeFavFactor::compute(state, &mut tc).unwrap(); let ff = MaxTimeFavFactor::compute(&mut tc, state).unwrap();
if let Some(gm) = state.metadata_map_mut().get_mut::<GeneticMetadata>() { if let Some(gm) = state.metadata_mut().get_mut::<GeneticMetadata>() {
gm.next_gen.push((idx.into(),ff)); gm.next_gen.push((idx,ff));
} else { } else {
state.add_metadata(GeneticMetadata::new(vec![], vec![(idx.into(),ff)])); state.add_metadata(GeneticMetadata::new(vec![], vec![(idx,ff)]));
} }
Ok(()) Ok(())
} }
// fn on_replace(
// &self,
// _state: &mut Self::State,
// _idx: usize,
// _prev: &Testcase<<Self::State as UsesInput>::Input>
// ) -> Result<(), Error> {
// // println!("On Replace {_idx}");
// Ok(())
// }
// fn on_remove(
// &self,
// state: &mut Self::State,
// idx: usize,
// _testcase: &Option<Testcase<<Self::State as UsesInput>::Input>>
// ) -> Result<(), Error> {
// // println!("On Remove {idx}");
// if let Some(gm) = state.metadata_mut().get_mut::<GeneticMetadata>() {
// gm.next_gen = gm.next_gen.drain(..).into_iter().filter(|x| (*x).0 != idx).collect::<Vec<(usize, f64)>>();
// gm.current_gen = gm.current_gen.drain(..).into_iter().filter(|x| (*x).0 != idx).collect::<Vec<(usize, f64)>>();
// } else {
// state.add_metadata(GeneticMetadata::new(vec![], vec![]));
// }
// Ok(())
// }
}
impl<S> RemovableScheduler for GenerationScheduler<S>
where
S: State + HasCorpus + HasMetadata,
{
/// Replaces the testcase at the given idx
fn on_replace( fn on_replace(
&mut self, &self,
state: &mut <Self as UsesState>::State, _state: &mut Self::State,
idx: CorpusId, _idx: usize,
testcase: &Testcase<<<Self as UsesState>::State as UsesInput>::Input>, _prev: &Testcase<<Self::State as UsesInput>::Input>
) -> Result<(), Error> { ) -> Result<(), Error> {
// println!("On Replace {_idx}");
Ok(()) Ok(())
} }
/// Removes an entry from the corpus
fn on_remove( fn on_remove(
&mut self, &self,
state: &mut <Self as UsesState>::State, state: &mut Self::State,
idx: CorpusId, idx: usize,
testcase: &Option<Testcase<<<Self as UsesState>::State as UsesInput>::Input>>, _testcase: &Option<Testcase<<Self::State as UsesInput>::Input>>
) -> Result<(), Error> { ) -> Result<(), Error> {
// println!("On Remove {idx}");
if let Some(gm) = state.metadata_mut().get_mut::<GeneticMetadata>() {
gm.next_gen = gm.next_gen.drain(..).into_iter().filter(|x| (*x).0 != idx).collect::<Vec<(usize, f64)>>();
gm.current_gen = gm.current_gen.drain(..).into_iter().filter(|x| (*x).0 != idx).collect::<Vec<(usize, f64)>>();
} else {
state.add_metadata(GeneticMetadata::new(vec![], vec![]));
}
Ok(()) Ok(())
} }
} }
impl<S> GenerationScheduler<S> impl<S> GenerationScheduler<S>
{ {
#[allow(unused)]
pub fn new() -> Self { pub fn new() -> Self {
let gen_size = 100;
#[cfg(feature = "gensize_1")]
let gen_size= 1;
#[cfg(feature = "gensize_10")]
let gen_size= 10;
#[cfg(feature = "gensize_100")]
let gen_size= 100;
#[cfg(feature = "gensize_1000")]
let gen_size= 1000;
Self { Self {
phantom: PhantomData, phantom: PhantomData,
gen_size gen_size: 100,
} }
} }
} }

View File

@ -1,721 +0,0 @@
use hashbrown::HashSet;
use libafl::inputs::Input;
/// Feedbacks organizing SystemStates as a graph
use libafl::SerdeAny;
use libafl_bolts::ownedref::OwnedMutSlice;
use petgraph::graph::EdgeIndex;
use libafl::prelude::UsesInput;
use libafl::common::HasNamedMetadata;
use libafl::state::UsesState;
use libafl::prelude::State;
use libafl::schedulers::MinimizerScheduler;
use libafl_bolts::HasRefCnt;
use std::path::PathBuf;
use libafl::corpus::Testcase;
use std::collections::hash_map::DefaultHasher;
use std::hash::Hasher;
use std::hash::Hash;
use libafl::events::EventFirer;
use libafl::state::MaybeHasClientPerfMonitor;
use libafl::feedbacks::Feedback;
use libafl_bolts::Named;
use libafl::Error;
use libafl_qemu::edges::EDGES_MAP_SIZE_IN_USE;
use hashbrown::HashMap;
use libafl::{executors::ExitKind, observers::ObserversTuple, common::HasMetadata};
use serde::{Deserialize, Serialize};
use super::AtomicBasicBlock;
use super::CaptureEvent;
use super::ExecInterval;
use super::JobInstance;
use super::ReducedFreeRTOSSystemState;
use super::observers::QemuSystemStateObserver;
use super::TaskJob;
use petgraph::prelude::DiGraph;
use petgraph::graph::NodeIndex;
use petgraph::Direction;
use crate::time::clock::QemuClockObserver;
use crate::time::clock::FUZZ_START_TIMESTAMP;
use crate::time::worst::MaxTimeFavFactor;
use std::time::SystemTime;
use std::{fs::OpenOptions, io::Write};
use std::borrow::Cow;
use std::ops::Deref;
use std::ops::DerefMut;
use std::rc::Rc;
use petgraph::visit::EdgeRef;
//============================= Data Structures
#[derive(Serialize, Deserialize, Clone, Debug, Default, Hash)]
pub struct STGNode
{
base: ReducedFreeRTOSSystemState,
abb: AtomicBasicBlock,
}
impl STGNode {
pub fn _pretty_print(&self) -> String {
format!("{}\nl{} {:x}-{:x}\n{}", self.base.current_task.task_name, self.abb.level, self.abb.start, self.abb.ends.iter().next().unwrap_or_else(||&0xFFFF), self.base.print_lists())
}
pub fn color_print(&self) -> String {
let color = match self.abb.level {
1 => "\", shape=box, style=filled, fillcolor=\"lightblue",
2 => "\", shape=box, style=filled, fillcolor=\"yellow",
0 => "\", shape=box, style=filled, fillcolor=\"white",
_ => "\", style=filled, fillcolor=\"lightgray",
};
let message = match self.abb.level {
1 => format!("API Call"),
2 => format!("ISR"),
0 => format!("Task: {}",self.base.current_task.task_name),
_ => format!(""),
};
let mut label = format!("{}\nABB: {:x}-{:x}\nHash:{:X}\n{}", message, self.abb.start, self.abb.ends.iter().next().unwrap_or_else(||&0xFFFF), self.base.get_hash()>>48, self.base.print_lists());
label.push_str(color);
label
}
fn get_hash(&self) -> u64 {
let mut s = DefaultHasher::new();
self.base.hash(&mut s);
self.abb.hash(&mut s);
s.finish()
}
}
impl PartialEq for STGNode {
fn eq(&self, other: &STGNode) -> bool {
self.base==other.base
}
}
#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq, Eq)]
pub struct STGEdge
{
pub event: CaptureEvent,
pub name: String,
pub worst: Option<(u64, Vec<(u32, u8)>)>,
}
impl STGEdge {
pub fn _pretty_print(&self) -> String {
let mut short = match self.event {
CaptureEvent::APIStart => "Call: ",
CaptureEvent::APIEnd => "Ret: ",
CaptureEvent::ISRStart => "Int: ",
CaptureEvent::ISREnd => "IRet: ",
CaptureEvent::End => "End: ",
CaptureEvent::Undefined => "",
}.to_string();
short.push_str(&self.name);
short
}
pub fn color_print(&self) -> String {
let mut short = self.name.clone();
short.push_str(match self.event {
CaptureEvent::APIStart => "\", color=\"blue",
CaptureEvent::APIEnd => "\", color=\"black",
CaptureEvent::ISRStart => "\", color=red, style=\"dashed",
CaptureEvent::ISREnd => "\", color=red, style=\"solid",
CaptureEvent::End => "",
CaptureEvent::Undefined => "",
});
short
}
pub fn is_abb_end(&self) -> bool {
match self.event {
CaptureEvent::APIStart | CaptureEvent::APIEnd | CaptureEvent::ISREnd | CaptureEvent::End => true,
_ => false
}
}
}
impl Hash for STGEdge {
fn hash<H: Hasher>(&self, state: &mut H) {
self.event.hash(state);
self.name.hash(state);
}
}
/// Shared Metadata for a systemstateFeedback
#[derive(Debug, Serialize, Deserialize, SerdeAny, Clone)]
pub struct STGFeedbackState
{
name: Cow<'static, str>,
// aggregated traces as a graph
pub graph: DiGraph<STGNode, STGEdge>,
systemstate_index: HashMap<u64, ReducedFreeRTOSSystemState>,
pub state_abb_hash_index: HashMap<(u64, u64), NodeIndex>,
stgnode_index: HashMap<u64, NodeIndex>,
entrypoint: NodeIndex,
exitpoint: NodeIndex,
// Metadata about aggregated traces ("aggregated" meaning the ordering has been removed)
worst_observed_per_aggegated_path: HashMap<Vec<AtomicBasicBlock>,u64>,
worst_observed_per_abb_path: HashMap<u64,u64>,
worst_observed_per_stg_path: HashMap<u64,u64>,
worst_abb_exec_count: HashMap<AtomicBasicBlock, usize>,
// Metadata about job instances
pub worst_task_jobs: HashMap<u64, TaskJob>,
}
impl Default for STGFeedbackState {
fn default() -> STGFeedbackState {
let mut graph = DiGraph::new();
let mut entry = STGNode::default();
entry.base.current_task.task_name="Start".to_string();
let mut exit = STGNode::default();
exit.base.current_task.task_name="End".to_string();
let systemstate_index = HashMap::from([(entry.base.get_hash(), entry.base.clone()), (exit.base.get_hash(), exit.base.clone())]);
let h_entry = entry.get_hash();
let h_exit = exit.get_hash();
let entrypoint = graph.add_node(entry.clone());
let exitpoint = graph.add_node(exit.clone());
let state_abb_hash_index = HashMap::from([((entry.base.get_hash(), entry.abb.get_hash()), entrypoint), ((exit.base.get_hash(), exit.abb.get_hash()), exitpoint)]);
let index = HashMap::from([(h_entry, entrypoint), (h_exit, exitpoint)]);
STGFeedbackState {
name: Cow::from("stgfeedbackstate".to_string()),
graph,
stgnode_index: index,
entrypoint,
exitpoint,
worst_observed_per_aggegated_path: HashMap::new(),
worst_observed_per_abb_path: HashMap::new(),
worst_observed_per_stg_path: HashMap::new(),
worst_abb_exec_count: HashMap::new(),
systemstate_index,
state_abb_hash_index,
worst_task_jobs: HashMap::new(),
}
}
}
impl Named for STGFeedbackState
{
#[inline]
fn name(&self) -> &Cow<'static, str> {
&self.name
}
}
// Per-testcase STG trace data (node/edge indices, ABB hashes, intervals, job instances), attached as metadata
#[derive(Debug, Default, Serialize, Deserialize, Clone)]
pub struct STGNodeMetadata {
nodes: Vec<NodeIndex>,
edges: Vec<EdgeIndex>,
abbs: u64,
aggregate: u64,
top_abb_counts: Vec<u64>,
intervals: Vec<ExecInterval>,
jobs: Vec<JobInstance>,
indices: Vec<usize>,
tcref: isize,
}
impl STGNodeMetadata {
pub fn new(nodes: Vec<NodeIndex>, edges: Vec<EdgeIndex>, abb_trace: Vec<AtomicBasicBlock>, abbs_pathhash: u64, aggregate: u64, top_abb_counts: Vec<u64>, intervals: Vec<ExecInterval>, jobs: Vec<JobInstance>) -> Self {
#[allow(unused)]
let mut indices : Vec<_> = vec![];
#[cfg(feature = "sched_stg_edge")]
{
indices = edges.iter().map(|x| x.index()).collect();
indices.sort_unstable();
indices.dedup();
}
#[cfg(feature = "sched_stg_pathhash")]
{
indices.push(get_generic_hash(&edges) as usize);
}
#[cfg(feature = "sched_stg_abbhash")]
{
indices.push(abbs_pathhash as usize);
}
#[cfg(feature = "sched_stg_aggregatehash")]
{
// indices.push(aggregate as usize);
indices = top_abb_counts.iter().map(|x| (*x) as usize).collect();
}
Self {indices, intervals, jobs, nodes, abbs: abbs_pathhash, aggregate, top_abb_counts, edges, tcref: 0}
}
pub fn nodes(&self) -> &Vec<NodeIndex> {
&self.nodes
}
pub fn edges(&self) -> &Vec<EdgeIndex> {
&self.edges
}
pub fn abbs(&self) -> u64 {
self.abbs
}
pub fn aggregate(&self) -> u64 {
self.aggregate
}
pub fn top_abb_counts(&self) -> &Vec<u64> {
&self.top_abb_counts
}
pub fn intervals(&self) -> &Vec<ExecInterval> {
&self.intervals
}
pub fn jobs(&self) -> &Vec<JobInstance> {
&self.jobs
}
}
impl Deref for STGNodeMetadata {
type Target = [usize];
/// Convert to a slice
fn deref(&self) -> &[usize] {
&self.indices
}
}
impl DerefMut for STGNodeMetadata {
/// Convert to a slice
fn deref_mut(&mut self) -> &mut [usize] {
&mut self.indices
}
}
impl HasRefCnt for STGNodeMetadata {
fn refcnt(&self) -> isize {
self.tcref
}
fn refcnt_mut(&mut self) -> &mut isize {
&mut self.tcref
}
}
libafl_bolts::impl_serdeany!(STGNodeMetadata);
pub type GraphMaximizerCorpusScheduler<CS, O> =
MinimizerScheduler<CS, MaxTimeFavFactor<<CS as UsesState>::State>,STGNodeMetadata,O>;
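// Editor's note (assumption): this alias wraps a base scheduler CS in LibAFL's MinimizerScheduler,
// keyed by the indices carried in STGNodeMetadata and scored by MaxTimeFavFactor, so for every
// covered STG index the testcase with the worst (longest) execution time tends to stay favored.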
// AI generated, human verified
/// Count the occurrences of each element in a vector, assumes the vector is sorted
fn count_occurrences_sorted<T>(vec: &Vec<T>) -> HashMap<&T, usize>
where
T: PartialEq + Eq + Hash + Clone,
{
let mut counts = HashMap::new();
if vec.is_empty() {
return counts;
}
let mut current_obj = &vec[0];
let mut current_count = 1;
for obj in vec.iter().skip(1) {
if obj == current_obj {
current_count += 1;
} else {
counts.insert(current_obj, current_count);
current_obj = obj;
current_count = 1;
}
}
// Insert the count of the last object
counts.insert(current_obj, current_count);
counts
}
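// Editor's illustration (not part of the original file): the sortedness assumption matters,
// because the function only merges runs of equal, adjacent elements.
#[cfg(test)]
mod count_occurrences_sorted_tests {
    use super::count_occurrences_sorted;

    #[test]
    fn counts_adjacent_runs() {
        let v = vec![1, 1, 2, 3, 3, 3];
        let counts = count_occurrences_sorted(&v);
        assert_eq!(counts[&1], 2);
        assert_eq!(counts[&2], 1);
        assert_eq!(counts[&3], 3);
    }
}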
//============================= Graph Feedback
pub static mut STG_MAP: [u16; EDGES_MAP_SIZE_IN_USE] = [0; EDGES_MAP_SIZE_IN_USE];
pub static mut MAX_STG_NUM: usize = 0;
pub unsafe fn stg_map_mut_slice<'a>() -> OwnedMutSlice<'a, u16> {
OwnedMutSlice::from_raw_parts_mut(STG_MAP.as_mut_ptr(), STG_MAP.len())
}
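// Editor's note (assumption): STG_MAP mirrors the layout of LibAFL's edge map, so the slice
// returned above can back a standard map observer/feedback; set_observer_map() below fills it
// with saturating per-edge hit counts for the most recent STG path.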
/// A Feedback reporting novel System-State Transitions. Depends on [`QemuSystemStateObserver`]
#[derive(Serialize, Deserialize, Clone, Debug, Default)]
pub struct StgFeedback
{
name: Cow<'static, str>,
last_node_trace: Option<Vec<NodeIndex>>,
last_edge_trace: Option<Vec<EdgeIndex>>,
last_intervals: Option<Vec<ExecInterval>>,
last_abb_trace: Option<Vec<AtomicBasicBlock>>,
last_abbs_hash: Option<u64>, // only set, if it was interesting
last_aggregate_hash: Option<u64>, // only set, if it was interesting
last_top_abb_hashes: Option<Vec<u64>>, // only set, if it was interesting
last_job_trace: Option<Vec<JobInstance>>, // only set, if it was interesting
dump_path: Option<PathBuf>
}
#[cfg(feature = "feed_stg")]
const INTEREST_EDGE : bool = true;
#[cfg(feature = "feed_stg")]
const INTEREST_NODE : bool = true;
#[cfg(feature = "feed_stg_pathhash")]
const INTEREST_PATH : bool = true;
#[cfg(feature = "feed_stg_abbhash")]
const INTEREST_ABBPATH : bool = true;
#[cfg(feature = "feed_stg_aggregatehash")]
const INTEREST_AGGREGATE : bool = true;
#[cfg(not(feature = "feed_stg"))]
const INTEREST_EDGE : bool = false;
#[cfg(not(feature = "feed_stg"))]
const INTEREST_NODE : bool = false;
#[cfg(not(feature = "feed_stg_pathhash"))]
const INTEREST_PATH : bool = false;
#[cfg(not(feature = "feed_stg_abbhash"))]
const INTEREST_ABBPATH : bool = false;
#[cfg(not(feature = "feed_stg_aggregatehash"))]
const INTEREST_AGGREGATE : bool = false;
const INTEREST_JOB_INSTANCE : bool = true;
fn set_observer_map(trace : &Vec<EdgeIndex>) {
// dbg!(trace);
unsafe {
for i in 0..MAX_STG_NUM {
STG_MAP[i] = 0;
}
for i in trace {
if MAX_STG_NUM < i.index() {
MAX_STG_NUM = i.index();
}
STG_MAP[i.index()] = STG_MAP[i.index()].saturating_add(1);
}
}
}
fn get_generic_hash<H>(input: &H) -> u64
where
H: Hash,
{
let mut s = DefaultHasher::new();
input.hash(&mut s);
s.finish()
}
fn execinterval_to_abb_instances(trace: &Vec<ExecInterval>, read_trace: &Vec<Vec<(u32, u8)>>) -> HashMap<usize, (u64, Vec<(u32, u8)>)>{
let mut instance_time: HashMap<usize, (u64, Vec<(u32, u8)>)> = HashMap::new();
for (_i,interval) in trace.iter().enumerate() { // Iterate intervals
// sum up execution time and accesses per ABB
let temp = interval.abb.as_ref().map(|abb| abb.instance_id).unwrap_or(usize::MAX);
match instance_time.get_mut(&temp) {
Some(x) => {
x.0 += interval.get_exec_time();
x.1.extend(read_trace[_i].clone());
},
None => {
if temp != usize::MAX {
instance_time.insert(temp, (interval.get_exec_time(), read_trace[_i].clone()));
}
}
};
}
return instance_time;
}
impl StgFeedback {
pub fn new(dump_name: Option<PathBuf>) -> Self {
// Self {name: String::from("STGFeedback"), last_node_trace: None, last_edge_trace: None, last_intervals: None }
let mut s = Self::default();
s.dump_path = dump_name.map(|x| x.with_extension("stgsize"));
s
}
/// params:
/// trace of intervals
/// hashtable of states
/// feedbackstate
/// produces:
/// trace of node and edge indices representing the path through the graph
/// flags for whether the trace is interesting and whether the graph was updated
/// side effect:
/// the graph gets new nodes and edges
fn update_stg_interval(trace: &Vec<ExecInterval>, read_trace: &Vec<Vec<(u32, u8)>>, table: &HashMap<u64, ReducedFreeRTOSSystemState>, fbs: &mut STGFeedbackState) -> (Vec<(NodeIndex, u64)>, Vec<(EdgeIndex, u64)>, bool, bool) {
let mut return_node_trace = vec![(fbs.entrypoint, 0)]; // Assuming entrypoint timestamp is 0
let mut return_edge_trace = vec![];
let mut interesting = false;
let mut updated = false;
if trace.is_empty() {
return (return_node_trace, return_edge_trace, interesting, updated);
}
let mut instance_time = execinterval_to_abb_instances(trace, read_trace);
// add all missing state+abb combinations to the graph
for (_i,interval) in trace.iter().enumerate() { // Iterate intervals
let node = STGNode {base: table[&interval.start_state].clone(), abb: interval.abb.as_ref().unwrap().clone()};
let h_node = node.get_hash();
let next_idx = if let Some(idx) = fbs.stgnode_index.get(&h_node) {
// already present
*idx
} else {
// not present
let h = (node.base.get_hash(), node.abb.get_hash());
let idx = fbs.graph.add_node(node);
fbs.stgnode_index.insert(h_node, idx);
fbs.state_abb_hash_index.insert(h, idx);
interesting |= INTEREST_NODE;
updated = true;
idx
};
// connect in graph if edge not present
let e = fbs.graph.edges_directed(return_node_trace[return_node_trace.len()-1].0, Direction::Outgoing).find(|x| petgraph::visit::EdgeRef::target(x) == next_idx);
if let Some(e_) = e {
return_edge_trace.push((petgraph::visit::EdgeRef::id(&e_), interval.start_tick));
if let Some((time, accesses)) = instance_time.get_mut(&interval.abb.as_ref().unwrap().instance_id) {
let ref_ = &mut fbs.graph.edge_weight_mut(e_.id()).unwrap().worst;
if ref_.is_some() {
let w = ref_.as_mut().unwrap();
if w.0 < *time {*w = (*time, accesses.clone())};
} else {
*ref_ = Some((*time, accesses.clone()));
}
}
} else {
let mut e__ = STGEdge{event: interval.start_capture.0, name: interval.start_capture.1.clone(), worst: None};
if e__.is_abb_end() {
if let Some((time,accesses)) = instance_time.get_mut(&interval.abb.as_ref().unwrap().instance_id) {
e__.worst = Some((*time, accesses.clone()));
}
}
let e_ = fbs.graph.add_edge(return_node_trace[return_node_trace.len()-1].0, next_idx, e__);
return_edge_trace.push((e_, interval.start_tick));
interesting |= INTEREST_EDGE;
updated = true;
}
return_node_trace.push((next_idx, interval.start_tick));
}
// every path terminates at the end
if !fbs.graph.neighbors_directed(return_node_trace[return_node_trace.len()-1].0, Direction::Outgoing).any(|x| x == fbs.exitpoint) {
let mut e__ = STGEdge { event: CaptureEvent::End, name: String::from("End"), worst: None };
if e__.is_abb_end() {
if let Some((time, accesses)) = instance_time.get_mut(&trace[trace.len()-1].abb.as_ref().unwrap().instance_id) {
e__.worst = Some((*time, accesses.clone()));
}
}
let e_ = fbs.graph.add_edge(return_node_trace[return_node_trace.len()-1].0, fbs.exitpoint, e__);
return_edge_trace.push((e_, trace[trace.len()-1].start_tick));
interesting |= INTEREST_EDGE;
updated = true;
}
return_node_trace.push((fbs.exitpoint, trace[trace.len()-1].start_tick));
(return_node_trace, return_edge_trace, interesting, updated)
}
fn abbs_in_exec_order(trace: &Vec<ExecInterval>) -> Vec<AtomicBasicBlock> {
let mut ret = Vec::new();
for i in 0..trace.len() {
if trace[i].abb != None &&
(trace[i].end_capture.0 == CaptureEvent::APIStart || trace[i].end_capture.0 == CaptureEvent::APIEnd || trace[i].end_capture.0 == CaptureEvent::End || trace[i].end_capture.0 == CaptureEvent::ISREnd) {
ret.push(trace[i].abb.as_ref().unwrap().clone());
}
}
ret
}
}
impl<S> Feedback<S> for StgFeedback
where
S: State + UsesInput + MaybeHasClientPerfMonitor + HasNamedMetadata,
S::Input: Default,
{
#[allow(clippy::wrong_self_convention)]
fn is_interesting<EM, OT>(
&mut self,
state: &mut S,
_manager: &mut EM,
_input: &S::Input,
observers: &OT,
_exit_kind: &ExitKind,
) -> Result<bool, Error>
where
EM: EventFirer<State = S>,
OT: ObserversTuple<S>,
S::Input: Default,
{
let observer = observers.match_name::<QemuSystemStateObserver<S::Input>>("systemstate")
.expect("QemuSystemStateObserver not found");
let clock_observer = observers.match_name::<QemuClockObserver>("clocktime")
.expect("QemuClockObserver not found");
#[cfg(feature = "trace_job_response_times")]
let last_runtime = observer.last_runtime();
#[cfg(not(feature = "trace_job_response_times"))]
let last_runtime = clock_observer.last_runtime();
let feedbackstate = match state
.named_metadata_map_mut()
.get_mut::<STGFeedbackState>("stgfeedbackstate") {
Some(s) => s,
Option::None => {
let n=STGFeedbackState::default();
state.named_metadata_map_mut().insert("stgfeedbackstate",n);
state.named_metadata_map_mut().get_mut::<STGFeedbackState>("stgfeedbackstate").unwrap()
}
};
// --------------------------------- Update STG
let (mut nodetrace, mut edgetrace, mut interesting, mut updated) = StgFeedback::update_stg_interval(&observer.last_trace, &observer.last_reads, &observer.last_states, feedbackstate);
#[cfg(feature = "trace_job_response_times")]
let worst_target_instance = observer.job_instances.iter().filter(|x| Some(x.name.clone()) == observer.select_task).max_by(|a,b| (a.response-a.release).cmp(&(b.response-b.release)));
#[cfg(feature = "trace_job_response_times")]
if let Some(worst_instance) = worst_target_instance {
edgetrace = edgetrace.into_iter().filter(|x| x.1 <= worst_instance.response && x.1 >= worst_instance.release ).collect();
nodetrace = nodetrace.into_iter().filter(|x| x.1 <= worst_instance.response && x.1 >= worst_instance.release ).collect();
} else {
if observer.select_task.is_none() { // if nothing was selected, just take the whole trace, otherwise there is nothing interesting here
edgetrace = Vec::new();
nodetrace = Vec::new();
}
}
#[cfg(feature = "feed_stg")]
set_observer_map(&edgetrace.iter().map(|x| x.0).collect::<Vec<_>>());
// --------------------------------- Update job instances
for i in observer.worst_job_instances.iter() {
interesting |= INTEREST_JOB_INSTANCE && if let Some(x) = feedbackstate.worst_task_jobs.get_mut(&i.1.get_hash_cached()) {
// eprintln!("Job instance already present");
x.try_update(i.1)
} else {
// eprintln!("New Job instance");
feedbackstate.worst_task_jobs.insert(i.1.get_hash_cached(), TaskJob::from_instance(&i.1));
true
}
};
self.last_job_trace = Some(observer.job_instances.clone());
// dbg!(&observer.job_instances);
{
let h = get_generic_hash(&edgetrace);
if let Some(x) = feedbackstate.worst_observed_per_stg_path.get_mut(&h) {
let t = last_runtime;
if t > *x {
*x = t;
interesting |= INTEREST_PATH;
}
} else {
feedbackstate.worst_observed_per_stg_path.insert(h, last_runtime);
updated = true;
interesting |= INTEREST_PATH;
}
}
#[cfg(not(feature = "trace_job_response_times"))]
let tmp = StgFeedback::abbs_in_exec_order(&observer.last_trace);
#[cfg(feature = "trace_job_response_times")]
let tmp = {
if let Some(worst_instance) = worst_target_instance {
let t = observer.last_trace.iter().filter(|x| x.start_tick < worst_instance.response && x.end_tick > worst_instance.release ).cloned().collect();
StgFeedback::abbs_in_exec_order(&t)
} else {
if observer.select_task.is_none() { // if nothing was selected, just take the whole trace, otherwise there is nothing interesting here
StgFeedback::abbs_in_exec_order(&observer.last_trace)
} else {
Vec::new()
}
}
};
if INTEREST_AGGREGATE || INTEREST_ABBPATH {
if INTEREST_ABBPATH {
let h = get_generic_hash(&tmp);
self.last_abbs_hash = Some(h);
// order of execution is relevant
if let Some(x) = feedbackstate.worst_observed_per_abb_path.get_mut(&h) {
let t = last_runtime;
if t > *x {
*x = t;
interesting |= INTEREST_ABBPATH;
}
} else {
feedbackstate.worst_observed_per_abb_path.insert(h, last_runtime);
interesting |= INTEREST_ABBPATH;
}
}
if INTEREST_AGGREGATE {
// aggregation by sorting; the order of states is not relevant
let mut _tmp = tmp.clone();
_tmp.sort(); // use sort+count, because we need the sorted trace anyways
let counts = count_occurrences_sorted(&_tmp);
let mut top_indices = Vec::new();
for (k,c) in counts {
if let Some(reference) = feedbackstate.worst_abb_exec_count.get_mut(k) {
if *reference < c {
*reference = c;
top_indices.push(get_generic_hash(k));
}
} else {
top_indices.push(get_generic_hash(k));
feedbackstate.worst_abb_exec_count.insert(k.clone(), c);
}
}
self.last_top_abb_hashes = Some(top_indices);
self.last_aggregate_hash = Some(get_generic_hash(&_tmp));
if let Some(x) = feedbackstate.worst_observed_per_aggegated_path.get_mut(&_tmp) {
let t = last_runtime;
if t > *x {
*x = t;
interesting |= INTEREST_AGGREGATE;
}
} else {
feedbackstate.worst_observed_per_aggegated_path.insert(_tmp, last_runtime);
interesting |= INTEREST_AGGREGATE;
}
}
}
// let out = feedbackstate.graph.map(|i,x| x.pretty_print(), |_,_| "");
// let outs = Dot::with_config(&out, &[Config::EdgeNoLabel]).to_string();
// let outs = outs.replace(';',"\\n");
// fs::write("./mystg.dot",outs).expect("Failed to write graph");
self.last_node_trace = Some(nodetrace.into_iter().map(|x| x.0).collect::<Vec<_>>());
self.last_edge_trace = Some(edgetrace.into_iter().map(|x| x.0).collect::<Vec<_>>());
self.last_intervals = Some(observer.last_trace.clone());
self.last_abb_trace = Some(tmp);
if let Some(dp) = &self.dump_path {
if updated {
let timestamp = SystemTime::now().duration_since(unsafe {FUZZ_START_TIMESTAMP}).unwrap().as_millis();
let mut file = OpenOptions::new()
.read(true)
.write(true)
.create(true)
.append(true)
.open(dp).expect("Could not open stgsize");
writeln!(file, "{},{},{},{},{}", feedbackstate.graph.edge_count(), feedbackstate.graph.node_count(), feedbackstate.worst_observed_per_aggegated_path.len(),feedbackstate.worst_observed_per_stg_path.len(), timestamp).expect("Write to dump failed");
}
}
Ok(interesting)
}
/// Append to the testcase the generated metadata in case of a new corpus item
#[inline]
fn append_metadata<EM, OT>(&mut self, _state: &mut S, _manager: &mut EM, _observers: &OT, testcase: &mut Testcase<S::Input>) -> Result<(), Error> {
let meta = STGNodeMetadata::new(self.last_node_trace.take().unwrap_or_default(), self.last_edge_trace.take().unwrap_or_default(), self.last_abb_trace.take().unwrap_or_default(), self.last_abbs_hash.take().unwrap_or_default(), self.last_aggregate_hash.take().unwrap_or_default(), self.last_top_abb_hashes.take().unwrap_or_default(), self.last_intervals.take().unwrap_or_default(), self.last_job_trace.take().unwrap_or_default());
testcase.metadata_map_mut().insert(meta);
Ok(())
}
/// Discard the stored metadata in case that the testcase is not added to the corpus
#[inline]
fn discard_metadata(&mut self, _state: &mut S, _input: &S::Input) -> Result<(), Error> {
Ok(())
}
}
impl Named for StgFeedback
{
#[inline]
fn name(&self) -> &Cow<'static, str> {
&self.name
}
}
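When a dump_path is configured, the feedback above appends one CSV line per STG update: edge count, node count, distinct aggregated paths, distinct STG paths, and a millisecond timestamp relative to fuzzing start. A minimal hypothetical reader for such a .stgsize file (the struct and function names below are the editor's own):

use std::fs;

// one parsed line of a .stgsize dump
#[derive(Debug)]
struct StgSizeRecord {
    edges: usize,
    nodes: usize,
    aggregated_paths: usize,
    stg_paths: usize,
    timestamp_ms: u128,
}

fn read_stgsize(path: &str) -> Vec<StgSizeRecord> {
    fs::read_to_string(path)
        .unwrap_or_default()
        .lines()
        .filter_map(|l| {
            let f: Vec<_> = l.split(',').collect();
            if f.len() != 5 {
                return None;
            }
            Some(StgSizeRecord {
                edges: f[0].parse().ok()?,
                nodes: f[1].parse().ok()?,
                aggregated_paths: f[2].parse().ok()?,
                stg_paths: f[3].parse().ok()?,
                timestamp_ms: f[4].parse().ok()?,
            })
        })
        .collect()
}

fn main() {
    // "demo.stgsize" is a placeholder path for this sketch
    for r in read_stgsize("demo.stgsize") {
        println!("{:?}", r);
    }
}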

View File

@ -1,3 +0,0 @@
pub mod clock;
pub mod qemustate;
pub mod worst;

View File

@ -4,44 +4,39 @@ use libafl::inputs::BytesInput;
use libafl::inputs::HasTargetBytes; use libafl::inputs::HasTargetBytes;
use libafl::feedbacks::MapIndexesMetadata; use libafl::feedbacks::MapIndexesMetadata;
use libafl::corpus::Testcase; use libafl::corpus::Testcase;
use libafl::prelude::{ClientStats, Monitor, SimplePrintingMonitor, UsesInput}; use libafl::prelude::{UsesInput, AsSlice};
use core::marker::PhantomData; use core::marker::PhantomData;
use libafl::schedulers::{MinimizerScheduler, ProbabilitySamplingScheduler, TestcaseScore}; use libafl::schedulers::{MinimizerScheduler, TestcaseScore};
use std::path::PathBuf; use std::path::PathBuf;
use std::fs; use std::fs;
use hashbrown::{HashMap}; use hashbrown::{HashMap};
use libafl::observers::ObserversTuple; use libafl::observers::ObserversTuple;
use libafl::executors::ExitKind; use libafl::executors::ExitKind;
use libafl::events::EventFirer; use libafl::events::EventFirer;
use libafl::state::{MaybeHasClientPerfMonitor, HasCorpus, UsesState}; use libafl::state::{HasClientPerfMonitor, HasCorpus, UsesState};
use libafl::prelude::State;
use libafl::inputs::Input; use libafl::inputs::Input;
use libafl::feedbacks::Feedback; use libafl::feedbacks::Feedback;
use libafl::common::HasMetadata; use libafl::state::HasMetadata;
use libafl_qemu::edges::QemuEdgesMapMetadata; use libafl_qemu::edges::QemuEdgesMapMetadata;
use libafl::observers::MapObserver; use libafl::observers::MapObserver;
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use std::cmp; use std::cmp;
use std::time::Duration;
use std::time::Instant;
use std::ops::Sub;
use libafl_bolts::{
AsSlice, ClientId, HasLen, Named
};
use libafl::{ use libafl::{
bolts::{
tuples::Named,
HasLen,
},
observers::Observer, observers::Observer,
Error, Error,
}; };
use crate::time::clock::QemuClockObserver; use crate::clock::QemuClockObserver;
use crate::systemstate::FreeRTOSSystemStateMetadata; use crate::systemstate::FreeRTOSSystemStateMetadata;
use std::borrow::Cow;
//=========================== Scheduler //=========================== Scheduler
pub type TimeMaximizerCorpusScheduler<CS, O> = pub type TimeMaximizerCorpusScheduler<CS> =
MinimizerScheduler<CS, MaxTimeFavFactor<<CS as UsesState>::State>, MapIndexesMetadata, O>; MinimizerScheduler<CS, MaxTimeFavFactor<<CS as UsesState>::State>, MapIndexesMetadata>;
/// Multiply the testcase size with the execution time. /// Multiply the testcase size with the execution time.
/// This favors small and quick testcases. /// This favors small and quick testcases.
@ -49,6 +44,7 @@ pub type TimeMaximizerCorpusScheduler<CS, O> =
pub struct MaxTimeFavFactor<S> pub struct MaxTimeFavFactor<S>
where where
S: HasCorpus + HasMetadata, S: HasCorpus + HasMetadata,
S::Input: HasLen,
{ {
phantom: PhantomData<S>, phantom: PhantomData<S>,
} }
@ -56,8 +52,9 @@ where
impl<S> TestcaseScore<S> for MaxTimeFavFactor<S> impl<S> TestcaseScore<S> for MaxTimeFavFactor<S>
where where
S: HasCorpus + HasMetadata, S: HasCorpus + HasMetadata,
S::Input: HasLen,
{ {
fn compute(state: &S, entry: &mut Testcase<<S as UsesInput>::Input>) -> Result<f64, Error> { fn compute(entry: &mut Testcase<<S as UsesInput>::Input>, state: &S) -> Result<f64, Error> {
// TODO maybe enforce entry.exec_time().is_some() // TODO maybe enforce entry.exec_time().is_some()
let et = entry.exec_time().expect("testcase.exec_time is needed for scheduler"); let et = entry.exec_time().expect("testcase.exec_time is needed for scheduler");
let tns : i64 = et.as_nanos().try_into().expect("failed to convert time"); let tns : i64 = et.as_nanos().try_into().expect("failed to convert time");
@ -65,11 +62,11 @@ where
} }
} }
pub type LenTimeMaximizerCorpusScheduler<CS, O> = pub type LenTimeMaximizerCorpusScheduler<CS> =
MinimizerScheduler<CS, MaxExecsLenFavFactor<<CS as UsesState>::State>, MapIndexesMetadata, O>; MinimizerScheduler<CS, MaxExecsLenFavFactor<<CS as UsesState>::State>, MapIndexesMetadata>;
pub type TimeStateMaximizerCorpusScheduler<CS, O> = pub type TimeStateMaximizerCorpusScheduler<CS> =
MinimizerScheduler<CS, MaxTimeFavFactor<<CS as UsesState>::State>, FreeRTOSSystemStateMetadata, O>; MinimizerScheduler<CS, MaxTimeFavFactor<<CS as UsesState>::State>, FreeRTOSSystemStateMetadata>;
/// Multiply the testcase size with the execution time. /// Multiply the testcase size with the execution time.
/// This favors small and quick testcases. /// This favors small and quick testcases.
@ -87,9 +84,9 @@ where
S: HasCorpus + HasMetadata, S: HasCorpus + HasMetadata,
S::Input: HasLen, S::Input: HasLen,
{ {
fn compute( state: &S, entry: &mut Testcase<S::Input>) -> Result<f64, Error> { fn compute(entry: &mut Testcase<S::Input>, state: &S) -> Result<f64, Error> {
let execs_per_hour = (3600.0/entry.exec_time().expect("testcase.exec_time is needed for scheduler").as_secs_f64()); let execs_per_hour = (3600.0/entry.exec_time().expect("testcase.exec_time is needed for scheduler").as_secs_f64());
let execs_times_length_per_hour = execs_per_hour*entry.load_len(state.corpus()).unwrap() as f64; let execs_times_length_per_hour = execs_per_hour*entry.cached_len()? as f64;
Ok(execs_times_length_per_hour) Ok(execs_times_length_per_hour)
} }
} }
@ -99,12 +96,11 @@ where
/// A Feedback reporting if the Input consists of strictly decreasing bytes. /// A Feedback reporting if the Input consists of strictly decreasing bytes.
#[derive(Serialize, Deserialize, Clone, Debug)] #[derive(Serialize, Deserialize, Clone, Debug)]
pub struct SortedFeedback { pub struct SortedFeedback {
name: Cow<'static, str>
} }
impl<S> Feedback<S> for SortedFeedback impl<S> Feedback<S> for SortedFeedback
where where
S: State + UsesInput + MaybeHasClientPerfMonitor, S: UsesInput + HasClientPerfMonitor,
S::Input: HasTargetBytes, S::Input: HasTargetBytes,
{ {
#[allow(clippy::wrong_self_convention)] #[allow(clippy::wrong_self_convention)]
@ -113,7 +109,7 @@ where
_state: &mut S, _state: &mut S,
_manager: &mut EM, _manager: &mut EM,
_input: &S::Input, _input: &S::Input,
_observers: &OT, observers: &OT,
_exit_kind: &ExitKind, _exit_kind: &ExitKind,
) -> Result<bool, Error> ) -> Result<bool, Error>
where where
@ -125,31 +121,19 @@ where
if tmp.len()<32 {return Ok(false);} if tmp.len()<32 {return Ok(false);}
let tmp = Vec::<u8>::from(&tmp[0..32]); let tmp = Vec::<u8>::from(&tmp[0..32]);
// tmp.reverse(); // tmp.reverse();
// if tmp.is_sorted_by(|a,b| match a.partial_cmp(b).unwrap_or(Less) { if tmp.is_sorted_by(|a,b| match a.partial_cmp(b).unwrap_or(Less) {
// Less => Some(Greater), Less => Some(Greater),
// Equal => Some(Greater), Equal => Some(Greater),
// Greater => Some(Less), Greater => Some(Less),
// }) {return Ok(true)}; }) {return Ok(true)};
let mut is_sorted = true; return Ok(false);
if tmp[0]<tmp[1] {
for i in 1..tmp.len() {
is_sorted &= tmp[i-1]<=tmp[i];
if !is_sorted {break;}
}
} else {
for i in 1..tmp.len() {
is_sorted &= tmp[i-1]>=tmp[i];
if !is_sorted {break;}
}
}
return Ok(is_sorted);
} }
} }
impl Named for SortedFeedback { impl Named for SortedFeedback {
#[inline] #[inline]
fn name(&self) -> &Cow<'static, str> { fn name(&self) -> &str {
&self.name "Sorted"
} }
} }
@ -157,7 +141,7 @@ impl SortedFeedback {
/// Creates a new [`HitFeedback`] /// Creates a new [`HitFeedback`]
#[must_use] #[must_use]
pub fn new() -> Self { pub fn new() -> Self {
Self {name: Cow::from("Sorted".to_string()),} Self {}
} }
} }
@ -172,13 +156,12 @@ impl Default for SortedFeedback {
#[derive(Serialize, Deserialize, Clone, Debug)] #[derive(Serialize, Deserialize, Clone, Debug)]
pub struct ExecTimeReachedFeedback pub struct ExecTimeReachedFeedback
{ {
name: Cow<'static, str>,
target_time: u64, target_time: u64,
} }
impl<S> Feedback<S> for ExecTimeReachedFeedback impl<S> Feedback<S> for ExecTimeReachedFeedback
where where
S: State + UsesInput + MaybeHasClientPerfMonitor, S: UsesInput + HasClientPerfMonitor,
{ {
#[allow(clippy::wrong_self_convention)] #[allow(clippy::wrong_self_convention)]
fn is_interesting<EM, OT>( fn is_interesting<EM, OT>(
@ -202,8 +185,8 @@ where
impl Named for ExecTimeReachedFeedback impl Named for ExecTimeReachedFeedback
{ {
#[inline] #[inline]
fn name(&self) -> &Cow<'static, str> { fn name(&self) -> &str {
&self.name "ExecTimeReachedFeedback"
} }
} }
@ -213,7 +196,7 @@ where
/// Creates a new [`ExecTimeReachedFeedback`] /// Creates a new [`ExecTimeReachedFeedback`]
#[must_use] #[must_use]
pub fn new(target_time : u64) -> Self { pub fn new(target_time : u64) -> Self {
Self {name: Cow::from("ExecTimeReachedFeedback".to_string()), target_time: target_time} Self {target_time: target_time}
} }
} }
@ -223,12 +206,11 @@ pub static mut EXEC_TIME_COLLECTION : Vec<u32> = Vec::new();
#[derive(Serialize, Deserialize, Clone, Debug)] #[derive(Serialize, Deserialize, Clone, Debug)]
pub struct ExecTimeCollectorFeedback pub struct ExecTimeCollectorFeedback
{ {
name: Cow<'static, str>
} }
impl<S> Feedback<S> for ExecTimeCollectorFeedback impl<S> Feedback<S> for ExecTimeCollectorFeedback
where where
S: State + UsesInput + MaybeHasClientPerfMonitor, S: UsesInput + HasClientPerfMonitor,
{ {
#[allow(clippy::wrong_self_convention)] #[allow(clippy::wrong_self_convention)]
fn is_interesting<EM, OT>( fn is_interesting<EM, OT>(
@ -253,8 +235,8 @@ where
impl Named for ExecTimeCollectorFeedback impl Named for ExecTimeCollectorFeedback
{ {
#[inline] #[inline]
fn name(&self) -> &Cow<'static, str> { fn name(&self) -> &str {
&self.name "ExecTimeCollectorFeedback"
} }
} }
@ -264,27 +246,21 @@ where
/// Creates a new [`ExecTimeCollectorFeedback`] /// Creates a new [`ExecTimeCollectorFeedback`]
#[must_use] #[must_use]
pub fn new() -> Self { pub fn new() -> Self {
Self {name: Cow::from("ExecTimeCollectorFeedback".to_string())} Self {}
} }
} }
/// Shared Metadata for a SysStateFeedback /// Shared Metadata for a SysStateFeedback
#[derive(Serialize, Deserialize, Clone, Debug)] #[derive(Serialize, Deserialize, Clone, Debug, Default)]
pub struct ExecTimeCollectorFeedbackState pub struct ExecTimeCollectorFeedbackState
{ {
name: Cow<'static, str>,
collection: Vec<u32>, collection: Vec<u32>,
} }
impl Named for ExecTimeCollectorFeedbackState impl Named for ExecTimeCollectorFeedbackState
{ {
#[inline] #[inline]
fn name(&self) -> &Cow<'static, str> { fn name(&self) -> &str {
&self.name "ExecTimeCollectorFeedbackState"
}
}
impl Default for ExecTimeCollectorFeedbackState {
fn default() -> Self {
Self {name: Cow::from("ExecTimeCollectorFeedbackState".to_string()), collection: Vec::new()}
} }
} }
@ -293,14 +269,13 @@ impl Default for ExecTimeCollectorFeedbackState {
#[derive(Serialize, Deserialize, Clone, Debug)] #[derive(Serialize, Deserialize, Clone, Debug)]
pub struct ExecTimeIncFeedback pub struct ExecTimeIncFeedback
{ {
name: Cow<'static, str>,
longest_time: u64, longest_time: u64,
last_is_longest: bool last_is_longest: bool
} }
impl<S> Feedback<S> for ExecTimeIncFeedback impl<S> Feedback<S> for ExecTimeIncFeedback
where where
S: State + UsesInput + MaybeHasClientPerfMonitor, S: UsesInput + HasClientPerfMonitor,
{ {
#[allow(clippy::wrong_self_convention)] #[allow(clippy::wrong_self_convention)]
fn is_interesting<EM, OT>( fn is_interesting<EM, OT>(
@ -326,16 +301,14 @@ where
Ok(false) Ok(false)
} }
} }
fn append_metadata<EM, OT>( fn append_metadata(
&mut self, &mut self,
_state: &mut S, _state: &mut S,
_manager: &mut EM,
observers: &OT,
testcase: &mut Testcase<<S as UsesInput>::Input>, testcase: &mut Testcase<<S as UsesInput>::Input>,
) -> Result<(), Error> { ) -> Result<(), Error> {
#[cfg(feature = "feed_afl")] #[cfg(feature = "feed_afl")]
if self.last_is_longest { if self.last_is_longest {
let mim : Option<&mut MapIndexesMetadata>= testcase.metadata_map_mut().get_mut(); let mim : Option<&mut MapIndexesMetadata>= testcase.metadata_mut().get_mut();
// pretend that the longest input alone excercises some non-existing edge, to keep it relevant // pretend that the longest input alone excercises some non-existing edge, to keep it relevant
mim.unwrap().list.push(usize::MAX); mim.unwrap().list.push(usize::MAX);
}; };
@ -346,8 +319,8 @@ where
impl Named for ExecTimeIncFeedback impl Named for ExecTimeIncFeedback
{ {
#[inline] #[inline]
fn name(&self) -> &Cow<'static, str> { fn name(&self) -> &str {
&self.name "ExecTimeReachedFeedback"
} }
} }
@ -357,7 +330,7 @@ where
/// Creates a new [`ExecTimeReachedFeedback`] /// Creates a new [`ExecTimeReachedFeedback`]
#[must_use] #[must_use]
pub fn new() -> Self { pub fn new() -> Self {
Self {name: Cow::from("ExecTimeReachedFeedback".to_string()), longest_time: 0, last_is_longest: false} Self {longest_time: 0, last_is_longest: false}
} }
} }
@ -365,12 +338,11 @@ where
#[derive(Serialize, Deserialize, Clone, Debug)] #[derive(Serialize, Deserialize, Clone, Debug)]
pub struct AlwaysTrueFeedback pub struct AlwaysTrueFeedback
{ {
name: Cow<'static, str>
} }
impl<S> Feedback<S> for AlwaysTrueFeedback impl<S> Feedback<S> for AlwaysTrueFeedback
where where
S: State + UsesInput + MaybeHasClientPerfMonitor, S: UsesInput + HasClientPerfMonitor,
{ {
#[allow(clippy::wrong_self_convention)] #[allow(clippy::wrong_self_convention)]
fn is_interesting<EM, OT>( fn is_interesting<EM, OT>(
@ -392,8 +364,8 @@ where
impl Named for AlwaysTrueFeedback impl Named for AlwaysTrueFeedback
{ {
#[inline] #[inline]
fn name(&self) -> &Cow<'static, str> { fn name(&self) -> &str {
&self.name "AlwaysTrueFeedback"
} }
} }
@ -404,90 +376,6 @@ where
#[must_use] #[must_use]
pub fn new() -> Self { pub fn new() -> Self {
Self { Self {
name: Cow::from("AlwaysTrueFeedback".to_string())
} }
} }
} }
//=========================== Probability Mass Scheduler
pub type TimeProbMassScheduler<S> =
ProbabilitySamplingScheduler<TimeProbFactor<S>, S>;
#[derive(Debug, Clone)]
pub struct TimeProbFactor<S>
where
S: HasCorpus + HasMetadata,
{
phantom: PhantomData<S>,
}
impl<S> TestcaseScore<S> for TimeProbFactor<S>
where
S: HasCorpus + HasMetadata,
{
fn compute(_state: &S, entry: &mut Testcase<<S as UsesInput>::Input>) -> Result<f64, Error> {
// TODO maybe enforce entry.exec_time().is_some()
let et = entry.exec_time().expect("testcase.exec_time is needed for scheduler");
let tns : i64 = et.as_nanos().try_into().expect("failed to convert time");
Ok(((tns as f64)/1000.0).powf(2.0)) //microseconds
}
}
/// Monitor that prints with a limited rate.
#[derive(Debug, Clone)]
pub struct RateLimitedMonitor {
inner: SimplePrintingMonitor,
last: Instant,
}
impl Monitor for RateLimitedMonitor {
/// The client monitor, mutable
fn client_stats_mut(&mut self) -> &mut Vec<ClientStats> {
self.inner.client_stats_mut()
}
/// The client monitor
fn client_stats(&self) -> &[ClientStats] {
self.inner.client_stats()
}
/// Time this fuzzing run stated
fn start_time(&self) -> Duration {
self.inner.start_time()
}
/// Time this fuzzing run stated
fn set_start_time(&mut self, time: Duration) {
self.inner.set_start_time(time);
}
#[inline]
fn display(&mut self, event_msg: &str, sender_id: ClientId) {
let now = Instant::now();
const RATE : Duration = Duration::from_secs(5);
if (event_msg!="Testcase" && event_msg!="UserStats") || now.duration_since(self.last) > RATE {
self.inner.display(event_msg, sender_id);
self.last = now;
}
}
}
impl RateLimitedMonitor {
/// Create new [`NopMonitor`]
#[must_use]
pub fn new() -> Self {
Self {
inner: SimplePrintingMonitor::new(),
last: Instant::now().sub(Duration::from_secs(7200)),
}
}
}
impl Default for RateLimitedMonitor {
fn default() -> Self {
Self::new()
}
}

View File

@ -1,5 +0,0 @@
dump
demo*
*.dot
*.time
*.case

Some files were not shown because too many files have changed in this diff.