Compare commits

..

49 Commits

SHA1 Message Date
798aa2ceb9 remove dead code 2023-06-02 10:00:13 +02:00
183ff32beb ignore archives 2023-06-02 08:32:23 +02:00
89979b64d9 eval script wrangling 2023-05-27 13:19:19 +02:00
35da9fdf24 HACK: interrupt limit for random fuzzing 2023-05-25 08:40:43 +02:00
1bca346b39 plot endpoints 2023-05-25 08:39:47 +02:00
8b90886299 parallelize plots 2023-05-23 12:06:14 +02:00
1bd7d853ac update plot script 2023-05-11 12:56:12 +02:00
253048e534 tweak time outputs 2023-05-10 09:25:22 +02:00
52cc00fedc add run_until_saturation 2023-05-08 18:23:32 +02:00
eec998c426 update snakefile 2023-05-04 11:47:56 +02:00
a328ddfd5f fix empty iterator crash, restart 2023-05-02 09:41:53 +02:00
6a042da5c1 set up configurations 2023-04-28 13:11:48 +02:00
2e20a22dc6 add missing use 2023-04-27 13:36:01 +02:00
bbc83ef6be randomize interrupts until worst 2023-04-24 15:33:03 +02:00
48466ac2d7 Test: remove pc from hash 2023-04-24 12:52:29 +02:00
ad8cecdba4 Test: hash notification states 2023-04-24 12:51:09 +02:00
c2afc0186e allow plotting from remote mount 2023-04-24 11:16:10 +02:00
4df67db479 update snakefile 2023-04-24 11:12:38 +02:00
402eff7b47 small fixes 2023-04-21 17:22:22 +02:00
a8a6c175c8 WIP: add simple interrupt time randomizer 2023-04-21 17:11:18 +02:00
8a79e12f91 update target_symbols 2023-04-21 14:12:04 +02:00
a3e38b6abb skip unchanged interrupts 2023-04-20 16:50:23 +02:00
eb04325f09 fix stage setup 2023-04-20 16:32:19 +02:00
cfb8fa2b32 fix use 2023-04-20 16:04:45 +02:00
2889e9bf61 WIP: move interrupt mutation to new stage 2023-04-20 15:50:22 +02:00
960764cf85 wip: interrupt placement 2023-04-17 17:33:21 +02:00
e6816cc2de add interrupt mutator 2023-04-17 09:50:18 +02:00
f3180a35cc plot min and max lines 2023-03-23 13:20:23 +01:00
54312b2577 plot lines instead of points 2023-03-22 16:10:19 +01:00
6d920fd962 fixes 2023-03-21 16:58:44 +01:00
281979ecd8 revert changes 2023-03-21 16:39:21 +01:00
c628afaa81 add generation based genetic testing 2023-03-21 16:34:05 +01:00
c548c6bc09 snakefile: dump cases, fix random fuzzing 2023-03-17 11:15:55 +01:00
6e8769907d add a new scheduler for systemtraces 2023-03-16 16:13:16 +01:00
bf639e42fa fix snakefile, symbols 2023-03-14 17:08:05 +01:00
a05ff97d0c seed rng from SEED_RANDOM 2023-03-13 14:45:21 +01:00
f09034b7fe determinism fixes, scheduler precision, restarts 2023-03-13 14:43:58 +01:00
d118eeacbd switch to native breakpoints 2023-03-13 12:19:24 +01:00
57fc441118 fix interrupt config 2023-03-09 17:21:26 +01:00
10b5fe8a74 fix rng seed 2023-03-09 10:53:40 +01:00
7f987b037d configure restarting manager 2023-03-09 10:16:08 +01:00
58be280a62 add micro_longint 2023-03-03 12:30:36 +01:00
3c586f5047 fuzz multiple interrupts 2023-03-02 15:30:53 +01:00
9336b932d0 rework plotting 2023-02-28 17:01:04 +01:00
e0f73778e2 add interrupt fuzzing 2023-02-27 10:39:52 +01:00
e5ac5ba825 dump time for showmap 2023-02-24 12:25:08 +01:00
2acf3ef301 add plotting to snakefile 2023-02-24 12:07:53 +01:00
28bac2a850 add feed_longest to record random cases 2023-02-23 22:33:13 +01:00
41586dd8b1 plotting: respect types 2023-02-23 22:28:25 +01:00
17 changed files with 1285 additions and 348 deletions

View File

@ -5,17 +5,26 @@ authors = ["Andrea Fioraldi <andreafioraldi@gmail.com>", "Dominik Maier <domenuk
edition = "2021" edition = "2021"
[features] [features]
default = ["std", "snapshot_restore", "singlecore", "feed_longest", "feed_afl"] default = ["std", "snapshot_restore", "singlecore", "restarting", "feed_systemtrace", "fuzz_int" ]
std = [] std = []
snapshot_restore = [] snapshot_restore = []
snapshot_fast = [ "snapshot_restore" ] snapshot_fast = [ "snapshot_restore" ]
singlecore = [] singlecore = []
restarting = ['singlecore']
trace_abbs = [] trace_abbs = []
systemstate = [] systemstate = []
feed_systemgraph = [ "systemstate" ] feed_systemgraph = [ "systemstate" ]
feed_systemtrace = [ "systemstate" ] feed_systemtrace = [ "systemstate" ]
feed_longest = [ ] feed_longest = [ ]
feed_afl = [ ] feed_afl = [ ]
feed_genetic = [ ]
fuzz_int = [ ]
gensize_1 = [ ]
gensize_10 = [ ]
gensize_100 = [ ]
observer_hitcounts = []
no_hash_state = []
run_until_saturation = []
[profile.release] [profile.release]
lto = true lto = true

View File

@ -8,3 +8,5 @@ mnt
*.pdf *.pdf
bins bins
.snakemake .snakemake
*.zip
*.tar.*

View File

@ -1,5 +1,16 @@
import csv import csv
def_flags="--no-default-features --features std,snapshot_restore,singlecore" import os
def_flags="--no-default-features --features std,snapshot_restore,singlecore,restarting,run_until_saturation"
remote="timedump_253048_1873f6_all/"
RUNTIME=10
TARGET_REPS_A=2
TARGET_REPS_B=2
NUM_NODES=2
REP_PER_NODE_A=int(TARGET_REPS_A/NUM_NODES)
REP_PER_NODE_B=int(TARGET_REPS_B/NUM_NODES)
NODE_ID= 0 if os.getenv('NODE_ID') == None else int(os.environ['NODE_ID'])
MY_RANGE_A=range(NODE_ID*REP_PER_NODE_A,(NODE_ID+1)*REP_PER_NODE_A)
MY_RANGE_B=range(NODE_ID*REP_PER_NODE_B,(NODE_ID+1)*REP_PER_NODE_B)
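The block above splits the requested repetitions across cluster nodes: each node reads NODE_ID from the environment and derives a disjoint slice of repetition indices. A minimal Python sketch of the same arithmetic, with illustrative totals:

```python
import os

TARGET_REPS = 10                 # illustrative total repetitions per experiment
NUM_NODES = 2                    # illustrative cluster size
REP_PER_NODE = TARGET_REPS // NUM_NODES

# Each node defaults to id 0 when NODE_ID is unset.
node_id = int(os.getenv("NODE_ID", "0"))

# Node 0 gets indices 0..4, node 1 gets 5..9, so runs never overlap.
my_range = range(node_id * REP_PER_NODE, (node_id + 1) * REP_PER_NODE)
print(list(my_range))
```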
rule build_showmap: rule build_showmap:
output: output:
@ -19,17 +30,17 @@ rule build_feedlongest:
shell: shell:
"cargo build --target-dir {output} {def_flags},feed_longest" "cargo build --target-dir {output} {def_flags},feed_longest"
rule build_feedaflnolongest: rule build_frafl:
output: output:
directory("bins/target_feedaflnolongest") directory("bins/target_frafl")
shell: shell:
"cargo build --target-dir {output} {def_flags},feed_afl" "cargo build --target-dir {output} {def_flags},feed_afl,feed_longest"
rule build_afl: rule build_afl:
output: output:
directory("bins/target_afl") directory("bins/target_afl")
shell: shell:
"cargo build --target-dir {output} {def_flags},feed_afl,feed_longest" "cargo build --target-dir {output} {def_flags},feed_afl,observer_hitcounts"
rule build_state: rule build_state:
output: output:
@ -37,18 +48,102 @@ rule build_state:
shell: shell:
"cargo build --target-dir {output} {def_flags},feed_systemtrace" "cargo build --target-dir {output} {def_flags},feed_systemtrace"
rule build_nohashstate:
output:
directory("bins/target_nohashstate")
shell:
"cargo build --target-dir {output} {def_flags},feed_systemtrace,no_hash_state"
rule build_graph: rule build_graph:
output: output:
directory("bins/target_graph") directory("bins/target_graph")
shell: shell:
"cargo build --target-dir {output} {def_flags},feed_systemgraph" "cargo build --target-dir {output} {def_flags},feed_systemgraph"
rule build_showmap_int:
output:
directory("bins/target_showmap_int")
shell:
"cargo build --target-dir {output} {def_flags},systemstate,fuzz_int"
rule build_random_int:
output:
directory("bins/target_random_int")
shell:
"cargo build --target-dir {output} {def_flags},feed_longest,fuzz_int"
rule build_state_int:
output:
directory("bins/target_state_int")
shell:
"cargo build --target-dir {output} {def_flags},feed_systemtrace,fuzz_int"
rule build_nohashstate_int:
output:
directory("bins/target_nohashstate_int")
shell:
"cargo build --target-dir {output} {def_flags},feed_systemtrace,fuzz_int,no_hash_state"
rule build_frafl_int:
output:
directory("bins/target_frafl_int")
shell:
"cargo build --target-dir {output} {def_flags},feed_afl,feed_longest,fuzz_int"
rule build_afl_int:
output:
directory("bins/target_afl_int")
shell:
"cargo build --target-dir {output} {def_flags},feed_afl,fuzz_int,observer_hitcounts"
rule build_feedlongest_int:
output:
directory("bins/target_feedlongest_int")
shell:
"cargo build --target-dir {output} {def_flags},feed_longest,fuzz_int"
rule build_feedgeneration1:
output:
directory("bins/target_feedgeneration1")
shell:
"cargo build --target-dir {output} {def_flags},feed_genetic,gensize_1"
rule build_feedgeneration1_int:
output:
directory("bins/target_feedgeneration1_int")
shell:
"cargo build --target-dir {output} {def_flags},feed_genetic,fuzz_int,gensize_1"
rule build_feedgeneration10:
output:
directory("bins/target_feedgeneration10")
shell:
"cargo build --target-dir {output} {def_flags},feed_genetic,gensize_10"
rule build_feedgeneration10_int:
output:
directory("bins/target_feedgeneration10_int")
shell:
"cargo build --target-dir {output} {def_flags},feed_genetic,fuzz_int,gensize_10"
rule build_feedgeneration100:
output:
directory("bins/target_feedgeneration100")
shell:
"cargo build --target-dir {output} {def_flags},feed_genetic,gensize_100"
rule build_feedgeneration100_int:
output:
directory("bins/target_feedgeneration100_int")
shell:
"cargo build --target-dir {output} {def_flags},feed_genetic,fuzz_int,gensize_100"
rule run_bench: rule run_bench:
input: input:
"build/{target}.elf", "build/{target}.elf",
"bins/target_{fuzzer}" "bins/target_{fuzzer}"
output: output:
multiext("timedump/{fuzzer}/{target}.{num}", "", ".log", ".case") multiext("timedump/{fuzzer}/{target}.{num}", "", ".log") # , ".case"
run: run:
with open('target_symbols.csv') as csvfile: with open('target_symbols.csv') as csvfile:
reader = csv.DictReader(csvfile) reader = csv.DictReader(csvfile)
@ -67,26 +162,29 @@ rule run_bench:
export FUZZ_INPUT={fuzz_input} export FUZZ_INPUT={fuzz_input}
export FUZZ_INPUT_LEN={fuzz_len} export FUZZ_INPUT_LEN={fuzz_len}
export BREAKPOINT={bkp} export BREAKPOINT={bkp}
export SEED_RANDOM=1 export SEED_RANDOM={wildcards.num}
export TIME_DUMP=$(pwd)/{output[0]} export TIME_DUMP=$(pwd)/{output[0]}
export CASE_DUMP=$(pwd)/{output[2]} export CASE_DUMP=$(pwd)/{output[0]}.case
export FUZZ_ITERS=3600 export TRACE_DUMP=$(pwd)/{output[0]}.trace
export FUZZ_ITERS={RUNTIME}
export FUZZER=$(pwd)/{input[1]}/debug/fret export FUZZER=$(pwd)/{input[1]}/debug/fret
set +e set +e
../fuzzer.sh > {output[1]} 2>&1 ../fuzzer.sh > {output[1]} 2>&1
exit 0 exit 0
""" """
if wildcards.fuzzer == 'random': if wildcards.fuzzer.find('random') >= 0:
script="export FUZZ_RANDOM=1\n"+script script="export FUZZ_RANDOM={output[1]}\n"+script
shell(script) shell(script)
rule run_showmap: rule run_showmap:
input: input:
"build/{target}.elf", "{remote}build/{target}.elf",
"bins/target_showmap", "bins/target_showmap",
"timedump/{fuzzer}/{target}.{num}.case" "bins/target_showmap_int",
"{remote}timedump/{fuzzer}/{target}.{num}.case"
output: output:
"timedump/{fuzzer}/{target}.{num}.trace.ron" "{remote}timedump/{fuzzer}/{target}.{num}.trace.ron",
"{remote}timedump/{fuzzer}/{target}.{num}.case.time",
run: run:
with open('target_symbols.csv') as csvfile: with open('target_symbols.csv') as csvfile:
reader = csv.DictReader(csvfile) reader = csv.DictReader(csvfile)
@ -98,57 +196,86 @@ rule run_showmap:
fuzz_input=line['input_symbol'] fuzz_input=line['input_symbol']
fuzz_len=line['input_size'] fuzz_len=line['input_size']
bkp=line['return_function'] bkp=line['return_function']
script=""" script=""
if wildcards.fuzzer.find('_int') > -1:
script="export FUZZER=$(pwd)/{input[2]}/debug/fret\n"
else:
script="export FUZZER=$(pwd)/{input[1]}/debug/fret\n"
script+="""
mkdir -p $(dirname {output}) mkdir -p $(dirname {output})
export KERNEL=$(pwd)/{input[0]} export KERNEL=$(pwd)/{input[0]}
export FUZZ_MAIN={fuzz_main} export FUZZ_MAIN={fuzz_main}
export FUZZ_INPUT={fuzz_input} export FUZZ_INPUT={fuzz_input}
export FUZZ_INPUT_LEN={fuzz_len} export FUZZ_INPUT_LEN={fuzz_len}
export BREAKPOINT={bkp} export BREAKPOINT={bkp}
export TRACE_DUMP=$(pwd)/{output} export TRACE_DUMP=$(pwd)/{output[0]}
export FUZZER=$(pwd)/{input[1]}/debug/fret export DO_SHOWMAP=$(pwd)/{input[3]}
export DO_SHOWMAP=$(pwd)/{input[2]} export TIME_DUMP=$(pwd)/{output[1]}
set +e set +e
../fuzzer.sh ../fuzzer.sh
exit 0 exit 0
""" """
if wildcards.fuzzer == 'random': if wildcards.fuzzer.find('random') >= 0:
script="export FUZZ_RANDOM=1\n"+script script="export FUZZ_RANDOM=1\n"+script
shell(script) shell(script)
rule tarnsform_trace: rule tarnsform_trace:
input: input:
"timedump/{fuzzer}/{target}.{num}.trace.ron" "{remote}timedump/{fuzzer}/{target}.{num}.trace.ron"
output: output:
"timedump/{fuzzer}/{target}.{num}.trace.csv" "{remote}timedump/{fuzzer}/{target}.{num}.trace.csv"
shell: shell:
"$(pwd)/../../../../state2gantt/target/debug/state2gantt {input} > {output[0]}" "$(pwd)/../../../../state2gantt/target/debug/state2gantt {input} > {output[0]}"
rule trace2gantt: rule trace2gantt:
input: input:
"timedump/{fuzzer}/{target}.{num}.trace.csv" "{remote}timedump/{fuzzer}/{target}.{num}.trace.csv"
output: output:
"timedump/{fuzzer}/{target}.{num}.trace.csv.png" "{remote}timedump/{fuzzer}/{target}.{num}.trace.csv.png"
shell: shell:
"Rscript --vanilla $(pwd)/../../../../state2gantt/gantt.R {input}" "Rscript --vanilla $(pwd)/../../../../state2gantt/gantt.R {input}"
rule all_main:
input:
expand("timedump/{fuzzer}/{target}.{num}", fuzzer=['random','afl','feedgeneration10','state'], target=['waters','watersv2'],num=range(0,3))
rule all_main_int:
input:
expand("timedump/{fuzzer}/{target}.{num}", fuzzer=['random_int','afl_int','feedgeneration10_int','state_int'], target=['waters_int','watersv2_int'],num=range(0,4))
rule all_compare_feedgeneration:
input:
expand("timedump/{fuzzer}/{target}.{num}", fuzzer=['feedgeneration1','feedgeneration10','feedgeneration100'], target=['waters_int','watersv2'],num=range(0,10))
rule all_compare_feedgeneration_int:
input:
expand("timedump/{fuzzer}/{target}.{num}", fuzzer=['feedgeneration1_int','feedgeneration10_int','feedgeneration100_int'], target=['waters_int','watersv2_int'],num=range(0,10))
rule all_compare_afl:
input:
expand("timedump/{fuzzer}/{target}.{num}", fuzzer=['afl','frafl','feedlongest'], target=['waters','watersv2'],num=range(0,10))
rule all_compare_afl_int:
input:
expand("timedump/{fuzzer}/{target}.{num}", fuzzer=['afl_int','frafl_int','feedlongest_int'], target=['waters_int','watersv2_int'],num=range(0,10))
rule all_images:
input:
expand("{remote}timedump/{fuzzer}/{target}.{num}.trace.csv.png",remote=remote, fuzzer=['afl','feedgeneration10','state'], target=['waters','watersv2'],num=range(0,3))
rule all_images_int:
input:
expand("{remote}timedump/{fuzzer}/{target}.{num}.trace.csv.png",remote=remote, fuzzer=['afl_int','feedgeneration10_int','state_int'], target=['waters_int','watersv2_int'],num=range(0,3))
rule clusterfuzz:
input:
expand("timedump/{fuzzer}/{target}.{num}", fuzzer=['random','afl','feedgeneration10','state'], target=['waters','watersv2'],num=MY_RANGE_A),
expand("timedump/{fuzzer}/{target}.{num}", fuzzer=['random_int','afl_int','feedgeneration10_int','state_int'], target=['waters_int','watersv2_int'],num=MY_RANGE_A),
expand("timedump/{fuzzer}/{target}.{num}", fuzzer=['feedgeneration1','feedgeneration10','feedgeneration100'], target=['waters_int','watersv2'],num=MY_RANGE_B),
expand("timedump/{fuzzer}/{target}.{num}", fuzzer=['feedgeneration1_int','feedgeneration10_int','feedgeneration100_int'], target=['waters_int','watersv2_int'],num=MY_RANGE_B),
expand("timedump/{fuzzer}/{target}.{num}", fuzzer=['afl','frafl','feedlongest'], target=['waters','watersv2'],num=MY_RANGE_B),
expand("timedump/{fuzzer}/{target}.{num}", fuzzer=['afl_int','frafl_int','feedlongest_int'], target=['waters_int','watersv2_int'],num=MY_RANGE_B),
rule all_bins: rule all_bins:
input: input:
"bins/target_random", expand("bins/target_{target}{flag}",target=['random','afl','frafl','state','feedgeneration100'],flag=['','_int'])
"bins/target_feedlongest",
"bins/target_feedaflnolongest",
"bins/target_afl",
"bins/target_state",
"bins/target_graph"
rule all_constructed:
input:
expand("timedump/{fuzzer}/{target}.{num}", fuzzer=['random','afl','state','graph','feedlongest','feedaflnolongest'], target=['tmr'],num=range(0,10))
rule all_periodic:
input:
expand("timedump/{fuzzer}/{target}.{num}", fuzzer=['random','afl','state','graph'], target=['waters'],num=range(0,10))
rule all_compare_afl_longest:
input:
expand("timedump/{fuzzer}/{target}.{num}", fuzzer=['afl','feedlongest','feedaflnolongest'], target=['waters'],num=range(0,10))

View File

@ -1,38 +1,55 @@
library("mosaic") library("mosaic")
library("dplyr")
library("foreach")
library("doParallel")
#setup parallel backend to use many processors
cores=detectCores()
cl <- makeCluster(cores[1]-1) #not to overload your computer
registerDoParallel(cl)
args = commandArgs(trailingOnly=TRUE) args = commandArgs(trailingOnly=TRUE)
#myolors=c("#339933","#0066ff","#993300") # green, blue, red
myolors=c("green","blue","red", "yellow", "pink", "black") # green, blue, red
if (length(args)==0) { if (length(args)==0) {
runtype="timedump" runtype="timedump_253048_1873f6_all/timedump"
target="waters" target="waters_int"
filename_1=sprintf("%s.png",target) outputpath="~/code/FRET/LibAFL/fuzzers/FRET/benchmark/"
filename_2=sprintf("%s_maxline.png",target) #MY_SELECTION <- c('state', 'afl', 'graph', 'random')
filename_3=sprintf("%s_hist.png",target) SAVE_FILE=TRUE
} else { } else {
runtype=args[1] runtype=args[1]
target=args[2] target=args[2]
filename_1=sprintf("%s.png",args[2]) outputpath=args[3]
filename_2=sprintf("%s_maxline.png",args[2]) MY_SELECTION <- args[4:length(args)]
filename_3=sprintf("%s_hist.png",args[2]) SAVE_FILE=TRUE
# filename_1=args[3] }
worst_cases <- list(waters=0, waters_int=0, tmr=405669, micro_longint=0)
worst_case <- worst_cases[[target]]
if (is.null(worst_case)) {
worst_case = 0
} }
library("dplyr") #MY_COLORS=c("green","blue","red", "orange", "pink", "black")
COLORS <- c("dark grey","blue", "red", "green", "magenta", "orange", "cyan", "pink", "black", "orange") MY_COLORS <- c("green", "blue", "red", "magenta", "orange", "cyan", "pink", "gray", "orange", "black", "yellow","brown")
BENCHDIR=sprintf("~/code/FRET/LibAFL/fuzzers/FRET/benchmark/%s",runtype) BENCHDIR=sprintf("~/code/FRET/LibAFL/fuzzers/FRET/benchmark/%s",runtype)
BASENAMES=Filter(function(x) x!="",list.dirs(BENCHDIR,full.names=FALSE)) BASENAMES=Filter(function(x) x!="" && substr(x,1,1)!='.',list.dirs(BENCHDIR,full.names=FALSE))
PATTERNS=c(".*.0", PATTERNS="%s.[0-9]*$"
".*.1", #RIBBON='sd'
".*.2", #RIBBON='span'
".*.3", RIBBON='both'
".*.4", DRAW_WC = worst_case > 0
".*.5", LEGEND_POS="topright"
".*.6", #LEGEND_POS="bottomright"
".*.7", CONTINUE_LINE_TO_END=FALSE
".*.8",
".*.9") # https://www.r-bloggers.com/2013/04/how-to-change-the-alpha-value-of-colours-in-r/
alpha <- function(col, alpha=1){
if(missing(col))
stop("Please provide a vector of colours.")
apply(sapply(col, col2rgb)/255, 2,
function(x)
rgb(x[1], x[2], x[3], alpha=alpha))
}
# Trim a list of data frames to common length # Trim a list of data frames to common length
trim_data <- function(input,len=NULL) { trim_data <- function(input,len=NULL) {
@ -42,17 +59,19 @@ trim_data <- function(input,len=NULL) {
return(lapply(input, function(d) slice_head(d,n=len))) return(lapply(input, function(d) slice_head(d,n=len)))
} }
length_of_data <- function(input) { length_of_data <- function(input) {
min(sapply(input, function(v) dim(v)[1])) min(sapply(input, function(v) dim(v)[1]))
} }
# Takes a flat list # Takes a flat list
trace2maxline <- function(tr) { trace2maxline <- function(tr) {
maxline = tr maxline = tr
for (var in seq_len(length(maxline))[2:length(maxline)]) { for (var in seq_len(length(maxline))[2:length(maxline)]) {
maxline[var] = max(maxline[var],maxline[var-1]) #if (maxline[var]>1000000000) {
# maxline[var]=maxline[var-1]
#} else {
maxline[var] = max(maxline[var],maxline[var-1])
#}
} }
#plot(seq_len(length(maxline)),maxline,"l",xlab="Index",ylab="WOET") #plot(seq_len(length(maxline)),maxline,"l",xlab="Index",ylab="WOET")
return(maxline) return(maxline)
@ -75,91 +94,234 @@ frame2maxlines <- function(tr) {
return(tr) return(tr)
} }
maxlines2plot <- function(maxlines) { trace2maxpoints <- function(tr) {
min_length <- min(sapply(maxlines, function(v) dim(v)[1])) minval = tr[1,1]
ml_cut <- lapply(maxlines, function(v) v[1:min_length,]) collect = tr[1,]
plot(seq_len(min_length),unlist(ml_cut[[1]]),"l",xlab="Index",ylab="WOET",col=COLORS[1],ylim=c(best, worst)) for (i in seq_len(dim(tr)[1])) {
for (ml in seq_len(length(maxlines))[2:length(maxlines)]) { if (minval < tr[i,1]) {
lines(seq_len(min_length),unlist(ml_cut[ml]),"l",col=COLORS[ml]) collect = rbind(collect,tr[i,])
} minval = tr[i,1]
lines(seq_len(min_length),rep_len(best, min_length),col="black")
lines(seq_len(min_length),rep_len(best_success, min_length),col="black")
lines(seq_len(min_length),rep_len(worst_trace, min_length),col="green")
lines(seq_len(min_length),rep_len(worst, min_length),col="black")
legend("bottomright",legend=lapply(maxlines,names),col = COLORS[1:length(maxlines)], lwd=2)
}
frame2plot <- function(maxlines,colors=NULL,draw_extreme=TRUE,doint=FALSE,over_name=NULL) {
if (is.null(colors)) {
colors <- COLORS
}
min_length <- dim(maxlines)[1]
cur_worst <- worst
if (doint) {
cur_worst <- worst_int
}
plot(seq_len(min_length),unlist(maxlines[[1]]),"l",xlab="Execution",ylab="Worst observed response time",col=colors[1],ylim=c(min(best,min(maxlines)), max(cur_worst,max(maxlines))))
for (ml in seq_len(length(maxlines))[2:length(maxlines)]) {
lines(seq_len(min_length),unlist(maxlines[ml]),"l",col=colors[ml])
}
if (draw_extreme) {
if (!doint) {
nam <- c(names(maxlines),"worst case") #,"best case"
col <- c(colors[1:length(maxlines)],"black") #,"black"
ltp <- c(rep_len("solid",length(maxlines)),"longdash") # ,"longdash"
lines(seq_len(min_length),rep_len(worst, min_length),col=col[length(maxlines)+1],lty=ltp[length(maxlines)+1])
#nam <- c(names(maxlines),"worst trace","worst case") #,"best case"
#col <- c(colors[1:length(maxlines)],"black","black") #,"black"
#ltp <- c(rep_len("solid",length(maxlines)),"dotted","longdash") # ,"longdash"
#lines(seq_len(min_length),rep_len(best, min_length),col=col[length(maxlines)+1],lty=ltp[length(maxlines)+1])
#lines(seq_len(min_length),rep_len(best_success, min_length),col="black")
#lines(seq_len(min_length),rep_len(worst_trace, min_length),col=col[length(maxlines)+1],lty=ltp[length(maxlines)+1])
#lines(seq_len(min_length),rep_len(worst, min_length),col=col[length(maxlines)+2],lty=ltp[length(maxlines)+2])
} else {
nam <- c(names(maxlines),"worst case")
col <- c(colors[1:length(maxlines)],"black")
ltp <- c(rep_len("solid",length(maxlines)),"longdash")
lines(seq_len(min_length),rep_len(worst_int, min_length),col=col[length(maxlines)+1],lty=ltp[length(maxlines)+1])
} }
} else {
nam <- names(maxlines)
col <- colors[1:length(maxlines)]
ltp <- rep_len("solid",length(maxlines))
}
if (is.null(over_name)) {
legend("bottomright", legend=nam, col=col, lty=ltp)
} else {
legend("bottomright", legend=over_name, col=col, lty=ltp)
} }
tmp = tr[dim(tr)[1],]
tmp[1] = minval[1]
collect = rbind(collect,tmp)
return(collect)
} }
all_maxlines = c() sample_maxpoints <- function(tr,po) {
for (bn in BASENAMES) { index = 1
runtypefiles <- list.files(file.path(BENCHDIR,bn),pattern=sprintf("%s.[0-9]$",target),full.names = TRUE) collect=NULL
endpoint = dim(tr)[1]
for (p in po) {
if (p<=tr[1,2]) {
tmp = tr[index,]
tmp[2] = p
collect = rbind(collect, tmp)
} else if (p>=tr[endpoint,2]) {
tmp = tr[endpoint,]
tmp[2] = p
collect = rbind(collect, tmp)
} else {
for (i in seq(index,endpoint)-1) {
if (p >= tr[i,2] && p<tr[i+1,2]) {
tmp = tr[i,]
tmp[2] = p
collect = rbind(collect, tmp)
index = i
break
}
}
}
}
return(collect)
}
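trace2maxpoints reduces a raw (WORT, timestamp) trace to just the rows where a new maximum appears, and sample_maxpoints then resamples those step functions onto a shared time grid so runs can be averaged. A rough Python equivalent of the reduction step, assuming rows of (value, time) tuples with illustrative values:

```python
def trace_to_max_points(rows):
    """Keep only rows where the running maximum increases.

    rows: list of (value, time) tuples, ordered by time.
    Returns the reduced step function plus a final row that carries
    the overall maximum out to the last timestamp.
    """
    best = rows[0][0]
    collect = [rows[0]]
    for value, time in rows[1:]:
        if value > best:
            best = value
            collect.append((value, time))
    # extend the last plateau to the end of the trace
    collect.append((best, rows[-1][1]))
    return collect

# Example: the maximum rises at t=5 and t=9, everything else is dropped.
print(trace_to_max_points([(3, 0), (2, 1), (7, 5), (6, 7), (9, 9), (1, 12)]))
# [(3, 0), (7, 5), (9, 9), (9, 12)]
```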
#https://www.r-bloggers.com/2012/01/parallel-r-loops-for-windows-and-linux/
all_runtypetables <- foreach (bn=BASENAMES) %do% {
runtypefiles <- list.files(file.path(BENCHDIR,bn),pattern=sprintf(PATTERNS,target),full.names = TRUE)
if (length(runtypefiles) > 0) { if (length(runtypefiles) > 0) {
runtypetables = lapply(runtypefiles, function(x) read.table(x, quote="\"", comment.char="", col.names=c(bn))) runtypetables_reduced <- foreach(i=seq_len(length(runtypefiles))) %dopar% {
runtypetables = trim_data(runtypetables) rtable = read.csv(runtypefiles[[i]], col.names=c(sprintf("%s%d",bn,i),sprintf("times%d",i)))
list_of_maxlines = data2maxlines(runtypetables) trace2maxpoints(rtable)
mean_maxline<-Reduce(function(a,b) a+b,list_of_maxlines,0)/length(runtypetables) }
all_maxlines=append(all_maxlines,mean_maxline) #runtypetables <- lapply(seq_len(length(runtypefiles)),
# function(i)read.csv(runtypefiles[[i]], col.names=c(sprintf("%s%d",bn,i),sprintf("times%d",i))))
#runtypetables_reduced <- lapply(runtypetables, trace2maxpoints)
runtypetables_reduced
#all_runtypetables = c(all_runtypetables, list(runtypetables_reduced))
} }
} }
min_length <- min(sapply(all_maxlines, length)) all_runtypetables = all_runtypetables[lapply(all_runtypetables, length) > 0]
all_maxlines=lapply(all_maxlines, function(v) v[1:min_length]) all_min_points = foreach(rtt=all_runtypetables,.combine = cbind) %do% {
one_frame<-data.frame(all_maxlines) bn = substr(names(rtt[[1]])[1],1,nchar(names(rtt[[1]])[1])-1)
one_frame[length(one_frame)+1] <- seq_len(length(one_frame[[1]])) ret = data.frame(min(unlist(lapply(rtt, function(v) v[dim(v)[1],2]))))
names(one_frame)[length(one_frame)] <- 'iters' names(ret)[1] = bn
ret/(3600 * 1000)
ylow=min(one_frame[1:length(one_frame)-1]) }
yhigh=max(one_frame[1:length(one_frame)-1]) all_max_points = foreach(rtt=all_runtypetables,.combine = cbind) %do% {
bn = substr(names(rtt[[1]])[1],1,nchar(names(rtt[[1]])[1])-1)
plot(c(1,length(one_frame[[1]])),c(ylow,yhigh), col='white', xlab="iters", ylab="wcet", pch='.') ret = data.frame(max(unlist(lapply(rtt, function(v) v[dim(v)[1],2]))))
names(ret)[1] = bn
typenames = names(one_frame)[which(names(one_frame) != 'iters')] ret/(3600 * 1000)
for (t in seq_len(length(typenames))) { }
points(one_frame[c('iters',typenames[t])], col=myolors[t], pch='.') all_points = sort(unique(Reduce(c, lapply(all_runtypetables, function(v) Reduce(c, lapply(v, function(w) w[[2]]))))))
all_maxlines <- foreach (rtt=all_runtypetables) %do% {
bn = substr(names(rtt[[1]])[1],1,nchar(names(rtt[[1]])[1])-1)
runtypetables_sampled = foreach(v=rtt) %dopar% {
sample_maxpoints(v, all_points)[1]
}
#runtypetables_sampled = lapply(rtt, function(v) sample_maxpoints(v, all_points)[1])
tmp_frame <- Reduce(cbind, runtypetables_sampled)
statframe <- data.frame(rowMeans(tmp_frame),apply(tmp_frame, 1, sd),apply(tmp_frame, 1, min),apply(tmp_frame, 1, max), apply(tmp_frame, 1, median))
names(statframe) <- c(bn, sprintf("%s_sd",bn), sprintf("%s_min",bn), sprintf("%s_max",bn), sprintf("%s_med",bn))
#statframe[sprintf("%s_times",bn)] = all_points
round(statframe)
#all_maxlines = c(all_maxlines, list(round(statframe)))
}
one_frame<-data.frame(all_maxlines)
one_frame[length(one_frame)+1] <- all_points/(3600 * 1000)
names(one_frame)[length(one_frame)] <- 'time'
typenames = names(one_frame)[which(names(one_frame) != 'time')]
typenames = typenames[which(!endsWith(typenames, "_sd"))]
typenames = typenames[which(!endsWith(typenames, "_med"))]
ylow=min(one_frame[typenames])
yhigh=max(one_frame[typenames],worst_case)
typenames = typenames[which(!endsWith(typenames, "_min"))]
typenames = typenames[which(!endsWith(typenames, "_max"))]
ml2lines <- function(ml,lim) {
lines = NULL
last = 0
for (i in seq_len(dim(ml)[1])) {
if (!CONTINUE_LINE_TO_END && lim<ml[i,2]) {
break
}
lines = rbind(lines, cbind(X=last, Y=ml[i,1]))
lines = rbind(lines, cbind(X=ml[i,2], Y=ml[i,1]))
last = ml[i,2]
}
return(lines)
}
plotting <- function(selection, filename, MY_COLORS_) {
# filter out names of iters and sd cols
typenames = names(one_frame)[which(names(one_frame) != 'times')]
typenames = typenames[which(!endsWith(typenames, "_sd"))]
typenames = typenames[which(!endsWith(typenames, "_med"))]
typenames = typenames[which(!endsWith(typenames, "_min"))]
typenames = typenames[which(!endsWith(typenames, "_max"))]
typenames = selection[which(selection %in% typenames)]
if (length(typenames) == 0) {return()}
h_ = 500
w_ = h_*4/3
if (SAVE_FILE) {png(file=sprintf("%s%s_%s.png",outputpath,target,filename), width=w_, height=h_)}
par(mar=c(4,4,1,1))
par(oma=c(0,0,0,0))
plot(c(1,max(one_frame['time'])),c(ylow,yhigh), col='white', xlab="Time [h]", ylab="WORT [insn]", pch='.')
for (t in seq_len(length(typenames))) {
#proj = one_frame[seq(1, dim(one_frame)[1], by=max(1, length(one_frame[[1]])/(10*w_))),]
#points(proj[c('iters',typenames[t])], col=MY_COLORS_[t], pch='.')
avglines = ml2lines(one_frame[c(typenames[t],'time')],all_max_points[typenames[t]])
#lines(avglines, col=MY_COLORS_[t])
medlines = ml2lines(one_frame[c(sprintf("%s_med",typenames[t]),'time')],all_max_points[typenames[t]])
lines(medlines, col=MY_COLORS_[t], lty='solid')
milines = NULL
malines = NULL
milines = ml2lines(one_frame[c(sprintf("%s_min",typenames[t]),'time')],all_max_points[typenames[t]])
malines = ml2lines(one_frame[c(sprintf("%s_max",typenames[t]),'time')],all_max_points[typenames[t]])
if (exists("RIBBON") && ( RIBBON=='max' )) {
#lines(milines, col=MY_COLORS_[t], lty='dashed')
lines(malines, col=MY_COLORS_[t], lty='dashed')
#points(proj[c('iters',sprintf("%s_min",typenames[t]))], col=MY_COLORS_[t], pch='.')
#points(proj[c('iters',sprintf("%s_max",typenames[t]))], col=MY_COLORS_[t], pch='.')
}
if (exists("RIBBON") && RIBBON != '') {
for (i in seq_len(dim(avglines)[1]-1)) {
if (RIBBON=='both') {
# draw boxes
x_l <- milines[i,][['X']]
x_r <- milines[i+1,][['X']]
y_l <- milines[i,][['Y']]
y_h <- malines[i,][['Y']]
rect(x_l, y_l, x_r, y_h, col=alpha(MY_COLORS_[t], alpha=0.1), lwd=0)
}
if (FALSE && RIBBON=='span') {
# draw boxes
x_l <- milines[i,][['X']]
x_r <- milines[i+1,][['X']]
y_l <- milines[i,][['Y']]
y_h <- malines[i,][['Y']]
rect(x_l, y_l, x_r, y_h, col=alpha(MY_COLORS_[t], alpha=0.1), lwd=0)
}
#if (FALSE && RIBBON=='both' || RIBBON=='sd') {
# # draw sd
# x_l <- avglines[i,][['X']]
# x_r <- avglines[i+1,][['X']]
# y_l <- avglines[i,][['Y']]-one_frame[ceiling(i/2),][[sprintf("%s_sd",typenames[t])]]
# y_h <- avglines[i,][['Y']]+one_frame[ceiling(i/2),][[sprintf("%s_sd",typenames[t])]]
# if (x_r != x_l) {
# rect(x_l, y_l, x_r, y_h, col=alpha(MY_COLORS_[t], alpha=0.1), lwd=0)
# }
#}
#sd_ <- row[sprintf("%s_sd",typenames[t])][[1]]
#min_ <- row[sprintf("%s_min",typenames[t])][[1]]
#max_ <- row[sprintf("%s_max",typenames[t])][[1]]
#if (exists("RIBBON")) {
# switch (RIBBON,
# 'sd' = arrows(x_, y_-sd_, x_, y_+sd_, length=0, angle=90, code=3, col=alpha(MY_COLORS_[t], alpha=0.03)),
# 'both' = arrows(x_, y_-sd_, x_, y_+sd_, length=0, angle=90, code=3, col=alpha(MY_COLORS_[t], alpha=0.05)),
# 'span' = #arrows(x_, min_, x_, max_, length=0, angle=90, code=3, col=alpha(MY_COLORS_[t], alpha=0.03))
# )
#}
##arrows(x_, y_-sd_, x_, y_+sd_, length=0.05, angle=90, code=3, col=alpha(MY_COLORS[t], alpha=0.1))
}
}
}
leglines=typenames
if (DRAW_WC) {
lines(c(0,length(one_frame[[1]])),y=c(worst_case,worst_case), lty='dotted')
leglines=c(typenames, 'worst observed')
}
legend(LEGEND_POS, legend=leglines,#"topleft"
col=c(MY_COLORS_[1:length(typenames)],"black"),
lty=c(rep("solid",length(typenames)),"dotted"))
if (SAVE_FILE) {dev.off()}
}
stopCluster(cl)
par(mar=c(3.8,3.8,0,0))
par(oma=c(0,0,0,0))
#RIBBON='both'
#MY_SELECTION = c('state_int','generation100_int')
#MY_SELECTION = c('state_int','frafl_int')
if (exists("MY_SELECTION")) {
plotting(MY_SELECTION, 'custom', MY_COLORS[c(1,2)])
} else {
# MY_SELECTION=c('state', 'afl', 'random', 'feedlongest', 'feedgeneration', 'feedgeneration10')
#MY_SELECTION=c('state_int', 'afl_int', 'random_int', 'feedlongest_int', 'feedgeneration_int', 'feedgeneration10_int')
#MY_SELECTION=c('state', 'frAFL', 'statenohash', 'feedgeneration10')
#MY_SELECTION=c('state_int', 'frAFL_int', 'statenohash_int', 'feedgeneration10_int')
MY_SELECTION=typenames
RIBBON='both'
for (i in seq_len(length(MY_SELECTION))) {
n <- MY_SELECTION[i]
plotting(c(n), n, c(MY_COLORS[i]))
}
RIBBON='max'
plotting(MY_SELECTION,'all', MY_COLORS)
}
for (t in seq_len(length(typenames))) {
li = one_frame[dim(one_frame)[1],]
pear = (li[[typenames[[t]]]]-li[[sprintf("%s_med",typenames[[t]])]])/li[[sprintf("%s_sd",typenames[[t]])]]
print(sprintf("%s pearson: %g",typenames[[t]],pear))
} }
legend("bottomright", legend=typenames, col=myolors, lty="solid")

View File

@ -16,4 +16,9 @@ tmr,main,FUZZ_INPUT,32,trigger_Qemu_break
tacle_rtos,prvStage0,FUZZ_INPUT,604,trigger_Qemu_break tacle_rtos,prvStage0,FUZZ_INPUT,604,trigger_Qemu_break
lift,main_lift,FUZZ_INPUT,100,trigger_Qemu_break lift,main_lift,FUZZ_INPUT,100,trigger_Qemu_break
waters,main_waters,FUZZ_INPUT,4096,trigger_Qemu_break waters,main_waters,FUZZ_INPUT,4096,trigger_Qemu_break
watersv2,main_waters,FUZZ_INPUT,4096,trigger_Qemu_break
waters_int,main_waters,FUZZ_INPUT,4096,trigger_Qemu_break
watersv2_int,main_waters,FUZZ_INPUT,4096,trigger_Qemu_break
micro_branchless,main_branchless,FUZZ_INPUT,4,trigger_Qemu_break micro_branchless,main_branchless,FUZZ_INPUT,4,trigger_Qemu_break
micro_int,main_int,FUZZ_INPUT,16,trigger_Qemu_break
micro_longint,main_micro_longint,FUZZ_INPUT,16,trigger_Qemu_break
(rendered CSV preview omitted; columns: kernel, main_function, input_symbol, input_size, return_function)

View File

@ -14,7 +14,7 @@ use libafl::{
observers::ObserversTuple, prelude::UsesInput, impl_serdeany, observers::ObserversTuple, prelude::UsesInput, impl_serdeany,
}; };
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use std::{cell::UnsafeCell, cmp::max, env, fs::OpenOptions, io::Write}; use std::{cell::UnsafeCell, cmp::max, env, fs::OpenOptions, io::Write, time::Instant};
use libafl::bolts::tuples::Named; use libafl::bolts::tuples::Named;
use libafl_qemu::{ use libafl_qemu::{
@ -34,6 +34,9 @@ use core::{fmt::Debug, time::Duration};
// use libafl::feedbacks::FeedbackState; // use libafl::feedbacks::FeedbackState;
// use libafl::state::HasFeedbackStates; // use libafl::state::HasFeedbackStates;
use libafl::bolts::tuples::MatchName; use libafl::bolts::tuples::MatchName;
use std::time::{SystemTime, UNIX_EPOCH};
pub static mut FUZZ_START_TIMESTAMP : SystemTime = UNIX_EPOCH;
//========== Metadata //========== Metadata
#[derive(Debug, SerdeAny, Serialize, Deserialize)] #[derive(Debug, SerdeAny, Serialize, Deserialize)]
@ -84,7 +87,7 @@ impl Default for MaxIcountMetadata {
/// A piece of metadata tracking all icounts /// A piece of metadata tracking all icounts
#[derive(Debug, SerdeAny, Serialize, Deserialize)] #[derive(Debug, SerdeAny, Serialize, Deserialize)]
pub struct IcHist(pub Vec<u64>); pub struct IcHist (pub Vec<(u64, u128)>, pub (u64,u128));
//========== Observer //========== Observer
@ -137,12 +140,17 @@ where
// println!("Number of Ticks: {} <- {} {}",self.end_tick - self.start_tick, self.end_tick, self.start_tick); // println!("Number of Ticks: {} <- {} {}",self.end_tick - self.start_tick, self.end_tick, self.start_tick);
let metadata =_state.metadata_mut(); let metadata =_state.metadata_mut();
let hist = metadata.get_mut::<IcHist>(); let hist = metadata.get_mut::<IcHist>();
let timestamp = SystemTime::now().duration_since(unsafe {FUZZ_START_TIMESTAMP}).unwrap().as_millis();
match hist { match hist {
None => { None => {
metadata.insert(IcHist(vec![self.end_tick - self.start_tick])); metadata.insert(IcHist(vec![(self.end_tick - self.start_tick, timestamp)],
(self.end_tick - self.start_tick, timestamp)));
} }
Some(v) => { Some(v) => {
v.0.push(self.end_tick - self.start_tick); v.0.push((self.end_tick - self.start_tick, timestamp));
if (v.1.0 < self.end_tick-self.start_tick) {
v.1 = (self.end_tick - self.start_tick, timestamp);
}
if v.0.len() >= 100 { if v.0.len() >= 100 {
if let Ok(td) = env::var("TIME_DUMP") { if let Ok(td) = env::var("TIME_DUMP") {
let mut file = OpenOptions::new() let mut file = OpenOptions::new()
@ -151,9 +159,9 @@ where
.create(true) .create(true)
.append(true) .append(true)
.open(td).expect("Could not open timedump"); .open(td).expect("Could not open timedump");
let newv : Vec<u64> = Vec::with_capacity(100); let newv : Vec<(u64, u128)> = Vec::with_capacity(100);
for i in std::mem::replace(&mut v.0, newv).into_iter() { for i in std::mem::replace(&mut v.0, newv).into_iter() {
writeln!(file, "{}", i).expect("Write to dump failed"); writeln!(file, "{},{}", i.0, i.1).expect("Write to dump failed");
} }
} else { } else {
// If we don't write out values we don't need to remember them at all // If we don't write out values we don't need to remember them at all
@ -212,7 +220,7 @@ where
{ {
// TODO Replace with match_name_type when stable // TODO Replace with match_name_type when stable
let observer = observers.match_name::<QemuClockObserver>(self.name()).unwrap(); let observer = observers.match_name::<QemuClockObserver>(self.name()).unwrap();
self.exec_time = Some(Duration::from_nanos(observer.last_runtime() << 3)); // Assume a somewhat realistic multiplier of clock, it does not matter self.exec_time = Some(Duration::from_nanos(observer.last_runtime() << 4)); // Assume a somewhat realistic multiplier of clock, it does not matter
Ok(false) Ok(false)
} }
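With this change each timedump line becomes an `instruction_count,timestamp_ms` pair (milliseconds since FUZZ_START_TIMESTAMP) instead of a bare instruction count, which is the two-column format the updated R script reads with read.csv. A small Python reader sketch; the path in the usage comment is purely illustrative:

```python
import csv

def read_timedump(path):
    """Parse a timedump written by the clock observer.

    Each line is "<instruction_count>,<milliseconds_since_fuzz_start>".
    Returns a list of (icount, timestamp_ms) tuples in file order.
    """
    entries = []
    with open(path, newline="") as fh:
        for icount, ts_ms in csv.reader(fh):
            entries.append((int(icount), int(ts_ms)))
    return entries

# e.g. entries = read_timedump("timedump/state/waters.0")  # path is illustrative
```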

View File

@ -1,7 +1,7 @@
//! A fuzzer using qemu in systemmode for binary-only coverage of kernels //! A fuzzer using qemu in systemmode for binary-only coverage of kernels
//! //!
use core::time::Duration; use core::time::Duration;
use std::{env, path::PathBuf, process::{self, abort}, io::{Read, Write}, fs::{self, OpenOptions}}; use std::{env, path::PathBuf, process::{self, abort}, io::{Read, Write}, fs::{self, OpenOptions}, cmp::{min, max}, mem::transmute_copy, collections::btree_map::Range};
use libafl::{ use libafl::{
bolts::{ bolts::{
@ -22,24 +22,33 @@ use libafl::{
fuzzer::{Fuzzer, StdFuzzer}, fuzzer::{Fuzzer, StdFuzzer},
inputs::{BytesInput, HasTargetBytes}, inputs::{BytesInput, HasTargetBytes},
monitors::MultiMonitor, monitors::MultiMonitor,
mutators::scheduled::{havoc_mutations, StdScheduledMutator},
observers::{VariableMapObserver}, observers::{VariableMapObserver},
schedulers::{IndexesLenTimeMinimizerScheduler, QueueScheduler}, schedulers::{IndexesLenTimeMinimizerScheduler, QueueScheduler},
stages::StdMutationalStage,
state::{HasCorpus, StdState, HasMetadata, HasNamedMetadata}, state::{HasCorpus, StdState, HasMetadata, HasNamedMetadata},
Error, Error,
prelude::{SimpleMonitor, SimpleEventManager, AsMutSlice, RandBytesGenerator, Generator, SimpleRestartingEventManager, HasBytesVec, minimizer::TopRatedsMetadata}, Evaluator, prelude::{SimpleMonitor, SimpleEventManager, AsMutSlice, RandBytesGenerator, Generator, SimpleRestartingEventManager, HasBytesVec, minimizer::TopRatedsMetadata, havoc_mutations, StdScheduledMutator, HitcountsMapObserver}, Evaluator, stages::StdMutationalStage,
}; };
use libafl_qemu::{ use libafl_qemu::{
edges, edges::QemuEdgeCoverageHelper, elf::EasyElf, emu::Emulator, GuestPhysAddr, QemuExecutor, edges, edges::QemuEdgeCoverageHelper, elf::EasyElf, emu::Emulator, GuestPhysAddr, QemuExecutor,
QemuHooks, Regs, QemuInstrumentationFilter, GuestAddr, QemuHooks, Regs, QemuInstrumentationFilter, GuestAddr,
emu::libafl_qemu_set_native_breakpoint, emu::libafl_qemu_remove_native_breakpoint,
}; };
use rand::{SeedableRng, StdRng, Rng};
use crate::{ use crate::{
clock::{QemuClockObserver, ClockTimeFeedback, QemuClockIncreaseFeedback, IcHist}, clock::{QemuClockObserver, ClockTimeFeedback, QemuClockIncreaseFeedback, IcHist, FUZZ_START_TIMESTAMP},
qemustate::QemuStateRestoreHelper, qemustate::QemuStateRestoreHelper,
systemstate::{helpers::QemuSystemStateHelper, observers::QemuSystemStateObserver, feedbacks::{DumpSystraceFeedback, NovelSystemStateFeedback}, graph::{SysMapFeedback, SysGraphFeedbackState, GraphMaximizerCorpusScheduler}}, worst::{TimeMaximizerCorpusScheduler, ExecTimeIncFeedback, TimeStateMaximizerCorpusScheduler}, systemstate::{helpers::QemuSystemStateHelper, observers::QemuSystemStateObserver, feedbacks::{DumpSystraceFeedback, NovelSystemStateFeedback}, graph::{SysMapFeedback, SysGraphFeedbackState, GraphMaximizerCorpusScheduler}, schedulers::{LongestTraceScheduler, GenerationScheduler}}, worst::{TimeMaximizerCorpusScheduler, ExecTimeIncFeedback, TimeStateMaximizerCorpusScheduler, AlwaysTrueFeedback},
mutational::MyStateStage,
mutational::{MINIMUM_INTER_ARRIVAL_TIME},
}; };
use std::time::{SystemTime, UNIX_EPOCH};
pub static mut RNG_SEED: u64 = 1;
pub static mut LIMIT : u32 = u32::MAX;
pub const MAX_NUM_INTERRUPT: usize = 32;
pub const DO_NUM_INTERRUPT: usize = 32;
pub static mut MAX_INPUT_SIZE: usize = 32; pub static mut MAX_INPUT_SIZE: usize = 32;
/// Read ELF program headers to resolve physical load addresses. /// Read ELF program headers to resolve physical load addresses.
fn virt2phys(vaddr: GuestPhysAddr, tab: &EasyElf) -> GuestPhysAddr { fn virt2phys(vaddr: GuestPhysAddr, tab: &EasyElf) -> GuestPhysAddr {
@ -55,16 +64,18 @@ fn virt2phys(vaddr: GuestPhysAddr, tab: &EasyElf) -> GuestPhysAddr {
} }
extern "C" { extern "C" {
static mut libafl_int_offset : u32; static mut libafl_interrupt_offsets : [u32; 32];
static mut libafl_num_interrupts : usize;
} }
pub fn fuzz() { pub fn fuzz() {
let starttime = std::time::Instant::now(); unsafe {FUZZ_START_TIMESTAMP = SystemTime::now();}
let mut starttime = std::time::Instant::now();
if let Ok(s) = env::var("FUZZ_SIZE") { if let Ok(s) = env::var("FUZZ_SIZE") {
str::parse::<usize>(&s).expect("FUZZ_SIZE was not a number"); str::parse::<usize>(&s).expect("FUZZ_SIZE was not a number");
}; };
// Hardcoded parameters // Hardcoded parameters
let timeout = Duration::from_secs(3); let timeout = Duration::from_secs(10);
let broker_port = 1337; let broker_port = 1337;
let cores = Cores::from_cmdline("1").unwrap(); let cores = Cores::from_cmdline("1").unwrap();
let corpus_dirs = [PathBuf::from("./corpus")]; let corpus_dirs = [PathBuf::from("./corpus")];
@ -147,7 +158,7 @@ pub fn fuzz() {
.expect("Symbol or env BREAKPOINT not found"); .expect("Symbol or env BREAKPOINT not found");
println!("Breakpoint address = {:#x}", breakpoint); println!("Breakpoint address = {:#x}", breakpoint);
unsafe { unsafe {
libafl_int_offset = 0; libafl_num_interrupts = 0;
} }
if let Ok(input_len) = env::var("FUZZ_INPUT_LEN") { if let Ok(input_len) = env::var("FUZZ_INPUT_LEN") {
@ -155,6 +166,10 @@ pub fn fuzz() {
} }
unsafe {dbg!(MAX_INPUT_SIZE);} unsafe {dbg!(MAX_INPUT_SIZE);}
if let Ok(seed) = env::var("SEED_RANDOM") {
unsafe {RNG_SEED = str::parse::<u64>(&seed).expect("SEED_RANDOM must be an integer.");}
}
let mut run_client = |state: Option<_>, mut mgr, _core_id| { let mut run_client = |state: Option<_>, mut mgr, _core_id| {
// Initialize QEMU // Initialize QEMU
let args: Vec<String> = env::args().collect(); let args: Vec<String> = env::args().collect();
@ -162,14 +177,14 @@ pub fn fuzz() {
let emu = Emulator::new(&args, &env); let emu = Emulator::new(&args, &env);
if let Some(main_addr) = main_addr { if let Some(main_addr) = main_addr {
emu.set_breakpoint(main_addr);
unsafe { unsafe {
libafl_qemu_set_native_breakpoint(main_addr);
emu.run(); emu.run();
libafl_qemu_remove_native_breakpoint(main_addr);
} }
emu.remove_breakpoint(main_addr);
} }
emu.set_breakpoint(breakpoint); // BREAKPOINT unsafe { libafl_qemu_set_native_breakpoint(breakpoint); }// BREAKPOINT
// The wrapped harness function, calling out to the LLVM-style harness // The wrapped harness function, calling out to the LLVM-style harness
let mut harness = |input: &BytesInput| { let mut harness = |input: &BytesInput| {
@ -177,6 +192,30 @@ pub fn fuzz() {
let mut buf = target.as_slice(); let mut buf = target.as_slice();
let mut len = buf.len(); let mut len = buf.len();
unsafe { unsafe {
#[cfg(feature = "fuzz_int")]
{
let mut start_tick : u32 = 0;
for i in 0..DO_NUM_INTERRUPT {
let mut t : [u8; 4] = [0,0,0,0];
if len > (i+1)*4 {
for j in 0 as usize..4 as usize {
t[j]=buf[i*4+j];
}
if i == 0 || true {
unsafe {start_tick = u32::from_le_bytes(t) % LIMIT;}
} else {
start_tick = u32::saturating_add(start_tick,max(MINIMUM_INTER_ARRIVAL_TIME,u32::from_le_bytes(t)));
}
libafl_interrupt_offsets[i] = start_tick;
libafl_num_interrupts = i+1;
}
}
if buf.len() > libafl_num_interrupts*4 {
buf = &buf[libafl_num_interrupts*4..];
len = buf.len();
}
// println!("Load: {:?}", libafl_interrupt_offsets[0..libafl_num_interrupts].to_vec());
}
if len > MAX_INPUT_SIZE { if len > MAX_INPUT_SIZE {
buf = &buf[0..MAX_INPUT_SIZE]; buf = &buf[0..MAX_INPUT_SIZE];
len = MAX_INPUT_SIZE; len = MAX_INPUT_SIZE;
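In `fuzz_int` builds the harness carves the first `DO_NUM_INTERRUPT * 4` bytes off each input as little-endian u32 interrupt offsets (each taken modulo LIMIT; with the `i == 0 || true` shortcut every offset is absolute rather than cumulative) and passes only the remainder to the target as FUZZ_INPUT. A simplified Python sketch of that split, with LIMIT shown at its default value:

```python
import struct

DO_NUM_INTERRUPT = 32          # mirrors the constant in the fuzzer
LIMIT = 2**32 - 1              # u32::MAX unless the random-fuzzing path overrides it

def split_input(buf: bytes):
    """Split a fuzz input into interrupt offsets and the remaining payload."""
    offsets = []
    for i in range(DO_NUM_INTERRUPT):
        if len(buf) > (i + 1) * 4:
            (raw,) = struct.unpack_from("<I", buf, i * 4)  # little-endian u32
            offsets.append(raw % LIMIT)
        else:
            break
    # Only the bytes after the consumed offsets reach the target.
    payload = buf[len(offsets) * 4:] if len(buf) > len(offsets) * 4 else buf
    return offsets, payload
```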
@ -206,6 +245,8 @@ pub fn fuzz() {
let edges = unsafe { &mut edges::EDGES_MAP }; let edges = unsafe { &mut edges::EDGES_MAP };
let edges_counter = unsafe { &mut edges::MAX_EDGES_NUM }; let edges_counter = unsafe { &mut edges::MAX_EDGES_NUM };
let edges_observer = VariableMapObserver::new("edges", edges, edges_counter); let edges_observer = VariableMapObserver::new("edges", edges, edges_counter);
#[cfg(feature = "observer_hitcounts")]
let edges_observer = HitcountsMapObserver::new(edges_observer);
// Create an observation channel to keep track of the execution time // Create an observation channel to keep track of the execution time
let clock_time_observer = QemuClockObserver::new("clocktime"); let clock_time_observer = QemuClockObserver::new("clocktime");
@ -218,6 +259,11 @@ pub fn fuzz() {
// Time feedback, this one does not need a feedback state // Time feedback, this one does not need a feedback state
ClockTimeFeedback::new_with_observer(&clock_time_observer) ClockTimeFeedback::new_with_observer(&clock_time_observer)
); );
#[cfg(feature = "feed_genetic")]
let mut feedback = feedback_or!(
feedback,
AlwaysTrueFeedback::new()
);
#[cfg(feature = "feed_afl")] #[cfg(feature = "feed_afl")]
let mut feedback = feedback_or!( let mut feedback = feedback_or!(
feedback, feedback,
@ -239,6 +285,7 @@ pub fn fuzz() {
#[cfg(feature = "feed_systemtrace")] #[cfg(feature = "feed_systemtrace")]
let mut feedback = feedback_or!( let mut feedback = feedback_or!(
feedback, feedback,
// AlwaysTrueFeedback::new(),
NovelSystemStateFeedback::default() NovelSystemStateFeedback::default()
); );
#[cfg(feature = "feed_systemgraph")] #[cfg(feature = "feed_systemgraph")]
@ -254,7 +301,7 @@ pub fn fuzz() {
let mut state = state.unwrap_or_else(|| { let mut state = state.unwrap_or_else(|| {
StdState::new( StdState::new(
// RNG // RNG
StdRand::with_seed(current_nanos()), unsafe {StdRand::with_seed(RNG_SEED) },
// Corpus that will be evolved, we keep it in memory for performance // Corpus that will be evolved, we keep it in memory for performance
InMemoryCorpus::new(), InMemoryCorpus::new(),
// Corpus in which we store solutions (crashes in this example), // Corpus in which we store solutions (crashes in this example),
@ -270,14 +317,16 @@ pub fn fuzz() {
}); });
// A minimization+queue policy to get testcasess from the corpus // A minimization+queue policy to get testcasess from the corpus
#[cfg(not(any(feature = "feed_afl",feature = "feed_systemgraph",feature = "feed_systemtrace")))] #[cfg(not(any(feature = "feed_afl",feature = "feed_systemgraph",feature = "feed_systemtrace", feature = "feed_genetic")))]
let scheduler = QueueScheduler::new(); let scheduler = QueueScheduler::new();
#[cfg(all(feature = "feed_afl",not(any(feature = "feed_systemgraph",feature = "feed_systemtrace"))))] #[cfg(all(feature = "feed_afl",not(any(feature = "feed_systemgraph",feature = "feed_systemtrace"))))]
let scheduler = TimeMaximizerCorpusScheduler::new(QueueScheduler::new()); let scheduler = TimeMaximizerCorpusScheduler::new(QueueScheduler::new());
#[cfg(feature = "feed_systemtrace")] #[cfg(feature = "feed_systemtrace")]
let scheduler = TimeStateMaximizerCorpusScheduler::new(QueueScheduler::new()); let scheduler = LongestTraceScheduler::new(TimeStateMaximizerCorpusScheduler::new(QueueScheduler::new()));
#[cfg(feature = "feed_systemgraph")] #[cfg(feature = "feed_systemgraph")]
let scheduler = GraphMaximizerCorpusScheduler::new(QueueScheduler::new()); let scheduler = GraphMaximizerCorpusScheduler::new(QueueScheduler::new());
#[cfg(feature = "feed_genetic")]
let scheduler = GenerationScheduler::new();
// A fuzzer with feedbacks and a corpus scheduler // A fuzzer with feedbacks and a corpus scheduler
let mut fuzzer = StdFuzzer::new(scheduler, feedback, objective); let mut fuzzer = StdFuzzer::new(scheduler, feedback, objective);
@ -313,8 +362,15 @@ pub fn fuzz() {
// Wrap the executor to keep track of the timeout // Wrap the executor to keep track of the timeout
let mut executor = TimeoutExecutor::new(executor, timeout); let mut executor = TimeoutExecutor::new(executor, timeout);
let mutations = havoc_mutations();
// Setup an havoc mutator with a mutational stage // Setup an havoc mutator with a mutational stage
let mutator = StdScheduledMutator::new(havoc_mutations()); let mutator = StdScheduledMutator::new(mutations);
// #[cfg(not(all(feature = "feed_systemtrace", feature = "fuzz_int")))]
// let mut stages = tuple_list!(StdMutationalStage::new(mutator));
// #[cfg(all(feature = "feed_systemtrace", feature = "fuzz_int"))]
#[cfg(feature = "fuzz_int")]
let mut stages = tuple_list!(StdMutationalStage::new(mutator),MyStateStage::new());
#[cfg(not(feature = "fuzz_int"))]
let mut stages = tuple_list!(StdMutationalStage::new(mutator)); let mut stages = tuple_list!(StdMutationalStage::new(mutator));
if env::var("DO_SHOWMAP").is_ok() { if env::var("DO_SHOWMAP").is_ok() {
@ -337,22 +393,20 @@ pub fn fuzz() {
.create(true) .create(true)
.append(true) .append(true)
.open(td).expect("Could not open timedump"); .open(td).expect("Could not open timedump");
if let Some(ichist) = state.metadata().get::<IcHist>() { if let Some(ichist) = state.metadata_mut().get_mut::<IcHist>() {
for i in ichist.0.iter() { for i in ichist.0.drain(..) {
writeln!(file, "{}", i).expect("Write to dump failed"); writeln!(file, "{},{}", i.0, i.1).expect("Write to dump failed");
} }
} }
} }
} else { } else {
if let Ok(_) = env::var("SEED_RANDOM") { if let Ok(_) = env::var("SEED_RANDOM") {
unsafe { unsafe {
let mut generator = RandBytesGenerator::new(MAX_INPUT_SIZE); let mut rng = StdRng::seed_from_u64(RNG_SEED);
state for i in 0..100 {
.generate_initial_inputs(&mut fuzzer, &mut executor, &mut generator, &mut mgr, 100) let inp = BytesInput::new(vec![rng.gen::<u8>(); MAX_INPUT_SIZE]);
.unwrap_or_else(|_| { fuzzer.evaluate_input(&mut state, &mut executor, &mut mgr, inp).unwrap();
println!("Failed to load initial corpus at {:?}", &corpus_dirs); }
process::exit(0);
});
} }
} }
else if let Ok(sf) = env::var("SEED_DIR") { else if let Ok(sf) = env::var("SEED_DIR") {
@ -382,15 +436,23 @@ pub fn fuzz() {
Ok(t) => { Ok(t) => {
println!("Iterations {}",t); println!("Iterations {}",t);
let num = str::parse::<u64>(&t).expect("FUZZ_ITERS was not a number"); let num = str::parse::<u64>(&t).expect("FUZZ_ITERS was not a number");
if let Ok(_) = env::var("FUZZ_RANDOM") { unsafe { if let Ok(s) = env::var("FUZZ_RANDOM") { unsafe {
if s.contains("watersv2_int") {
println!("V2");
LIMIT=7000000;
} else {
println!("V1");
LIMIT=5000000;
}
println!("Random Fuzzing, ignore corpus"); println!("Random Fuzzing, ignore corpus");
// let mut generator = RandBytesGenerator::new(MAX_INPUT_SIZE); // let mut generator = RandBytesGenerator::new(MAX_INPUT_SIZE);
let target_duration = Duration::from_secs(num); let target_duration = Duration::from_secs(num);
let start_time = std::time::Instant::now(); let start_time = std::time::Instant::now();
let mut rng = StdRng::seed_from_u64(RNG_SEED);
while start_time.elapsed() < target_duration { while start_time.elapsed() < target_duration {
// let inp = generator.generate(&mut state).unwrap(); // let inp = generator.generate(&mut state).unwrap();
// libafl's generator is too slow // libafl's generator is too slow
let inp = BytesInput::new(vec![rand::random::<u8>(); MAX_INPUT_SIZE]); let inp = BytesInput::new(vec![rng.gen::<u8>(); MAX_INPUT_SIZE]);
fuzzer.evaluate_input(&mut state, &mut executor, &mut mgr, inp).unwrap(); fuzzer.evaluate_input(&mut state, &mut executor, &mut mgr, inp).unwrap();
} }
}} else { }} else {
@ -400,6 +462,118 @@ pub fn fuzz() {
fuzzer fuzzer
.fuzz_loop_until(&mut stages, &mut executor, &mut state, &mut mgr, starttime.checked_add(Duration::from_secs(num)).unwrap()) .fuzz_loop_until(&mut stages, &mut executor, &mut state, &mut mgr, starttime.checked_add(Duration::from_secs(num)).unwrap())
.unwrap(); .unwrap();
#[cfg(feature = "run_until_saturation")]
{
{
let mut dumper = |marker : String| {
if let Ok(td) = env::var("TIME_DUMP") {
let mut file = OpenOptions::new()
.read(true)
.write(true)
.create(true)
.append(true)
.open(td).expect("Could not open timedump");
if let Some(ichist) = state.metadata_mut().get_mut::<IcHist>() {
for i in ichist.0.drain(..) {
writeln!(file, "{},{}", i.0, i.1).expect("Write to dump failed");
}
}
}
if let Ok(td) = env::var("CASE_DUMP") {
println!("Dumping worst case to {:?}", td);
let corpus = state.corpus();
let mut worst = Duration::new(0,0);
let mut worst_input = None;
for i in 0..corpus.count() {
let tc = corpus.get(i).expect("Could not get element from corpus").borrow();
if worst < tc.exec_time().expect("Testcase missing duration") {
worst_input = Some(tc.input().as_ref().unwrap().bytes().to_owned());
worst = tc.exec_time().expect("Testcase missing duration");
}
}
match worst_input {
Some(wi) => {
// let cd = format!("{}.case",&td);
let mut cd = td.clone();
cd.push_str(&marker);
fs::write(&cd,wi).expect("Failed to write worst corpus element");
},
None => (),
}
#[cfg(feature = "feed_systemgraph")]
{
let mut gd = String::from(&td);
gd.push_str(&format!(".graph{}", marker));
if let Some(md) = state.named_metadata_mut().get_mut::<SysGraphFeedbackState>("SysMap") {
fs::write(&gd,ron::to_string(&md).expect("Failed to serialize graph")).expect("Failed to write graph");
}
}
{
let mut gd = String::from(&td);
if let Some(md) = state.metadata_mut().get_mut::<TopRatedsMetadata>() {
let mut uniq: Vec<usize> = md.map.values().map(|x| x.clone()).collect();
uniq.sort();
uniq.dedup();
gd.push_str(&format!(".{}.toprated{}", uniq.len(), marker));
fs::write(&gd,ron::to_string(&md.map).expect("Failed to serialize metadata")).expect("Failed to write graph");
}
}
}
};
dumper(format!(".iter_{}",t));
}
println!("Start running until saturation");
let mut last = state.metadata().get::<IcHist>().unwrap().1;
while SystemTime::now().duration_since(unsafe {FUZZ_START_TIMESTAMP}).unwrap().as_millis() < last.1 + Duration::from_secs(10800).as_millis() {
starttime=starttime.checked_add(Duration::from_secs(30)).unwrap();
fuzzer
.fuzz_loop_until(&mut stages, &mut executor, &mut state, &mut mgr, starttime)
.unwrap();
let after = state.metadata().get::<IcHist>().unwrap().1;
if after.0 > last.0 {
last=after;
}
if let Ok(td) = env::var("CASE_DUMP") {
println!("Dumping worst case to {:?}", td);
let corpus = state.corpus();
let mut worst = Duration::new(0,0);
let mut worst_input = None;
for i in 0..corpus.count() {
let tc = corpus.get(i).expect("Could not get element from corpus").borrow();
if worst < tc.exec_time().expect("Testcase missing duration") {
worst_input = Some(tc.input().as_ref().unwrap().bytes().to_owned());
worst = tc.exec_time().expect("Testcase missing duration");
}
}
match worst_input {
Some(wi) => {
// let cd = format!("{}.case",&td);
let cd = td.clone();
fs::write(&cd,wi).expect("Failed to write worst corpus element");
},
None => (),
}
#[cfg(feature = "feed_systemgraph")]
{
let mut gd = String::from(&td);
gd.push_str(".graph" );
if let Some(md) = state.named_metadata_mut().get_mut::<SysGraphFeedbackState>("SysMap") {
fs::write(&gd,ron::to_string(&md).expect("Failed to serialize graph")).expect("Failed to write graph");
}
}
{
let mut gd = String::from(&td);
if let Some(md) = state.metadata_mut().get_mut::<TopRatedsMetadata>() {
let mut uniq: Vec<usize> = md.map.values().map(|x| x.clone()).collect();
uniq.sort();
uniq.dedup();
gd.push_str(&format!(".{}.toprated", uniq.len()));
fs::write(&gd,ron::to_string(&md.map).expect("Failed to serialize metadata")).expect("Failed to write graph");
}
}
}
}
}
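The `run_until_saturation` block above keeps extending the deadline in 30-second slices and stops once no new maximum has been recorded for 10800 s (3 h) after the last improvement, tracked via the (icount, timestamp) pair cached in IcHist. A compact Python sketch of the stopping rule; `fuzz_for_30s` and `get_best` are hypothetical stand-ins for the `fuzz_loop_until` call and the IcHist lookup:

```python
import time

SATURATION_WINDOW_S = 10_800   # 3 hours without a new maximum ends the run

def run_until_saturation(fuzz_for_30s, get_best, fuzz_start):
    """Keep fuzzing in 30 s slices until the best result stops improving.

    get_best() returns (icount, timestamp_ms_since_start) of the current
    worst-case observation; fuzz_for_30s() runs one more slice.
    """
    best_icount, best_ts_ms = get_best()
    while (time.time() - fuzz_start) * 1000 < best_ts_ms + SATURATION_WINDOW_S * 1000:
        fuzz_for_30s()
        icount, ts_ms = get_best()
        if icount > best_icount:
            best_icount, best_ts_ms = icount, ts_ms
```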
} }
if let Ok(td) = env::var("TIME_DUMP") { if let Ok(td) = env::var("TIME_DUMP") {
let mut file = OpenOptions::new() let mut file = OpenOptions::new()
@ -408,9 +582,9 @@ pub fn fuzz() {
.create(true) .create(true)
.append(true) .append(true)
.open(td).expect("Could not open timedump"); .open(td).expect("Could not open timedump");
if let Some(ichist) = state.metadata().get::<IcHist>() { if let Some(ichist) = state.metadata_mut().get_mut::<IcHist>() {
for i in ichist.0.iter() { for i in ichist.0.drain(..) {
writeln!(file, "{}", i).expect("Write to dump failed"); writeln!(file, "{},{}", i.0, i.1).expect("Write to dump failed");
} }
} }
} }
@ -468,7 +642,7 @@ pub fn fuzz() {
let emu = Emulator::new(&args, &env); let emu = Emulator::new(&args, &env);
if let Some(main_addr) = main_addr { if let Some(main_addr) = main_addr {
emu.set_breakpoint(main_addr); unsafe { libafl_qemu_set_native_breakpoint(main_addr); }// BREAKPOINT
} }
unsafe { unsafe {
emu.run(); emu.run();
@ -487,24 +661,30 @@ pub fn fuzz() {
#[cfg(feature = "singlecore")] #[cfg(feature = "singlecore")]
{ {
let monitor = SimpleMonitor::new(|s| println!("{}", s)); let monitor = SimpleMonitor::new(|s| println!("{}", s));
// let mgr = SimpleEventManager::new(monitor); #[cfg(not(feature = "restarting"))]
// run_client(None, mgr, 0);
let mut shmem_provider = StdShMemProvider::new().unwrap();
let (state, mut mgr) = match SimpleRestartingEventManager::launch(monitor, &mut shmem_provider)
{ {
// The restarting state will spawn the same process again as child, then restarted it each time it crashes. let mgr = SimpleEventManager::new(monitor);
Ok(res) => res, run_client(None, mgr, 0);
Err(err) => match err { }
Error::ShuttingDown => {
return; #[cfg(feature = "restarting")]
} {
_ => { let mut shmem_provider = StdShMemProvider::new().unwrap();
panic!("Failed to setup the restarter: {}", err); let (state, mut mgr) = match SimpleRestartingEventManager::launch(monitor, &mut shmem_provider)
} {
}, // The restarting state will spawn the same process again as child, then restarted it each time it crashes.
}; Ok(res) => res,
run_client(state, mgr, 0); Err(err) => match err {
Error::ShuttingDown => {
return;
}
_ => {
panic!("Failed to setup the restarter: {}", err);
}
},
};
run_client(state, mgr, 0);
}
} }
// else -> multicore // else -> multicore
#[cfg(not(feature = "singlecore"))] #[cfg(not(feature = "singlecore"))]
@ -8,4 +8,6 @@ mod qemustate;
#[cfg(target_os = "linux")]
pub mod systemstate;
#[cfg(target_os = "linux")]
+mod mutational;
+#[cfg(target_os = "linux")]
mod worst;
@ -10,6 +10,8 @@ mod qemustate;
mod systemstate;
#[cfg(target_os = "linux")]
mod worst;
+#[cfg(target_os = "linux")]
+mod mutational;
#[cfg(target_os = "linux")]
pub fn main() {
@ -0,0 +1,240 @@
//! A mutational stage that re-places interrupt arrival times.
//! For the current input, it derives new interrupt offsets (guided by the recorded
//! FreeRTOS system-state trace when `feed_systemtrace` is enabled, randomly below the
//! largest observed tick count otherwise) and re-runs the mutated input in the executor.
use core::marker::PhantomData;
use std::cmp::{max, min};
use libafl::{
bolts::rands::Rand,
corpus::{Corpus, self},
fuzzer::Evaluator,
mark_feature_time,
stages::{Stage},
start_timer,
state::{HasClientPerfMonitor, HasCorpus, HasRand, UsesState, HasMetadata},
Error, prelude::{HasBytesVec, UsesInput, new_hash_feedback, StdRand, RandomSeed, MutationResult, Mutator},
};
use crate::{systemstate::{FreeRTOSSystemStateMetadata, RefinedFreeRTOSSystemState}, fuzzer::DO_NUM_INTERRUPT, clock::IcHist};
pub const MINIMUM_INTER_ARRIVAL_TIME : u32 = 700 * 1000 * (1 << 4);
//======================= Custom mutator
/// Stage that shifts interrupt arrival times to other scheduler blocks of the traced execution
#[derive(Clone, Debug, Default)]
pub struct MyStateStage<E, EM, Z> {
#[allow(clippy::type_complexity)]
phantom: PhantomData<(E, EM, Z)>,
}
impl<E, EM, Z> MyStateStage<E, EM, Z>
where
E: UsesState<State = Z::State>,
EM: UsesState<State = Z::State>,
Z: Evaluator<E, EM>,
Z::State: HasClientPerfMonitor + HasCorpus + HasRand,
{
pub fn new() -> Self {
Self { phantom: PhantomData }
}
}
impl<E, EM, Z> Stage<E, EM, Z> for MyStateStage<E, EM, Z>
where
E: UsesState<State = Z::State>,
EM: UsesState<State = Z::State>,
Z: Evaluator<E, EM>,
Z::State: HasClientPerfMonitor + HasCorpus + HasRand + HasMetadata,
<Z::State as UsesInput>::Input: HasBytesVec
{
fn perform(
&mut self,
fuzzer: &mut Z,
executor: &mut E,
state: &mut Self::State,
manager: &mut EM,
corpus_idx: usize,
) -> Result<(), Error> {
let mut _input = state
.corpus()
.get(corpus_idx)?
.borrow_mut().clone();
let mut newinput = _input.input_mut().as_mut().unwrap().clone();
// let mut tmpinput = _input.input_mut().as_mut().unwrap().clone();
let mut do_rerun = false;
{
// need our own random generator, because borrowing rules
let mut myrand = StdRand::new();
let mut target_bytes : Vec<u8> = vec![];
{
let input = _input.input_mut().as_ref().unwrap();
let tmp = &mut state.rand_mut();
myrand.set_seed(tmp.next());
target_bytes = input.bytes().to_vec();
}
// produce a slice of absolute interrupt times
let mut interrupt_offsets : [u32; 32] = [0u32; 32];
let mut num_interrupts : usize = 0;
{
let mut start_tick : u32 = 0;
for i in 0..DO_NUM_INTERRUPT {
let mut t : [u8; 4] = [0,0,0,0];
if target_bytes.len() > (i+1)*4 {
for j in 0 as usize..4 as usize {
t[j]=target_bytes[i*4+j];
}
// NOTE: `|| true` short-circuits the relative-offset branch below,
// so every 4-byte value is treated as an absolute tick
if i == 0 || true {
start_tick = u32::from_le_bytes(t);
} else {
start_tick = u32::saturating_add(start_tick,max(MINIMUM_INTER_ARRIVAL_TIME,u32::from_le_bytes(t)));
}
interrupt_offsets[i] = start_tick;
num_interrupts = i+1;
}
}
}
// sort only the populated prefix, so the zero-initialized tail does not displace real offsets
interrupt_offsets[0..num_interrupts].sort();
// println!("Vor Mutator: {:?}", interrupt_offsets[0..num_interrupts].to_vec());
// let num_i = min(target_bytes.len() / 4, DO_NUM_INTERRUPT);
let mut suffix = target_bytes.split_off(4 * num_interrupts);
let mut prefix : Vec<[u8; 4]> = vec![];
// let mut suffix : Vec<u8> = vec![];
#[cfg(feature = "feed_systemtrace")]
{
let tmp = _input.metadata().get::<FreeRTOSSystemStateMetadata>();
if tmp.is_some() {
let trace = tmp.expect("FreeRTOSSystemStateMetadata not found");
// calculate hits and identify snippets
let mut last_m = false;
let mut marks : Vec<(&RefinedFreeRTOSSystemState, usize, usize)>= vec![]; // 1: got interrupted, 2: interrupt handler
for i in 0..trace.inner.len() {
let curr = &trace.inner[i];
let m = interrupt_offsets[0..num_interrupts].iter().any(|x| (curr.start_tick..curr.end_tick).contains(&(*x as u64)));
if m {
marks.push((curr, i, 1));
// println!("1: {}",curr.current_task.task_name);
} else if last_m {
marks.push((curr, i, 2));
// println!("2: {}",curr.current_task.task_name);
} else {
marks.push((curr, i, 0));
}
last_m = m;
}
for i in 0..num_interrupts {
// bounds based on minimum inter-arrival time
let mut lb = 0;
let mut ub : u32 = marks[marks.len()-1].0.end_tick.try_into().expect("ticks > u32");
if i > 0 {
lb = u32::saturating_add(interrupt_offsets[i-1],MINIMUM_INTER_ARRIVAL_TIME);
}
if i < num_interrupts-1 {
ub = u32::saturating_sub(interrupt_offsets[i+1],MINIMUM_INTER_ARRIVAL_TIME);
}
// get old hit and handler
let old_hit = marks.iter().filter(
|x| x.0.start_tick < (interrupt_offsets[i] as u64) && (interrupt_offsets[i] as u64) < x.0.end_tick
).next();
let old_handler = match old_hit {
Some(s) => if s.1 < num_interrupts-1 && s.1 < marks.len()-1 {
Some(marks[s.1+1])
} else {None},
None => None
};
// find reachable alternatives
let alternatives : Vec<_> = marks.iter().filter(|x|
x.2 != 2 &&
(
x.0.start_tick < (lb as u64) && (lb as u64) < x.0.end_tick
|| x.0.start_tick < (ub as u64) && (ub as u64) < x.0.end_tick )
).collect();
// in cases there are no alternatives
if alternatives.len() == 0 {
if old_hit.is_none() {
// choose something random
let untouched : Vec<_> = marks.iter().filter(
|x| x.2 == 0
).collect();
if untouched.len() > 0 {
let tmp = interrupt_offsets[i];
let choice = myrand.choose(untouched);
interrupt_offsets[i] = myrand.between(choice.0.start_tick, choice.0.end_tick)
.try_into().expect("tick > u32");
do_rerun = true;
}
// println!("no alternatives, choose random i: {} {} -> {}",i,tmp,interrupt_offsets[i]);
continue;
} else {
// do nothing
// println!("no alternatives, do nothing i: {} {}",i,interrupt_offsets[i]);
continue;
}
}
let replacement = myrand.choose(alternatives);
if (old_hit.map_or(false, |x| x == replacement)) {
// use the old value
// println!("chose old value, do nothing i: {} {}",i,interrupt_offsets[i]);
continue;
} else {
let extra = if (old_hit.map_or(false, |x| x.1 < replacement.1)) {
// move futher back, respect old_handler
old_handler.map_or(0, |x| x.0.end_tick - x.0.start_tick)
} else { 0 };
let tmp = interrupt_offsets[i];
interrupt_offsets[i] = (myrand.between(replacement.0.start_tick,
replacement.0.end_tick) + extra).try_into().expect("ticks > u32");
// println!("chose new alternative, i: {} {} -> {}",i,tmp, interrupt_offsets[i]);
do_rerun = true;
}
}
let mut numbers : Vec<u32> = interrupt_offsets[0..num_interrupts].to_vec();
numbers.sort();
// println!("Mutator: {:?}", numbers);
let mut start : u32 = 0;
// for i in 0..numbers.len() {
// let tmp = numbers[i];
// numbers[i] = numbers[i]-start;
// start = tmp;
// }
for i in 0..numbers.len() {
prefix.push(u32::to_le_bytes(numbers[i]));
}
}
}
#[cfg(not(feature = "feed_systemtrace"))]
{
let metadata = state.metadata();
let hist = metadata.get::<IcHist>().unwrap();
let maxtick : u64 = hist.1.0;
// let maxtick : u64 = (_input.exec_time().expect("No duration found").as_nanos() >> 4).try_into().unwrap();
// randomly re-place each interrupt below the largest tick count observed so far
for _i in 0..num_interrupts {
prefix.push(u32::to_le_bytes(myrand.between(0, min(maxtick, u32::MAX as u64)).try_into().expect("ticks > u32")));
}
do_rerun = true; // without this the randomized input would never be re-executed
}
let mut n : Vec<u8> = [prefix.concat(), suffix].concat();
newinput.bytes_mut().clear();
newinput.bytes_mut().append(&mut n);
}
// InterruptShifterMutator::mutate(&mut mymut, state, &mut input, 0)?;
if do_rerun {
let (_, corpus_idx) = fuzzer.evaluate_input(state, executor, manager, newinput)?;
}
Ok(())
}
}
impl<E, EM, Z> UsesState for MyStateStage<E, EM, Z>
where
E: UsesState<State = Z::State>,
EM: UsesState<State = Z::State>,
Z: Evaluator<E, EM>,
Z::State: HasClientPerfMonitor + HasCorpus + HasRand,
{
type State = Z::State;
}
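A minimal wiring sketch (illustrative, not part of this diff): MyStateStage implements LibAFL's Stage trait, so it can simply be appended to the existing stage tuple; `byte_mutator` and the rest of the fuzzer setup are assumed placeholders.

use libafl::bolts::tuples::tuple_list;
use libafl::stages::StdMutationalStage;
use crate::mutational::MyStateStage;

// `byte_mutator` stands in for whatever scheduled/havoc mutator the fuzzer already uses.
let mut stages = tuple_list!(
    StdMutationalStage::new(byte_mutator),
    MyStateStage::new(), // afterwards, re-place interrupt arrival times based on the recorded trace
);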
@ -93,7 +93,7 @@ fn trigger_collection(emulator: &Emulator, h: &QemuSystemStateHelper) {
    let mut systemstate = RawFreeRTOSSystemState::default();
    unsafe {
        // TODO: investigate why can_do_io is not set sometimes, as this is just a workaround
-       let c = emulator.current_cpu().unwrap();
+       let c = emulator.cpu_from_index(0);
        let can_do_io = (*c.raw_ptr()).can_do_io;
        (*c.raw_ptr()).can_do_io = 1;
        systemstate.qemu_tick = emu::icount_get_raw();
@ -14,13 +14,13 @@ pub mod helpers;
pub mod observers;
pub mod feedbacks;
pub mod graph;
-// pub mod mutators;
-#[cfg(feature = "fuzz_interrupt")]
-pub const IRQ_INPUT_BYTES_NUMBER : u32 = 2; // Offset for interrupt bytes
-#[cfg(not(feature = "fuzz_interrupt"))]
-pub const IRQ_INPUT_BYTES_NUMBER : u32 = 0; // Offset for interrupt bytes
-pub const IRQ_INPUT_OFFSET : u32 = 347780; // Tick offset for app code start
+pub mod schedulers;
+// #[cfg(feature = "fuzz_interrupt")]
+// pub const IRQ_INPUT_BYTES_NUMBER : u32 = 2; // Offset for interrupt bytes
+// #[cfg(not(feature = "fuzz_interrupt"))]
+// pub const IRQ_INPUT_BYTES_NUMBER : u32 = 0; // Offset for interrupt bytes
+// pub const IRQ_INPUT_OFFSET : u32 = 347780; // Tick offset for app code start
// Constants
const NUM_PRIOS: usize = 5;
@ -53,9 +53,10 @@ pub struct RefinedTCB {
impl Hash for RefinedTCB {
    fn hash<H: Hasher>(&self, state: &mut H) {
        self.task_name.hash(state);
-       // self.priority.hash(state);
-       // self.mutexes_held.hash(state);
-       // self.notify_state.hash(state);
+       self.priority.hash(state);
+       self.mutexes_held.hash(state);
+       #[cfg(not(feature = "no_hash_state"))]
+       self.notify_state.hash(state);
        // self.notify_value.hash(state);
    }
}
@ -112,7 +113,7 @@ impl Hash for RefinedFreeRTOSSystemState {
    fn hash<H: Hasher>(&self, state: &mut H) {
        self.current_task.hash(state);
        self.ready_list_after.hash(state);
-       self.last_pc.hash(state);
+       // self.last_pc.hash(state);
    }
}
impl RefinedFreeRTOSSystemState {
@ -124,14 +125,15 @@ impl RefinedFreeRTOSSystemState {
// Wrapper around Vec<RefinedFreeRTOSSystemState> to attach as Metadata
#[derive(Debug, Default, Serialize, Deserialize, Clone)]
pub struct FreeRTOSSystemStateMetadata {
-   inner: Vec<RefinedFreeRTOSSystemState>,
+   pub inner: Vec<RefinedFreeRTOSSystemState>,
+   trace_length: usize,
    indices: Vec<usize>, // Hashed enumeration of States
    tcref: isize,
}
impl FreeRTOSSystemStateMetadata {
    pub fn new(inner: Vec<RefinedFreeRTOSSystemState>) -> Self{
        let tmp = inner.iter().enumerate().map(|x| compute_hash(x) as usize).collect();
-       Self {inner: inner, indices: tmp, tcref: 0}
+       Self {trace_length: inner.len(), inner: inner, indices: tmp, tcref: 0}
    }
}
pub fn compute_hash<T>(obj: T) -> u64
@ -1,119 +0,0 @@
use crate::systemstate::graph::SysGraphMetadata;
use crate::systemstate::graph::SysGraphNode;
use crate::systemstate::IRQ_INPUT_OFFSET;
use crate::systemstate::IRQ_INPUT_BYTES_NUMBER;
use crate::systemstate::graph::SysGraphFeedbackState;
use libafl::inputs::HasBytesVec;
use libafl::bolts::rands::RandomSeed;
use libafl::bolts::rands::StdRand;
use libafl::mutators::Mutator;
use libafl::mutators::MutationResult;
use libafl::prelude::UsesInput;
use core::marker::PhantomData;
use libafl::state::HasCorpus;
use libafl::state::HasSolutions;
use libafl::state::HasRand;
use libafl::bolts::tuples::MatchName;
use libafl::bolts::tuples::Named;
use libafl::Error;
use libafl::{inputs::Input, state::HasMetadata};
use super::FreeRTOSSystemStateMetadata;
use libafl::bolts::rands::Rand;
//=============================== Interrupt
/// Sets up the interrupt to a random block in the trace. Works for both state and graph metadata
pub struct InterruptShifterMutator<S>
where
S: UsesInput,
{
phantom: PhantomData<S>,
}
impl<S> InterruptShifterMutator<S>
where
S: UsesInput,
{
pub fn new() -> Self {
InterruptShifterMutator{phantom: PhantomData}
}
}
impl<S> Mutator<S> for InterruptShifterMutator<S>
where
S: UsesInput,
{
fn mutate(
&mut self,
state: &mut S,
input: &mut S::Input,
_stage_idx: i32
) -> Result<MutationResult, Error>
{
// need our own random generator, because borrowing rules
let mut myrand = StdRand::new();
let tmp = &mut state.rand_mut();
myrand.set_seed(tmp.next());
drop(tmp);
let target_bytes = input.bytes_mut();
let mut target_tick = 0;
#[cfg(feature = "sched_state")]
{
let tmp = state.metadata().get::<FreeRTOSSystemStateMetadata>();
if tmp.is_none() { // if there are no metadata it was probably not interesting anyways
return Ok(MutationResult::Skipped);
}
let trace =tmp.expect("FreeRTOSSystemStateMetadata not found");
let target_block = myrand.choose(trace.inner.iter());
target_tick = myrand.between(target_block.start_tick,target_block.end_tick)-IRQ_INPUT_OFFSET as u64;
}
#[cfg(feature = "sched_state")]
{
let feedbackstate = state
.feedback_states()
.match_name::<SysGraphFeedbackState>("SysMap")
.unwrap();
let g = &feedbackstate.graph;
let tmp = state.metadata().get::<SysGraphMetadata>();
if tmp.is_none() { // if there are no metadata it was probably not interesting anyways
return Ok(MutationResult::Skipped);
}
let trace = tmp.expect("SysGraphMetadata not found");
let target_block : &SysGraphNode = &g[*myrand.choose(trace.inner.iter())];
target_tick = match target_block.variants.iter().find(|x| &x.input == target_bytes) {
Some(s) => myrand.between(s.start_tick,s.end_tick)-IRQ_INPUT_OFFSET as u64,
None => myrand.between(target_block.variants[0].start_tick,target_block.variants[0].end_tick)-IRQ_INPUT_OFFSET as u64,
};
}
if target_bytes.len() > IRQ_INPUT_BYTES_NUMBER as usize && IRQ_INPUT_BYTES_NUMBER > 0 {
for i in 0..IRQ_INPUT_BYTES_NUMBER as usize {
target_bytes[i] = u64::to_le_bytes(target_tick)[i];
}
return Ok(MutationResult::Mutated);
} else {
return Ok(MutationResult::Skipped);
}
}
fn post_exec(
&mut self,
_state: &mut S,
_stage_idx: i32,
_corpus_idx: Option<usize>
) -> Result<(), Error> {
Ok(())
}
}
impl<S> Named for InterruptShifterMutator<S>
where
S: UsesInput,
{
fn name(&self) -> &str {
"InterruptShifterMutator"
}
}
@ -1,4 +1,4 @@
-use crate::systemstate::IRQ_INPUT_BYTES_NUMBER;
+// use crate::systemstate::IRQ_INPUT_BYTES_NUMBER;
use libafl::prelude::{ExitKind, AsSlice};
use libafl::{inputs::HasTargetBytes, prelude::UsesInput};
use libafl::bolts::HasLen;
@ -124,7 +124,7 @@ for mut i in input.drain(..) {
    start_tick: start_tick,
    end_tick: i.qemu_tick,
    ready_list_after: collector,
-   input_counter: i.input_counter+IRQ_INPUT_BYTES_NUMBER,
+   input_counter: i.input_counter, //+IRQ_INPUT_BYTES_NUMBER,
    last_pc: i.last_pc,
});
start_tick=i.qemu_tick;
@ -0,0 +1,267 @@
//! Corpus schedulers for system-state fuzzing: a wrapper scheduler that prefers
//! testcases with long system-state traces, and a generation-based scheduler that
//! only keeps the best-scoring entries of each generation.
use core::{marker::PhantomData};
use std::{cmp::{max, min}, mem::swap, borrow::BorrowMut};
use serde::{Deserialize, Serialize};
use libafl::{
bolts::{rands::Rand, serdeany::SerdeAny, AsSlice, HasRefCnt},
corpus::{Corpus, Testcase},
inputs::UsesInput,
schedulers::{Scheduler, TestcaseScore, minimizer::DEFAULT_SKIP_NON_FAVORED_PROB },
state::{HasCorpus, HasMetadata, HasRand, UsesState, State},
Error, SerdeAny, prelude::HasLen,
};
use crate::worst::MaxTimeFavFactor;
use super::FreeRTOSSystemStateMetadata;
/// State metadata tracking the length of the longest system-state trace seen so far
#[derive(Debug, Serialize, Deserialize, SerdeAny, Default)]
pub struct LongestTracesMetadata {
/// Length of the longest trace in the corpus
pub max_trace_length: usize,
}
impl LongestTracesMetadata {
fn new(l : usize) -> Self {
Self {max_trace_length: l}
}
}
/// The [`LongestTraceScheduler`] wraps a base scheduler and probabilistically skips
/// testcases whose recorded system-state trace is short compared to the longest trace
/// seen so far, prioritizing inputs that exercise long schedules
#[derive(Debug, Clone)]
pub struct LongestTraceScheduler<CS> {
base: CS,
skip_non_favored_prob: u64,
}
impl<CS> UsesState for LongestTraceScheduler<CS>
where
CS: UsesState,
{
type State = CS::State;
}
impl<CS> Scheduler for LongestTraceScheduler<CS>
where
CS: Scheduler,
CS::State: HasCorpus + HasMetadata + HasRand,
{
/// Add an entry to the corpus, updating the longest-trace statistic
fn on_add(&self, state: &mut CS::State, idx: usize) -> Result<(), Error> {
let l = state.corpus()
.get(idx)?
.borrow()
.metadata()
.get::<FreeRTOSSystemStateMetadata>().map_or(0, |x| x.trace_length);
self.get_update_trace_length(state,l);
self.base.on_add(state, idx)
}
/// Replaces the testcase at the given idx
fn on_replace(
&self,
state: &mut CS::State,
idx: usize,
testcase: &Testcase<<CS::State as UsesInput>::Input>,
) -> Result<(), Error> {
let l = state.corpus()
.get(idx)?
.borrow()
.metadata()
.get::<FreeRTOSSystemStateMetadata>().map_or(0, |x| x.trace_length);
self.get_update_trace_length(state, l);
self.base.on_replace(state, idx, testcase)
}
/// Removes an entry from the corpus, delegating to the base scheduler
fn on_remove(
&self,
state: &mut CS::State,
idx: usize,
testcase: &Option<Testcase<<CS::State as UsesInput>::Input>>,
) -> Result<(), Error> {
self.base.on_remove(state, idx, testcase)?;
Ok(())
}
/// Gets the next entry
fn next(&self, state: &mut CS::State) -> Result<usize, Error> {
let mut idx = self.base.next(state)?;
while {
let l = state.corpus()
.get(idx)?
.borrow()
.metadata()
.get::<FreeRTOSSystemStateMetadata>().map_or(0, |x| x.trace_length);
let m = self.get_update_trace_length(state,l);
state.rand_mut().below(m) > l as u64
} && state.rand_mut().below(100) < self.skip_non_favored_prob
{
idx = self.base.next(state)?;
}
Ok(idx)
}
}
impl<CS> LongestTraceScheduler<CS>
where
CS: Scheduler,
CS::State: HasCorpus + HasMetadata + HasRand,
{
pub fn get_update_trace_length(&self, state: &mut CS::State, par: usize) -> u64 {
// Create a new top rated meta if not existing
if let Some(td) = state.metadata_mut().get_mut::<LongestTracesMetadata>() {
let m = max(td.max_trace_length, par);
td.max_trace_length = m;
m as u64
} else {
state.add_metadata(LongestTracesMetadata::new(par));
par as u64
}
}
pub fn new(base: CS) -> Self {
Self {
base,
skip_non_favored_prob: DEFAULT_SKIP_NON_FAVORED_PROB,
}
}
}
//==========================================================================================
/// State metadata holding the current and next generation of corpus entries as (corpus index, fitness) pairs
#[derive(Debug, Serialize, Deserialize, SerdeAny, Default)]
pub struct GeneticMetadata {
pub current_gen: Vec<(usize, f64)>,
pub current_cursor: usize,
pub next_gen: Vec<(usize, f64)>,
pub gen: usize
}
impl GeneticMetadata {
fn new(current_gen: Vec<(usize, f64)>, next_gen: Vec<(usize, f64)>) -> Self {
Self {current_gen, current_cursor: 0, next_gen, gen: 0}
}
}
#[derive(Debug, Clone)]
pub struct GenerationScheduler<S> {
phantom: PhantomData<S>,
gen_size: usize,
}
impl<S> UsesState for GenerationScheduler<S>
where
S: UsesInput,
{
type State = S;
}
impl<S> Scheduler for GenerationScheduler<S>
where
S: HasCorpus + HasMetadata,
S::Input: HasLen,
{
/// Get the next entry of the current generation; once it is exhausted, promote the next
/// generation: sort it by FavFactor, keep the best `gen_size` entries, remove the rest
/// from the corpus and return the first survivor
fn next(&self, state: &mut Self::State) -> Result<usize, Error> {
let mut to_remove : Vec<(usize, f64)> = vec![];
let mut to_return : usize = 0;
let c = state.corpus().count();
let gm = state.metadata_mut().get_mut::<GeneticMetadata>().expect("GeneticMetadata missing, no testcase was added yet");
// println!("index: {} curr: {:?} next: {:?} gen: {} corp: {}", gm.current_cursor, gm.current_gen.len(), gm.next_gen.len(), gm.gen,
// c);
match gm.current_gen.get(gm.current_cursor) {
Some(c) => {
gm.current_cursor+=1;
// println!("normal next: {}", (*c).0);
return Ok((*c).0)
},
None => {
swap(&mut to_remove, &mut gm.current_gen);
swap(&mut gm.next_gen, &mut gm.current_gen);
gm.current_gen.sort_by(|a, b| a.1.partial_cmp(&b.1).unwrap());
// gm.current_gen.reverse();
if gm.current_gen.len() == 0 {panic!("Corpus is empty");}
let d : Vec<(usize, f64)> = gm.current_gen.drain(min(gm.current_gen.len(), self.gen_size)..).collect();
to_remove.extend(d);
// move all indices to the left, since all other indices will be deleted
gm.current_gen.sort_by(|a,b| a.0.cmp(&(*b).0)); // in order of the corpus index
for i in 0..gm.current_gen.len() {
gm.current_gen[i] = (i, gm.current_gen[i].1);
}
to_return = gm.current_gen.get(0).unwrap().0;
gm.current_cursor=1;
gm.gen+=1;
}
};
// removing these elements will move all indices left by to_remove.len()
to_remove.sort_by(|x,y| x.0.cmp(&(*y).0));
to_remove.reverse();
for i in to_remove {
state.corpus_mut().remove(i.0).unwrap();
}
// println!("switch next: {to_return}");
return Ok(to_return);
}
/// Add the new input to the next generation
fn on_add(
&self,
state: &mut Self::State,
idx: usize
) -> Result<(), Error> {
// println!("On Add {idx}");
let mut tc = state.corpus_mut().get(idx).unwrap().borrow_mut().clone();
let ff = MaxTimeFavFactor::compute(&mut tc, state).unwrap();
if let Some(gm) = state.metadata_mut().get_mut::<GeneticMetadata>() {
gm.next_gen.push((idx,ff));
} else {
state.add_metadata(GeneticMetadata::new(vec![], vec![(idx,ff)]));
}
Ok(())
}
fn on_replace(
&self,
_state: &mut Self::State,
_idx: usize,
_prev: &Testcase<<Self::State as UsesInput>::Input>
) -> Result<(), Error> {
// println!("On Replace {_idx}");
Ok(())
}
fn on_remove(
&self,
state: &mut Self::State,
idx: usize,
_testcase: &Option<Testcase<<Self::State as UsesInput>::Input>>
) -> Result<(), Error> {
// println!("On Remove {idx}");
if let Some(gm) = state.metadata_mut().get_mut::<GeneticMetadata>() {
gm.next_gen.retain(|x| x.0 != idx);
gm.current_gen.retain(|x| x.0 != idx);
} else {
state.add_metadata(GeneticMetadata::new(vec![], vec![]));
}
Ok(())
}
}
impl<S> GenerationScheduler<S>
{
pub fn new() -> Self {
Self {
phantom: PhantomData,
gen_size: 100,
}
}
}
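Sketch of how these schedulers could be plugged into the fuzzer (assumed usage, not taken from this diff; QueueScheduler, feedback and objective are placeholders from a typical LibAFL setup).

use libafl::prelude::StdFuzzer;
use libafl::schedulers::QueueScheduler;
use crate::systemstate::schedulers::{GenerationScheduler, LongestTraceScheduler};

// Prefer corpus entries with long system-state traces on top of plain FIFO scheduling ...
let scheduler = LongestTraceScheduler::new(QueueScheduler::new());
// ... or switch to generation-based selection, which keeps the best entries by MaxTimeFavFactor:
// let scheduler = GenerationScheduler::new();
let mut fuzzer = StdFuzzer::new(scheduler, feedback, objective);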
@ -54,10 +54,11 @@ where
    S: HasCorpus + HasMetadata,
    S::Input: HasLen,
{
-   fn compute(entry: &mut Testcase<S::Input>, state: &S) -> Result<f64, Error> {
+   fn compute(entry: &mut Testcase<<S as UsesInput>::Input>, state: &S) -> Result<f64, Error> {
        // TODO maybe enforce entry.exec_time().is_some()
-       let execs_per_hour = 3600.0/entry.exec_time().expect("testcase.exec_time is needed for scheduler").as_secs_f64();
-       Ok(execs_per_hour)
+       let et = entry.exec_time().expect("testcase.exec_time is needed for scheduler");
+       let tns : i64 = et.as_nanos().try_into().expect("failed to convert time");
+       Ok(-tns as f64)
    }
}
@ -332,3 +333,49 @@ where
        Self {longest_time: 0, last_is_longest: false}
    }
}
/// A feedback that reports every execution as interesting, so all cases (and their
/// execution times) are kept
#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct AlwaysTrueFeedback {}
impl<S> Feedback<S> for AlwaysTrueFeedback
where
S: UsesInput + HasClientPerfMonitor,
{
#[allow(clippy::wrong_self_convention)]
fn is_interesting<EM, OT>(
&mut self,
_state: &mut S,
_manager: &mut EM,
_input: &S::Input,
_observers: &OT,
_exit_kind: &ExitKind,
) -> Result<bool, Error>
where
EM: EventFirer<State = S>,
OT: ObserversTuple<S>,
{
Ok(true)
}
}
impl Named for AlwaysTrueFeedback
{
#[inline]
fn name(&self) -> &str {
"AlwaysTrueFeedback"
}
}
impl AlwaysTrueFeedback {
/// Creates a new [`AlwaysTrueFeedback`]
#[must_use]
pub fn new() -> Self {
Self {}
}
}
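Hypothetical usage (not part of this diff): combined with a TimeFeedback so that every execution is kept in the corpus together with its measured runtime.

use libafl::feedback_or;
use libafl::feedbacks::TimeFeedback;
use libafl::observers::TimeObserver;
use crate::worst::AlwaysTrueFeedback;

let time_observer = TimeObserver::new("time");
// Every run is reported as interesting; TimeFeedback only attaches the measured execution time.
let mut feedback = feedback_or!(
    AlwaysTrueFeedback::new(),
    TimeFeedback::with_observer(&time_observer)
);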
@ -314,6 +314,9 @@ extern "C" {
    fn libafl_qemu_set_breakpoint(addr: u64) -> i32;
    fn libafl_qemu_remove_breakpoint(addr: u64) -> i32;
+   pub fn libafl_qemu_set_native_breakpoint(addr: u32);
+   pub fn libafl_qemu_remove_native_breakpoint(addr: u32);
    fn libafl_flush_jit();
    fn libafl_qemu_trigger_breakpoint(cpu: CPUStatePtr);
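For reference, a hedged sketch of how the new native-breakpoint bindings are used (mirroring the fuzzer.rs change above; `main_addr` is a placeholder target address).

// The native variants bypass LibAFL's breakpoint bookkeeping and program QEMU directly.
unsafe {
    libafl_qemu_set_native_breakpoint(main_addr); // stop once the target reaches main
    // ... run the emulator ...
    libafl_qemu_remove_native_breakpoint(main_addr);
}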