Compare commits
952 Commits
952 commits are listed in this range, the first shown being `5a2652b984` and the last `08be5f732e`; the per-commit author, date, and message cells of the listing are not reproduced here.
.devcontainer/devcontainer.json
@@ -6,22 +6,27 @@
"context": "..",
// Update the 'dockerFile' property if you aren't using the standard 'Dockerfile' filename.
"dockerFile": "../Dockerfile",
// Set *default* container specific settings.json values on container create.
"settings": {},
// Add the IDs of extensions you want installed when the container is created.
"extensions": [
"matklad.rust-analyzer"
],
"customizations": {
"vscode": {
// Add the IDs of extensions you want installed when the container is created.
"extensions": ["matklad.rust-analyzer", "microsoft.Docker"],
// Set *default* container specific settings.json values on container create.
"settings": {
"rust-analyzer.cargo.noDefaultFeatures": true
}
}
},
// Use 'forwardPorts' to make a list of ports inside the container available locally.
// "forwardPorts": [],
// Uncomment the next line to run commands after the container is created - for example installing curl.
// "postCreateCommand": "apt-get update && apt-get install -y curl",
// Install development components that shouldn't be in the main Dockerfile
"postCreateCommand": "rustup component add --toolchain nightly rustfmt clippy llvm-tools-preview && cargo install --locked cargo-make",
// Uncomment when using a ptrace-based debugger like C++, Go, and Rust
"runArgs": [
"--cap-add=SYS_PTRACE",
"--security-opt",
"seccomp=unconfined"
],
]
// Uncomment to use the Docker CLI from inside the container. See https://aka.ms/vscode-remote/samples/docker-from-docker.
// "mounts": [ "source=/var/run/docker.sock,target=/var/run/docker.sock,type=bind" ],
// Uncomment to connect as a non-root user if you've added one. See https://aka.ms/vscode-remote/containers/non-root.
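The devcontainer above moves the rust-analyzer settings under `customizations.vscode`, keeps ptrace capabilities for debugging, and installs nightly components via `postCreateCommand`. If one wanted CI to verify that this container still builds, one option is the `devcontainers/ci` action; the snippet below is only a sketch under that assumption (the job name, action inputs, and smoke command are illustrative and not part of this change set):

```yaml
# Hypothetical job: build the repository's devcontainer and run a smoke command inside it.
# The devcontainers/ci action and its `runCmd` input are used from memory; verify before adopting.
devcontainer-smoke:
  runs-on: ubuntu-latest
  steps:
    - uses: actions/checkout@v3
    - uses: devcontainers/ci@v0.3
      with:
        # Builds .devcontainer/devcontainer.json and runs a quick check inside the container
        runCmd: cargo build -p libafl
```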
.github/ISSUE_TEMPLATE/bug_report.md (vendored, 2 changes)
@@ -13,6 +13,8 @@ Thank you for making LibAFL better!

**Describe the bug**
A clear and concise description of what the bug is.
If you want to present the backtrace, don't forget to run with `errors_backtrace` feature and log from `RUST_LOG`
In addition, please tell us what is your fuzzer's Cargo.toml

**To Reproduce**
Steps to reproduce the behavior:
.github/dependabot.yml (vendored, new file, 6 lines)
@@ -0,0 +1,6 @@
version: 2
updates:
- package-ecosystem: "cargo"
directory: "/"
schedule:
interval: "daily"
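For context, the Dependabot file added above only watches the root Cargo workspace. A sketch of how it could additionally watch the actions used under `.github/workflows` follows; the `github-actions` entry and the weekly interval are an assumption for illustration, not part of this change:

```yaml
version: 2
updates:
  - package-ecosystem: "cargo"           # as added in this PR
    directory: "/"
    schedule:
      interval: "daily"
  - package-ecosystem: "github-actions"  # assumed extension, not in the diff
    directory: "/"
    schedule:
      interval: "weekly"
```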
.github/workflows/build_and_test.yml (vendored, 720 changes)
@@ -2,25 +2,37 @@ name: build and test

on:
push:
branches: [ main ]
branches: [ main, "pr/**" ]
pull_request:
branches: [ main ]

workflow_dispatch:
merge_group:
env:
CARGO_TERM_COLOR: always
CARGO_NET_GIT_FETCH_WITH_CLI: true
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true

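The hunk above configures push and pull-request triggers, a `merge_group:` trigger, and a concurrency group keyed on workflow and ref, so a newer push cancels an in-flight run for the same branch. A minimal self-contained workflow using the same pattern, for reference (workflow and job names are placeholders, not part of the diff):

```yaml
name: example-ci                # placeholder name
on:
  push:
    branches: [ main, "pr/**" ]
  pull_request:
    branches: [ main ]
  merge_group:
concurrency:
  # one run per workflow+ref; a new push cancels the previous run
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true
jobs:
  build:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
      - run: cargo build --verbose
```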
jobs:
common:
strategy:
fail-fast: false
matrix:
os: [ubuntu-latest, windows-latest, macOS-latest]
os: [ ubuntu-latest, windows-latest, macOS-latest ]
runs-on: ${{ matrix.os }}
steps:
- name: Install mimetype
if: runner.os == 'Linux'
run: sudo apt-get install libfile-mimeinfo-perl
- uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: nightly
override: true
- name: Install mimetype
if: runner.os == 'Linux'
run: sudo apt-get install libfile-mimeinfo-perl
- name: install mdbook
uses: baptiste0928/cargo-install@v1.3.0
with:
@@ -31,16 +43,17 @@ jobs:
crate: mdbook-linkcheck
- uses: actions/checkout@v3
- uses: Swatinem/rust-cache@v2
- name: Install mimetype
with: { shared-key: "ubuntu" }
if: runner.os == 'Linux'
run: sudo apt-get install libfile-mimeinfo-perl
- uses: Swatinem/rust-cache@v2
if: runner.os != 'Linux'
- name: Check for binary blobs
if: runner.os == 'Linux'
run: ./scripts/check_for_blobs.sh
- name: default nightly
run: rustup default nightly
- name: Build libafl debug
run: cargo build -p libafl
- name: Build the book
run: cd docs && mdbook build
- name: Test the book
# TODO: fix books test fail with updated windows-rs
if: runner.os != 'Windows'
@@ -49,244 +62,485 @@ jobs:
run: cargo test
- name: Test libafl no_std
run: cd libafl && cargo test --no-default-features
- name: Test libafl_bolts no_std no_alloc
run: cd libafl_bolts && cargo test --no-default-features
- name: Test libafl_targets no_std
run: cd libafl_targets && cargo test --no-default-features

llvm-tester:
runs-on: ubuntu-22.04
continue-on-error: true
strategy:
matrix:
llvm-version: [ "16", "17" ] # Add 18 when KyleMayes/install-llvm-action enables it
steps:
- name: Remove Dotnet & Haskell
run: rm -rf /usr/share/dotnet && rm -rf /opt/ghc
- uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: stable
- uses: actions/checkout@v3
- uses: Swatinem/rust-cache@v2
with: { shared-key: "llvm-tester" }
- name: Install LLVM and Clang
uses: KyleMayes/install-llvm-action@v2
with:
version: "${{matrix.llvm-version}}"
- name: Build and test with llvm-${{ matrix.llvm-version }}
run: pwd && ls & cd libafl_cc && cargo build --release

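The `llvm-tester` job above marks the whole job `continue-on-error: true` while iterating over an LLVM version matrix, and the inline comment anticipates adding LLVM 18 later. A common way to mark only specific versions as allowed to fail, rather than the whole job, is the include/experimental matrix pattern sketched below (hypothetical, not part of this diff; the "18" entry is the assumed future addition):

```yaml
strategy:
  fail-fast: false
  matrix:
    llvm-version: [ "16", "17" ]
    experimental: [ false ]
    include:
      - llvm-version: "18"      # assumed future entry
        experimental: true
runs-on: ubuntu-22.04
# only the experimental matrix entry may fail without failing the workflow
continue-on-error: ${{ matrix.experimental }}
```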
ubuntu-doc-build:
runs-on: ubuntu-22.04
steps:
- uses: actions/checkout@v3
- uses: ./.github/workflows/ubuntu-prepare
- uses: Swatinem/rust-cache@v2
# ---- doc check ----
- name: Build Docs
run: RUSTFLAGS="--cfg docsrs" cargo +nightly doc --all-features --no-deps

ubuntu-doc-test:
runs-on: ubuntu-22.04
steps:
- uses: actions/checkout@v3
- uses: ./.github/workflows/ubuntu-prepare
- uses: Swatinem/rust-cache@v2
# ---- doc check ----
- name: Test Docs
run: RUSTFLAGS="--cfg docsrs" cargo +nightly test --doc --all-features

ubuntu-miri:
runs-on: ubuntu-22.04
needs: ubuntu
steps:
- uses: actions/checkout@v3
- uses: ./.github/workflows/ubuntu-prepare
- uses: Swatinem/rust-cache@v2
- name: Add nightly clippy
run: rustup toolchain install nightly --component miri --allow-downgrade
# --- miri undefined behavior test --
- name: Run miri tests
run: RUST_BACKTRACE=1 MIRIFLAGS="-Zmiri-disable-isolation" cargo +nightly miri test

ubuntu:
runs-on: ubuntu-22.04
steps:
- name: Remove Dotnet & Haskell
run: rm -rf /usr/share/dotnet && rm -rf /opt/ghc
- uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: stable
- name: set mold linker as default linker
uses: rui314/setup-mold@v1
- name: Install and cache deps
uses: awalsh128/cache-apt-pkgs-action@v1.1.0
with:
packages: llvm llvm-dev clang ninja-build clang-format-13 shellcheck libgtk-3-dev gcc-arm-linux-gnueabi g++-arm-linux-gnueabi libslirp-dev
- name: get clang version
run: command -v llvm-config && clang -v
- name: Install cargo-hack
run: curl -LsSf https://github.com/taiki-e/cargo-hack/releases/latest/download/cargo-hack-x86_64-unknown-linux-gnu.tar.gz | tar xzf - -C ~/.cargo/bin
- name: Add nightly rustfmt and clippy
run: rustup toolchain install nightly --component rustfmt --component clippy --allow-downgrade
- uses: actions/checkout@v3
- uses: Swatinem/rust-cache@v2
- name: Remove Dotnet & Haskell
run: rm -rf /usr/share/dotnet && rm -rf /opt/ghc
- uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: stable
- name: Remove existing clang and LLVM
run: sudo apt purge llvm* clang* lld* lldb* opt*
- name: Install and cache deps
run: sudo apt update && sudo apt install ninja-build shellcheck libgtk-3-dev gcc-arm-linux-gnueabi g++-arm-linux-gnueabi libslirp-dev libz3-dev
- name: Add nightly clippy
run: rustup toolchain install nightly --component clippy --component miri --allow-downgrade
- uses: actions/checkout@v3
- uses: Swatinem/rust-cache@v2
with: { shared-key: "ubuntu" }
- name: Install LLVM and Clang
uses: KyleMayes/install-llvm-action@v2
with:
directory: ${{ runner.temp }}/llvm
version: 17
# pcguard edges and pcguard hitcounts are not compatible and we need to build them seperately
- name: Check pcguard edges
run: cargo check --features=sancov_pcguard_edges
- name: run shellcheck
run: shellcheck ./scripts/*.sh
# ---- build normal and examples ----
- name: Run a normal build
run: cargo build --verbose
- name: Build examples
run: cargo build --examples --verbose

# ---- format check ----
# pcguard edges and pcguard hitcounts are not compatible and we need to build them seperately
- name: Check pcguard edges
run: cargo check --features=sancov_pcguard_edges
- name: Format
run: cargo fmt -- --check
- name: Run clang-format style check for C/C++ programs.
run: clang-format-13 -n -Werror --style=file $(find . -type f \( -name '*.cpp' -o -iname '*.hpp' -o -name '*.cc' -o -name '*.cxx' -o -name '*.cc' -o -name '*.h' \) | grep -v '/target/' | grep -v 'libpng-1\.6\.37' | grep -v 'stb_image\.h' | grep -v 'dlmalloc\.c' | grep -v 'QEMU-Nyx')
- name: run shellcheck
run: shellcheck ./scripts/*.sh
- name: Run clippy
run: ./scripts/clippy.sh
ubuntu-clippy:
runs-on: ubuntu-22.04
steps:
- name: Remove Dotnet & Haskell
run: rm -rf /usr/share/dotnet && rm -rf /opt/ghc
- uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: stable

# ---- doc check ----
- name: Build Docs
run: cargo doc
- name: Test Docs
run: cargo +nightly test --doc --all-features
- name: Install and cache deps
run: sudo apt update && sudo apt install ninja-build shellcheck libgtk-3-dev gcc-arm-linux-gnueabi g++-arm-linux-gnueabi libslirp-dev libz3-dev
- name: Add nightly clippy
run: rustup toolchain install nightly --component clippy --allow-downgrade && rustup default nightly
- uses: actions/checkout@v3
- uses: Swatinem/rust-cache@v2
with: { shared-key: "ubuntu" }
- name: Install LLVM and Clang
uses: KyleMayes/install-llvm-action@v2
with:
directory: ${{ runner.temp }}/llvm
version: 17
- name: Run clippy
run: ./scripts/clippy.sh
# --- test embedding the libafl_libfuzzer_runtime library
# Fix me plz
# - name: Test Build libafl_libfuzzer with embed
# run: cargo +nightly test --features=embed-runtime --manifest-path libafl_libfuzzer/Cargo.toml

# ---- build and feature check ----
- name: Run a normal build
run: cargo build --verbose
# cargo-hack's --feature-powerset would be nice here but libafl has a too many knobs
- name: Check each feature
# Skipping `python` as it has to be built with the `maturin` tool
# `agpl`, `nautilus` require nightly
# `sancov_pcguard_edges` is tested seperately
run: cargo hack check --each-feature --clean-per-run --exclude-features=prelude,agpl,nautilus,python,sancov_pcguard_edges,arm,aarch64,i386,be,systemmode --no-dev-deps
- name: Check nightly features
run: cargo +nightly check --features=agpl && cargo +nightly check --features=nautilus
- name: Build examples
run: cargo build --examples --verbose
ubuntu-check:
runs-on: ubuntu-22.04
needs: ubuntu
strategy:
matrix:
instance_idx: [ "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12", "13", "14", "15", "16", "17" ]
steps:
- uses: actions/checkout@v3
- uses: ./.github/workflows/ubuntu-prepare
- uses: Swatinem/rust-cache@v2
with: { shared-key: "ubuntu" }
# ---- build and feature check ----
# cargo-hack's --feature-powerset would be nice here but libafl has a too many knobs
- name: Check each feature
# Skipping `python` as it has to be built with the `maturin` tool
# `sancov_pcguard_edges` is tested seperatelyc
run: python3 ./scripts/parallellize_cargo_check.py ${{ matrix.instance_idx }}

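The `ubuntu-check` job above shards the per-feature check over 18 matrix instances via `scripts/parallellize_cargo_check.py` (whose internals are not shown in this diff). For reference, the unsharded check that the `ubuntu`/`ubuntu-clippy` hunk runs is the `cargo hack` invocation; as a single step it would read roughly as follows, with the flags copied from the hunk above:

```yaml
- name: Check each feature (unsharded reference)
  # Skips `python` (needs maturin), nightly-only features, and
  # sancov_pcguard_edges, which is checked separately.
  run: >
    cargo hack check --each-feature --clean-per-run
    --exclude-features=prelude,agpl,nautilus,python,sancov_pcguard_edges,arm,aarch64,i386,be,systemmode
    --no-dev-deps
```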
ubuntu-concolic:
runs-on: ubuntu-latest
needs: ubuntu
steps:
- uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: stable
- uses: actions/checkout@v3
- uses: Swatinem/rust-cache@v2
- name: Install smoke test deps
run: sudo ./libafl_concolic/test/smoke_test_ubuntu_deps.sh
- name: Run smoke test
run: ./libafl_concolic/test/smoke_test.sh
- uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: stable
- uses: actions/checkout@v3
- uses: Swatinem/rust-cache@v2
with: { shared-key: "ubuntu" }
- name: Install smoke test deps
run: sudo ./libafl_concolic/test/smoke_test_ubuntu_deps.sh
- name: Run smoke test
run: ./libafl_concolic/test/smoke_test.sh

bindings:
python-bindings:
runs-on: ubuntu-latest
steps:
- uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: stable
- name: set mold linker as default linker
uses: rui314/setup-mold@v1
- name: Install deps
run: sudo apt-get install -y llvm llvm-dev clang ninja-build python3-dev python3-pip python3-venv
- name: Install maturin
run: python3 -m pip install maturin
- uses: actions/checkout@v3
- uses: Swatinem/rust-cache@v2
- name: Run a maturin build
run: cd ./bindings/pylibafl && maturin build
- uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: stable
- name: Remove existing clang and LLVM
run: sudo apt purge llvm* clang*
- name: Install LLVM and Clang
uses: KyleMayes/install-llvm-action@v2
with:
directory: ${{ runner.temp }}/llvm
version: 17
- name: Install deps
run: sudo apt-get install -y ninja-build python3-dev python3-pip python3-venv libz3-dev
- name: Install maturin
run: python3 -m pip install maturin
- uses: actions/checkout@v3
- uses: Swatinem/rust-cache@v2
- name: Run a maturin build
run: export LLVM_CONFIG=llvm-config-16 && cd ./bindings/pylibafl && python3 -m venv .env && . .env/bin/activate && pip install --upgrade --force-reinstall . && ./test.sh
- name: Run python test
run: . ./bindings/pylibafl/.env/bin/activate # && cd ./fuzzers/python_qemu/ && python3 fuzzer.py 2>&1 | grep "Bye"

cargo-fmt:
runs-on: ubuntu-latest
steps:
- uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: nightly
override: true
components: rustfmt
- uses: actions/checkout@v3
- name: Remove existing clang and LLVM
run: sudo apt purge llvm* clang*
- name: Install LLVM and Clang
uses: KyleMayes/install-llvm-action@v2
with:
directory: ${{ runner.temp }}/llvm
version: 17
- name: Format Check
run: ./scripts/fmt_all.sh check

fuzzers-preflight:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- name: Fuzzer in CI Check
run: ./scripts/check_tested_fuzzers.sh

fuzzers:
needs:
- ubuntu
- fuzzers-preflight
strategy:
matrix:
os: [ubuntu-latest, macos-latest]
os: [ ubuntu-latest ]
fuzzer:
- ./fuzzers/cargo_fuzz
- ./fuzzers/fuzzbench_fork_qemu
- ./fuzzers/libfuzzer_stb_image_sugar
- ./fuzzers/nyx_libxml2_standalone
- ./fuzzers/baby_fuzzer_gramatron
- ./fuzzers/tinyinst_simple
- ./fuzzers/baby_fuzzer_with_forkexecutor
- ./fuzzers/baby_no_std
- ./fuzzers/baby_fuzzer_swap_differential
- ./fuzzers/baby_fuzzer_grimoire
- ./fuzzers/baby_fuzzer
- ./fuzzers/libfuzzer_libpng_launcher
- ./fuzzers/libfuzzer_libpng_accounting
- ./fuzzers/forkserver_libafl_cc
- ./fuzzers/libfuzzer_libpng_tcp_manager
- ./fuzzers/backtrace_baby_fuzzers
- ./fuzzers/fuzzbench_qemu
- ./fuzzers/nyx_libxml2_parallel
- ./fuzzers/frida_gdiplus
- ./fuzzers/libfuzzer_stb_image_concolic
- ./fuzzers/nautilus_sync
- ./fuzzers/push_harness
- ./fuzzers/libfuzzer_libpng_centralized
- ./fuzzers/baby_fuzzer_nautilus
- ./fuzzers/fuzzbench_text
- ./fuzzers/libfuzzer_libpng_cmin
- ./fuzzers/forkserver_simple
- ./fuzzers/baby_fuzzer_unicode
- ./fuzzers/libfuzzer_libpng_norestart
- ./fuzzers/baby_fuzzer_multi
- ./fuzzers/libafl_atheris
- ./fuzzers/frida_libpng
- ./fuzzers/fuzzbench_ctx
- ./fuzzers/fuzzbench_forkserver_cmplog
- ./fuzzers/push_stage_harness
- ./fuzzers/libfuzzer_libmozjpeg
- ./fuzzers/libfuzzer_libpng_aflpp_ui
- ./fuzzers/libfuzzer_libpng
- ./fuzzers/baby_fuzzer_wasm
- ./fuzzers/fuzzbench
- ./fuzzers/libfuzzer_stb_image
- ./fuzzers/fuzzbench_forkserver
# - ./fuzzers/libfuzzer_windows_asan
# - ./fuzzers/dynamic_analysis
- ./fuzzers/baby_fuzzer_minimizing
- ./fuzzers/frida_executable_libpng
- ./fuzzers/tutorial
- ./fuzzers/baby_fuzzer_tokens
- ./fuzzers/backtrace_baby_fuzzers/rust_code_with_inprocess_executor
- ./fuzzers/backtrace_baby_fuzzers/c_code_with_fork_executor
- ./fuzzers/backtrace_baby_fuzzers/command_executor
- ./fuzzers/backtrace_baby_fuzzers/forkserver_executor
- ./fuzzers/backtrace_baby_fuzzers/c_code_with_inprocess_executor
- ./fuzzers/backtrace_baby_fuzzers/rust_code_with_fork_executor
runs-on: ${{ matrix.os }}
steps:
- uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: stable
- name: set mold linker as default linker
if: runner.os == 'Linux' # mold only support linux until now
uses: rui314/setup-mold@v1
- name: Add nightly rustfmt and clippy
run: rustup toolchain install nightly --component rustfmt --component clippy --allow-downgrade
- name: Add no_std toolchain
run: rustup toolchain install nightly-x86_64-unknown-linux-gnu ; rustup component add rust-src --toolchain nightly-x86_64-unknown-linux-gnu
- name: Install python
if: runner.os == 'macOS'
run: brew install --force-bottle --overwrite python@3.11
- uses: lyricwulf/abc@v1
with:
# todo: remove afl++-clang when nyx support samcov_pcguard
linux: llvm llvm-dev clang nasm ninja-build gcc-arm-linux-gnueabi g++-arm-linux-gnueabi libgtk-3-dev afl++-clang pax-utils
# update bash for macos to support `declare -A` command`
macos: llvm libpng nasm coreutils z3 bash
- name: pip install
run: python3 -m pip install msgpack jinja2
# Note that nproc needs to have coreutils installed on macOS, so the order of CI commands matters.
- name: enable mult-thread for `make`
run: export MAKEFLAGS="-j$(expr $(nproc) \+ 1)"
- name: install cargo-make
uses: baptiste0928/cargo-install@v1.3.0
with:
crate: cargo-make
- uses: actions/checkout@v3
with:
submodules: true # recursively checkout submodules
- uses: Swatinem/rust-cache@v2
- name: Build and run example fuzzers (Linux)
if: runner.os == 'Linux'
run: ./scripts/test_all_fuzzers.sh
- name: Build and run example fuzzers (macOS)
if: runner.os == 'macOS' # use bash v4
run: /usr/local/bin/bash ./scripts/test_all_fuzzers.sh
- uses: actions/checkout@v3
- uses: ./.github/workflows/fuzzer-tester-prepare
- name: Build and run example fuzzers (Linux)
if: runner.os == 'Linux'
shell: bash
run: RUN_ON_CI=1 LLVM_CONFIG=llvm-config ./scripts/test_fuzzer.sh ${{ matrix.fuzzer }}

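Each matrix entry of the `fuzzers` job above pushes one fuzzer directory through `scripts/test_fuzzer.sh`. Reproducing a single entry outside the matrix is therefore just one step; the fuzzer path below is taken from the list above, and the rest of the environment is assumed to come from the `fuzzer-tester-prepare` composite action:

```yaml
- name: Build and run one example fuzzer
  shell: bash
  # ./fuzzers/baby_fuzzer is one entry from the matrix; any other listed path works the same way
  run: RUN_ON_CI=1 LLVM_CONFIG=llvm-config ./scripts/test_fuzzer.sh ./fuzzers/baby_fuzzer
```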
changes:
runs-on: ubuntu-latest
permissions:
pull-requests: read
outputs:
qemu: ${{ steps.filter.outputs.qemu }}
steps:
- uses: actions/checkout@v3
- uses: dorny/paths-filter@v3
id: filter
with:
filters: |
qemu:
- 'libafl_qemu/**'
- 'fuzzers/*qemu*/**'

fuzzers-qemu:
needs: changes
if: ${{ needs.changes.outputs.qemu == 'true' }}
strategy:
matrix:
os: [ubuntu-latest]
fuzzer:
- ./fuzzers/qemu_cmin
- ./fuzzers/qemu_systemmode
- ./fuzzers/qemu_coverage
- ./fuzzers/qemu_launcher

runs-on: [ self-hosted, qemu ]
container: registry.gitlab.com/qemu-project/qemu/qemu/ubuntu2204:latest
steps:
- uses: actions/checkout@v3
- uses: ./.github/workflows/qemu-fuzzer-tester-prepare
- name: Build and run example QEMU fuzzers (Linux)
if: runner.os == 'Linux'
shell: bash
run: RUN_ON_CI=1 LLVM_CONFIG=llvm-config ./scripts/test_fuzzer.sh ${{ matrix.fuzzer }}

nostd-build:
runs-on: ubuntu-latest
steps:
- uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: nightly
override: true
components: rustfmt, clippy, rust-src
- uses: actions/checkout@v3
- uses: Swatinem/rust-cache@v2
- name: Add targets
run: rustup target add arm-linux-androideabi && rustup target add thumbv6m-none-eabi
- name: Build aarch64-unknown-none
run: cd ./fuzzers/baby_no_std && cargo +nightly build -Zbuild-std=core,alloc --target aarch64-unknown-none -v --release && cd ../..
- name: run x86_64 until panic!
run: cd ./fuzzers/baby_no_std && cargo +nightly run || test $? -ne 0 || exit 1
- name: no_std tests
run: cd ./libafl && cargo test --no-default-features
- name: libafl armv6m-none-eabi (32 bit no_std) clippy
run: cd ./libafl && cargo clippy --target thumbv6m-none-eabi --no-default-features
- uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: nightly
override: true
components: rust-src
- uses: actions/checkout@v3
- uses: Swatinem/rust-cache@v2
- name: Add targets
run: rustup target add arm-linux-androideabi && rustup target add thumbv6m-none-eabi
- name: Build aarch64-unknown-none
run: cd ./fuzzers/baby_no_std && cargo +nightly build -Zbuild-std=core,alloc --target aarch64-unknown-none -v --release && cd ../..
- name: run x86_64 until panic!
run: cd ./fuzzers/baby_no_std && cargo +nightly run || test $? -ne 0 || exit 1
- name: no_std tests
run: cd ./libafl && cargo test --no-default-features

nostd-clippy:
runs-on: ubuntu-latest
steps:
- uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: nightly
override: true
components: clippy, rust-src
- uses: actions/checkout@v3
- uses: Swatinem/rust-cache@v2
- name: Add targets
run: rustup target add arm-linux-androideabi && rustup target add thumbv6m-none-eabi
- name: libafl armv6m-none-eabi (32 bit no_std) clippy
run: cd ./libafl && cargo clippy --target thumbv6m-none-eabi --no-default-features
- name: Build no_std no_alloc bolts
run: cd ./libafl_bolts && cargo +nightly build -Zbuild-std=core --target aarch64-unknown-none --no-default-features -v --release && cd ../

build-docker:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- name: Build docker
run: docker build -t libafl .
- uses: actions/checkout@v3
- name: Build docker
run: docker build -t libafl .

windows:
windows-frida-libpng:
runs-on: windows-latest
needs:
- common
steps:
- uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: stable
- uses: actions/checkout@v3
- uses: Swatinem/rust-cache@v2
- name: Windows Build
run: cargo build --verbose
- name: Run clippy
uses: actions-rs/cargo@v1
with:
command: clippy
- name: Build docs
run: cargo doc
- name: Set LIBCLANG_PATH
run: echo "LIBCLANG_PATH=$((gcm clang).source -replace "clang.exe")" >> $env:GITHUB_ENV
- name: install cargo-make
run: cargo install --force cargo-make
- uses: ilammy/msvc-dev-cmd@v1
- name: Build fuzzers/frida_libpng
run: cd fuzzers/frida_libpng/ && cargo make test
- name: Build fuzzers/frida_gdiplus
run: cd fuzzers/frida_gdiplus/ && cargo make test
- uses: actions/checkout@v3
- uses: ./.github/workflows/windows-tester-prepare
- name: Build fuzzers/frida_libpng
run: cd fuzzers/frida_libpng/ && cargo make test

windows-frida-libfuzzer-stb-image:
runs-on: windows-latest
needs:
- common
steps:
- uses: actions/checkout@v3
- uses: ./.github/workflows/windows-tester-prepare
- name: Build fuzzers/libfuzzer_stb_image
run: cd fuzzers/libfuzzer_stb_image && cargo build --release

windows-frida-gdiplus:
runs-on: windows-latest
needs:
- common
steps:
- uses: actions/checkout@v3
- uses: ./.github/workflows/windows-tester-prepare
- name: Build fuzzers/frida_gdiplus
run: cd fuzzers/frida_gdiplus/ && cargo make test && cargo make test_cmplog

windows-tinyinst-simple:
runs-on: windows-latest
needs:
- common
steps:
- uses: actions/checkout@v3
- uses: ./.github/workflows/windows-tester-prepare
- name: install cxx bridge
run: cargo install cxxbridge-cmd
- name: Build fuzzers/tinyinst_simple
run: cd fuzzers/tinyinst_simple/ && cargo make test

windows-clippy:
runs-on: windows-latest
needs:
- common
steps:
- uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: stable
- uses: actions/checkout@v3
- uses: ./.github/workflows/windows-tester-prepare
- uses: Swatinem/rust-cache@v2
- name: Run real clippy, not the fake one
shell: pwsh
run: .\scripts\clippy.ps1

macos:
runs-on: macOS-latest
steps:
- uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: stable
- name: Add nightly rustfmt and clippy
run: rustup toolchain install nightly --component rustfmt --component clippy --allow-downgrade
- name: Install deps
run: brew install z3 gtk+3
- uses: actions/checkout@v3
- uses: Swatinem/rust-cache@v2
- name: MacOS Build
run: cargo build --verbose
- name: Run clippy
run: ./scripts/clippy.sh
- name: Increase map sizes
run: ./scripts/shmem_limits_macos.sh
- name: Run Tests
run: cargo test
- uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: stable
- name: Add nightly clippy
run: rustup toolchain install nightly --component clippy --allow-downgrade && rustup default nightly
- name: Install deps
run: brew install z3 gtk+3
- name: Install cxxbridge
run: cargo install cxxbridge-cmd
- uses: actions/checkout@v3
- uses: Swatinem/rust-cache@v2
- name: MacOS Build
run: cargo build --verbose
- name: Increase map sizes
run: ./scripts/shmem_limits_macos.sh
- name: Run Tests
run: cargo test
- name: Clippy
run: cargo +nightly clippy --tests --all --exclude libafl_nyx --exclude symcc_runtime --exclude runtime_test

other_targets:
ios:
runs-on: macOS-latest
steps:
- uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: stable
- uses: nttld/setup-ndk@v1
with:
ndk-version: r21e
- name: install ios
run: rustup target add aarch64-apple-ios
- name: install android
run: rustup target add aarch64-linux-android
- name: install cargo ndk
run: cargo install cargo-ndk
- uses: actions/checkout@v3
- uses: Swatinem/rust-cache@v2
- name: Build iOS
run: cargo build --target aarch64-apple-ios
- name: Build Android
run: cargo ndk -t arm64-v8a build --release
- uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: stable
- name: install ios
run: rustup target add aarch64-apple-ios
- uses: actions/checkout@v3
- uses: Swatinem/rust-cache@v2
- name: Build iOS
run: cargo build --target aarch64-apple-ios && cd libafl_frida && cargo build --target aarch64-apple-ios && cd ..

android:
runs-on: ubuntu-22.04
steps:
- uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: stable
- uses: nttld/setup-ndk@v1
with:
ndk-version: r25b
- name: install android
run: rustup target add aarch64-linux-android
- name: install cargo ndk
run: cargo install cargo-ndk
- uses: actions/checkout@v3
- uses: Swatinem/rust-cache@v2
- name: Build Android
run: cd libafl && cargo ndk -t arm64-v8a build --release

#run: cargo build --target aarch64-linux-android
# TODO: Figure out how to properly build stuff with clang
#- name: Add clang path to $PATH env
@@ -298,34 +552,34 @@ jobs:
# run: C:\Rust\.cargo\bin\cargo.exe test --verbose

freebsd:
runs-on: macos-12
runs-on: ubuntu-22.04
name: Simple build in FreeBSD
steps:
- uses: actions/checkout@v3
- name: Test in FreeBSD
id: test
uses: vmactions/freebsd-vm@v0
with:
usesh: true
sync: rsync
copyback: false
mem: 2048
release: 13.1
prepare: |
pkg install -y curl bash sudo llvm14
curl https://sh.rustup.rs -sSf | sh -s -- -y
- uses: actions/checkout@v3
- name: Test in FreeBSD
id: test
uses: vmactions/freebsd-vm@v1
with:
usesh: true
sync: rsync
copyback: false
mem: 2048
release: 13.2
prepare: |
pkg install -y curl bash sudo llvm16
curl https://sh.rustup.rs -sSf | sh -s -- -y

run: |
freebsd-version
. "$HOME/.cargo/env"
rustup toolchain install nightly
export LLVM_CONFIG=/usr/local/bin/llvm-config14
pwd
ls -lah
echo "local/bin"
ls -lah /usr/local/bin/
which llvm-config
chmod +x ./scripts/clippy.sh
bash ./scripts/shmem_limits_fbsd.sh
bash ./scripts/clippy.sh
cargo test
run: |
freebsd-version
. "$HOME/.cargo/env"
rustup toolchain install nightly
export LLVM_CONFIG=/usr/local/bin/llvm-config16
pwd
ls -lah
echo "local/bin"
ls -lah /usr/local/bin/
which llvm-config
chmod +x ./scripts/clippy.sh
bash ./scripts/shmem_limits_fbsd.sh
bash ./scripts/clippy.sh
cargo test
65
.github/workflows/fuzzer-tester-prepare/action.yml
vendored
Normal file
65
.github/workflows/fuzzer-tester-prepare/action.yml
vendored
Normal file
@ -0,0 +1,65 @@
|
||||
name: Setup Rust Environment
|
||||
description: Sets up the Rust environment for the CI workflow
|
||||
runs:
|
||||
using: composite
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
with:
|
||||
submodules: true
|
||||
fetch-depth: 0
|
||||
- uses: Swatinem/rust-cache@v2
|
||||
with: { shared-key: "${{ runner.os }}-shared-fuzzer-cache" }
|
||||
- uses: actions-rs/toolchain@v1
|
||||
with:
|
||||
profile: minimal
|
||||
toolchain: stable
|
||||
- name: Add stable clippy
|
||||
shell: bash
|
||||
run: rustup toolchain install stable --component clippy --allow-downgrade
|
||||
- name: Add nightly clippy
|
||||
shell: bash
|
||||
run: rustup toolchain install nightly --component clippy --allow-downgrade
|
||||
- name: Add no_std toolchain
|
||||
shell: bash
|
||||
run: rustup toolchain install nightly-x86_64-unknown-linux-gnu ; rustup component add rust-src --toolchain nightly-x86_64-unknown-linux-gnu
|
||||
- name: Add wasm target
|
||||
shell: bash
|
||||
run: rustup target add wasm32-unknown-unknown
|
||||
- name: Remove obsolete llvm (Linux)
|
||||
if: runner.os == 'Linux'
|
||||
shell: bash
|
||||
run: sudo apt purge -y llvm* clang*
|
||||
- name: Install LLVM and Clang
|
||||
uses: KyleMayes/install-llvm-action@v2
|
||||
with:
|
||||
directory: ${{ runner.temp }}/llvm
|
||||
version: 17
|
||||
- name: Install deps
|
||||
shell: bash
|
||||
run: sudo apt update && sudo apt install -y nasm nlohmann-json3-dev ninja-build gcc-arm-linux-gnueabi g++-arm-linux-gnueabi gcc-aarch64-linux-gnu g++-aarch64-linux-gnu gcc-mipsel-linux-gnu g++-mipsel-linux-gnu gcc-powerpc-linux-gnu g++-powerpc-linux-gnu libc6-dev-i386-cross libc6-dev libc6-dev-i386 lib32gcc-11-dev lib32stdc++-11-dev libgtk-3-dev pax-utils libz3-dev
|
||||
- name: pip install
|
||||
shell: bash
|
||||
run: python3 -m pip install msgpack jinja2 find_libpython
|
||||
- name: enable multi-thread for `make`
|
||||
shell: bash
|
||||
run: export MAKEFLAGS="-j$(expr $(nproc) \+ 1)"
|
||||
- name: install cargo-make
|
||||
uses: baptiste0928/cargo-install@v1.3.0
|
||||
with:
|
||||
crate: cargo-make
|
||||
- name: install wasm-pack
|
||||
uses: baptiste0928/cargo-install@v1.3.0
|
||||
with:
|
||||
crate: wasm-pack
|
||||
- name: install cxxbridge-cmd
|
||||
uses: baptiste0928/cargo-install@v1.3.0
|
||||
with:
|
||||
crate: cxxbridge-cmd
|
||||
- name: install chrome
|
||||
uses: browser-actions/setup-chrome@v1
|
||||
with:
|
||||
chrome-version: stable
|
||||
- name: Symlink Headers
|
||||
if: runner.os == 'Linux'
|
||||
shell: bash
|
||||
run: sudo ln -s /usr/include/asm-generic /usr/include/asm
|
47
.github/workflows/qemu-fuzzer-tester-prepare/action.yml
vendored
Normal file
@ -0,0 +1,47 @@
|
||||
name: Setup QEMU Fuzzers environment
|
||||
description: Sets up the QEMU fuzzers environment
|
||||
runs:
|
||||
using: composite
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
with:
|
||||
submodules: true
|
||||
fetch-depth: 0
|
||||
- name: Install deps
|
||||
shell: bash
|
||||
run: apt update && apt install -y nasm ninja-build libc6-dev libgtk-3-dev pax-utils libz3-dev wget qemu-utils libsqlite3-dev gcc-arm-none-eabi sudo gcc g++ build-essential gcc-arm-linux-gnueabi g++-arm-linux-gnueabi
|
||||
- uses: Swatinem/rust-cache@v2
|
||||
with: { shared-key: "${{ runner.os }}-shared-fuzzer-cache" }
|
||||
- uses: actions-rs/toolchain@v1
|
||||
with:
|
||||
profile: minimal
|
||||
toolchain: stable
|
||||
- name: Add stable clippy
|
||||
shell: bash
|
||||
run: rustup toolchain install stable --component clippy --allow-downgrade
|
||||
- name: Add nightly clippy
|
||||
shell: bash
|
||||
run: rustup toolchain install nightly --component clippy --allow-downgrade
|
||||
- name: Remove obsolete llvm (Linux)
|
||||
if: runner.os == 'Linux'
|
||||
shell: bash
|
||||
run: sudo apt purge -y llvm* clang*
|
||||
- name: Install LLVM and Clang
|
||||
uses: KyleMayes/install-llvm-action@v2
|
||||
with:
|
||||
directory: ${{ runner.temp }}/llvm
|
||||
version: 17
|
||||
- name: pip install
|
||||
shell: bash
|
||||
run: python3 -m pip install msgpack jinja2 find_libpython
|
||||
- name: enable multi-thread for `make`
|
||||
shell: bash
|
||||
run: export MAKEFLAGS="-j$(expr $(nproc) \+ 1)"
|
||||
- name: install cargo-make
|
||||
uses: baptiste0928/cargo-install@v1.3.0
|
||||
with:
|
||||
crate: cargo-make
|
||||
- name: Symlink Headers
|
||||
if: runner.os == 'Linux'
|
||||
shell: bash
|
||||
run: sudo ln -s /usr/include/asm-generic /usr/include/asm
|
27
.github/workflows/ubuntu-prepare/action.yml
vendored
Normal file
@ -0,0 +1,27 @@
|
||||
name: Setup Rust Environment
|
||||
description: Sets up the Rust environment for the CI workflow
|
||||
runs:
|
||||
using: composite
|
||||
steps:
|
||||
- uses: actions-rs/toolchain@v1
|
||||
with:
|
||||
profile: minimal
|
||||
toolchain: stable
|
||||
components: llvm-tools
|
||||
- name: Remove existing clang and LLVM
|
||||
shell: bash
|
||||
run: sudo apt purge llvm* clang*
|
||||
- name: Install and cache deps
|
||||
shell: bash
|
||||
run: sudo apt update && sudo apt install ninja-build clang-format shellcheck libgtk-3-dev gcc-arm-linux-gnueabi g++-arm-linux-gnueabi libslirp-dev libz3-dev
|
||||
- name: Install cargo-hack
|
||||
shell: bash
|
||||
run: curl -LsSf https://github.com/taiki-e/cargo-hack/releases/latest/download/cargo-hack-x86_64-unknown-linux-gnu.tar.gz | tar xzf - -C ~/.cargo/bin
|
||||
- name: Add nightly
|
||||
shell: bash
|
||||
run: rustup toolchain install nightly --allow-downgrade
|
||||
- name: Install LLVM and Clang
|
||||
uses: KyleMayes/install-llvm-action@v2
|
||||
with:
|
||||
directory: ${{ runner.temp }}/llvm
|
||||
version: 17
|
21
.github/workflows/windows-tester-prepare/action.yml
vendored
Normal file
@ -0,0 +1,21 @@
|
||||
name: Setup Rust Environment
|
||||
description: Sets up the Rust environment for the CI workflow
|
||||
runs:
|
||||
using: composite
|
||||
steps:
|
||||
- uses: actions-rs/toolchain@v1
|
||||
with:
|
||||
profile: minimal
|
||||
toolchain: stable
|
||||
- uses: actions/checkout@v3
|
||||
- uses: Swatinem/rust-cache@v2
|
||||
- name: Build docs
|
||||
shell: pwsh
|
||||
run: cargo doc
|
||||
- uses: ilammy/msvc-dev-cmd@v1
|
||||
- name: Set LIBCLANG_PATH
|
||||
shell: pwsh
|
||||
run: echo "LIBCLANG_PATH=$((gcm clang).source -replace "clang.exe")" >> $env:GITHUB_ENV
|
||||
- name: install cargo-make
|
||||
shell: pwsh
|
||||
run: cargo install --force cargo-make
|
21
.gitignore
vendored
@ -21,6 +21,8 @@ vendor
|
||||
*.obj
|
||||
|
||||
.cur_input
|
||||
.cur_input_*
|
||||
cur_input
|
||||
.venv
|
||||
|
||||
crashes
|
||||
@ -32,6 +34,8 @@ perf.data.old
|
||||
.vscode
|
||||
test.dict
|
||||
|
||||
.idea/
|
||||
|
||||
# Ignore all built fuzzers
|
||||
fuzzer_*
|
||||
AFLplusplus
|
||||
@ -46,6 +50,7 @@ a
|
||||
forkserver_test
|
||||
__pycache__
|
||||
*.lafl_lock
|
||||
*.metadata
|
||||
|
||||
*atomic_file_testfile*
|
||||
**/libxml2
|
||||
@ -54,3 +59,19 @@ __pycache__
|
||||
|
||||
libafl_nyx/QEMU-Nyx
|
||||
libafl_nyx/packer
|
||||
|
||||
.z3-trace
|
||||
|
||||
# No gdb history
|
||||
.gdb_history
|
||||
# No llvm IR
|
||||
*.ll
|
||||
|
||||
*.tar.gz
|
||||
|
||||
# common harness names
|
||||
harness
|
||||
program
|
||||
fuzzer
|
||||
fuzzer_libpng*
|
||||
forkserver_simple
|
||||
|
3
.gitmodules
vendored
@ -1,3 +0,0 @@
|
||||
[submodule "libafl_concolic/symcc_runtime/symcc"]
|
||||
path = libafl_concolic/symcc_runtime/symcc
|
||||
url = https://github.com/AFLplusplus/symcc.git
|
9
.pre-commit-config.yaml
Normal file
@ -0,0 +1,9 @@
|
||||
# See https://pre-commit.com for more information
|
||||
# See https://pre-commit.com/hooks.html for more hooks
|
||||
repos:
|
||||
- repo: local
|
||||
hooks:
|
||||
- id: fmt
|
||||
name: fmt
|
||||
entry: scripts/fmt_all.sh check
|
||||
language: script
|
18
CONTRIBUTING.md
Normal file
@ -0,0 +1,18 @@
|
||||
# How to Contribute to LibAFL
|
||||
|
||||
For bugs, feel free to open issues or contact us directly. Thank you for your support. <3
|
||||
|
||||
## Pull Request guideline
|
||||
|
||||
Even though we will gladly assist you in finishing up your PR, try to
|
||||
- keep all the crates compiling with *stable* rust (hide any non-stable code under [`cfg`s](https://github.com/AFLplusplus/LibAFL/blob/main/libafl/build.rs#L26))
|
||||
- run `cargo +nightly fmt` on your code before pushing
|
||||
- check the output of `cargo clippy --all` or `./clippy.sh`
|
||||
- run `cargo build --no-default-features` to check for `no_std` compatibility (and possibly add `#[cfg(feature = "std")]` to hide parts of your code).
|
||||
|
||||
Some of the parts in this list may be hard; don't be afraid to open a PR if you cannot fix them by yourself, so we can help.
|
||||
|
||||
### Pre-commit hooks
|
||||
|
||||
Some of these checks can be performed automatically during commit using [pre-commit](https://pre-commit.com/).
|
||||
Once the package is installed, simply run `pre-commit install` to enable the hooks; the checks will then run automatically before each commit becomes effective.
|
35
Cargo.toml
@ -1,42 +1,49 @@
|
||||
[workspace]
|
||||
resolver = "2"
|
||||
members = [
|
||||
"libafl",
|
||||
"libafl_derive",
|
||||
"libafl_bolts",
|
||||
"libafl_cc",
|
||||
"libafl_targets",
|
||||
"libafl_frida",
|
||||
"libafl_qemu",
|
||||
"libafl_tinyinst",
|
||||
"libafl_sugar",
|
||||
"libafl_nyx",
|
||||
"libafl_concolic/symcc_runtime",
|
||||
"libafl_concolic/symcc_libafl",
|
||||
"libafl_concolic/test/dump_constraints",
|
||||
"libafl_concolic/test/runtime_test",
|
||||
"libafl_derive",
|
||||
"libafl_frida",
|
||||
"libafl_libfuzzer",
|
||||
"libafl_nyx",
|
||||
"libafl_qemu",
|
||||
"libafl_sugar",
|
||||
"libafl_targets",
|
||||
"libafl_tinyinst",
|
||||
"utils/build_and_test_fuzzers",
|
||||
"utils/deexit",
|
||||
"utils/gramatron/construct_automata",
|
||||
"utils/libafl_benches",
|
||||
"utils/gramatron/construct_automata",
|
||||
]
|
||||
default-members = [
|
||||
"libafl",
|
||||
"libafl_derive",
|
||||
"libafl_bolts",
|
||||
"libafl_cc",
|
||||
"libafl_derive",
|
||||
"libafl_targets",
|
||||
]
|
||||
exclude = [
|
||||
"fuzzers",
|
||||
"bindings",
|
||||
"scripts",
|
||||
"fuzzers",
|
||||
"libafl_qemu/libafl_qemu_build",
|
||||
"libafl_qemu/libafl_qemu_sys"
|
||||
"libafl_qemu/libafl_qemu_sys",
|
||||
"utils/noaslr",
|
||||
"utils/gdb_qemu",
|
||||
"utils/libafl_fmt",
|
||||
"scripts",
|
||||
]
|
||||
|
||||
[workspace.package]
|
||||
version = "0.8.2"
|
||||
version = "0.13.0"
|
||||
|
||||
[profile.release]
|
||||
lto = true
|
||||
codegen-units = 1
|
||||
opt-level = 3
|
||||
debug = true
|
||||
|
||||
|
46
Dockerfile
@ -1,10 +1,10 @@
|
||||
# syntax=docker/dockerfile:1.2
|
||||
FROM rust:bullseye AS libafl
|
||||
FROM rust:1.76.0 AS libafl
|
||||
LABEL "maintainer"="afl++ team <afl@aflplus.plus>"
|
||||
LABEL "about"="LibAFL Docker image"
|
||||
|
||||
# install sccache to cache subsequent builds of dependencies
|
||||
RUN cargo install sccache
|
||||
RUN cargo install --locked sccache
|
||||
|
||||
ENV HOME=/root
|
||||
ENV SCCACHE_CACHE_SIZE="1G"
|
||||
@ -16,10 +16,18 @@ RUN sh -c 'echo set encoding=utf-8 > /root/.vimrc' \
|
||||
mkdir ~/.cargo && \
|
||||
echo "[build]\nrustc-wrapper = \"${RUSTC_WRAPPER}\"" >> ~/.cargo/config
|
||||
|
||||
RUN rustup default nightly
|
||||
RUN rustup component add rustfmt clippy
|
||||
|
||||
# Install clang 11, common build tools
|
||||
RUN apt update && apt install -y build-essential gdb git wget clang clang-tools libc++-11-dev libc++abi-11-dev llvm
|
||||
# Install clang 18, common build tools
|
||||
ENV LLVM_VERSION=18
|
||||
RUN apt update && apt install -y build-essential gdb git wget python3-venv ninja-build lsb-release software-properties-common gnupg cmake
|
||||
# Workaround until https://github.com/llvm/llvm-project/issues/62475 is resolved
|
||||
RUN set -ex &&\
|
||||
echo "deb http://apt.llvm.org/bookworm/ llvm-toolchain-bookworm-${LLVM_VERSION} main" > /etc/apt/sources.list.d/apt.llvm.org.list &&\
|
||||
wget -qO- https://apt.llvm.org/llvm-snapshot.gpg.key | tee /etc/apt/trusted.gpg.d/apt.llvm.org.asc &&\
|
||||
apt update &&\
|
||||
apt-get install -y clang-${LLVM_VERSION} lldb-${LLVM_VERSION} lld-${LLVM_VERSION} clangd-${LLVM_VERSION} clang-tidy-${LLVM_VERSION} clang-format-${LLVM_VERSION} clang-tools-${LLVM_VERSION} llvm-${LLVM_VERSION}-dev lld-${LLVM_VERSION} lldb-${LLVM_VERSION} llvm-${LLVM_VERSION}-tools libomp-${LLVM_VERSION}-dev libc++-${LLVM_VERSION}-dev libc++abi-${LLVM_VERSION}-dev libclang-common-${LLVM_VERSION}-dev libclang-${LLVM_VERSION}-dev libclang-cpp${LLVM_VERSION}-dev libunwind-${LLVM_VERSION}-dev libclang-rt-${LLVM_VERSION}-dev libpolly-${LLVM_VERSION}-dev
|
||||
|
||||
# Copy a dummy.rs and Cargo.toml first, so that dependencies are cached
|
||||
WORKDIR /libafl
|
||||
@ -28,21 +36,24 @@ COPY Cargo.toml README.md ./
|
||||
COPY libafl_derive/Cargo.toml libafl_derive/Cargo.toml
|
||||
COPY scripts/dummy.rs libafl_derive/src/lib.rs
|
||||
|
||||
COPY libafl/Cargo.toml libafl/build.rs libafl/
|
||||
COPY libafl/examples libafl/examples
|
||||
COPY libafl/Cargo.toml libafl/build.rs libafl/README.md libafl/
|
||||
COPY scripts/dummy.rs libafl/src/lib.rs
|
||||
|
||||
COPY libafl_bolts/Cargo.toml libafl_bolts/build.rs libafl_bolts/README.md libafl_bolts/
|
||||
COPY libafl_bolts/examples libafl_bolts/examples
|
||||
COPY scripts/dummy.rs libafl_bolts/src/lib.rs
|
||||
|
||||
COPY libafl_frida/Cargo.toml libafl_frida/build.rs libafl_frida/
|
||||
COPY scripts/dummy.rs libafl_frida/src/lib.rs
|
||||
COPY libafl_frida/src/gettls.c libafl_frida/src/gettls.c
|
||||
|
||||
COPY libafl_qemu/Cargo.toml libafl_qemu/build.rs libafl_qemu/
|
||||
COPY libafl_qemu/Cargo.toml libafl_qemu/build.rs libafl_qemu/build_linux.rs libafl_qemu/
|
||||
COPY scripts/dummy.rs libafl_qemu/src/lib.rs
|
||||
|
||||
COPY libafl_qemu/libafl_qemu_build/Cargo.toml libafl_qemu/libafl_qemu_build/
|
||||
COPY scripts/dummy.rs libafl_qemu/libafl_qemu_build/src/lib.rs
|
||||
|
||||
COPY libafl_qemu/libafl_qemu_sys/Cargo.toml libafl_qemu/libafl_qemu_sys/build.rs libafl_qemu/libafl_qemu_sys/
|
||||
COPY libafl_qemu/libafl_qemu_sys/Cargo.toml libafl_qemu/libafl_qemu_sys/build.rs libafl_qemu/libafl_qemu_sys/build_linux.rs libafl_qemu/libafl_qemu_sys/
|
||||
COPY scripts/dummy.rs libafl_qemu/libafl_qemu_sys/src/lib.rs
|
||||
|
||||
COPY libafl_sugar/Cargo.toml libafl_sugar/
|
||||
@ -69,12 +80,16 @@ COPY scripts/dummy.rs libafl_concolic/symcc_runtime/src/lib.rs
|
||||
COPY libafl_concolic/symcc_libafl/Cargo.toml libafl_concolic/symcc_libafl/
|
||||
COPY scripts/dummy.rs libafl_concolic/symcc_libafl/src/lib.rs
|
||||
|
||||
COPY libafl_nyx/Cargo.toml libafl_nyx/build.rs libafl_nyx/
|
||||
COPY libafl_nyx/Cargo.toml libafl_nyx/build.rs libafl_nyx/build_nyx_support.sh libafl_nyx/
|
||||
COPY scripts/dummy.rs libafl_nyx/src/lib.rs
|
||||
|
||||
COPY libafl_tinyinst/Cargo.toml libafl_tinyinst/
|
||||
COPY scripts/dummy.rs libafl_tinyinst/src/lib.rs
|
||||
|
||||
# avoid pulling in the runtime, as this is quite an expensive build, until later
|
||||
COPY libafl_libfuzzer/Cargo.toml libafl_libfuzzer/
|
||||
COPY scripts/dummy.rs libafl_libfuzzer/src/lib.rs
|
||||
|
||||
COPY utils utils
|
||||
|
||||
RUN cargo build && cargo build --release
|
||||
@ -95,6 +110,8 @@ COPY libafl_cc/src libafl_cc/src
|
||||
RUN touch libafl_cc/src/lib.rs
|
||||
COPY libafl_derive/src libafl_derive/src
|
||||
RUN touch libafl_derive/src/lib.rs
|
||||
COPY libafl_bolts/src libafl_bolts/src
|
||||
RUN touch libafl_bolts/src/lib.rs
|
||||
COPY libafl/src libafl/src
|
||||
RUN touch libafl/src/lib.rs
|
||||
COPY libafl_targets/src libafl_targets/src
|
||||
@ -104,6 +121,8 @@ RUN touch libafl_qemu/libafl_qemu_build/src/lib.rs
|
||||
COPY libafl_qemu/libafl_qemu_build/src libafl_qemu/libafl_qemu_build/src
|
||||
RUN touch libafl_qemu/libafl_qemu_sys/src/lib.rs
|
||||
COPY libafl_qemu/libafl_qemu_sys/src libafl_qemu/libafl_qemu_sys/src
|
||||
COPY libafl_qemu/runtime libafl_qemu/runtime
|
||||
COPY libafl_qemu/libqasan libafl_qemu/libqasan
|
||||
RUN touch libafl_qemu/src/lib.rs
|
||||
COPY libafl_qemu/src libafl_qemu/src
|
||||
RUN touch libafl_frida/src/lib.rs
|
||||
@ -112,11 +131,16 @@ COPY libafl_concolic/symcc_runtime libafl_concolic/symcc_runtime
|
||||
COPY libafl_concolic/test libafl_concolic/test
|
||||
COPY libafl_nyx/src libafl_nyx/src
|
||||
RUN touch libafl_nyx/src/lib.rs
|
||||
COPY libafl_libfuzzer/src libafl_libfuzzer/src
|
||||
COPY libafl_libfuzzer/libafl_libfuzzer_runtime libafl_libfuzzer/libafl_libfuzzer_runtime
|
||||
COPY libafl_libfuzzer/build.rs libafl_libfuzzer/build.rs
|
||||
RUN touch libafl_libfuzzer/src/lib.rs
|
||||
RUN cargo build && cargo build --release
|
||||
|
||||
# Copy fuzzers over
|
||||
COPY fuzzers fuzzers
|
||||
|
||||
# RUN ./scripts/test_all_fuzzers.sh --no-fmt
|
||||
# RUN ./scripts/test_fuzzer.sh --no-fmt
|
||||
|
||||
ENTRYPOINT [ "/bin/bash" ]
|
||||
ENTRYPOINT [ "/bin/bash", "-c" ]
|
||||
CMD ["/bin/bash"]
|
||||
|
46
README.md
@ -1,6 +1,6 @@
|
||||
# LibAFL, the fuzzer library.
|
||||
|
||||
<img align="right" src="https://github.com/AFLplusplus/Website/raw/master/static/logo_256x256.png" alt="AFL++ Logo">
|
||||
<img align="right" src="https://raw.githubusercontent.com/AFLplusplus/Website/main/static/libafl_logo.svg" alt="LibAFL logo" width="250" height="250">
|
||||
|
||||
Advanced Fuzzing Library - Slot your own fuzzers together and extend their features using Rust.
|
||||
|
||||
@ -10,6 +10,7 @@ LibAFL is written and maintained by
|
||||
* [Dominik Maier](https://twitter.com/domenuk) <dominik@aflplus.plus>
|
||||
* [s1341](https://twitter.com/srubenst1341) <github@shmarya.net>
|
||||
* [Dongjia Zhang](https://github.com/tokatoka) <toka@aflplus.plus>
|
||||
* [Addison Crump](https://github.com/addisoncrump) <me@addisoncrump.info>
|
||||
|
||||
## Why LibAFL?
|
||||
|
||||
@ -33,7 +34,7 @@ LibAFL offers integrations with popular instrumentation frameworks. At the momen
|
||||
|
||||
+ SanitizerCoverage, in [libafl_targets](./libafl_targets)
|
||||
+ Frida, in [libafl_frida](./libafl_frida)
|
||||
+ QEMU user-mode, in [libafl_qemu](./libafl_qemu)
|
||||
+ QEMU user-mode and system mode, including hooks for emulation, in [libafl_qemu](./libafl_qemu)
|
||||
+ TinyInst, in [libafl_tinyinst](./libafl_tinyinst) by [elbiazo](https://github.com/elbiazo)
|
||||
|
||||
## Getting started
|
||||
@ -44,36 +45,39 @@ We highly recommend *not* to use e.g. your Linux distribition package as this is
|
||||
Rust directly, instructions can be found [here](https://www.rust-lang.org/tools/install).
|
||||
|
||||
- LLVM tools
|
||||
The LLVM tools are needed (newer than LLVM 11.0.0 but older than LLVM 15.0.0)
|
||||
The LLVM tools (including clang, clang++) are needed (newer than LLVM 15.0.0 up to LLVM 18.1.3)
|
||||
If you are using Debian/Ubuntu, again, we highly recommend that you install the package from [here](https://apt.llvm.org/)
|
||||
|
||||
(In `libafl_concolic`, we only support LLVM version newer than 18)
|
||||
|
||||
- Cargo-make
|
||||
We use cargo-make to build the fuzzers in `fuzzers/` directory. You can install it with
|
||||
|
||||
```
|
||||
```sh
|
||||
cargo install cargo-make
|
||||
```
|
||||
|
||||
2. Clone the LibAFL repository with
|
||||
|
||||
```
|
||||
```sh
|
||||
git clone https://github.com/AFLplusplus/LibAFL
|
||||
```
|
||||
|
||||
3. Build the library using
|
||||
|
||||
```
|
||||
```sh
|
||||
cargo build --release
|
||||
```
|
||||
|
||||
4. Build the API documentation with
|
||||
|
||||
```
|
||||
```sh
|
||||
cargo doc
|
||||
```
|
||||
|
||||
5. Browse the LibAFL book (WIP!) with (requires [mdbook](https://github.com/rust-lang/mdBook))
|
||||
5. Browse the LibAFL book (WIP!) with (requires [mdbook](https://rust-lang.github.io/mdBook/index.html))
|
||||
|
||||
```
|
||||
```sh
|
||||
cd docs && mdbook serve
|
||||
```
|
||||
|
||||
@ -81,9 +85,11 @@ We collect all example fuzzers in [`./fuzzers`](./fuzzers/).
|
||||
Be sure to read their documentation (and source); this is *the natural way to get started!*
|
||||
|
||||
You can run each example fuzzer with
|
||||
```
|
||||
|
||||
```sh
|
||||
cargo make run
|
||||
```
|
||||
|
||||
as long as the fuzzer directory has a `Makefile.toml` file.
|
||||
|
||||
The best-tested fuzzer is [`./fuzzers/libfuzzer_libpng`](./fuzzers/libfuzzer_libpng), a multicore libfuzzer-like fuzzer using LibAFL for a libpng harness.
|
||||
@ -106,17 +112,11 @@ The best-tested fuzzer is [`./fuzzers/libfuzzer_libpng`](./fuzzers/libfuzzer_lib
|
||||
|
||||
+ Blogpost on binary-only fuzzing lib libaf_qemu, [Hacking TMNF - Fuzzing the game server](https://blog.bricked.tech/posts/tmnf/part1/), by [RickdeJager](https://github.com/RickdeJager).
|
||||
|
||||
+ [A LibAFL Introductory Workshop](https://www.atredis.com/blog/2023/12/4/a-libafl-introductory-workshop), by [Jordan Whitehead](https://github.com/jordan9001)
|
||||
|
||||
## Contributing
|
||||
|
||||
For bugs, feel free to open issues or contact us directly. Thank you for your support. <3
|
||||
|
||||
Even though we will gladly assist you in finishing up your PR, try to
|
||||
- keep all the crates compiling with *stable* rust (hide the eventual non-stable code under [`cfg`s](https://github.com/AFLplusplus/LibAFL/blob/main/libafl/build.rs#L26))
|
||||
- run `cargo fmt` on your code before pushing
|
||||
- check the output of `cargo clippy --all` or `./clippy.sh`
|
||||
- run `cargo build --no-default-features` to check for `no_std` compatibility (and possibly add `#[cfg(feature = "std")]`) to hide parts of your code.
|
||||
|
||||
Some of the parts in this list may be hard, don't be afraid to open a PR if you cannot fix them by yourself, so we can help.
|
||||
Please check out [CONTRIBUTING.md](CONTRIBUTING.md) for the contributing guideline.
|
||||
|
||||
## Cite
|
||||
|
||||
@ -149,11 +149,3 @@ Unless you explicitly state otherwise, any contribution intentionally submitted
|
||||
for inclusion in this crate by you, as defined in the Apache-2.0 license, shall
|
||||
be dual licensed as above, without any additional terms or conditions.
|
||||
</sub>
|
||||
|
||||
<br>
|
||||
|
||||
<sub>
|
||||
Dependencies under more restrictive licenses, such as GPL or AGPL, can be enabled
|
||||
using the respective feature in each crate when it is present, such as the
|
||||
'agpl' feature of the libafl crate.
|
||||
</sub>
|
||||
|
17
TROPHIES.md
Normal file
@ -0,0 +1,17 @@
|
||||
# Bugs found by `libafl` and `libafl_libfuzzer`
|
||||
|
||||
* pdf-rs
|
||||
* <https://github.com/pdf-rs/pdf/issues/183>
|
||||
* <https://github.com/pdf-rs/pdf/issues/184>
|
||||
* <https://github.com/pdf-rs/pdf/issues/185>
|
||||
* <https://github.com/pdf-rs/pdf/issues/186>
|
||||
* <https://github.com/pdf-rs/pdf/issues/187>
|
||||
* <https://github.com/pdf-rs/pdf/issues/189>
|
||||
* nu-shell
|
||||
* https://github.com/nushell/nushell/issues/10365
|
||||
* https://github.com/nushell/nushell/issues/9417
|
||||
* exrs
|
||||
* https://github.com/johannesvollmer/exrs/pull/221
|
||||
* pcre2
|
||||
* https://github.com/PCRE2Project/pcre2/issues/275
|
||||
|
1
bindings/pylibafl/.gitignore
vendored
Normal file
@ -0,0 +1 @@
|
||||
dist/
|
@ -1,13 +1,16 @@
|
||||
[package]
|
||||
name = "pylibafl"
|
||||
version = "0.8.2"
|
||||
version = "0.13.0"
|
||||
edition = "2021"
|
||||
|
||||
[dependencies]
|
||||
pyo3 = { version = "0.17", features = ["extension-module"] }
|
||||
libafl_qemu = { path = "../../libafl_qemu", version = "0.8.2", features = ["python"] }
|
||||
libafl_sugar = { path = "../../libafl_sugar", version = "0.8.2", features = ["python"] }
|
||||
libafl = { path = "../../libafl", version = "0.8.2", features = ["python"] }
|
||||
pyo3 = { version = "0.18.3", features = ["extension-module"] }
|
||||
pyo3-log = "0.8.1"
|
||||
libafl_sugar = { path = "../../libafl_sugar", version = "0.13.0", features = ["python"] }
|
||||
libafl_bolts = { path = "../../libafl_bolts", version = "0.13.0", features = ["python"] }
|
||||
|
||||
[target.'cfg(target_os = "linux")'.dependencies]
|
||||
libafl_qemu = { path = "../../libafl_qemu", version = "0.13.0", features = ["python"] }
|
||||
|
||||
[build-dependencies]
|
||||
pyo3-build-config = { version = "0.17" }
|
||||
|
26
bindings/pylibafl/pyproject.toml
Normal file
@ -0,0 +1,26 @@
|
||||
[build-system]
|
||||
requires = ["maturin[patchelf]>=0.14.10,<0.15"]
|
||||
build-backend = "maturin"
|
||||
|
||||
[project]
|
||||
name = "PyLibAFL"
|
||||
version = "0.10.1"
|
||||
description = "Advanced Fuzzing Library for Python"
|
||||
readme = "README.md"
|
||||
requires-python = ">=3.8"
|
||||
license = {text = "Apache-2.0"}
|
||||
classifiers = [
|
||||
"License :: OSI Approved :: Apache Software License",
|
||||
"License :: OSI Approved :: MIT License",
|
||||
"Programming Language :: Rust",
|
||||
"Topic :: Security",
|
||||
]
|
||||
|
||||
[project.urls]
|
||||
repository = "https://github.com/AFLplusplus/LibAFL.git"
|
||||
|
||||
[tool.maturin]
|
||||
bindings = "pylibafl"
|
||||
manifest-path = "Cargo.toml"
|
||||
python-source = "python"
|
||||
all-features = true
|
@ -1,121 +1,33 @@
|
||||
use libafl;
|
||||
#[cfg(target_os = "linux")]
|
||||
use libafl_qemu;
|
||||
use libafl_sugar;
|
||||
use pyo3::{prelude::*, types::PyDict};
|
||||
|
||||
const LIBAFL_CODE: &str = r#"
|
||||
class BaseObserver:
|
||||
def flush(self):
|
||||
pass
|
||||
def pre_exec(self, state, input):
|
||||
pass
|
||||
def post_exec(self, state, input, exit_kind):
|
||||
pass
|
||||
def pre_exec_child(self, state, input):
|
||||
pass
|
||||
def post_exec_child(self, state, input, exit_kind):
|
||||
pass
|
||||
def name(self):
|
||||
return type(self).__name__
|
||||
def as_observer(self):
|
||||
return Observer.new_py(self)
|
||||
|
||||
class BaseFeedback:
|
||||
def init_state(self, state):
|
||||
pass
|
||||
def is_interesting(self, state, mgr, input, observers, exit_kind) -> bool:
|
||||
return False
|
||||
def append_metadata(self, state, testcase):
|
||||
pass
|
||||
def discard_metadata(self, state, input):
|
||||
pass
|
||||
def name(self):
|
||||
return type(self).__name__
|
||||
def as_feedback(self):
|
||||
return Feedback.new_py(self)
|
||||
|
||||
class BaseExecutor:
|
||||
def observers(self) -> ObserversTuple:
|
||||
raise NotImplementedError('Implement this yourself')
|
||||
def run_target(self, fuzzer, state, mgr, input) -> ExitKind:
|
||||
raise NotImplementedError('Implement this yourself')
|
||||
def as_executor(self):
|
||||
return Executor.new_py(self)
|
||||
|
||||
class BaseStage:
|
||||
def perform(self, fuzzer, executor, state, manager, corpus_idx):
|
||||
pass
|
||||
def as_stage(self):
|
||||
return Stage.new_py(self)
|
||||
|
||||
class BaseMutator:
|
||||
def mutate(self, state, input, stage_idx):
|
||||
pass
|
||||
def post_exec(self, state, stage_idx, corpus_idx):
|
||||
pass
|
||||
def as_mutator(self):
|
||||
return Mutator.new_py(self)
|
||||
|
||||
class FnStage(BaseStage):
|
||||
def __init__(self, fn):
|
||||
self.fn = fn
|
||||
def __call__(self, fuzzer, executor, state, manager, corpus_idx):
|
||||
self.fn(fuzzer, executor, state, manager, corpus_idx)
|
||||
def perform(self, fuzzer, executor, state, manager, corpus_idx):
|
||||
self.fn(fuzzer, executor, state, manager, corpus_idx)
|
||||
|
||||
def feedback_not(a):
|
||||
return NotFeedback(a).as_feedback()
|
||||
|
||||
def feedback_and(a, b):
|
||||
return EagerAndFeedback(a, b).as_feedback()
|
||||
|
||||
def feedback_and_fast(a, b):
|
||||
return FastAndFeedback(a, b).as_feedback()
|
||||
|
||||
def feedback_or(a, b):
|
||||
return EagerOrFeedback(a, b).as_feedback()
|
||||
|
||||
def feedback_or_fast(a, b):
|
||||
return FastOrFeedback(a, b).as_feedback()
|
||||
"#;
|
||||
use pyo3::prelude::*;
|
||||
|
||||
/// Setup python modules for `libafl_qemu` and `libafl_sugar`.
|
||||
///
|
||||
/// # Errors
|
||||
/// Returns error if python libafl setup failed.
|
||||
#[pymodule]
|
||||
#[pyo3(name = "pylibafl")]
|
||||
pub fn python_module(py: Python, m: &PyModule) -> PyResult<()> {
|
||||
pyo3_log::init();
|
||||
|
||||
let modules = py.import("sys")?.getattr("modules")?;
|
||||
|
||||
let sugar_module = PyModule::new(py, "sugar")?;
|
||||
libafl_sugar::python_module(py, sugar_module)?;
|
||||
m.add_submodule(sugar_module)?;
|
||||
|
||||
modules.set_item("pylibafl.sugar", sugar_module)?;
|
||||
|
||||
#[cfg(target_os = "linux")]
|
||||
let qemu_module = PyModule::new(py, "qemu")?;
|
||||
#[cfg(target_os = "linux")]
|
||||
libafl_qemu::python_module(py, qemu_module)?;
|
||||
#[cfg(target_os = "linux")]
|
||||
m.add_submodule(qemu_module)?;
|
||||
|
||||
#[cfg(target_os = "linux")]
|
||||
modules.set_item("pylibafl.qemu", qemu_module)?;
|
||||
|
||||
let libafl_module = PyModule::new(py, "libafl")?;
|
||||
libafl::pybind::python_module(py, libafl_module)?;
|
||||
|
||||
libafl_module.add("__builtins__", py.import("builtins")?)?;
|
||||
|
||||
let locals = PyDict::new(py);
|
||||
py.run(LIBAFL_CODE, Some(libafl_module.dict()), Some(locals))?;
|
||||
for (key, val) in locals.iter() {
|
||||
libafl_module.add(key.extract::<&str>()?, val)?;
|
||||
{
|
||||
let qemu_module = PyModule::new(py, "qemu")?;
|
||||
libafl_qemu::python_module(py, qemu_module)?;
|
||||
m.add_submodule(qemu_module)?;
|
||||
modules.set_item("pylibafl.qemu", qemu_module)?;
|
||||
}
|
||||
|
||||
m.add_submodule(libafl_module)?;
|
||||
|
||||
modules.set_item("pylibafl.libafl", libafl_module)?;
|
||||
let bolts_module = PyModule::new(py, "libafl_bolts")?;
|
||||
libafl_bolts::pybind::python_module(py, bolts_module)?;
|
||||
m.add_submodule(bolts_module)?;
|
||||
modules.set_item("pylibafl.libafl_bolts", bolts_module)?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
@ -1,94 +1,7 @@
|
||||
from pylibafl.libafl import *
|
||||
import pylibafl.sugar as sugar
|
||||
import ctypes
|
||||
import platform
|
||||
|
||||
|
||||
class FooObserver(BaseObserver):
|
||||
def __init__(self):
|
||||
self.n = 0
|
||||
|
||||
def name(self):
|
||||
return "Foo"
|
||||
|
||||
def pre_exec(self, state, input):
|
||||
if self.n % 10000 == 0:
|
||||
print("FOO!", self.n, input)
|
||||
self.n += 1
|
||||
|
||||
|
||||
class FooFeedback(BaseFeedback):
|
||||
def is_interesting(self, state, mgr, input, observers, exit_kind):
|
||||
ob = observers.match_name("Foo").unwrap_py()
|
||||
return ob.n % 10000 == 0
|
||||
|
||||
|
||||
class FooExecutor(BaseExecutor):
|
||||
def __init__(self, harness, observers: ObserversTuple):
|
||||
self.h = harness
|
||||
self.o = observers
|
||||
|
||||
def observers(self):
|
||||
return self.o
|
||||
|
||||
def run_target(self, fuzzer, state, mgr, input) -> ExitKind:
|
||||
return (self.h)(input)
|
||||
|
||||
|
||||
libc = ctypes.cdll.LoadLibrary("libc.so.6")
|
||||
|
||||
area_ptr = libc.calloc(1, 4096)
|
||||
|
||||
observer = StdMapObserverI8("mymap", area_ptr, 4096)
|
||||
|
||||
m = observer.as_map_observer()
|
||||
|
||||
observers = ObserversTuple(
|
||||
[observer.as_map_observer().as_observer(), FooObserver().as_observer()]
|
||||
)
|
||||
|
||||
feedback = feedback_or(MaxMapFeedbackI8(m).as_feedback(), FooFeedback().as_feedback())
|
||||
|
||||
objective = feedback_and_fast(
|
||||
CrashFeedback().as_feedback(), MaxMapFeedbackI8(m).as_feedback()
|
||||
)
|
||||
|
||||
fuzzer = StdFuzzer(feedback, objective)
|
||||
|
||||
rand = StdRand.with_current_nanos()
|
||||
|
||||
state = StdState(
|
||||
rand.as_rand(),
|
||||
InMemoryCorpus().as_corpus(),
|
||||
InMemoryCorpus().as_corpus(),
|
||||
feedback,
|
||||
objective,
|
||||
)
|
||||
|
||||
monitor = SimpleMonitor(lambda s: print(s))
|
||||
|
||||
mgr = SimpleEventManager(monitor.as_monitor())
|
||||
|
||||
|
||||
def harness(buf) -> ExitKind:
|
||||
# print(buf)
|
||||
m[0] = 1
|
||||
if len(buf) > 0 and buf[0] == ord("a"):
|
||||
m[1] = 1
|
||||
if len(buf) > 1 and buf[1] == ord("b"):
|
||||
m[2] = 1
|
||||
if len(buf) > 2 and buf[2] == ord("c"):
|
||||
m[3] = 1
|
||||
return ExitKind.crash()
|
||||
return ExitKind.ok()
|
||||
|
||||
|
||||
# executor = InProcessExecutor(harness, observers, fuzzer, state, mgr.as_manager())
|
||||
|
||||
executor = FooExecutor(harness, observers)
|
||||
|
||||
stage = StdMutationalStage(StdHavocMutator().as_mutator())
|
||||
|
||||
stage_tuple_list = StagesTuple([stage.as_stage()])
|
||||
|
||||
fuzzer.add_input(state, executor.as_executor(), mgr.as_manager(), b"\0\0")
|
||||
|
||||
fuzzer.fuzz_loop(executor.as_executor(), state, mgr.as_manager(), stage_tuple_list)
|
||||
print("Starting to fuzz from python!")
|
||||
fuzzer = sugar.InMemoryBytesCoverageSugar(input_dirs=["./in"], output_dir="out", broker_port=1337, cores=[0,1])
|
||||
fuzzer.run(lambda b: print("foo"))
|
14
bindings/pylibafl/test.sh
Executable file
@ -0,0 +1,14 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
mkdir in || true
|
||||
echo "a" > ./in/a
|
||||
|
||||
timeout 10 python3 ./test.py
|
||||
export exit_code=$?
|
||||
if [ $exit_code -eq 124 ]; then
|
||||
# 124 = timeout happened. All good.
|
||||
exit 0
|
||||
else
|
||||
exit $exit_code
|
||||
fi
|
||||
|
1
docs/.gitignore
vendored
@ -1 +1,2 @@
|
||||
book
|
||||
!listings/**/*
|
||||
|
9
docs/listings/baby_fuzzer/listing-01/Cargo.toml
Normal file
@ -0,0 +1,9 @@
|
||||
[package]
|
||||
name = "baby_fuzzer_listing_01"
|
||||
version = "0.1.0"
|
||||
authors = ["Your Name <you@example.com>"]
|
||||
edition = "2018"
|
||||
|
||||
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
|
||||
|
||||
[dependencies]
|
3
docs/listings/baby_fuzzer/listing-01/src/main.rs
Normal file
@ -0,0 +1,3 @@
|
||||
fn main() {
|
||||
println!("Hello, world!");
|
||||
}
|
21
docs/listings/baby_fuzzer/listing-02/Cargo.toml
Normal file
@ -0,0 +1,21 @@
|
||||
[package]
|
||||
name = "baby_fuzzer_listing_02"
|
||||
version = "0.1.0"
|
||||
authors = ["Your Name <you@example.com>"]
|
||||
edition = "2018"
|
||||
|
||||
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
|
||||
|
||||
[dependencies]
|
||||
libafl = { path = "path/to/libafl/" }
|
||||
libafl_bolts = { path = "path/to/libafl_bolts/" }
|
||||
|
||||
[profile.dev]
|
||||
panic = "abort"
|
||||
|
||||
[profile.release]
|
||||
panic = "abort"
|
||||
lto = true
|
||||
codegen-units = 1
|
||||
opt-level = 3
|
||||
debug = true
|
3
docs/listings/baby_fuzzer/listing-02/src/main.rs
Normal file
@ -0,0 +1,3 @@
|
||||
fn main() {
|
||||
println!("Hello, world!");
|
||||
}
|
24
docs/listings/baby_fuzzer/listing-03/Cargo.toml
Normal file
@ -0,0 +1,24 @@
|
||||
[package]
|
||||
name = "baby_fuzzer_listing_03"
|
||||
version = "0.1.0"
|
||||
authors = ["Your Name <you@example.com>"]
|
||||
edition = "2018"
|
||||
|
||||
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
|
||||
|
||||
[dependencies]
|
||||
libafl = { path = "path/to/libafl/" }
|
||||
libafl_bolts = { path = "path/to/libafl_bolts/" }
|
||||
|
||||
[profile.dev]
|
||||
panic = "abort"
|
||||
|
||||
[profile.release]
|
||||
panic = "abort"
|
||||
lto = true
|
||||
codegen-units = 1
|
||||
opt-level = 3
|
||||
debug = true
|
||||
|
||||
[features]
|
||||
panic = []
|
26
docs/listings/baby_fuzzer/listing-03/src/main.rs
Normal file
@ -0,0 +1,26 @@
|
||||
extern crate libafl;
|
||||
extern crate libafl_bolts;
|
||||
use libafl::{
|
||||
executors::ExitKind,
|
||||
inputs::{BytesInput, HasTargetBytes},
|
||||
};
|
||||
use libafl_bolts::AsSlice;
|
||||
|
||||
fn main() {
|
||||
let mut harness = |input: &BytesInput| {
|
||||
let target = input.target_bytes();
|
||||
let buf = target.as_slice();
|
||||
if buf.len() > 0 && buf[0] == 'a' as u8 {
|
||||
if buf.len() > 1 && buf[1] == 'b' as u8 {
|
||||
if buf.len() > 2 && buf[2] == 'c' as u8 {
|
||||
panic!("=)");
|
||||
}
|
||||
}
|
||||
}
|
||||
ExitKind::Ok
|
||||
};
|
||||
// To test the panic:
|
||||
let input = BytesInput::new(Vec::from("abc"));
|
||||
#[cfg(feature = "panic")]
|
||||
harness(&input);
|
||||
}
|
24
docs/listings/baby_fuzzer/listing-04/Cargo.toml
Normal file
@ -0,0 +1,24 @@
|
||||
[package]
|
||||
name = "baby_fuzzer_listing_04"
|
||||
version = "0.1.0"
|
||||
authors = ["Your Name <you@example.com>"]
|
||||
edition = "2018"
|
||||
|
||||
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
|
||||
|
||||
[dependencies]
|
||||
libafl = { path = "path/to/libafl/" }
|
||||
libafl_bolts = { path = "path/to/libafl_bolts/" }
|
||||
|
||||
[profile.dev]
|
||||
panic = "abort"
|
||||
|
||||
[profile.release]
|
||||
panic = "abort"
|
||||
lto = true
|
||||
codegen-units = 1
|
||||
opt-level = 3
|
||||
debug = true
|
||||
|
||||
[features]
|
||||
panic = []
|
87
docs/listings/baby_fuzzer/listing-04/src/main.rs
Normal file
@ -0,0 +1,87 @@
|
||||
/* ANCHOR: use */
|
||||
extern crate libafl;
|
||||
extern crate libafl_bolts;
|
||||
|
||||
use std::path::PathBuf;
|
||||
|
||||
use libafl::{
|
||||
corpus::{InMemoryCorpus, OnDiskCorpus},
|
||||
events::SimpleEventManager,
|
||||
executors::{inprocess::InProcessExecutor, ExitKind},
|
||||
fuzzer::StdFuzzer,
|
||||
generators::RandPrintablesGenerator,
|
||||
inputs::{BytesInput, HasTargetBytes},
|
||||
monitors::SimpleMonitor,
|
||||
schedulers::QueueScheduler,
|
||||
state::StdState,
|
||||
};
|
||||
use libafl_bolts::{rands::StdRand, tuples::tuple_list, AsSlice};
|
||||
/* ANCHOR_END: use */
|
||||
|
||||
fn main() {
|
||||
let mut harness = |input: &BytesInput| {
|
||||
let target = input.target_bytes();
|
||||
let buf = target.as_slice();
|
||||
if buf.len() > 0 && buf[0] == 'a' as u8 {
|
||||
if buf.len() > 1 && buf[1] == 'b' as u8 {
|
||||
if buf.len() > 2 && buf[2] == 'c' as u8 {
|
||||
panic!("=)");
|
||||
}
|
||||
}
|
||||
}
|
||||
ExitKind::Ok
|
||||
};
|
||||
// To test the panic:
|
||||
let input = BytesInput::new(Vec::from("abc"));
|
||||
#[cfg(feature = "panic")]
|
||||
harness(&input);
|
||||
|
||||
/* ANCHOR: state */
|
||||
// create a State from scratch
|
||||
let mut state = StdState::new(
|
||||
// RNG
|
||||
StdRand::new(),
|
||||
// Corpus that will be evolved, we keep it in memory for performance
|
||||
InMemoryCorpus::new(),
|
||||
// Corpus in which we store solutions (crashes in this example),
|
||||
// on disk so the user can get them after stopping the fuzzer
|
||||
OnDiskCorpus::new(PathBuf::from("./crashes")).unwrap(),
|
||||
&mut (),
|
||||
&mut (),
|
||||
)
|
||||
.unwrap();
|
||||
/* ANCHOR_END: state */
|
||||
|
||||
/* ANCHOR: event_manager */
|
||||
// The Monitor trait defines how the fuzzer stats are displayed to the user
|
||||
let mon = SimpleMonitor::new(|s| println!("{s}"));
|
||||
|
||||
// The event manager handles the various events generated during the fuzzing loop
|
||||
// such as the notification of the addition of a new item to the corpus
|
||||
let mut mgr = SimpleEventManager::new(mon);
|
||||
/* ANCHOR_END: event_manager */
|
||||
|
||||
/* ANCHOR: scheduler_fuzzer */
|
||||
// A queue policy to get testcases from the corpus
|
||||
let scheduler = QueueScheduler::new();
|
||||
|
||||
// A fuzzer with feedbacks and a corpus scheduler
|
||||
let mut fuzzer = StdFuzzer::new(scheduler, (), ());
|
||||
/* ANCHOR_END: scheduler_fuzzer */
|
||||
|
||||
/* ANCHOR: executor */
|
||||
// Create the executor for an in-process function
|
||||
let mut executor = InProcessExecutor::new(&mut harness, (), &mut fuzzer, &mut state, &mut mgr)
|
||||
.expect("Failed to create the Executor");
|
||||
/* ANCHOR_END: executor */
|
||||
|
||||
/* ANCHOR: generator */
|
||||
// Generator of printable bytearrays of max size 32
|
||||
let mut generator = RandPrintablesGenerator::new(32);
|
||||
|
||||
// Generate 8 initial inputs
|
||||
state
|
||||
.generate_initial_inputs(&mut fuzzer, &mut executor, &mut generator, &mut mgr, 8)
|
||||
.expect("Failed to generate the initial corpus");
|
||||
/* ANCHOR_END: generator */
|
||||
}
|
23
docs/listings/baby_fuzzer/listing-05/Cargo.toml
Normal file
@ -0,0 +1,23 @@
|
||||
[package]
|
||||
name = "baby_fuzzer_listing_05"
|
||||
version = "0.1.0"
|
||||
authors = ["Your Name <you@example.com>"]
|
||||
edition = "2018"
|
||||
|
||||
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
|
||||
|
||||
[dependencies]
|
||||
libafl = { path = "path/to/libafl/" }
|
||||
|
||||
[profile.dev]
|
||||
panic = "abort"
|
||||
|
||||
[profile.release]
|
||||
panic = "abort"
|
||||
lto = true
|
||||
codegen-units = 1
|
||||
opt-level = 3
|
||||
debug = true
|
||||
|
||||
[features]
|
||||
panic = []
|
116
docs/listings/baby_fuzzer/listing-05/src/main.rs
Normal file
@ -0,0 +1,116 @@
|
||||
/* ANCHOR: use */
|
||||
extern crate libafl;
|
||||
extern crate libafl_bolts;
|
||||
|
||||
use libafl::{
|
||||
corpus::{InMemoryCorpus, OnDiskCorpus},
|
||||
events::SimpleEventManager,
|
||||
executors::{inprocess::InProcessExecutor, ExitKind},
|
||||
feedbacks::{CrashFeedback, MaxMapFeedback},
|
||||
fuzzer::StdFuzzer,
|
||||
generators::RandPrintablesGenerator,
|
||||
inputs::{BytesInput, HasTargetBytes},
|
||||
monitors::SimpleMonitor,
|
||||
observers::StdMapObserver,
|
||||
schedulers::QueueScheduler,
|
||||
state::StdState,
|
||||
};
|
||||
use libafl_bolts::{rands::StdRand, tuples::tuple_list, AsSlice};
|
||||
use std::path::PathBuf;
|
||||
/* ANCHOR_END: use */
|
||||
|
||||
/* ANCHOR: signals */
|
||||
// Coverage map with explicit assignments due to the lack of instrumentation
|
||||
static mut SIGNALS: [u8; 16] = [0; 16];
|
||||
|
||||
fn signals_set(idx: usize) {
|
||||
unsafe { SIGNALS[idx] = 1 };
|
||||
}
|
||||
|
||||
fn main() {
|
||||
// The closure that we want to fuzz
|
||||
let mut harness = |input: &BytesInput| {
|
||||
let target = input.target_bytes();
|
||||
let buf = target.as_slice();
|
||||
signals_set(0); // set SIGNALS[0]
|
||||
if buf.len() > 0 && buf[0] == 'a' as u8 {
|
||||
signals_set(1); // set SIGNALS[1]
|
||||
if buf.len() > 1 && buf[1] == 'b' as u8 {
|
||||
signals_set(2); // set SIGNALS[2]
|
||||
if buf.len() > 2 && buf[2] == 'c' as u8 {
|
||||
panic!("=)");
|
||||
}
|
||||
}
|
||||
}
|
||||
ExitKind::Ok
|
||||
};
|
||||
/* ANCHOR_END: signals */
|
||||
// To test the panic:
|
||||
let input = BytesInput::new(Vec::from("abc"));
|
||||
#[cfg(feature = "panic")]
|
||||
harness(&input);
|
||||
|
||||
/* ANCHOR: observer */
|
||||
// Create an observation channel using the signals map
|
||||
let observer = unsafe { StdMapObserver::new("signals", &mut SIGNALS) };
|
||||
/* ANCHOR_END: observer */
|
||||
|
||||
/* ANCHOR: state_with_feedback_and_objective */
|
||||
// Feedback to rate the interestingness of an input
|
||||
let mut feedback = MaxMapFeedback::new(&observer);
|
||||
|
||||
// A feedback to choose if an input is a solution or not
|
||||
let mut objective = CrashFeedback::new();
|
||||
|
||||
// create a State from scratch
|
||||
let mut state = StdState::new(
|
||||
// RNG
|
||||
StdRand::new(),
|
||||
// Corpus that will be evolved, we keep it in memory for performance
|
||||
InMemoryCorpus::new(),
|
||||
// Corpus in which we store solutions (crashes in this example),
|
||||
// on disk so the user can get them after stopping the fuzzer
|
||||
OnDiskCorpus::new(PathBuf::from("./crashes")).unwrap(),
|
||||
&mut feedback,
|
||||
&mut objective,
|
||||
)
|
||||
.unwrap();
|
||||
/* ANCHOR_END: state_with_feedback_and_objective */
|
||||
|
||||
// The Monitor trait defines how the fuzzer stats are displayed to the user
|
||||
let mon = SimpleMonitor::new(|s| println!("{s}"));
|
||||
|
||||
// The event manager handles the various events generated during the fuzzing loop
|
||||
// such as the notification of the addition of a new item to the corpus
|
||||
let mut mgr = SimpleEventManager::new(mon);
|
||||
|
||||
// A queue policy to get testcases from the corpus
|
||||
let scheduler = QueueScheduler::new();
|
||||
/* ANCHOR: state_with_feedback_and_objective */
|
||||
|
||||
// A fuzzer with feedbacks and a corpus scheduler
|
||||
let mut fuzzer = StdFuzzer::new(scheduler, feedback, objective);
|
||||
/* ANCHOR_END: state_with_feedback_and_objective */
|
||||
|
||||
/* ANCHOR: executor_with_observer */
|
||||
// Create the executor for an in-process function with just one observer
|
||||
let mut executor = InProcessExecutor::new(
|
||||
&mut harness,
|
||||
tuple_list!(observer),
|
||||
&mut fuzzer,
|
||||
&mut state,
|
||||
&mut mgr,
|
||||
)
|
||||
.expect("Failed to create the Executor");
|
||||
/* ANCHOR_END: executor_with_observer */
|
||||
|
||||
// Generator of printable bytearrays of max size 32
|
||||
let mut generator = RandPrintablesGenerator::new(32);
|
||||
|
||||
// Generate 8 initial inputs
|
||||
state
|
||||
.generate_initial_inputs(&mut fuzzer, &mut executor, &mut generator, &mut mgr, 8)
|
||||
.expect("Failed to generate the initial corpus");
|
||||
/* ANCHOR: signals */
|
||||
}
|
||||
/* ANCHOR_END: signals */
|
24
docs/listings/baby_fuzzer/listing-06/Cargo.toml
Normal file
@ -0,0 +1,24 @@
|
||||
[package]
|
||||
name = "baby_fuzzer_listing_06"
|
||||
version = "0.1.0"
|
||||
authors = ["Your Name <you@example.com>"]
|
||||
edition = "2018"
|
||||
|
||||
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
|
||||
|
||||
[dependencies]
|
||||
libafl = { path = "path/to/libafl/" }
|
||||
libafl_bolts = { path = "path/to/libafl_bolts/" }
|
||||
|
||||
[profile.dev]
|
||||
panic = "abort"
|
||||
|
||||
[profile.release]
|
||||
panic = "abort"
|
||||
lto = true
|
||||
codegen-units = 1
|
||||
opt-level = 3
|
||||
debug = true
|
||||
|
||||
[features]
|
||||
panic = []
|
116
docs/listings/baby_fuzzer/listing-06/src/main.rs
Normal file
@ -0,0 +1,116 @@
|
||||
/* ANCHOR: use */
|
||||
extern crate libafl;
|
||||
extern crate libafl_bolts;
|
||||
|
||||
use libafl::{
|
||||
corpus::{InMemoryCorpus, OnDiskCorpus},
|
||||
events::SimpleEventManager,
|
||||
executors::{inprocess::InProcessExecutor, ExitKind},
|
||||
feedbacks::{CrashFeedback, MaxMapFeedback},
|
||||
fuzzer::{Fuzzer, StdFuzzer},
|
||||
generators::RandPrintablesGenerator,
|
||||
inputs::{BytesInput, HasTargetBytes},
|
||||
monitors::SimpleMonitor,
|
||||
mutators::scheduled::{havoc_mutations, StdScheduledMutator},
|
||||
observers::StdMapObserver,
|
||||
schedulers::QueueScheduler,
|
||||
stages::mutational::StdMutationalStage,
|
||||
state::StdState,
|
||||
};
|
||||
use libafl_bolts::{rands::StdRand, tuples::tuple_list, AsSlice};
|
||||
use std::path::PathBuf;
|
||||
/* ANCHOR_END: use */
|
||||
|
||||
// Coverage map with explicit assignments due to the lack of instrumentation
|
||||
static mut SIGNALS: [u8; 16] = [0; 16];
|
||||
|
||||
fn signals_set(idx: usize) {
|
||||
unsafe { SIGNALS[idx] = 1 };
|
||||
}
|
||||
|
||||
fn main() {
|
||||
// The closure that we want to fuzz
|
||||
let mut harness = |input: &BytesInput| {
|
||||
let target = input.target_bytes();
|
||||
let buf = target.as_slice();
|
||||
signals_set(0); // set SIGNALS[0]
|
||||
if buf.len() > 0 && buf[0] == 'a' as u8 {
|
||||
signals_set(1); // set SIGNALS[1]
|
||||
if buf.len() > 1 && buf[1] == 'b' as u8 {
|
||||
signals_set(2); // set SIGNALS[2]
|
||||
if buf.len() > 2 && buf[2] == 'c' as u8 {
|
||||
panic!("=)");
|
||||
}
|
||||
}
|
||||
}
|
||||
ExitKind::Ok
|
||||
};
|
||||
// To test the panic:
|
||||
let input = BytesInput::new(Vec::from("abc"));
|
||||
#[cfg(feature = "panic")]
|
||||
harness(&input);
|
||||
|
||||
// Create an observation channel using the signals map
|
||||
let observer = unsafe { StdMapObserver::new("signals", &mut SIGNALS) };
|
||||
|
||||
// Feedback to rate the interestingness of an input
|
||||
let mut feedback = MaxMapFeedback::new(&observer);
|
||||
|
||||
// A feedback to choose if an input is a solution or not
|
||||
let mut objective = CrashFeedback::new();
|
||||
|
||||
// create a State from scratch
|
||||
let mut state = StdState::new(
|
||||
// RNG
|
||||
StdRand::new(),
|
||||
// Corpus that will be evolved, we keep it in memory for performance
|
||||
InMemoryCorpus::new(),
|
||||
// Corpus in which we store solutions (crashes in this example),
|
||||
// on disk so the user can get them after stopping the fuzzer
|
||||
OnDiskCorpus::new(PathBuf::from("./crashes")).unwrap(),
|
||||
&mut feedback,
|
||||
&mut objective,
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
// The Monitor trait defines how the fuzzer stats are displayed to the user
|
||||
let mon = SimpleMonitor::new(|s| println!("{s}"));
|
||||
|
||||
// The event manager handles the various events generated during the fuzzing loop
|
||||
// such as the notification of the addition of a new item to the corpus
|
||||
let mut mgr = SimpleEventManager::new(mon);
|
||||
|
||||
// A queue policy to get testcasess from the corpus
|
||||
let scheduler = QueueScheduler::new();
|
||||
|
||||
// A fuzzer with feedbacks and a corpus scheduler
|
||||
let mut fuzzer = StdFuzzer::new(scheduler, feedback, objective);
|
||||
|
||||
// Create the executor for an in-process function with just one observer
|
||||
let mut executor = InProcessExecutor::new(
|
||||
&mut harness,
|
||||
tuple_list!(observer),
|
||||
&mut fuzzer,
|
||||
&mut state,
|
||||
&mut mgr,
|
||||
)
|
||||
.expect("Failed to create the Executor");
|
||||
|
||||
// Generator of printable bytearrays of max size 32
|
||||
let mut generator = RandPrintablesGenerator::new(32);
|
||||
|
||||
// Generate 8 initial inputs
|
||||
state
|
||||
.generate_initial_inputs(&mut fuzzer, &mut executor, &mut generator, &mut mgr, 8)
|
||||
.expect("Failed to generate the initial corpus");
|
||||
|
||||
/* ANCHOR: mutational_stage */
|
||||
// Setup a mutational stage with a basic bytes mutator
|
||||
let mutator = StdScheduledMutator::new(havoc_mutations());
|
||||
let mut stages = tuple_list!(StdMutationalStage::new(mutator));
|
||||
|
||||
fuzzer
|
||||
.fuzz_loop(&mut stages, &mut executor, &mut state, &mut mgr)
|
||||
.expect("Error in the fuzzing loop");
|
||||
/* ANCHOR_END: mutational_stage */
|
||||
}
|
32
docs/src/DEBUGGING.md
Normal file
@ -0,0 +1,32 @@
|
||||
# General debugging tips
|
||||
This file answers some common questions that arise when you are writing a fuzzer using LibAFL.
|
||||
|
||||
## Q. My fuzzer crashed but the stack trace is useless.
|
||||
You can enable the `errors_backtrace` feature of the `libafl` crate. With this feature enabled, the stack trace is meaningful.
|
||||
|
||||
## Q. I started the fuzzer but the corpus count is 0.
|
||||
Unless the initial corpus is loaded with the `load_initial_inputs_forced` function, we only store the interesting inputs, i.e. the inputs that triggered the feedback. So this usually means that your inputs were not considered interesting, or that your target was simply not properly implemented.
|
||||
Either way, what you can do is attach to the executable with gdb and set a breakpoint where the new edges should be reported. If no instrumentation code is executed, then the problem is in the instrumentation. If the instrumentation code is hit but your input is still not stored, then the problem could be that you are not passing the observer/feedback correctly to the fuzzer.
|
||||
|
||||
## Q. I started the fuzzer but the coverage is 0.
|
||||
This could mean one of two things: either your target was not properly instrumented, or you are not using the correct observer/feedback.
|
||||
In this case, again, what you usually should do is run the fuzzer under gdb, set a breakpoint where the coverage is recorded (e.g. `__sanitizer_cov_trace_pc_guard`), and validate that the target is actually reporting coverage back to the fuzzer.
|
||||
|
||||
## Q. I started the fuzzer but there's no output.
|
||||
First, verify that your stdout and stderr are not redirected to `/dev/null`. If you do get logs, then the problem should fall into one of the previous two cases: either the fuzzer crashed because you didn't have initial seeds, or the coverage feedback is not working.
|
||||
|
||||
## Q. My fuzzer is slow.
|
||||
Try running the fuzzer with the `introspection` feature of the `libafl` crate. This will show how much time is spent in each module of your fuzzer. You might also be using the wrong coverage map size. If you see `2621440` as the size of your coverage map, you are doing it wrong. One common mistake is the misuse of `libafl_targets::coverage::EDGES_MAP`:
|
||||
```rust
|
||||
let map = StdMapObserver::from_mut_ptr("edges", EDGES_MAP.as_mut_ptr(), EDGES_MAP.len());
|
||||
```
|
||||
You should *never* use the full `EDGES_MAP` size, as this is just the allocated size of the coverage map. Consider using something smaller, or our default value `libafl_targets::LIBAFL_EDGES_MAP_SIZE_IN_USE`.
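For illustration, here is a hedged sketch of the intended usage; the constant name `LIBAFL_EDGES_MAP_SIZE_IN_USE` is taken from the text above, and both it and the exact observer constructor may differ between LibAFL versions:

```rust
// Hedged sketch: observe only the in-use prefix of the coverage map instead of
// the full allocated `EDGES_MAP`. Constant and constructor names may differ
// between libafl / libafl_targets versions.
use libafl::observers::StdMapObserver;
use libafl_targets::{coverage::EDGES_MAP, LIBAFL_EDGES_MAP_SIZE_IN_USE};

fn main() {
    let edges_observer = unsafe {
        StdMapObserver::from_mut_ptr(
            "edges",
            EDGES_MAP.as_mut_ptr(),
            LIBAFL_EDGES_MAP_SIZE_IN_USE, // not EDGES_MAP.len()
        )
    };
    // Pass `edges_observer` to your executor and feedback as usual.
    let _ = edges_observer;
}
```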
|
||||
|
||||
## Q. I still have problems with my fuzzer.
|
||||
Finally, if you really have no idea what is going on, run your fuzzer with logging enabled (you can use `env_logger`, `SimpleStdoutLogger`, or `SimpleStderrLogger` from `libafl_bolts`; the `fuzzbench_text` fuzzer has an example showing how to use them, and don't forget to enable stdout and stderr). Then you can open an issue or ask us on Discord.
|
||||
|
||||
## Q. My fuzzer died with "Storing state in crashed fuzzer instance did not work".
|
||||
If the exit code is zero, this is because either your harness exited, or you are using `fuzz_loop_for` and forgot to call `mgr.on_restart` at the end of the fuzzer. In the first case, you should patch your harness not to exit (or use `utils/deexit`).
|
||||
|
||||
## Q. I can't leave the TUI screen
|
||||
Type `q` to leave the TUI.
|
@ -25,6 +25,7 @@
|
||||
- [Architecture](./design/architecture.md)
|
||||
- [Metadata](./design/metadata.md)
|
||||
- [Migrating from LibAFL <0.9 to 0.9](./design/migration-0.9.md)
|
||||
- [Migrating from LibAFL <0.11 to 0.11](./design/migration-0.11.md)
|
||||
|
||||
- [Message Passing](./message_passing/message_passing.md)
|
||||
- [Spawning Instances](./message_passing/spawn_instances.md)
|
||||
|
@ -1,7 +1,8 @@
|
||||
# Concolic Tracing and Hybrid Fuzzing
|
||||
|
||||
LibAFL has support for concolic tracing based on the [SymCC](https://github.com/eurecom-s3/symcc) instrumenting compiler.
|
||||
|
||||
For those uninitiated, the following attempts to describe concolic tracing from the ground up using an example.
|
||||
For those uninitiated, the following text attempts to describe concolic tracing from the ground up using an example.
|
||||
Then, we'll go through the relationship of SymCC and LibAFL concolic tracing.
|
||||
Finally, we'll walk through building a basic hybrid fuzzer using LibAFL.
|
||||
|
||||
@ -92,18 +93,18 @@ In hybrid fuzzing, we combine this tracing + solving approach with more traditio
|
||||
The concolic tracing support in LibAFL is implemented using SymCC.
|
||||
SymCC is a compiler plugin for clang that can be used as a drop-in replacement for a normal C or C++ compiler.
|
||||
SymCC will instrument the compiled code with callbacks into a runtime that can be supplied by the user.
|
||||
These callbacks allow the runtime to construct a trace that similar to the previous example.
|
||||
These callbacks allow the runtime to construct a trace that is similar to the previous example.
|
||||
|
||||
### SymCC and its Runtimes
|
||||
|
||||
SymCC ships with 2 runtimes:
|
||||
|
||||
* a 'simple' runtime that attempts to solve any branches it comes across using [Z3](https://github.com/Z3Prover/z3/wiki) and
|
||||
* a [QSym](https://github.com/sslab-gatech/qsym)-based runtime, which does a bit more filtering on the expressions and also solves using Z3.
|
||||
* A 'simple' runtime that attempts to negate and analytically solve any branch conditions it comes across using [Z3](https://github.com/Z3Prover/z3/wiki) and
|
||||
* A [QSym](https://github.com/sslab-gatech/qsym)-based runtime, which does a bit more filtering on the expressions and also solves them using Z3.
|
||||
|
||||
The integration with LibAFL, however, requires you to **BYORT** (_bring your own runtime_) using the [`symcc_runtime`](https://docs.rs/symcc_runtime/0.1/symcc_runtime) crate.
|
||||
This crate allows you to easily build a custom runtime out of the built-in building blocks or create entirely new runtimes with full flexibility.
|
||||
Checkout out the `symcc_runtime` docs for more information on how to build your own runtime.
|
||||
Check out the `symcc_runtime` docs for more information on how to build your own runtime.
|
||||
|
||||
### SymQEMU
|
||||
|
||||
@ -123,7 +124,7 @@ There are three main steps involved with building a hybrid fuzzer using LibAFL:
|
||||
3. building the fuzzer.
|
||||
|
||||
Note that the order of these steps is important.
|
||||
For example, we need to have runtime ready before we can do instrumentation with SymCC.
|
||||
For example, we need to have a runtime ready before we can do instrumentation with SymCC.
|
||||
|
||||
### Building a Runtime
|
||||
|
||||
@ -134,10 +135,12 @@ Check out the [example hybrid fuzzer's runtime](https://github.com/AFLplusplus/L
|
||||
### Instrumentation
|
||||
|
||||
There are two main instrumentation methods to make use of concolic tracing in LibAFL:
|
||||
* Using an **compile-time** instrumented target with **SymCC**.
|
||||
|
||||
* Using a **compile-time** instrumented target with **SymCC**.
|
||||
This only works when the source is available for the target and the target is reasonably easy to build using the SymCC compiler wrapper.
|
||||
* Using **SymQEMU** to dynamically instrument the target at **runtime**.
|
||||
This avoids a separate instrumented target with concolic tracing instrumentation and does not require source code.
|
||||
This avoids building a separate instrumented target with concolic tracing instrumentation and so does not require source code.
|
||||
|
||||
It should be noted, however, that the 'quality' of the generated expressions can be significantly worse: SymQEMU generally produces significantly more, and significantly more convoluted, expressions than SymCC.
|
||||
Therefore, it is recommended to use SymCC over SymQEMU when possible.
|
||||
|
||||
@ -158,25 +161,25 @@ Make sure you satisfy the [build requirements](https://github.com/eurecom-s3/sym
|
||||
|
||||
Build SymQEMU according to its [build instructions](https://github.com/eurecom-s3/symqemu#readme).
|
||||
By default, SymQEMU looks for the runtime in a sibling directory.
|
||||
Since we don't have a runtime there, we need to let it know the path to your runtime by setting `--symcc-build` argument of the `configure` script to the path of your runtime.
|
||||
Since we don't have a runtime there, we need to explicitly set the `--symcc-build` argument of the `configure` script to the path of your runtime.
|
||||
|
||||
### Building the Fuzzer
|
||||
|
||||
No matter the instrumentation method, the interface between the fuzzer and the instrumented target should now be consistent.
|
||||
The only difference between using SymCC and SymQEMU should be the binary that represents the target:
|
||||
In the case of SymCC it will be the binary that was build with instrumentation and with SymQEMU it will be the emulator binary (eg. `x86_64-linux-user/symqemu-x86_64`), followed by your uninstrumented target binary and arguments.
|
||||
In the case of SymCC it will be the binary that was built with instrumentation and with SymQEMU it will be the emulator binary (e.g. `x86_64-linux-user/symqemu-x86_64`), followed by your uninstrumented target binary and its arguments.
|
||||
|
||||
You can use the [`CommandExecutor`](https://docs.rs/libafl/0.6.0/libafl/executors/command/struct.CommandExecutor.html) to execute your target ([example](https://github.com/AFLplusplus/LibAFL/blob/main/fuzzers/libfuzzer_stb_image_concolic/fuzzer/src/main.rs#L244)).
|
||||
When configuring the command, make sure you pass the `SYMCC_INPUT_FILE` environment variable the input file path, if your target reads input from a file (instead of standard input).
|
||||
You can use the [`CommandExecutor`](https://docs.rs/libafl/latest/libafl/executors/command/struct.CommandExecutor.html) to execute your target ([example](https://github.com/AFLplusplus/LibAFL/blob/main/fuzzers/libfuzzer_stb_image_concolic/fuzzer/src/main.rs#L244)).
|
||||
When configuring the command, make sure you pass the `SYMCC_INPUT_FILE` environment variable (set to the input file path), if your target reads input from a file (instead of standard input).
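As a hedged sketch (the binary paths, the temporary input file and the observer name are assumptions, and builder method names may differ slightly between LibAFL versions), a SymQEMU-based command could look like this:

```rust,ignore
// Run the uninstrumented target under SymQEMU and tell the concolic runtime which
// file holds the current input via SYMCC_INPUT_FILE.
let mut executor = CommandExecutor::builder()
    .program("./symqemu-x86_64")                    // emulator binary
    .arg("./target_binary")                         // uninstrumented target
    .arg("/tmp/concolic_input")                     // the target reads its input from this file
    .env("SYMCC_INPUT_FILE", "/tmp/concolic_input") // same file, announced to the runtime
    .build(tuple_list!(concolic_observer))?;
```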
|
||||
|
||||
#### Serialization and Solving
|
||||
|
||||
While it is perfectly possible to build a custom runtime that also performs the solving step of hybrid fuzzing in the context of the target process, the intended use of the LibAFL concolic tracing support is to serialize the (filtered and pre-processed) branch conditions using the [`TracingRuntime`](https://docs.rs/symcc_runtime/0.1/symcc_runtime/tracing/struct.TracingRuntime.html).
|
||||
This serialized representation can be deserialized in the fuzzer process for solving using a [`ConcolicObserver`](https://docs.rs/libafl/0.6.0/libafl/observers/concolic/struct.ConcolicObserver.html) wrapped in a [`ConcolicTracingStage`](https://docs.rs/libafl/0.6.0/libafl/stages/concolic/struct.ConcolicTracingStage.html), which will attach a [`ConcolicMetadata`](https://docs.rs/libafl/0.6.0/libafl/observers/concolic/struct.ConcolicMetadata.html) to every [`TestCase`](https://docs.rs/libafl/0.6.0/libafl/corpus/testcase/struct.Testcase.html).
|
||||
This serialized representation can be deserialized in the fuzzer process for solving using a [`ConcolicObserver`](https://docs.rs/libafl/latest/libafl/observers/concolic/struct.ConcolicObserver.html) wrapped in a [`ConcolicTracingStage`](https://docs.rs/libafl/latest/libafl/stages/concolic/struct.ConcolicTracingStage.html), which will attach a [`ConcolicMetadata`](https://docs.rs/libafl/latest/libafl/observers/concolic/struct.ConcolicMetadata.html) to every [`TestCase`](https://docs.rs/libafl/latest/libafl/corpus/testcase/struct.Testcase.html).
|
||||
|
||||
The `ConcolicMetadata` can be used to replay the concolic trace and solved using an SMT-Solver.
|
||||
The `ConcolicMetadata` can be used to replay the concolic trace and to solve the conditions using an SMT-Solver.
|
||||
Most use-cases involving concolic tracing, however, will need to define some policy around which branches they want to solve.
|
||||
The [`SimpleConcolicMutationalStage`](https://docs.rs/libafl/0.6.0//libafl/stages/concolic/struct.SimpleConcolicMutationalStage.html) can be used for testing purposes.
|
||||
The [`SimpleConcolicMutationalStage`](https://docs.rs/libafl/latest/libafl/stages/concolic/struct.SimpleConcolicMutationalStage.html) can be used for testing purposes.
|
||||
It will attempt to solve all branches, like the original simple backend from SymCC, using Z3.
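As a rough sketch of how these stages fit together (the tracing stage is left as a placeholder here, since its constructor arguments vary between LibAFL versions and are therefore an assumption):

```rust,ignore
// A stage tuple: first run the SymCC/SymQEMU target and attach ConcolicMetadata to
// the testcase, then try to solve every recorded branch with Z3 (testing policy only).
let mut stages = tuple_list!(
    concolic_tracing_stage, // e.g. a ConcolicTracingStage wrapping the concolic executor
    SimpleConcolicMutationalStage::default()
);
```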
|
||||
|
||||
### Example
|
||||
|
@ -17,7 +17,7 @@ If you are on Windows, you'll need to install llvm tools.
|
||||
LibAFL uses Frida's [__Stalker__](https://frida.re/docs/stalker/) to trace the execution of your program and instrument your harness.
|
||||
Thus, you have to compile your harness to a dynamic library. Frida instruments your PUT after dynamically loading it.
|
||||
|
||||
For example in our `frida_libpng` example, we load the dynamic library and find the symbol to harness as follows:
|
||||
In our `frida_libpng` example, we load the dynamic library and find the symbol to harness as follows:
|
||||
|
||||
```rust,ignore
|
||||
let lib = libloading::Library::new(module_name).unwrap();
|
||||
@ -28,9 +28,9 @@ For example in our `frida_libpng` example, we load the dynamic library and find
|
||||
|
||||
## `FridaInstrumentationHelper` and Runtimes
|
||||
|
||||
To use functionalities that Frida offers, we'll first need to obtain `Gum` object by `Gum::obtain()`.
|
||||
To use the functionality that Frida offers, we'll first need to obtain a `Gum` object by calling `Gum::obtain()`.
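For instance:

```rust,ignore
// Obtain the global Gum instance once, before any Frida-based instrumentation is set up.
let gum = Gum::obtain();
```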
|
||||
|
||||
In LibAFL, we use the `FridaInstrumentationHelper` struct to manage frida-related state. `FridaInstrumentationHelper` is a key component that sets up the [__Transformer__](https://frida.re/docs/stalker/#transformer) that is used to generate the instrumented code. It also initializes the `Runtimes` that offer various instrumentation.
|
||||
In LibAFL, we use the `FridaInstrumentationHelper` struct to manage frida-related state. `FridaInstrumentationHelper` is a key component that sets up the [__Transformer__](https://frida.re/docs/stalker/#transformer) that is used to generate the instrumented code. It also initializes the `Runtimes` that offer various instrumentations.
|
||||
|
||||
We have `CoverageRuntime` that can track the edge coverage, `AsanRuntime` for address sanitizer, `DrCovRuntime` that uses [__DrCov__](https://dynamorio.org/page_drcov.html) for coverage collection (to be imported in coverage tools like Lighthouse, bncov, dragondance,...), and `CmpLogRuntime` for cmplog instrumentation.
|
||||
All of these runtimes can be slotted into `FridaInstrumentationHelper` at build time.
|
||||
@ -53,12 +53,12 @@ Combined with any `Runtime` you'd like to use, you can initialize the `FridaInst
|
||||
|
||||
## Running the Fuzzer
|
||||
|
||||
After setting up the `FridaInstrumentationHelper`. You can obtain the pointer to the coverage map by calling `map_ptr_mut()`.
|
||||
After setting up the `FridaInstrumentationHelper` you can obtain the pointer to the coverage map by calling `map_mut_ptr()`.
|
||||
|
||||
```rust,ignore
|
||||
let edges_observer = HitcountsMapObserver::new(StdMapObserver::new_from_ptr(
|
||||
let edges_observer = HitcountsMapObserver::new(StdMapObserver::from_mut_ptr(
|
||||
"edges",
|
||||
frida_helper.map_ptr_mut().unwrap(),
|
||||
frida_helper.map_mut_ptr().unwrap(),
|
||||
MAP_SIZE,
|
||||
));
|
||||
```
|
||||
@ -73,7 +73,7 @@ You can then link this observer to `FridaInProcessExecutor` as follows:
|
||||
tuple_list!(
|
||||
edges_observer,
|
||||
time_observer,
|
||||
AsanErrorsObserver::new(&ASAN_ERRORS)
|
||||
AsanErrorsObserver::from_static_asan_errors()
|
||||
),
|
||||
&mut fuzzer,
|
||||
&mut state,
|
||||
@ -83,5 +83,5 @@ You can then link this observer to `FridaInProcessExecutor` as follows:
|
||||
);
|
||||
```
|
||||
|
||||
And, finally you can run the fuzzer.
|
||||
And finally you can run the fuzzer.
|
||||
See the `frida_` examples in [`./fuzzers`](https://github.com/AFLplusplus/LibAFL/tree/main/fuzzers/) for more information and, for linux or full-system, play around with `libafl_qemu`, another binary-only tracer.
|
@ -1,6 +1,6 @@
|
||||
# Using LibAFL in `no_std` environments
|
||||
|
||||
It is possible to use LibAFL in `no_std` environments e.g. custom platforms like microcontrollers, kernels, hypervisors, and more.
|
||||
It is possible to use LibAFL in `no_std` environments e.g. on custom platforms like microcontrollers, kernels, hypervisors, and more.
|
||||
|
||||
You can simply add LibAFL to your `Cargo.toml` file:
|
||||
|
||||
@ -16,7 +16,7 @@ cargo build --no-default-features --target aarch64-unknown-none
|
||||
|
||||
## Use custom timing
|
||||
|
||||
The minimum amount of input LibAFL needs for `no_std` is a monotonically increasing timestamp.
|
||||
The minimum amount of support LibAFL needs for a `no_std` environment is a monotonically increasing timestamp.
|
||||
For this, anywhere in your project you need to implement the `external_current_millis` function, which returns the current time in milliseconds.
|
||||
|
||||
```c
|
||||
|
@ -2,12 +2,12 @@
|
||||
|
||||
NYX supports both source-based and binary-only fuzzing.
|
||||
|
||||
Currently, `libafl_nyx` only supports [afl++](https://github.com/AFLplusplus/AFLplusplus)'s instruction. To install it, you can use `sudo apt install aflplusplus`. Or compile from the source:
|
||||
Currently, `libafl_nyx` only supports [afl++](https://github.com/AFLplusplus/AFLplusplus)'s instrumentation. To install it, you can use `sudo apt install aflplusplus`, or compile it from source:
|
||||
|
||||
```bash
|
||||
git clone https://github.com/AFLplusplus/AFLplusplus
|
||||
cd AFLplusplus
|
||||
make all # this will not compile afl's additional extension
|
||||
make all # this will not compile afl's additional extensions
|
||||
```
|
||||
|
||||
Then you should compile the target with the afl++ compiler wrapper:
|
||||
@ -20,9 +20,9 @@ export CXX=afl-clang-fast++
|
||||
make
|
||||
```
|
||||
|
||||
For binary-only fuzzing, Nyx uses intel-PT(Intel® Processor Trace). You can find the supported CPU at <https://www.intel.com/content/www/us/en/support/articles/000056730/processors.html>.
|
||||
For binary-only fuzzing, Nyx uses Intel PT (Intel® Processor Trace). You can find the list of supported CPUs at <https://www.intel.com/content/www/us/en/support/articles/000056730/processors.html>.
|
||||
|
||||
## Preparing Nyx working directory
|
||||
## Preparing the Nyx working directory
|
||||
|
||||
This step is used to pack the target into Nyx's kernel. Don't worry, we have a template shell script in our [example](https://github.com/AFLplusplus/LibAFL/blob/main/fuzzers/nyx_libxml2_parallel/setup_libxml2.sh):
|
||||
|
||||
@ -49,7 +49,7 @@ python3 ./packer/packer/nyx_config_gen.py /tmp/nyx_libxml2/ Kernel || exit
|
||||
|
||||
## Standalone fuzzing
|
||||
|
||||
In the [example fuzzer](https://github.com/AFLplusplus/LibAFL/blob/main/fuzzers/nyx_libxml2_standalone/src/main.rs). First you need to run `./setup_libxml2.sh`, It will prepare your target and create your nyx work directory in `/tmp/libxml2`. After that, you can start write your code.
|
||||
In the [example fuzzer](https://github.com/AFLplusplus/LibAFL/blob/main/fuzzers/nyx_libxml2_standalone/src/main.rs) you first need to run `./setup_libxml2.sh`. It will prepare your target and create your nyx work directory in `/tmp/libxml2`. After that, you can start to write your code.
|
||||
|
||||
First, create the `NyxHelper`:
|
||||
|
||||
@ -57,22 +57,21 @@ First, to create `Nyxhelper`:
|
||||
let share_dir = Path::new("/tmp/nyx_libxml2/");
|
||||
let cpu_id = 0; // use first cpu
|
||||
let parallel_mode = false; // disable parallel mode
|
||||
let mut helper = NyxHelper::new(share_dir, cpu_id, true, parallel_mode, None).unwrap(); // we don't the set the last parameter in standalone mode, we just use None, here
|
||||
let mut helper = NyxHelper::new(share_dir, cpu_id, true, parallel_mode, None).unwrap(); // we don't need to set the last parameter in standalone mode, we just use None, here
|
||||
```
|
||||
|
||||
Then, fetch `trace_bits`, create an observer and the `NyxExecutor`:
|
||||
|
||||
```rust,ignore
|
||||
let trace_bits = unsafe { std::slice::from_raw_parts_mut(helper.trace_bits, helper.map_size) };
|
||||
let observer = StdMapObserver::new("trace", trace_bits);
|
||||
let observer = unsafe { StdMapObserver::from_mut_ptr("trace", helper.trace_bits, helper.map_size) };
|
||||
let mut executor = NyxExecutor::new(&mut helper, tuple_list!(observer)).unwrap();
|
||||
```
|
||||
|
||||
Finally, use them as normal and pass them into `fuzzer.fuzz_loop(&mut stages, &mut executor, &mut state, &mut mgr)` to start fuzzing.
|
||||
Finally, use them normally and pass them into `fuzzer.fuzz_loop(&mut stages, &mut executor, &mut state, &mut mgr)` to start fuzzing.
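For reference, a minimal sketch of that final call (assuming `stages`, `state` and `mgr` have been created as in the other examples of this book):

```rust,ignore
// Everything else (scheduler, feedback, stages, ...) is set up as in a regular fuzzer;
// only the executor is Nyx-specific.
fuzzer
    .fuzz_loop(&mut stages, &mut executor, &mut state, &mut mgr)
    .expect("Error in the fuzzing loop");
```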
|
||||
|
||||
## Parallel fuzzing
|
||||
|
||||
In the [example fuzzer](https://github.com/AFLplusplus/LibAFL/blob/main/fuzzers/nyx_libxml2_parallel/src/main.rs). First you need to run `./setup_libxml2.sh` as described before.
|
||||
In the [example fuzzer](https://github.com/AFLplusplus/LibAFL/blob/main/fuzzers/nyx_libxml2_parallel/src/main.rs) you first need to run `./setup_libxml2.sh` as described before.
|
||||
|
||||
Parallel fuzzing relies on [`Launcher`](../message_passing/spawn_instances.md), so the spawn logic should be written in the scope of the anonymous function `run_client`:
|
||||
|
||||
@ -91,7 +90,7 @@ let mut helper = NyxHelper::new(
|
||||
cpu_id, // current cpu id
|
||||
true, // enable snapshot mode
|
||||
parallel_mode, // enable parallel mode
|
||||
Some(parent_cpu_id.id as u32), // the cpu-id of master instance, there is only one master instance, other instances will be treated as slaved
|
||||
Some(parent_cpu_id.id as u32), // the cpu-id of main instance, there is only one main instance, other instances will be treated as secondaries
|
||||
)
|
||||
.unwrap();
|
||||
```
|
||||
@ -99,13 +98,11 @@ let mut helper = NyxHelper::new(
|
||||
Then you can fetch the trace_bits and create an observer and `NyxExecutor`
|
||||
|
||||
```rust,ignore
|
||||
let trace_bits =
|
||||
unsafe { std::slice::from_raw_parts_mut(helper.trace_bits, helper.map_size) };
|
||||
let observer = StdMapObserver::new("trace", trace_bits);
|
||||
let observer = unsafe { StdMapObserver::from_mut_ptr("trace", helper.trace_bits, helper.map_size) };
|
||||
let mut executor = NyxExecutor::new(&mut helper, tuple_list!(observer)).unwrap();
|
||||
```
|
||||
|
||||
Finally, open a `Launcher` as normal to start fuzzing:
|
||||
Finally, open a `Launcher` as usual to start fuzzing:
|
||||
|
||||
```rust,ignore
|
||||
match Launcher::builder()
|
||||
@ -121,6 +118,6 @@ match Launcher::builder()
|
||||
{
|
||||
Ok(()) => (),
|
||||
Err(Error::ShuttingDown) => println!("Fuzzing stopped by user. Good bye."),
|
||||
Err(err) => panic!("Failed to run launcher: {:?}", err),
|
||||
Err(err) => panic!("Failed to run launcher: {err:?}"),
|
||||
}
|
||||
```
|
||||
|
@ -17,7 +17,7 @@ You can find a complete version of this tutorial as an example fuzzer in [`fuzze
|
||||
|
||||
We use cargo to create a new Rust project with LibAFL as a dependency.
|
||||
|
||||
```sh
|
||||
```console
|
||||
$ cargo new baby_fuzzer
|
||||
$ cd baby_fuzzer
|
||||
```
|
||||
@ -25,18 +25,11 @@ $ cd baby_fuzzer
|
||||
The generated `Cargo.toml` looks like the following:
|
||||
|
||||
```toml
|
||||
[package]
|
||||
name = "baby_fuzzer"
|
||||
version = "0.1.0"
|
||||
authors = ["Your Name <you@example.com>"]
|
||||
edition = "2018"
|
||||
|
||||
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
|
||||
|
||||
[dependencies]
|
||||
{{#include ../../listings/baby_fuzzer/listing-01/Cargo.toml}}
|
||||
```
|
||||
|
||||
In order to use LibAFL we must add it as a dependency by adding `libafl = { path = "path/to/libafl/" }` under `[dependencies]`.
|
||||
That path actually needs to point to the `libafl` directory within the cloned repo, not the root of the repo itself.
|
||||
You can use the LibAFL version from [crates.io](https://crates.io/crates/libafl) if you want; in this case, you have to use `libafl = "*"` to get the latest version (or set it to the current version).
|
||||
|
||||
As we are going to fuzz Rust code, we want a panic to not simply cause the program to exit, but to raise an `abort` that can then be caught by the fuzzer.
|
||||
@ -47,28 +40,10 @@ Alongside this setting, we add some optimization flags for the compilation, when
|
||||
The final `Cargo.toml` should look similar to the following:
|
||||
|
||||
```toml
|
||||
[package]
|
||||
name = "baby_fuzzer"
|
||||
version = "0.1.0"
|
||||
authors = ["Your Name <you@example.com>"]
|
||||
edition = "2018"
|
||||
|
||||
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
|
||||
|
||||
[dependencies]
|
||||
libafl = { path = "path/to/libafl/" }
|
||||
|
||||
[profile.dev]
|
||||
panic = "abort"
|
||||
|
||||
[profile.release]
|
||||
panic = "abort"
|
||||
lto = true
|
||||
codegen-units = 1
|
||||
opt-level = 3
|
||||
debug = true
|
||||
{{#include ../../listings/baby_fuzzer/listing-02/Cargo.toml}}
|
||||
```
|
||||
|
||||
|
||||
## The function under test
|
||||
|
||||
Opening `src/main.rs`, we have an empty `main` function.
|
||||
@ -76,52 +51,32 @@ To start, we create the closure that we want to fuzz. It takes a buffer as input
|
||||
`ExitKind` is used to inform the fuzzer about the harness' exit status.
|
||||
|
||||
```rust
|
||||
extern crate libafl;
|
||||
use libafl::{
|
||||
bolts::AsSlice,
|
||||
inputs::{BytesInput, HasTargetBytes},
|
||||
executors::ExitKind,
|
||||
};
|
||||
|
||||
fn main(){
|
||||
let mut harness = |input: &BytesInput| {
|
||||
let target = input.target_bytes();
|
||||
let buf = target.as_slice();
|
||||
if buf.len() > 0 && buf[0] == 'a' as u8 {
|
||||
if buf.len() > 1 && buf[1] == 'b' as u8 {
|
||||
if buf.len() > 2 && buf[2] == 'c' as u8 {
|
||||
panic!("=)");
|
||||
}
|
||||
}
|
||||
}
|
||||
ExitKind::Ok
|
||||
};
|
||||
// To test the panic:
|
||||
let input = BytesInput::new(Vec::from("abc"));
|
||||
#[cfg(feature = "panic")]
|
||||
harness(&input);
|
||||
}
|
||||
{{#rustdoc_include ../../listings/baby_fuzzer/listing-03/src/main.rs}}
|
||||
```
|
||||
|
||||
To test the crash manually, you can add a feature in `Cargo.toml` that enables the call that triggers the panic:
|
||||
|
||||
```toml
|
||||
{{#include ../../listings/baby_fuzzer/listing-03/Cargo.toml:23:25}}
|
||||
```
|
||||
|
||||
And then run the program with that feature activated:
|
||||
|
||||
```console
|
||||
$ cargo run -F panic
|
||||
```
|
||||
|
||||
And you should see the program crash as expected.
|
||||
|
||||
## Generating and running some tests
|
||||
|
||||
One of the main components that a LibAFL-based fuzzer uses is the State, a container of the data that is evolved during the fuzzing process.
|
||||
Includes all State, such as the Corpus of inputs, the current RNG state, and potential Metadata for the testcases and run.
|
||||
One of the main components that a LibAFL-based fuzzer uses is the State, a container of the data that will evolve during the fuzzing process.
|
||||
It includes all state, such as the Corpus of inputs, the current RNG state, and potential Metadata for the testcases and run.
|
||||
In our `main` we create a basic State instance like the following:
|
||||
|
||||
```rust,ignore
|
||||
// create a State from scratch
|
||||
let mut state = StdState::new(
|
||||
// RNG
|
||||
StdRand::with_seed(current_nanos()),
|
||||
// Corpus that will be evolved, we keep it in memory for performance
|
||||
InMemoryCorpus::new(),
|
||||
// Corpus in which we store solutions (crashes in this example),
|
||||
// on disk so the user can get them after stopping the fuzzer
|
||||
OnDiskCorpus::new(PathBuf::from("./crashes")).unwrap(),
|
||||
&mut (),
|
||||
&mut ()
|
||||
).unwrap();
|
||||
|
||||
```rust
|
||||
{{#rustdoc_include ../../listings/baby_fuzzer/listing-04/src/main.rs:state}}
|
||||
```
|
||||
|
||||
- The first parameter is a random number generator, which is part of the fuzzer state. In this case, we use the default one, `StdRand`, but you can choose a different one. We seed it with the current nanoseconds.
|
||||
@ -129,43 +84,26 @@ let mut state = StdState::new(
|
||||
|
||||
To avoid a type annotation error, you can use `InMemoryCorpus::<BytesInput>::new()` instead of `InMemoryCorpus::new()`. Otherwise, the type annotation will be inferred automatically once the `executor` is added.
|
||||
|
||||
- third parameter is another corpus that stores the "solution" testcases for the fuzzer. For our purpose, the solution is the input that triggers the panic. In this case, we want to store it to disk under the `crashes` directory, so we can inspect it.
|
||||
- last two parameters are feedback and objective, we will discuss them later.
|
||||
- The third parameter is another Corpus that stores the "solution" testcases for the fuzzer. For our purpose, the solution is the input that triggers the panic. In this case, we want to store it to disk under the `crashes` directory, so we can inspect it.
|
||||
- The last two parameters are feedback and objective, we will discuss them later.
|
||||
|
||||
Another required component is the **EventManager**. It handles some events such as the addition of a testcase to the corpus during the fuzzing process. For our purpose, we use the simplest one that just displays the information about these events to the user using a `Monitor` instance.
|
||||
|
||||
```rust,ignore
|
||||
// The Monitor trait defines how the fuzzer stats are displayed to the user
|
||||
let mon = SimpleMonitor::new(|s| println!("{}", s));
|
||||
|
||||
// The event manager handle the various events generated during the fuzzing loop
|
||||
// such as the notification of the addition of a new item to the corpus
|
||||
let mut mgr = SimpleEventManager::new(mon);
|
||||
```rust
|
||||
{{#rustdoc_include ../../listings/baby_fuzzer/listing-04/src/main.rs:event_manager}}
|
||||
```
|
||||
|
||||
In addition, we have the **Fuzzer**, an entity that contains some actions that alter the State. One of these actions is the scheduling of the testcases to the fuzzer using a **Scheduler**.
|
||||
We create it as `QueueScheduler`, a scheduler that serves testcases to the fuzzer in a FIFO fashion.
|
||||
|
||||
```rust,ignore
|
||||
// A queue policy to get testcasess from the corpus
|
||||
let scheduler = QueueScheduler::new();
|
||||
|
||||
// A fuzzer with feedbacks and a corpus scheduler
|
||||
let mut fuzzer = StdFuzzer::new(scheduler, (), ());
|
||||
```rust
|
||||
{{#rustdoc_include ../../listings/baby_fuzzer/listing-04/src/main.rs:scheduler_fuzzer}}
|
||||
```
|
||||
|
||||
Last but not least, we need an **Executor** that is the entity responsible to run our program under test. In this example, we want to run the harness function in-process (without forking off a child, for example), and so we use the `InProcessExecutor`.
|
||||
|
||||
```rust,ignore
|
||||
// Create the executor for an in-process function
|
||||
let mut executor = InProcessExecutor::new(
|
||||
&mut harness,
|
||||
(),
|
||||
&mut fuzzer,
|
||||
&mut state,
|
||||
&mut mgr,
|
||||
)
|
||||
.expect("Failed to create the Executor");
|
||||
```rust
|
||||
{{#rustdoc_include ../../listings/baby_fuzzer/listing-04/src/main.rs:executor}}
|
||||
```
|
||||
|
||||
It takes a reference to the harness, the state, and the event manager. We will discuss the second parameter later.
|
||||
@ -175,41 +113,19 @@ Now we have the 4 major entities ready for running our tests, but we still canno
|
||||
|
||||
For this purpose, we use a **Generator**, `RandPrintablesGenerator` that generates a string of printable bytes.
|
||||
|
||||
```rust,ignore
|
||||
use libafl::generators::RandPrintablesGenerator;
|
||||
|
||||
// Generator of printable bytearrays of max size 32
|
||||
let mut generator = RandPrintablesGenerator::new(32);
|
||||
|
||||
// Generate 8 initial inputs
|
||||
state
|
||||
.generate_initial_inputs(&mut fuzzer, &mut executor, &mut generator, &mut mgr, 8)
|
||||
.expect("Failed to generate the initial corpus".into());
|
||||
```rust
|
||||
{{#rustdoc_include ../../listings/baby_fuzzer/listing-04/src/main.rs:generator}}
|
||||
```
|
||||
|
||||
Now you can prepend the necessary `use` directives to your main.rs and compile the fuzzer.
|
||||
|
||||
```rust
|
||||
extern crate libafl;
|
||||
|
||||
use std::path::PathBuf;
|
||||
use libafl::{
|
||||
bolts::{AsSlice, current_nanos, rands::StdRand},
|
||||
corpus::{InMemoryCorpus, OnDiskCorpus},
|
||||
events::SimpleEventManager,
|
||||
executors::{inprocess::InProcessExecutor, ExitKind},
|
||||
fuzzer::StdFuzzer,
|
||||
generators::RandPrintablesGenerator,
|
||||
inputs::{BytesInput, HasTargetBytes},
|
||||
monitors::SimpleMonitor,
|
||||
schedulers::QueueScheduler,
|
||||
state::StdState,
|
||||
};
|
||||
{{#rustdoc_include ../../listings/baby_fuzzer/listing-04/src/main.rs:use}}
|
||||
```
|
||||
|
||||
When running, you should see something similar to:
|
||||
|
||||
```sh
|
||||
```console
|
||||
$ cargo run
|
||||
Finished dev [unoptimized + debuginfo] target(s) in 0.04s
|
||||
Running `target/debug/baby_fuzzer`
|
||||
@ -225,60 +141,22 @@ Now we want to turn our simple fuzzer into a feedback-based one and increase the
|
||||
An **Observer** records information about properties of a fuzzing run and feeds it back to the fuzzer. We use the `StdMapObserver`, the default observer that uses a map to keep track of covered elements. In our fuzzer, each condition is mapped to an entry of this map.
|
||||
|
||||
We represent such a map as a `static mut` variable.
|
||||
As we don't rely on any instrumentation engine, we have to manually track the satisfied conditions by `singals_set` in our harness:
|
||||
As we don't rely on any instrumentation engine, we have to manually track the satisfied conditions by `signals_set` in our harness:
|
||||
|
||||
```rust
|
||||
extern crate libafl;
|
||||
use libafl::{
|
||||
bolts::AsSlice,
|
||||
inputs::{BytesInput, HasTargetBytes},
|
||||
executors::ExitKind,
|
||||
};
|
||||
|
||||
// Coverage map with explicit assignments due to the lack of instrumentation
|
||||
static mut SIGNALS: [u8; 16] = [0; 16];
|
||||
|
||||
fn signals_set(idx: usize) {
|
||||
unsafe { SIGNALS[idx] = 1 };
|
||||
}
|
||||
|
||||
// The closure that we want to fuzz
|
||||
let mut harness = |input: &BytesInput| {
|
||||
let target = input.target_bytes();
|
||||
let buf = target.as_slice();
|
||||
signals_set(0); // set SIGNALS[0]
|
||||
if buf.len() > 0 && buf[0] == 'a' as u8 {
|
||||
signals_set(1); // set SIGNALS[1]
|
||||
if buf.len() > 1 && buf[1] == 'b' as u8 {
|
||||
signals_set(2); // set SIGNALS[2]
|
||||
if buf.len() > 2 && buf[2] == 'c' as u8 {
|
||||
panic!("=)");
|
||||
}
|
||||
}
|
||||
}
|
||||
ExitKind::Ok
|
||||
};
|
||||
{{#rustdoc_include ../../listings/baby_fuzzer/listing-05/src/main.rs:signals}}
|
||||
```
|
||||
|
||||
The observer can be created directly from the `SIGNALS` map, in the following way:
|
||||
|
||||
```rust,ignore
|
||||
// Create an observation channel using the signals map
|
||||
let observer = StdMapObserver::new("signals", unsafe { &mut SIGNALS });
|
||||
```rust
|
||||
{{#rustdoc_include ../../listings/baby_fuzzer/listing-05/src/main.rs:observer}}
|
||||
```
|
||||
|
||||
The observers are usually kept in the corresponding executor, as they keep track of information that is valid for just one run. We then have to modify our `InProcessExecutor` creation to include the observer as follows:
|
||||
|
||||
```rust,ignore
|
||||
// Create the executor for an in-process function with just one observer
|
||||
let mut executor = InProcessExecutor::new(
|
||||
&mut harness,
|
||||
tuple_list!(observer),
|
||||
&mut fuzzer,
|
||||
&mut state,
|
||||
&mut mgr,
|
||||
)
|
||||
.expect("Failed to create the Executor".into());
|
||||
```rust
|
||||
{{#rustdoc_include ../../listings/baby_fuzzer/listing-05/src/main.rs:executor_with_observer}}
|
||||
```
|
||||
|
||||
Now that the fuzzer can observe which condition is satisfied, we need a way to rate an input as interesting (i.e. worthy of addition to the corpus) based on this observation. Here comes the notion of Feedback.
|
||||
@ -287,49 +165,23 @@ Now that the fuzzer can observe which condition is satisfied, we need a way to r
|
||||
|
||||
We use `MaxMapFeedback`, a feedback that implements a novelty search over the map of the MapObserver. Basically, if there is a value in the observer's map that is greater than the maximum value registered so far for the same entry, it rates the input as interesting and updates its state.
|
||||
|
||||
**Objective Feedback** is another kind of Feedback which decide if an input is a "solution". It will save input to solutions(`./crashes` in our case) other than corpus when the input is rated interesting. We use `CrashFeedback` to tell the fuzzer that if an input causes the program to crash it is a solution for us.
|
||||
**Objective Feedback** is another kind of Feedback which decides if an input is a "solution". When such an input is rated interesting, it is saved to the solutions (`./crashes` in our case) rather than to the corpus. We use `CrashFeedback` to tell the fuzzer that if an input causes the program to crash it is a solution for us.
|
||||
|
||||
We need to update our State creation to include the feedback and the objective, and the Fuzzer to include them as well:
|
||||
|
||||
```rust,ignore
|
||||
extern crate libafl;
|
||||
use libafl::{
|
||||
bolts::{current_nanos, rands::StdRand, tuples::tuple_list},
|
||||
corpus::{InMemoryCorpus, OnDiskCorpus},
|
||||
feedbacks::{MaxMapFeedback, CrashFeedback},
|
||||
fuzzer::StdFuzzer,
|
||||
state::StdState,
|
||||
observers::StdMapObserver,
|
||||
};
|
||||
```rust
|
||||
{{#rustdoc_include ../../listings/baby_fuzzer/listing-05/src/main.rs:state_with_feedback_and_objective}}
|
||||
```
|
||||
|
||||
// Feedback to rate the interestingness of an input
|
||||
let mut feedback = MaxMapFeedback::new(&observer);
|
||||
Once again, you need to add the necessary `use` directives for this to work properly:
|
||||
|
||||
// A feedback to choose if an input is a solution or not
|
||||
let mut objective = CrashFeedback::new();
|
||||
|
||||
// create a State from scratch
|
||||
let mut state = StdState::new(
|
||||
// RNG
|
||||
StdRand::with_seed(current_nanos()),
|
||||
// Corpus that will be evolved, we keep it in memory for performance
|
||||
InMemoryCorpus::new(),
|
||||
// Corpus in which we store solutions (crashes in this example),
|
||||
// on disk so the user can get them after stopping the fuzzer
|
||||
OnDiskCorpus::new(PathBuf::from("./crashes")).unwrap(),
|
||||
&mut feedback,
|
||||
&mut objective
|
||||
).unwrap();
|
||||
|
||||
// ...
|
||||
|
||||
// A fuzzer with feedbacks and a corpus scheduler
|
||||
let mut fuzzer = StdFuzzer::new(scheduler, feedback, objective);
|
||||
```rust
|
||||
{{#rustdoc_include ../../listings/baby_fuzzer/listing-05/src/main.rs:use}}
|
||||
```
|
||||
|
||||
## The actual fuzzing
|
||||
|
||||
Now, after including the correct `use`, we can run the program, but the outcome is not so different from the previous one as the random generator does not take into account what we save as interesting in the corpus. To do that, we need to plug a Mutator.
|
||||
Now, we can run the program, but the outcome is not so different from the previous one as the random generator does not take into account what we save as interesting in the corpus. To do that, we need to plug a Mutator.
|
||||
|
||||
**Stages** perform actions on individual inputs, taken from the corpus.
|
||||
For instance, the `MutationalStage` executes the harness several times in a row, every time with mutated inputs.
|
||||
@ -337,28 +189,20 @@ For instance, the `MutationalStage` executes the harness several times in a row,
|
||||
As the last step, we create a MutationalStage that uses a mutator inspired by the havoc mutator of AFL.
|
||||
|
||||
```rust,ignore
|
||||
use libafl::{
|
||||
mutators::scheduled::{havoc_mutations, StdScheduledMutator},
|
||||
stages::mutational::StdMutationalStage,
|
||||
fuzzer::Fuzzer,
|
||||
};
|
||||
|
||||
// ...
|
||||
|
||||
// Setup a mutational stage with a basic bytes mutator
|
||||
let mutator = StdScheduledMutator::new(havoc_mutations());
|
||||
let mut stages = tuple_list!(StdMutationalStage::new(mutator));
|
||||
|
||||
fuzzer
|
||||
.fuzz_loop(&mut stages, &mut executor, &mut state, &mut mgr)
|
||||
.expect("Error in the fuzzing loop");
|
||||
{{#rustdoc_include ../../listings/baby_fuzzer/listing-06/src/main.rs:mutational_stage}}
|
||||
```
|
||||
|
||||
`fuzz_loop` will request a testcase from the fuzzer for each iteration using the scheduler, and then it will invoke the stage.
|
||||
|
||||
After adding this code, we have a proper fuzzer, that can run a find the input that panics the function in less than a second.
|
||||
Again, we need to add the new `use` directives:
|
||||
|
||||
```text
|
||||
```rust,ignore
|
||||
{{#rustdoc_include ../../listings/baby_fuzzer/listing-06/src/main.rs:use}}
|
||||
```
|
||||
|
||||
After adding this code, we have a proper fuzzer, that can run and find the input that panics the function in less than a second.
|
||||
|
||||
```console
|
||||
$ cargo run
|
||||
Compiling baby_fuzzer v0.1.0 (/home/andrea/Desktop/baby_fuzzer)
|
||||
Finished dev [unoptimized + debuginfo] target(s) in 1.56s
|
||||
|
@ -9,4 +9,4 @@ Examples can be found under `./fuzzer`.
|
||||
| baby_fuzzer_nautilus | [nautilus](https://www.ndss-symposium.org/wp-content/uploads/2019/02/ndss2019_04A-3_Aschermann_paper.pdf) is a **coverage guided, grammar based** fuzzer|
|
||||
|baby_fuzzer_tokens| basic **token level** fuzzer with token level mutations|
|
||||
|baby_fuzzer_with_forkexecutor| example for **InProcessForkExecutor**|
|
||||
|baby_no_std|a minimalistic example how to create a libafl based fuzzer that works on **`no_std`** environments like TEEs, Kernels or on barew metal|
|
||||
|baby_no_std|a minimalistic example how to create a libafl based fuzzer that works on **`no_std`** environments like TEEs, Kernels or on bare metal|
|
||||
|
@ -4,8 +4,8 @@ The Corpus is where testcases are stored. We define a Testcase as an Input and a
|
||||
|
||||
A Corpus can store testcases in different ways, for example on disk, or in memory, or implement a cache to speed up on-disk storage.
|
||||
|
||||
Usually, a testcase is added to the Corpus when it is considered as interesting, but a Corpus is used also to store testcases that fulfill an objective (like crashing the tested program for instance).
|
||||
Usually, a testcase is added to the Corpus when it is considered as interesting, but a Corpus is used also to store testcases that fulfill an objective (like crashing the program under test for instance).
|
||||
|
||||
Related to the Corpus, there is the way in which the fuzzer should ask for the next testcase to fuzz picking it from the Corpus. The taxonomy for this in LibAFL is CorpusScheduler, the entity representing the policy to pop testcases from the Corpus, FIFO for instance.
|
||||
Related to the Corpus is the way in which the next testcase (the fuzzer would ask for) is retrieved from the Corpus. The taxonomy for this handling in LibAFL is Scheduler, the entity representing the policy to pop testcases from the Corpus, in a FIFO fashion for instance.
|
||||
|
||||
Speaking about the code, [`Corpus`](https://docs.rs/libafl/0/libafl/corpus/trait.Corpus.html) and [`CorpusScheduler`](https://docs.rs/libafl/0/libafl/corpus/trait.CorpusScheduler.html) are traits.
|
||||
Speaking about the code, [`Corpus`](https://docs.rs/libafl/latest/libafl/corpus/trait.Corpus.html) and [`Scheduler`](https://docs.rs/libafl/latest/libafl/schedulers/trait.Scheduler.html) are traits.
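A minimal sketch of the two concepts together (types taken from the examples elsewhere in this book):

```rust,ignore
// Testcases live in memory; the scheduler hands them out in FIFO order.
let corpus = InMemoryCorpus::<BytesInput>::new();
let scheduler = QueueScheduler::new();
```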
|
||||
|
@ -9,11 +9,9 @@ So the Executor is for instance responsible to inform the program about the inpu
|
||||
|
||||
In our model, it can also hold a set of Observers connected with each execution.
|
||||
|
||||
In Rust, we bind this concept to the [`Executor`](https://docs.rs/libafl/0/libafl/executors/trait.Executor.html) trait. A structure implementing this trait must implement [`HasObservers`](https://docs.rs/libafl/0/libafl/executors/trait.HasObservers.html) too if wants to hold a set of Observers.
|
||||
In Rust, we bind this concept to the [`Executor`](https://docs.rs/libafl/latest/libafl/executors/trait.Executor.html) trait. A structure implementing this trait must implement [`HasObservers`](https://docs.rs/libafl/latest/libafl/executors/trait.HasObservers.html) too if it wants to hold a set of Observers.
|
||||
|
||||
By default, we implement some commonly used Executors such as [`InProcessExecutor`](https://docs.rs/libafl/0/libafl/executors/inprocess/struct.InProcessExecutor.html) in which the target is a harness function providing in-process crash detection. Another Executor is the [`ForkserverExecutor`](https://docs.rs/libafl/0/libafl/executors/forkserver/struct.ForkserverExecutor.html) that implements an AFL-like mechanism to spawn child processes to fuzz.
|
||||
|
||||
A common pattern when creating an Executor is wrapping an existing one, for instance [`TimeoutExecutor`](https://docs.rs/libafl/0.6.1/libafl/executors/timeout/struct.TimeoutExecutor.html) wraps an executor and install a timeout callback before calling the original run function of the wrapped executor.
|
||||
By default, we implement some commonly used Executors such as [`InProcessExecutor`](https://docs.rs/libafl/latest/libafl/executors/inprocess/type.InProcessExecutor.html) in which the target is a harness function providing in-process crash detection. Another Executor is the [`ForkserverExecutor`](https://docs.rs/libafl/latest/libafl/executors/forkserver/struct.ForkserverExecutor.html) that implements an AFL-like mechanism to spawn child processes to fuzz.
|
||||
|
||||
## InProcessExecutor
|
||||
Let's begin with the base case: `InProcessExecutor`.
|
||||
@ -24,7 +22,7 @@ When you want to execute the harness as fast as possible, you will most probably
|
||||
One thing to note here is that, when your harness is likely to have heap corruption bugs, you want to use another allocator so that a corrupted heap does not affect the fuzzer itself (for example, we adopt MiMalloc in some of our fuzzers). Alternatively, you can compile your harness with address sanitizer to make sure you can catch these heap bugs.
|
||||
|
||||
## ForkserverExecutor
|
||||
Next, we'll take a look at the `ForkserverExecutor`. In this case, it is `afl-cc` (from AFLplusplus/AFLplusplus) that compiles the harness code, and therefore, we can't use `EDGES_MAP` anymore. Hopefully, we have [_a way_](https://github.com/AFLplusplus/AFLplusplus/blob/2e15661f184c77ac1fbb6f868c894e946cbb7f17/instrumentation/afl-compiler-rt.o.c#L270) to tell the forkserver which map to record the coverage.
|
||||
Next, we'll take a look at the `ForkserverExecutor`. In this case, it is `afl-cc` (from AFL/AFLplusplus) that compiles the harness code, and therefore, we can't use `EDGES_MAP` anymore. Fortunately we have [_a way_](https://github.com/AFLplusplus/AFLplusplus/blob/2e15661f184c77ac1fbb6f868c894e946cbb7f17/instrumentation/afl-compiler-rt.o.c#L270) to tell the forkserver which map to record the coverage in.
|
||||
|
||||
As you can see from the forkserver example,
|
||||
|
||||
@ -33,7 +31,7 @@ As you can see from the forkserver example,
|
||||
let mut shmem = StdShMemProvider::new().unwrap().new_shmem(MAP_SIZE).unwrap();
|
||||
//let the forkserver know the shmid
|
||||
shmem.write_to_env("__AFL_SHM_ID").unwrap();
|
||||
let mut shmem_buf = shmem.as_mut_slice();
|
||||
let mut shmem_buf = shmem.as_slice_mut();
|
||||
```
|
||||
|
||||
Here we make a shared memory region, `shmem`, and write its ID to the environment variable `__AFL_SHM_ID`. Then the instrumented binary, or the forkserver, finds this shared memory region (from the aforementioned env var) to record its coverage. On your fuzzer side, you can pass this shmem map to your `Observer` to obtain coverage feedback combined with any `Feedback`.
|
||||
@ -48,7 +46,7 @@ See AFL++'s [_documentation_](https://github.com/AFLplusplus/AFLplusplus/blob/st
|
||||
Finally, we'll talk about the `InProcessForkExecutor`.
|
||||
`InProcessForkExecutor` has only one difference from `InProcessExecutor`: it forks before running the harness, and that's it.
|
||||
|
||||
But why do we want to do so? well, under some circumstances, you may find your harness pretty unstable or your harness wreaks havoc on the global states. In this case, you want to fork it before executing the harness runs in the child process so that it doesn't break things.
|
||||
But why do we want to do so? Well, under some circumstances, you may find your harness pretty unstable or your harness wreaks havoc on the global state. In this case, you want to fork before executing, so that the harness runs in the child process and doesn't break things.
|
||||
|
||||
However, we have to take care of the shared memory; it's the child process that runs the harness code and writes the coverage to the map.
|
||||
|
||||
@ -59,9 +57,9 @@ On your fuzzer side, you can allocate a shared memory region and make the `EDGES
|
||||
```rust,ignore
|
||||
let mut shmem;
|
||||
unsafe{
|
||||
shmem = StdShMemProvider::new().unwrap().new_shmem(MAX_EDGES_NUM).unwrap();
|
||||
shmem = StdShMemProvider::new().unwrap().new_shmem(EDGES_MAP_SIZE_IN_USE).unwrap();
|
||||
}
|
||||
let shmem_buf = shmem.as_mut_slice();
|
||||
let shmem_buf = shmem.as_slice_mut();
|
||||
unsafe{
|
||||
EDGES_PTR = shmem_buf.as_ptr();
|
||||
}
|
||||
|
@ -10,17 +10,25 @@ The concept of "interestingness" is abstract, but typically it is related to a n
|
||||
|
||||
As an example, given an Observer that reports all the sizes of memory allocations, a maximization Feedback can be used to maximize these sizes to spot pathological inputs in terms of memory consumption.
|
||||
|
||||
In terms of code, the library offers the [`Feedback`](https://docs.rs/libafl/0/libafl/feedbacks/trait.Feedback.html) and the [`FeedbackState`](https://docs.rs/libafl/0/libafl/feedbacks/trait.FeedbackState.html) traits.
|
||||
The first is used to implement functors that, given the state of the observers from the last execution, tells if the execution was interesting. The second is tied with `Feedback` and it is the state of the data that the feedback wants to persist in the fuzzers's state, for instance the cumulative map holding all the edges seen so far in the case of a feedback based on edge coverage.
|
||||
In terms of code, the library offers the [`Feedback`](https://docs.rs/libafl/latest/libafl/feedbacks/trait.Feedback.html) trait.
|
||||
It is used to implement functors that, given the state of the observers from the last execution, tells if the execution was interesting.
|
||||
So to speak, it reduces the observations to a boolean result of [`is_interesting`](https://docs.rs/libafl/latest/libafl/feedbacks/trait.Feedback.html#tymethod.is_interesting) - or not.
|
||||
For this, a `Feedback` can store anything it wants to persist in the fuzzer's state.
|
||||
This might be, for instance, the cumulative map of all edges seen so far, in the case of a feedback based on edge coverage.
|
||||
This can be achieved by adding `Metadata` in [`init_state`](https://docs.rs/libafl/latest/libafl/feedbacks/trait.Feedback.html#method.init_state) and accessing it later in `is_interesting`.
|
||||
`Feedback` can also add custom metadata to a newly created [`Testcase`](https://docs.rs/libafl/latest/libafl/corpus/testcase/struct.Testcase.html) using [`append_metadata`](https://docs.rs/libafl/latest/libafl/feedbacks/trait.Feedback.html#method.append_metadata).
|
||||
|
||||
Multiple Feedbacks can be combined into boolean formula, considering for instance an execution as interesting if it triggers new code paths or execute in less time compared to the average execution time using [`feedback_or`](https://docs.rs/libafl/*/libafl/macro.feedback_or.html).
|
||||
Multiple Feedbacks can be combined into a boolean expression, considering for instance an execution as interesting if it triggers new code paths or executes in less time compared to the average execution time, using [`feedback_or`](https://docs.rs/libafl/latest/libafl/macro.feedback_or.html).
|
||||
|
||||
On top, logic operators like `feedback_or` and `feedback_and` have a `_fast` option (`feedback_or_fast` where the second feedback will not be evaluated, if the first part already answers the `interestingness` question, to save precious performance.
|
||||
On top, logic operators like `feedback_or` and `feedback_and` have a `_fast` variant (e.g. `feedback_or_fast`) where the second feedback will not be evaluated, if the value of the first feedback operand already answers the `interestingness` question so as to save precious performance.
|
||||
|
||||
Using `feedback_and_fast` in combination with [`ConstFeedback`](https://docs.rs/libafl/*/libafl/feedbacks/enum.ConstFeedback.html#method.new), certain feedbacks can be disabled dynamically.
|
||||
Using `feedback_and_fast` in combination with [`ConstFeedback`](https://docs.rs/libafl/latest/libafl/feedbacks/enum.ConstFeedback.html#method.new), certain feedbacks can be disabled dynamically.
|
||||
|
||||
## Objectives
|
||||
|
||||
While feedbacks are commonly used to decide if an [`Input`](https://docs.rs/libafl/*/libafl/inputs/trait.Input.html) should be kept for future mutations, they serve a double-purpose, as so-called `Objective Feedbacks`.
|
||||
In this case, the `interestingness` of a feedback indicates, if an `Objective` has been hit.
|
||||
Commonly, these would be a`crash or a timeout, but they can also be used to find specific parts of the program, for sanitization, or a differential fuzzing success.
|
||||
While feedbacks are commonly used to decide if an [`Input`](https://docs.rs/libafl/latest/libafl/inputs/trait.Input.html) should be kept for future mutations, they serve a double-purpose, as so-called `Objective Feedbacks`.
|
||||
In this case, the `interestingness` of a feedback indicates if an `Objective` has been hit.
|
||||
Commonly, these objectives would be a crash or a timeout, but they can also be used to detect if specific parts of the program have been reached, for sanitization, or a differential fuzzing success.
|
||||
Objectives use the same trait as a normal [`Feedback`](https://docs.rs/libafl/latest/libafl/feedbacks/trait.Feedback.html) and the implementations can be used interchangeably.
|
||||
|
||||
The only difference is that `interesting` Objectives won't be mutated further, and are counted as `Solutions`, the result of a successful fuzzing campaign.
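A typical objective, as a sketch, combines crashes and timeouts:

```rust,ignore
// Any crashing or hanging input is treated as a solution and stored, not mutated further.
let mut objective = feedback_or_fast!(CrashFeedback::new(), TimeoutFeedback::new());
```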
|
||||
|
@ -6,4 +6,4 @@ Typically, a random generator is used to generate random inputs.
|
||||
|
||||
Generators are traditionally less used in Feedback-driven Fuzzing, but there are exceptions, like Nautilus, that uses a Grammar generator to create the initial corpus and a sub-tree Generator as a mutation of its grammar Mutator.
|
||||
|
||||
In the code, [`Generator`](https://docs.rs/libafl/0/libafl/generators/trait.Generator.html) is a trait.
|
||||
In the code, [`Generator`](https://docs.rs/libafl/latest/libafl/generators/trait.Generator.html) is a trait.
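A short sketch, mirroring the baby_fuzzer chapter (the surrounding fuzzer objects are assumed to exist already):

```rust,ignore
// Generate 8 random printable inputs of at most 32 bytes to seed the corpus.
let mut generator = RandPrintablesGenerator::new(32);
state
    .generate_initial_inputs(&mut fuzzer, &mut executor, &mut generator, &mut mgr, 8)
    .expect("Failed to generate the initial corpus");
```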
|
||||
|
@ -6,10 +6,10 @@ In our model of an abstract fuzzer, we define the Input as the internal represen
|
||||
|
||||
In the straightforward case, the input of the program is a byte array and in fuzzers such as AFL we store and manipulate exactly these byte arrays.
|
||||
|
||||
But it is not always the case. A program can expect inputs that are not byte arrays (e.g. a sequence of syscalls) and the fuzzer does not represent the Input in the same way that the program consumes it.
|
||||
But it is not always the case. A program can expect inputs that are not linear byte arrays (e.g. a sequence of syscalls forming a use case or protocol) and the fuzzer does not represent the Input in the same way that the program consumes it.
|
||||
|
||||
In case of a grammar fuzzer for instance, the Input is generally an Abstract Syntax Tree because it is a data structure that can be easily manipulated while maintaining the validity, but the program expects a byte array as input, so just before the execution, the tree is serialized to a sequence of bytes.
|
||||
|
||||
In the Rust code, an [`Input`](https://docs.rs/libafl/*/libafl/inputs/trait.Input.html) is a trait that can be implemented only by structures that are serializable and have only owned data as fields.
|
||||
In the Rust code, an [`Input`](https://docs.rs/libafl/latest/libafl/inputs/trait.Input.html) is a trait that can be implemented only by structures that are serializable and have only owned data as fields.
|
||||
|
||||
While most fuzzer use a normal `BytesInput`], more advanced inputs like inputs include special inputs for grammar fuzzing ([GramatronInput](https://docs.rs/libafl/*/libafl/inputs/gramatron/struct.GramatronInput.html) or `NautilusInput` on nightly), as well as the token-level [EncodedInput](https://docs.rs/libafl/*/libafl/inputs/encoded/struct.EncodedInput.html).
|
||||
While most fuzzers use a normal `BytesInput`, more advanced ones use inputs that include special inputs for grammar fuzzing ([GramatronInput](https://docs.rs/libafl/latest/libafl/inputs/gramatron/struct.GramatronInput.html) or `NautilusInput` on Rust nightly), as well as the token-level [EncodedInput](https://docs.rs/libafl/latest/libafl/inputs/encoded/struct.EncodedInput.html).
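As a tiny sketch of the most common case:

```rust,ignore
// A plain byte-array input; `target_bytes()` yields the bytes the target will consume.
let input = BytesInput::new(b"hello".to_vec());
let bytes = input.target_bytes();
```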
|
||||
|
@ -1,9 +1,9 @@
|
||||
# Mutator
|
||||
|
||||
The Mutator is an entity that takes one or more Inputs and generates a new derived one.
|
||||
The Mutator is an entity that takes one or more Inputs and generates a new instance of Input derived from its inputs.
|
||||
|
||||
Mutators can be composed, and they are generally linked to a specific Input type.
|
||||
|
||||
There can be, for instance, a Mutator that applies more than a single type of mutation on the input. Consider a generic Mutator for a byte stream, bit flip is just one of the possible mutations but not the only one, there is also, for instance, the random replacement of a byte of the copy of a chunk.
|
||||
There can be, for instance, a Mutator that applies more than a single type of mutation to the input. Consider a generic Mutator for a byte stream: bit flip is just one of the possible mutations, but not the only one; there is also, for instance, the random replacement of a byte of the copy of a chunk.
|
||||
|
||||
In LibAFL, [`Mutator`](https://docs.rs/libafl/*/libafl/mutators/trait.Mutator.html) is a trait.
|
||||
In LibAFL, [`Mutator`](https://docs.rs/libafl/latest/libafl/mutators/trait.Mutator.html) is a trait.
|
||||
|
@ -4,10 +4,10 @@ An Observer is an entity that provides an information observed during the execut
|
||||
|
||||
The information contained in the Observer is not preserved across executions, but it may be serialized and passed on to other nodes if an `Input` is considered `interesting`, and added to the `Corpus`.
|
||||
|
||||
As an example, the coverage map, filled during the execution to report the executed edges used by fuzzers such as AFL and `HonggFuzz` can be considered an observation. Another `Observer` can be the time spent executing a run, the program output, or more advanced observation, like maximum stack depth at runtime.
|
||||
This information is not preserved across runs, and it is an observation of a dynamic property of the program.
|
||||
As an example, the coverage map, filled during the execution to report the executed edges used by fuzzers such as AFL and `HonggFuzz` can be considered an observation. Another `Observer` can collect the time spent executing a run, the program output, or a more advanced observation, like maximum stack depth at runtime.
|
||||
This information is an observation of a dynamic property of the program.
|
||||
|
||||
In terms of code, in the library this entity is described by the [`Observer`](https://docs.rs/libafl/0/libafl/observers/trait.Observer.html) trait.
|
||||
In terms of code, in the library this entity is described by the [`Observer`](https://docs.rs/libafl/latest/libafl/observers/trait.Observer.html) trait.
|
||||
|
||||
In addition to holding the volatile data connected with the last execution of the target, the structures implementing this trait can define some execution hooks that are executed before and after each fuzz case. In these hooks, the observer can modify the fuzzer's state.
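For instance, the built-in `TimeObserver` uses exactly these hooks to measure how long each run takes; a minimal sketch (a map observer, as in the baby_fuzzer chapter, would typically sit in the same tuple):

```rust,ignore
// Observe the wall-clock time of each run; observers are grouped in a tuple and
// handed to the executor.
let time_observer = TimeObserver::new("time");
let observers = tuple_list!(time_observer);
```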
|
||||
|
||||
|
@ -1,9 +1,9 @@
|
||||
# Stage
|
||||
|
||||
A Stage is an entity that operates on a single Input got from the Corpus.
|
||||
A Stage is an entity that operates on a single Input received from the Corpus.
|
||||
|
||||
For instance, a Mutational Stage, given an input of the corpus, applies a Mutator and executes the generated input one or more time. How many times this has to be done can be scheduled, AFL for instance uses a performance score of the input to choose how many times the havoc mutator should be invoked. This can depend also on other parameters, for instance, the length of the input if we want to just apply a sequential bitflip, or be a fixed value.
|
||||
For instance, a Mutational Stage, given an input of the corpus, applies a Mutator and executes the generated input one or more times. How many times this has to be done can be scheduled, AFL for instance uses a performance score of the input to choose how many times the havoc mutator should be invoked. This can depend also on other parameters, for instance, the length of the input if we want to just apply a sequential bitflip, or a fixed value.
|
||||
|
||||
A stage can also be an analysis stage, for instance, the Colorization stage of Redqueen that aims to introduce more entropy in a testcase or the Trimming stage of AFL that aims to reduce the size of a testcase.
|
||||
|
||||
There are several stages in the LibAFL codebase implementing the [`Stage`](https://docs.rs/libafl/*/libafl/stages/trait.Stage.html) trait.
|
||||
There are several stages in the LibAFL codebase implementing the [`Stage`](https://docs.rs/libafl/latest/libafl/stages/trait.Stage.html) trait.
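As a sketch (mutator and stage types as used earlier in this book):

```rust,ignore
// A single mutational stage driving havoc mutations; stages are grouped in a tuple
// and passed to the fuzzing loop.
let mutator = StdScheduledMutator::new(havoc_mutations());
let mut stages = tuple_list!(StdMutationalStage::new(mutator));
```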
|
||||
|
@ -8,8 +8,8 @@ The LibAFL code reuse mechanism is based on components, rather than sub-classes,
|
||||
|
||||
Thinking about similar fuzzers, you can observe that most of the time the data structures that are modified are the ones related to testcases and the fuzzer global state.
|
||||
|
||||
Beside the entities previously described, we introduce the [`Testcase`](https://docs.rs/libafl/0.6/libafl/corpus/testcase/struct.Testcase.html) and [`State`](https://docs.rs/libafl/0.6/libafl/state/struct.StdState.html) entities. The Testcase is a container for an Input stored in the Corpus and its metadata (so, in the implementation, the Corpus stores Testcases) and the State contains all the metadata that are evolved while running the fuzzer, Corpus included.
|
||||
Beside the entities previously described, we introduce the [`Testcase`](https://docs.rs/libafl/latest/libafl/corpus/testcase/struct.Testcase.html) and [`State`](https://docs.rs/libafl/latest/libafl/state/struct.StdState.html) entities. The Testcase is a container for an Input stored in the Corpus and its metadata (so, in the implementation, the Corpus stores Testcases) and the State contains all the metadata that are evolved while running the fuzzer, Corpus included.
|
||||
|
||||
The State, in the implementation, contains only owned objects that are serializable, and it is serializable itself. Some fuzzers may want to serialize its state when pausing or just, when doing in-process fuzzing, serialize on crash and deserialize in the new process to continue to fuzz with all the metadata preserved.
|
||||
The State, in the implementation, contains only owned objects that are serializable, and it is serializable itself. Some fuzzers may want to serialize their state when pausing or just, when doing in-process fuzzing, serialize on crash and deserialize in the new process to continue to fuzz with all the metadata preserved.
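As a rough illustration (the concrete serializer is an implementation detail; `postcard` here is just one serde-compatible choice), snapshotting and restoring a state could look like this:

```rust,ignore
// Illustrative only: any serde-compatible format works, since the State is serializable.
let snapshot: Vec<u8> = postcard::to_allocvec(&state)?;
// ... later, e.g. in a freshly spawned process ...
let state: StdState<_, _, _, _> = postcard::from_bytes(&snapshot)?;
```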
|
||||
|
||||
Additionally, we group the entities that are "actions", like the `CorpusScheduler` and the `Feedbacks`, in a common place, the [`Fuzzer'](https://docs.rs/libafl/*/libafl/fuzzer/struct.StdFuzzer.html).
|
||||
Additionally, we group the entities that are "actions", like the `CorpusScheduler` and the `Feedbacks`, in a common place, the [`Fuzzer'](https://docs.rs/libafl/latest/libafl/fuzzer/struct.StdFuzzer.html).
|
||||
|
@ -5,10 +5,10 @@ A metadata in LibAFL is a self-contained structure that holds associated data to
|
||||
In terms of code, a metadata can be defined as a Rust struct registered in the SerdeAny register.
|
||||
|
||||
```rust
|
||||
extern crate libafl;
|
||||
extern crate serde;
|
||||
# extern crate libafl_bolts;
|
||||
# extern crate serde;
|
||||
|
||||
use libafl::SerdeAny;
|
||||
use libafl_bolts::SerdeAny;
|
||||
use serde::{Serialize, Deserialize};
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize, SerdeAny)]
|
||||
@ -19,15 +19,15 @@ pub struct MyMetadata {
|
||||
|
||||
The struct must be static, so it cannot hold references to borrowed objects.
|
||||
|
||||
As an alternative to `derive(SerdeAny)` that is a proc-macro in `libafl_derive` the user can use `libafl::impl_serdeany!(MyMetadata);`.
|
||||
As an alternative to `derive(SerdeAny)` which is a proc-macro in `libafl_derive` the user can use `libafl_bolts::impl_serdeany!(MyMetadata);`.
|
||||
|
||||
## Usage
|
||||
|
||||
Metadata objects are primarly intended to be used inside [`SerdeAnyMap`](https://docs.rs/libafl/0.5.0/libafl/bolts/serdeany/serdeany_registry/struct.SerdeAnyMap.html) and [`NamedSerdeAnyMap`](https://docs.rs/libafl/0.5.0/libafl/bolts/serdeany/serdeany_registry/struct.NamedSerdeAnyMap.html).
|
||||
Metadata objects are primarily intended to be used inside [`SerdeAnyMap`](https://docs.rs/libafl_bolts/latest/libafl_bolts/serdeany/serdeany_registry/struct.SerdeAnyMap.html) and [`NamedSerdeAnyMap`](https://docs.rs/libafl_bolts/latest/libafl_bolts/serdeany/serdeany_registry/struct.NamedSerdeAnyMap.html).
|
||||
|
||||
With these maps, the user can retrieve instances by type (and name). Internally, the instances are stored as SerdeAny trait objects.
|
||||
|
||||
Structs that want to have a set of metadata must implement the [`HasMetadata`](https://docs.rs/libafl/0.5.0/libafl/state/trait.HasMetadata.html) trait.
|
||||
Structs that want to have a set of metadata must implement the [`HasMetadata`](https://docs.rs/libafl/latest/libafl/state/trait.HasMetadata.html) trait.
|
||||
|
||||
By default, Testcase and State implement it and each hold a SerdeAnyMap.
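A minimal usage sketch, assuming the `HasMetadata` accessors look roughly like the following (method names and return types may differ slightly between LibAFL versions):

```rust,ignore
// Store the metadata in the State (or a Testcase) ...
state.add_metadata(MyMetadata { /* ... */ });

// ... and retrieve it later by type.
if let Ok(meta) = state.metadata::<MyMetadata>() {
    // use `meta` here
}
```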
|
||||
|
||||
|
27
docs/src/design/migration-0.11.md
Normal file
@ -0,0 +1,27 @@
|
||||
# Migrating from LibAFL <0.11 to 0.11
|
||||
|
||||
We moved the old `libafl::bolts` module to its own crate called `libafl_bolts`.
|
||||
Because of this, imports for types in LibAFL bolts have changed in version 0.11; everything else should remain the same.
|
||||
|
||||
## Reasons for This Change
|
||||
|
||||
With this change, we can now use many of LibAFL's low-level features in projects that are unrelated to fuzzing, or entirely different from LibAFL.
|
||||
Some of the cross-platform features in bolts include:
|
||||
|
||||
* SerdeAnyMap: a map that stores and retrieves elements by type and is serializable and deserializable
|
||||
* ShMem: A cross-platform (Windows, Linux, Android, MacOS) shared memory implementation
|
||||
* LLMP: A fast, lock-free IPC mechanism via SharedMap
|
||||
* Core_affinity: A maintained version of `core_affinity` that can be used to get core information and bind processes to cores
|
||||
* Rands: Fast random number generators for fuzzing (like [RomuRand](http://www.romu-random.org/))
|
||||
* MiniBSOD: Get and print information about the current process state, including important registers.
|
||||
* Tuples: Haskell-like compile-time tuple lists
|
||||
* Os: OS-specific functionality like signal handling, Windows exception handling, pipes, and helpers for `fork`
|
||||
|
||||
## What changed
|
||||
|
||||
You will need to move all `libafl::bolts::` imports to `libafl_bolts::` and add the crate dependency to your Cargo.toml (and specify feature flags there).
|
||||
As the only exception, the `libafl::bolts::launcher::Launcher` has moved to `libafl::events::launcher::Launcher`, since it contains fuzzer- and `EventManager`-specific code.
|
||||
If you are using `prelude`, you may need to also add `libafl_bolts::prelude`.
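For example, a typical import change looks like this (a sketch; `StdRand` stands in for any type that used to live in `libafl::bolts`):

```rust,ignore
// LibAFL 0.10 and earlier:
use libafl::bolts::rands::StdRand;

// LibAFL 0.11, with `libafl_bolts` added as a dependency in Cargo.toml:
use libafl_bolts::rands::StdRand;
```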
|
||||
|
||||
That's it.
|
||||
Enjoy using `libafl_bolts` in other projects.
|
9
docs/src/design/migration-0.12.md
Normal file
@ -0,0 +1,9 @@
|
||||
# Migrating from <0.12 to 0.12
|
||||
|
||||
We deleted `TimeoutExecutor` and `TimeoutForkserverExecutor` and made it mandatory for `InProcessExecutor` and `ForkserverExecutor` to have a timeout. `InProcessExecutor` and `ForkserverExecutor` now have a default timeout of 5 seconds.
|
||||
|
||||
## Reason for This Change
|
||||
In 99% of cases, it is advisable to have a timeout for the fuzzer. This is because we do not want the fuzzer to hang forever just because the target has hit a path that results in an infinite loop.
|
||||
|
||||
## What changed
|
||||
You do not have to wrap the executor with `TimeoutExecutor` anymore. You can just use `InProcessExecutor::new()` to instantiate the executor with the default timeout, or use `InProcessExecutor::timeout(duration)` to start the executor with a custom timeout duration.
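A rough before/after sketch (constructor arguments are elided; see the executor documentation for the full parameter list):

```rust,ignore
// Before 0.12: wrap the executor to add a timeout.
let executor = TimeoutExecutor::new(in_process_executor, Duration::from_secs(5));

// From 0.12 on: the 5-second default timeout is built in,
// or pass a custom duration as described above.
let executor = InProcessExecutor::new(/* harness, observers, fuzzer, state, mgr */)?;
```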
|
@ -75,7 +75,7 @@ where
|
||||
```
|
||||
|
||||
The executor is constrained to `EM` and `Z`, with each of their respective states being constrained to `E`'s state. It
|
||||
is no longer necessary to explicitly defined a generic for the input type, the state type, or the generic type, as these
|
||||
is no longer necessary to explicitly define a generic for the input type, the state type, or the generic type, as these
|
||||
are all present as associated types for `E`. Additionally, we don't even need to specify any details about the observers
|
||||
(`OT` in the previous version) as the type does not need to be constrained and is not shared by other types.
|
||||
|
||||
@ -101,7 +101,7 @@ See `fuzzers/` for examples of these changes.
|
||||
If you implemented a Mutator, Executor, State, or another kind of component, you must update your implementation. The
|
||||
main changes to the API are in the use of "Uses*" for associated types.
|
||||
|
||||
In many scenarios, Input, Observers, and State generics have been moved into traits with associated types (namely,
|
||||
In many scenarios, Input, Observer, and State generics have been moved into traits with associated types (namely,
|
||||
"UsesInput", "UsesObservers", and "UsesState". These traits are required for many existing traits now and are very
|
||||
straightforward to implement. In a majority of cases, you will have generics on your custom implementation or a fixed
|
||||
type to implement this with. Thankfully, Rust will let you know when you need to implement this type.
|
||||
@ -127,7 +127,7 @@ where
|
||||
}
|
||||
```
|
||||
|
||||
After 0.9, all `Corpus` implementations are required to implement `UsesInput` and `Corpus` no longer has a generic for
|
||||
After 0.9, all `Corpus` implementations are required to implement `UsesInput`. Also `Corpus` no longer has a generic for
|
||||
the input type (as it is now provided by the UsesInput impl). The migrated implementation is shown below:
|
||||
|
||||
```rust,ignore
|
||||
@ -160,3 +160,26 @@ Now, `Corpus` cannot be accidentally implemented for another type other than tha
|
||||
is fixed to the associated type for `UsesInput`.
|
||||
|
||||
A more complex example of migration can be found in the "Reasons for this change" section of this document.
|
||||
|
||||
## Observer Changes
|
||||
|
||||
Additionally, we changed the Observer API, as the API in 0.8 led to undefined behavior.
|
||||
At the same time, we used the change to simplify the common case: creating an `StdMapObserver`
|
||||
from libafl_target's `EDGES_MAP`.
|
||||
In the future, instead of using:
|
||||
|
||||
```rust,ignore
|
||||
let edges = unsafe { &mut EDGES_MAP[0..EDGES_MAP_SIZE_IN_USE] };
|
||||
let edges_observer = StdMapObserver::new("edges", edges);
|
||||
```
|
||||
|
||||
creating the edges observer is as simple as using the new `std_edges_map_observer` function.
|
||||
|
||||
```rust,ignore
|
||||
let edges_observer = unsafe { std_edges_map_observer("edges") };
|
||||
```
|
||||
|
||||
Alternatively, `StdMapObserver::new` will still work, but now the whole method is marked as `unsafe`.
|
||||
The reason is that the caller has to make sure `EDGES_MAP` (or other maps) are not moved or freed in memory,
|
||||
for the lifetime of the `MapObserver`.
|
||||
This means that the buffer should either be `static` or `Pin`.
|
||||
|
@ -6,7 +6,7 @@ LibAFL, as most of the Rust projects, can be built using `cargo` from the root d
|
||||
$ cargo build --release
|
||||
```
|
||||
|
||||
Note that the `--release` flag is optional for development, but you needed to add it to fuzzing at a decent speed.
|
||||
Note that the `--release` flag is optional for development, but you need to add it to do fuzzing at a decent speed.
|
||||
Slowdowns of 10x or more are not uncommon for Debug builds.
|
||||
|
||||
The LibAFL repository is composed of multiple crates.
|
||||
|
@ -10,7 +10,7 @@ libafl = { version = "*" }
|
||||
|
||||
## Crate List
|
||||
|
||||
For LibAFL, each crate has its self-contained purpose, and the user may not need to use all of them in its project.
|
||||
For LibAFL, each crate has its self-contained purpose, and the user may not need to use all of them in their project.
|
||||
Following the naming convention of the folders in the project's root, they are:
|
||||
|
||||
### [`libafl`](https://github.com/AFLplusplus/LibAFL/tree/main/libafl)
|
||||
@ -31,20 +31,35 @@ You can choose the features by using `features = ["feature1", "feature2", ...]`
|
||||
Out of this list, by default, `std`, `derive`, and `rand_trait` are already set.
|
||||
You can choose to disable them by setting `default-features = false` in your `Cargo.toml`.
|
||||
|
||||
### libafl_sugar
|
||||
### [`libafl_bolts`](https://github.com/AFLplusplus/LibAFL/tree/main/libafl_bolts)
|
||||
|
||||
The `libafl_bolts` crate is a minimal tool shed filled with useful low-level Rust features, not necessarily related to fuzzers.
|
||||
In it, you'll find highlights like:
|
||||
|
||||
- `core_affinity` to bind the current process to cores
|
||||
- `SerdeAnyMap`, a map that can store typed values in a serializable fashion
|
||||
- `minibsod` to dump the current process state
|
||||
- `LLMP`, "low level message passing", a lock-free IPC mechanism
|
||||
- `Rand`, different fast (non-cryptographically secure) RNG implementations like RomuRand
|
||||
- `ShMem`, a platform-independent shared memory implementation
|
||||
- `Tuples`, a compile-time tuple implementation
|
||||
|
||||
... and much more.
|
||||
|
||||
### [`libafl_sugar`](https://github.com/AFLplusplus/LibAFL/tree/main/libafl_sugar)
|
||||
|
||||
The sugar crate abstracts away most of the complexity of LibAFL's API.
|
||||
Instead of high flexibility, it aims to be high-level and easy-to-use.
|
||||
It is not as flexible as stitching your fuzzer together from each individual component, but allows you to build a fuzzer with minimal lines of code.
|
||||
To see it in action, take a look at the [`libfuzzer_stb_image_sugar` example fuzzer](https://github.com/AFLplusplus/LibAFL/tree/main/fuzzers/libfuzzer_stb_image_sugar).
|
||||
|
||||
### libafl_derive
|
||||
### [`libafl_derive`](https://github.com/AFLplusplus/LibAFL/tree/main/libafl_derive)
|
||||
|
||||
This is a proc-macro crate paired with the `libafl` crate.
|
||||
|
||||
At the moment, it just exposes the `derive(SerdeAny)` macro that can be used to define Metadata structs, see the section about [Metadata](../design/metadata.md) for details.
|
||||
|
||||
### libafl_targets
|
||||
### [`libafl_targets`](https://github.com/AFLplusplus/LibAFL/tree/main/libafl_targets)
|
||||
|
||||
This crate exposes code to interact with, and to instrument, targets.
|
||||
Features can be enabled and disabled at compile time using feature flags.
|
||||
@ -52,36 +67,36 @@ To enable and disable features at compile-time, the features are enabled and dis
|
||||
Currently, the supported flags are:
|
||||
|
||||
- `pcguard_edges` defines the SanitizerCoverage trace-pc-guard hooks to track the executed edges in a map.
|
||||
- `pcguard_hitcounts defines the SanitizerCoverage trace-pc-guard hooks to track the executed edges with the hitcounts (like AFL) in a map.
|
||||
- `pcguard_hitcounts` defines the SanitizerCoverage trace-pc-guard hooks to track the executed edges with the hitcounts (like AFL) in a map.
|
||||
- `libfuzzer` exposes a compatibility layer with libFuzzer style harnesses.
|
||||
- `value_profile` defines the SanitizerCoverage trace-cmp hooks to track the matching bits of each comparison in a map.
|
||||
|
||||
### libafl_cc
|
||||
### [`libafl_cc`](https://github.com/AFLplusplus/LibAFL/tree/main/libafl_cc)
|
||||
|
||||
This is a library that provides utils wrap compilers and create source-level fuzzers.
|
||||
This is a library that provides utils to wrap compilers and create source-level fuzzers.
|
||||
|
||||
At the moment, only the Clang compiler is supported.
|
||||
To understand it deeper, look through the tutorials and examples.
|
||||
|
||||
### libafl_frida
|
||||
### [`libafl_frida`](https://github.com/AFLplusplus/LibAFL/tree/main/libafl_frida)
|
||||
|
||||
This library bridges LibAFL with Frida as instrumentation backend.
|
||||
With this crate, you can instrument targets on Linux/macOS/Windows/Android for coverage collection.
|
||||
Additionally, it supports CmpLog and AddressSanitizer instrumentation, with runtimes for aarch64.
|
||||
See further information, as well as usage instructions, [later in the book](../advanced_features/frida.md).
|
||||
|
||||
### libafl_qemu
|
||||
### [`libafl_qemu`](https://github.com/AFLplusplus/LibAFL/tree/main/libafl_qemu)
|
||||
|
||||
This library bridges LibAFL with QEMU user-mode to fuzz ELF cross-platform binaries.
|
||||
|
||||
It works on Linux and can collect edge coverage without collisions!
|
||||
It also supports a wide range of hooks and instrumentation options.
|
||||
|
||||
### libafl_nyx
|
||||
### [`libafl_nyx`](https://github.com/AFLplusplus/LibAFL/tree/main/libafl_nyx)
|
||||
|
||||
[Nyx](https://nyx-fuzz.com/) is a KVM-based snapshot fuzzer. `libafl_nyx` adds these capabilities to LibAFL. There is a specific section explaining usage of libafl_nyx [later in the book](../advanced_features/nyx.md).
|
||||
|
||||
### libafl_concolic
|
||||
### [`libafl_concolic`](https://github.com/AFLplusplus/LibAFL/tree/main/libafl_concolic)
|
||||
|
||||
Concolic fuzzing is the combination of fuzzing and a symbolic execution engine.
|
||||
This can reach greater depth than normal fuzzing, and is exposed in this crate.
|
||||
|
@ -11,27 +11,27 @@ The first step is to download LibAFL and all dependencies that are not automatic
|
||||
> previous command. Additionally, PowerShell-specific examples will use `>`
|
||||
> rather than `$`.
|
||||
|
||||
While you technically do not need to install LibAFL, but can use the version from crates.io directly, we do recommend to download or clone the GitHub version.
|
||||
While technically you do not need to install LibAFL and can use the version from crates.io directly, we recommend downloading or cloning the GitHub version.
|
||||
This gets you the example fuzzers, additional utilities, and latest patches.
|
||||
The easiest way to do this is to use `git`.
|
||||
|
||||
```sh
|
||||
$ git clone git@github.com:AFLplusplus/LibAFL.git
|
||||
$ git clone https://github.com/AFLplusplus/LibAFL.git
|
||||
```
|
||||
|
||||
You can alternatively, on a UNIX-like machine, download a compressed archive and extract it with:
|
||||
Alternatively, on a UNIX-like machine, you can download a compressed archive and extract it with:
|
||||
|
||||
```sh
|
||||
wget https://github.com/AFLplusplus/LibAFL/archive/main.tar.gz
|
||||
$ tar xvf LibAFL-main.tar.gz
|
||||
$ rm LibAFL-main.tar.gz
|
||||
$ wget https://github.com/AFLplusplus/LibAFL/archive/main.tar.gz
|
||||
$ tar xvf main.tar.gz
|
||||
$ rm main.tar.gz
|
||||
$ ls LibAFL-main # this is the extracted folder
|
||||
```
|
||||
|
||||
## Clang installation
|
||||
|
||||
One of the external dependencies of LibAFL is the Clang C/C++ compiler.
|
||||
While most of the code is in pure Rust, we still need a C compiler because stable Rust still does not support features that some parts of LibAFL may need, such as weak linking, and LLVM builtins linking.
|
||||
While most of the code is written in pure Rust, we still need a C compiler because stable Rust still does not support features that some parts of LibAFL may need, such as weak linking, and LLVM builtins linking.
|
||||
For these parts, we use C to expose the missing functionalities to our Rust codebase.
|
||||
|
||||
In addition, if you want to perform source-level fuzz testing of C/C++ applications,
|
||||
|
@ -4,10 +4,10 @@ Fuzzers are important tools for security researchers and developers alike.
|
||||
A wide range of state-of-the-art tools like [AFL++](https://github.com/AFLplusplus/AFLplusplus), [libFuzzer](https://llvm.org/docs/LibFuzzer.html) or [honggfuzz](https://github.com/google/honggfuzz) are available to users. They do their job in a very effective way, finding thousands of bugs.
|
||||
|
||||
From the perspective of a power user, however, these tools are limited.
|
||||
Their design does not treat extensibility as a first-class citizen.
|
||||
Their designs do not treat extensibility as a first-class citizen.
|
||||
Usually, a fuzzer developer can choose to either fork one of these existing tools, or to create a new fuzzer from scratch.
|
||||
In any case, researchers end up with tons of fuzzers, all of which are incompatible with each other.
|
||||
Their outstanding features can not just be combined for new projects.
|
||||
Their outstanding features cannot just be combined for new projects.
|
||||
By reinventing the wheel over and over, we may completely miss out on features that are complex to reimplement.
|
||||
|
||||
To tackle this issue, we created LibAFL, a library that is _not just another fuzzer_, but a collection of reusable pieces for individual fuzzers.
|
||||
@ -24,11 +24,11 @@ Some highlight features currently include:
|
||||
This means it does not require a specific OS-dependent runtime to function.
|
||||
Define an allocator and a way to map pages, and you are good to inject LibAFL in obscure targets like embedded devices, hypervisors, or maybe even WebAssembly?
|
||||
- `adaptable`: Given years of experience fine-tuning *AFLplusplus* and our academic fuzzing background, we could incorporate recent fuzzing trends into LibAFL's design and make it future-proof.
|
||||
To give an example, as opposed to old-skool fuzzers, a `BytesInput` is just one of the potential forms of inputs:
|
||||
To give an example, as opposed to old-school fuzzers, a `BytesInput` is just one of the potential forms of inputs:
|
||||
feel free to use and mutate an Abstract Syntax Tree instead, for structured fuzzing.
|
||||
- `scalable`: As part of LibAFL, we developed `Low Level Message Passing`, `LLMP` for short, which allows LibAFL to scale almost linearly over cores. That is, if you choose to use this feature - it is your fuzzer, after all.
|
||||
Scaling to multiple machines over TCP is also possible, using LLMP's `broker2broker` feature.
|
||||
- `fast`: We do everything we can at compile time so that the runtime overhead is as minimal as it can get.
|
||||
- `bring your own target`: We support binary-only modes, like QEMU-Mode and Frida-Mode with ASAN and CmpLog, as well as multiple compilation passes for sourced-based instrumentation.
|
||||
- `bring your own target`: We support binary-only modes, like (full-system) QEMU-Mode and Frida-Mode with ASan and CmpLog, as well as multiple compilation passes for source-based instrumentation.
|
||||
Of course, we also support custom instrumentation, as you can see in the Python example based on Google's Atheris.
|
||||
- `usable`: This one is on you to decide. Dig right in!
|
||||
|
@ -1,11 +1,11 @@
|
||||
# The LibAFL Fuzzing Library
|
||||
|
||||
<img align="right" src="https://github.com/AFLplusplus/Website/raw/master/static/logo_256x256.png" alt="AFL++ Logo">
|
||||
<img align="right" src="https://raw.githubusercontent.com/AFLplusplus/Website/main/static/libafl_logo.svg" alt="LibAFL Logo" style="width: 256px; height: auto">
|
||||
|
||||
*by Andrea Fioraldi and Dominik Maier*
|
||||
|
||||
Welcome to LibAFL, the Advanced Fuzzing Library.
|
||||
This book shall be a gentle introduction into the library.
|
||||
This book shall be a gentle introduction to the library.
|
||||
|
||||
This version of the LibAFL book is coupled with the release 1.0 beta of the library.
|
||||
|
||||
|
@ -3,7 +3,7 @@
|
||||
Configurations for individual fuzzer nodes are relevant for multi node fuzzing.
|
||||
The chapter describes how to run nodes with different configurations
|
||||
in one fuzzing cluster.
|
||||
This allows, for example, a node compiled with ASAN, to know that it needs to rerun new testcases for a node without ASAN, while the same binary/configuration does not.
|
||||
This allows, for example, a node compiled with ASan, to know that it needs to rerun new testcases for a node without ASan, while the same binary/configuration does not.
|
||||
|
||||
Fuzzers with the same configuration can exchange Observers for new testcases and reuse them without rerunning the input.
|
||||
A different configuration indicates that only the raw input can be exchanged; it must be rerun on the other node to capture relevant observations.
|
||||
|
@ -1,8 +1,8 @@
|
||||
# Message Passing
|
||||
|
||||
LibAFL offers a standard mechanism for message passing over processes and machines with a low overhead.
|
||||
LibAFL offers a standard mechanism for message passing between processes and machines with a low overhead.
|
||||
We use message passing to inform the other connected clients/fuzzers/nodes about new testcases, metadata, and statistics about the current run.
|
||||
Depending on individual needs, LibAFL can also write testcase contents to disk, while still using events to notify other fuzzers, using an `OnDiskCorpus`.
|
||||
Depending on individual needs, LibAFL can also write testcase contents to disk, while still using events to notify other fuzzers, using the `CachedOnDiskCorpus` or similar.
|
||||
|
||||
In our tests, message passing scales very well to share new testcases and metadata between multiple running fuzzer instances for multi-core fuzzing.
|
||||
Specifically, it scales _a lot_ better than using memory locks on a shared corpus, and _a lot_ better than sharing the testcases via the filesystem, as AFL traditionally does.
|
||||
@ -12,7 +12,7 @@ The `EventManager` interface is used to send Events over the wire using `Low Lev
|
||||
|
||||
## Low Level Message Passing (LLMP)
|
||||
|
||||
LibAFL comes with a reasonably lock-free message passing mechanism that scales well across cores and, using its *broker2broker* mechanism, even to connected machines via TCP.
|
||||
LibAFL comes with a reasonably lock-free message passing mechanism that scales well across cores and, using its _broker2broker_ mechanism, even to connected machines via TCP.
|
||||
Most example fuzzers use this mechanism, and it is the best `EventManager` if you want to fuzz on more than a single core.
|
||||
In the following, we will describe the inner workings of `LLMP`.
|
||||
|
||||
@ -28,12 +28,12 @@ Shared maps, called shared memory for the sake of not colliding with Rust's `map
|
||||
Each client, usually a fuzzer trying to share stats and new testcases, maps an outgoing `ShMem` map.
|
||||
With very few exceptions, only this client writes to this map; therefore, we do not run into race conditions and can live without locks.
|
||||
The broker reads from all client's `ShMem` maps.
|
||||
It checks all incoming client maps periodically and then forwards new messages to its outgoing broadcast-`ShMem`, mapped by all connected clients.
|
||||
It periodically checks all incoming client maps and then forwards new messages to its outgoing broadcast-`ShMem`, mapped by all connected clients.
|
||||
|
||||
To send new messages, a client places a new message at the end of its shared memory and then updates a static field to notify the broker.
|
||||
Once the outgoing map is full, the sender allocates a new `ShMem` using the respective `ShMemProvider`.
|
||||
It then sends the information needed to map the newly-allocated page in connected processes to the old page, using an end of page (`EOP`) message.
|
||||
Once the receiver maps the new page, flags it as safe for unmapping from the sending process (to avoid race conditions if we have more than a single EOP in a short time), and then continues to read from the new `ShMem`.
|
||||
Once the receiver maps the new page, it flags it as safe for unmapping by the sending process (to avoid race conditions if we have more than a single EOP in a short time), and then continues to read from the new `ShMem`.
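In pseudocode, the sender side of this protocol looks roughly like the following (all helper names here are illustrative, not the actual LLMP internals):

```rust,ignore
// Illustrative sketch of the end-of-page (EOP) mechanism; names are made up for clarity.
if !current_page.has_room(msg.len()) {
    // Allocate a fresh page and tell readers where to continue via an EOP message.
    let new_page = shmem_provider.new_shmem(PAGE_SIZE)?;
    current_page.append(end_of_page_message(&new_page));
    current_page = new_page;
}
current_page.append(msg);
current_page.bump_message_counter(); // lock-free "notification" the broker polls for
```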
|
||||
|
||||
The schema for client's maps to the broker is as follows:
|
||||
|
||||
@ -54,10 +54,10 @@ After the broker received a new message from clientN, (`clientN_out->current_id
|
||||
|
||||
The clients periodically, for example after finishing `n` mutations, check for new incoming messages by checking if (`current_broadcast_map->current_id != last_message->message_id`).
|
||||
While the broker uses the same EOP mechanism to map new `ShMem`s for its outgoing map, it never unmaps old pages.
|
||||
This additional memory overhead serves a good purpose: by keeping all broadcast pages around, we make sure that new clients can join in on a fuzzing campaign at a later point in time
|
||||
These additional memory resources serve a good purpose: by keeping all broadcast pages around, we make sure that new clients can join in on a fuzzing campaign at a later point in time.
|
||||
They just need to re-read all broadcasted messages from start to finish.
|
||||
|
||||
So the outgoing messages flow like this over the outgoing broadcast `Shmem`:
|
||||
So the flow of outgoing messages over the outgoing broadcast `Shmem` looks like this:
|
||||
|
||||
```text
|
||||
[broker]
|
||||
@ -78,7 +78,7 @@ They are the default if using LibAFL's `Launcher`.
|
||||
If you should want to use `LLMP` in its raw form, without any `LibAFL` abstractions, take a look at the `llmp_test` example in [./libafl/examples](https://github.com/AFLplusplus/LibAFL/blob/main/libafl/examples/llmp_test/main.rs).
|
||||
You can run the example using `cargo run --example llmp_test` with the appropriate modes, as indicated by its help output.
|
||||
First, you will have to create a broker using `LlmpBroker::new()`.
|
||||
Then, create some `LlmpClient``s` in other threads and register them with the main thread using `LlmpBroker::register_client`.
|
||||
Then, create some `LlmpClient`s in other threads and register them with the main thread using `LlmpBroker::register_client`.
|
||||
Finally, call `LlmpBroker::loop_forever()`.
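Put together, the raw setup sketched above has roughly this shape (argument lists are abbreviated and may differ between versions):

```rust,ignore
// On the broker thread:
let mut broker = LlmpBroker::new(/* shmem provider, ... */)?;

// In each client thread:
let client = LlmpClient::new(/* shmem provider, ... */)?;
// Hand the client's description to the broker on the main thread:
broker.register_client(/* the client's map description */);

// Finally, let the broker forward messages forever:
broker.loop_forever(/* message handler, sleep duration */);
```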
|
||||
|
||||
### B2B: Connecting Fuzzers via TCP
|
||||
|
@ -4,18 +4,18 @@ Multiple fuzzer instances can be spawned using different ways.
|
||||
|
||||
## Manually, via a TCP port
|
||||
|
||||
The straightforward way to do Multi-Threading is to use the `LlmpRestartingEventManager`, specifically to use `setup_restarting_mgr_std`.
|
||||
The straightforward way to do Multi-Threading is to use the [`LlmpRestartingEventManager`](https://docs.rs/libafl/latest/libafl/events/llmp/struct.LlmpRestartingEventManager.html), specifically to use [`setup_restarting_mgr_std`](https://docs.rs/libafl/latest/libafl/events/llmp/fn.setup_restarting_mgr_std.html).
|
||||
It abstracts away all the pesky details about restarts on crash handling (for in-memory fuzzers) and multi-threading.
|
||||
With it, every instance you launch manually tries to connect to a TCP port on the local machine.
|
||||
|
||||
If the port is not yet bound, this instance becomes the broker, itself binding to the port to await new clients.
|
||||
If the port is not yet bound, this instance becomes the broker, binding itself to the port to await new clients.
|
||||
|
||||
If the port is already bound, the EventManager will try to connect to it.
|
||||
The instance becomes a client and can now communicate with all other nodes.
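A minimal sketch of this setup (see the example fuzzers for complete code; the exact arguments differ between LibAFL versions):

```rust,ignore
let monitor = MultiMonitor::new(|s| println!("{s}"));
// Becomes the broker if port 1337 is still free, otherwise connects as a client.
let (state, mut mgr) = setup_restarting_mgr_std(monitor, 1337, EventConfig::from_name("default"))?;
// `state` is `None` on the first launch and `Some(..)` after a restart.
```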
|
||||
|
||||
Launching nodes manually has the benefit that you can have multiple nodes with different configurations, such as clients fuzzing with and without ASAN.
|
||||
Launching nodes manually has the benefit that you can have multiple nodes with different configurations, such as clients fuzzing with and without `ASan`.
|
||||
|
||||
While it's called "restarting" manager, it uses `fork` on Unix operating systems as optimization and only actually restarts from scratch on Windows.
|
||||
While it's called "restarting" manager, it uses `fork` on Unix-like operating systems as optimization and only actually restarts from scratch on Windows.
|
||||
|
||||
|
||||
## Automated, with Launcher
|
||||
@ -23,7 +23,7 @@ While it's called "restarting" manager, it uses `fork` on Unix operating systems
|
||||
The Launcher is the lazy way to do multiprocessing.
|
||||
You can use the Launcher builder to create a fuzzer that spawns multiple nodes with one click, all using restarting event managers and the same configuration.
|
||||
|
||||
To use launcher, first you need to write an anonymous function `let mut run_client = |state: Option<_>, mut mgr, _core_id|{}`, which uses three parameters to create individual fuzzer. Then you can specify the `shmem_provider`,`broker_port`,`monitor`,`cores` and other stuff through `Launcher::builder()`:
|
||||
To use launcher, first you need to write an anonymous function `let mut run_client = |state: Option<_>, mut mgr, _core_id|{}`, which uses three parameters to create an individual fuzzer. Then you can specify the `shmem_provider`, `broker_port`, `monitor`, `cores`, and other options through `Launcher::builder()`:
|
||||
|
||||
```rust,ignore
|
||||
Launcher::builder()
|
||||
@ -42,13 +42,17 @@ To use launcher, first you need to write an anonymous function `let mut run_clie
|
||||
This first starts a broker, then spawns `n` clients, according to the value passed to `cores`.
|
||||
The value is a string indicating the cores to bind to, for example, `0,2,5` or `0-3`.
|
||||
For each client, `run_client` will be called.
|
||||
On Windows, the Launcher will restart each client, while on Unix, it will use `fork`.
|
||||
If the launcher uses `fork`, it will hide child output, unless the settings indicate otherwise, or the `LIBAFL_DEBUG_OUTPUT` env variable is set.
|
||||
On Windows, the Launcher will restart each client, while on Unix-alikes, it will use `fork`.
|
||||
|
||||
Advanced use-cases:
|
||||
|
||||
1. To connect multiple nodes together via TCP, you can use the `remote_broker_addr`. This requires the `llmp_bind_public` compile-time feature for `LibAFL`.
|
||||
2. To use multiple launchers for individual configurations, you can set `spawn_broker` to `false` on all but one.
|
||||
2. To use multiple launchers for individual configurations, you can set `spawn_broker` to `false` on all instances but one.
|
||||
3. Launcher will not select the cores automatically, so you need to specify the `cores` that you want.
|
||||
4. On `Unix`, you can choose between a forking and non-forking version of Launcher by setting the `fork` feature in LibAFL. Some targets may not like forking, but it is faster than restarting processes from scratch. Windows will never fork.
|
||||
5. For simple debugging, first set the `LIBAFL_DEBUG_OUTPUT` env variable to see if a child process printed anything.
|
||||
6. For further debugging of fuzzer failures, it may make sense to replace `Launcher` temporarily with a [`SimpleEventManager`](https://docs.rs/libafl/latest/libafl/events/simple/struct.SimpleEventManager.html#method.new) and call your harness fn (`run_client(None, mgr, 0);`) directly, so that fuzzing runs in the same thread and is easier to debug, before moving back to `Launcher` after the bugfix.
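For the last point, a minimal sketch of that temporary swap (the monitor choice and the core id argument are placeholders):

```rust,ignore
// Single-threaded debugging setup instead of the Launcher:
let monitor = SimpleMonitor::new(|s| println!("{s}"));
let mgr = SimpleEventManager::new(monitor);
run_client(None, mgr, 0); // call the harness closure directly
```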
|
||||
|
||||
For more examples, you can check out `qemu_launcher` and `libfuzzer_libpng_launcher` in [`./fuzzers/`](https://github.com/AFLplusplus/LibAFL/tree/main/fuzzers).
|
||||
|
||||
|
@ -2,4 +2,4 @@
|
||||
|
||||
In this chapter, we will build a custom fuzzer using the [Lain](https://github.com/microsoft/lain) mutator in Rust.
|
||||
|
||||
This tutorial will introduce you in writing extensions to LibAFL like Feedbacks and Testcase's metadata.
|
||||
This tutorial will introduce you to writing extensions to LibAFL like Feedbacks and Testcase's metadata.
|
||||
|
4
fuzzers/FRET/.gitignore
vendored
@ -1,4 +0,0 @@
|
||||
*.qcow2
|
||||
corpus
|
||||
*.axf
|
||||
demo
|
@ -1,41 +0,0 @@
|
||||
[package]
|
||||
name = "fret"
|
||||
version = "0.8.2"
|
||||
authors = ["Andrea Fioraldi <andreafioraldi@gmail.com>", "Dominik Maier <domenukk@gmail.com>"]
|
||||
edition = "2021"
|
||||
|
||||
[features]
|
||||
default = ["std", "snapshot_restore", "singlecore", "restarting", "feed_systemtrace", "fuzz_int" ]
|
||||
std = []
|
||||
snapshot_restore = []
|
||||
snapshot_fast = [ "snapshot_restore" ]
|
||||
singlecore = []
|
||||
restarting = ['singlecore']
|
||||
trace_abbs = []
|
||||
systemstate = []
|
||||
feed_systemgraph = [ "systemstate" ]
|
||||
feed_systemtrace = [ "systemstate" ]
|
||||
feed_longest = [ ]
|
||||
feed_afl = [ ]
|
||||
feed_genetic = [ ]
|
||||
fuzz_int = [ ]
|
||||
gensize_1 = [ ]
|
||||
gensize_10 = [ ]
|
||||
gensize_100 = [ ]
|
||||
observer_hitcounts = []
|
||||
no_hash_state = []
|
||||
run_until_saturation = []
|
||||
|
||||
[profile.release]
|
||||
lto = true
|
||||
codegen-units = 1
|
||||
debug = true
|
||||
|
||||
[dependencies]
|
||||
libafl = { path = "../../libafl/" }
|
||||
libafl_qemu = { path = "../../libafl_qemu/", features = ["arm", "systemmode"] }
|
||||
serde = { version = "1.0", default-features = false, features = ["alloc"] } # serialization lib
|
||||
hashbrown = { version = "0.12", features = ["serde", "ahash-compile-time-rng"] } # A faster hashmap, nostd compatible
|
||||
petgraph = { version="0.6.0", features = ["serde-1"] }
|
||||
ron = "0.7" # write serialized data - including hashmaps
|
||||
rand = "0.5"
|
@ -1,26 +0,0 @@
|
||||
# Qemu systemmode with launcher
|
||||
|
||||
This folder contains an example fuzzer for the qemu systemmode, using LLMP for fast multi-process fuzzing and crash detection.
|
||||
|
||||
## Build
|
||||
|
||||
To build this example, run
|
||||
|
||||
```bash
|
||||
cargo build --release
|
||||
cd example; sh build.sh; cd ..
|
||||
```
|
||||
|
||||
This will build the fuzzer (src/fuzzer.rs) and a small example binary based on FreeRTOS, which can run under a qemu emulation target.
|
||||
|
||||
## Run
|
||||
|
||||
Since the instrumentation is based on snapshots, QEMU needs a virtual drive (even if it is unused...).
|
||||
Create one and then run the fuzzer:
|
||||
```bash
|
||||
# create an image
|
||||
qemu-img create -f qcow2 dummy.qcow2 32M
|
||||
# run the fuzzer
|
||||
KERNEL=./example/example.elf target/release/qemu_systemmode -icount shift=auto,align=off,sleep=off -machine mps2-an385 -monitor null -kernel ./example/example.elf -serial null -nographic -snapshot -drive if=none,format=qcow2,file=dummy.qcow2 -S
|
||||
```
|
||||
Currently, the ``KERNEL`` variable is needed because the fuzzer does not parse QEMU's arguments to find the binary.
|
12
fuzzers/FRET/benchmark/.gitignore
vendored
@ -1,12 +0,0 @@
|
||||
*dump
|
||||
timedump*
|
||||
corpora
|
||||
build
|
||||
mnt
|
||||
.R*
|
||||
*.png
|
||||
*.pdf
|
||||
bins
|
||||
.snakemake
|
||||
*.zip
|
||||
*.tar.*
|
@ -1,57 +0,0 @@
|
||||
TIME=7200
|
||||
|
||||
corpora/%/seed:
|
||||
mkdir -p $$(dirname $@)
|
||||
LINE=$$(grep "^$$(basename $*)" target_symbols.csv); \
|
||||
export \
|
||||
KERNEL=benchmark/build/$*.elf \
|
||||
FUZZ_MAIN=$$(echo $$LINE | cut -d, -f2) \
|
||||
FUZZ_INPUT=$$(echo $$LINE | cut -d, -f3) \
|
||||
FUZZ_INPUT_LEN=$$(echo $$LINE | cut -d, -f4) \
|
||||
BREAKPOINT=$$(echo $$LINE | cut -d, -f5) \
|
||||
SEED_DIR=benchmark/corpora/$* \
|
||||
DUMP_SEED=seed; \
|
||||
../fuzzer.sh
|
||||
|
||||
timedump/%$(FUZZ_RANDOM)$(SUFFIX): corpora/%/seed
|
||||
mkdir -p $$(dirname $@)
|
||||
LINE=$$(grep "^$$(basename $*)" target_symbols.csv); \
|
||||
export \
|
||||
KERNEL=benchmark/build/$*.elf \
|
||||
FUZZ_MAIN=$$(echo $$LINE | cut -d, -f2) \
|
||||
FUZZ_INPUT=$$(echo $$LINE | cut -d, -f3) \
|
||||
FUZZ_INPUT_LEN=$$(echo $$LINE | cut -d, -f4) \
|
||||
BREAKPOINT=$$(echo $$LINE | cut -d, -f5) \
|
||||
SEED_RANDOM=1 \
|
||||
TIME_DUMP=benchmark/$@ \
|
||||
CASE_DUMP=benchmark/$@; \
|
||||
../fuzzer.sh + + + + + $(TIME) + + + > $@_log
|
||||
#SEED_DIR=benchmark/corpora/$*
|
||||
|
||||
all_sequential: timedump/sequential/mpeg2$(FUZZ_RANDOM) timedump/sequential/dijkstra$(FUZZ_RANDOM) timedump/sequential/epic$(FUZZ_RANDOM) \
|
||||
timedump/sequential/g723_enc$(FUZZ_RANDOM) timedump/sequential/audiobeam$(FUZZ_RANDOM) \
|
||||
timedump/sequential/gsm_enc$(FUZZ_RANDOM)
|
||||
|
||||
all_kernel: timedump/kernel/bsort$(FUZZ_RANDOM) timedump/kernel/insertsort$(FUZZ_RANDOM) #timedump/kernel/fft$(FUZZ_RANDOM)
|
||||
|
||||
all_app: timedump/app/lift$(FUZZ_RANDOM)
|
||||
|
||||
all_system: timedump/lift$(FUZZ_RANDOM)$(SUFFIX)
|
||||
|
||||
all_period: timedump/waters$(FUZZ_RANDOM)$(SUFFIX)
|
||||
|
||||
tacle_rtos: timedump/tacle_rtos$(FUZZ_RANDOM)
|
||||
|
||||
graphics:
|
||||
Rscript --vanilla plot_comparison.r mnt/timedump/sequential audiobeam
|
||||
Rscript --vanilla plot_comparison.r mnt/timedump/sequential dijkstra
|
||||
Rscript --vanilla plot_comparison.r mnt/timedump/sequential epic
|
||||
Rscript --vanilla plot_comparison.r mnt/timedump/sequential g723_enc
|
||||
# Rscript --vanilla plot_comparison.r mnt/timedump/sequential gsm_enc
|
||||
# Rscript --vanilla plot_comparison.r mnt/timedump/sequential huff_dec
|
||||
Rscript --vanilla plot_comparison.r mnt/timedump/sequential mpeg2
|
||||
# Rscript --vanilla plot_comparison.r mnt/timedump/sequential rijndael_dec
|
||||
# Rscript --vanilla plot_comparison.r mnt/timedump/sequential rijndael_enc
|
||||
|
||||
clean:
|
||||
rm -rf corpora timedump
|
@ -1,281 +0,0 @@
|
||||
import csv
|
||||
import os
|
||||
def_flags="--no-default-features --features std,snapshot_restore,singlecore,restarting,run_until_saturation"
|
||||
remote="timedump_253048_1873f6_all/"
|
||||
RUNTIME=10
|
||||
TARGET_REPS_A=2
|
||||
TARGET_REPS_B=2
|
||||
NUM_NODES=2
|
||||
REP_PER_NODE_A=int(TARGET_REPS_A/NUM_NODES)
|
||||
REP_PER_NODE_B=int(TARGET_REPS_B/NUM_NODES)
|
||||
NODE_ID= 0 if os.getenv('NODE_ID') == None else int(os.environ['NODE_ID'])
|
||||
MY_RANGE_A=range(NODE_ID*REP_PER_NODE_A,(NODE_ID+1)*REP_PER_NODE_A)
|
||||
MY_RANGE_B=range(NODE_ID*REP_PER_NODE_B,(NODE_ID+1)*REP_PER_NODE_B)
|
||||
|
||||
rule build_showmap:
|
||||
output:
|
||||
directory("bins/target_showmap")
|
||||
shell:
|
||||
"cargo build --target-dir {output} {def_flags},systemstate"
|
||||
|
||||
rule build_random:
|
||||
output:
|
||||
directory("bins/target_random")
|
||||
shell:
|
||||
"cargo build --target-dir {output} {def_flags},feed_longest"
|
||||
|
||||
rule build_feedlongest:
|
||||
output:
|
||||
directory("bins/target_feedlongest")
|
||||
shell:
|
||||
"cargo build --target-dir {output} {def_flags},feed_longest"
|
||||
|
||||
rule build_frafl:
|
||||
output:
|
||||
directory("bins/target_frafl")
|
||||
shell:
|
||||
"cargo build --target-dir {output} {def_flags},feed_afl,feed_longest"
|
||||
|
||||
rule build_afl:
|
||||
output:
|
||||
directory("bins/target_afl")
|
||||
shell:
|
||||
"cargo build --target-dir {output} {def_flags},feed_afl,observer_hitcounts"
|
||||
|
||||
rule build_state:
|
||||
output:
|
||||
directory("bins/target_state")
|
||||
shell:
|
||||
"cargo build --target-dir {output} {def_flags},feed_systemtrace"
|
||||
|
||||
rule build_nohashstate:
|
||||
output:
|
||||
directory("bins/target_nohashstate")
|
||||
shell:
|
||||
"cargo build --target-dir {output} {def_flags},feed_systemtrace,no_hash_state"
|
||||
|
||||
rule build_graph:
|
||||
output:
|
||||
directory("bins/target_graph")
|
||||
shell:
|
||||
"cargo build --target-dir {output} {def_flags},feed_systemgraph"
|
||||
|
||||
rule build_showmap_int:
|
||||
output:
|
||||
directory("bins/target_showmap_int")
|
||||
shell:
|
||||
"cargo build --target-dir {output} {def_flags},systemstate,fuzz_int"
|
||||
|
||||
rule build_random_int:
|
||||
output:
|
||||
directory("bins/target_random_int")
|
||||
shell:
|
||||
"cargo build --target-dir {output} {def_flags},feed_longest,fuzz_int"
|
||||
|
||||
rule build_state_int:
|
||||
output:
|
||||
directory("bins/target_state_int")
|
||||
shell:
|
||||
"cargo build --target-dir {output} {def_flags},feed_systemtrace,fuzz_int"
|
||||
|
||||
rule build_nohashstate_int:
|
||||
output:
|
||||
directory("bins/target_nohashstate_int")
|
||||
shell:
|
||||
"cargo build --target-dir {output} {def_flags},feed_systemtrace,fuzz_int,no_hash_state"
|
||||
|
||||
rule build_frafl_int:
|
||||
output:
|
||||
directory("bins/target_frafl_int")
|
||||
shell:
|
||||
"cargo build --target-dir {output} {def_flags},feed_afl,feed_longest,fuzz_int"
|
||||
|
||||
rule build_afl_int:
|
||||
output:
|
||||
directory("bins/target_afl_int")
|
||||
shell:
|
||||
"cargo build --target-dir {output} {def_flags},feed_afl,fuzz_int,observer_hitcounts"
|
||||
|
||||
rule build_feedlongest_int:
|
||||
output:
|
||||
directory("bins/target_feedlongest_int")
|
||||
shell:
|
||||
"cargo build --target-dir {output} {def_flags},feed_longest,fuzz_int"
|
||||
|
||||
rule build_feedgeneration1:
|
||||
output:
|
||||
directory("bins/target_feedgeneration1")
|
||||
shell:
|
||||
"cargo build --target-dir {output} {def_flags},feed_genetic,gensize_1"
|
||||
|
||||
rule build_feedgeneration1_int:
|
||||
output:
|
||||
directory("bins/target_feedgeneration1_int")
|
||||
shell:
|
||||
"cargo build --target-dir {output} {def_flags},feed_genetic,fuzz_int,gensize_1"
|
||||
|
||||
rule build_feedgeneration10:
|
||||
output:
|
||||
directory("bins/target_feedgeneration10")
|
||||
shell:
|
||||
"cargo build --target-dir {output} {def_flags},feed_genetic,gensize_10"
|
||||
|
||||
rule build_feedgeneration10_int:
|
||||
output:
|
||||
directory("bins/target_feedgeneration10_int")
|
||||
shell:
|
||||
"cargo build --target-dir {output} {def_flags},feed_genetic,fuzz_int,gensize_10"
|
||||
|
||||
rule build_feedgeneration100:
|
||||
output:
|
||||
directory("bins/target_feedgeneration100")
|
||||
shell:
|
||||
"cargo build --target-dir {output} {def_flags},feed_genetic,gensize_100"
|
||||
|
||||
rule build_feedgeneration100_int:
|
||||
output:
|
||||
directory("bins/target_feedgeneration100_int")
|
||||
shell:
|
||||
"cargo build --target-dir {output} {def_flags},feed_genetic,fuzz_int,gensize_100"
|
||||
|
||||
rule run_bench:
|
||||
input:
|
||||
"build/{target}.elf",
|
||||
"bins/target_{fuzzer}"
|
||||
output:
|
||||
multiext("timedump/{fuzzer}/{target}.{num}", "", ".log") # , ".case"
|
||||
run:
|
||||
with open('target_symbols.csv') as csvfile:
|
||||
reader = csv.DictReader(csvfile)
|
||||
line = next((x for x in reader if x['kernel']==wildcards.target), None)
|
||||
if line == None:
|
||||
return False
|
||||
kernel=line['kernel']
|
||||
fuzz_main=line['main_function']
|
||||
fuzz_input=line['input_symbol']
|
||||
fuzz_len=line['input_size']
|
||||
bkp=line['return_function']
|
||||
script="""
|
||||
mkdir -p $(dirname {output[0]})
|
||||
export KERNEL=$(pwd)/{input[0]}
|
||||
export FUZZ_MAIN={fuzz_main}
|
||||
export FUZZ_INPUT={fuzz_input}
|
||||
export FUZZ_INPUT_LEN={fuzz_len}
|
||||
export BREAKPOINT={bkp}
|
||||
export SEED_RANDOM={wildcards.num}
|
||||
export TIME_DUMP=$(pwd)/{output[0]}
|
||||
export CASE_DUMP=$(pwd)/{output[0]}.case
|
||||
export TRACE_DUMP=$(pwd)/{output[0]}.trace
|
||||
export FUZZ_ITERS={RUNTIME}
|
||||
export FUZZER=$(pwd)/{input[1]}/debug/fret
|
||||
set +e
|
||||
../fuzzer.sh > {output[1]} 2>&1
|
||||
exit 0
|
||||
"""
|
||||
if wildcards.fuzzer.find('random') >= 0:
|
||||
script="export FUZZ_RANDOM={output[1]}\n"+script
|
||||
shell(script)
|
||||
|
||||
rule run_showmap:
|
||||
input:
|
||||
"{remote}build/{target}.elf",
|
||||
"bins/target_showmap",
|
||||
"bins/target_showmap_int",
|
||||
"{remote}timedump/{fuzzer}/{target}.{num}.case"
|
||||
output:
|
||||
"{remote}timedump/{fuzzer}/{target}.{num}.trace.ron",
|
||||
"{remote}timedump/{fuzzer}/{target}.{num}.case.time",
|
||||
run:
|
||||
with open('target_symbols.csv') as csvfile:
|
||||
reader = csv.DictReader(csvfile)
|
||||
line = next((x for x in reader if x['kernel']==wildcards.target), None)
|
||||
if line == None:
|
||||
return False
|
||||
kernel=line['kernel']
|
||||
fuzz_main=line['main_function']
|
||||
fuzz_input=line['input_symbol']
|
||||
fuzz_len=line['input_size']
|
||||
bkp=line['return_function']
|
||||
script=""
|
||||
if wildcards.fuzzer.find('_int') > -1:
|
||||
script="export FUZZER=$(pwd)/{input[2]}/debug/fret\n"
|
||||
else:
|
||||
script="export FUZZER=$(pwd)/{input[1]}/debug/fret\n"
|
||||
script+="""
|
||||
mkdir -p $(dirname {output})
|
||||
export KERNEL=$(pwd)/{input[0]}
|
||||
export FUZZ_MAIN={fuzz_main}
|
||||
export FUZZ_INPUT={fuzz_input}
|
||||
export FUZZ_INPUT_LEN={fuzz_len}
|
||||
export BREAKPOINT={bkp}
|
||||
export TRACE_DUMP=$(pwd)/{output[0]}
|
||||
export DO_SHOWMAP=$(pwd)/{input[3]}
|
||||
export TIME_DUMP=$(pwd)/{output[1]}
|
||||
set +e
|
||||
../fuzzer.sh
|
||||
exit 0
|
||||
"""
|
||||
if wildcards.fuzzer.find('random') >= 0:
|
||||
script="export FUZZ_RANDOM=1\n"+script
|
||||
shell(script)
|
||||
|
||||
rule tarnsform_trace:
|
||||
input:
|
||||
"{remote}timedump/{fuzzer}/{target}.{num}.trace.ron"
|
||||
output:
|
||||
"{remote}timedump/{fuzzer}/{target}.{num}.trace.csv"
|
||||
shell:
|
||||
"$(pwd)/../../../../state2gantt/target/debug/state2gantt {input} > {output[0]}"
|
||||
|
||||
rule trace2gantt:
|
||||
input:
|
||||
"{remote}timedump/{fuzzer}/{target}.{num}.trace.csv"
|
||||
output:
|
||||
"{remote}timedump/{fuzzer}/{target}.{num}.trace.csv.png"
|
||||
shell:
|
||||
"Rscript --vanilla $(pwd)/../../../../state2gantt/gantt.R {input}"
|
||||
|
||||
rule all_main:
|
||||
input:
|
||||
expand("timedump/{fuzzer}/{target}.{num}", fuzzer=['random','afl','feedgeneration10','state'], target=['waters','watersv2'],num=range(0,3))
|
||||
|
||||
rule all_main_int:
|
||||
input:
|
||||
expand("timedump/{fuzzer}/{target}.{num}", fuzzer=['random_int','afl_int','feedgeneration10_int','state_int'], target=['waters_int','watersv2_int'],num=range(0,4))
|
||||
|
||||
rule all_compare_feedgeneration:
|
||||
input:
|
||||
expand("timedump/{fuzzer}/{target}.{num}", fuzzer=['feedgeneration1','feedgeneration10','feedgeneration100'], target=['waters_int','watersv2'],num=range(0,10))
|
||||
|
||||
rule all_compare_feedgeneration_int:
|
||||
input:
|
||||
expand("timedump/{fuzzer}/{target}.{num}", fuzzer=['feedgeneration1_int','feedgeneration10_int','feedgeneration100_int'], target=['waters_int','watersv2_int'],num=range(0,10))
|
||||
|
||||
rule all_compare_afl:
|
||||
input:
|
||||
expand("timedump/{fuzzer}/{target}.{num}", fuzzer=['afl','frafl','feedlongest'], target=['waters','watersv2'],num=range(0,10))
|
||||
|
||||
rule all_compare_afl_int:
|
||||
input:
|
||||
expand("timedump/{fuzzer}/{target}.{num}", fuzzer=['afl_int','frafl_int','feedlongest_int'], target=['waters_int','watersv2_int'],num=range(0,10))
|
||||
|
||||
rule all_images:
|
||||
input:
|
||||
expand("{remote}timedump/{fuzzer}/{target}.{num}.trace.csv.png",remote=remote, fuzzer=['afl','feedgeneration10','state'], target=['waters','watersv2'],num=range(0,3))
|
||||
|
||||
rule all_images_int:
|
||||
input:
|
||||
expand("{remote}timedump/{fuzzer}/{target}.{num}.trace.csv.png",remote=remote, fuzzer=['afl_int','feedgeneration10_int','state_int'], target=['waters_int','watersv2_int'],num=range(0,3))
|
||||
|
||||
rule clusterfuzz:
|
||||
input:
|
||||
expand("timedump/{fuzzer}/{target}.{num}", fuzzer=['random','afl','feedgeneration10','state'], target=['waters','watersv2'],num=MY_RANGE_A),
|
||||
expand("timedump/{fuzzer}/{target}.{num}", fuzzer=['random_int','afl_int','feedgeneration10_int','state_int'], target=['waters_int','watersv2_int'],num=MY_RANGE_A),
|
||||
expand("timedump/{fuzzer}/{target}.{num}", fuzzer=['feedgeneration1','feedgeneration10','feedgeneration100'], target=['waters_int','watersv2'],num=MY_RANGE_B),
|
||||
expand("timedump/{fuzzer}/{target}.{num}", fuzzer=['feedgeneration1_int','feedgeneration10_int','feedgeneration100_int'], target=['waters_int','watersv2_int'],num=MY_RANGE_B),
|
||||
expand("timedump/{fuzzer}/{target}.{num}", fuzzer=['afl','frafl','feedlongest'], target=['waters','watersv2'],num=MY_RANGE_B),
|
||||
expand("timedump/{fuzzer}/{target}.{num}", fuzzer=['afl_int','frafl_int','feedlongest_int'], target=['waters_int','watersv2_int'],num=MY_RANGE_B),
|
||||
|
||||
rule all_bins:
|
||||
input:
|
||||
expand("bins/target_{target}{flag}",target=['random','afl','frafl','state','feedgeneration100'],flag=['','_int'])
|
@ -1,83 +0,0 @@
|
||||
library("mosaic")
|
||||
args = commandArgs(trailingOnly=TRUE)
|
||||
|
||||
#myolors=c("#339933","#0066ff","#993300") # green, blue, red
|
||||
myolors=c("dark green","dark blue","dark red", "yellow") # grün, balu, rot
|
||||
|
||||
if (length(args)==0) {
|
||||
runtype="timedump"
|
||||
target="waters"
|
||||
filename_1=sprintf("%s.png",target)
|
||||
filename_2=sprintf("%s_maxline.png",target)
|
||||
filename_3=sprintf("%s_hist.png",target)
|
||||
} else {
|
||||
runtype=args[1]
|
||||
target=args[2]
|
||||
filename_1=sprintf("%s.png",args[2])
|
||||
filename_2=sprintf("%s_maxline.png",args[2])
|
||||
filename_3=sprintf("%s_hist.png",args[2])
|
||||
# filename_1=args[3]
|
||||
}
|
||||
|
||||
file_1=sprintf("~/code/FRET/LibAFL/fuzzers/FRET/benchmark/%s/%s_state",runtype,target)
|
||||
file_2=sprintf("~/code/FRET/LibAFL/fuzzers/FRET/benchmark/%s/%s_afl",runtype,target)
|
||||
file_3=sprintf("~/code/FRET/LibAFL/fuzzers/FRET/benchmark/%s/%s_random",runtype,target)
|
||||
file_4=sprintf("~/code/FRET/LibAFL/fuzzers/FRET/benchmark/%s/%s_graph",runtype,target)
|
||||
timetrace <- read.table(file_1, quote="\"", comment.char="")
|
||||
timetrace_afl <- read.table(file_2, quote="\"", comment.char="")
|
||||
timetrace_rand <- read.table(file_3, quote="\"", comment.char="")
|
||||
timetrace_graph <- read.table(file_4, quote="\"", comment.char="")
|
||||
timetrace[[2]]=seq_len(length(timetrace[[1]]))
|
||||
timetrace_afl[[2]]=seq_len(length(timetrace_afl[[1]]))
|
||||
timetrace_rand[[2]]=seq_len(length(timetrace_rand[[1]]))
|
||||
timetrace_graph[[2]]=seq_len(length(timetrace_graph[[1]]))
|
||||
names(timetrace)[1] <- "timetrace"
|
||||
names(timetrace)[2] <- "iter"
|
||||
names(timetrace_afl)[1] <- "timetrace"
|
||||
names(timetrace_afl)[2] <- "iter"
|
||||
names(timetrace_rand)[1] <- "timetrace"
|
||||
names(timetrace_rand)[2] <- "iter"
|
||||
names(timetrace_graph)[1] <- "timetrace"
|
||||
names(timetrace_graph)[2] <- "iter"
|
||||
|
||||
png(file=filename_1)
|
||||
# pdf(file=filename_1,width=8, height=8)
|
||||
plot(timetrace[[2]],timetrace[[1]], col=myolors[1], xlab="iters", ylab="wcet", pch='.')
|
||||
points(timetrace_afl[[2]],timetrace_afl[[1]], col=myolors[2], pch='.')
|
||||
points(timetrace_rand[[2]],timetrace_rand[[1]], col=myolors[3], pch='.')
|
||||
points(timetrace_graph[[2]],timetrace_graph[[1]], col=myolors[4], pch='.')
|
||||
abline(lm(timetrace ~ iter, data=timetrace),col=myolors[1])
|
||||
abline(lm(timetrace ~ iter, data=timetrace_afl),col=myolors[2])
|
||||
abline(lm(timetrace ~ iter, data=timetrace_rand),col=myolors[3])
|
||||
dev.off()
|
||||
|
||||
png(file=filename_3)
|
||||
gf_histogram(~ timetrace,data=timetrace, fill=myolors[1]) %>%
|
||||
gf_histogram(~ timetrace,data=timetrace_afl, fill=myolors[2]) %>%
|
||||
gf_histogram(~ timetrace,data=timetrace_rand, fill=myolors[3]) %>%
|
||||
gf_histogram(~ timetrace,data=timetrace_graph, fill=myolors[4])
|
||||
dev.off()
|
||||
|
||||
# Takes a flat list
|
||||
trace2maxline <- function(tr) {
|
||||
maxline = tr
|
||||
for (var in seq_len(length(maxline))[2:length(maxline)]) {
|
||||
maxline[var] = max(maxline[var],maxline[var-1])
|
||||
}
|
||||
#plot(seq_len(length(maxline)),maxline,"l",xlab="Index",ylab="WOET")
|
||||
return(maxline)
|
||||
}
|
||||
timetrace[[1]] <- trace2maxline(timetrace[[1]])
|
||||
timetrace_afl[[1]] <- trace2maxline(timetrace_afl[[1]])
|
||||
timetrace_rand[[1]] <- trace2maxline(timetrace_rand[[1]])
|
||||
timetrace_graph[[1]] <- trace2maxline(timetrace_graph[[1]])
|
||||
|
||||
png(file=filename_2)
|
||||
plot(timetrace[[2]],timetrace[[1]], col=myolors[1], xlab="iters", ylab="wcet", pch='.')
|
||||
points(timetrace_afl[[2]],timetrace_afl[[1]], col=myolors[2], pch='.')
|
||||
points(timetrace_rand[[2]],timetrace_rand[[1]], col=myolors[3], pch='.')
|
||||
points(timetrace_graph[[2]],timetrace_graph[[1]], col=myolors[4], pch='.')
|
||||
#abline(lm(timetrace ~ iter, data=timetrace),col=myolors[1])
|
||||
#abline(lm(timetrace ~ iter, data=timetrace_afl),col=myolors[2])
|
||||
#abline(lm(timetrace ~ iter, data=timetrace_rand),col=myolors[3])
|
||||
dev.off()
|
@ -1,327 +0,0 @@
|
||||
library("mosaic")
|
||||
library("dplyr")
|
||||
library("foreach")
|
||||
library("doParallel")
|
||||
|
||||
#setup parallel backend to use many processors
|
||||
cores=detectCores()
|
||||
cl <- makeCluster(cores[1]-1) #not to overload your computer
|
||||
registerDoParallel(cl)
|
||||
|
||||
args = commandArgs(trailingOnly=TRUE)
|
||||
|
||||
if (length(args)==0) {
|
||||
runtype="timedump_253048_1873f6_all/timedump"
|
||||
target="waters_int"
|
||||
outputpath="~/code/FRET/LibAFL/fuzzers/FRET/benchmark/"
|
||||
#MY_SELECTION <- c('state', 'afl', 'graph', 'random')
|
||||
SAVE_FILE=TRUE
|
||||
} else {
|
||||
runtype=args[1]
|
||||
target=args[2]
|
||||
outputpath=args[3]
|
||||
MY_SELECTION <- args[4:length(args)]
|
||||
SAVE_FILE=TRUE
|
||||
}
|
||||
worst_cases <- list(waters=0, waters_int=0, tmr=405669, micro_longint=0)
|
||||
worst_case <- worst_cases[[target]]
|
||||
if (is.null(worst_case)) {
|
||||
worst_case = 0
|
||||
}
|
||||
|
||||
#MY_COLORS=c("green","blue","red", "orange", "pink", "black")
|
||||
MY_COLORS <- c("green", "blue", "red", "magenta", "orange", "cyan", "pink", "gray", "orange", "black", "yellow","brown")
|
||||
BENCHDIR=sprintf("~/code/FRET/LibAFL/fuzzers/FRET/benchmark/%s",runtype)
|
||||
BASENAMES=Filter(function(x) x!="" && substr(x,1,1)!='.',list.dirs(BENCHDIR,full.names=FALSE))
|
||||
PATTERNS="%s.[0-9]*$"
|
||||
#RIBBON='sd'
|
||||
#RIBBON='span'
|
||||
RIBBON='both'
|
||||
DRAW_WC = worst_case > 0
|
||||
LEGEND_POS="topright"
|
||||
#LEGEND_POS="bottomright"
|
||||
CONTINUE_LINE_TO_END=FALSE
|
||||
|
||||
# https://www.r-bloggers.com/2013/04/how-to-change-the-alpha-value-of-colours-in-r/
|
||||
alpha <- function(col, alpha=1){
|
||||
if(missing(col))
|
||||
stop("Please provide a vector of colours.")
|
||||
apply(sapply(col, col2rgb)/255, 2,
|
||||
function(x)
|
||||
rgb(x[1], x[2], x[3], alpha=alpha))
|
||||
}
|
||||
|
||||
# Trimm a list of data frames to common length
|
||||
trim_data <- function(input,len=NULL) {
|
||||
if (is.null(len)) {
|
||||
len <- min(sapply(input, function(v) dim(v)[1]))
|
||||
}
|
||||
return(lapply(input, function(d) slice_head(d,n=len)))
|
||||
}
|
||||
|
||||
length_of_data <- function(input) {
|
||||
min(sapply(input, function(v) dim(v)[1]))
|
||||
}
|
||||
|
||||
# Takes a flat list
|
||||
trace2maxline <- function(tr) {
|
||||
maxline = tr
|
||||
for (var in seq_len(length(maxline))[2:length(maxline)]) {
|
||||
#if (maxline[var]>1000000000) {
|
||||
# maxline[var]=maxline[var-1]
|
||||
#} else {
|
||||
maxline[var] = max(maxline[var],maxline[var-1])
|
||||
#}
|
||||
}
|
||||
#plot(seq_len(length(maxline)),maxline,"l",xlab="Index",ylab="WOET")
|
||||
return(maxline)
|
||||
}
|
||||
|
||||
# Take a list of data frames, output same form but maxlines
|
||||
data2maxlines <- function(tr) {
|
||||
min_length <- min(sapply(tr, function(v) dim(v)[1]))
|
||||
maxline <- tr
|
||||
for (var in seq_len(length(tr))) {
|
||||
maxline[[var]][[1]]=trace2maxline(tr[[var]][[1]])
|
||||
}
|
||||
return(maxline)
|
||||
}
|
||||
# Take a multi-column data frame, output same form but maxlines
|
||||
frame2maxlines <- function(tr) {
|
||||
for (var in seq_len(length(tr))) {
|
||||
tr[[var]]=trace2maxline(tr[[var]])
|
||||
}
|
||||
return(tr)
|
||||
}
|
||||
|
||||
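# Reduce a trace (value, timestamp) to the rows where the running maximum of the
# value column increases, then append a final row carrying that maximum at the last timestamp.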
trace2maxpoints <- function(tr) {
|
||||
minval = tr[1,1]
|
||||
collect = tr[1,]
|
||||
for (i in seq_len(dim(tr)[1])) {
|
||||
if (minval < tr[i,1]) {
|
||||
collect = rbind(collect,tr[i,])
|
||||
minval = tr[i,1]
|
||||
}
|
||||
}
|
||||
tmp = tr[dim(tr)[1],]
|
||||
tmp[1] = minval[1]
|
||||
collect = rbind(collect,tmp)
|
||||
return(collect)
|
||||
}
|
||||
|
||||
sample_maxpoints <- function(tr,po) {
|
||||
index = 1
|
||||
collect=NULL
|
||||
endpoint = dim(tr)[1]
|
||||
for (p in po) {
|
||||
if (p<=tr[1,2]) {
|
||||
tmp = tr[index,]
|
||||
tmp[2] = p
|
||||
collect = rbind(collect, tmp)
|
||||
} else if (p>=tr[endpoint,2]) {
|
||||
tmp = tr[endpoint,]
|
||||
tmp[2] = p
|
||||
collect = rbind(collect, tmp)
|
||||
} else {
|
||||
for (i in seq(index, endpoint - 1)) {
|
||||
if (p >= tr[i,2] && p<tr[i+1,2]) {
|
||||
tmp = tr[i,]
|
||||
tmp[2] = p
|
||||
collect = rbind(collect, tmp)
|
||||
index = i
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return(collect)
|
||||
}
|
||||
|
||||
#https://www.r-bloggers.com/2012/01/parallel-r-loops-for-windows-and-linux/
|
||||
all_runtypetables <- foreach (bn=BASENAMES) %do% {
|
||||
runtypefiles <- list.files(file.path(BENCHDIR,bn),pattern=sprintf(PATTERNS,target),full.names = TRUE)
|
||||
if (length(runtypefiles) > 0) {
|
||||
runtypetables_reduced <- foreach(i=seq_len(length(runtypefiles))) %dopar% {
|
||||
rtable = read.csv(runtypefiles[[i]], col.names=c(sprintf("%s%d",bn,i),sprintf("times%d",i)))
|
||||
trace2maxpoints(rtable)
|
||||
}
|
||||
#runtypetables <- lapply(seq_len(length(runtypefiles)),
|
||||
# function(i)read.csv(runtypefiles[[i]], col.names=c(sprintf("%s%d",bn,i),sprintf("times%d",i))))
|
||||
#runtypetables_reduced <- lapply(runtypetables, trace2maxpoints)
|
||||
runtypetables_reduced
|
||||
#all_runtypetables = c(all_runtypetables, list(runtypetables_reduced))
|
||||
}
|
||||
}
|
||||
all_runtypetables = all_runtypetables[lapply(all_runtypetables, length) > 0]
|
||||
all_min_points = foreach(rtt=all_runtypetables,.combine = cbind) %do% {
|
||||
bn = substr(names(rtt[[1]])[1],1,nchar(names(rtt[[1]])[1])-1)
|
||||
ret = data.frame(min(unlist(lapply(rtt, function(v) v[dim(v)[1],2]))))
|
||||
names(ret)[1] = bn
|
||||
ret/(3600 * 1000)
|
||||
}
|
||||
all_max_points = foreach(rtt=all_runtypetables,.combine = cbind) %do% {
|
||||
bn = substr(names(rtt[[1]])[1],1,nchar(names(rtt[[1]])[1])-1)
|
||||
ret = data.frame(max(unlist(lapply(rtt, function(v) v[dim(v)[1],2]))))
|
||||
names(ret)[1] = bn
|
||||
ret/(3600 * 1000)
|
||||
}
|
||||
all_points = sort(unique(Reduce(c, lapply(all_runtypetables, function(v) Reduce(c, lapply(v, function(w) w[[2]]))))))
|
||||
all_maxlines <- foreach (rtt=all_runtypetables) %do% {
|
||||
bn = substr(names(rtt[[1]])[1],1,nchar(names(rtt[[1]])[1])-1)
|
||||
runtypetables_sampled = foreach(v=rtt) %dopar% {
|
||||
sample_maxpoints(v, all_points)[1]
|
||||
}
|
||||
#runtypetables_sampled = lapply(rtt, function(v) sample_maxpoints(v, all_points)[1])
|
||||
tmp_frame <- Reduce(cbind, runtypetables_sampled)
|
||||
statframe <- data.frame(rowMeans(tmp_frame),apply(tmp_frame, 1, sd),apply(tmp_frame, 1, min),apply(tmp_frame, 1, max), apply(tmp_frame, 1, median))
|
||||
names(statframe) <- c(bn, sprintf("%s_sd",bn), sprintf("%s_min",bn), sprintf("%s_max",bn), sprintf("%s_med",bn))
|
||||
#statframe[sprintf("%s_times",bn)] = all_points
|
||||
round(statframe)
|
||||
#all_maxlines = c(all_maxlines, list(round(statframe)))
|
||||
}
|
||||
one_frame<-data.frame(all_maxlines)
|
||||
one_frame[length(one_frame)+1] <- all_points/(3600 * 1000)
|
||||
names(one_frame)[length(one_frame)] <- 'time'
|
||||
|
||||
typenames = names(one_frame)[which(names(one_frame) != 'time')]
|
||||
typenames = typenames[which(!endsWith(typenames, "_sd"))]
|
||||
typenames = typenames[which(!endsWith(typenames, "_med"))]
|
||||
ylow=min(one_frame[typenames])
|
||||
yhigh=max(one_frame[typenames],worst_case)
|
||||
typenames = typenames[which(!endsWith(typenames, "_min"))]
|
||||
typenames = typenames[which(!endsWith(typenames, "_max"))]
|
||||
|
||||
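# Convert a maxline (value column plus 'time' column) into step-function line segments
# for plotting; stops at 'lim' unless CONTINUE_LINE_TO_END is set.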
ml2lines <- function(ml,lim) {
|
||||
lines = NULL
|
||||
last = 0
|
||||
for (i in seq_len(dim(ml)[1])) {
|
||||
if (!CONTINUE_LINE_TO_END && lim<ml[i,2]) {
|
||||
break
|
||||
}
|
||||
lines = rbind(lines, cbind(X=last, Y=ml[i,1]))
|
||||
lines = rbind(lines, cbind(X=ml[i,2], Y=ml[i,1]))
|
||||
last = ml[i,2]
|
||||
}
|
||||
return(lines)
|
||||
}
|
||||
|
||||
plotting <- function(selection, filename, MY_COLORS_) {
|
||||
# filter out names of iters and sd cols
|
||||
typenames = names(one_frame)[which(names(one_frame) != 'time')]
|
||||
typenames = typenames[which(!endsWith(typenames, "_sd"))]
|
||||
typenames = typenames[which(!endsWith(typenames, "_med"))]
|
||||
typenames = typenames[which(!endsWith(typenames, "_min"))]
|
||||
typenames = typenames[which(!endsWith(typenames, "_max"))]
|
||||
typenames = selection[which(selection %in% typenames)]
|
||||
if (length(typenames) == 0) {return()}
|
||||
|
||||
h_ = 500
|
||||
w_ = h_*4/3
|
||||
|
||||
if (SAVE_FILE) {png(file=sprintf("%s%s_%s.png",outputpath,target,filename), width=w_, height=h_)}
|
||||
par(mar=c(4,4,1,1))
|
||||
par(oma=c(0,0,0,0))
|
||||
|
||||
plot(c(1,max(one_frame['time'])),c(ylow,yhigh), col='white', xlab="Time [h]", ylab="WORT [insn]", pch='.')
|
||||
|
||||
for (t in seq_len(length(typenames))) {
|
||||
#proj = one_frame[seq(1, dim(one_frame)[1], by=max(1, length(one_frame[[1]])/(10*w_))),]
|
||||
#points(proj[c('iters',typenames[t])], col=MY_COLORS_[t], pch='.')
|
||||
avglines = ml2lines(one_frame[c(typenames[t],'time')],all_max_points[typenames[t]])
|
||||
#lines(avglines, col=MY_COLORS_[t])
|
||||
medlines = ml2lines(one_frame[c(sprintf("%s_med",typenames[t]),'time')],all_max_points[typenames[t]])
|
||||
lines(medlines, col=MY_COLORS_[t], lty='solid')
|
||||
milines = NULL
|
||||
malines = NULL
|
||||
milines = ml2lines(one_frame[c(sprintf("%s_min",typenames[t]),'time')],all_max_points[typenames[t]])
|
||||
malines = ml2lines(one_frame[c(sprintf("%s_max",typenames[t]),'time')],all_max_points[typenames[t]])
|
||||
if (exists("RIBBON") && ( RIBBON=='max' )) {
|
||||
#lines(milines, col=MY_COLORS_[t], lty='dashed')
|
||||
lines(malines, col=MY_COLORS_[t], lty='dashed')
|
||||
#points(proj[c('iters',sprintf("%s_min",typenames[t]))], col=MY_COLORS_[t], pch='.')
|
||||
#points(proj[c('iters',sprintf("%s_max",typenames[t]))], col=MY_COLORS_[t], pch='.')
|
||||
}
|
||||
if (exists("RIBBON") && RIBBON != '') {
|
||||
for (i in seq_len(dim(avglines)[1]-1)) {
|
||||
if (RIBBON=='both') {
|
||||
# draw boxes
|
||||
x_l <- milines[i,][['X']]
|
||||
x_r <- milines[i+1,][['X']]
|
||||
y_l <- milines[i,][['Y']]
|
||||
y_h <- malines[i,][['Y']]
|
||||
rect(x_l, y_l, x_r, y_h, col=alpha(MY_COLORS_[t], alpha=0.1), lwd=0)
|
||||
}
|
||||
if (FALSE && RIBBON=='span') {
|
||||
# draw boxes
|
||||
x_l <- milines[i,][['X']]
|
||||
x_r <- milines[i+1,][['X']]
|
||||
y_l <- milines[i,][['Y']]
|
||||
y_h <- malines[i,][['Y']]
|
||||
rect(x_l, y_l, x_r, y_h, col=alpha(MY_COLORS_[t], alpha=0.1), lwd=0)
|
||||
}
|
||||
#if (FALSE && RIBBON=='both' || RIBBON=='sd') {
|
||||
# # draw sd
|
||||
# x_l <- avglines[i,][['X']]
|
||||
# x_r <- avglines[i+1,][['X']]
|
||||
# y_l <- avglines[i,][['Y']]-one_frame[ceiling(i/2),][[sprintf("%s_sd",typenames[t])]]
|
||||
# y_h <- avglines[i,][['Y']]+one_frame[ceiling(i/2),][[sprintf("%s_sd",typenames[t])]]
|
||||
# if (x_r != x_l) {
|
||||
# rect(x_l, y_l, x_r, y_h, col=alpha(MY_COLORS_[t], alpha=0.1), lwd=0)
|
||||
# }
|
||||
#}
|
||||
#sd_ <- row[sprintf("%s_sd",typenames[t])][[1]]
|
||||
#min_ <- row[sprintf("%s_min",typenames[t])][[1]]
|
||||
#max_ <- row[sprintf("%s_max",typenames[t])][[1]]
|
||||
#if (exists("RIBBON")) {
|
||||
# switch (RIBBON,
|
||||
# 'sd' = arrows(x_, y_-sd_, x_, y_+sd_, length=0, angle=90, code=3, col=alpha(MY_COLORS_[t], alpha=0.03)),
|
||||
# 'both' = arrows(x_, y_-sd_, x_, y_+sd_, length=0, angle=90, code=3, col=alpha(MY_COLORS_[t], alpha=0.05)),
|
||||
# 'span' = #arrows(x_, min_, x_, max_, length=0, angle=90, code=3, col=alpha(MY_COLORS_[t], alpha=0.03))
|
||||
# )
|
||||
#}
|
||||
##arrows(x_, y_-sd_, x_, y_+sd_, length=0.05, angle=90, code=3, col=alpha(MY_COLORS[t], alpha=0.1))
|
||||
}
|
||||
}
|
||||
}
|
||||
leglines=typenames
|
||||
if (DRAW_WC) {
|
||||
lines(c(0,length(one_frame[[1]])),y=c(worst_case,worst_case), lty='dotted')
|
||||
leglines=c(typenames, 'worst observed')
|
||||
}
|
||||
legend(LEGEND_POS, legend=leglines,#"topleft"
|
||||
col=c(MY_COLORS_[1:length(typenames)],"black"),
|
||||
lty=c(rep("solid",length(typenames)),"dotted"))
|
||||
|
||||
if (SAVE_FILE) {dev.off()}
|
||||
}
|
||||
stopCluster(cl)
|
||||
|
||||
par(mar=c(3.8,3.8,0,0))
|
||||
par(oma=c(0,0,0,0))
|
||||
|
||||
#RIBBON='both'
|
||||
#MY_SELECTION = c('state_int','generation100_int')
|
||||
#MY_SELECTION = c('state_int','frafl_int')
|
||||
|
||||
if (exists("MY_SELECTION")) {
|
||||
plotting(MY_SELECTION, 'custom', MY_COLORS[c(1,2)])
|
||||
} else {
|
||||
# MY_SELECTION=c('state', 'afl', 'random', 'feedlongest', 'feedgeneration', 'feedgeneration10')
|
||||
#MY_SELECTION=c('state_int', 'afl_int', 'random_int', 'feedlongest_int', 'feedgeneration_int', 'feedgeneration10_int')
|
||||
#MY_SELECTION=c('state', 'frAFL', 'statenohash', 'feedgeneration10')
|
||||
#MY_SELECTION=c('state_int', 'frAFL_int', 'statenohash_int', 'feedgeneration10_int')
|
||||
MY_SELECTION=typenames
|
||||
RIBBON='both'
|
||||
for (i in seq_len(length(MY_SELECTION))) {
|
||||
n <- MY_SELECTION[i]
|
||||
plotting(c(n), n, c(MY_COLORS[i]))
|
||||
}
|
||||
RIBBON='max'
|
||||
plotting(MY_SELECTION,'all', MY_COLORS)
|
||||
}
|
||||
|
||||
for (t in seq_len(length(typenames))) {
|
||||
li = one_frame[dim(one_frame)[1],]
|
||||
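# (mean - median) / sd of the final sample point, i.e. a nonparametric skew measure
# (Pearson's second skewness coefficient up to its conventional factor of 3)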
pear = (li[[typenames[[t]]]]-li[[sprintf("%s_med",typenames[[t]])]])/li[[sprintf("%s_sd",typenames[[t]])]]
|
||||
print(sprintf("%s pearson: %g",typenames[[t]],pear))
|
||||
}
|
@ -1,24 +0,0 @@
|
||||
kernel,main_function,input_symbol,input_size,return_function
|
||||
mpeg2,mpeg2_main,mpeg2_oldorgframe,90112,mpeg2_return
|
||||
audiobeam,audiobeam_main,audiobeam_input,11520,audiobeam_return
|
||||
epic,epic_main,epic_image,4096,epic_return
|
||||
dijkstra,dijkstra_main,dijkstra_AdjMatrix,10000,dijkstra_return
|
||||
fft,fft_main,fft_twidtable,2046,fft_return
|
||||
bsort,bsort_main,bsort_Array,400,bsort_return
|
||||
insertsort,insertsort_main,insertsort_a,400,insertsort_return
|
||||
g723_enc,g723_enc_main,g723_enc_INPUT,1024,g723_enc_return
|
||||
rijndael_dec,rijndael_dec_main,rijndael_dec_data,32768,rijndael_dec_return
|
||||
rijndael_enc,rijndael_enc_main,rijndael_enc_data,31369,rijndael_enc_return
|
||||
huff_dec,huff_dec_main,huff_dec_encoded,419,huff_dec_return
|
||||
huff_enc,huff_enc_main,huff_enc_plaintext,600,huff_enc_return
|
||||
gsm_enc,gsm_enc_main,gsm_enc_pcmdata,6400,gsm_enc_return
|
||||
tmr,main,FUZZ_INPUT,32,trigger_Qemu_break
|
||||
tacle_rtos,prvStage0,FUZZ_INPUT,604,trigger_Qemu_break
|
||||
lift,main_lift,FUZZ_INPUT,100,trigger_Qemu_break
|
||||
waters,main_waters,FUZZ_INPUT,4096,trigger_Qemu_break
|
||||
watersv2,main_waters,FUZZ_INPUT,4096,trigger_Qemu_break
|
||||
waters_int,main_waters,FUZZ_INPUT,4096,trigger_Qemu_break
|
||||
watersv2_int,main_waters,FUZZ_INPUT,4096,trigger_Qemu_break
|
||||
micro_branchless,main_branchless,FUZZ_INPUT,4,trigger_Qemu_break
|
||||
micro_int,main_int,FUZZ_INPUT,16,trigger_Qemu_break
|
||||
micro_longint,main_micro_longint,FUZZ_INPUT,16,trigger_Qemu_break
|
|
@ -1,2 +0,0 @@
|
||||
#!/bin/sh
|
||||
arm-none-eabi-gcc -ggdb -ffreestanding -nostartfiles -lgcc -T mps2_m3.ld -mcpu=cortex-m3 main.c startup.c -o example.elf
|
@ -1,38 +0,0 @@
|
||||
int BREAKPOINT() {
|
||||
for (;;)
|
||||
{
|
||||
}
|
||||
}
|
||||
|
||||
int LLVMFuzzerTestOneInput(unsigned int* Data, unsigned int Size) {
|
||||
//if (Data[3] == 0) {while(1){}} // cause a timeout
|
||||
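// Selection-sort-like nested loop whose running time depends heavily on the input
// ordering, making it a natural worst-case execution time (WCET) fuzzing target.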
for (int i=0; i<Size; i++) {
|
||||
// if (Data[i] > 0xFFd0 && Data[i] < 0xFFFF) {return 1;} // cause qemu to crash
|
||||
for (int j=i+1; j<Size; j++) {
|
||||
if (Data[j] == 0) {continue;}
|
||||
if (Data[j]>Data[i]) {
|
||||
int tmp = Data[i];
|
||||
Data[i]=Data[j];
|
||||
Data[j]=tmp;
|
||||
if (Data[i] <= 100) {j--;}
|
||||
}
|
||||
}
|
||||
}
|
||||
return BREAKPOINT();
|
||||
}
|
||||
unsigned int FUZZ_INPUT[] = {
|
||||
101,201,700,230,860,
|
||||
234,980,200,340,678,
|
||||
230,134,900,236,900,
|
||||
123,800,123,658,607,
|
||||
246,804,567,568,207,
|
||||
407,246,678,457,892,
|
||||
834,456,878,246,699,
|
||||
854,234,844,290,125,
|
||||
324,560,852,928,910,
|
||||
790,853,345,234,586,
|
||||
};
|
||||
|
||||
int main() {
|
||||
LLVMFuzzerTestOneInput(FUZZ_INPUT, 50);
|
||||
}
|
@ -1,143 +0,0 @@
|
||||
/*
|
||||
* FreeRTOS V202112.00
|
||||
* Copyright (C) 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a copy of
|
||||
* this software and associated documentation files (the "Software"), to deal in
|
||||
* the Software without restriction, including without limitation the rights to
|
||||
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
|
||||
* the Software, and to permit persons to whom the Software is furnished to do so,
|
||||
* subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in all
|
||||
* copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
|
||||
* FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
|
||||
* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
|
||||
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
||||
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
* https://www.FreeRTOS.org
|
||||
* https://github.com/FreeRTOS
|
||||
*
|
||||
*/
|
||||
|
||||
MEMORY
|
||||
{
|
||||
RAM (xrw) : ORIGIN = 0x00000000, LENGTH = 4M
|
||||
/* Originally */
|
||||
/* FLASH (xr) : ORIGIN = 0x00000000, LENGTH = 4M */
|
||||
/* RAM (xrw) : ORIGIN = 0x20000000, LENGTH = 4M */
|
||||
}
|
||||
ENTRY(Reset_Handler)
|
||||
|
||||
_Min_Heap_Size = 0x300000 ; /* Required amount of heap. */
|
||||
_Min_Stack_Size = 0x4000 ; /* Required amount of stack. */
|
||||
M_VECTOR_RAM_SIZE = (16 + 48) * 4;
|
||||
_estack = ORIGIN(RAM) + LENGTH(RAM);
|
||||
|
||||
SECTIONS
|
||||
{
|
||||
|
||||
.isr_vector :
|
||||
{
|
||||
__vector_table = .;
|
||||
KEEP(*(.isr_vector))
|
||||
. = ALIGN(4);
|
||||
} > RAM /* FLASH */
|
||||
|
||||
.text :
|
||||
{
|
||||
. = ALIGN(4);
|
||||
*(.text*)
|
||||
KEEP (*(.init))
|
||||
KEEP (*(.fini))
|
||||
KEEP(*(.eh_frame))
|
||||
*(.rodata*)
|
||||
. = ALIGN(4);
|
||||
_etext = .;
|
||||
} > RAM /* FLASH */
|
||||
|
||||
.ARM.extab :
|
||||
{
|
||||
. = ALIGN(4);
|
||||
*(.ARM.extab* .gnu.linkonce.armextab.*)
|
||||
. = ALIGN(4);
|
||||
} >RAM /* FLASH */
|
||||
|
||||
.ARM :
|
||||
{
|
||||
. = ALIGN(4);
|
||||
__exidx_start = .;
|
||||
*(.ARM.exidx* .gnu.linkonce.armexidx.*)
|
||||
__exidx_end = .;
|
||||
. = ALIGN(4);
|
||||
} >RAM /* FLASH */
|
||||
|
||||
.interrupts_ram :
|
||||
{
|
||||
. = ALIGN(4);
|
||||
__VECTOR_RAM__ = .;
|
||||
__interrupts_ram_start__ = .;
|
||||
. += M_VECTOR_RAM_SIZE;
|
||||
. = ALIGN(4);
|
||||
__interrupts_ram_end = .;
|
||||
} > RAM
|
||||
|
||||
_sidata = LOADADDR(.data);
|
||||
|
||||
.data : /* AT ( _sidata ) */
|
||||
{
|
||||
. = ALIGN(4);
|
||||
_sdata = .;
|
||||
*(.data*)
|
||||
. = ALIGN(4);
|
||||
_edata = .;
|
||||
} > RAM /* RAM AT > FLASH */
|
||||
|
||||
.uninitialized (NOLOAD):
|
||||
{
|
||||
. = ALIGN(32);
|
||||
__uninitialized_start = .;
|
||||
*(.uninitialized)
|
||||
KEEP(*(.keep.uninitialized))
|
||||
. = ALIGN(32);
|
||||
__uninitialized_end = .;
|
||||
} > RAM
|
||||
|
||||
.bss :
|
||||
{
|
||||
. = ALIGN(4);
|
||||
_sbss = .;
|
||||
__bss_start__ = _sbss;
|
||||
*(.bss*)
|
||||
*(COMMON)
|
||||
. = ALIGN(4);
|
||||
_ebss = .;
|
||||
__bss_end__ = _ebss;
|
||||
} >RAM
|
||||
|
||||
.heap :
|
||||
{
|
||||
. = ALIGN(8);
|
||||
PROVIDE ( end = . );
|
||||
PROVIDE ( _end = . );
|
||||
_heap_bottom = .;
|
||||
. = . + _Min_Heap_Size;
|
||||
_heap_top = .;
|
||||
. = . + _Min_Stack_Size;
|
||||
. = ALIGN(8);
|
||||
} >RAM
|
||||
|
||||
/* Set stack top to end of RAM, and stack limit move down by
|
||||
* size of stack_dummy section */
|
||||
__StackTop = ORIGIN(RAM) + LENGTH(RAM);
|
||||
__StackLimit = __StackTop - _Min_Stack_Size;
|
||||
PROVIDE(__stack = __StackTop);
|
||||
|
||||
/* Check if data + heap + stack exceeds RAM limit */
|
||||
ASSERT(__StackLimit >= _heap_top, "region RAM overflowed with stack")
|
||||
}
|
||||
|
@ -1,114 +0,0 @@
|
||||
/*
|
||||
* FreeRTOS V202112.00
|
||||
* Copyright (C) 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a copy of
|
||||
* this software and associated documentation files (the "Software"), to deal in
|
||||
* the Software without restriction, including without limitation the rights to
|
||||
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
|
||||
* the Software, and to permit persons to whom the Software is furnished to do so,
|
||||
* subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in all
|
||||
* copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
|
||||
* FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
|
||||
* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
|
||||
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
||||
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
* https://www.FreeRTOS.org
|
||||
* https://github.com/FreeRTOS
|
||||
*
|
||||
*/
|
||||
|
||||
typedef unsigned int uint32_t;
|
||||
|
||||
extern int main();
|
||||
|
||||
extern uint32_t _estack, _sidata, _sdata, _edata, _sbss, _ebss;
|
||||
|
||||
/* Prevent optimization so gcc does not replace code with memcpy */
|
||||
__attribute__( ( optimize( "O0" ) ) )
|
||||
__attribute__( ( naked ) )
|
||||
void Reset_Handler( void )
|
||||
{
|
||||
/* set stack pointer */
|
||||
__asm volatile ( "ldr r0, =_estack" );
|
||||
__asm volatile ( "mov sp, r0" );
|
||||
|
||||
/* copy .data section from flash to RAM */
|
||||
// Not needed for this example, see linker script
|
||||
// for( uint32_t * src = &_sidata, * dest = &_sdata; dest < &_edata; )
|
||||
// {
|
||||
// *dest++ = *src++;
|
||||
// }
|
||||
|
||||
/* zero out .bss section */
|
||||
for( uint32_t * dest = &_sbss; dest < &_ebss; )
|
||||
{
|
||||
*dest++ = 0;
|
||||
}
|
||||
|
||||
/* jump to board initialisation */
|
||||
void _start( void );
|
||||
_start();
|
||||
}
|
||||
|
||||
const uint32_t * isr_vector[] __attribute__( ( section( ".isr_vector" ) ) ) =
|
||||
{
|
||||
( uint32_t * ) &_estack,
|
||||
( uint32_t * ) &Reset_Handler, /* Reset -15 */
|
||||
0, /* NMI_Handler -14 */
|
||||
0, /* HardFault_Handler -13 */
|
||||
0, /* MemManage_Handler -12 */
|
||||
0, /* BusFault_Handler -11 */
|
||||
0, /* UsageFault_Handler -10 */
|
||||
0, /* reserved */
|
||||
0, /* reserved */
|
||||
0, /* reserved */
|
||||
0, /* reserved -6 */
|
||||
0, /* SVC_Handler -5 */
|
||||
0, /* DebugMon_Handler -4 */
|
||||
0, /* reserved */
|
||||
0, /* PendSV handler -2 */
|
||||
0, /* SysTick_Handler -1 */
|
||||
0, /* uart0 receive 0 */
|
||||
0, /* uart0 transmit */
|
||||
0, /* uart1 receive */
|
||||
0, /* uart1 transmit */
|
||||
0, /* uart 2 receive */
|
||||
0, /* uart 2 transmit */
|
||||
0, /* GPIO 0 combined interrupt */
|
||||
0, /* GPIO 2 combined interrupt */
|
||||
0, /* Timer 0 */
|
||||
0, /* Timer 1 */
|
||||
0, /* Dial Timer */
|
||||
0, /* SPI0 SPI1 */
|
||||
0, /* uart overflow 1, 2,3 */
|
||||
0, /* Ethernet 13 */
|
||||
};
|
||||
|
||||
__attribute__( ( naked ) ) void exit(__attribute__((unused)) int status )
|
||||
{
|
||||
/* Force qemu to exit using ARM Semihosting */
|
||||
__asm volatile (
|
||||
"mov r1, r0\n"
|
||||
"cmp r1, #0\n"
|
||||
"bne .notclean\n"
|
||||
"ldr r1, =0x20026\n" /* ADP_Stopped_ApplicationExit, a clean exit */
|
||||
".notclean:\n"
|
||||
"movs r0, #0x18\n" /* SYS_EXIT */
|
||||
"bkpt 0xab\n"
|
||||
"end: b end\n"
|
||||
);
|
||||
}
|
||||
|
||||
void _start( void )
|
||||
{
|
||||
main( );
|
||||
exit( 0 );
|
||||
}
|
||||
|
@ -1,25 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
parent_path=$( cd "$(dirname "${BASH_SOURCE[0]}")" ; pwd -P )
|
||||
cd "$parent_path"
|
||||
|
||||
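# Each positional argument sets the matching environment variable unless it is "+" or the variable is already set.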
[ -n "$1" -a "$1" != "+" -a -z "$KERNEL" ] && export KERNEL="$1"
|
||||
[ -n "$2" -a "$2" != "+" -a -z "$FUZZ_MAIN" ] && export FUZZ_MAIN="$2"
|
||||
[ -n "$3" -a "$3" != "+" -a -z "$FUZZ_INPUT" ] && export FUZZ_INPUT="$3"
|
||||
[ -n "$4" -a "$4" != "+" -a -z "$FUZZ_INPUT_LEN" ] && export FUZZ_INPUT_LEN="$4"
|
||||
[ -n "$5" -a "$5" != "+" -a -z "$BREAKPOINT" ] && export BREAKPOINT="$5"
|
||||
[ -n "$6" -a "$6" != "+" -a -z "$FUZZ_ITERS" ] && export FUZZ_ITERS="$6"
|
||||
[ -n "$7" -a "$7" != "+" -a -z "$TIME_DUMP" ] && export TIME_DUMP="$7"
|
||||
[ -n "$8" -a "$8" != "+" -a -z "$CASE_DUMP" ] && export CASE_DUMP="$8"
|
||||
[ -n "$9" -a "$9" != "+" -a -z "$DO_SHOWMAP" ] && export DO_SHOWMAP="$9"
|
||||
[ -n "${10}" -a "${10}" != "+" -a -z "$SHOWMAP_TEXTINPUT" ] && export SHOWMAP_TEXTINPUT="${10}"
|
||||
[ -n "${11}" -a "${11}" != "+" -a -z "$TRACE_DUMP" ] && export TRACE_DUMP="${11}"
|
||||
|
||||
[ -z "$FUZZER" ] && export FUZZER=target/debug/fret
|
||||
set +e
|
||||
$FUZZER -icount shift=4,align=off,sleep=off -machine mps2-an385 -monitor null -kernel $KERNEL -serial null -nographic -S -semihosting --semihosting-config enable=on,target=native -snapshot -drive if=none,format=qcow2,file=dummy.qcow2
exitcode=$?
|
||||
if [ "$exitcode" = "101" ]
|
||||
then
|
||||
exit 101
|
||||
else
|
||||
exit 0
|
||||
fi
|
@ -1,344 +0,0 @@
|
||||
use hashbrown::{hash_map::Entry, HashMap};
|
||||
use libafl::{
|
||||
bolts::{
|
||||
current_nanos,
|
||||
rands::StdRand,
|
||||
tuples::{tuple_list},
|
||||
},
|
||||
executors::{ExitKind},
|
||||
fuzzer::{StdFuzzer},
|
||||
inputs::{BytesInput, HasTargetBytes},
|
||||
observers::{Observer,VariableMapObserver},
|
||||
state::{StdState, HasNamedMetadata},
|
||||
Error,
|
||||
observers::ObserversTuple, prelude::UsesInput, impl_serdeany,
|
||||
};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::{cell::UnsafeCell, cmp::max, env, fs::OpenOptions, io::Write, time::Instant};
|
||||
use libafl::bolts::tuples::Named;
|
||||
|
||||
use libafl_qemu::{
|
||||
emu,
|
||||
emu::Emulator,
|
||||
executor::QemuExecutor,
|
||||
helper::{QemuHelper, QemuHelperTuple, QemuInstrumentationFilter},
|
||||
};
|
||||
use libafl::events::EventFirer;
|
||||
use libafl::state::HasClientPerfMonitor;
|
||||
use libafl::inputs::Input;
|
||||
use libafl::feedbacks::Feedback;
|
||||
use libafl::SerdeAny;
|
||||
use libafl::state::HasMetadata;
|
||||
use libafl::corpus::testcase::Testcase;
|
||||
use core::{fmt::Debug, time::Duration};
|
||||
// use libafl::feedbacks::FeedbackState;
|
||||
// use libafl::state::HasFeedbackStates;
|
||||
use libafl::bolts::tuples::MatchName;
|
||||
use std::time::{SystemTime, UNIX_EPOCH};
|
||||
|
||||
pub static mut FUZZ_START_TIMESTAMP : SystemTime = UNIX_EPOCH;
|
||||
|
||||
//========== Metadata
|
||||
#[derive(Debug, SerdeAny, Serialize, Deserialize)]
|
||||
pub struct QemuIcountMetadata {
|
||||
runtime: u64,
|
||||
}
|
||||
|
||||
/// Metadata for [`QemuClockIncreaseFeedback`]
|
||||
#[derive(Debug, Serialize, Deserialize, SerdeAny)]
|
||||
pub struct MaxIcountMetadata {
|
||||
pub max_icount_seen: u64,
|
||||
pub name: String,
|
||||
}
|
||||
|
||||
// impl FeedbackState for MaxIcountMetadata
|
||||
// {
|
||||
// fn reset(&mut self) -> Result<(), Error> {
|
||||
// self.max_icount_seen = 0;
|
||||
// Ok(())
|
||||
// }
|
||||
// }
|
||||
|
||||
impl Named for MaxIcountMetadata
|
||||
{
|
||||
#[inline]
|
||||
fn name(&self) -> &str {
|
||||
self.name.as_str()
|
||||
}
|
||||
}
|
||||
|
||||
impl MaxIcountMetadata
|
||||
{
|
||||
/// Create new `MaxIcountMetadata`
|
||||
#[must_use]
|
||||
pub fn new(name: &'static str) -> Self {
|
||||
Self {
|
||||
max_icount_seen: 0,
|
||||
name: name.to_string(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for MaxIcountMetadata {
|
||||
fn default() -> Self {
|
||||
Self::new("MaxClock")
|
||||
}
|
||||
}
|
||||
|
||||
/// A piece of metadata tracking all icounts
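/// Stores (instruction count, timestamp in ms since fuzzing start) pairs plus the maximum pair seen so far.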
|
||||
#[derive(Debug, SerdeAny, Serialize, Deserialize)]
|
||||
pub struct IcHist (pub Vec<(u64, u128)>, pub (u64,u128));
|
||||
|
||||
//========== Observer
|
||||
|
||||
/// A simple observer, just observing the runtime of the target.
|
||||
#[derive(Serialize, Deserialize, Debug, Clone)]
|
||||
pub struct QemuClockObserver {
|
||||
name: String,
|
||||
start_tick: u64,
|
||||
end_tick: u64,
|
||||
}
|
||||
|
||||
impl QemuClockObserver {
|
||||
/// Creates a new [`QemuClockObserver`] with the given name.
|
||||
#[must_use]
|
||||
pub fn new(name: &'static str) -> Self {
|
||||
Self {
|
||||
name: name.to_string(),
|
||||
start_tick: 0,
|
||||
end_tick: 0,
|
||||
}
|
||||
}
|
||||
|
||||
/// Gets the runtime for the last execution of this target.
|
||||
#[must_use]
|
||||
pub fn last_runtime(&self) -> u64 {
|
||||
self.end_tick - self.start_tick
|
||||
}
|
||||
}
|
||||
|
||||
impl<S> Observer<S> for QemuClockObserver
|
||||
where
|
||||
S: UsesInput + HasMetadata,
|
||||
{
|
||||
fn pre_exec(&mut self, _state: &mut S, _input: &S::Input) -> Result<(), Error> {
|
||||
// Only remember the pre-run ticks if persistent mode is used
|
||||
#[cfg(not(feature = "snapshot_restore"))]
|
||||
unsafe {
|
||||
self.start_tick=emu::icount_get_raw();
|
||||
self.end_tick=self.start_tick;
|
||||
}
|
||||
// unsafe {
|
||||
// println!("clock pre {}",emu::icount_get_raw());
|
||||
// }
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn post_exec(&mut self, _state: &mut S, _input: &S::Input, _exit_kind: &ExitKind) -> Result<(), Error> {
|
||||
unsafe { self.end_tick = emu::icount_get_raw() };
|
||||
// println!("clock post {}", self.end_tick);
|
||||
// println!("Number of Ticks: {} <- {} {}",self.end_tick - self.start_tick, self.end_tick, self.start_tick);
|
||||
let metadata =_state.metadata_mut();
|
||||
let hist = metadata.get_mut::<IcHist>();
|
||||
let timestamp = SystemTime::now().duration_since(unsafe {FUZZ_START_TIMESTAMP}).unwrap().as_millis();
|
||||
match hist {
|
||||
None => {
|
||||
metadata.insert(IcHist(vec![(self.end_tick - self.start_tick, timestamp)],
|
||||
(self.end_tick - self.start_tick, timestamp)));
|
||||
}
|
||||
Some(v) => {
|
||||
v.0.push((self.end_tick - self.start_tick, timestamp));
|
||||
if (v.1.0 < self.end_tick-self.start_tick) {
|
||||
v.1 = (self.end_tick - self.start_tick, timestamp);
|
||||
}
|
||||
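// Once 100 entries have accumulated, flush them to the TIME_DUMP file (if set) and drop them from memory.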
if v.0.len() >= 100 {
|
||||
if let Ok(td) = env::var("TIME_DUMP") {
|
||||
let mut file = OpenOptions::new()
|
||||
.read(true)
|
||||
.write(true)
|
||||
.create(true)
|
||||
.append(true)
|
||||
.open(td).expect("Could not open timedump");
|
||||
let newv : Vec<(u64, u128)> = Vec::with_capacity(100);
|
||||
for i in std::mem::replace(&mut v.0, newv).into_iter() {
|
||||
writeln!(file, "{},{}", i.0, i.1).expect("Write to dump failed");
|
||||
}
|
||||
} else {
|
||||
// If we don't write out values we don't need to remember them at all
|
||||
v.0.clear();
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl Named for QemuClockObserver {
|
||||
#[inline]
|
||||
fn name(&self) -> &str {
|
||||
&self.name
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for QemuClockObserver {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
name: String::from("clock"),
|
||||
start_tick: 0,
|
||||
end_tick: 0,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
//========== Feedback
|
||||
/// Nop feedback that annotates the execution time in the new testcase, if any.
|
||||
/// For this Feedback, the testcase is never interesting (use it with an OR).
|
||||
/// It records the value of the given [`QemuClockObserver`] for each run.
|
||||
#[derive(Serialize, Deserialize, Clone, Debug)]
|
||||
pub struct ClockTimeFeedback {
|
||||
exec_time: Option<Duration>,
|
||||
name: String,
|
||||
}
|
||||
|
||||
impl<S> Feedback<S> for ClockTimeFeedback
|
||||
where
|
||||
S: UsesInput + HasClientPerfMonitor + HasMetadata,
|
||||
{
|
||||
#[allow(clippy::wrong_self_convention)]
|
||||
fn is_interesting<EM, OT>(
|
||||
&mut self,
|
||||
_state: &mut S,
|
||||
_manager: &mut EM,
|
||||
_input: &S::Input,
|
||||
observers: &OT,
|
||||
_exit_kind: &ExitKind,
|
||||
) -> Result<bool, Error>
|
||||
where
|
||||
EM: EventFirer<State = S>,
|
||||
OT: ObserversTuple<S>,
|
||||
{
|
||||
// TODO Replace with match_name_type when stable
|
||||
let observer = observers.match_name::<QemuClockObserver>(self.name()).unwrap();
|
||||
self.exec_time = Some(Duration::from_nanos(observer.last_runtime() << 4)); // Assume a somewhat realistic clock multiplier; the exact value does not matter
|
||||
Ok(false)
|
||||
}
|
||||
|
||||
/// Append to the testcase the generated metadata in case of a new corpus item
|
||||
#[inline]
|
||||
fn append_metadata(
|
||||
&mut self,
|
||||
_state: &mut S,
|
||||
testcase: &mut Testcase<S::Input>,
|
||||
) -> Result<(), Error> {
|
||||
*testcase.exec_time_mut() = self.exec_time;
|
||||
self.exec_time = None;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Discard the stored metadata in case that the testcase is not added to the corpus
|
||||
#[inline]
|
||||
fn discard_metadata(&mut self, _state: &mut S, _input: &S::Input) -> Result<(), Error> {
|
||||
self.exec_time = None;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl Named for ClockTimeFeedback {
|
||||
#[inline]
|
||||
fn name(&self) -> &str {
|
||||
self.name.as_str()
|
||||
}
|
||||
}
|
||||
|
||||
impl ClockTimeFeedback {
|
||||
/// Creates a new [`ClockTimeFeedback`], deciding if the value of a [`QemuClockObserver`] with the given `name` is interesting.
|
||||
#[must_use]
|
||||
pub fn new(name: &'static str) -> Self {
|
||||
Self {
|
||||
exec_time: None,
|
||||
name: name.to_string(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Creates a new [`ClockTimeFeedback`], deciding if the given [`QemuClockObserver`] value of a run is interesting.
|
||||
#[must_use]
|
||||
pub fn new_with_observer(observer: &QemuClockObserver) -> Self {
|
||||
Self {
|
||||
exec_time: None,
|
||||
name: observer.name().to_string(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// A [`Feedback`] that rewards increases of the execution cycle count on QEMU.
|
||||
#[derive(Debug)]
|
||||
pub struct QemuClockIncreaseFeedback {
|
||||
name: String,
|
||||
}
|
||||
|
||||
impl<S> Feedback<S> for QemuClockIncreaseFeedback
|
||||
where
|
||||
S: UsesInput + HasNamedMetadata + HasClientPerfMonitor + Debug,
|
||||
{
|
||||
fn is_interesting<EM, OT>(
|
||||
&mut self,
|
||||
state: &mut S,
|
||||
_manager: &mut EM,
|
||||
_input: &S::Input,
|
||||
_observers: &OT,
|
||||
_exit_kind: &ExitKind,
|
||||
) -> Result<bool, Error>
|
||||
where
|
||||
EM: EventFirer<State = S>,
|
||||
OT: ObserversTuple<S>,
|
||||
{
|
||||
let observer = _observers.match_name::<QemuClockObserver>("clock")
|
||||
.expect("QemuClockObserver not found");
|
||||
let clock_state = state
|
||||
.named_metadata_mut()
|
||||
.get_mut::<MaxIcountMetadata>(&self.name)
|
||||
.unwrap();
|
||||
if observer.last_runtime() > clock_state.max_icount_seen {
|
||||
// println!("Clock improving {}",observer.last_runtime());
|
||||
clock_state.max_icount_seen = observer.last_runtime();
|
||||
return Ok(true);
|
||||
}
|
||||
Ok(false)
|
||||
}
|
||||
|
||||
/// Append to the testcase the generated metadata in case of a new corpus item
|
||||
#[inline]
|
||||
fn append_metadata(&mut self, _state: &mut S, testcase: &mut Testcase<S::Input>) -> Result<(), Error> {
|
||||
// testcase.metadata_mut().insert(QemuIcountMetadata{runtime: self.last_runtime});
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Discard the stored metadata in case that the testcase is not added to the corpus
|
||||
#[inline]
|
||||
fn discard_metadata(&mut self, _state: &mut S, _input: &S::Input) -> Result<(), Error> {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
impl Named for QemuClockIncreaseFeedback {
|
||||
#[inline]
|
||||
fn name(&self) -> &str {
|
||||
&self.name
|
||||
}
|
||||
}
|
||||
|
||||
impl QemuClockIncreaseFeedback {
|
||||
/// Creates a new [`QemuClockIncreaseFeedback`]
|
||||
#[must_use]
|
||||
pub fn new(name: &'static str) -> Self {
|
||||
Self {name: String::from(name)}
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for QemuClockIncreaseFeedback {
|
||||
fn default() -> Self {
|
||||
Self::new("MaxClock")
|
||||
}
|
||||
}
|
@ -1,715 +0,0 @@
|
||||
//! A fuzzer using QEMU in system mode for binary-only coverage of kernels
|
||||
//!
|
||||
use core::time::Duration;
|
||||
use std::{env, path::PathBuf, process::{self, abort}, io::{Read, Write}, fs::{self, OpenOptions}, cmp::{min, max}, mem::transmute_copy, collections::btree_map::Range};
|
||||
|
||||
use libafl::{
|
||||
bolts::{
|
||||
core_affinity::Cores,
|
||||
current_nanos,
|
||||
launcher::Launcher,
|
||||
rands::StdRand,
|
||||
shmem::{ShMemProvider, StdShMemProvider},
|
||||
tuples::tuple_list,
|
||||
AsSlice,
|
||||
},
|
||||
corpus::{Corpus, InMemoryCorpus, OnDiskCorpus},
|
||||
events::EventConfig,
|
||||
executors::{ExitKind, TimeoutExecutor},
|
||||
feedback_or,
|
||||
feedback_or_fast,
|
||||
feedbacks::{CrashFeedback, MaxMapFeedback, TimeoutFeedback},
|
||||
fuzzer::{Fuzzer, StdFuzzer},
|
||||
inputs::{BytesInput, HasTargetBytes},
|
||||
monitors::MultiMonitor,
|
||||
observers::{VariableMapObserver},
|
||||
schedulers::{IndexesLenTimeMinimizerScheduler, QueueScheduler},
|
||||
state::{HasCorpus, StdState, HasMetadata, HasNamedMetadata},
|
||||
Error,
|
||||
prelude::{SimpleMonitor, SimpleEventManager, AsMutSlice, RandBytesGenerator, Generator, SimpleRestartingEventManager, HasBytesVec, minimizer::TopRatedsMetadata, havoc_mutations, StdScheduledMutator, HitcountsMapObserver}, Evaluator, stages::StdMutationalStage,
|
||||
};
|
||||
use libafl_qemu::{
|
||||
edges, edges::QemuEdgeCoverageHelper, elf::EasyElf, emu::Emulator, GuestPhysAddr, QemuExecutor,
|
||||
QemuHooks, Regs, QemuInstrumentationFilter, GuestAddr,
|
||||
emu::libafl_qemu_set_native_breakpoint, emu::libafl_qemu_remove_native_breakpoint,
|
||||
};
|
||||
use rand::{SeedableRng, StdRng, Rng};
|
||||
use crate::{
|
||||
clock::{QemuClockObserver, ClockTimeFeedback, QemuClockIncreaseFeedback, IcHist, FUZZ_START_TIMESTAMP},
|
||||
qemustate::QemuStateRestoreHelper,
|
||||
systemstate::{helpers::QemuSystemStateHelper, observers::QemuSystemStateObserver, feedbacks::{DumpSystraceFeedback, NovelSystemStateFeedback}, graph::{SysMapFeedback, SysGraphFeedbackState, GraphMaximizerCorpusScheduler}, schedulers::{LongestTraceScheduler, GenerationScheduler}}, worst::{TimeMaximizerCorpusScheduler, ExecTimeIncFeedback, TimeStateMaximizerCorpusScheduler, AlwaysTrueFeedback},
|
||||
mutational::MyStateStage,
|
||||
mutational::{MINIMUM_INTER_ARRIVAL_TIME},
|
||||
};
|
||||
use std::time::{SystemTime, UNIX_EPOCH};
|
||||
|
||||
pub static mut RNG_SEED: u64 = 1;
|
||||
|
||||
pub static mut LIMIT : u32 = u32::MAX;
|
||||
|
||||
pub const MAX_NUM_INTERRUPT: usize = 32;
|
||||
pub const DO_NUM_INTERRUPT: usize = 32;
|
||||
pub static mut MAX_INPUT_SIZE: usize = 32;
|
||||
/// Read ELF program headers to resolve physical load addresses.
|
||||
fn virt2phys(vaddr: GuestPhysAddr, tab: &EasyElf) -> GuestPhysAddr {
|
||||
let ret;
|
||||
for i in &tab.goblin().program_headers {
|
||||
if i.vm_range().contains(&vaddr.try_into().unwrap()) {
|
||||
ret = vaddr - TryInto::<GuestPhysAddr>::try_into(i.p_vaddr).unwrap()
|
||||
+ TryInto::<GuestPhysAddr>::try_into(i.p_paddr).unwrap();
|
||||
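// Drop the lowest bit (presumably the ARM Thumb bit of function addresses) so the result is a valid physical address.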
return ret - (ret % 2);
|
||||
}
|
||||
}
|
||||
return vaddr;
|
||||
}
|
||||
|
||||
extern "C" {
|
||||
static mut libafl_interrupt_offsets : [u32; 32];
|
||||
static mut libafl_num_interrupts : usize;
|
||||
}
|
||||
|
||||
pub fn fuzz() {
|
||||
unsafe {FUZZ_START_TIMESTAMP = SystemTime::now();}
|
||||
let mut starttime = std::time::Instant::now();
|
||||
if let Ok(s) = env::var("FUZZ_SIZE") {
|
||||
str::parse::<usize>(&s).expect("FUZZ_SIZE was not a number");
|
||||
};
|
||||
// Hardcoded parameters
|
||||
let timeout = Duration::from_secs(10);
|
||||
let broker_port = 1337;
|
||||
let cores = Cores::from_cmdline("1").unwrap();
|
||||
let corpus_dirs = [PathBuf::from("./corpus")];
|
||||
let objective_dir = PathBuf::from("./crashes");
|
||||
|
||||
let mut elf_buffer = Vec::new();
|
||||
let elf = EasyElf::from_file(
|
||||
env::var("KERNEL").expect("KERNEL env not set"),
|
||||
&mut elf_buffer,
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
// the main address where the fuzzer starts
|
||||
// if this is set for FreeRTOS, it influences where the data will have to be written,
|
||||
// since the startup routine copies the data segment to its virtual address
|
||||
let main_addr = elf
|
||||
.resolve_symbol(&env::var("FUZZ_MAIN").unwrap_or_else(|_| "FUZZ_MAIN".to_owned()), 0);
|
||||
if let Some(main_addr) = main_addr {
|
||||
println!("main address = {:#x}", main_addr);
|
||||
}
|
||||
|
||||
let input_addr = elf
|
||||
.resolve_symbol(
|
||||
&env::var("FUZZ_INPUT").unwrap_or_else(|_| "FUZZ_INPUT".to_owned()),
|
||||
0,
|
||||
)
|
||||
.expect("Symbol or env FUZZ_INPUT not found") as GuestPhysAddr;
|
||||
let input_addr = virt2phys(input_addr,&elf) as GuestPhysAddr;
|
||||
println!("FUZZ_INPUT @ {:#x}", input_addr);
|
||||
|
||||
let test_length_ptr = elf
|
||||
.resolve_symbol("FUZZ_LENGTH", 0).map(|x| x as GuestPhysAddr);
|
||||
let test_length_ptr = test_length_ptr.map(|x| virt2phys(x, &elf));
|
||||
|
||||
let input_counter_ptr = elf
|
||||
.resolve_symbol(&env::var("FUZZ_POINTER").unwrap_or_else(|_| "FUZZ_POINTER".to_owned()), 0)
|
||||
.map(|x| x as GuestPhysAddr);
|
||||
let input_counter_ptr = input_counter_ptr.map(|x| virt2phys(x, &elf));
|
||||
|
||||
#[cfg(feature = "systemstate")]
|
||||
let curr_tcb_pointer = elf // loads to the address specified in elf, without respecting program headers
|
||||
.resolve_symbol("pxCurrentTCB", 0)
|
||||
.expect("Symbol pxCurrentTCBC not found");
|
||||
// let curr_tcb_pointer = virt2phys(curr_tcb_pointer,&elf);
|
||||
#[cfg(feature = "systemstate")]
|
||||
println!("TCB pointer at {:#x}", curr_tcb_pointer);
|
||||
#[cfg(feature = "systemstate")]
|
||||
let task_queue_addr = elf
|
||||
.resolve_symbol("pxReadyTasksLists", 0)
|
||||
.expect("Symbol pxReadyTasksLists not found");
|
||||
// let task_queue_addr = virt2phys(task_queue_addr,&elf.goblin());
|
||||
#[cfg(feature = "systemstate")]
|
||||
println!("Task Queue at {:#x}", task_queue_addr);
|
||||
#[cfg(feature = "systemstate")]
|
||||
let svh = elf
|
||||
.resolve_symbol("xPortPendSVHandler", 0)
|
||||
.expect("Symbol xPortPendSVHandler not found");
|
||||
// let svh=virt2phys(svh, &elf);
|
||||
// let svh = elf
|
||||
// .resolve_symbol("vPortEnterCritical", 0)
|
||||
// .expect("Symbol vPortEnterCritical not found");
|
||||
#[cfg(feature = "systemstate")]
|
||||
let app_start = elf
|
||||
.resolve_symbol("__APP_CODE_START__", 0)
|
||||
.expect("Symbol __APP_CODE_START__ not found");
|
||||
#[cfg(feature = "systemstate")]
|
||||
let app_end = elf
|
||||
.resolve_symbol("__APP_CODE_END__", 0)
|
||||
.expect("Symbol __APP_CODE_END__ not found");
|
||||
#[cfg(feature = "systemstate")]
|
||||
let app_range = app_start..app_end;
|
||||
#[cfg(feature = "systemstate")]
|
||||
dbg!(app_range.clone());
|
||||
|
||||
let breakpoint = elf
|
||||
.resolve_symbol(
|
||||
&env::var("BREAKPOINT").unwrap_or_else(|_| "BREAKPOINT".to_owned()),
|
||||
0,
|
||||
)
|
||||
.expect("Symbol or env BREAKPOINT not found");
|
||||
println!("Breakpoint address = {:#x}", breakpoint);
|
||||
unsafe {
|
||||
libafl_num_interrupts = 0;
|
||||
}
|
||||
|
||||
if let Ok(input_len) = env::var("FUZZ_INPUT_LEN") {
|
||||
unsafe {MAX_INPUT_SIZE = str::parse::<usize>(&input_len).expect("FUZZ_INPUT_LEN was not a number");}
|
||||
}
|
||||
unsafe {dbg!(MAX_INPUT_SIZE);}
|
||||
|
||||
if let Ok(seed) = env::var("SEED_RANDOM") {
|
||||
unsafe {RNG_SEED = str::parse::<u64>(&seed).expect("SEED_RANDOM must be an integer.");}
|
||||
}
|
||||
|
||||
let mut run_client = |state: Option<_>, mut mgr, _core_id| {
|
||||
// Initialize QEMU
|
||||
let args: Vec<String> = env::args().collect();
|
||||
let env: Vec<(String, String)> = env::vars().collect();
|
||||
let emu = Emulator::new(&args, &env);
|
||||
|
||||
if let Some(main_addr) = main_addr {
|
||||
unsafe {
|
||||
libafl_qemu_set_native_breakpoint(main_addr);
|
||||
emu.run();
|
||||
libafl_qemu_remove_native_breakpoint(main_addr);
|
||||
}
|
||||
}
|
||||
|
||||
unsafe { libafl_qemu_set_native_breakpoint(breakpoint); }// BREAKPOINT
|
||||
|
||||
// The wrapped harness function, calling out to the LLVM-style harness
|
||||
let mut harness = |input: &BytesInput| {
|
||||
let target = input.target_bytes();
|
||||
let mut buf = target.as_slice();
|
||||
let mut len = buf.len();
|
||||
unsafe {
|
||||
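// With the "fuzz_int" feature, the leading bytes of the input encode up to DO_NUM_INTERRUPT
// 32-bit interrupt arrival ticks; the remaining bytes are the payload written to the target's input buffer.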
#[cfg(feature = "fuzz_int")]
|
||||
{
|
||||
let mut start_tick : u32 = 0;
|
||||
for i in 0..DO_NUM_INTERRUPT {
|
||||
let mut t : [u8; 4] = [0,0,0,0];
|
||||
if len > (i+1)*4 {
|
||||
for j in 0 as usize..4 as usize {
|
||||
t[j]=buf[i*4+j];
|
||||
}
|
||||
if i == 0 || true {
|
||||
unsafe {start_tick = u32::from_le_bytes(t) % LIMIT;}
|
||||
} else {
|
||||
start_tick = u32::saturating_add(start_tick,max(MINIMUM_INTER_ARRIVAL_TIME,u32::from_le_bytes(t)));
|
||||
}
|
||||
libafl_interrupt_offsets[i] = start_tick;
|
||||
libafl_num_interrupts = i+1;
|
||||
}
|
||||
}
|
||||
if buf.len() > libafl_num_interrupts*4 {
|
||||
buf = &buf[libafl_num_interrupts*4..];
|
||||
len = buf.len();
|
||||
}
|
||||
// println!("Load: {:?}", libafl_interrupt_offsets[0..libafl_num_interrupts].to_vec());
|
||||
}
|
||||
if len > MAX_INPUT_SIZE {
|
||||
buf = &buf[0..MAX_INPUT_SIZE];
|
||||
len = MAX_INPUT_SIZE;
|
||||
}
|
||||
|
||||
emu.write_phys_mem(input_addr, buf);
|
||||
if let Some(s) = test_length_ptr {
|
||||
emu.write_phys_mem(s as u64, &len.to_le_bytes())
|
||||
}
|
||||
|
||||
emu.run();
|
||||
|
||||
// If the execution stops at any point other than the designated breakpoint (e.g. a breakpoint on a panic method), we consider it a crash
|
||||
let mut pcs = (0..emu.num_cpus())
|
||||
.map(|i| emu.cpu_from_index(i))
|
||||
.map(|cpu| -> Result<u32, String> { cpu.read_reg(Regs::Pc) });
|
||||
match pcs
|
||||
.find(|pc| (breakpoint..breakpoint + 5).contains(pc.as_ref().unwrap_or(&0)))
|
||||
{
|
||||
Some(_) => ExitKind::Ok,
|
||||
None => ExitKind::Crash,
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
// Create an observation channel using the coverage map
|
||||
let edges = unsafe { &mut edges::EDGES_MAP };
|
||||
let edges_counter = unsafe { &mut edges::MAX_EDGES_NUM };
|
||||
let edges_observer = VariableMapObserver::new("edges", edges, edges_counter);
|
||||
#[cfg(feature = "observer_hitcounts")]
|
||||
let edges_observer = HitcountsMapObserver::new(edges_observer);
|
||||
|
||||
// Create an observation channel to keep track of the execution time
|
||||
let clock_time_observer = QemuClockObserver::new("clocktime");
|
||||
|
||||
let systemstate_observer = QemuSystemStateObserver::new();
|
||||
|
||||
// Feedback to rate the interestingness of an input
|
||||
// This one is composed by two Feedbacks in OR
|
||||
let mut feedback = feedback_or!(
|
||||
// Time feedback, this one does not need a feedback state
|
||||
ClockTimeFeedback::new_with_observer(&clock_time_observer)
|
||||
);
|
||||
#[cfg(feature = "feed_genetic")]
|
||||
let mut feedback = feedback_or!(
|
||||
feedback,
|
||||
AlwaysTrueFeedback::new()
|
||||
);
|
||||
#[cfg(feature = "feed_afl")]
|
||||
let mut feedback = feedback_or!(
|
||||
feedback,
|
||||
// New maximization map feedback linked to the edges observer and the feedback state
|
||||
MaxMapFeedback::new_tracking(&edges_observer, true, true)
|
||||
);
|
||||
#[cfg(feature = "feed_longest")]
|
||||
let mut feedback = feedback_or!(
|
||||
// afl feedback needs to be activated first for MapIndexesMetadata
|
||||
feedback,
|
||||
// Feedback to reward any input which increases the execution time
|
||||
ExecTimeIncFeedback::new()
|
||||
);
|
||||
#[cfg(all(feature = "systemstate",not(any(feature = "feed_systemgraph",feature = "feed_systemtrace"))))]
|
||||
let mut feedback = feedback_or!(
|
||||
feedback,
|
||||
DumpSystraceFeedback::with_dump(env::var("TRACE_DUMP").ok().map(PathBuf::from))
|
||||
);
|
||||
#[cfg(feature = "feed_systemtrace")]
|
||||
let mut feedback = feedback_or!(
|
||||
feedback,
|
||||
// AlwaysTrueFeedback::new(),
|
||||
NovelSystemStateFeedback::default()
|
||||
);
|
||||
#[cfg(feature = "feed_systemgraph")]
|
||||
let mut feedback = feedback_or!(
|
||||
feedback,
|
||||
SysMapFeedback::default()
|
||||
);
|
||||
|
||||
// A feedback to choose if an input is a solution or not
|
||||
let mut objective = feedback_or_fast!(CrashFeedback::new(), TimeoutFeedback::new());
|
||||
|
||||
// If not restarting, create a State from scratch
|
||||
let mut state = state.unwrap_or_else(|| {
|
||||
StdState::new(
|
||||
// RNG
|
||||
unsafe {StdRand::with_seed(RNG_SEED) },
|
||||
// Corpus that will be evolved, we keep it in memory for performance
|
||||
InMemoryCorpus::new(),
|
||||
// Corpus in which we store solutions (crashes in this example),
|
||||
// on disk so the user can get them after stopping the fuzzer
|
||||
OnDiskCorpus::new(objective_dir.clone()).unwrap(),
|
||||
// States of the feedbacks.
|
||||
// The feedbacks can report the data that should persist in the State.
|
||||
&mut feedback,
|
||||
// Same for objective feedbacks
|
||||
&mut objective,
|
||||
)
|
||||
.unwrap()
|
||||
});
|
||||
|
||||
// A minimization+queue policy to get testcases from the corpus
|
||||
#[cfg(not(any(feature = "feed_afl",feature = "feed_systemgraph",feature = "feed_systemtrace", feature = "feed_genetic")))]
|
||||
let scheduler = QueueScheduler::new();
|
||||
#[cfg(all(feature = "feed_afl",not(any(feature = "feed_systemgraph",feature = "feed_systemtrace"))))]
|
||||
let scheduler = TimeMaximizerCorpusScheduler::new(QueueScheduler::new());
|
||||
#[cfg(feature = "feed_systemtrace")]
|
||||
let scheduler = LongestTraceScheduler::new(TimeStateMaximizerCorpusScheduler::new(QueueScheduler::new()));
|
||||
#[cfg(feature = "feed_systemgraph")]
|
||||
let scheduler = GraphMaximizerCorpusScheduler::new(QueueScheduler::new());
|
||||
#[cfg(feature = "feed_genetic")]
|
||||
let scheduler = GenerationScheduler::new();
|
||||
|
||||
// A fuzzer with feedbacks and a corpus scheduler
|
||||
let mut fuzzer = StdFuzzer::new(scheduler, feedback, objective);
|
||||
#[cfg(not(feature = "systemstate"))]
|
||||
let qhelpers = tuple_list!(
|
||||
QemuEdgeCoverageHelper::default(),
|
||||
QemuStateRestoreHelper::new()
|
||||
);
|
||||
#[cfg(feature = "systemstate")]
|
||||
let qhelpers = tuple_list!(
|
||||
QemuEdgeCoverageHelper::default(),
|
||||
QemuStateRestoreHelper::new(),
|
||||
QemuSystemStateHelper::new(svh,curr_tcb_pointer,task_queue_addr,input_counter_ptr,app_range.clone())
|
||||
);
|
||||
let mut hooks = QemuHooks::new(&emu,qhelpers);
|
||||
|
||||
#[cfg(not(feature = "systemstate"))]
|
||||
let observer_list = tuple_list!(edges_observer, clock_time_observer);
|
||||
#[cfg(feature = "systemstate")]
|
||||
let observer_list = tuple_list!(edges_observer, clock_time_observer, systemstate_observer);
|
||||
|
||||
// Create a QEMU in-process executor
|
||||
let executor = QemuExecutor::new(
|
||||
&mut hooks,
|
||||
&mut harness,
|
||||
observer_list,
|
||||
&mut fuzzer,
|
||||
&mut state,
|
||||
&mut mgr,
|
||||
)
|
||||
.expect("Failed to create QemuExecutor");
|
||||
|
||||
// Wrap the executor to keep track of the timeout
|
||||
let mut executor = TimeoutExecutor::new(executor, timeout);
|
||||
|
||||
let mutations = havoc_mutations();
|
||||
// Set up a havoc mutator with a mutational stage
|
||||
let mutator = StdScheduledMutator::new(mutations);
|
||||
// #[cfg(not(all(feature = "feed_systemtrace", feature = "fuzz_int")))]
|
||||
// let mut stages = tuple_list!(StdMutationalStage::new(mutator));
|
||||
// #[cfg(all(feature = "feed_systemtrace", feature = "fuzz_int"))]
|
||||
#[cfg(feature = "fuzz_int")]
|
||||
let mut stages = tuple_list!(StdMutationalStage::new(mutator),MyStateStage::new());
|
||||
#[cfg(not(feature = "fuzz_int"))]
|
||||
let mut stages = tuple_list!(StdMutationalStage::new(mutator));
|
||||
|
||||
if env::var("DO_SHOWMAP").is_ok() {
|
||||
let s = &env::var("DO_SHOWMAP").unwrap();
|
||||
let show_input = if s=="-" {
|
||||
let mut buf = Vec::<u8>::new();
|
||||
std::io::stdin().read_to_end(&mut buf).expect("Could not read Stdin");
|
||||
buf
|
||||
} else if s=="$" {
|
||||
env::var("SHOWMAP_TEXTINPUT").expect("SHOWMAP_TEXTINPUT not set").as_bytes().to_owned()
|
||||
} else {
|
||||
fs::read(s).expect("Input file for DO_SHOWMAP can not be read")
|
||||
};
|
||||
fuzzer.evaluate_input(&mut state, &mut executor, &mut mgr, BytesInput::new(show_input))
|
||||
.unwrap();
|
||||
if let Ok(td) = env::var("TIME_DUMP") {
|
||||
let mut file = OpenOptions::new()
|
||||
.read(true)
|
||||
.write(true)
|
||||
.create(true)
|
||||
.append(true)
|
||||
.open(td).expect("Could not open timedump");
|
||||
if let Some(ichist) = state.metadata_mut().get_mut::<IcHist>() {
|
||||
for i in ichist.0.drain(..) {
|
||||
writeln!(file, "{},{}", i.0, i.1).expect("Write to dump failed");
|
||||
}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
if let Ok(_) = env::var("SEED_RANDOM") {
|
||||
unsafe {
|
||||
let mut rng = StdRng::seed_from_u64(RNG_SEED);
|
||||
for i in 0..100 {
|
||||
let inp = BytesInput::new(vec![rng.gen::<u8>(); MAX_INPUT_SIZE]);
|
||||
fuzzer.evaluate_input(&mut state, &mut executor, &mut mgr, inp).unwrap();
|
||||
}
|
||||
}
|
||||
}
|
||||
else if let Ok(sf) = env::var("SEED_DIR") {
|
||||
state
|
||||
.load_initial_inputs(&mut fuzzer, &mut executor, &mut mgr, &[PathBuf::from(&sf)])
|
||||
.unwrap_or_else(|_| {
|
||||
println!("Failed to load initial corpus at {:?}", &corpus_dirs);
|
||||
process::exit(0);
|
||||
});
|
||||
println!("We imported {} inputs from seedfile.", state.corpus().count());
|
||||
} else if state.corpus().count() < 1 {
|
||||
state
|
||||
.load_initial_inputs(&mut fuzzer, &mut executor, &mut mgr, &corpus_dirs)
|
||||
.unwrap_or_else(|_| {
|
||||
println!("Failed to load initial corpus at {:?}", &corpus_dirs);
|
||||
process::exit(0);
|
||||
});
|
||||
println!("We imported {} inputs from disk.", state.corpus().count());
|
||||
}
|
||||
|
||||
match env::var("FUZZ_ITERS") {
|
||||
Err(_) => {
|
||||
fuzzer
|
||||
.fuzz_loop(&mut stages, &mut executor, &mut state, &mut mgr)
|
||||
.unwrap();
|
||||
},
|
||||
Ok(t) => {
|
||||
println!("Iterations {}",t);
|
||||
let num = str::parse::<u64>(&t).expect("FUZZ_ITERS was not a number");
|
||||
if let Ok(s) = env::var("FUZZ_RANDOM") { unsafe {
|
||||
if s.contains("watersv2_int") {
|
||||
println!("V2");
|
||||
LIMIT=7000000;
|
||||
} else {
|
||||
println!("V1");
|
||||
LIMIT=5000000;
|
||||
}
|
||||
println!("Random Fuzzing, ignore corpus");
|
||||
// let mut generator = RandBytesGenerator::new(MAX_INPUT_SIZE);
|
||||
let target_duration = Duration::from_secs(num);
|
||||
let start_time = std::time::Instant::now();
|
||||
let mut rng = StdRng::seed_from_u64(RNG_SEED);
|
||||
while start_time.elapsed() < target_duration {
|
||||
// let inp = generator.generate(&mut state).unwrap();
|
||||
// libafl's generator is too slow
|
||||
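// Note: vec![rng.gen::<u8>(); MAX_INPUT_SIZE] evaluates rng.gen() only once, so each generated input is filled with a single repeated random byte value.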
let inp = BytesInput::new(vec![rng.gen::<u8>(); MAX_INPUT_SIZE]);
|
||||
fuzzer.evaluate_input(&mut state, &mut executor, &mut mgr, inp).unwrap();
|
||||
}
|
||||
}} else {
|
||||
// fuzzer
|
||||
// .fuzz_loop_for_duration(&mut stages, &mut executor, &mut state, &mut mgr, Duration::from_secs(num))
|
||||
// .unwrap();
|
||||
fuzzer
|
||||
.fuzz_loop_until(&mut stages, &mut executor, &mut state, &mut mgr, starttime.checked_add(Duration::from_secs(num)).unwrap())
|
||||
.unwrap();
|
||||
#[cfg(feature = "run_until_saturation")]
|
||||
{
|
||||
{
|
||||
let mut dumper = |marker : String| {
|
||||
if let Ok(td) = env::var("TIME_DUMP") {
|
||||
let mut file = OpenOptions::new()
|
||||
.read(true)
|
||||
.write(true)
|
||||
.create(true)
|
||||
.append(true)
|
||||
.open(td).expect("Could not open timedump");
|
||||
if let Some(ichist) = state.metadata_mut().get_mut::<IcHist>() {
|
||||
for i in ichist.0.drain(..) {
|
||||
writeln!(file, "{},{}", i.0, i.1).expect("Write to dump failed");
|
||||
}
|
||||
}
|
||||
}
|
||||
if let Ok(td) = env::var("CASE_DUMP") {
|
||||
println!("Dumping worst case to {:?}", td);
|
||||
let corpus = state.corpus();
|
||||
let mut worst = Duration::new(0,0);
|
||||
let mut worst_input = None;
|
||||
for i in 0..corpus.count() {
|
||||
let tc = corpus.get(i).expect("Could not get element from corpus").borrow();
|
||||
if worst < tc.exec_time().expect("Testcase missing duration") {
|
||||
worst_input = Some(tc.input().as_ref().unwrap().bytes().to_owned());
|
||||
worst = tc.exec_time().expect("Testcase missing duration");
|
||||
}
|
||||
}
|
||||
match worst_input {
|
||||
Some(wi) => {
|
||||
// let cd = format!("{}.case",&td);
|
||||
let mut cd = td.clone();
|
||||
cd.push_str(&marker);
|
||||
fs::write(&cd,wi).expect("Failed to write worst corpus element");
|
||||
},
|
||||
None => (),
|
||||
}
|
||||
#[cfg(feature = "feed_systemgraph")]
|
||||
{
|
||||
let mut gd = String::from(&td);
|
||||
gd.push_str(&format!(".graph{}", marker));
|
||||
if let Some(md) = state.named_metadata_mut().get_mut::<SysGraphFeedbackState>("SysMap") {
|
||||
fs::write(&gd,ron::to_string(&md).expect("Failed to serialize graph")).expect("Failed to write graph");
|
||||
}
|
||||
}
|
||||
{
|
||||
let mut gd = String::from(&td);
|
||||
if let Some(md) = state.metadata_mut().get_mut::<TopRatedsMetadata>() {
|
||||
let mut uniq: Vec<usize> = md.map.values().map(|x| x.clone()).collect();
|
||||
uniq.sort();
|
||||
uniq.dedup();
|
||||
gd.push_str(&format!(".{}.toprated{}", uniq.len(), marker));
|
||||
fs::write(&gd,ron::to_string(&md.map).expect("Failed to serialize metadata")).expect("Failed to write graph");
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
dumper(format!(".iter_{}",t));
|
||||
}
|
||||
println!("Start running until saturation");
|
||||
let mut last = state.metadata().get::<IcHist>().unwrap().1;
|
||||
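// Keep fuzzing in 30-second slices until no new maximum icount has been observed for 3 hours (10800 s).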
while SystemTime::now().duration_since(unsafe {FUZZ_START_TIMESTAMP}).unwrap().as_millis() < last.1 + Duration::from_secs(10800).as_millis() {
|
||||
starttime=starttime.checked_add(Duration::from_secs(30)).unwrap();
|
||||
fuzzer
|
||||
.fuzz_loop_until(&mut stages, &mut executor, &mut state, &mut mgr, starttime)
|
||||
.unwrap();
|
||||
let after = state.metadata().get::<IcHist>().unwrap().1;
|
||||
if after.0 > last.0 {
|
||||
last=after;
|
||||
}
|
||||
if let Ok(td) = env::var("CASE_DUMP") {
|
||||
println!("Dumping worst case to {:?}", td);
|
||||
let corpus = state.corpus();
|
||||
let mut worst = Duration::new(0,0);
|
||||
let mut worst_input = None;
|
||||
for i in 0..corpus.count() {
|
||||
let tc = corpus.get(i).expect("Could not get element from corpus").borrow();
|
||||
if worst < tc.exec_time().expect("Testcase missing duration") {
|
||||
worst_input = Some(tc.input().as_ref().unwrap().bytes().to_owned());
|
||||
worst = tc.exec_time().expect("Testcase missing duration");
|
||||
}
|
||||
}
|
||||
match worst_input {
|
||||
Some(wi) => {
|
||||
// let cd = format!("{}.case",&td);
|
||||
let cd = td.clone();
|
||||
fs::write(&cd,wi).expect("Failed to write worst corpus element");
|
||||
},
|
||||
None => (),
|
||||
}
|
||||
#[cfg(feature = "feed_systemgraph")]
|
||||
{
|
||||
let mut gd = String::from(&td);
|
||||
gd.push_str(".graph" );
|
||||
if let Some(md) = state.named_metadata_mut().get_mut::<SysGraphFeedbackState>("SysMap") {
|
||||
fs::write(&gd,ron::to_string(&md).expect("Failed to serialize graph")).expect("Failed to write graph");
|
||||
}
|
||||
}
|
||||
{
|
||||
let mut gd = String::from(&td);
|
||||
if let Some(md) = state.metadata_mut().get_mut::<TopRatedsMetadata>() {
|
||||
let mut uniq: Vec<usize> = md.map.values().map(|x| x.clone()).collect();
|
||||
uniq.sort();
|
||||
uniq.dedup();
|
||||
gd.push_str(&format!(".{}.toprated", uniq.len()));
|
||||
fs::write(&gd,ron::to_string(&md.map).expect("Failed to serialize metadata")).expect("Failed to write graph");
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
if let Ok(td) = env::var("TIME_DUMP") {
|
||||
let mut file = OpenOptions::new()
|
||||
.read(true)
|
||||
.write(true)
|
||||
.create(true)
|
||||
.append(true)
|
||||
.open(td).expect("Could not open timedump");
|
||||
if let Some(ichist) = state.metadata_mut().get_mut::<IcHist>() {
|
||||
for i in ichist.0.drain(..) {
|
||||
writeln!(file, "{},{}", i.0, i.1).expect("Write to dump failed");
|
||||
}
|
||||
}
|
||||
}
|
||||
if let Ok(td) = env::var("CASE_DUMP") {
|
||||
println!("Dumping worst case to {:?}", td);
|
||||
let corpus = state.corpus();
|
||||
let mut worst = Duration::new(0,0);
|
||||
let mut worst_input = None;
|
||||
for i in 0..corpus.count() {
|
||||
let tc = corpus.get(i).expect("Could not get element from corpus").borrow();
|
||||
if worst < tc.exec_time().expect("Testcase missing duration") {
|
||||
worst_input = Some(tc.input().as_ref().unwrap().bytes().to_owned());
|
||||
worst = tc.exec_time().expect("Testcase missing duration");
|
||||
}
|
||||
}
|
||||
match worst_input {
|
||||
Some(wi) => {
|
||||
// let cd = format!("{}.case",&td);
|
||||
let cd = td.clone();
|
||||
fs::write(&cd,wi).expect("Failed to write worst corpus element");
|
||||
},
|
||||
None => (),
|
||||
}
|
||||
#[cfg(feature = "feed_systemgraph")]
|
||||
{
|
||||
let mut gd = String::from(&td);
|
||||
gd.push_str(".graph");
|
||||
if let Some(md) = state.named_metadata_mut().get_mut::<SysGraphFeedbackState>("SysMap") {
|
||||
fs::write(&gd,ron::to_string(&md).expect("Failed to serialize graph")).expect("Failed to write graph");
|
||||
}
|
||||
}
|
||||
{
|
||||
let mut gd = String::from(&td);
|
||||
if let Some(md) = state.metadata_mut().get_mut::<TopRatedsMetadata>() {
|
||||
let mut uniq: Vec<usize> = md.map.values().map(|x| x.clone()).collect();
|
||||
uniq.sort();
|
||||
uniq.dedup();
|
||||
gd.push_str(&format!(".{}.toprated", uniq.len()));
|
||||
fs::write(&gd,ron::to_string(&md.map).expect("Failed to serialize metadata")).expect("Failed to write graph");
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
}
|
||||
}
|
||||
#[cfg(not(feature = "singlecore"))]
|
||||
return Ok(());
|
||||
};
|
||||
|
||||
// Special case where no fuzzing happens; instead the target's default input buffer is dumped as a seed
|
||||
if let Ok(input_dump) = env::var("DUMP_SEED") {
|
||||
// Initialize QEMU
|
||||
let args: Vec<String> = env::args().collect();
|
||||
let env: Vec<(String, String)> = env::vars().collect();
|
||||
let emu = Emulator::new(&args, &env);
|
||||
|
||||
if let Some(main_addr) = main_addr {
|
||||
unsafe { libafl_qemu_set_native_breakpoint(main_addr); }// BREAKPOINT
|
||||
}
|
||||
unsafe {
|
||||
emu.run();
|
||||
|
||||
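// after the breakpoint hits, copy the target's input buffer out of guest memory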
let mut buf = [0u8].repeat(MAX_INPUT_SIZE);
|
||||
emu.read_phys_mem(input_addr, buf.as_mut_slice());
|
||||
|
||||
let dir = env::var("SEED_DIR").map_or("./corpus".to_string(), |x| x);
|
||||
let filename = if input_dump == "" {"input"} else {&input_dump};
|
||||
println!("Dumping input to: {}/{}",&dir,filename);
|
||||
fs::write(format!("{}/{}",&dir,filename), buf).expect("could not write input dump");
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
#[cfg(feature = "singlecore")]
|
||||
{
|
||||
let monitor = SimpleMonitor::new(|s| println!("{}", s));
|
||||
#[cfg(not(feature = "restarting"))]
|
||||
{
|
||||
let mgr = SimpleEventManager::new(monitor);
|
||||
run_client(None, mgr, 0);
|
||||
}
|
||||
|
||||
#[cfg(feature = "restarting")]
|
||||
{
|
||||
let mut shmem_provider = StdShMemProvider::new().unwrap();
|
||||
let (state, mut mgr) = match SimpleRestartingEventManager::launch(monitor, &mut shmem_provider)
|
||||
{
|
||||
// The restarting state will spawn the same process again as a child, then restart it each time it crashes.
|
||||
Ok(res) => res,
|
||||
Err(err) => match err {
|
||||
Error::ShuttingDown => {
|
||||
return;
|
||||
}
|
||||
_ => {
|
||||
panic!("Failed to setup the restarter: {}", err);
|
||||
}
|
||||
},
|
||||
};
|
||||
run_client(state, mgr, 0);
|
||||
}
|
||||
}
|
||||
// else -> multicore
|
||||
#[cfg(not(feature = "singlecore"))]
|
||||
{
|
||||
// The shared memory allocator
|
||||
let shmem_provider = StdShMemProvider::new().expect("Failed to init shared memory");
|
||||
|
||||
// The stats reporter for the broker
|
||||
let monitor = MultiMonitor::new(|s| println!("{}", s));
|
||||
|
||||
// Build and run a Launcher
|
||||
match Launcher::builder()
|
||||
.shmem_provider(shmem_provider)
|
||||
.broker_port(broker_port)
|
||||
.configuration(EventConfig::from_build_id())
|
||||
.monitor(monitor)
|
||||
.run_client(&mut run_client)
|
||||
.cores(&cores)
|
||||
// .stdout_file(Some("/dev/null"))
|
||||
.build()
|
||||
.launch()
|
||||
{
|
||||
Ok(()) => (),
|
||||
Err(Error::ShuttingDown) => println!("Fuzzing stopped by user. Good bye."),
|
||||
Err(err) => panic!("Failed to run launcher: {:?}", err),
|
||||
}
|
||||
}
|
||||
}
|
@ -1,13 +0,0 @@
#![feature(is_sorted)]
#[cfg(target_os = "linux")]
mod fuzzer;
#[cfg(target_os = "linux")]
mod clock;
#[cfg(target_os = "linux")]
mod qemustate;
#[cfg(target_os = "linux")]
pub mod systemstate;
#[cfg(target_os = "linux")]
mod mutational;
#[cfg(target_os = "linux")]
mod worst;
@ -1,240 +0,0 @@
|
||||
//! The [`MutationalStage`] is the default stage used during fuzzing.
|
||||
//! For the current input, it will perform a range of random mutations, and then run them in the executor.
|
||||
|
||||
use core::marker::PhantomData;
|
||||
use std::cmp::{max, min};
|
||||
|
||||
use libafl::{
|
||||
bolts::rands::Rand,
|
||||
corpus::{Corpus, self},
|
||||
fuzzer::Evaluator,
|
||||
mark_feature_time,
|
||||
stages::{Stage},
|
||||
start_timer,
|
||||
state::{HasClientPerfMonitor, HasCorpus, HasRand, UsesState, HasMetadata},
|
||||
Error, prelude::{HasBytesVec, UsesInput, new_hash_feedback, StdRand, RandomSeed, MutationResult, Mutator},
|
||||
};
|
||||
use crate::{systemstate::{FreeRTOSSystemStateMetadata, RefinedFreeRTOSSystemState}, fuzzer::DO_NUM_INTERRUPT, clock::IcHist};
|
||||
|
||||
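// evaluates to 11_200_000 ticks; used below as the minimum spacing enforced between injected interrupts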
pub const MINIMUM_INTER_ARRIVAL_TIME : u32 = 700 * 1000 * (1 << 4);
|
||||
|
||||
//======================= Custom mutator
|
||||
|
||||
/// The default mutational stage
|
||||
#[derive(Clone, Debug, Default)]
|
||||
pub struct MyStateStage<E, EM, Z> {
|
||||
#[allow(clippy::type_complexity)]
|
||||
phantom: PhantomData<(E, EM, Z)>,
|
||||
}
|
||||
|
||||
impl<E, EM, Z> MyStateStage<E, EM, Z>
|
||||
where
|
||||
E: UsesState<State = Z::State>,
|
||||
EM: UsesState<State = Z::State>,
|
||||
Z: Evaluator<E, EM>,
|
||||
Z::State: HasClientPerfMonitor + HasCorpus + HasRand,
|
||||
{
|
||||
pub fn new() -> Self {
|
||||
Self { phantom: PhantomData }
|
||||
}
|
||||
}
|
||||
|
||||
impl<E, EM, Z> Stage<E, EM, Z> for MyStateStage<E, EM, Z>
|
||||
where
|
||||
E: UsesState<State = Z::State>,
|
||||
EM: UsesState<State = Z::State>,
|
||||
Z: Evaluator<E, EM>,
|
||||
Z::State: HasClientPerfMonitor + HasCorpus + HasRand + HasMetadata,
|
||||
<Z::State as UsesInput>::Input: HasBytesVec
|
||||
{
|
||||
fn perform(
|
||||
&mut self,
|
||||
fuzzer: &mut Z,
|
||||
executor: &mut E,
|
||||
state: &mut Self::State,
|
||||
manager: &mut EM,
|
||||
corpus_idx: usize,
|
||||
) -> Result<(), Error> {
|
||||
let mut _input = state
|
||||
.corpus()
|
||||
.get(corpus_idx)?
|
||||
.borrow_mut().clone();
|
||||
let mut newinput = _input.input_mut().as_mut().unwrap().clone();
|
||||
// let mut tmpinput = _input.input_mut().as_mut().unwrap().clone();
|
||||
let mut do_rerun = false;
|
||||
{
|
||||
// need our own random generator because of borrowing rules
|
||||
let mut myrand = StdRand::new();
|
||||
let mut target_bytes : Vec<u8> = vec![];
|
||||
{
|
||||
let input = _input.input_mut().as_ref().unwrap();
|
||||
let tmp = &mut state.rand_mut();
|
||||
myrand.set_seed(tmp.next());
|
||||
target_bytes = input.bytes().to_vec();
|
||||
}
|
||||
|
||||
// produce a slice of absolute interrupt times
|
||||
let mut interrupt_offsets : [u32; 32] = [0u32; 32];
|
||||
let mut num_interrupts : usize = 0;
|
||||
{
|
||||
let mut start_tick : u32 = 0;
|
||||
for i in 0..DO_NUM_INTERRUPT {
|
||||
let mut t : [u8; 4] = [0,0,0,0];
|
||||
if target_bytes.len() > (i+1)*4 {
|
||||
for j in 0 as usize..4 as usize {
|
||||
t[j]=target_bytes[i*4+j];
|
||||
}
|
||||
if i == 0 || true {
|
||||
start_tick = u32::from_le_bytes(t);
|
||||
} else {
|
||||
start_tick = u32::saturating_add(start_tick,max(MINIMUM_INTER_ARRIVAL_TIME,u32::from_le_bytes(t)));
|
||||
}
|
||||
interrupt_offsets[i] = start_tick;
|
||||
num_interrupts = i+1;
|
||||
}
|
||||
}
|
||||
}
|
||||
interrupt_offsets.sort();
|
||||
|
||||
// println!("Vor Mutator: {:?}", interrupt_offsets[0..num_interrupts].to_vec());
|
||||
// let num_i = min(target_bytes.len() / 4, DO_NUM_INTERRUPT);
|
||||
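// input layout: the first 4*num_interrupts bytes encode little-endian u32 interrupt times, the rest is kept as an untouched suffix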
let mut suffix = target_bytes.split_off(4 * num_interrupts);
|
||||
let mut prefix : Vec<[u8; 4]> = vec![];
|
||||
// let mut suffix : Vec<u8> = vec![];
|
||||
#[cfg(feature = "feed_systemtrace")]
|
||||
{
|
||||
let tmp = _input.metadata().get::<FreeRTOSSystemStateMetadata>();
|
||||
if tmp.is_some() {
|
||||
let trace = tmp.expect("FreeRTOSSystemStateMetadata not found");
|
||||
|
||||
// calculate hits and identify snippets
|
||||
let mut last_m = false;
|
||||
let mut marks : Vec<(&RefinedFreeRTOSSystemState, usize, usize)>= vec![]; // 1: got interrupted, 2: interrupt handler
|
||||
for i in 0..trace.inner.len() {
|
||||
let curr = &trace.inner[i];
|
||||
let m = interrupt_offsets[0..num_interrupts].iter().any(|x| (curr.start_tick..curr.end_tick).contains(&(*x as u64)));
|
||||
if m {
|
||||
marks.push((curr, i, 1));
|
||||
// println!("1: {}",curr.current_task.task_name);
|
||||
} else if last_m {
|
||||
marks.push((curr, i, 2));
|
||||
// println!("2: {}",curr.current_task.task_name);
|
||||
} else {
|
||||
marks.push((curr, i, 0));
|
||||
}
|
||||
last_m = m;
|
||||
}
|
||||
for i in 0..num_interrupts {
|
||||
// bounds based on minimum inter-arrival time
|
||||
let mut lb = 0;
|
||||
let mut ub : u32 = marks[marks.len()-1].0.end_tick.try_into().expect("ticks > u32");
|
||||
if i > 0 {
|
||||
lb = u32::saturating_add(interrupt_offsets[i-1],MINIMUM_INTER_ARRIVAL_TIME);
|
||||
}
|
||||
if i < num_interrupts-1 {
|
||||
ub = u32::saturating_sub(interrupt_offsets[i+1],MINIMUM_INTER_ARRIVAL_TIME);
|
||||
}
|
||||
// get old hit and handler
|
||||
let old_hit = marks.iter().filter(
|
||||
|x| x.0.start_tick < (interrupt_offsets[i] as u64) && (interrupt_offsets[i] as u64) < x.0.end_tick
|
||||
).next();
|
||||
let old_handler = match old_hit {
|
||||
Some(s) => if s.1 < num_interrupts-1 && s.1 < marks.len()-1 {
|
||||
Some(marks[s.1+1])
|
||||
} else {None},
|
||||
None => None
|
||||
};
|
||||
// find reachable alternatives
|
||||
let alternatives : Vec<_> = marks.iter().filter(|x|
|
||||
x.2 != 2 &&
|
||||
(
|
||||
x.0.start_tick < (lb as u64) && (lb as u64) < x.0.end_tick
|
||||
|| x.0.start_tick < (ub as u64) && (ub as u64) < x.0.end_tick )
|
||||
).collect();
|
||||
// in case there are no alternatives
|
||||
if alternatives.len() == 0 {
|
||||
if old_hit.is_none() {
|
||||
// choose something random
|
||||
let untouched : Vec<_> = marks.iter().filter(
|
||||
|x| x.2 == 0
|
||||
).collect();
|
||||
if untouched.len() > 0 {
|
||||
let tmp = interrupt_offsets[i];
|
||||
let choice = myrand.choose(untouched);
|
||||
interrupt_offsets[i] = myrand.between(choice.0.start_tick, choice.0.end_tick)
|
||||
.try_into().expect("tick > u32");
|
||||
do_rerun = true;
|
||||
}
|
||||
// println!("no alternatives, choose random i: {} {} -> {}",i,tmp,interrupt_offsets[i]);
|
||||
continue;
|
||||
} else {
|
||||
// do nothing
|
||||
// println!("no alternatives, do nothing i: {} {}",i,interrupt_offsets[i]);
|
||||
continue;
|
||||
}
|
||||
}
|
||||
let replacement = myrand.choose(alternatives);
|
||||
if (old_hit.map_or(false, |x| x == replacement)) {
|
||||
// use the old value
|
||||
// println!("chose old value, do nothing i: {} {}",i,interrupt_offsets[i]);
|
||||
continue;
|
||||
} else {
|
||||
let extra = if (old_hit.map_or(false, |x| x.1 < replacement.1)) {
|
||||
// move further back, respect old_handler
|
||||
old_handler.map_or(0, |x| x.0.end_tick - x.0.start_tick)
|
||||
} else { 0 };
|
||||
let tmp = interrupt_offsets[i];
|
||||
interrupt_offsets[i] = (myrand.between(replacement.0.start_tick,
|
||||
replacement.0.end_tick) + extra).try_into().expect("ticks > u32");
|
||||
// println!("chose new alternative, i: {} {} -> {}",i,tmp, interrupt_offsets[i]);
|
||||
do_rerun = true;
|
||||
}
|
||||
}
|
||||
let mut numbers : Vec<u32> = interrupt_offsets[0..num_interrupts].to_vec();
|
||||
numbers.sort();
|
||||
// println!("Mutator: {:?}", numbers);
|
||||
let mut start : u32 = 0;
|
||||
// for i in 0..numbers.len() {
|
||||
// let tmp = numbers[i];
|
||||
// numbers[i] = numbers[i]-start;
|
||||
// start = tmp;
|
||||
// }
|
||||
for i in 0..numbers.len() {
|
||||
prefix.push(u32::to_le_bytes(numbers[i]));
|
||||
}
|
||||
}
|
||||
}
|
||||
#[cfg(not(feature = "feed_systemtrace"))]
|
||||
{
|
||||
let metadata = state.metadata();
|
||||
let hist = metadata.get::<IcHist>().unwrap();
|
||||
let maxtick : u64 = hist.1.0;
|
||||
// let maxtick : u64 = (_input.exec_time().expect("No duration found").as_nanos() >> 4).try_into().unwrap();
|
||||
let mut numbers : Vec<u32> = vec![];
|
||||
for i in 0..num_interrupts {
|
||||
prefix.push(u32::to_le_bytes(myrand.between(0, min(maxtick, u32::MAX as u64)).try_into().expect("ticks > u32")));
|
||||
}
|
||||
}
|
||||
|
||||
let mut n : Vec<u8> = vec![];
|
||||
n = [prefix.concat(), suffix].concat();
|
||||
newinput.bytes_mut().clear();
|
||||
newinput.bytes_mut().append(&mut n);
|
||||
}
|
||||
// InterruptShifterMutator::mutate(&mut mymut, state, &mut input, 0)?;
|
||||
if do_rerun {
|
||||
let (_, corpus_idx) = fuzzer.evaluate_input(state, executor, manager, newinput)?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl<E, EM, Z> UsesState for MyStateStage<E, EM, Z>
|
||||
where
|
||||
E: UsesState<State = Z::State>,
|
||||
EM: UsesState<State = Z::State>,
|
||||
Z: Evaluator<E, EM>,
|
||||
Z::State: HasClientPerfMonitor + HasCorpus + HasRand,
|
||||
{
|
||||
type State = Z::State;
|
||||
}
|
@ -1,96 +0,0 @@
|
||||
use libafl::prelude::UsesInput;
|
||||
use libafl_qemu::CPUArchState;
|
||||
use libafl_qemu::Emulator;
|
||||
use libafl_qemu::FastSnapshot;
|
||||
use libafl_qemu::QemuExecutor;
|
||||
use libafl_qemu::QemuHelper;
|
||||
use libafl_qemu::QemuHelperTuple;
|
||||
use libafl::{executors::ExitKind, inputs::Input, observers::ObserversTuple, state::HasMetadata};
|
||||
use libafl_qemu::QemuHooks;
|
||||
|
||||
use libafl_qemu::{
|
||||
emu,
|
||||
};
|
||||
// TODO be thread-safe maybe with https://amanieu.github.io/thread_local-rs/thread_local/index.html
|
||||
#[derive(Debug)]
|
||||
pub struct QemuStateRestoreHelper {
|
||||
has_snapshot: bool,
|
||||
use_snapshot: bool,
|
||||
saved_cpu_states: Vec<CPUArchState>,
|
||||
fastsnap: Option<FastSnapshot>
|
||||
}
|
||||
|
||||
impl QemuStateRestoreHelper {
|
||||
#[must_use]
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
has_snapshot: false,
|
||||
use_snapshot: true,
|
||||
saved_cpu_states: vec![],
|
||||
fastsnap: None
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for QemuStateRestoreHelper {
|
||||
fn default() -> Self {
|
||||
Self::new()
|
||||
}
|
||||
}
|
||||
|
||||
impl<S> QemuHelper<S> for QemuStateRestoreHelper
|
||||
where
|
||||
S: UsesInput,
|
||||
{
|
||||
const HOOKS_DO_SIDE_EFFECTS: bool = true;
|
||||
|
||||
fn init_hooks<QT>(&self, _hooks: &QemuHooks<'_, QT, S>)
|
||||
where
|
||||
QT: QemuHelperTuple<S>,
|
||||
{
|
||||
}
|
||||
|
||||
fn first_exec<QT>(&self, _hooks: &QemuHooks<'_, QT, S>)
|
||||
where
|
||||
QT: QemuHelperTuple<S>,
|
||||
{
|
||||
}
|
||||
|
||||
fn post_exec(&mut self, emulator: &Emulator, _input: &S::Input) {
|
||||
// unsafe { println!("snapshot post {}",emu::icount_get_raw()) };
|
||||
}
|
||||
|
||||
fn pre_exec(&mut self, emulator: &Emulator, _input: &S::Input) {
|
||||
// only restore in pre-exec, to preserve the post-execution state for inspection
|
||||
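// three restore strategies: fast snapshots ("snapshot_fast"), named QEMU snapshots (plain "snapshot_restore"), or a manual per-CPU register save/restore fallback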
#[cfg(feature = "snapshot_restore")]
|
||||
{
|
||||
#[cfg(feature = "snapshot_fast")]
|
||||
match self.fastsnap {
|
||||
Some(s) => emulator.restore_fast_snapshot(s),
|
||||
None => {self.fastsnap = Some(emulator.create_fast_snapshot(true));},
|
||||
}
|
||||
#[cfg(not(feature = "snapshot_fast"))]
|
||||
if !self.has_snapshot {
|
||||
emulator.save_snapshot("Start", true);
|
||||
self.has_snapshot = true;
|
||||
}
|
||||
else
|
||||
{
|
||||
emulator.load_snapshot("Start", true);
|
||||
}
|
||||
}
|
||||
#[cfg(not(feature = "snapshot_restore"))]
|
||||
if !self.has_snapshot {
|
||||
self.saved_cpu_states = (0..emulator.num_cpus())
|
||||
.map(|i| emulator.cpu_from_index(i).save_state())
|
||||
.collect();
|
||||
self.has_snapshot = true;
|
||||
} else {
|
||||
for (i, s) in self.saved_cpu_states.iter().enumerate() {
|
||||
emulator.cpu_from_index(i).restore_state(s);
|
||||
}
|
||||
}
|
||||
|
||||
// unsafe { println!("snapshot pre {}",emu::icount_get_raw()) };
|
||||
}
|
||||
}
|
@ -1,299 +0,0 @@
|
||||
use libafl::SerdeAny;
|
||||
use libafl::bolts::ownedref::OwnedSlice;
|
||||
use libafl::inputs::BytesInput;
|
||||
use libafl::prelude::UsesInput;
|
||||
use libafl::state::HasNamedMetadata;
|
||||
use std::path::PathBuf;
|
||||
use crate::clock::QemuClockObserver;
|
||||
use libafl::corpus::Testcase;
|
||||
use libafl::bolts::tuples::MatchName;
|
||||
use std::collections::hash_map::DefaultHasher;
|
||||
use std::hash::Hasher;
|
||||
use std::hash::Hash;
|
||||
use libafl::events::EventFirer;
|
||||
use libafl::state::HasClientPerfMonitor;
|
||||
use libafl::feedbacks::Feedback;
|
||||
use libafl::bolts::tuples::Named;
|
||||
use libafl::Error;
|
||||
use hashbrown::HashMap;
|
||||
use libafl::{executors::ExitKind, inputs::Input, observers::ObserversTuple, state::HasMetadata};
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use super::RefinedFreeRTOSSystemState;
|
||||
use super::FreeRTOSSystemStateMetadata;
|
||||
use super::observers::QemuSystemStateObserver;
|
||||
use petgraph::prelude::DiGraph;
|
||||
use petgraph::graph::NodeIndex;
|
||||
use petgraph::Direction;
|
||||
use std::cmp::Ordering;
|
||||
|
||||
//============================= Feedback
|
||||
|
||||
/// Shared Metadata for a systemstateFeedback
|
||||
#[derive(Debug, Serialize, Deserialize, SerdeAny, Clone, Default)]
|
||||
pub struct SystemStateFeedbackState
|
||||
{
|
||||
known_traces: HashMap<u64,(u64,u64,usize)>, // encounters,ticks,length
|
||||
longest: Vec<RefinedFreeRTOSSystemState>,
|
||||
}
|
||||
impl Named for SystemStateFeedbackState
|
||||
{
|
||||
#[inline]
|
||||
fn name(&self) -> &str {
|
||||
"systemstate"
|
||||
}
|
||||
}
|
||||
// impl FeedbackState for systemstateFeedbackState
|
||||
// {
|
||||
// fn reset(&mut self) -> Result<(), Error> {
|
||||
// self.longest.clear();
|
||||
// self.known_traces.clear();
|
||||
// Ok(())
|
||||
// }
|
||||
// }
|
||||
|
||||
/// A Feedback reporting novel System-State Transitions. Depends on [`QemuSystemStateObserver`]
|
||||
#[derive(Serialize, Deserialize, Clone, Debug, Default)]
|
||||
pub struct NovelSystemStateFeedback
|
||||
{
|
||||
last_trace: Option<Vec<RefinedFreeRTOSSystemState>>,
|
||||
// known_traces: HashMap<u64,(u64,usize)>,
|
||||
}
|
||||
|
||||
impl<S> Feedback<S> for NovelSystemStateFeedback
|
||||
where
|
||||
S: UsesInput + HasClientPerfMonitor + HasNamedMetadata,
|
||||
{
|
||||
fn is_interesting<EM, OT>(
|
||||
&mut self,
|
||||
state: &mut S,
|
||||
manager: &mut EM,
|
||||
input: &S::Input,
|
||||
observers: &OT,
|
||||
exit_kind: &ExitKind,
|
||||
) -> Result<bool, Error>
|
||||
where
|
||||
EM: EventFirer<State = S>,
|
||||
OT: ObserversTuple<S>
|
||||
{
|
||||
let observer = observers.match_name::<QemuSystemStateObserver>("systemstate")
|
||||
.expect("QemuSystemStateObserver not found");
|
||||
let clock_observer = observers.match_name::<QemuClockObserver>("clocktime") //TODO not fixed
|
||||
.expect("QemuClockObserver not found");
|
||||
let feedbackstate = match state
|
||||
.named_metadata_mut()
|
||||
.get_mut::<SystemStateFeedbackState>("systemstate") {
|
||||
Some(s) => s,
|
||||
None => {
|
||||
let n=SystemStateFeedbackState::default();
|
||||
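// note: the "|| true" short-circuits the else branch, so every u32 is treated as an absolute tick rather than a relative offset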
state.named_metadata_mut().insert(n, "systemstate");
|
||||
state.named_metadata_mut().get_mut::<SystemStateFeedbackState>("systemstate").unwrap()
|
||||
}
|
||||
};
|
||||
// let feedbackstate = state
|
||||
// .feedback_states_mut()
|
||||
// .match_name_mut::<systemstateFeedbackState>("systemstate")
|
||||
// .unwrap();
|
||||
// hash the observed system-state trace; a run is interesting if the trace is novel or sets a new runtime record for a known trace
|
||||
let mut hasher = DefaultHasher::new();
|
||||
observer.last_run.hash(&mut hasher);
|
||||
let somehash = hasher.finish();
|
||||
let mut is_novel = false;
|
||||
let mut takes_longer = false;
|
||||
match feedbackstate.known_traces.get_mut(&somehash) {
|
||||
None => {
|
||||
is_novel = true;
|
||||
feedbackstate.known_traces.insert(somehash,(1,clock_observer.last_runtime(),observer.last_run.len()));
|
||||
}
|
||||
Some(s) => {
|
||||
s.0+=1;
|
||||
if s.1 < clock_observer.last_runtime() {
|
||||
s.1 = clock_observer.last_runtime();
|
||||
takes_longer = true;
|
||||
}
|
||||
}
|
||||
}
|
||||
if observer.last_run.len() > feedbackstate.longest.len() {
|
||||
feedbackstate.longest=observer.last_run.clone();
|
||||
}
|
||||
self.last_trace = Some(observer.last_run.clone());
|
||||
// if (!is_novel) { println!("not novel") };
|
||||
Ok(is_novel | takes_longer)
|
||||
}
|
||||
|
||||
/// Append to the testcase the generated metadata in case of a new corpus item
|
||||
#[inline]
|
||||
fn append_metadata(&mut self, _state: &mut S, testcase: &mut Testcase<S::Input>) -> Result<(), Error> {
|
||||
let a = self.last_trace.take();
|
||||
match a {
|
||||
Some(s) => testcase.metadata_mut().insert(FreeRTOSSystemStateMetadata::new(s)),
|
||||
None => (),
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Discard the stored metadata in case that the testcase is not added to the corpus
|
||||
#[inline]
|
||||
fn discard_metadata(&mut self, _state: &mut S, _input: &S::Input) -> Result<(), Error> {
|
||||
self.last_trace = None;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl Named for NovelSystemStateFeedback
|
||||
{
|
||||
#[inline]
|
||||
fn name(&self) -> &str {
|
||||
"systemstate"
|
||||
}
|
||||
}
|
||||
|
||||
//=============================
|
||||
|
||||
pub fn match_traces(target: &Vec<RefinedFreeRTOSSystemState>, last: &Vec<RefinedFreeRTOSSystemState>) -> bool {
|
||||
let mut ret = true;
|
||||
if target.len() > last.len() {return false;}
|
||||
for i in 0..target.len() {
|
||||
ret &= target[i].current_task.task_name==last[i].current_task.task_name;
|
||||
}
|
||||
ret
|
||||
}
|
||||
pub fn match_traces_name(target: &Vec<String>, last: &Vec<RefinedFreeRTOSSystemState>) -> bool {
|
||||
let mut ret = true;
|
||||
if target.len() > last.len() {return false;}
|
||||
for i in 0..target.len() {
|
||||
ret &= target[i]==last[i].current_task.task_name;
|
||||
}
|
||||
ret
|
||||
}
|
||||
|
||||
/// A Feedback reporting whether a given sequence of task names was hit. Depends on [`QemuSystemStateObserver`]
|
||||
#[derive(Serialize, Deserialize, Clone, Debug, Default)]
|
||||
pub struct HitSystemStateFeedback
|
||||
{
|
||||
target: Option<Vec<String>>,
|
||||
}
|
||||
|
||||
impl<S> Feedback<S> for HitSystemStateFeedback
|
||||
where
|
||||
S: UsesInput + HasClientPerfMonitor,
|
||||
{
|
||||
fn is_interesting<EM, OT>(
|
||||
&mut self,
|
||||
state: &mut S,
|
||||
manager: &mut EM,
|
||||
input: &S::Input,
|
||||
observers: &OT,
|
||||
exit_kind: &ExitKind,
|
||||
) -> Result<bool, Error>
|
||||
where
|
||||
EM: EventFirer<State = S>,
|
||||
OT: ObserversTuple<S>
|
||||
{
|
||||
let observer = observers.match_name::<QemuSystemStateObserver>("systemstate")
|
||||
.expect("QemuSystemStateObserver not found");
|
||||
// a run is interesting if the observed trace starts with the configured target task sequence
|
||||
match &self.target {
|
||||
Some(s) => {
|
||||
// #[cfg(debug_assertions)] eprintln!("Hit systemstate Feedback trigger");
|
||||
Ok(match_traces_name(s, &observer.last_run))
|
||||
},
|
||||
None => Ok(false),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Named for HitSystemStateFeedback
|
||||
{
|
||||
#[inline]
|
||||
fn name(&self) -> &str {
|
||||
"hit_systemstate"
|
||||
}
|
||||
}
|
||||
|
||||
impl HitSystemStateFeedback {
|
||||
pub fn new(target: Option<Vec<RefinedFreeRTOSSystemState>>) -> Self {
|
||||
Self {target: target.map(|x| x.into_iter().map(|y| y.current_task.task_name).collect())}
|
||||
}
|
||||
}
|
||||
//=========================== Debugging Feedback
|
||||
/// A [`Feedback`] meant to dump the system-traces for debugging. Depends on [`QemuSystemStateObserver`]
|
||||
#[derive(Debug)]
|
||||
pub struct DumpSystraceFeedback
|
||||
{
|
||||
dumpfile: Option<PathBuf>,
|
||||
dump_metadata: bool,
|
||||
last_trace: Option<Vec<RefinedFreeRTOSSystemState>>,
|
||||
}
|
||||
|
||||
impl<S> Feedback<S> for DumpSystraceFeedback
|
||||
where
|
||||
S: UsesInput + HasClientPerfMonitor,
|
||||
{
|
||||
fn is_interesting<EM, OT>(
|
||||
&mut self,
|
||||
state: &mut S,
|
||||
manager: &mut EM,
|
||||
input: &S::Input,
|
||||
observers: &OT,
|
||||
exit_kind: &ExitKind,
|
||||
) -> Result<bool, Error>
|
||||
where
|
||||
EM: EventFirer<State = S>,
|
||||
OT: ObserversTuple<S>
|
||||
{
|
||||
let observer = observers.match_name::<QemuSystemStateObserver>("systemstate")
|
||||
.expect("QemuSystemStateObserver not found");
|
||||
let names : Vec<String> = observer.last_run.iter().map(|x| x.current_task.task_name.clone()).collect();
|
||||
match &self.dumpfile {
|
||||
Some(s) => {
|
||||
std::fs::write(s,ron::to_string(&observer.last_run).expect("Error serializing hashmap")).expect("Can not dump to file");
|
||||
self.dumpfile = None
|
||||
},
|
||||
None => if !self.dump_metadata {println!("{:?}\n{:?}",observer.last_run,names);}
|
||||
};
|
||||
if self.dump_metadata {self.last_trace=Some(observer.last_run.clone());}
|
||||
Ok(!self.dump_metadata)
|
||||
}
|
||||
/// Append to the testcase the generated metadata in case of a new corpus item
|
||||
#[inline]
|
||||
fn append_metadata(&mut self, _state: &mut S, testcase: &mut Testcase<S::Input>) -> Result<(), Error> {
|
||||
if !self.dump_metadata {return Ok(());}
|
||||
let a = self.last_trace.take();
|
||||
match a {
|
||||
Some(s) => testcase.metadata_mut().insert(FreeRTOSSystemStateMetadata::new(s)),
|
||||
None => (),
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Discard the stored metadata in case that the testcase is not added to the corpus
|
||||
#[inline]
|
||||
fn discard_metadata(&mut self, _state: &mut S, _input: &S::Input) -> Result<(), Error> {
|
||||
self.last_trace = None;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl Named for DumpSystraceFeedback
|
||||
{
|
||||
#[inline]
|
||||
fn name(&self) -> &str {
|
||||
"Dumpsystemstate"
|
||||
}
|
||||
}
|
||||
|
||||
impl DumpSystraceFeedback
|
||||
{
|
||||
/// Creates a new [`DumpSystraceFeedback`]
|
||||
#[must_use]
|
||||
pub fn new() -> Self {
|
||||
Self {dumpfile: None, dump_metadata: false, last_trace: None}
|
||||
}
|
||||
pub fn with_dump(dumpfile: Option<PathBuf>) -> Self {
|
||||
Self {dumpfile: dumpfile, dump_metadata: false, last_trace: None}
|
||||
}
|
||||
pub fn metadata_only() -> Self {
|
||||
Self {dumpfile: None, dump_metadata: true, last_trace: None}
|
||||
}
|
||||
}
|
@ -1,122 +0,0 @@
|
||||
#![allow(non_camel_case_types,non_snake_case,non_upper_case_globals,deref_nullptr)]
|
||||
use serde::{Deserialize, Serialize};
|
||||
// Manual Types
|
||||
use libafl_qemu::Emulator;
|
||||
|
||||
/*========== Start of generated Code =============*/
|
||||
pub type char_ptr = ::std::os::raw::c_uint;
|
||||
pub type ListItem_t_ptr = ::std::os::raw::c_uint;
|
||||
pub type StackType_t_ptr = ::std::os::raw::c_uint;
|
||||
pub type void_ptr = ::std::os::raw::c_uint;
|
||||
pub type tskTaskControlBlock_ptr = ::std::os::raw::c_uint;
|
||||
pub type xLIST_ptr = ::std::os::raw::c_uint;
|
||||
pub type xLIST_ITEM_ptr = ::std::os::raw::c_uint;
|
||||
/* automatically generated by rust-bindgen 0.59.2 */
|
||||
|
||||
pub type __uint8_t = ::std::os::raw::c_uchar;
|
||||
pub type __uint16_t = ::std::os::raw::c_ushort;
|
||||
pub type __uint32_t = ::std::os::raw::c_uint;
|
||||
pub type StackType_t = u32;
|
||||
pub type UBaseType_t = ::std::os::raw::c_uint;
|
||||
pub type TickType_t = u32;
|
||||
#[repr(C)]
|
||||
#[derive(Debug, Copy, Clone, Default, Serialize, Deserialize)]
|
||||
pub struct xLIST_ITEM {
|
||||
pub xItemValue: TickType_t,
|
||||
pub pxNext: xLIST_ITEM_ptr,
|
||||
pub pxPrevious: xLIST_ITEM_ptr,
|
||||
pub pvOwner: void_ptr,
|
||||
pub pvContainer: xLIST_ptr,
|
||||
}
|
||||
pub type ListItem_t = xLIST_ITEM;
|
||||
#[repr(C)]
|
||||
#[derive(Debug, Copy, Clone, Default, Serialize, Deserialize)]
|
||||
pub struct xMINI_LIST_ITEM {
|
||||
pub xItemValue: TickType_t,
|
||||
pub pxNext: xLIST_ITEM_ptr,
|
||||
pub pxPrevious: xLIST_ITEM_ptr,
|
||||
}
|
||||
pub type MiniListItem_t = xMINI_LIST_ITEM;
|
||||
#[repr(C)]
|
||||
#[derive(Debug, Copy, Clone, Default, Serialize, Deserialize)]
|
||||
pub struct xLIST {
|
||||
pub uxNumberOfItems: UBaseType_t,
|
||||
pub pxIndex: ListItem_t_ptr,
|
||||
pub xListEnd: MiniListItem_t,
|
||||
}
|
||||
pub type List_t = xLIST;
|
||||
pub type TaskHandle_t = tskTaskControlBlock_ptr;
|
||||
pub const eTaskState_eRunning: eTaskState = 0;
|
||||
pub const eTaskState_eReady: eTaskState = 1;
|
||||
pub const eTaskState_eBlocked: eTaskState = 2;
|
||||
pub const eTaskState_eSuspended: eTaskState = 3;
|
||||
pub const eTaskState_eDeleted: eTaskState = 4;
|
||||
pub const eTaskState_eInvalid: eTaskState = 5;
|
||||
pub type eTaskState = ::std::os::raw::c_uint;
|
||||
#[repr(C)]
|
||||
#[derive(Debug, Copy, Clone, Default, Serialize, Deserialize)]
|
||||
pub struct xTASK_STATUS {
|
||||
pub xHandle: TaskHandle_t,
|
||||
pub pcTaskName: char_ptr,
|
||||
pub xTaskNumber: UBaseType_t,
|
||||
pub eCurrentState: eTaskState,
|
||||
pub uxCurrentPriority: UBaseType_t,
|
||||
pub uxBasePriority: UBaseType_t,
|
||||
pub ulRunTimeCounter: u32,
|
||||
pub pxStackBase: StackType_t_ptr,
|
||||
pub usStackHighWaterMark: u16,
|
||||
}
|
||||
pub type TaskStatus_t = xTASK_STATUS;
|
||||
#[repr(C)]
|
||||
#[derive(Debug, Copy, Clone, Default, Serialize, Deserialize)]
|
||||
pub struct tskTaskControlBlock {
|
||||
pub pxTopOfStack: StackType_t_ptr,
|
||||
pub xStateListItem: ListItem_t,
|
||||
pub xEventListItem: ListItem_t,
|
||||
pub uxPriority: UBaseType_t,
|
||||
pub pxStack: StackType_t_ptr,
|
||||
pub pcTaskName: [::std::os::raw::c_char; 10usize],
|
||||
pub uxBasePriority: UBaseType_t,
|
||||
pub uxMutexesHeld: UBaseType_t,
|
||||
pub ulNotifiedValue: [u32; 1usize],
|
||||
pub ucNotifyState: [u8; 1usize],
|
||||
pub ucStaticallyAllocated: u8,
|
||||
pub ucDelayAborted: u8,
|
||||
}
|
||||
pub type tskTCB = tskTaskControlBlock;
|
||||
pub type TCB_t = tskTCB;
|
||||
/*========== End of generated Code =============*/
|
||||
|
||||
pub trait emu_lookup {
|
||||
fn lookup(emu: &Emulator, addr: ::std::os::raw::c_uint) -> Self;
|
||||
}
|
||||
|
||||
|
||||
#[derive(Debug, Copy, Clone, Serialize, Deserialize)]
|
||||
pub enum rtos_struct {
|
||||
TCB_struct(TCB_t),
|
||||
List_struct(List_t),
|
||||
List_Item_struct(ListItem_t),
|
||||
List_MiniItem_struct(MiniListItem_t),
|
||||
}
|
||||
|
||||
#[macro_export]
|
||||
macro_rules! impl_emu_lookup {
|
||||
($struct_name:ident) => {
|
||||
impl $crate::systemstate::freertos::emu_lookup for $struct_name {
|
||||
fn lookup(emu: &Emulator, addr: ::std::os::raw::c_uint) -> $struct_name {
|
||||
let mut tmp : [u8; std::mem::size_of::<$struct_name>()] = [0u8; std::mem::size_of::<$struct_name>()];
|
||||
unsafe {
|
||||
emu.read_mem(addr.into(), &mut tmp);
|
||||
std::mem::transmute::<[u8; std::mem::size_of::<$struct_name>()], $struct_name>(tmp)
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
}
|
||||
impl_emu_lookup!(TCB_t);
|
||||
impl_emu_lookup!(List_t);
|
||||
impl_emu_lookup!(ListItem_t);
|
||||
impl_emu_lookup!(MiniListItem_t);
|
||||
impl_emu_lookup!(void_ptr);
|
||||
impl_emu_lookup!(TaskStatus_t);
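// Minimal usage sketch, assuming `emu` is a libafl_qemu::Emulator handle and
// `tcb_addr` is the guest address of pxCurrentTCB (both placeholder names):
fn dump_current_task(emu: &Emulator, tcb_addr: ::std::os::raw::c_uint) {
    // the macro-generated lookup reads size_of::<T>() bytes from guest memory and reinterprets them
    let curr: void_ptr = emu_lookup::lookup(emu, tcb_addr);
    if curr != 0 {
        let tcb: TCB_t = emu_lookup::lookup(emu, curr);
        // pcTaskName is a fixed [c_char; 10] array, so print it with Debug
        println!("current task: {:?}", tcb.pcTaskName);
    }
}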
|
@ -1,604 +0,0 @@
|
||||
|
||||
use libafl::SerdeAny;
|
||||
/// Feedbacks organizing SystemStates as a graph
|
||||
use libafl::inputs::HasBytesVec;
|
||||
use libafl::bolts::rands::RandomSeed;
|
||||
use libafl::bolts::rands::StdRand;
|
||||
use libafl::mutators::Mutator;
|
||||
use libafl::mutators::MutationResult;
|
||||
use libafl::prelude::HasTargetBytes;
|
||||
use libafl::prelude::UsesInput;
|
||||
use libafl::state::HasNamedMetadata;
|
||||
use libafl::state::UsesState;
|
||||
use core::marker::PhantomData;
|
||||
use libafl::state::HasCorpus;
|
||||
use libafl::state::HasSolutions;
|
||||
use libafl::state::HasRand;
|
||||
use crate::worst::MaxExecsLenFavFactor;
|
||||
use libafl::schedulers::MinimizerScheduler;
|
||||
use libafl::bolts::HasRefCnt;
|
||||
use libafl::bolts::AsSlice;
|
||||
use libafl::bolts::ownedref::OwnedSlice;
|
||||
use libafl::inputs::BytesInput;
|
||||
use std::path::PathBuf;
|
||||
use crate::clock::QemuClockObserver;
|
||||
use libafl::corpus::Testcase;
|
||||
use libafl::bolts::tuples::MatchName;
|
||||
use std::collections::hash_map::DefaultHasher;
|
||||
use std::hash::Hasher;
|
||||
use std::hash::Hash;
|
||||
use libafl::events::EventFirer;
|
||||
use libafl::state::HasClientPerfMonitor;
|
||||
use libafl::feedbacks::Feedback;
|
||||
use libafl::bolts::tuples::Named;
|
||||
use libafl::Error;
|
||||
use hashbrown::HashMap;
|
||||
use libafl::{executors::ExitKind, inputs::Input, observers::ObserversTuple, state::HasMetadata};
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use super::RefinedFreeRTOSSystemState;
|
||||
use super::FreeRTOSSystemStateMetadata;
|
||||
use super::observers::QemuSystemStateObserver;
|
||||
use petgraph::prelude::DiGraph;
|
||||
use petgraph::graph::NodeIndex;
|
||||
use petgraph::Direction;
|
||||
use std::cmp::Ordering;
|
||||
|
||||
use libafl::bolts::rands::Rand;
|
||||
|
||||
//============================= Data Structures
|
||||
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Default)]
|
||||
pub struct VariantTuple
|
||||
{
|
||||
pub start_tick: u64,
|
||||
pub end_tick: u64,
|
||||
input_counter: u32,
|
||||
pub input: Vec<u8>, // in the end any kind of input are bytes, regardless of type and lifetime
|
||||
}
|
||||
impl VariantTuple {
|
||||
fn from(other: &RefinedFreeRTOSSystemState,input: Vec<u8>) -> Self {
|
||||
VariantTuple{
|
||||
start_tick: other.start_tick,
|
||||
end_tick: other.end_tick,
|
||||
input_counter: other.input_counter,
|
||||
input: input,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Clone, Debug, Default)]
|
||||
pub struct SysGraphNode
|
||||
{
|
||||
base: RefinedFreeRTOSSystemState,
|
||||
pub variants: Vec<VariantTuple>,
|
||||
}
|
||||
impl SysGraphNode {
|
||||
fn from(base: RefinedFreeRTOSSystemState, input: Vec<u8>) -> Self {
|
||||
SysGraphNode{variants: vec![VariantTuple::from(&base, input)], base:base }
|
||||
}
|
||||
/// unites the variants of this value with another, draining the other if the bases are equal
|
||||
fn unite(&mut self, other: &mut SysGraphNode) -> bool {
|
||||
if self!=other {return false;}
|
||||
self.variants.append(&mut other.variants);
|
||||
self.variants.dedup();
|
||||
return true;
|
||||
}
|
||||
/// add a Variant from a [`RefinedFreeRTOSSystemState`]
|
||||
fn unite_raw(&mut self, other: &RefinedFreeRTOSSystemState, input: &Vec<u8>) -> bool {
|
||||
if &self.base!=other {return false;}
|
||||
self.variants.push(VariantTuple::from(other, input.clone()));
|
||||
self.variants.dedup();
|
||||
return true;
|
||||
}
|
||||
/// add a Variant from a [`RefinedFreeRTOSSystemState`], if it's interesting
|
||||
fn unite_interesting(&mut self, other: &RefinedFreeRTOSSystemState, input: &Vec<u8>) -> bool {
|
||||
if &self.base!=other {return false;}
|
||||
let interesting =
|
||||
self.variants.iter().all(|x| x.end_tick-x.start_tick<other.end_tick-other.start_tick) || // longest variant
|
||||
self.variants.iter().all(|x| x.end_tick-x.start_tick>other.end_tick-other.start_tick) || // shortest variant
|
||||
self.variants.iter().all(|x| x.input_counter>other.input_counter) || // longest input
|
||||
self.variants.iter().all(|x| x.input_counter<other.input_counter); // shortest input
|
||||
if interesting {
|
||||
let var = VariantTuple::from(other, input.clone());
|
||||
self.variants.push(var);
|
||||
}
|
||||
return interesting;
|
||||
}
|
||||
pub fn get_taskname(&self) -> &str {
|
||||
&self.base.current_task.task_name
|
||||
}
|
||||
pub fn get_input_counts(&self) -> Vec<u32> {
|
||||
self.variants.iter().map(|x| x.input_counter).collect()
|
||||
}
|
||||
}
|
||||
impl PartialEq for SysGraphNode {
|
||||
fn eq(&self, other: &SysGraphNode) -> bool {
|
||||
self.base==other.base
|
||||
}
|
||||
}
|
||||
|
||||
// Wrapper around Vec<RefinedFreeRTOSSystemState> to attach as Metadata
|
||||
#[derive(Debug, Default, Serialize, Deserialize, Clone)]
|
||||
pub struct SysGraphMetadata {
|
||||
pub inner: Vec<NodeIndex>,
|
||||
indices: Vec<usize>,
|
||||
tcref: isize,
|
||||
}
|
||||
impl SysGraphMetadata {
|
||||
pub fn new(inner: Vec<NodeIndex>) -> Self{
|
||||
Self {indices: inner.iter().map(|x| x.index()).collect(), inner: inner, tcref: 0}
|
||||
}
|
||||
}
|
||||
impl AsSlice for SysGraphMetadata {
|
||||
/// Convert the trace of system-states to a slice of graph node indices
|
||||
fn as_slice(&self) -> &[usize] {
|
||||
self.indices.as_slice()
|
||||
}
|
||||
|
||||
type Entry = usize;
|
||||
}
|
||||
|
||||
impl HasRefCnt for SysGraphMetadata {
|
||||
fn refcnt(&self) -> isize {
|
||||
self.tcref
|
||||
}
|
||||
|
||||
fn refcnt_mut(&mut self) -> &mut isize {
|
||||
&mut self.tcref
|
||||
}
|
||||
}
|
||||
|
||||
libafl::impl_serdeany!(SysGraphMetadata);
|
||||
|
||||
pub type GraphMaximizerCorpusScheduler<CS> =
|
||||
MinimizerScheduler<CS, MaxExecsLenFavFactor<<CS as UsesState>::State>,SysGraphMetadata>;
|
||||
|
||||
//============================= Graph Feedback
|
||||
|
||||
/// Improved System State Graph
|
||||
#[derive(Serialize, Deserialize, Clone, Debug, Default, SerdeAny)]
|
||||
pub struct SysGraphFeedbackState
|
||||
{
|
||||
pub graph: DiGraph<SysGraphNode, ()>,
|
||||
entrypoint: NodeIndex,
|
||||
exit: NodeIndex,
|
||||
name: String,
|
||||
}
|
||||
impl SysGraphFeedbackState
|
||||
{
|
||||
pub fn new() -> Self {
|
||||
let mut graph = DiGraph::<SysGraphNode, ()>::new();
|
||||
let mut entry = SysGraphNode::default();
|
||||
entry.base.current_task.task_name="Start".to_string();
|
||||
let mut exit = SysGraphNode::default();
|
||||
exit.base.current_task.task_name="End".to_string();
|
||||
let entry = graph.add_node(entry);
|
||||
let exit = graph.add_node(exit);
|
||||
Self {graph: graph, entrypoint: entry, exit: exit, name: String::from("SysMap")}
|
||||
}
|
||||
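/// Add a system-state path to the graph, appending new nodes where no matching successor exists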
fn insert(&mut self, list: Vec<RefinedFreeRTOSSystemState>, input: &Vec<u8>) {
|
||||
let mut current_index = self.entrypoint;
|
||||
for n in list {
|
||||
let mut done = false;
|
||||
for i in self.graph.neighbors_directed(current_index, Direction::Outgoing) {
|
||||
if n == self.graph[i].base {
|
||||
done = true;
|
||||
current_index = i;
|
||||
break;
|
||||
}
|
||||
}
|
||||
if !done {
|
||||
let j = self.graph.add_node(SysGraphNode::from(n,input.clone()));
|
||||
self.graph.add_edge(current_index, j, ());
|
||||
current_index = j;
|
||||
}
|
||||
}
|
||||
}
|
||||
/// Try adding a system state path from a [Vec<RefinedFreeRTOSSystemState>], return true if the path was interesting
|
||||
fn update(&mut self, list: &Vec<RefinedFreeRTOSSystemState>, input: &Vec<u8>) -> (bool, Vec<NodeIndex>) {
|
||||
let mut current_index = self.entrypoint;
|
||||
let mut novel = false;
|
||||
let mut trace : Vec<NodeIndex> = vec![current_index];
|
||||
for n in list {
|
||||
let mut matching : Option<NodeIndex> = None;
|
||||
for i in self.graph.neighbors_directed(current_index, Direction::Outgoing) {
|
||||
let tmp = &self.graph[i];
|
||||
if n == &tmp.base {
|
||||
matching = Some(i);
|
||||
current_index = i;
|
||||
break;
|
||||
}
|
||||
}
|
||||
match matching {
|
||||
None => {
|
||||
novel = true;
|
||||
let j = self.graph.add_node(SysGraphNode::from(n.clone(),input.clone()));
|
||||
self.graph.add_edge(current_index, j, ());
|
||||
current_index = j;
|
||||
},
|
||||
Some(i) => {
|
||||
novel |= self.graph[i].unite_interesting(&n, input);
|
||||
}
|
||||
}
|
||||
trace.push(current_index);
|
||||
}
|
||||
self.graph.update_edge(current_index, self.exit, ()); // every path ends in the exit node
|
||||
return (novel, trace);
|
||||
}
|
||||
}
|
||||
impl Named for SysGraphFeedbackState
|
||||
{
|
||||
#[inline]
|
||||
fn name(&self) -> &str {
|
||||
&self.name
|
||||
}
|
||||
}
|
||||
impl SysGraphFeedbackState
|
||||
{
|
||||
fn reset(&mut self) -> Result<(), Error> {
|
||||
self.graph.clear();
|
||||
let mut entry = SysGraphNode::default();
|
||||
entry.base.current_task.task_name="Start".to_string();
|
||||
let mut exit = SysGraphNode::default();
|
||||
exit.base.current_task.task_name="End".to_string();
|
||||
self.entrypoint = self.graph.add_node(entry);
|
||||
self.exit = self.graph.add_node(exit);
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
/// A Feedback reporting novel System-State Transitions. Depends on [`QemuSystemStateObserver`]
|
||||
#[derive(Serialize, Deserialize, Clone, Debug, Default)]
|
||||
pub struct SysMapFeedback
|
||||
{
|
||||
name: String,
|
||||
last_trace: Option<Vec<NodeIndex>>,
|
||||
}
|
||||
impl SysMapFeedback {
|
||||
pub fn new() -> Self {
|
||||
Self {name: String::from("SysMapFeedback"), last_trace: None }
|
||||
}
|
||||
}
|
||||
|
||||
impl<S> Feedback<S> for SysMapFeedback
|
||||
where
|
||||
S: UsesInput + HasClientPerfMonitor + HasNamedMetadata,
|
||||
S::Input: HasTargetBytes,
|
||||
{
|
||||
#[allow(clippy::wrong_self_convention)]
|
||||
fn is_interesting<EM, OT>(
|
||||
&mut self,
|
||||
state: &mut S,
|
||||
_manager: &mut EM,
|
||||
_input: &S::Input,
|
||||
observers: &OT,
|
||||
_exit_kind: &ExitKind,
|
||||
) -> Result<bool, Error>
|
||||
where
|
||||
EM: EventFirer<State = S>,
|
||||
OT: ObserversTuple<S>,
|
||||
{
|
||||
let observer = observers.match_name::<QemuSystemStateObserver>("systemstate")
|
||||
.expect("QemuSystemStateObserver not found");
|
||||
let feedbackstate = match state
|
||||
.named_metadata_mut()
|
||||
.get_mut::<SysGraphFeedbackState>("SysMap") {
|
||||
Some(s) => s,
|
||||
None => {
|
||||
let n=SysGraphFeedbackState::default();
|
||||
state.named_metadata_mut().insert(n, "SysMap");
|
||||
state.named_metadata_mut().get_mut::<SysGraphFeedbackState>("SysMap").unwrap()
|
||||
}
|
||||
};
|
||||
let ret = feedbackstate.update(&observer.last_run, &observer.last_input);
|
||||
self.last_trace = Some(ret.1);
|
||||
Ok(ret.0)
|
||||
}
|
||||
|
||||
/// Append to the testcase the generated metadata in case of a new corpus item
|
||||
#[inline]
|
||||
fn append_metadata(&mut self, _state: &mut S, testcase: &mut Testcase<S::Input>) -> Result<(), Error> {
|
||||
let a = self.last_trace.take();
|
||||
match a {
|
||||
Some(s) => testcase.metadata_mut().insert(SysGraphMetadata::new(s)),
|
||||
None => (),
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Discard the stored metadata in case that the testcase is not added to the corpus
|
||||
#[inline]
|
||||
fn discard_metadata(&mut self, _state: &mut S, _input: &S::Input) -> Result<(), Error> {
|
||||
self.last_trace = None;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
impl Named for SysMapFeedback
|
||||
{
|
||||
#[inline]
|
||||
fn name(&self) -> &str {
|
||||
&self.name
|
||||
}
|
||||
}
|
||||
|
||||
//============================= Mutators
|
||||
//=============================== Snippets
|
||||
// pub struct RandGraphSnippetMutator<I, S>
|
||||
// where
|
||||
// I: Input + HasBytesVec,
|
||||
// S: HasRand + HasMetadata + HasCorpus<I> + HasSolutions<I>,
|
||||
// {
|
||||
// phantom: PhantomData<(I, S)>,
|
||||
// }
|
||||
// impl<I, S> RandGraphSnippetMutator<I, S>
|
||||
// where
|
||||
// I: Input + HasBytesVec,
|
||||
// S: HasRand + HasMetadata + HasCorpus<I> + HasSolutions<I>,
|
||||
// {
|
||||
// pub fn new() -> Self {
|
||||
// RandGraphSnippetMutator{phantom: PhantomData}
|
||||
// }
|
||||
// }
|
||||
// impl<I, S> Mutator<I, S> for RandGraphSnippetMutator<I, S>
|
||||
// where
|
||||
// I: Input + HasBytesVec,
|
||||
// S: HasRand + HasMetadata + HasCorpus<I> + HasSolutions<I>,
|
||||
// {
|
||||
// fn mutate(
|
||||
// &mut self,
|
||||
// state: &mut S,
|
||||
// input: &mut I,
|
||||
// _stage_idx: i32
|
||||
// ) -> Result<MutationResult, Error>
|
||||
// {
|
||||
// // need our own random generator, because borrowing rules
|
||||
// let mut myrand = StdRand::new();
|
||||
// let tmp = &mut state.rand_mut();
|
||||
// myrand.set_seed(tmp.next());
|
||||
// drop(tmp);
|
||||
|
||||
// let feedbackstate = state
|
||||
// .feedback_states()
|
||||
// .match_name::<SysGraphFeedbackState>("SysMap")
|
||||
// .unwrap();
|
||||
// let g = &feedbackstate.graph;
|
||||
// let tmp = state.metadata().get::<SysGraphMetadata>();
|
||||
// if tmp.is_none() { // if there are no metadata it was probably not interesting anyways
|
||||
// return Ok(MutationResult::Skipped);
|
||||
// }
|
||||
// let trace =tmp.expect("SysGraphMetadata not found");
|
||||
// // follow the path, extract snippets from last reads, find common snippets.
|
||||
// // those are likely key parts. choose random parts from other sibling traces
|
||||
// let sibling_inputs : Vec<&Vec<u8>>= g[*trace.inner.last().unwrap()].variants.iter().map(|x| &x.input).collect();
|
||||
// let mut snippet_collector = vec![];
|
||||
// let mut per_input_counters = HashMap::<&Vec<u8>,usize>::new(); // ugly workaround to track multiple inputs
|
||||
// for t in &trace.inner {
|
||||
// let node = &g[*t];
|
||||
// let mut per_node_snippets = HashMap::<&Vec<u8>,&[u8]>::new();
|
||||
// for v in &node.variants {
|
||||
// match per_input_counters.get_mut(&v.input) {
|
||||
// None => {
|
||||
// if sibling_inputs.iter().any(|x| *x==&v.input) { // only collect info about siblin inputs from target
|
||||
// per_input_counters.insert(&v.input, v.input_counter.try_into().unwrap());
|
||||
// }
|
||||
// },
|
||||
// Some(x) => {
|
||||
// let x_u = *x;
|
||||
// if x_u<v.input_counter as usize {
|
||||
// *x=v.input_counter as usize;
|
||||
// per_node_snippets.insert(&v.input,&v.input[x_u..v.input_counter as usize]);
|
||||
// }
|
||||
// }
|
||||
// }
|
||||
// }
|
||||
// snippet_collector.push(per_node_snippets);
|
||||
// }
|
||||
// let mut new_input : Vec<u8> = vec![];
|
||||
// for c in snippet_collector {
|
||||
// new_input.extend_from_slice(myrand.choose(c).1);
|
||||
// }
|
||||
// for i in new_input.iter().enumerate() {
|
||||
// input.bytes_mut()[i.0]=*i.1;
|
||||
// }
|
||||
|
||||
// Ok(MutationResult::Mutated)
|
||||
// }
|
||||
|
||||
// fn post_exec(
|
||||
// &mut self,
|
||||
// _state: &mut S,
|
||||
// _stage_idx: i32,
|
||||
// _corpus_idx: Option<usize>
|
||||
// ) -> Result<(), Error> {
|
||||
// Ok(())
|
||||
// }
|
||||
// }
|
||||
|
||||
// impl<I, S> Named for RandGraphSnippetMutator<I, S>
|
||||
// where
|
||||
// I: Input + HasBytesVec,
|
||||
// S: HasRand + HasMetadata + HasCorpus<I> + HasSolutions<I>,
|
||||
// {
|
||||
// fn name(&self) -> &str {
|
||||
// "RandGraphSnippetMutator"
|
||||
// }
|
||||
// }
|
||||
// //=============================== Snippets
|
||||
// pub struct RandInputSnippetMutator<I, S>
|
||||
// where
|
||||
// I: Input + HasBytesVec,
|
||||
// S: HasRand + HasMetadata + HasCorpus<I> + HasSolutions<I>,
|
||||
// {
|
||||
// phantom: PhantomData<(I, S)>,
|
||||
// }
|
||||
// impl<I, S> RandInputSnippetMutator<I, S>
|
||||
// where
|
||||
// I: Input + HasBytesVec,
|
||||
// S: HasRand + HasMetadata + HasCorpus<I> + HasSolutions<I>,
|
||||
// {
|
||||
// pub fn new() -> Self {
|
||||
// RandInputSnippetMutator{phantom: PhantomData}
|
||||
// }
|
||||
// }
|
||||
// impl<I, S> Mutator<I, S> for RandInputSnippetMutator<I, S>
|
||||
// where
|
||||
// I: Input + HasBytesVec,
|
||||
// S: HasRand + HasMetadata + HasCorpus<I> + HasSolutions<I>,
|
||||
// {
|
||||
// fn mutate(
|
||||
// &mut self,
|
||||
// state: &mut S,
|
||||
// input: &mut I,
|
||||
// _stage_idx: i32
|
||||
// ) -> Result<MutationResult, Error>
|
||||
// {
|
||||
// // need our own random generator, because borrowing rules
|
||||
// let mut myrand = StdRand::new();
|
||||
// let tmp = &mut state.rand_mut();
|
||||
// myrand.set_seed(tmp.next());
|
||||
// drop(tmp);
|
||||
|
||||
// let feedbackstate = state
|
||||
// .feedback_states()
|
||||
// .match_name::<SysGraphFeedbackState>("SysMap")
|
||||
// .unwrap();
|
||||
// let g = &feedbackstate.graph;
|
||||
// let tmp = state.metadata().get::<SysGraphMetadata>();
|
||||
// if tmp.is_none() { // if there are no metadata it was probably not interesting anyways
|
||||
// return Ok(MutationResult::Skipped);
|
||||
// }
|
||||
// let trace = tmp.expect("SysGraphMetadata not found");
|
||||
|
||||
// let mut collection : Vec<Vec<u8>> = Vec::new();
|
||||
// let mut current_pointer : usize = 0;
|
||||
// for t in &trace.inner {
|
||||
// let node = &g[*t];
|
||||
// for v in &node.variants {
|
||||
// if v.input == input.bytes() {
|
||||
// if v.input_counter > current_pointer.try_into().unwrap() {
|
||||
// collection.push(v.input[current_pointer..v.input_counter as usize].to_owned());
|
||||
// current_pointer = v.input_counter as usize;
|
||||
// }
|
||||
// break;
|
||||
// }
|
||||
// }
|
||||
// }
|
||||
// let index_to_mutate = myrand.below(collection.len() as u64) as usize;
|
||||
// for i in 0..collection[index_to_mutate].len() {
|
||||
// collection[index_to_mutate][i] = myrand.below(0xFF) as u8;
|
||||
// }
|
||||
// for i in collection.concat().iter().enumerate() {
|
||||
// input.bytes_mut()[i.0]=*i.1;
|
||||
// }
|
||||
|
||||
// Ok(MutationResult::Mutated)
|
||||
// }
|
||||
|
||||
// fn post_exec(
|
||||
// &mut self,
|
||||
// _state: &mut S,
|
||||
// _stage_idx: i32,
|
||||
// _corpus_idx: Option<usize>
|
||||
// ) -> Result<(), Error> {
|
||||
// Ok(())
|
||||
// }
|
||||
// }
|
||||
|
||||
// impl<I, S> Named for RandInputSnippetMutator<I, S>
|
||||
// where
|
||||
// I: Input + HasBytesVec,
|
||||
// S: HasRand + HasMetadata + HasCorpus<I> + HasSolutions<I>,
|
||||
// {
|
||||
// fn name(&self) -> &str {
|
||||
// "RandInputSnippetMutator"
|
||||
// }
|
||||
// }
|
||||
// //=============================== Suffix
|
||||
// pub struct RandGraphSuffixMutator<I, S>
|
||||
// where
|
||||
// I: Input + HasBytesVec,
|
||||
// S: HasRand + HasMetadata + HasCorpus<I> + HasSolutions<I>,
|
||||
// {
|
||||
// phantom: PhantomData<(I, S)>,
|
||||
// }
|
||||
// impl<I, S> RandGraphSuffixMutator<I, S>
|
||||
// where
|
||||
// I: Input + HasBytesVec,
|
||||
// S: HasRand + HasMetadata + HasCorpus<I> + HasSolutions<I>,
|
||||
// {
|
||||
// pub fn new() -> Self {
|
||||
// RandGraphSuffixMutator{phantom: PhantomData}
|
||||
// }
|
||||
// }
|
||||
// impl<I, S> Mutator<I, S> for RandGraphSuffixMutator<I, S>
|
||||
// where
|
||||
// I: Input + HasBytesVec,
|
||||
// S: HasRand + HasMetadata + HasCorpus<I> + HasSolutions<I>,
|
||||
// {
|
||||
// fn mutate(
|
||||
// &mut self,
|
||||
// state: &mut S,
|
||||
// input: &mut I,
|
||||
// _stage_idx: i32
|
||||
// ) -> Result<MutationResult, Error>
|
||||
// {
|
||||
// // need our own random generator, because borrowing rules
|
||||
// let mut myrand = StdRand::new();
|
||||
// let tmp = &mut state.rand_mut();
|
||||
// myrand.set_seed(tmp.next());
|
||||
// drop(tmp);
|
||||
|
||||
// let feedbackstate = state
|
||||
// .feedback_states()
|
||||
// .match_name::<SysGraphFeedbackState>("SysMap")
|
||||
// .unwrap();
|
||||
// let g = &feedbackstate.graph;
|
||||
// let tmp = state.metadata().get::<SysGraphMetadata>();
|
||||
// if tmp.is_none() { // if there are no metadata it was probably not interesting anyways
|
||||
// return Ok(MutationResult::Skipped);
|
||||
// }
|
||||
// let trace =tmp.expect("SysGraphMetadata not found");
|
||||
// // follow the path, extract snippets from last reads, find common snippets.
|
||||
// // those are likely key parts. choose random parts from other sibling traces
|
||||
// let inp_c_end = g[*trace.inner.last().unwrap()].base.input_counter;
|
||||
// let mut num_to_reverse = myrand.below(trace.inner.len().try_into().unwrap());
|
||||
// for t in trace.inner.iter().rev() {
|
||||
// let int_c_prefix = g[*t].base.input_counter;
|
||||
// if int_c_prefix < inp_c_end {
|
||||
// num_to_reverse-=1;
|
||||
// if num_to_reverse<=0 {
|
||||
// let mut new_input=input.bytes()[..(int_c_prefix as usize)].to_vec();
|
||||
// let mut ext : Vec<u8> = (int_c_prefix..inp_c_end).map(|_| myrand.next().to_le_bytes()).flatten().collect();
|
||||
// new_input.append(&mut ext);
|
||||
// for i in new_input.iter().enumerate() {
|
||||
// if input.bytes_mut().len()>i.0 {
|
||||
// input.bytes_mut()[i.0]=*i.1;
|
||||
// }
|
||||
// else { break };
|
||||
// }
|
||||
// break;
|
||||
// }
|
||||
// }
|
||||
// }
|
||||
// Ok(MutationResult::Mutated)
|
||||
// }
|
||||
|
||||
// fn post_exec(
|
||||
// &mut self,
|
||||
// _state: &mut S,
|
||||
// _stage_idx: i32,
|
||||
// _corpus_idx: Option<usize>
|
||||
// ) -> Result<(), Error> {
|
||||
// Ok(())
|
||||
// }
|
||||
// }
|
||||
|
||||
// impl<I, S> Named for RandGraphSuffixMutator<I, S>
|
||||
// where
|
||||
// I: Input + HasBytesVec,
|
||||
// S: HasRand + HasMetadata + HasCorpus<I> + HasSolutions<I>,
|
||||
// {
|
||||
// fn name(&self) -> &str {
|
||||
// "RandGraphSuffixMutator"
|
||||
// }
|
||||
// }
|
@ -1,209 +0,0 @@
|
||||
use std::cell::UnsafeCell;
|
||||
use std::io::Write;
|
||||
use std::ops::Range;
|
||||
use libafl::prelude::UsesInput;
|
||||
use libafl_qemu::Emulator;
|
||||
use libafl_qemu::GuestAddr;
|
||||
use libafl_qemu::QemuHooks;
|
||||
use libafl_qemu::edges::QemuEdgesMapMetadata;
|
||||
use libafl_qemu::emu;
|
||||
use libafl_qemu::hooks;
|
||||
use crate::systemstate::RawFreeRTOSSystemState;
|
||||
use crate::systemstate::CURRENT_SYSTEMSTATE_VEC;
|
||||
use crate::systemstate::NUM_PRIOS;
|
||||
use super::freertos::TCB_t;
|
||||
use super::freertos::rtos_struct::List_Item_struct;
|
||||
use super::freertos::rtos_struct::*;
|
||||
use super::freertos;
|
||||
|
||||
use libafl_qemu::{
|
||||
helper::{QemuHelper, QemuHelperTuple},
|
||||
// edges::SAVED_JUMP,
|
||||
};
|
||||
|
||||
//============================= Struct definitions
|
||||
|
||||
pub static mut INTR_OFFSET : Option<u64> = None;
|
||||
pub static mut INTR_DONE : bool = true;
|
||||
|
||||
// only used when inputs are injected
|
||||
pub static mut NEXT_INPUT : Vec<u8> = Vec::new();
|
||||
|
||||
//============================= Qemu Helper
|
||||
|
||||
/// A QEMU helper which reads FreeRTOS-specific structs from QEMU whenever certain syscalls occur, and can also inject inputs
|
||||
#[derive(Debug)]
|
||||
pub struct QemuSystemStateHelper {
|
||||
kerneladdr: u32,
|
||||
tcb_addr: u32,
|
||||
ready_queues: u32,
|
||||
input_counter: Option<u64>,
|
||||
app_range: Range<u32>,
|
||||
}
|
||||
|
||||
impl QemuSystemStateHelper {
|
||||
#[must_use]
|
||||
pub fn new(
|
||||
kerneladdr: u32,
|
||||
tcb_addr: u32,
|
||||
ready_queues: u32,
|
||||
input_counter: Option<u64>,
|
||||
app_range: Range<u32>,
|
||||
) -> Self {
|
||||
QemuSystemStateHelper {
|
||||
kerneladdr,
|
||||
tcb_addr: tcb_addr,
|
||||
ready_queues: ready_queues,
|
||||
input_counter: input_counter,
|
||||
app_range,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<S> QemuHelper<S> for QemuSystemStateHelper
|
||||
where
|
||||
S: UsesInput,
|
||||
{
|
||||
fn first_exec<QT>(&self, _hooks: &QemuHooks<'_, QT, S>)
|
||||
where
|
||||
QT: QemuHelperTuple<S>,
|
||||
{
|
||||
_hooks.instruction(self.kerneladdr, exec_syscall_hook::<QT, S>, false);
|
||||
#[cfg(feature = "trace_abbs")]
|
||||
_hooks.jmps(Some(gen_jmp_is_syscall::<QT, S>), Some(trace_api_call::<QT, S>));
|
||||
}
|
||||
|
||||
// TODO: refactor duplicate code
|
||||
fn pre_exec(&mut self, _emulator: &Emulator, _input: &S::Input) {
|
||||
unsafe {
|
||||
CURRENT_SYSTEMSTATE_VEC.clear();
|
||||
let p = LAST_API_CALL.with(|x| x.get());
|
||||
*p = None;
|
||||
}
|
||||
}
|
||||
|
||||
fn post_exec(&mut self, emulator: &Emulator, _input: &S::Input) {
|
||||
trigger_collection(emulator, self)
|
||||
}
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn trigger_collection(emulator: &Emulator, h: &QemuSystemStateHelper) {
|
||||
let listbytes : u32 = u32::try_from(std::mem::size_of::<freertos::List_t>()).unwrap();
|
||||
let mut systemstate = RawFreeRTOSSystemState::default();
|
||||
unsafe {
|
||||
// TODO: investigate why can_do_io is not set sometimes, as this is just a workaround
|
||||
let c = emulator.cpu_from_index(0);
|
||||
let can_do_io = (*c.raw_ptr()).can_do_io;
|
||||
(*c.raw_ptr()).can_do_io = 1;
|
||||
systemstate.qemu_tick = emu::icount_get_raw();
|
||||
(*c.raw_ptr()).can_do_io = can_do_io;
|
||||
}
|
||||
let mut buf : [u8; 4] = [0,0,0,0];
|
||||
match h.input_counter {
|
||||
Some(s) => unsafe { emulator.read_phys_mem(s, &mut buf); },
|
||||
None => (),
|
||||
};
|
||||
systemstate.input_counter = u32::from_le_bytes(buf);
|
||||
|
||||
let curr_tcb_addr : freertos::void_ptr = freertos::emu_lookup::lookup(emulator, h.tcb_addr);
|
||||
if curr_tcb_addr == 0 {
|
||||
return;
|
||||
};
|
||||
systemstate.current_tcb = freertos::emu_lookup::lookup(emulator,curr_tcb_addr);
|
||||
|
||||
unsafe {
|
||||
LAST_API_CALL.with(|x|
|
||||
match *x.get() {
|
||||
Some(s) => {
|
||||
systemstate.last_pc = Some(s.0 as u64);
|
||||
},
|
||||
None => (),
|
||||
}
|
||||
);
|
||||
}
|
||||
// println!("{:?}",std::str::from_utf8(¤t_tcb.pcTaskName));
|
||||
|
||||
for i in 0..NUM_PRIOS {
|
||||
let target : u32 = listbytes*u32::try_from(i).unwrap()+h.ready_queues;
|
||||
systemstate.prio_ready_lists[i] = freertos::emu_lookup::lookup(emulator, target);
|
||||
// println!("List at {}: {:?}",target, systemstate.prio_ready_lists[i]);
|
||||
let mut next_index = systemstate.prio_ready_lists[i].pxIndex;
|
||||
for _j in 0..systemstate.prio_ready_lists[i].uxNumberOfItems {
|
||||
// always jump over the xListEnd marker
|
||||
if (target..target+listbytes).contains(&next_index) {
|
||||
let next_item : freertos::MiniListItem_t = freertos::emu_lookup::lookup(emulator, next_index);
|
||||
let new_next_index=next_item.pxNext;
|
||||
systemstate.dumping_ground.insert(next_index,List_MiniItem_struct(next_item));
|
||||
next_index = new_next_index;
|
||||
}
|
||||
let next_item : freertos::ListItem_t = freertos::emu_lookup::lookup(emulator, next_index);
|
||||
// println!("Item at {}: {:?}",next_index,next_item);
|
||||
assert_eq!(next_item.pvContainer,target);
|
||||
let new_next_index=next_item.pxNext;
|
||||
let next_tcb : TCB_t= freertos::emu_lookup::lookup(emulator,next_item.pvOwner);
|
||||
// println!("TCB at {}: {:?}",next_item.pvOwner,next_tcb);
|
||||
systemstate.dumping_ground.insert(next_item.pvOwner,TCB_struct(next_tcb.clone()));
|
||||
systemstate.dumping_ground.insert(next_index,List_Item_struct(next_item));
|
||||
next_index=new_next_index;
|
||||
}
|
||||
// Handle edge case where the end marker was not included yet
|
||||
if (target..target+listbytes).contains(&next_index) {
|
||||
let next_item : freertos::MiniListItem_t = freertos::emu_lookup::lookup(emulator, next_index);
|
||||
systemstate.dumping_ground.insert(next_index,List_MiniItem_struct(next_item));
|
||||
}
|
||||
}
|
||||
|
||||
unsafe { CURRENT_SYSTEMSTATE_VEC.push(systemstate); }
|
||||
}
|
||||
|
||||
pub fn exec_syscall_hook<QT, S>(
|
||||
hooks: &mut QemuHooks<'_, QT, S>,
|
||||
_state: Option<&mut S>,
|
||||
_pc: u32,
|
||||
)
|
||||
where
|
||||
S: UsesInput,
|
||||
QT: QemuHelperTuple<S>,
|
||||
{
|
||||
let emulator = hooks.emulator();
|
||||
let h = hooks.helpers().match_first_type::<QemuSystemStateHelper>().expect("QemuSystemHelper not found in helper tupel");
|
||||
trigger_collection(emulator, h);
|
||||
}
|
||||
|
||||
thread_local!(static LAST_API_CALL : UnsafeCell<Option<(GuestAddr,GuestAddr)>> = UnsafeCell::new(None));
|
||||
|
||||
pub fn gen_jmp_is_syscall<QT, S>(
|
||||
hooks: &mut QemuHooks<'_, QT, S>,
|
||||
_state: Option<&mut S>,
|
||||
src: GuestAddr,
|
||||
dest: GuestAddr,
|
||||
) -> Option<u64>
|
||||
where
|
||||
S: UsesInput,
|
||||
QT: QemuHelperTuple<S>,
|
||||
{
|
||||
if let Some(h) = hooks.helpers().match_first_type::<QemuSystemStateHelper>() {
|
||||
if h.app_range.contains(&src) && !h.app_range.contains(&dest) {
|
||||
// println!("New jmp {:x} {:x}", src, dest);
|
||||
return Some(1);
|
||||
}
|
||||
}
|
||||
return None;
|
||||
}
|
||||
|
||||
pub fn trace_api_call<QT, S>(
|
||||
_hooks: &mut QemuHooks<'_, QT, S>,
|
||||
_state: Option<&mut S>,
|
||||
src: GuestAddr, dest: GuestAddr, id: u64
|
||||
)
|
||||
where
|
||||
S: UsesInput,
|
||||
QT: QemuHelperTuple<S>,
|
||||
{
|
||||
unsafe {
|
||||
let p = LAST_API_CALL.with(|x| x.get());
|
||||
*p = Some((src,dest));
|
||||
// print!("*");
|
||||
}
|
||||
}
|
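For context, the helper above is built from guest addresses: a kernel address that gets an instruction hook, the location of the current-TCB pointer, the base of the ready lists, an optional input-counter cell, and the application code range. A hedged sketch of constructing it (all addresses below are made-up placeholders, and how the helper is registered with the QemuHooks tuple depends on the libafl_qemu version in use):

// Hypothetical addresses, in practice taken from the target's linker map or ELF symbols.
let helper = QemuSystemStateHelper::new(
    0x0800_1234,              // kerneladdr: instruction address hooked for state collection
    0x2000_0010,              // tcb_addr: address of the current-TCB pointer (presumably pxCurrentTCB)
    0x2000_0100,              // ready_queues: base of the priority ready lists
    None,                     // input_counter: no counter cell in this sketch
    0x0800_8000..0x0802_0000, // app_range: guest addresses treated as application code
);
// The helper would then go into the QemuHooks helper tuple next to the other
// QemuHelpers; the exact registration call is version-dependent and not shown here.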
@@ -1,167 +0,0 @@
//! systemstate refers to the state of a FreeRTOS fuzzing target
use std::collections::hash_map::DefaultHasher;
use libafl::bolts::HasRefCnt;
use libafl::bolts::AsSlice;
use std::hash::Hasher;
use std::hash::Hash;
use hashbrown::HashMap;
use serde::{Deserialize, Serialize};

use freertos::TCB_t;

pub mod freertos;
pub mod helpers;
pub mod observers;
pub mod feedbacks;
pub mod graph;
pub mod schedulers;

// #[cfg(feature = "fuzz_interrupt")]
// pub const IRQ_INPUT_BYTES_NUMBER : u32 = 2; // Offset for interrupt bytes
// #[cfg(not(feature = "fuzz_interrupt"))]
// pub const IRQ_INPUT_BYTES_NUMBER : u32 = 0; // Offset for interrupt bytes
// pub const IRQ_INPUT_OFFSET : u32 = 347780; // Tick offset for app code start

// Constants
const NUM_PRIOS: usize = 5;

//============================= Struct definitions
/// Raw info dump from QEMU
#[derive(Debug, Default, Serialize, Deserialize)]
pub struct RawFreeRTOSSystemState {
    qemu_tick: u64,
    current_tcb: TCB_t,
    prio_ready_lists: [freertos::List_t; NUM_PRIOS],
    dumping_ground: HashMap<u32,freertos::rtos_struct>,
    input_counter: u32,
    last_pc: Option<u64>,
}
/// List of system state dumps from QemuHelpers
static mut CURRENT_SYSTEMSTATE_VEC: Vec<RawFreeRTOSSystemState> = vec![];

/// A reduced version of freertos::TCB_t
#[derive(Debug, Default, Serialize, Deserialize, Clone, PartialEq)]
pub struct RefinedTCB {
    pub task_name: String,
    pub priority: u32,
    pub base_priority: u32,
    mutexes_held: u32,
    notify_value: u32,
    notify_state: u8,
}

impl Hash for RefinedTCB {
    fn hash<H: Hasher>(&self, state: &mut H) {
        self.task_name.hash(state);
        self.priority.hash(state);
        self.mutexes_held.hash(state);
        #[cfg(not(feature = "no_hash_state"))]
        self.notify_state.hash(state);
        // self.notify_value.hash(state);
    }
}

impl RefinedTCB {
    pub fn from_tcb(input: &TCB_t) -> Self {
        unsafe {
            let tmp = std::mem::transmute::<[i8; 10],[u8; 10]>(input.pcTaskName);
            let name : String = std::str::from_utf8(&tmp).expect("TCB name was not utf8").chars().filter(|x| *x != '\0').collect::<String>();
            Self {
                task_name: name,
                priority: input.uxPriority,
                base_priority: input.uxBasePriority,
                mutexes_held: input.uxMutexesHeld,
                notify_value: input.ulNotifiedValue[0],
                notify_state: input.ucNotifyState[0],
            }
        }
    }
    pub fn from_tcb_owned(input: TCB_t) -> Self {
        unsafe {
            let tmp = std::mem::transmute::<[i8; 10],[u8; 10]>(input.pcTaskName);
            let name : String = std::str::from_utf8(&tmp).expect("TCB name was not utf8").chars().filter(|x| *x != '\0').collect::<String>();
            Self {
                task_name: name,
                priority: input.uxPriority,
                base_priority: input.uxBasePriority,
                mutexes_held: input.uxMutexesHeld,
                notify_value: input.ulNotifiedValue[0],
                notify_state: input.ucNotifyState[0],
            }
        }
    }
}

/// Refined information about the states an execution transitioned between
#[derive(Debug, Default, Serialize, Deserialize, Clone)]
pub struct RefinedFreeRTOSSystemState {
    pub start_tick: u64,
    pub end_tick: u64,
    last_pc: Option<u64>,
    input_counter: u32,
    pub current_task: RefinedTCB,
    ready_list_after: Vec<RefinedTCB>,
}
impl PartialEq for RefinedFreeRTOSSystemState {
    fn eq(&self, other: &Self) -> bool {
        self.current_task == other.current_task && self.ready_list_after == other.ready_list_after &&
        self.last_pc == other.last_pc
    }
}

impl Hash for RefinedFreeRTOSSystemState {
    fn hash<H: Hasher>(&self, state: &mut H) {
        self.current_task.hash(state);
        self.ready_list_after.hash(state);
        // self.last_pc.hash(state);
    }
}
impl RefinedFreeRTOSSystemState {
    fn get_time(&self) -> u64 {
        self.end_tick-self.start_tick
    }
}

// Wrapper around Vec<RefinedFreeRTOSSystemState> to attach as Metadata
#[derive(Debug, Default, Serialize, Deserialize, Clone)]
pub struct FreeRTOSSystemStateMetadata {
    pub inner: Vec<RefinedFreeRTOSSystemState>,
    trace_length: usize,
    indices: Vec<usize>, // Hashed enumeration of States
    tcref: isize,
}
impl FreeRTOSSystemStateMetadata {
    pub fn new(inner: Vec<RefinedFreeRTOSSystemState>) -> Self {
        let tmp = inner.iter().enumerate().map(|x| compute_hash(x) as usize).collect();
        Self {trace_length: inner.len(), inner, indices: tmp, tcref: 0}
    }
}
pub fn compute_hash<T>(obj: T) -> u64
where
    T: Hash
{
    let mut s = DefaultHasher::new();
    obj.hash(&mut s);
    s.finish()
}

impl AsSlice for FreeRTOSSystemStateMetadata {
    /// Convert the slice of system-states to a slice of hashes over enumerated states
    fn as_slice(&self) -> &[usize] {
        self.indices.as_slice()
    }

    type Entry = usize;
}

impl HasRefCnt for FreeRTOSSystemStateMetadata {
    fn refcnt(&self) -> isize {
        self.tcref
    }

    fn refcnt_mut(&mut self) -> &mut isize {
        &mut self.tcref
    }
}

libafl::impl_serdeany!(FreeRTOSSystemStateMetadata);
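The metadata wrapper above turns a refined trace into per-state hashes at construction time, and exposes them through AsSlice so map-style feedbacks and schedulers can compare traces. A small sketch of how it might be built inside this module (the `trace` variable is an assumption; in practice it would come from a QemuSystemStateObserver after a run):

use libafl::bolts::AsSlice;

// Assumed to be cloned out of an observer's `last_run` after an execution.
let trace: Vec<RefinedFreeRTOSSystemState> = Vec::new();
let meta = FreeRTOSSystemStateMetadata::new(trace);
// `meta.inner` keeps the refined states; the hashed enumeration is what
// novelty-style comparisons operate on.
let hashes: &[usize] = meta.as_slice();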
@@ -1,133 +0,0 @@
// use crate::systemstate::IRQ_INPUT_BYTES_NUMBER;
use libafl::prelude::{ExitKind, AsSlice};
use libafl::{inputs::HasTargetBytes, prelude::UsesInput};
use libafl::bolts::HasLen;
use libafl::bolts::tuples::Named;
use libafl::Error;
use libafl::observers::Observer;
use hashbrown::HashMap;
use serde::{Deserialize, Serialize};

use super::{
    CURRENT_SYSTEMSTATE_VEC,
    RawFreeRTOSSystemState,
    RefinedTCB,
    RefinedFreeRTOSSystemState,
    freertos::{List_t, TCB_t, rtos_struct, rtos_struct::*},
};

//============================= Observer

/// The QemuSystemStateObserver retrieves the system state
/// that will get updated by the target.
#[derive(Serialize, Deserialize, Debug, Default)]
#[allow(clippy::unsafe_derive_deserialize)]
pub struct QemuSystemStateObserver
{
    pub last_run: Vec<RefinedFreeRTOSSystemState>,
    pub last_input: Vec<u8>,
    name: String,
}

impl<S> Observer<S> for QemuSystemStateObserver
where
    S: UsesInput,
    S::Input : HasTargetBytes,
{
    #[inline]
    fn pre_exec(&mut self, _state: &mut S, _input: &S::Input) -> Result<(), Error> {
        unsafe { CURRENT_SYSTEMSTATE_VEC.clear(); }
        Ok(())
    }

    #[inline]
    fn post_exec(&mut self, _state: &mut S, _input: &S::Input, _exit_kind: &ExitKind) -> Result<(), Error> {
        unsafe { self.last_run = refine_system_states(&mut CURRENT_SYSTEMSTATE_VEC); }
        self.last_input = _input.target_bytes().as_slice().to_owned();
        Ok(())
    }
}

impl Named for QemuSystemStateObserver
{
    #[inline]
    fn name(&self) -> &str {
        self.name.as_str()
    }
}

impl HasLen for QemuSystemStateObserver
{
    #[inline]
    fn len(&self) -> usize {
        self.last_run.len()
    }
}

impl QemuSystemStateObserver {
    pub fn new() -> Self {
        Self {last_run: vec![], last_input: vec![], name: "systemstate".to_string()}
    }
}

//============================= Parsing helpers

/// Parse a List_t containing TCB_t into Vec<TCB_t> from cache. Consumes the elements from cache
fn tcb_list_to_vec_cached(list: List_t, dump: &mut HashMap<u32,rtos_struct>) -> Vec<TCB_t>
{
    let mut ret : Vec<TCB_t> = Vec::new();
    if list.uxNumberOfItems == 0 {return ret;}
    let last_list_item = match dump.remove(&list.pxIndex).expect("List_t entry was not in Hashmap") {
        List_Item_struct(li) => li,
        List_MiniItem_struct(mli) => match dump.remove(&mli.pxNext).expect("MiniListItem pointer invalid") {
            List_Item_struct(li) => li,
            _ => panic!("MiniListItem of a non empty List does not point to ListItem"),
        },
        _ => panic!("List_t entry was not a ListItem"),
    };
    let mut next_index = last_list_item.pxNext;
    let last_tcb = match dump.remove(&last_list_item.pvOwner).expect("ListItem Owner not in Hashmap") {
        TCB_struct(t) => t,
        _ => panic!("List content does not equal type"),
    };
    for _ in 0..list.uxNumberOfItems-1 {
        let next_list_item = match dump.remove(&next_index).expect("List_t entry was not in Hashmap") {
            List_Item_struct(li) => li,
            List_MiniItem_struct(mli) => match dump.remove(&mli.pxNext).expect("MiniListItem pointer invalid") {
                List_Item_struct(li) => li,
                _ => panic!("MiniListItem of a non empty List does not point to ListItem"),
            },
            _ => panic!("List_t entry was not a ListItem"),
        };
        match dump.remove(&next_list_item.pvOwner).expect("ListItem Owner not in Hashmap") {
            TCB_struct(t) => {ret.push(t)},
            _ => panic!("List content does not equal type"),
        }
        next_index = next_list_item.pxNext;
    }
    ret.push(last_tcb);
    ret
}
/// Drains a List of raw SystemStates to produce a refined trace
fn refine_system_states(input: &mut Vec<RawFreeRTOSSystemState>) -> Vec<RefinedFreeRTOSSystemState> {
    let mut ret = Vec::<RefinedFreeRTOSSystemState>::new();
    let mut start_tick : u64 = 0;
    for mut i in input.drain(..) {
        let mut collector = Vec::<RefinedTCB>::new();
        for j in i.prio_ready_lists.into_iter().rev() {
            let mut tmp = tcb_list_to_vec_cached(j, &mut i.dumping_ground).iter().map(|x| RefinedTCB::from_tcb(x)).collect();
            collector.append(&mut tmp);
        }
        ret.push(RefinedFreeRTOSSystemState {
            current_task: RefinedTCB::from_tcb_owned(i.current_tcb),
            start_tick,
            end_tick: i.qemu_tick,
            ready_list_after: collector,
            input_counter: i.input_counter, //+IRQ_INPUT_BYTES_NUMBER,
            last_pc: i.last_pc,
        });
        start_tick = i.qemu_tick;
    }
    return ret;
}
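The observer above only needs to be constructed and added to the observer tuple; after each execution its public `last_run` field holds the refined trace and `last_input` the bytes that produced it. A hedged usage sketch (the executor run itself is omitted):

let observer = QemuSystemStateObserver::new(); // registers under the name "systemstate"
// ... one execution through the QEMU executor happens here (not shown) ...
for s in &observer.last_run {
    // start_tick/end_tick and current_task are public on RefinedFreeRTOSSystemState.
    println!("{} ran from tick {} to {}", s.current_task.task_name, s.start_tick, s.end_tick);
}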
@@ -1,267 +0,0 @@
//! The Minimizer schedulers are a family of corpus schedulers that feed the fuzzer
//! with testcases only from a subset of the total corpus.

use core::{marker::PhantomData};
use std::{cmp::{max, min}, mem::swap, borrow::BorrowMut};

use serde::{Deserialize, Serialize};

use libafl::{
    bolts::{rands::Rand, serdeany::SerdeAny, AsSlice, HasRefCnt},
    corpus::{Corpus, Testcase},
    inputs::UsesInput,
    schedulers::{Scheduler, TestcaseScore, minimizer::DEFAULT_SKIP_NON_FAVORED_PROB },
    state::{HasCorpus, HasMetadata, HasRand, UsesState, State},
    Error, SerdeAny, prelude::HasLen,
};

use crate::worst::MaxTimeFavFactor;

use super::FreeRTOSSystemStateMetadata;

/// A state metadata holding the length of the longest trace seen so far
#[derive(Debug, Serialize, Deserialize, SerdeAny, Default)]
pub struct LongestTracesMetadata {
    /// length of the longest observed system-state trace
    pub max_trace_length: usize,
}

impl LongestTracesMetadata {
    fn new(l : usize) -> Self {
        Self {max_trace_length: l}
    }
}

/// The [`LongestTraceScheduler`] wraps a base [`Scheduler`] and prefers
/// [`Testcase`]`s` with long system-state traces, probabilistically skipping
/// entries whose traces are short relative to the longest trace seen so far
#[derive(Debug, Clone)]
pub struct LongestTraceScheduler<CS> {
    base: CS,
    skip_non_favored_prob: u64,
}

impl<CS> UsesState for LongestTraceScheduler<CS>
where
    CS: UsesState,
{
    type State = CS::State;
}

impl<CS> Scheduler for LongestTraceScheduler<CS>
where
    CS: Scheduler,
    CS::State: HasCorpus + HasMetadata + HasRand,
{
    /// Add an entry to the corpus and return its index
    fn on_add(&self, state: &mut CS::State, idx: usize) -> Result<(), Error> {
        let l = state.corpus()
            .get(idx)?
            .borrow()
            .metadata()
            .get::<FreeRTOSSystemStateMetadata>().map_or(0, |x| x.trace_length);
        self.get_update_trace_length(state, l);
        self.base.on_add(state, idx)
    }

    /// Replaces the testcase at the given idx
    fn on_replace(
        &self,
        state: &mut CS::State,
        idx: usize,
        testcase: &Testcase<<CS::State as UsesInput>::Input>,
    ) -> Result<(), Error> {
        let l = state.corpus()
            .get(idx)?
            .borrow()
            .metadata()
            .get::<FreeRTOSSystemStateMetadata>().map_or(0, |x| x.trace_length);
        self.get_update_trace_length(state, l);
        self.base.on_replace(state, idx, testcase)
    }

    /// Removes an entry from the corpus, returning M if M was present.
    fn on_remove(
        &self,
        state: &mut CS::State,
        idx: usize,
        testcase: &Option<Testcase<<CS::State as UsesInput>::Input>>,
    ) -> Result<(), Error> {
        self.base.on_remove(state, idx, testcase)?;
        Ok(())
    }

    /// Gets the next entry
    fn next(&self, state: &mut CS::State) -> Result<usize, Error> {
        let mut idx = self.base.next(state)?;
        while {
            let l = state.corpus()
                .get(idx)?
                .borrow()
                .metadata()
                .get::<FreeRTOSSystemStateMetadata>().map_or(0, |x| x.trace_length);
            let m = self.get_update_trace_length(state, l);
            state.rand_mut().below(m) > l as u64
        } && state.rand_mut().below(100) < self.skip_non_favored_prob
        {
            idx = self.base.next(state)?;
        }
        Ok(idx)
    }
}

impl<CS> LongestTraceScheduler<CS>
where
    CS: Scheduler,
    CS::State: HasCorpus + HasMetadata + HasRand,
{
    pub fn get_update_trace_length(&self, state: &mut CS::State, par: usize) -> u64 {
        // Create a new top rated meta if not existing
        if let Some(td) = state.metadata_mut().get_mut::<LongestTracesMetadata>() {
            let m = max(td.max_trace_length, par);
            td.max_trace_length = m;
            m as u64
        } else {
            state.add_metadata(LongestTracesMetadata::new(par));
            par as u64
        }
    }
    pub fn new(base: CS) -> Self {
        Self {
            base,
            skip_non_favored_prob: DEFAULT_SKIP_NON_FAVORED_PROB,
        }
    }
}

//==========================================================================================

/// A state metadata holding the current and next generation of testcases
#[derive(Debug, Serialize, Deserialize, SerdeAny, Default)]
pub struct GeneticMetadata {
    pub current_gen: Vec<(usize, f64)>,
    pub current_cursor: usize,
    pub next_gen: Vec<(usize, f64)>,
    pub gen: usize
}

impl GeneticMetadata {
    fn new(current_gen: Vec<(usize, f64)>, next_gen: Vec<(usize, f64)>) -> Self {
        Self {current_gen, current_cursor: 0, next_gen, gen: 0}
    }
}

#[derive(Debug, Clone)]
pub struct GenerationScheduler<S> {
    phantom: PhantomData<S>,
    gen_size: usize,
}

impl<S> UsesState for GenerationScheduler<S>
where
    S: UsesInput,
{
    type State = S;
}

impl<S> Scheduler for GenerationScheduler<S>
where
    S: HasCorpus + HasMetadata,
    S::Input: HasLen,
{
    /// get first element in current gen,
    /// if current_gen is empty, swap lists, sort by FavFactor, take top k and return first
    fn next(&self, state: &mut Self::State) -> Result<usize, Error> {
        let mut to_remove : Vec<(usize, f64)> = vec![];
        let mut to_return : usize = 0;
        let c = state.corpus().count();
        let gm = state.metadata_mut().get_mut::<GeneticMetadata>().expect("Corpus Scheduler empty");
        // println!("index: {} curr: {:?} next: {:?} gen: {} corp: {}", gm.current_cursor, gm.current_gen.len(), gm.next_gen.len(), gm.gen,
        // c);
        match gm.current_gen.get(gm.current_cursor) {
            Some(c) => {
                gm.current_cursor += 1;
                // println!("normal next: {}", (*c).0);
                return Ok((*c).0)
            },
            None => {
                swap(&mut to_remove, &mut gm.current_gen);
                swap(&mut gm.next_gen, &mut gm.current_gen);
                gm.current_gen.sort_by(|a, b| a.1.partial_cmp(&b.1).unwrap());
                // gm.current_gen.reverse();
                if gm.current_gen.len() == 0 {panic!("Corpus is empty");}
                let d : Vec<(usize, f64)> = gm.current_gen.drain(min(gm.current_gen.len(), self.gen_size)..).collect();
                to_remove.extend(d);
                // move all indices to the left, since all other indices will be deleted
                gm.current_gen.sort_by(|a,b| a.0.cmp(&(*b).0)); // in order of the corpus index
                for i in 0..gm.current_gen.len() {
                    gm.current_gen[i] = (i, gm.current_gen[i].1);
                }
                to_return = gm.current_gen.get(0).unwrap().0;
                gm.current_cursor = 1;
                gm.gen += 1;
            }
        };
        // removing these elements will move all indices left by to_remove.len()
        to_remove.sort_by(|x,y| x.0.cmp(&(*y).0));
        to_remove.reverse();
        for i in to_remove {
            state.corpus_mut().remove(i.0).unwrap();
        }
        // println!("switch next: {to_return}");
        return Ok(to_return);
    }

    /// Add the new input to the next generation
    fn on_add(
        &self,
        state: &mut Self::State,
        idx: usize
    ) -> Result<(), Error> {
        // println!("On Add {idx}");
        let mut tc = state.corpus_mut().get(idx).unwrap().borrow_mut().clone();
        let ff = MaxTimeFavFactor::compute(&mut tc, state).unwrap();
        if let Some(gm) = state.metadata_mut().get_mut::<GeneticMetadata>() {
            gm.next_gen.push((idx, ff));
        } else {
            state.add_metadata(GeneticMetadata::new(vec![], vec![(idx, ff)]));
        }
        Ok(())
    }
    fn on_replace(
        &self,
        _state: &mut Self::State,
        _idx: usize,
        _prev: &Testcase<<Self::State as UsesInput>::Input>
    ) -> Result<(), Error> {
        // println!("On Replace {_idx}");
        Ok(())
    }

    fn on_remove(
        &self,
        state: &mut Self::State,
        idx: usize,
        _testcase: &Option<Testcase<<Self::State as UsesInput>::Input>>
    ) -> Result<(), Error> {
        // println!("On Remove {idx}");
        if let Some(gm) = state.metadata_mut().get_mut::<GeneticMetadata>() {
            gm.next_gen = gm.next_gen.drain(..).into_iter().filter(|x| (*x).0 != idx).collect::<Vec<(usize, f64)>>();
            gm.current_gen = gm.current_gen.drain(..).into_iter().filter(|x| (*x).0 != idx).collect::<Vec<(usize, f64)>>();
        } else {
            state.add_metadata(GeneticMetadata::new(vec![], vec![]));
        }
        Ok(())
    }
}

impl<S> GenerationScheduler<S>
{
    pub fn new() -> Self {
        Self {
            phantom: PhantomData,
            gen_size: 100,
        }
    }
}
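Both schedulers above slot into the usual LibAFL fuzzer construction. A hedged sketch of picking one (the QueueScheduler base is an assumption; any base scheduler whose state carries the required metadata and rand would work):

use libafl::schedulers::QueueScheduler;

// Bias scheduling towards corpus entries with long system-state traces.
let scheduler = LongestTraceScheduler::new(QueueScheduler::new());

// Or run the simple generational scheme instead (scores new entries with MaxTimeFavFactor):
// let scheduler = GenerationScheduler::new();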
@@ -1,381 +0,0 @@
use core::fmt::Debug;
use core::cmp::Ordering::{Greater,Less,Equal};
use libafl::inputs::BytesInput;
use libafl::inputs::HasTargetBytes;
use libafl::feedbacks::MapIndexesMetadata;
use libafl::corpus::Testcase;
use libafl::prelude::{UsesInput, AsSlice};
use core::marker::PhantomData;
use libafl::schedulers::{MinimizerScheduler, TestcaseScore};
use std::path::PathBuf;
use std::fs;
use hashbrown::{HashMap};
use libafl::observers::ObserversTuple;
use libafl::executors::ExitKind;
use libafl::events::EventFirer;
use libafl::state::{HasClientPerfMonitor, HasCorpus, UsesState};
use libafl::inputs::Input;
use libafl::feedbacks::Feedback;
use libafl::state::HasMetadata;
use libafl_qemu::edges::QemuEdgesMapMetadata;
use libafl::observers::MapObserver;
use serde::{Deserialize, Serialize};
use std::cmp;

use libafl::{
    bolts::{
        tuples::Named,
        HasLen,
    },
    observers::Observer,
    Error,
};

use crate::clock::QemuClockObserver;
use crate::systemstate::FreeRTOSSystemStateMetadata;
//=========================== Scheduler

pub type TimeMaximizerCorpusScheduler<CS> =
    MinimizerScheduler<CS, MaxTimeFavFactor<<CS as UsesState>::State>, MapIndexesMetadata>;

/// Score a testcase with its negated execution time.
/// Used with a minimizing scheduler, this favors long-running testcases.
#[derive(Debug, Clone)]
pub struct MaxTimeFavFactor<S>
where
    S: HasCorpus + HasMetadata,
    S::Input: HasLen,
{
    phantom: PhantomData<S>,
}

impl<S> TestcaseScore<S> for MaxTimeFavFactor<S>
where
    S: HasCorpus + HasMetadata,
    S::Input: HasLen,
{
    fn compute(entry: &mut Testcase<<S as UsesInput>::Input>, state: &S) -> Result<f64, Error> {
        // TODO maybe enforce entry.exec_time().is_some()
        let et = entry.exec_time().expect("testcase.exec_time is needed for scheduler");
        let tns : i64 = et.as_nanos().try_into().expect("failed to convert time");
        Ok(-tns as f64)
    }
}

pub type LenTimeMaximizerCorpusScheduler<CS> =
    MinimizerScheduler<CS, MaxExecsLenFavFactor<<CS as UsesState>::State>, MapIndexesMetadata>;

pub type TimeStateMaximizerCorpusScheduler<CS> =
    MinimizerScheduler<CS, MaxTimeFavFactor<<CS as UsesState>::State>, FreeRTOSSystemStateMetadata>;

/// Multiply the achievable executions per hour with the testcase length.
#[derive(Debug, Clone)]
pub struct MaxExecsLenFavFactor<S>
where
    S: HasCorpus + HasMetadata,
    S::Input: HasLen,
{
    phantom: PhantomData<S>,
}

impl<S> TestcaseScore<S> for MaxExecsLenFavFactor<S>
where
    S: HasCorpus + HasMetadata,
    S::Input: HasLen,
{
    fn compute(entry: &mut Testcase<S::Input>, state: &S) -> Result<f64, Error> {
        let execs_per_hour = 3600.0/entry.exec_time().expect("testcase.exec_time is needed for scheduler").as_secs_f64();
        let execs_times_length_per_hour = execs_per_hour*entry.cached_len()? as f64;
        Ok(execs_times_length_per_hour)
    }
}

//===================================================================

/// A Feedback reporting if the Input consists of strictly decreasing bytes.
#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct SortedFeedback {
}

impl<S> Feedback<S> for SortedFeedback
where
    S: UsesInput + HasClientPerfMonitor,
    S::Input: HasTargetBytes,
{
    #[allow(clippy::wrong_self_convention)]
    fn is_interesting<EM, OT>(
        &mut self,
        _state: &mut S,
        _manager: &mut EM,
        _input: &S::Input,
        observers: &OT,
        _exit_kind: &ExitKind,
    ) -> Result<bool, Error>
    where
        EM: EventFirer<State = S>,
        OT: ObserversTuple<S>,
    {
        let t = _input.target_bytes();
        let tmp = t.as_slice();
        if tmp.len() < 32 {return Ok(false);}
        let tmp = Vec::<u8>::from(&tmp[0..32]);
        // tmp.reverse();
        if tmp.is_sorted_by(|a,b| match a.partial_cmp(b).unwrap_or(Less) {
            Less => Some(Greater),
            Equal => Some(Greater),
            Greater => Some(Less),
        }) {return Ok(true)};
        return Ok(false);
    }
}

impl Named for SortedFeedback {
    #[inline]
    fn name(&self) -> &str {
        "Sorted"
    }
}

impl SortedFeedback {
    /// Creates a new [`SortedFeedback`]
    #[must_use]
    pub fn new() -> Self {
        Self {}
    }
}

impl Default for SortedFeedback {
    fn default() -> Self {
        Self::new()
    }
}

//===================================================================
/// A Feedback which expects a certain minimum execution time
#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct ExecTimeReachedFeedback
{
    target_time: u64,
}

impl<S> Feedback<S> for ExecTimeReachedFeedback
where
    S: UsesInput + HasClientPerfMonitor,
{
    #[allow(clippy::wrong_self_convention)]
    fn is_interesting<EM, OT>(
        &mut self,
        _state: &mut S,
        _manager: &mut EM,
        _input: &S::Input,
        observers: &OT,
        _exit_kind: &ExitKind,
    ) -> Result<bool, Error>
    where
        EM: EventFirer<State = S>,
        OT: ObserversTuple<S>,
    {
        let observer = observers.match_name::<QemuClockObserver>("clock")
            .expect("QemuClockObserver not found");
        Ok(observer.last_runtime() >= self.target_time)
    }
}

impl Named for ExecTimeReachedFeedback
{
    #[inline]
    fn name(&self) -> &str {
        "ExecTimeReachedFeedback"
    }
}

impl ExecTimeReachedFeedback
{
    /// Creates a new [`ExecTimeReachedFeedback`]
    #[must_use]
    pub fn new(target_time : u64) -> Self {
        Self {target_time}
    }
}

pub static mut EXEC_TIME_COLLECTION : Vec<u32> = Vec::new();

/// A Noop Feedback which records a list of all execution times
#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct ExecTimeCollectorFeedback
{
}

impl<S> Feedback<S> for ExecTimeCollectorFeedback
where
    S: UsesInput + HasClientPerfMonitor,
{
    #[allow(clippy::wrong_self_convention)]
    fn is_interesting<EM, OT>(
        &mut self,
        _state: &mut S,
        _manager: &mut EM,
        _input: &S::Input,
        observers: &OT,
        _exit_kind: &ExitKind,
    ) -> Result<bool, Error>
    where
        EM: EventFirer<State = S>,
        OT: ObserversTuple<S>,
    {
        let observer = observers.match_name::<QemuClockObserver>("clock")
            .expect("QemuClockObserver not found");
        unsafe { EXEC_TIME_COLLECTION.push(observer.last_runtime().try_into().unwrap()); }
        Ok(false)
    }
}

impl Named for ExecTimeCollectorFeedback
{
    #[inline]
    fn name(&self) -> &str {
        "ExecTimeCollectorFeedback"
    }
}

impl ExecTimeCollectorFeedback
{
    /// Creates a new [`ExecTimeCollectorFeedback`]
    #[must_use]
    pub fn new() -> Self {
        Self {}
    }
}

/// Shared Metadata for a SysStateFeedback
#[derive(Serialize, Deserialize, Clone, Debug, Default)]
pub struct ExecTimeCollectorFeedbackState
{
    collection: Vec<u32>,
}
impl Named for ExecTimeCollectorFeedbackState
{
    #[inline]
    fn name(&self) -> &str {
        "ExecTimeCollectorFeedbackState"
    }
}

//===================================================================
/// A Feedback which reports inputs that set a new longest execution time
#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct ExecTimeIncFeedback
{
    longest_time: u64,
    last_is_longest: bool
}

impl<S> Feedback<S> for ExecTimeIncFeedback
where
    S: UsesInput + HasClientPerfMonitor,
{
    #[allow(clippy::wrong_self_convention)]
    fn is_interesting<EM, OT>(
        &mut self,
        _state: &mut S,
        _manager: &mut EM,
        _input: &S::Input,
        observers: &OT,
        _exit_kind: &ExitKind,
    ) -> Result<bool, Error>
    where
        EM: EventFirer<State = S>,
        OT: ObserversTuple<S>,
    {
        let observer = observers.match_name::<QemuClockObserver>("clocktime")
            .expect("QemuClockObserver not found");
        if observer.last_runtime() > self.longest_time {
            self.longest_time = observer.last_runtime();
            self.last_is_longest = true;
            Ok(true)
        } else {
            self.last_is_longest = false;
            Ok(false)
        }
    }
    fn append_metadata(
        &mut self,
        _state: &mut S,
        testcase: &mut Testcase<<S as UsesInput>::Input>,
    ) -> Result<(), Error> {
        #[cfg(feature = "feed_afl")]
        if self.last_is_longest {
            let mim : Option<&mut MapIndexesMetadata> = testcase.metadata_mut().get_mut();
            // pretend that the longest input alone exercises some non-existing edge, to keep it relevant
            mim.unwrap().list.push(usize::MAX);
        };
        Ok(())
    }
}

impl Named for ExecTimeIncFeedback
{
    #[inline]
    fn name(&self) -> &str {
        "ExecTimeIncFeedback"
    }
}

impl ExecTimeIncFeedback
{
    /// Creates a new [`ExecTimeIncFeedback`]
    #[must_use]
    pub fn new() -> Self {
        Self {longest_time: 0, last_is_longest: false}
    }
}

/// A Feedback which reports every input as interesting
#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct AlwaysTrueFeedback
{
}

impl<S> Feedback<S> for AlwaysTrueFeedback
where
    S: UsesInput + HasClientPerfMonitor,
{
    #[allow(clippy::wrong_self_convention)]
    fn is_interesting<EM, OT>(
        &mut self,
        _state: &mut S,
        _manager: &mut EM,
        _input: &S::Input,
        _observers: &OT,
        _exit_kind: &ExitKind,
    ) -> Result<bool, Error>
    where
        EM: EventFirer<State = S>,
        OT: ObserversTuple<S>,
    {
        Ok(true)
    }
}

impl Named for AlwaysTrueFeedback
{
    #[inline]
    fn name(&self) -> &str {
        "AlwaysTrueFeedback"
    }
}

impl AlwaysTrueFeedback
{
    /// Creates a new [`AlwaysTrueFeedback`]
    #[must_use]
    pub fn new() -> Self {
        Self {}
    }
}
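The feedbacks above all read the target's runtime from a QemuClockObserver (registered as "clock", respectively "clocktime" for ExecTimeIncFeedback). A hedged sketch of a typical pairing for worst-case-execution-time fuzzing (the tick budget is a made-up placeholder):

// Keep inputs that push the observed worst-case execution time upwards ...
let mut feedback = ExecTimeIncFeedback::new();
// ... and treat anything that runs for at least this many clock ticks as a solution.
let mut objective = ExecTimeReachedFeedback::new(1_000_000); // hypothetical tick budget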
fuzzers/baby_fuzzer/.gitignore (vendored)
@@ -1 +1,2 @@
libpng-*
corpus
@@ -1,6 +1,6 @@
[package]
name = "baby_fuzzer"
version = "0.7.1"
version = "0.10.0"
authors = ["Andrea Fioraldi <andreafioraldi@gmail.com>", "Dominik Maier <domenukk@gmail.com>"]
edition = "2021"

@@ -21,3 +21,4 @@ debug = true

[dependencies]
libafl = { path = "../../libafl/" }
libafl_bolts = { path = "../../libafl_bolts/" }
@@ -6,3 +6,5 @@ It runs on a single core until a crash occurs and then exits.

The tested program is a simple Rust function without any instrumentation.
For real fuzzing, you will want to add some sort of coverage or other feedback.

You can run this example using `cargo run`, and you can enable the TUI feature by running `cargo run --features tui`.
@@ -1,70 +0,0 @@
from pylibafl import libafl

# LIBRARY WRAPPER

def map_observer_wrapper(map_observer):
    if type(map_observer).__name__ == "OwnedMapObserverI32":
        return libafl.MapObserverI32.new_from_owned(map_observer)

def executor_wrapper(executor):
    if type(executor).__name__ == "OwnedInProcessExecutorI32":
        return libafl.ExecutorI32.new_from_inprocess(executor)

def monitor_wrapper(monitor):
    if type(monitor).__name__ == "SimpleMonitor":
        return libafl.Monitor.new_from_simple(monitor)

def event_manager_wrapper(event_manager):
    if type(event_manager).__name__ == "SimpleEventManager":
        return libafl.EventManagerI32.new_from_simple(event_manager)

def corpus_wrapper(corpus):
    if type(corpus).__name__ == "InMemoryCorpus":
        return libafl.Corpus.new_from_in_memory(corpus)
    if type(corpus).__name__ == "OnDiskCorpus":
        return libafl.Corpus.new_from_on_disk(corpus)

def rand_wrapper(rand):
    if type(rand).__name__ == "StdRand":
        return libafl.Rand.new_from_std(rand)

def stage_wrapper(stage):
    if type(stage).__name__ == "StdScheduledHavocMutationsStageI32":
        return libafl.StageI32.new_from_std_scheduled(stage)

# CODE WRITTEN BY USER

def harness(inp):
    if len(inp.hex()) >= 2 and inp.hex()[:2] == '61':
        raise Exception("NOOOOOO =)")

map_observer = libafl.OwnedMapObserverI32("signals", [0] * 16)

feedback_state = libafl.MapFeedbackStateI32.with_observer(map_observer_wrapper(map_observer))

feedback = libafl.MaxMapFeedbackI32(feedback_state, map_observer_wrapper(map_observer))

state = libafl.StdStateI32(
    rand_wrapper(libafl.StdRand.with_current_nanos()),
    corpus_wrapper(libafl.InMemoryCorpus()),
    corpus_wrapper(libafl.OnDiskCorpus("./crashes")),
    feedback_state
)

monitor = libafl.SimpleMonitor()

mgr = libafl.SimpleEventManager(monitor_wrapper(monitor))

fuzzer = libafl.StdFuzzerI32(feedback)

executor = libafl.OwnedInProcessExecutorI32(harness, map_observer_wrapper(map_observer), fuzzer, state, event_manager_wrapper(mgr))

generator = libafl.RandPrintablesGeneratorI32(32)

state.generate_initial_inputs(fuzzer, executor_wrapper(executor), generator, event_manager_wrapper(mgr), 8)

stage = libafl.StdScheduledHavocMutationsStageI32.new_from_scheduled_havoc_mutations()

stage_tuple_list = libafl.StagesOwnedListI32(stage_wrapper(stage))

fuzzer.fuzz_loop(executor_wrapper(executor), state, event_manager_wrapper(mgr), stage_tuple_list)
@@ -1,13 +1,12 @@
use std::path::PathBuf;
#[cfg(windows)]
use std::ptr::write_volatile;
use std::{path::PathBuf, ptr::write};

#[cfg(feature = "tui")]
use libafl::monitors::tui::TuiMonitor;
use libafl::monitors::tui::{ui::TuiUI, TuiMonitor};
#[cfg(not(feature = "tui"))]
use libafl::monitors::SimpleMonitor;
use libafl::{
    bolts::{current_nanos, rands::StdRand, tuples::tuple_list, AsSlice},
    corpus::{InMemoryCorpus, OnDiskCorpus},
    events::SimpleEventManager,
    executors::{inprocess::InProcessExecutor, ExitKind},
@@ -21,16 +20,18 @@ use libafl::{
    stages::mutational::StdMutationalStage,
    state::StdState,
};
use libafl_bolts::{current_nanos, rands::StdRand, tuples::tuple_list, AsSlice};

/// Coverage map with explicit assignments due to the lack of instrumentation
static mut SIGNALS: [u8; 16] = [0; 16];
static mut SIGNALS_PTR: *mut u8 = unsafe { SIGNALS.as_mut_ptr() };

/// Assign a signal to the signals map
fn signals_set(idx: usize) {
    unsafe { SIGNALS[idx] = 1 };
    unsafe { write(SIGNALS_PTR.add(idx), 1) };
}

#[allow(clippy::similar_names)]
#[allow(clippy::similar_names, clippy::manual_assert)]
pub fn main() {
    // The closure that we want to fuzz
    let mut harness = |input: &BytesInput| {
@@ -60,8 +61,7 @@ pub fn main() {
    };

    // Create an observation channel using the signals map
    let observer =
        unsafe { StdMapObserver::new_from_ptr("signals", SIGNALS.as_mut_ptr(), SIGNALS.len()) };
    let observer = unsafe { StdMapObserver::from_mut_ptr("signals", SIGNALS_PTR, SIGNALS.len()) };

    // Feedback to rate the interestingness of an input
    let mut feedback = MaxMapFeedback::new(&observer);
@@ -88,9 +88,11 @@

    // The Monitor trait defines how the fuzzer stats are displayed to the user
    #[cfg(not(feature = "tui"))]
    let mon = SimpleMonitor::new(|s| println!("{}", s));
    let mon = SimpleMonitor::new(|s| println!("{s}"));
    #[cfg(feature = "tui")]
    let mon = TuiMonitor::new(String::from("Baby Fuzzer"), false);
    let ui = TuiUI::with_version(String::from("Baby Fuzzer"), String::from("0.0.1"), false);
    #[cfg(feature = "tui")]
    let mon = TuiMonitor::new(ui);

    // The event manager handles the various events generated during the fuzzing loop
    // such as the notification of the addition of a new item to the corpus
@@ -1,6 +1,6 @@
[package]
name = "baby_fuzzer_gramatron"
version = "0.8.2"
version = "0.13.0"
authors = ["Andrea Fioraldi <andreafioraldi@gmail.com>", "Dominik Maier <domenukk@gmail.com>"]
edition = "2021"

@@ -20,4 +20,5 @@ debug = true

[dependencies]
libafl = { path = "../../libafl/" }
postcard = "0.7"
libafl_bolts = { path = "../../libafl_bolts/" }
postcard = { version = "1.0", features = ["alloc"], default-features = false } # no_std compatible serde serialization format
File diff suppressed because one or more lines are too long
Binary file not shown.
Some files were not shown because too many files have changed in this diff.