// Snapshot metadata (not Rust source): captured 2025-08-18 12:27:43 +00:00,
// 770 lines, 34 KiB, Rust.

use std::{cell::RefCell, collections::VecDeque, rc::Rc};
use freertos::USR_ISR_SYMBOLS;
use hashbrown::HashMap;
use crate::systemstate::{
target_os::{freertos::FreeRTOSStruct::*, *},
AtomicBasicBlock, CaptureEvent,
};
use super::{
bindings::*,
compute_hash, ExecInterval, FreeRTOSStruct, FreeRTOSSystemState,
FreeRTOSSystemStateContext, RawFreeRTOSSystemState, RefinedTCB,
};
//============================= Parsing helpers
/// Parse a List_t containing TCB_t into Vec<TCB_t> from cache. Consumes the elements from cache
///
/// # Arguments
///
/// * `list` - The FreeRTOS List_t structure containing TCB_t elements.
/// * `dump` - A mutable reference to a HashMap cache of FreeRTOSStructs, from which elements are consumed.
///
/// # Returns
///
/// A Vec of TCB_t extracted from the list.
pub fn tcb_list_to_vec_cached(list: List_t, dump: &mut HashMap<u32, FreeRTOSStruct>) -> Vec<TCB_t> {
    // Exact capacity is known up front from the list header.
    let mut ret: Vec<TCB_t> = Vec::with_capacity(list.uxNumberOfItems as usize);
    if list.uxNumberOfItems == 0 {
        return ret;
    }
    // Resolve the cache entry at `addr` to a ListItem, consuming it from the
    // cache. A MiniListItem (the list's end marker) is skipped by following
    // its pxNext pointer once; a non-empty list must then yield a real
    // ListItem. Panics on any malformed cache content, like the original
    // inline matches did.
    let resolve = |dump: &mut HashMap<u32, FreeRTOSStruct>, addr: u32| match dump
        .remove(&addr)
        .expect("List_t entry was not in Hashmap")
    {
        List_Item_struct(li) => li,
        List_MiniItem_struct(mli) => match dump
            .remove(&mli.pxNext)
            .expect("MiniListItem pointer invalid")
        {
            List_Item_struct(li) => li,
            _ => panic!("MiniListItem of a non empty List does not point to ListItem"),
        },
        _ => panic!("List_t entry was not a ListItem"),
    };
    // pxIndex points at the item the scheduler considers "last"; its TCB is
    // appended at the very end so the returned order starts right after it.
    let last_list_item = resolve(dump, list.pxIndex);
    let mut next_index = last_list_item.pxNext;
    let last_tcb = match dump
        .remove(&last_list_item.pvOwner)
        .expect("ListItem Owner not in Hashmap")
    {
        TCB_struct(t) => t,
        _ => panic!("List content does not equal type"),
    };
    // Walk the remaining items in pxNext order, consuming each owner TCB.
    for _ in 0..list.uxNumberOfItems - 1 {
        let next_list_item = resolve(dump, next_index);
        match dump
            .remove(&next_list_item.pvOwner)
            .expect("ListItem Owner not in Hashmap")
        {
            TCB_struct(t) => ret.push(t),
            _ => panic!("List content does not equal type"),
        }
        next_index = next_list_item.pxNext;
    }
    ret.push(last_tcb);
    ret
}
//============================= State refinement
/// Drains a List of RawFreeRTOSSystemState to split them up into lists of FreeRTOSSystemState FreeRTOSSystemStateContext.
///
/// # Arguments
///
/// * `input` - A Vec of RawFreeRTOSSystemState to be refined.
///
/// # Returns
///
/// A tuple containing:
/// - Vec of FreeRTOSSystemState
/// - Vec of FreeRTOSSystemStateContext (qemu_tick, (capture_event, capture_name), edge, mem_reads)
pub(crate) fn refine_system_states(
    input: Vec<RawFreeRTOSSystemState>,
) -> (Vec<FreeRTOSSystemState>, Vec<FreeRTOSSystemStateContext>) {
    // One refined state + one context per raw snapshot.
    let mut states = Vec::with_capacity(input.len());
    let mut contexts = Vec::with_capacity(input.len());
    for mut raw in input {
        let cur = RefinedTCB::from_tcb_owned(raw.current_tcb);
        // Flatten the per-priority ready lists (iterated in reverse array
        // order) into a single list of refined TCBs.
        let mut ready_list = Vec::<RefinedTCB>::new();
        for prio_list in raw.prio_ready_lists.into_iter().rev() {
            let mut refined: Vec<RefinedTCB> =
                tcb_list_to_vec_cached(prio_list, &mut raw.dumping_ground)
                    .iter()
                    .map(RefinedTCB::from_tcb)
                    .collect();
            ready_list.append(&mut refined);
        }
        #[cfg(feature = "observe_systemstate_unordered")]
        {
            // Respect the order of the first LOOKAHEAD tasks and sort the
            // rest by task name.
            const LOOKAHEAD: usize = 2;
            if let Some(tail) = ready_list.get_mut(LOOKAHEAD..) {
                tail.sort_by(|a, b| a.task_name.cmp(&b.task_name));
            }
        }
        // Merge both delay lists (normal + tick-overflow) and sort by task
        // name for a canonical representation.
        let mut delay_list: Vec<RefinedTCB> =
            tcb_list_to_vec_cached(raw.delay_list, &mut raw.dumping_ground)
                .iter()
                .map(RefinedTCB::from_tcb)
                .collect();
        let mut delay_list_overflow: Vec<RefinedTCB> =
            tcb_list_to_vec_cached(raw.delay_list_overflow, &mut raw.dumping_ground)
                .iter()
                .map(RefinedTCB::from_tcb)
                .collect();
        delay_list.append(&mut delay_list_overflow);
        delay_list.sort_by(|a, b| a.task_name.cmp(&b.task_name));
        states.push(FreeRTOSSystemState {
            current_task: cur,
            ready_list_after: ready_list,
            delay_list_after: delay_list,
            read_invalid: raw.read_invalid,
        });
        contexts.push(FreeRTOSSystemStateContext {
            qemu_tick: raw.qemu_tick,
            capture_point: raw.capture_point,
            edge: raw.edge,
            mem_reads: raw.mem_reads,
        });
    }
    (states, contexts)
}
/// Transform the states and metadata into a list of ExecIntervals, along with a HashMap of states, a list of HashSets marking memory reads and a bool indicating success.
///
/// # Arguments
///
/// * `trace` - A Vec of FreeRTOSSystemState representing the trace.
/// * `meta` - A Vec of FreeRTOSSystemStateContext containing metadata for each state.
///
/// # Returns
///
/// A tuple containing:
/// - Vec of ExecInterval
/// - Vec of Vec<(u32, u8)> marking memory reads during these intervals
/// - HashMap<u64, FreeRTOSSystemState> by hash
/// - bool indicating success
pub(crate) fn states2intervals(
    trace: Vec<FreeRTOSSystemState>,
    meta: Vec<FreeRTOSSystemStateContext>,
) -> (
    Vec<ExecInterval>,
    Vec<Vec<(u32, u8)>>,
    HashMap<u64, FreeRTOSSystemState>,
    bool,
) {
    // Empty trace: nothing to build, report success.
    if trace.len() == 0 {
        return (Vec::new(), Vec::new(), HashMap::new(), true);
    }
    let mut isr_stack: VecDeque<u8> = VecDeque::from([]); // 2+ = ISR, 1 = systemcall, 0 = APP. Trace starts with an ISREnd and executes the app
    // Last known execution level per task name (0 = app, 1 = inside an API call).
    let mut level_of_task: HashMap<&str, u8> = HashMap::new();
    let mut ret: Vec<ExecInterval> = vec![];
    // Per-interval memory reads, taken from the capture that *ends* the interval.
    let mut reads: Vec<Vec<(u32, u8)>> = vec![];
    // Per-interval (entry, exit) edge endpoints, consumed later by add_abb_info.
    let mut edges: Vec<(u32, u32)> = vec![];
    let mut last_hash: u64 = compute_hash(&trace[0]);
    // Deduplicated system states, keyed by their hash.
    let mut table: HashMap<u64, FreeRTOSSystemState> = HashMap::new();
    table.insert(last_hash, trace[0].clone());
    // Each pair of consecutive captures delimits one ExecInterval. The capture
    // event that starts the interval determines which level the CPU runs at
    // during the interval.
    for i in 0..trace.len() - 1 {
        let curr_name = trace[i].current_task().task_name().as_str();
        // let mut interval_name = curr_name; // Name of the interval, either the task name or the isr/api funtion name
        let level = match meta[i].capture_point.0 {
            CaptureEvent::APIEnd => {
                // API end always exits towards the app
                if !level_of_task.contains_key(curr_name) {
                    level_of_task.insert(curr_name, 0);
                }
                *level_of_task.get_mut(curr_name).unwrap() = 0;
                0
            }
            CaptureEvent::APIStart => {
                // API start can only be called in the app
                if !level_of_task.contains_key(curr_name) {
                    // Should not happen, apps start from an ISR End. Some input exibited this behavior for unknown reasons
                    level_of_task.insert(curr_name, 0);
                }
                *level_of_task.get_mut(curr_name).unwrap() = 1;
                // interval_name = &meta[i].2;
                1
            }
            CaptureEvent::ISREnd => {
                // special case where the next block is an app start
                if !level_of_task.contains_key(curr_name) {
                    level_of_task.insert(curr_name, 0);
                }
                // nested isr, TODO: Test level > 2
                if isr_stack.len() > 1 {
                    // interval_name = ""; // We can't know which isr is running
                    // Still inside an outer ISR: pop this one, continue at the
                    // level of the ISR below it.
                    isr_stack.pop_back().unwrap();
                    *isr_stack.back().unwrap()
                } else {
                    // Leaving the last ISR: fall back to the task's own level.
                    // NOTE(review): pop_back() may be a no-op on an empty stack
                    // (e.g. at the very first ISREnd of the trace) — presumably
                    // intentional, the unchecked result is discarded.
                    isr_stack.pop_back();
                    // possibly go back to an api call that is still running for this task
                    if level_of_task.get(curr_name).unwrap() == &1 {
                        // interval_name = ""; // We can't know which api is running
                    }
                    *level_of_task.get(curr_name).unwrap()
                }
            }
            CaptureEvent::ISRStart => {
                // special case for isrs which do not capture their end
                // if meta[i].2 == "ISR_0_Handler" {
                //     &2
                // } else {
                // regular case
                // interval_name = &meta[i].2;
                // Nested ISRs get increasing levels starting at 2.
                if isr_stack.len() > 0 {
                    let l = *isr_stack.back().unwrap();
                    isr_stack.push_back(l + 1);
                    l + 1
                } else {
                    isr_stack.push_back(2);
                    2
                }
                // }
            }
            // Unknown capture events are mapped to a sentinel level.
            _ => 100,
        };
        // if trace[i].2 == CaptureEvent::End {break;}
        // Deduplicate the successor state by hash.
        let next_hash = compute_hash(&trace[i + 1]);
        if !table.contains_key(&next_hash) {
            table.insert(next_hash, trace[i + 1].clone());
        }
        ret.push(ExecInterval {
            start_tick: meta[i].qemu_tick,
            end_tick: meta[i + 1].qemu_tick,
            start_state: last_hash,
            end_state: next_hash,
            start_capture: meta[i].capture_point.clone(),
            end_capture: meta[i + 1].capture_point.clone(),
            level: level,
            abb: None, // filled in by add_abb_info below
        });
        reads.push(meta[i + 1].mem_reads.clone());
        last_hash = next_hash;
        // Edge of this interval: exit address of its first capture, entry
        // address of its second.
        edges.push((meta[i].edge.1, meta[i + 1].edge.0));
    }
    // Annotate the intervals with ABBs; t reports whether that succeeded.
    let t = add_abb_info(&mut ret, &table, &edges);
    (ret, reads, table, t)
}
/// Marks which abbs were executed at each interval.
///
/// # Arguments
///
/// * `trace` - A mutable reference to a Vec of ExecInterval to annotate with ABB info.
/// * `state_table` - A reference to a HashMap of state hashes to FreeRTOSSystemState.
/// * `edges` - A reference to a Vec of (u32, u32) representing edges between states.
///
/// # Returns
///
/// A bool indicating whether ABB annotation was successful.
pub(crate) fn add_abb_info(
    trace: &mut Vec<ExecInterval>,
    state_table: &HashMap<u64, FreeRTOSSystemState>,
    edges: &Vec<(u32, u32)>,
) -> bool {
    // Running id for newly created ABB instances.
    let mut id_count = 0;
    // Cleared whenever the trace violates an ABB invariant (a block opens
    // while another is still open at the same key, or a block continues
    // without a recorded start).
    let mut ret = true;
    // Tasks whose initial app block has already been created.
    let mut task_has_started: HashSet<&String> = HashSet::new();
    // One (possibly shared) ABB per interval; intervals belonging to the same
    // interrupted block share the same Rc.
    let mut wip_abb_trace: Vec<Rc<RefCell<AtomicBasicBlock>>> = Vec::with_capacity(trace.len());
    // Open (interrupted) ABBs, keyed by (return address, task name) and
    // mapping to the index of the interval that opened them. Apps/apis are
    // differentiated by task name, isrs all share "".
    let mut open_abb_at_this_ret_addr_and_task: HashMap<(u32, &str), usize> = HashMap::new();
    for i in 0..trace.len() {
        let curr_name = state_table[&trace[i].start_state].current_task().task_name();
        // ABB levels are capped at 2: all nested isr levels collapse to 2.
        let level = if trace[i].level < 2 { trace[i].level } else { 2 };
        // Is an ABB waiting to be resumed at this interval's entry edge?
        let open_abb = open_abb_at_this_ret_addr_and_task
            .get(&(edges[i].0, if trace[i].level < 2 { &curr_name } else { "" }))
            .copied();
        match trace[i].start_capture.0 {
            // A new ABB starts: an api block (after APIStart), an isr block
            // (after ISRStart) or an app block (after APIEnd). The three
            // cases only differ in how the instance is named.
            CaptureEvent::APIStart | CaptureEvent::ISRStart | CaptureEvent::APIEnd => {
                // starting a new block while one is still open is an error
                ret &= open_abb.is_none();
                open_abb_at_this_ret_addr_and_task.insert(
                    (edges[i].1, if trace[i].level < 2 { &curr_name } else { "" }),
                    i,
                );
                let instance_name = if matches!(trace[i].start_capture.0, CaptureEvent::APIEnd) {
                    // app blocks are named after their task; isr level has none
                    if trace[i].level < 2 {
                        Some(Cow::Owned(curr_name.to_owned()))
                    } else {
                        None
                    }
                } else {
                    // api/isr blocks are named after the captured symbol
                    Some(trace[i].start_capture.1.clone())
                };
                wip_abb_trace.push(Rc::new(RefCell::new(AtomicBasicBlock {
                    start: edges[i].0,
                    ends: HashSet::new(),
                    level,
                    instance_id: id_count,
                    instance_name,
                })));
                id_count += 1;
            }
            // An ISREnd either resumes an interrupted block, or (special
            // case) marks the very first app block of a freshly dispatched
            // task coming out of the scheduler.
            CaptureEvent::ISREnd => {
                if trace[i].start_capture.1 == "xPortPendSVHandler"
                    && !task_has_started.contains(&curr_name)
                {
                    // first dispatch: synthesize the task's initial app ABB
                    ret &= open_abb.is_none();
                    wip_abb_trace.push(Rc::new(RefCell::new(AtomicBasicBlock {
                        start: 0,
                        ends: HashSet::new(),
                        level,
                        instance_id: id_count,
                        instance_name: Some(Cow::Owned(curr_name.to_owned())),
                    })));
                    id_count += 1;
                    open_abb_at_this_ret_addr_and_task.insert(
                        (edges[i].1, if trace[i].level < 2 { &curr_name } else { "" }),
                        i,
                    );
                    task_has_started.insert(&curr_name);
                } else if let Some(last) = open_abb_at_this_ret_addr_and_task
                    .get(&(edges[i].0, if trace[i].level < 2 { &curr_name } else { "" }))
                    .copied()
                {
                    // resume: this interval shares the interrupted block's Rc
                    wip_abb_trace.push(wip_abb_trace[last].clone());
                    // if the abb is interrupted again it will need to continue
                    // at edges[i].1, so re-key the open entry; remove before
                    // insert — the order matters when both keys are equal!
                    open_abb_at_this_ret_addr_and_task.remove(&(
                        edges[i].0,
                        if trace[i].level < 2 { &curr_name } else { "" },
                    ));
                    open_abb_at_this_ret_addr_and_task.insert(
                        (edges[i].1, if trace[i].level < 2 { &curr_name } else { "" }),
                        last,
                    );
                } else {
                    // continued block without a recorded start: flag the trace
                    // as inconsistent and open a fresh ABB at the return address
                    ret = false;
                    wip_abb_trace.push(Rc::new(RefCell::new(AtomicBasicBlock {
                        start: edges[i].1,
                        ends: HashSet::new(),
                        level,
                        instance_id: id_count,
                        instance_name: if trace[i].level < 1 {
                            Some(Cow::Owned(curr_name.to_owned()))
                        } else {
                            None
                        },
                    })));
                    id_count += 1;
                }
            }
            _ => panic!("Undefined block start"),
        }
        match trace[i].end_capture.0 {
            // Any of these events terminates the current ABB at edges[i].1
            // and closes the corresponding open entry.
            CaptureEvent::APIStart
            | CaptureEvent::APIEnd
            | CaptureEvent::ISREnd
            | CaptureEvent::End => {
                RefCell::borrow_mut(&*wip_abb_trace[i])
                    .ends
                    .insert(edges[i].1);
                open_abb_at_this_ret_addr_and_task
                    .remove(&(edges[i].1, if trace[i].level < 2 { &curr_name } else { "" }));
            }
            // An ISRStart merely interrupts the block; it stays open.
            CaptureEvent::ISRStart => (),
            _ => panic!("Undefined block end"),
        }
    }
    // Materialize the (possibly shared) ABBs into the intervals.
    for i in 0..trace.len() {
        trace[i].abb = Some((*wip_abb_trace[i]).borrow().clone());
    }
    ret
}
//============================================= Task release times
/// Find all task release times.
///
/// # Arguments
///
/// * `trace` - A reference to a Vec of ExecInterval representing the execution trace.
/// * `states` - A reference to a HashMap of state hashes to FreeRTOSSystemState.
///
/// # Returns
///
/// A Vec of (u64, String) tuples, where each tuple is (tick, task_name) for each release event.
pub(crate) fn get_releases(
    trace: &Vec<ExecInterval>,
    states: &HashMap<u64, FreeRTOSSystemState>,
) -> Vec<(u64, String)> {
    let mut ret = Vec::new();
    // Set once the scheduler's very first dispatch has been handled.
    let mut initial_released = false;
    for (_n, i) in trace.iter().enumerate() {
        // The first release starts from xPortPendSVHandler: every task already
        // on a ready list at that point counts as released at start_tick.
        if !initial_released
            && i.start_capture.0 == CaptureEvent::ISREnd
            && i.start_capture.1 == "xPortPendSVHandler"
        {
            let start_state = states.get(&i.start_state).expect("State not found");
            initial_released = true;
            start_state.get_ready_lists().iter().for_each(|x| {
                ret.push((i.start_tick, x.task_name().clone()));
            });
            continue;
        }
        // A timed release is SysTickHandler isr block that moves a task from the delay list to the ready list.
        if i.start_capture.0 == CaptureEvent::ISRStart
            && (i.start_capture.1 == "xPortSysTickHandler"
                || USR_ISR_SYMBOLS.contains(&&*i.start_capture.1))
        {
            // detect race-conditions, get start and end state from the nearest valid intervals
            if states
                .get(&i.start_state)
                .map(|x| x.read_invalid)
                .unwrap_or(true)
            {
                // Scan backwards for the nearest interval with a valid start state.
                // NOTE(review): `1.._n` never reaches index 0, so the first
                // interval is never considered — confirm this is intended.
                let mut start_index = None;
                for n in 1.._n {
                    if let Some(interval_start) = trace.get(_n - n) {
                        let start_state = states.get(&interval_start.start_state).unwrap();
                        if !start_state.read_invalid {
                            start_index = Some(_n - n);
                            break;
                        }
                    } else {
                        break;
                    }
                }
                // Scan forwards for the nearest interval with a valid end state.
                let mut end_index = None;
                for n in (_n + 1)..trace.len() {
                    if let Some(interval_end) = trace.get(n) {
                        let end_state = states.get(&interval_end.end_state).unwrap();
                        if !end_state.read_invalid {
                            end_index = Some(n);
                            break;
                        }
                    } else {
                        break;
                    }
                }
                // A task is released if it appears in the (valid) end ready
                // list but not in the (valid) start ready list, and is not the
                // currently running task on either side.
                if let Some(Some(start_state)) =
                    start_index.map(|x| states.get(&trace[x].start_state))
                {
                    if let Some(Some(end_state)) =
                        end_index.map(|x| states.get(&trace[x].end_state))
                    {
                        end_state.ready_list_after.iter().for_each(|x| {
                            if x.task_name != end_state.current_task.task_name
                                && x.task_name != start_state.current_task.task_name
                                && !start_state
                                    .ready_list_after
                                    .iter()
                                    .any(|y| x.task_name == y.task_name)
                            {
                                ret.push((i.end_tick, x.task_name.clone()));
                            }
                        });
                    }
                }
            } else
            // canonical case, userspace -> isr -> userspace
            if i.end_capture.0 == CaptureEvent::ISREnd {
                let start_state = states.get(&i.start_state).expect("State not found");
                let end_state = states.get(&i.end_state).expect("State not found");
                // Newly-ready tasks (in end ready list, not in start ready
                // list, not running on either side) were released by this isr.
                end_state.ready_list_after.iter().for_each(|x| {
                    if x.task_name != end_state.current_task.task_name
                        && x.task_name != start_state.current_task.task_name
                        && !start_state
                            .ready_list_after
                            .iter()
                            .any(|y| x.task_name == y.task_name)
                    {
                        ret.push((i.end_tick, x.task_name.clone()));
                    }
                });
                // start_state.delay_list_after.iter().for_each(|x| {
                //     if !end_state.delay_list_after.iter().any(|y| x.task_name == y.task_name) {
                //         ret.push((i.end_tick, x.task_name.clone()));
                //     }
                // });
            } else if i.end_capture.0 == CaptureEvent::ISRStart {
                // Nested interrupts. Fast-forward to the end of the original interrupt, or the first valid state thereafter
                // TODO: this may cause the same release to be registered multiple times
                let mut isr_has_ended = false;
                let start_state = states.get(&i.start_state).expect("State not found");
                for n in (_n + 1)..trace.len() {
                    if let Some(interval_end) = trace.get(n) {
                        // Once the original isr's symbol is seen ending, take
                        // the first subsequent valid end state for comparison.
                        if interval_end.end_capture.1 == i.start_capture.1 || isr_has_ended {
                            let end_state = states.get(&interval_end.end_state).unwrap();
                            isr_has_ended = true;
                            if !end_state.read_invalid {
                                end_state.ready_list_after.iter().for_each(|x| {
                                    if x.task_name != end_state.current_task.task_name
                                        && x.task_name != start_state.current_task.task_name
                                        && !start_state
                                            .ready_list_after
                                            .iter()
                                            .any(|y| x.task_name == y.task_name)
                                    {
                                        ret.push((i.end_tick, x.task_name.clone()));
                                    }
                                });
                                break;
                            }
                        }
                    } else {
                        break;
                    }
                }
                // if let Some(interval_end) = trace.get(_n+2) {
                //     if interval_end.start_capture.0 == CaptureEvent::ISREnd && interval_end.end_capture.0 == CaptureEvent::ISREnd && interval_end.end_capture.1 == i.start_capture.1 {
                //         let start_state = states.get(&i.start_state).expect("State not found");
                //         let end_state = states.get(&interval_end.end_state).expect("State not found");
                //         end_state.ready_list_after.iter().for_each(|x| {
                //             if x.task_name != end_state.current_task.task_name && x.task_name != start_state.current_task.task_name && !start_state.ready_list_after.iter().any(|y| x.task_name == y.task_name) {
                //                 ret.push((i.end_tick, x.task_name.clone()));
                //             }
                //         });
                //     }
                // }
            }
        }
        // Release driven by an API call. This produces a lot of false positives, as a job may block multiple times per instance. Despite this, aperiodic jobs not be modeled otherwise. If we assume the first release is the real one, we can filter out the rest.
        if i.start_capture.0 == CaptureEvent::APIStart {
            let api_start_state = states.get(&i.start_state).expect("State not found");
            // Find the state at which the API call's effects are visible:
            // either its APIEnd/End, or the first return to a plain app block.
            let api_end_state = {
                let mut end_index = _n;
                for n in (_n)..trace.len() {
                    if trace[n].end_capture.0 == CaptureEvent::APIEnd
                        || trace[n].end_capture.0 == CaptureEvent::End
                    {
                        end_index = n;
                        break;
                    } else if n > _n && trace[n].level == 0 {
                        // API Start -> ISR Start+End -> APP Continue
                        end_index = n - 1; // any return to a regular app block is a fair point of comparison for the ready list, because scheduling has been performed
                        break;
                    }
                }
                states
                    .get(&trace[end_index].end_state)
                    .expect("State not found")
            };
            // Tasks that became ready over the course of the API call.
            api_end_state.ready_list_after.iter().for_each(|x| {
                if x.task_name != api_start_state.current_task.task_name
                    && !api_start_state
                        .ready_list_after
                        .iter()
                        .any(|y| x.task_name == y.task_name)
                {
                    ret.push((i.end_tick, x.task_name.clone()));
                    // eprintln!("Task {} released by API call at {:.1}ms", x.task_name, crate::time::clock::tick_to_time(i.end_tick).as_micros() as f32/1000.0);
                }
            });
        }
    }
    ret
}
/// Pair up task releases and responses.
///
/// # Arguments
///
/// * `rel` - A reference to a Vec of (u64, String) representing release events.
/// * `resp` - A reference to a Vec of (u64, String) representing response events.
///
/// # Returns
///
/// A tuple containing:
/// - Vec of (u64, u64, String) tuples for (release_tick, response_tick, task_name)
/// - bool indicating if there was a possible error in pairing
pub(crate) fn get_release_response_pairs(
    rel: &Vec<(u64, String)>,
    resp: &Vec<(u64, String)>,
) -> (Vec<(u64, u64, String)>, bool) {
    let mut maybe_error = false;
    let mut ret = Vec::new();
    // Tasks currently released but not yet responded, mapped to release tick.
    let mut ready: HashMap<&String, u64> = HashMap::new();
    // Last response tick per task, used as a fallback release time.
    let mut last_response: HashMap<&String, u64> = HashMap::new();
    // Both inputs are consumed in order; releases and responses are assumed
    // to be sorted by tick.
    let mut r = rel.iter().peekable();
    let mut d = resp.iter().peekable();
    loop {
        while let Some(peek_rel) = r.peek() {
            // Fill releases as soon as possible
            if !ready.contains_key(&peek_rel.1) {
                ready.insert(&peek_rel.1, peek_rel.0);
                r.next();
            } else {
                if let Some(peek_resp) = d.peek() {
                    if peek_resp.0 > peek_rel.0 {
                        // multiple releases before response
                        // It is unclear which release is real
                        // maybe_error = true;
                        // eprintln!("Task {} released multiple times before response ({:.1}ms and {:.1}ms)", peek_rel.1, crate::time::clock::tick_to_time(ready[&peek_rel.1]).as_micros()/1000, crate::time::clock::tick_to_time(peek_rel.0).as_micros()/1000);
                        // ready.insert(&peek_rel.1, peek_rel.0);
                        // NOTE(review): the duplicate release is skipped; the
                        // earliest release for the task is kept.
                        r.next();
                    } else {
                        // releases have overtaken responses, wait until the ready list clears up a bit
                        break;
                    }
                } else {
                    // no more responses
                    break;
                }
            }
        }
        // Pair the next response with its pending release, if any.
        if let Some(next_resp) = d.next() {
            if ready.contains_key(&next_resp.1) {
                if ready[&next_resp.1] >= next_resp.0 {
                    // Pending release lies at/after this response.
                    if let Some(lr) = last_response.get(&next_resp.1) {
                        if u128::abs_diff(
                            crate::time::clock::tick_to_time(next_resp.0).as_micros(),
                            crate::time::clock::tick_to_time(*lr).as_micros(),
                        ) > 500
                        {
                            // tolerate pending notifications for 500us
                            maybe_error = true;
                            // eprintln!("Task {} response at {:.1}ms before next release at {:.1}ms. Fallback to last response at {:.1}ms.", next_resp.1, crate::time::clock::tick_to_time(next_resp.0).as_micros() as f32/1000.0, crate::time::clock::tick_to_time(ready[&next_resp.1]).as_micros() as f32/1000.0, crate::time::clock::tick_to_time(*lr).as_micros() as f32/1000.0);
                        }
                        // Sometimes a task is released immediately after a response. This might not be detected.
                        // Assume that the release occured with the last response
                        ret.push((*lr, next_resp.0, next_resp.1.clone()));
                        last_response.insert(&next_resp.1, next_resp.0);
                    } else {
                        // No previous response to fall back to.
                        maybe_error = true;
                        // eprintln!("Task {} released after response", next_resp.1);
                    }
                } else {
                    // Normal pairing: release strictly before response.
                    // assert!(peek_resp.0 >= ready[&peek_resp.1]);
                    last_response.insert(&next_resp.1, next_resp.0);
                    ret.push((ready[&next_resp.1], next_resp.0, next_resp.1.clone()));
                    ready.remove(&next_resp.1);
                }
            } else {
                // Response without a recorded release.
                if let Some(lr) = last_response.get(&next_resp.1) {
                    // NOTE(review): this check is currently a no-op — its body
                    // only contains disabled diagnostics.
                    if u128::abs_diff(
                        crate::time::clock::tick_to_time(next_resp.0).as_micros(),
                        crate::time::clock::tick_to_time(*lr).as_micros(),
                    ) > 1000
                    { // tolerate pending notifications for 1ms
                        // maybe_error = true;
                        // eprintln!("Task {} response at {:.1}ms not found in ready list. Fallback to last response at {:.1}ms.", next_resp.1, crate::time::clock::tick_to_time(next_resp.0).as_micros() as f32/1000.0, crate::time::clock::tick_to_time(*lr).as_micros() as f32/1000.0);
                    }
                    // Sometimes a task is released immediately after a response (e.g. pending notification). This might not be detected.
                    // Assume that the release occured with the last response
                    ret.push((*lr, next_resp.0, next_resp.1.clone()));
                    last_response.insert(&next_resp.1, next_resp.0);
                } else {
                    maybe_error = true;
                    // eprintln!("Task {} response at {:.1}ms not found in ready list", next_resp.1, crate::time::clock::tick_to_time(next_resp.0).as_micros() as f32/1000.0);
                }
            }
        } else {
            // TODO: should remaining released tasks be counted as finished?
            return (ret, maybe_error);
        }
    }
}