in resctl-bench/src/bench/iocost_qos.rs [466:621]
fn run(&mut self, rctx: &mut RunCtx) -> Result<serde_json::Value> {
    // We'll be changing bench params multiple times, revert when done.
    rctx.set_revert_bench();

    // Make sure we have iocost parameters available.
    let mut bench_knobs = rctx.bench_knobs().clone();
    if bench_knobs.iocost_seq == 0 {
        rctx.maybe_run_nested_iocost_params()?;
        bench_knobs = rctx.bench_knobs().clone();
    }
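
    // Load the previous result for this job if one exists. Otherwise,
    // start a fresh record seeded with the current model and QoS params.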
    let (prev_matches, mut prev_rec) = match rctx.prev_job_data() {
        Some(pd) => {
            let prec: IoCostQoSRecord = pd.parse_record()?;
            (
                self.prev_matches(&prec, rctx.mem_info().profile, &bench_knobs),
                prec,
            )
        }
        None => (
            true,
            IoCostQoSRecord {
                base_model: bench_knobs.iocost.model.clone(),
                base_qos: bench_knobs.iocost.qos.clone(),
                dither_dist: self.dither_dist,
                ..Default::default()
            },
        ),
    };

    // Skip or adjust the runs whose max vrate falls below the absolute
    // minimum supported by the cost model.
    if !self.ign_min_perf {
        let abs_min_vrate = iocost_min_vrate(&bench_knobs.iocost.model);
        for ovr in self.runs.iter_mut() {
            ovr.skip_or_adj(abs_min_vrate);
        }
    }

    // Print out what to do beforehand so that the user can spot errors
    // without waiting for the benches to run.
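    // Each line is marked "+" if the run will execute or "-" if it's
    // skipped or already has a result; "s" flags a skipped run and "a" one
    // whose parameters were adjusted to the minimum vrate.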
    let mut nr_to_run = 0;
    for (i, ovr) in self.runs.iter().enumerate() {
        let qos_cfg = IoCostQoSCfg::new(&bench_knobs.iocost.qos, ovr);
        let mut skip = false;
        let mut extra_state = " ";
        if ovr.skip {
            skip = true;
            extra_state = "s";
        } else if ovr.min_adj {
            extra_state = "a";
        }
        let new = if !skip && Self::find_matching_rec_run(&ovr, &prev_rec).is_none() {
            nr_to_run += 1;
            true
        } else {
            false
        };
        info!(
            "iocost-qos[{:02}]: {}{} {}",
            i,
            if new { "+" } else { "-" },
            extra_state,
            qos_cfg.format(),
        );
    }
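
    // New runs can only be appended to an existing result recorded with a
    // matching configuration; otherwise the whole set must be rerun.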
    if nr_to_run > 0 {
        if prev_matches || nr_to_run == self.runs.len() {
            info!(
                "iocost-qos: {} storage and protection bench sets to run, isol-{} >= {}%",
                nr_to_run,
                self.isol_pct,
                format_pct(self.isol_thr),
            );
        } else {
            bail!(
                "iocost-qos: {} bench sets to run but existing result doesn't match \
                 the current configuration, consider removing the result file",
                nr_to_run
            );
        }
    } else {
        info!("iocost-qos: All results are available in the result file, nothing to do");
    }
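
    // Run the storage and protection bench set for each configuration
    // which doesn't already have a matching result.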
    let mut runs = vec![];
    for (i, ovr) in self.runs.iter().enumerate() {
        let qos_cfg = IoCostQoSCfg::new(&bench_knobs.iocost.qos, ovr);
        if let Some(recr) = Self::find_matching_rec_run(&ovr, &prev_rec) {
            runs.push(Some(recr.clone()));
            continue;
        } else if ovr.skip {
            runs.push(None);
            continue;
        }

        info!(
            "iocost-qos[{:02}]: Running storage benchmark with QoS parameters:",
            i
        );
        info!("iocost-qos[{:02}]: {}", i, qos_cfg.format());
        loop {
            let mut sjob = self.stor_job.clone();
            sjob.loops = match i {
                0 => self.stor_base_loops,
                _ => self.stor_loops,
            };
            let mut pjob = self.prot_job.clone();
            match Self::run_one(rctx, &mut sjob, &mut pjob, &qos_cfg, self.retries) {
                Ok(recr) => {
                    // Sanity check QoS params.
                    if recr.qos.is_some() {
                        let target_qos = qos_cfg.calc();
                        if recr.qos != target_qos {
                            bail!(
                                "iocost-qos: result qos ({}) != target qos ({})",
                                &recr.qos.as_ref().unwrap(),
                                target_qos.as_ref().unwrap(),
                            );
                        }
                    }
                    prev_rec.inc_runs.push(recr.clone());
                    rctx.update_incremental_record(serde_json::to_value(&prev_rec).unwrap());
                    runs.push(Some(recr));
                    break;
                }
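                // A failure aborts the whole job unless allow_fail is set,
                // in which case the failed configuration is recorded as
                // None and we move on.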
                Err(e) => {
                    if !self.allow_fail || prog_exiting() {
                        error!("iocost-qos[{:02}]: Failed ({:#}), giving up...", i, &e);
                        return Err(e);
                    }
                    error!("iocost-qos[{:02}]: Failed ({:#}), skipping...", i, &e);
                    runs.push(None);
                    // Break out so that a failed run doesn't retry forever.
                    break;
                }
            }
        }
    }

    // We could have broken out early due to allow_fail; pad the result
    // vector to the configured number of runs.
    runs.resize(self.runs.len(), None);
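
    // Assemble the final record. The incremental runs recorded above have
    // already been persisted, so inc_runs starts out empty in the final
    // result.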
    Ok(serde_json::to_value(&IoCostQoSRecord {
        base_model: bench_knobs.iocost.model,
        base_qos: bench_knobs.iocost.qos,
        mem_profile: rctx.mem_info().profile,
        runs,
        dither_dist: self.dither_dist,
        inc_runs: vec![],
    })
    .unwrap())
}