in core/benches/ops/read.rs [105:156]
fn bench_read_parallel(c: &mut Criterion, name: &str, op: Operator) {
    let mut group = c.benchmark_group(format!("service_{name}_read_parallel"));
    let mut rng = thread_rng();
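    // Measure parallel reads over payload sizes from 4 KiB to 16 MiB.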
    for size in [
        Size::from_kibibytes(4),
        Size::from_kibibytes(256),
        Size::from_mebibytes(4),
        Size::from_mebibytes(16),
    ] {
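        // Stage a temporary object twice the benchmarked size so reads starting at `offset` stay in bounds.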
        let content = gen_bytes(&mut rng, (size.bytes() * 2) as usize);
        let path = uuid::Uuid::new_v4().to_string();
        let offset = (size.bytes() / 2) as u64;
        let buf = vec![0; size.bytes() as usize];
        let temp_data = TempData::generate(op.clone(), &path, content.clone());
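        // Sweep the number of concurrent readers per benchmark case.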
        for parallel in [1, 2, 4, 8, 16] {
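            // Throughput counts the total bytes read across all concurrent readers.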
            group.throughput(criterion::Throughput::Bytes(parallel * size.bytes() as u64));
            group.bench_with_input(
                format!("{}x{}", parallel, size),
                &(op.clone(), &path, buf.clone()),
                |b, (op, path, buf)| {
                    b.to_async(&*TOKIO).iter(|| async {
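                        // Issue `parallel` range reads concurrently, each into its own buffer.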
                        let futures = (0..parallel)
                            .map(|_| async {
                                let mut buf = buf.clone();
                                let mut r = op
                                    .range_reader(path, offset..=offset + size.bytes() as u64)
                                    .await
                                    .unwrap();
                                r.read_exact(&mut buf).await.unwrap();
                                let mut d = 0;
                                // Simulate the same small amount of CPU work in every task.
                                for c in offset..offset + 100u64 {
                                    d += c & (0x1f1f1f1f + c % 256);
                                }
                                let _ = d;
                            })
                            .collect::<Vec<_>>();
                        futures::future::join_all(futures).await
                    })
                },
            );
        }
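        // Release the temporary data before moving on to the next size.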
        drop(temp_data);
    }
    group.finish()
}