in cmd/apmbench/run.go [36:97]
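// Run sets the test.benchtime flag from the configured value, filters the
// provided benchmark functions against the configured run/skip patterns, and
// then runs each selected benchmark cfg.Count times for every configured
// agent count, printing one result line per run. It returns an error as soon
// as a benchmark fails.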
func Run(
	extraMetrics func(*testing.B),
	resetStore func(),
	fns ...BenchmarkFunc,
) error {
	type benchmark struct {
		name string
		fn   BenchmarkFunc
	}
	// Set the `test.benchtime` flag based on the custom `benchtime` flag.
	if err := flag.Set("test.benchtime", cfg.Benchtime); err != nil {
		return fmt.Errorf("failed to set test.benchtime flag: %w", err)
	}
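	// Select the benchmarks to run, remembering the longest matching name so
	// the results can later be printed in aligned columns.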
	var maxLenBenchName string
	benchmarks := make([]benchmark, 0, len(fns))
	for _, fn := range fns {
		name, err := benchmarkFuncName(fn)
		if err != nil {
			return err
		}
		if shouldRun(name, cfg.RunRE, cfg.SkipRE) {
			if len(name) > len(maxLenBenchName) {
				maxLenBenchName = name
			}
			benchmarks = append(benchmarks, benchmark{
				name: name,
				fn:   fn,
			})
		} else {
			fmt.Printf("--- SKIP: %s\n", name)
		}
	}
	// maxLen is the length of the longest full benchmark name that will be
	// printed, used to align the result columns.
	maxLen := len(fullBenchmarkName(
		maxLenBenchName, cfg.AgentsList[len(cfg.AgentsList)-1]))
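	// Run every selected benchmark cfg.Count times for each configured agent
	// count, with GOMAXPROCS set to the agent count for that pass.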
	for _, agents := range cfg.AgentsList {
		runtime.GOMAXPROCS(agents)
		for _, b := range benchmarks {
			name := fullBenchmarkName(b.name, agents)
			for i := 0; i < int(cfg.Count); i++ {
				resetStore() // reset the metric store before each benchmark run
				result := runOne(extraMetrics, b.fn)
				// testing.Benchmark discards all output, so the only things we
				// can retrieve are the benchmark status and result.
				if result.skipped {
					fmt.Printf("--- SKIP: %s\n", name)
					continue
				}
				if result.failed {
					fmt.Printf("--- FAIL: %s\n", name)
					return fmt.Errorf("benchmark %q failed", name)
				}
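				// Print the result with the benchmark name left-aligned and
				// padded to maxLen so the columns line up across runs.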
				fmt.Printf("%-*s\t%s\n", maxLen, name, result.benchResult)
			}
		}
	}
	return nil
}
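
runOne is not part of this excerpt. As a rough illustration of how a wrapper around testing.Benchmark could produce the skipped/failed/benchResult values consumed above, here is a minimal, self-contained sketch; it is not the actual cmd/apmbench implementation, and it assumes BenchmarkFunc is compatible with func(*testing.B), which is not shown here.

package main

import "testing"

// Assumption: the real BenchmarkFunc type is not shown in this excerpt; a
// plain func(*testing.B) stands in for it here.
type BenchmarkFunc func(*testing.B)

// benchmarkResult mirrors the fields Run reads from runOne's result; the real
// type is likewise not part of this excerpt.
type benchmarkResult struct {
	skipped     bool
	failed      bool
	benchResult testing.BenchmarkResult
}

// runOneSketch runs fn under testing.Benchmark, which discards the benchmark's
// output, and records in a deferred call whether the final invocation was
// skipped or failed.
func runOneSketch(extraMetrics func(*testing.B), fn BenchmarkFunc) benchmarkResult {
	var result benchmarkResult
	result.benchResult = testing.Benchmark(func(b *testing.B) {
		defer func() {
			result.skipped = b.Skipped()
			result.failed = b.Failed()
		}()
		fn(b)
		extraMetrics(b)
	})
	return result
}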