in perfkitbenchmarker/linux_packages/speccpu.py
def _ExtractScore(stdout, vm, keep_partial_results, runspec_metric):
"""Extracts the SPEC(int|fp) score from stdout.
Args:
stdout: String. stdout from running RemoteCommand.
vm: The vm instance where SPEC CPU was run.
keep_partial_results: Boolean. True if partial results should be extracted
in the event that not all benchmarks were successfully run. See the
"runspec_keep_partial_results" flag for more info.
    runspec_metric: String. Indicates whether this is a speed or rate run.
pyformat: disable
  Sample input for SPECint (refer to the unit tests for more examples):
...
...Base Peak
============================================= ==========================
400.perlbench 9770 417 23.4 * 9770 417 23.4 *
401.bzip2 9650 565 17.1 * 9650 565 17.1 *
403.gcc 8050 364 22.1 *
429.mcf 9120 364 25.1 *
445.gobmk 10490 499 21.0 *
456.hmmer 9330 491 19.0 *
458.sjeng 12100 588 20.6 *
462.libquantum 20720 468 44.2 *
464.h264ref 22130 700 31.6 *
471.omnetpp 6250 349 17.9 *
473.astar 7020 482 14.6 *
483.xalancbmk 6900 248 27.8 *
Est. SPECint(R)_base2006 22.7
Est. SPECint(R)_peak2006 20
Sample input for SPECfp:
...
...Base Peak
============================================= ============================
410.bwaves 13590 717 19.0 * 13550 710 19.0 *
416.gamess 19580 923 21.2 *
433.milc 9180 480 19.1 *
434.zeusmp 9100 600 15.2 *
435.gromacs 7140 605 11.8 *
436.cactusADM 11950 1289 9.27 *
437.leslie3d 9400 859 10.9 *
444.namd 8020 504 15.9 *
447.dealII 11440 409 28.0 *
450.soplex 8340 272 30.6 *
453.povray 5320 231 23.0 *
454.calculix 8250 993 8.31 *
459.GemsFDTD 10610 775 13.7 *
465.tonto 9840 565 17.4 *
470.lbm 13740 365 37.7 *
481.wrf 11170 788 14.2 *
482.sphinx3 19490 668 29.2 *
Est. SPECfp(R)_base2006 17.5
Est. SPECfp(R)_peak2006 20
pyformat: enable
Returns:
A list of sample.Sample objects.
"""
results = []
speccpu_vm_state = getattr(vm, VM_STATE_ATTR, None)
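  # The results table opens with a row of '=' characters and ends at the
  # summary line matched by the log_format regex stored in the VM state. For a
  # line like "Est. SPECint(R)_base2006 22.7", log_format presumably captures
  # the metric name in group 1 and the score in group 2 (see group() below).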
re_begin_section = re.compile('^={1,}')
re_end_section = re.compile(speccpu_vm_state.log_format)
result_section = []
in_result_section = False
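  # In non-base run modes, the "Est. SPEC..._peak..." summary line follows
  # immediately after the base summary line; these variables track it across
  # loop iterations.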
at_peak_results_line, peak_name, peak_score = False, None, None
  # Extract the SPEC CPU 2017 version; stays empty for CPU2006 output.
cpu2017_version = ''
  for line in stdout.splitlines():
    if re.search(r'Tested with SPEC CPU.*', line):
      version_groups = re.search(r'2017 v(.*?) on', line)
      if version_groups:
        cpu2017_version = version_groups.group(1)
      break
# Extract the summary section
for line in stdout.splitlines():
if in_result_section:
result_section.append(line)
    # Search for the beginning of the result section.
match = re.search(re_begin_section, line)
if match:
assert not in_result_section
in_result_section = True
continue
    # Search for the end of the result section (the summary score line).
match = re.search(re_end_section, line)
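    # When the previous iteration matched the base summary line (in a peak or
    # base+peak run), this line is the peak summary; parse its name and score.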
if at_peak_results_line:
_, peak_name, peak_score = line.split()
at_peak_results_line = False
if match:
assert in_result_section
spec_name = str(match.group(1))
if runspec_metric == 'speed':
spec_name += ':speed'
try:
spec_score = float(match.group(2))
except ValueError:
# Partial results may get reported as '--' instead of a number.
spec_score = None
if FLAGS.spec_runmode != BASE_MODE:
at_peak_results_line = True
in_result_section = False
      # Remove the final SPEC(int|fp) summary line, which has only 2 columns.
result_section.pop()
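  # Metadata attached to every sample emitted below.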
metadata = {
'runspec_config': speccpu_vm_state.runspec_config,
'runspec_config_md5sum': _GenerateMd5sum(speccpu_vm_state.runspec_config),
'runspec_iterations': str(FLAGS.runspec_iterations),
'runspec_enable_32bit': str(FLAGS.runspec_enable_32bit),
'runspec_define': FLAGS.runspec_define,
'runspec_metric': runspec_metric,
'spec_runmode': FLAGS.spec_runmode,
'spec17_copies': FLAGS.spec17_copies,
'spec17_threads': FLAGS.spec17_threads,
'spec17_fdo': FLAGS.spec17_fdo,
'spec17_subset': FLAGS.spec17_subset,
'gcc_version': build_tools.GetVersion(vm, 'gcc'),
'CPU2017_version': cpu2017_version,
}
missing_results = []
scores = []
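  # Per-benchmark base scores, kept so an overall score can be estimated via
  # the geometric mean if the official SPEC score is missing.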
for benchmark in result_section:
# Skip over failed runs, but count them since they make the overall
# result invalid.
not_reported = benchmark.count('NR')
if not_reported > 1 or (
not_reported == 1 and FLAGS.spec_runmode != PEAK_MODE
):
logging.warning('SPEC CPU missing result: %s', benchmark)
missing_results.append(str(benchmark.split()[0]))
continue
base_score_str, peak_score_str = None, None
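    # Example rows from the docstring above: a base-only row such as
    # "403.gcc 8050 364 22.1 *" splits into 5 fields, while a combined row
    # such as "400.perlbench 9770 417 23.4 * 9770 417 23.4 *" splits into 9.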
if FLAGS.spec_runmode == BASE_MODE:
# name, copies/threads, time, score, misc
name, _, _, base_score_str, _ = benchmark.split()
elif FLAGS.spec_runmode == PEAK_MODE:
# name, base_not_reported(NR), copies/threads, time, score, misc
name, _, _, _, peak_score_str, _ = benchmark.split()
else:
# name, copies/threads, base time, base score, base misc,
# copies/threads, peak time, peak score, peak misc
name, _, _, base_score_str, _, _, _, peak_score_str, _ = benchmark.split()
if runspec_metric == 'speed':
name += ':speed'
if base_score_str:
base_score_float = float(base_score_str)
scores.append(base_score_float)
results.append(sample.Sample(str(name), base_score_float, '', metadata))
if peak_score_str:
peak_score_float = float(peak_score_str)
results.append(
sample.Sample(str(name) + ':peak', peak_score_float, '', metadata)
)
if spec_score is None and FLAGS.spec_runmode != PEAK_MODE:
missing_results.append(spec_name)
if missing_results:
if keep_partial_results:
metadata['partial'] = 'true'
metadata['missing_results'] = ','.join(missing_results)
else:
raise errors.Benchmarks.RunError(
'speccpu: results missing, see log: ' + ','.join(missing_results)
)
if spec_score:
results.append(sample.Sample(spec_name, spec_score, '', metadata))
elif FLAGS.runspec_estimate_spec:
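    # SPEC reports the overall metric as a geometric mean of per-benchmark
    # ratios, so approximate a missing score the same way from base scores.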
estimated_spec_score = _GeometricMean(scores)
results.append(
sample.Sample(
'estimated_' + spec_name, estimated_spec_score, '', metadata
)
)
if peak_score:
results.append(sample.Sample(peak_name, float(peak_score), '', metadata))
return results