in perfrunbook/utilities/measure_and_plot_basic_pmu_counters.py [0:0]
def plot_counter_stat(csv, logfile, plot, stat_name, counter_numerator,
                      counter_denominator, scale):
"""
Process the returned csv file into a time-series statistic to plot and
also calculate some useful aggregate stats.
"""
df = pd.read_csv(csv, sep='|',
names=['time', 'count', 'rsrvd1', 'event',
'rsrvd2', 'frac', 'rsrvd3', 'rsrvd4'],
dtype={'time': np.float64, 'count': np.float64,
'rsrvd1': str, 'event': str, 'rsrvd2': str,
'frac': np.float64, 'rsrvd3': str, 'rsrvd4': str})
df_processed = pd.DataFrame()
    # Align numerator and denominator samples by index for element-wise division.
    numerator = df[df['event'] == counter_numerator]['count'].reset_index(drop=True)
    denominator = df[df['event'] == counter_denominator]['count'].reset_index(drop=True)
    df_processed[stat_name] = numerator / denominator * scale
    df_processed[counter_numerator] = numerator
    df_processed[counter_denominator] = denominator
    # Drop intervals where either counter has no sample.
    df_processed.dropna(inplace=True)
# Calculate some meaningful aggregate stats for comparing time-series plots
geomean = stats.gmean(df_processed[stat_name])
p50 = stats.scoreatpercentile(df_processed[stat_name], 50)
p90 = stats.scoreatpercentile(df_processed[stat_name], 90)
p99 = stats.scoreatpercentile(df_processed[stat_name], 99)
xtitle = f"gmean:{geomean:>6.2f} p50:{p50:>6.2f} p90:{p90:>6.2f} p99:{p99:>6.2f}"
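    # Optionally log the processed time-series to a file and/or draw an
    # in-terminal plot, using the aggregate stats as the x-axis title.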
if logfile:
df_processed.to_csv(logfile)
if plot:
plot_terminal(df_processed, stat_name, xtitle)
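
# Usage sketch (illustrative, not part of the original script): derive IPC from
# 'instructions' and 'cycles' events sampled into a '|'-separated counter CSV.
# The file names, event names, and scale below are assumptions for the example.
#
#   plot_counter_stat("pmu_counters.csv", logfile="ipc_log.csv", plot=True,
#                     stat_name="ipc", counter_numerator="instructions",
#                     counter_denominator="cycles", scale=1.0)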