def fetch_metrics_()

in reporting/utils/benchmarks.py [0:0]


    def fetch_metrics_(self):
        # safe_load avoids constructing arbitrary objects and the context manager
        # closes the file handle promptly.
        with open('config/benchmarks.yaml', 'r') as config_file:
            config = yaml.safe_load(config_file)
        for benchmark_keys in config['benchmarks']:
            metric_prefix = benchmark_keys['Metric Prefix']
            metric_suffix = benchmark_keys['Metric Suffix']
            benchmark_type = benchmark_keys['Type']
            # Validate the type up front so an unknown type cannot raise a KeyError
            # when the header set is looked up.
            if benchmark_type not in self.HEADERS:
                logging.error("benchmark {} has invalid type {}".format(metric_prefix, benchmark_type))
                continue
            headers = self.HEADERS[benchmark_type]
            benchmark = {}
            alarms = {}
            for k in [*self.META_INFO_HEADERS, *headers]:
                # Find a key and value pair that corresponds to a header and metric.
                v = None
                if k in benchmark_keys:
                    v = benchmark_keys[k]  # v may be None
                elif k in Benchmarks.DEFAULT_METRIC_KEYS:
                    v = Benchmarks.DEFAULT_METRIC_KEYS[k]

                if v is None:
                    continue
                elif k in Benchmarks.CATEGORICAL_HEADERS or k in Benchmarks.META_INFO_HEADERS:
                    benchmark[k] = v
                else:
                    metric = "{}.{}.{}".format(metric_prefix, v, metric_suffix)
                    benchmark[k] = self._get_metric(metric)
                    alarms[k] = self._get_alarm_uris(metric)

            # Fill in any missing headers with blank values.
            for h in headers:
                if h not in benchmark:
                    benchmark[h] = ''

            self._benchmarks.append((benchmark, alarms))

        self._metrics_fetched = True
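
For context, the loop above expects config/benchmarks.yaml to parse into a top-level 'benchmarks' list whose entries carry a 'Type', a 'Metric Prefix', a 'Metric Suffix', and one value per header. The sketch below shows such an entry as the Python structure yaml.safe_load would return; the concrete type, header names, and values are illustrative assumptions, not taken from the source.

    # Hypothetical parsed config entry; only the key layout is inferred from the
    # loop above, all concrete names and values are illustrative.
    example_config = {
        'benchmarks': [
            {
                'Type': 'latency',               # must be a key of Benchmarks.HEADERS
                'Metric Prefix': 'checkout.api',
                'Metric Suffix': 'p99',
                'Team': 'payments',              # categorical / meta-info value, copied as-is
                'Requests': 'requests_total',    # non-categorical header, resolved to a metric
            },
        ],
    }

For the hypothetical 'Requests' header, the loop would build the metric name 'checkout.api.requests_total.p99', fetch it with self._get_metric, and record its alarm URIs via self._get_alarm_uris. Headers missing from an entry fall back to Benchmarks.DEFAULT_METRIC_KEYS, and any still absent are filled with blank strings.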