def _maybeAddBenchSummary()

in benchmarking/frameworks/glow/glow.py
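
Parses "BenchResult" rows from Glow benchmark output and adds one runtime and
one throughput entry per row to results. The method relies on module-level
imports of defaultdict (from collections) and string_types (presumably from
six, for Python 2/3 string compatibility).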


    def _maybeAddBenchSummary(self, output, results):
        """Scan benchmark output for "BenchResult" rows and record each
        benchmark's runtime and throughput in results. Returns False if
        output is None."""
        if output is None:
            return False

        # Per-benchmark (runtime, throughput) field indices within the
        # comma-separated "BenchResult" rows; benchmarks not listed here
        # fall back to fields 10 and 11.
        fieldMap = defaultdict(
            lambda: (10, 11),
            {
                "AddBench": (10, 11),
                "BatchGemmBench": (12, 13),
                "GemmBench": (12, 13),
                "GemmParallelBench": (11, 12),
                "SLSBench": (10, 11),
                "TransposeBench": (11, 12),
            },
        )

        # Accept either a pre-split list of rows or one raw string.
        rows = output
        if isinstance(output, string_types):
            rows = output.split("\n")
        # Each summary row looks like "BenchResult,<name>,<fields...>".
        for row in rows:
            try:
                fields = row.split(",")
                if fields[0] == "BenchResult":
                    benchName = fields[1]

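                    # Template records for this benchmark's two metrics
                    # (their values lists are accumulated by
                    # _addOrAppendResult).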
                    runtimeRecord = {
                        "type": "NET",
                        "metric": "{}:runtime".format(benchName),
                        "unit": "second",
                        "values": [],
                    }
                    throughputRecord = {
                        "type": "SECONDARY",
                        "metric": "{}:throughput".format(benchName),
                        "unit": "Gb/second",
                        "values": [],
                    }

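                    # fieldMap yields the (runtime, throughput) field
                    # indices for this benchmark.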
                    self._addOrAppendResult(
                        results,
                        "NET {}:runtime".format(benchName),
                        float(fields[fieldMap[benchName][0]]),
                        runtimeRecord,
                    )
                    self._addOrAppendResult(
                        results,
                        "SECONDARY {}:throughput".format(benchName),
                        float(fields[fieldMap[benchName][1]]),
                        throughputRecord,
                    )

            # Skip rows that are malformed or hold non-numeric fields.
            except (IndexError, ValueError):
                pass
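
A minimal sketch of how one of these rows is parsed, using an invented
GemmBench line (the filler fields a..j stand in for whatever the real output
carries at those positions; only the indices matter here). The defaultdict
means any benchmark name missing from the map falls back to fields 10 and 11:

    from collections import defaultdict

    # Hypothetical row: "BenchResult", bench name, then metric fields.
    # Per fieldMap, GemmBench's runtime sits at field 12, throughput at 13.
    sample = "BenchResult,GemmBench,a,b,c,d,e,f,g,h,i,j,0.0042,118.3"

    fieldMap = defaultdict(lambda: (10, 11), {"GemmBench": (12, 13)})

    fields = sample.split(",")
    if fields[0] == "BenchResult":
        benchName = fields[1]
        runtimeIdx, throughputIdx = fieldMap[benchName]
        print("{} runtime: {} s, throughput: {} Gb/s".format(
            benchName, float(fields[runtimeIdx]), float(fields[throughputIdx])))
    # -> GemmBench runtime: 0.0042 s, throughput: 118.3 Gb/s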