in benchmarking/frameworks/glow/glow.py [0:0]
def _maybeNetRunner(self, output, results):
    """Parse net_runner benchmark output and fold its metrics into *results*.

    Two passes over the output lines:

    1. Latency sections — a header line like
       ``"<card >latency per <kind> [<backend>]:"`` followed by per-percentile
       lines like ``"... latency(<unit>): p<NN>: <value>"``.
    2. Accuracy sections — a header line like
       ``"<label>: <implA> vs <implB>(<detail>)"`` followed by
       ``"... abs error p<NN>: <value>"`` lines.

    Each parsed value is recorded via ``self._addOrAppendResult``.

    Args:
        output: Raw benchmark output; either a newline-joined string, an
            iterable of lines, or ``None`` when nothing was captured.
        results: Mutable mapping of metric key -> result entry, updated in
            place through ``self._addOrAppendResult``.

    Returns:
        ``False`` when *output* is ``None``; otherwise falls through and
        returns ``None``.
        # NOTE(review): both are falsy, so truthiness callers behave the
        # same — confirm no caller compares ``== False`` before changing.
    """
    if output is None:
        return False
    rows = output
    if isinstance(output, string_types):
        rows = output.split("\n")

    # Compile once; each pattern is applied to every output line below.
    header_re = re.compile(r"(.*)latency per (.*) \[(.*)\]:")
    latency_re = re.compile(r".*latency\((.*)\): p(.*): (.*)")
    compare_re = re.compile(r"(.*): (.*) vs (.*)\((.*)\)")
    abs_err_re = re.compile(r".*abs error p(.*): (.*)")

    # Pass 1: latency percentiles grouped under each "latency per ..." header.
    i = 0
    while i < len(rows):
        match = header_re.search(rows[i])
        if match:
            # "glow" is the primary backend (NET); anything else is SECONDARY.
            mtype = "NET" if match.group(3) == "glow" else "SECONDARY"
            name = match.group(3)
            latency_kind = match.group(2)
            card = match.group(1)
            if card:
                # A non-empty prefix before "latency per" marks a per-card
                # measurement; tag the kind so keys stay distinct.
                latency_kind = "card " + latency_kind
            i += 1
            # Consume percentile lines until the next "latency per" header.
            while i < len(rows) and "latency per" not in rows[i].lower():
                match = latency_re.search(rows[i].lower())
                if match:
                    unit = match.group(1)
                    percentile = "p" + match.group(2)
                    value = float(match.group(3))
                    self._addOrAppendResult(
                        results,
                        " ".join(
                            [mtype, name, "net_runner", latency_kind, percentile]
                        ),
                        value,
                        {
                            "type": mtype,
                            "metric": " ".join(
                                [name, "net_runner", latency_kind, percentile]
                            ),
                            "unit": unit,
                            "values": [],
                        },
                    )
                i += 1
        else:
            i += 1

    # Pass 2: pairwise "A vs B" accuracy sections with abs-error percentiles.
    i = 0
    while i < len(rows):
        match = compare_re.search(rows[i])
        if match:
            # Sort the two implementation names so "A vs B" and "B vs A"
            # aggregate under one metric key.
            test_impls1, test_impls2 = sorted([match.group(2), match.group(3)])
            i += 1
            # Consume consecutive "abs error" lines belonging to this pair.
            while i < len(rows) and "abs error" in rows[i].lower():
                match = abs_err_re.search(rows[i].lower())
                if match:
                    percentile = "p" + match.group(1)
                    value = float(match.group(2))
                    self._addOrAppendResult(
                        results,
                        " ".join(
                            [
                                "NET",
                                test_impls1,
                                "vs",
                                test_impls2,
                                "abs error",
                                percentile,
                            ]
                        ),
                        value,
                        {
                            "type": "NET",
                            "metric": " ".join(
                                [
                                    test_impls1,
                                    "vs",
                                    test_impls2,
                                    "abs error",
                                    percentile,
                                ]
                            ),
                            "unit": "scalar",
                            "values": [],
                        },
                    )
                i += 1
        else:
            i += 1