# Dassl.pytorch/tools/parse_test_res.py
def parse_function(*metrics, directory="", args=None, end_signal=None):
    """Parse ``log.txt`` in every subdirectory of *directory* and average metrics.

    Each metric descriptor is a dict with a compiled ``"regex"`` whose first
    capture group holds a float, and a ``"name"`` under which the value is
    recorded. Matching only starts once a line equal to *end_signal* has been
    seen, so intermediate results earlier in the log are ignored.

    Args:
        *metrics: metric descriptors, each ``{"name": str, "regex": Pattern}``.
        directory (str): parent directory containing one run per subdirectory.
        args: optional namespace; when ``args.ci95`` is truthy, spread is
            reported as a 95% confidence interval instead of the std dev.
        end_signal (str): sentinel line marking the start of the result block.

    Returns:
        OrderedDict: metric name -> average value across all parsed runs.

    Raises:
        FileNotFoundError: if a subdirectory has no readable ``log.txt``.
        RuntimeError: if no metric was found in any log file.
    """
    print("===")
    print(f"Parsing files in {directory}")
    subdirs = listdir_nohidden(directory, sort=True)

    outputs = []
    for subdir in subdirs:
        fpath = osp.join(directory, subdir, "log.txt")
        # Explicit raise instead of assert: asserts vanish under `python -O`.
        if not check_isfile(fpath):
            raise FileNotFoundError(f"Log file not found: {fpath}")

        good_to_go = False
        output = OrderedDict()
        # Iterate the file lazily instead of readlines() — same order,
        # no full-file list in memory.
        with open(fpath, "r") as f:
            for line in f:
                line = line.strip()
                if line == end_signal:
                    # From here on, metric lines are the final results.
                    good_to_go = True
                for metric in metrics:
                    match = metric["regex"].search(line)
                    if match and good_to_go:
                        if "file" not in output:
                            output["file"] = fpath
                        output[metric["name"]] = float(match.group(1))

        if output:
            outputs.append(output)

    if not outputs:
        raise RuntimeError(f"Nothing found in {directory}")

    # Collect each metric's values across runs ("file" is excluded).
    metrics_results = defaultdict(list)
    for output in outputs:
        msg = ""
        for key, value in output.items():
            if isinstance(value, float):
                msg += f"{key}: {value:.1f}%. "
            else:
                msg += f"{key}: {value}. "
            if key != "file":
                metrics_results[key].append(value)
        print(msg)

    # Bug fix: args defaults to None, so args.ci95 would raise
    # AttributeError — fall back to the std-dev path in that case.
    use_ci95 = args is not None and getattr(args, "ci95", False)

    output_results = OrderedDict()
    for key, values in metrics_results.items():
        avg = np.mean(values)
        std = compute_ci95(values) if use_ci95 else np.std(values)
        print(f"* average {key}: {avg:.1f}% +- {std:.1f}%")
        output_results[key] = avg
    print("===")

    return output_results