in benchmarking/harness.py [0:0]
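# Relies on module-level imports of copy, shutil, tempfile, and time, plus the
# getFrameworks, getReporters, and runOneBenchmark helpers used below.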
def runBenchmark(self, info, platform, benchmarks):
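    # Optionally reboot the device so every run starts from a clean state.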
    if self.args.reboot:
        platform.rebootDevice()
    for idx, benchmark in enumerate(benchmarks):
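        # Each benchmark run gets its own unique scratch directory, prefixed
        # with the user identifier.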
        tempdir = tempfile.mkdtemp(
            prefix="_".join(["aibench", str(self.args.user_identifier), ""])
        )
        # We need a separate framework instance per thread; this will be
        # consolidated later. For now, create a new framework per run.
        frameworks = getFrameworks()
        framework = frameworks[self.args.framework](tempdir, self.args)
        reporters = getReporters(self.args)
        # Check that the benchmark's framework matches the command-line choice.
        if "model" in benchmark and "framework" in benchmark["model"]:
            assert benchmark["model"]["framework"] == self.args.framework, (
                "Framework specified in the json file {} does not match "
                "the command line argument {}".format(
                    benchmark["model"]["framework"], self.args.framework
                )
            )
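        # In debug mode, force every test to log its output.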
        if self.args.debug:
            for test in benchmark["tests"]:
                test["log_output"] = True
        if self.args.env:
            for test in benchmark["tests"]:
                cmd_env = dict(self.args.env)
                if "env" in test:
                    cmd_env.update(test["env"])
                test["env"] = cmd_env
        b = copy.deepcopy(benchmark)
        i = copy.deepcopy(info)
        status = runOneBenchmark(
            i,
            b,
            framework,
            platform,
            self.args.platform,
            reporters,
            self._lock,
            self.args.cooldown,
            self.args.user_identifier,
            self.args.local_reporter,
        )
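        # Accumulate the overall status across all benchmark runs.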
        self.status |= status
        if idx != len(benchmarks) - 1:
            # Cool-down period between consecutive benchmark runs; a model can
            # override the default via its "cooldown" field.
            cooldown = self.args.cooldown
            if "model" in benchmark and "cooldown" in benchmark["model"]:
                cooldown = float(benchmark["model"]["cooldown"])
            time.sleep(cooldown)
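        # Unless debugging, remove the scratch directory and any preprocessed
        # input files (debug runs keep them around for inspection).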
        if not self.args.debug:
            shutil.rmtree(tempdir, ignore_errors=True)
            for test in benchmark["tests"]:
                if "preprocess" in test and "files" in test["preprocess"]:
                    for f in test["preprocess"]["files"].values():
                        shutil.rmtree(f["location"], ignore_errors=True)
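# For reference: the env merge above gives per-test values precedence over the
# command-line environment. A minimal standalone sketch with hypothetical
# values (no harness dependencies):
#
#     args_env = {"OMP_NUM_THREADS": "4", "LOG_LEVEL": "info"}  # CLI env
#     test = {"env": {"LOG_LEVEL": "debug"}}                    # per-test env
#     cmd_env = dict(args_env)             # start from the CLI environment
#     cmd_env.update(test.get("env", {}))  # per-test entries win
#     assert cmd_env == {"OMP_NUM_THREADS": "4", "LOG_LEVEL": "debug"}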