def run()

in benchmarking/run_remote.py [0:0]


    def run(self):
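        # One-shot query/management modes: each of these flags prints or
        # returns its result without submitting a benchmark run.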
        if self.args.list_devices:
            devices = self.db.listDevices(self.args.job_queue)
            self._listDevices()
            return devices
        if self.args.list_job_queues:
            self._printJobQueues()
            return
        if self.args.fetch_status or self.args.fetch_result:
            result = self._fetchResult()
            return result
        if self.args.kill:
            self._killJob()
            return
        if self.args.query_num_devices:
            return self._queryNumDevices(self.args.query_num_devices)
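        # Everything below submits a new benchmark run; validate the
        # required arguments first.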

        assert self.args.benchmark_file, "--benchmark_file (-b) must be specified"
        assert self.args.devices, "--devices must be specified"
        assert self.args.framework, "--framework must be specified"
        assert self.args.platform, "--platform must be specified"
        assert self.args.repo_dir, "--repo_dir must be specified"
        assert (
            (self.args.info is not None)
            and (self.args.custom_binary is None)
            and (self.args.pre_built_binary is None)
        ) or (
            self.args.info is None
        ), "--info cannot co-exist with --custom_binary or --pre_built_binary"

        list_job_queues = self._listJobQueues()
        if not self.args.force_submit:
            self._checkDevices(self.args.devices, self.args.hashes)
            assert (
                self.args.job_queue != "*" and self.args.job_queue in list_job_queues
            ), "--job_queue must be choosen from " + " ".join(list_job_queues)

        self.tempdir = tempfile.mkdtemp(prefix="aibench")
        program_filenames = {}
        if self.args.info:
            self.info = json.loads(self.args.info)
        else:
            self.info = {"treatment": {"programs": {}}}
            if self.args.string_map:
                self.info["treatment"]["string_map"] = str(self.args.string_map)

        assert ("treatment" in self.info) and ("programs" in self.info["treatment"]), (
            'In --info, field treatment must exist. In info["treatment"] '
            "program field must exist (may be None)"
        )

        # Resolve the benchmark binary: a program location given in --info
        # takes precedence, then --custom_binary, then --pre_built_binary.
        if (
            "programs" in self.info["treatment"]
            and "program" in self.info["treatment"]["programs"]
        ):
            binary = self.info["treatment"]["programs"]["program"]["location"]
        elif self.args.custom_binary:
            binary = self.args.custom_binary
        else:
            binary = self.args.pre_built_binary
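        # Build the program in a background thread while the benchmark
        # configurations are prepared and uploaded below.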
        t = BuildProgram(
            self.args, self.file_handler, self.tempdir, program_filenames, binary
        )
        t.start()

        benchmarks = getBenchmarks(self.args.benchmark_file, self.args.framework)

        self._updateBenchmarksWithArgs(benchmarks, self.args)

        for benchmark in benchmarks:
            self._uploadOneBenchmark(benchmark)
            if self.args.debug:
                for test in benchmark["content"]["tests"]:
                    test["log_output"] = True
            if self.args.env:
                env = {}
                env_vars = self.args.env.split()
                for env_var in env_vars:
                    k, v = parse_kwarg(env_var)
                    env[k] = v
                for test in benchmark["content"]["tests"]:
                    cmd_env = {}
                    cmd_env.update(env)
                    if "env" in test:
                        cmd_env.update(test["env"])
                    test["env"] = cmd_env
        t.join()

        assert (
            "program" in program_filenames
        ), "program does not exist. The build may have failed."

        for fn in program_filenames:
            self.info["treatment"]["programs"][fn] = {"location": program_filenames[fn]}

        # Pass meta file from build to benchmark
        meta = getMeta(self.args, self.args.platform)
        if meta:
            assert "meta" not in self.info, "info field already has a meta field"
            self.info["meta"] = meta

        new_devices = self.devices.getFullNames(self.args.devices)
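        # Group this submission under the supplied identifier, or generate
        # a random one so the run can still be tracked.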
        user_identifier = (
            int(self.args.user_identifier)
            if self.args.user_identifier
            else randint(1, 1000000000000000)
        )
        user = getuser() if not self.args.user_string else self.args.user_string
        hashes = self.args.hashes
        for benchmark in benchmarks:
            data = {
                "benchmark": benchmark,
                "info": self.info,
            }
            self.db.submitBenchmarks(data, new_devices, user_identifier, user, hashes)

        if self.args.async_submit:
            print("Job submitted.")
            self._printRunDetailsURL(user_identifier)
            return

        self.url_printer.printURL(self.scuba_dataset, user_identifier, benchmarks)

        if not self.args.debug:
            shutil.rmtree(self.tempdir, True)

        if self.args.screen_reporter:
            self._screenReporter(user_identifier)
            self._printRunDetailsURL(user_identifier)

        # Clean up
        try:
            rm_list = glob.glob("/tmp/aibench*")
            for f in rm_list:
                if os.path.isdir(f):
                    shutil.rmtree(f, True)
                if os.path.isfile(f):
                    os.remove(f)
        except Exception:
            # Best-effort cleanup; ignore failures.
            pass
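
For reference, a minimal sketch of the --info payload that the assertions
above accept. Only the nesting of "treatment" and "programs" is checked by
run(); the "location" value below is a hypothetical path for illustration:

    # Minimal --info payload; the binary path is a made-up example.
    info = {
        "treatment": {
            "programs": {
                "program": {"location": "/tmp/example/benchmark_binary"},
            },
        },
    }

Similarly, --env takes whitespace-separated key=value pairs. Assuming
parse_kwarg splits each pair on the first "=" (its definition is not shown
in this file), the parsing amounts to:

    # Sketch of the assumed parse_kwarg behavior: split on the first "=".
    def parse_kwarg(kwarg):
        key, _, value = kwarg.partition("=")
        return key, value

    env = dict(parse_kwarg(p) for p in "OMP_NUM_THREADS=4 LD_DEBUG=libs".split())
    # env == {"OMP_NUM_THREADS": "4", "LD_DEBUG": "libs"}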