def runBenchmark()

in benchmarking/platforms/ios/ios_platform.py
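
Runs a single benchmark on an attached iOS device and returns the captured
log together with any extra run metadata.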


    def runBenchmark(self, cmd, *args, **kwargs):
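        """Run the benchmark command on the attached iOS device.

        The paired arguments are written to a benchmark.json file and pushed
        to the device, then the installed app bundle is launched. Returns a
        tuple (output, meta): the captured screen log (or the XCTrace
        profiler output when profiling succeeds) and a dict of extra run
        metadata.
        """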
        if not isinstance(cmd, list):
            cmd = shlex.split(cmd)
        assert self.util.bundle_id is not None, "Bundle id is not specified"

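        # Serialize the paired "--key value" arguments to JSON and push the
        # file to the device.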
        arguments = self.getPairedArguments(cmd)
        argument_filename = os.path.join(self.tempdir, "benchmark.json")
        arguments_json = json.dumps(arguments, indent=2, sort_keys=True)
        with open(argument_filename, "w") as f:
            f.write(arguments_json)
        tgt_argument_filename = os.path.join(self.tgt_dir, "benchmark.json")
        self.util.push(argument_filename, tgt_argument_filename)

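        # Base flags for the device launcher (ios-deploy style): run the
        # already-installed bundle without an interactive session and with
        # unbuffered output.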
        run_cmd = [
            "--bundle",
            self.app,
            "--noninteractive",
            "--noinstall",
            "--unbuffered",
        ]
        platform_args = {}
        if "platform_args" in kwargs:
            platform_args = kwargs["platform_args"]
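            # In power measurement mode, just launch the app and return
            # quickly, bounding the wait with a short timeout.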
            if platform_args.get("power"):
                platform_args["timeout"] = 10
                run_cmd += ["--justlaunch"]
        if platform_args.get("enable_profiling", False):
            # attempt to run with profiling, else fall back to a standard run
            try:
                profiler_args = " ".join(["--" + x + " " + arguments[x] for x in arguments])
                xctrace = getProfilerByUsage(
                    "ios",
                    None,
                    platform=self,
                    model_name=platform_args.get("model_name", None),
                    args=profiler_args,
                )
                if xctrace:
                    # start() returns a future; block until profiling finishes
                    future = xctrace.start()
                    output, meta = future.result()
                    if not output or not meta:
                        raise RuntimeError("No data returned from XCTrace profiler.")
                    return output, meta
            except Exception:
                getLogger().critical(
                    f"An error occurred when running XCTrace profiler on device {self.platform} {self.platform_hash}.",
                    exc_info=True,
                )
        # meta is used to store any data about the benchmark run
        # that is not the output of the command
        meta = {}

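        # Forward the paired arguments to the app as a single "--args" string.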
        if arguments:
            run_cmd += [
                "--args",
                " ".join(["--" + x + " " + arguments[x] for x in arguments]),
            ]
        # the command may fail, but its error output still contains the log we need
        log_screen = self.util.run(run_cmd, **platform_args)
        return log_screen, meta
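
A minimal usage sketch, assuming a fully constructed platform instance
already bound to a connected device; the benchmark binary and its flags
below are hypothetical placeholders, not part of this file:

    # Sketch only: `platform` is assumed to be an IOSPlatform instance bound
    # to a connected device; the command string is a hypothetical example.
    cmd = "benchmark --net predict.pb --warmup 1 --iter 10"

    output, meta = platform.runBenchmark(
        cmd,
        platform_args={"power": False, "enable_profiling": False},
    )
    # output: the screen log captured from the device
    # meta:   extra run metadata (an empty dict for a standard run)

Note that a successful profiled run (enable_profiling set to True) returns
the XCTrace output and its metadata instead of the plain screen log.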