def _uploadOneBenchmark()

in benchmarking/run_remote.py [0:0]
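
Uploads all files referenced by a single benchmark: the model's "files"
and "libraries" entries, then each test's "input_files" and, for accuracy
tests (metric == "error"), its "output_files". Model "files" entries that
are uploaded successfully are afterwards removed from the in-memory
benchmark via _del_from_benchmark. A sketch of the benchmark layout this
method expects follows the listing below.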


    def _uploadOneBenchmark(self, benchmark):
        filename = benchmark["filename"]
        one_benchmark = benchmark["content"]
        # TODO refactor the code to collect all files to upload
        # ref paths of model "files" entries that were uploaded and whose
        # local references should be dropped from the benchmark afterwards
        del_paths = []
        if "model" in one_benchmark:
            if "files" in one_benchmark["model"]:
                for field in one_benchmark["model"]["files"]:
                    value = one_benchmark["model"]["files"][field]
                    assert (
                        "location" in value
                    ), "location field is missing in benchmark {}".format(filename)
                    ref_path = ["files", field]
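                    # _uploadFile returns a truthy value when the file was
                    # uploaded, marking the entry for removal below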
                    if self._uploadFile(value, filename, benchmark, ref_path):
                        del_paths.append(ref_path)
            if "libraries" in one_benchmark["model"]:
                for value in one_benchmark["model"]["libraries"]:
                    assert (
                        "location" in value
                    ), "location field is missing in benchmark {}".format(filename)
                    # no ref_path is passed, so library entries stay in the benchmark
                    self._uploadFile(value, filename, benchmark)

        for del_path in del_paths:
            self._del_from_benchmark(benchmark["content"]["model"], del_path)

        # upload test files
        assert (
            "tests" in one_benchmark
        ), "tests field is missing in benchmark {}".format(filename)
        tests = one_benchmark["tests"]
        for test in tests:
            if "input_files" in test:
                self._uploadTestFiles(test["input_files"], filename)
            # ignore the output files for non-accuracy metrics
            if "output_files" in test and test["metric"] == "error":
                self._uploadTestFiles(test["output_files"], filename)
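
A minimal sketch of the benchmark dict this method expects, inferred from
the field accesses above; the filenames, locations, and the exact shape of
the input_files/output_files entries are illustrative assumptions, not
taken from the repository:

    benchmark = {
        "filename": "example_benchmark.json",  # spec file, used in error messages
        "content": {
            "model": {
                "files": {
                    # every entry must carry a "location" (asserted above)
                    "graph": {"location": "/path/to/model.pb"},  # illustrative
                },
                "libraries": [
                    {"location": "/path/to/libops.so"},  # illustrative
                ],
            },
            "tests": [
                {
                    # output_files are uploaded only when metric == "error"
                    "metric": "error",
                    "input_files": {"data": {"location": "/path/to/input.bin"}},
                    "output_files": {"golden": {"location": "/path/to/golden.bin"}},
                },
            ],
        },
    }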
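
_del_from_benchmark is not shown in this section. A plausible sketch,
assuming del_path is the list of keys leading to the entry to drop
(e.g. ["files", "graph"]):

    def _del_from_benchmark(self, model, del_path):
        # walk to the parent of the leaf entry, then delete the leaf, so
        # the uploaded file's local reference disappears from the benchmark
        entry = model
        for key in del_path[:-1]:
            entry = entry[key]
        del entry[del_path[-1]]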