def as_flat_list()

in osbenchmark/metrics.py


    def as_flat_list(self):
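        """
        Flattens the nested results returned by as_dict() into a flat list of
        metric documents and returns them sorted by metric name.
        """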
        def op_metrics(op_item, key, single_value=False):
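            # Builds a metric document for one metric key of a task. Scalar metrics
            # are wrapped as {"single": value}; structured metrics are copied as-is.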
            doc = {
                "task": op_item["task"],
                "operation": op_item["operation"],
                "name": key
            }
            if single_value:
                doc["value"] = {"single":  op_item[key]}
            else:
                doc["value"] = op_item[key]
            if "meta" in op_item:
                doc["meta"] = op_item["meta"]
            return doc

        all_results = []
        for metric, value in self.as_dict().items():
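            # Operation-level metrics: emit one document per metric key that is present for the task.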
            if metric == "op_metrics":
                for item in value:
                    if "throughput" in item:
                        all_results.append(op_metrics(item, "throughput"))
                    if "latency" in item:
                        all_results.append(op_metrics(item, "latency"))
                    if "service_time" in item:
                        all_results.append(op_metrics(item, "service_time"))
                    if "processing_time" in item:
                        all_results.append(op_metrics(item, "processing_time"))
                    if "error_rate" in item:
                        all_results.append(op_metrics(item, "error_rate", single_value=True))
                    if "duration" in item:
                        all_results.append(op_metrics(item, "duration", single_value=True))
            elif metric == "ml_processing_time":
                for item in value:
                    all_results.append({
                        "job": item["job"],
                        "name": "ml_processing_time",
                        "value": {
                            "min": item["min"],
                            "mean": item["mean"],
                            "median": item["median"],
                            "max": item["max"]
                        }
                    })
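            # Transform metrics are reported per transform id as a single (mean) value.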
            elif metric.startswith("total_transform_") and value is not None:
                for item in value:
                    all_results.append({
                        "id": item["id"],
                        "name": metric,
                        "value": {
                            "single": item["mean"]
                        }
                    })
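            # Per-shard timings are already structured and are passed through unchanged.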
            elif metric.endswith("_time_per_shard"):
                if value:
                    all_results.append({"name": metric, "value": value})
            elif value is not None:
                result = {
                    "name": metric,
                    "value": {
                        "single": value
                    }
                }
                all_results.append(result)
        # Sorting is only necessary to have a stable order for tests. As we only have a small number of metrics, the overhead is negligible.
        return sorted(all_results, key=lambda m: m["name"])
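
A minimal usage sketch, assuming `results` is an instance of the class that defines
as_flat_list() (the variable name and the metric name filtered on below are illustrative,
not part of the method itself):

    flat = results.as_flat_list()
    # Each entry is a dict with at least "name" and "value"; entries built from
    # op_metrics additionally carry "task" and "operation".
    throughput_docs = [doc for doc in flat if doc["name"] == "throughput"]
    for doc in throughput_docs:
        print(doc["task"], doc["operation"], doc["value"])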