def _displayResult()

in benchmarking/remote/screen_reporter.py [0:0]


    def _displayResult(self, s):
        # only report once the job has reached a terminal state
        if s["status"] not in ("DONE", "FAILED", "USER_ERROR"):
            return
        # fetch the benchmark entries recorded for this job
        output = self.xdb.getBenchmarks(str(s["id"]))
        for r in output:
            if s["status"] == "DONE":
                # "result" and "benchmarks" are stored as JSON strings
                res = json.loads(r["result"]) if r["result"] else []
                benchmarks = json.loads(r["benchmarks"])
                metric = benchmarks["benchmark"]["content"]["tests"][0]["metric"]
                for identifier in res:
                    data = res[identifier]
                    if "NET latency" in data:
                        # report network latency, preferring p50 over mean
                        net_latency = data["NET latency"]
                        if "p50" in net_latency["summary"]:
                            net_delay = net_latency["summary"]["p50"]
                        elif "mean" in net_latency["summary"]:
                            net_delay = net_latency["summary"]["mean"]
                        else:
                            raise AssertionError("Net latency is not specified")
                        print("ID:{}\tNET latency: {}".format(identifier, net_delay))
                    elif metric == "generic":
                        if isinstance(data, dict):
                            # strip metadata; bail out if nothing else remains
                            if "meta" in data:
                                del data["meta"]
                            if not data:
                                return
                        # dump std printout to screen for custom_binary
                        if isinstance(data, list):
                            data = "\n".join(data)
                        print(data)
                if self.debug:
                    self._printLog(r)
            else:
                # FAILED / USER_ERROR: show the collected log instead of results
                self._printLog(r)
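
For reference, the sketch below shows the row shape the DONE branch expects; the field names and nesting are inferred from the method above, while the identifier "run_0", the metric value "delay", and the latency numbers are made up for illustration. The real method relies on json being imported at module level.

    import json

    # hypothetical row shaped like one element returned by xdb.getBenchmarks();
    # both "result" and "benchmarks" are JSON-encoded strings
    row = {
        "benchmarks": json.dumps(
            {"benchmark": {"content": {"tests": [{"metric": "delay"}]}}}
        ),
        "result": json.dumps(
            {"run_0": {"NET latency": {"summary": {"p50": 12.5, "mean": 13.1}}}}
        ),
    }

    res = json.loads(row["result"])
    # _displayResult reads this metric to decide whether to take the
    # "generic" branch; here it is parsed only for illustration
    metric = json.loads(row["benchmarks"])["benchmark"]["content"]["tests"][0]["metric"]
    for identifier, data in res.items():
        summary = data["NET latency"]["summary"]
        # prefer p50 and fall back to mean, mirroring the method above
        net_delay = summary["p50"] if "p50" in summary else summary["mean"]
        print("ID:{}\tNET latency: {}".format(identifier, net_delay))
    # prints: ID:run_0    NET latency: 12.5

A "generic" result would instead carry, per identifier, either a dict (with an optional "meta" key) or a list of output lines to be printed verbatim.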