def list()

in treeherder/webapp/api/performance_data.py

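Handles the PerfCompare results request: it validates the query parameters, resolves the base and new pushes, groups the matching performance data per signature, and returns one serialized comparison row per (header, platform) pair.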

    def list(self, request):
        query_params = PerfCompareResultsQueryParamsSerializer(data=request.query_params)
        if not query_params.is_valid():
            return Response(data=query_params.errors, status=HTTP_400_BAD_REQUEST)

        base_rev = query_params.validated_data["base_revision"]
        new_rev = query_params.validated_data["new_revision"]
        base_repo_name = query_params.validated_data["base_repository"]
        new_repo_name = query_params.validated_data["new_repository"]
        interval = query_params.validated_data["interval"]
        framework = query_params.validated_data["framework"]
        no_subtests = query_params.validated_data["no_subtests"]
        base_parent_signature = query_params.validated_data["base_parent_signature"]
        new_parent_signature = query_params.validated_data["new_parent_signature"]
        replicates = query_params.validated_data["replicates"]

        try:
            new_push = models.Push.objects.get(revision=new_rev, repository__name=new_repo_name)
        except models.Push.DoesNotExist:
            return Response(
                f"No new push with revision {new_rev} from repo {new_repo_name}.",
                status=HTTP_400_BAD_REQUEST,
            )

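        # Two comparison modes: with an explicit base revision, the time interval
        # is derived from the distance between the two pushes; without one, a
        # fixed window ending at the new push is used instead.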
        try:
            base_push, start_day, end_day = None, None, None
            if base_rev:
                base_push = models.Push.objects.get(
                    revision=base_rev, repository__name=base_repo_name
                )
                # Dynamically calculate a time interval based on the base and new push
                interval = self._get_interval(base_push, new_push)
            else:
                # Comparing without a base push needs a time range from which to
                # gather the data, based on the interval param received: the last
                # day, or the last 2/7/14/30/90 days, or the last year
                start_day = datetime.datetime.utcfromtimestamp(
                    int(to_timestamp(str(new_push.time)) - int(interval))
                )
                end_day = new_push.time
        except models.Push.DoesNotExist:
            return Response(
                f"No base push with revision {base_rev} from repo {base_repo_name}.",
                status=HTTP_400_BAD_REQUEST,
            )

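        # Presumably a timestamp (range) covering both pushes; only used when
        # building the graph links further below.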
        push_timestamp = self._get_push_timestamp(base_push, new_push)

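        # Signature sets for each side, optionally narrowed to the children of a
        # parent signature and with subtests excluded when no_subtests is set.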
        base_signatures = self._get_signatures(
            base_repo_name, framework, base_parent_signature, interval, no_subtests
        )

        new_signatures = self._get_signatures(
            new_repo_name, framework, new_parent_signature, interval, no_subtests
        )

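        # Presumably fetches the datum rows per side: the base side either by
        # revision or, when none was given, by the start/end window computed
        # above; the new side always by revision.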
        base_perf_data = self._get_perf_data(
            base_repo_name, base_rev, base_signatures, interval, start_day, end_day
        )
        new_perf_data = self._get_perf_data(
            new_repo_name, new_rev, new_signatures, interval, None, None
        )

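        # Maps option collection hashes to option names (e.g. "opt").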
        option_collection_map = perfcompare_utils.get_option_collection_map()

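        # Group the datum rows per signature id: job ids (for retriggering),
        # per-job summary values, and raw replicate values.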
        (
            base_grouped_job_ids,
            base_grouped_values,
            base_grouped_replicates,
        ) = self._get_grouped_perf_data(base_perf_data)
        (
            new_grouped_job_ids,
            new_grouped_values,
            new_grouped_replicates,
        ) = self._get_grouped_perf_data(new_perf_data)

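        # With replicates requested, the statistics below are computed over the
        # individual replicate values instead of the per-job summary values.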
        statistics_base_grouped_data = base_grouped_values
        statistics_new_grouped_data = new_grouped_values
        if replicates:
            statistics_base_grouped_data = base_grouped_replicates
            statistics_new_grouped_data = new_grouped_replicates

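        # Key each signature by a header+platform identifier (see
        # get_sig_identifier) so the base and new sides can be matched up below.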
        base_signatures_map, base_header_names, base_platforms = self._get_signatures_map(
            base_signatures, statistics_base_grouped_data, option_collection_map
        )
        new_signatures_map, new_header_names, new_platforms = self._get_signatures_map(
            new_signatures, statistics_new_grouped_data, option_collection_map
        )

        header_names = list(set(base_header_names + new_header_names))
        header_names.sort()
        platforms = set(base_platforms + new_platforms)
        self.queryset = []

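        # Build one comparison row per (header, platform) pair that has data on
        # at least one side.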
        for header in header_names:
            for platform in platforms:
                sig_identifier = perfcompare_utils.get_sig_identifier(header, platform)
                base_sig = base_signatures_map.get(sig_identifier, {})
                base_sig_id = base_sig.get("id", None)
                new_sig = new_signatures_map.get(sig_identifier, {})
                new_sig_id = new_sig.get("id", None)
                if base_sig:
                    (
                        extra_options,
                        lower_is_better,
                        option_name,
                        sig_hash,
                        suite,
                        test,
                    ) = self._get_signature_based_properties(base_sig, option_collection_map)
                else:
                    (
                        extra_options,
                        lower_is_better,
                        option_name,
                        sig_hash,
                        suite,
                        test,
                    ) = self._get_signature_based_properties(new_sig, option_collection_map)
                base_perf_data_values = base_grouped_values.get(base_sig_id, [])
                new_perf_data_values = new_grouped_values.get(new_sig_id, [])
                base_perf_data_replicates = base_grouped_replicates.get(base_sig_id, [])
                new_perf_data_replicates = new_grouped_replicates.get(new_sig_id, [])
                statistics_base_perf_data = statistics_base_grouped_data.get(base_sig_id, [])
                statistics_new_perf_data = statistics_new_grouped_data.get(new_sig_id, [])
                base_runs_count = len(statistics_base_perf_data)
                new_runs_count = len(statistics_new_perf_data)
                is_complete = base_runs_count and new_runs_count
                no_results_to_show = not base_runs_count and not new_runs_count
                if no_results_to_show:
                    continue
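                # Descriptive statistics per side; "confidence" is the absolute
                # Student's t value between the two samples.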
                base_avg_value = perfcompare_utils.get_avg(statistics_base_perf_data, header)
                base_stddev = perfcompare_utils.get_stddev(statistics_base_perf_data, header)
                base_median_value = perfcompare_utils.get_median(statistics_base_perf_data)
                new_avg_value = perfcompare_utils.get_avg(statistics_new_perf_data, header)
                new_stddev = perfcompare_utils.get_stddev(statistics_new_perf_data, header)
                new_median_value = perfcompare_utils.get_median(statistics_new_perf_data)
                base_stddev_pct = perfcompare_utils.get_stddev_pct(base_avg_value, base_stddev)
                new_stddev_pct = perfcompare_utils.get_stddev_pct(new_avg_value, new_stddev)
                confidence = perfcompare_utils.get_abs_ttest_value(
                    statistics_base_perf_data, statistics_new_perf_data
                )
                confidence_text = perfcompare_utils.get_confidence_text(confidence)
                delta_value = perfcompare_utils.get_delta_value(new_avg_value, base_avg_value)
                delta_percentage = perfcompare_utils.get_delta_percentage(
                    delta_value, base_avg_value
                )
                magnitude = perfcompare_utils.get_magnitude(delta_percentage)
                new_is_better = perfcompare_utils.is_new_better(delta_value, lower_is_better)
                is_confident = perfcompare_utils.is_confident(
                    base_runs_count, new_runs_count, confidence
                )
                more_runs_are_needed = perfcompare_utils.more_runs_are_needed(
                    is_complete, is_confident, base_runs_count
                )
                class_name = perfcompare_utils.get_class_name(
                    new_is_better, base_avg_value, new_avg_value, confidence
                )

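                # class_name maps to the row flags below: "success" sets
                # is_improvement, "danger" sets is_regression, and an empty
                # string sets is_meaningful.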
                is_improvement = class_name == "success"
                is_regression = class_name == "danger"
                is_meaningful = class_name == ""

                row_result = {
                    "base_rev": base_rev,
                    "new_rev": new_rev,
                    "header_name": header,
                    "platform": platform,
                    "base_app": base_sig.get("application", ""),
                    "new_app": new_sig.get("application", ""),
                    "suite": suite,  # same suite for base_result and new_result
                    "test": test,  # same test for base_result and new_result
                    "is_complete": is_complete,
                    "framework_id": framework,
                    "option_name": option_name,
                    "extra_options": extra_options,
                    "base_repository_name": base_repo_name,
                    "new_repository_name": new_repo_name,
                    "base_measurement_unit": base_sig.get("measurement_unit", ""),
                    "new_measurement_unit": new_sig.get("measurement_unit", ""),
                    "base_runs": sorted(base_perf_data_values),
                    "new_runs": sorted(new_perf_data_values),
                    "base_runs_replicates": sorted(base_perf_data_replicates),
                    "new_runs_replicates": sorted(new_perf_data_replicates),
                    "base_avg_value": base_avg_value,
                    "new_avg_value": new_avg_value,
                    "base_median_value": base_median_value,
                    "new_median_value": new_median_value,
                    "base_stddev": base_stddev,
                    "new_stddev": new_stddev,
                    "base_stddev_pct": base_stddev_pct,
                    "new_stddev_pct": new_stddev_pct,
                    "base_retriggerable_job_ids": base_grouped_job_ids.get(base_sig_id, []),
                    "new_retriggerable_job_ids": new_grouped_job_ids.get(new_sig_id, []),
                    "confidence": confidence,
                    "confidence_text": confidence_text,
                    "delta_value": delta_value,
                    "delta_percentage": delta_percentage,
                    "magnitude": magnitude,
                    "new_is_better": new_is_better,
                    "lower_is_better": lower_is_better,
                    "is_confident": is_confident,
                    "more_runs_are_needed": more_runs_are_needed,
                    # the highlighted revisions are the base_revision and the new_revision
                    "graphs_link": self._create_graph_links(
                        base_repo_name,
                        new_repo_name,
                        base_rev,
                        new_rev,
                        str(framework),
                        push_timestamp,
                        str(sig_hash),
                    ),
                    "is_improvement": is_improvement,
                    "is_regression": is_regression,
                    "is_meaningful": is_meaningful,
                    "base_parent_signature": base_sig.get("parent_signature_id", None),
                    "new_parent_signature": new_sig.get("parent_signature_id", None),
                    "base_signature_id": base_sig_id,
                    "new_signature_id": new_sig_id,
                    "has_subtests": (
                        base_sig.get("has_subtests", None) or new_sig.get("has_subtests", None)
                    ),
                }
                self.queryset.append(row_result)

        serializer = self.get_serializer(self.queryset, many=True)
        serialized_data = serializer.data

        return Response(data=serialized_data)
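
For reference, a hypothetical client call against this endpoint. The /api/perfcompare/results/ path, the revision values, and the exact parameter set are assumptions inferred from the serializer fields read above, not taken from this file; consult the project's URL configuration for the real route.

    # Hypothetical usage sketch; the URL path and revision values are assumptions.
    import requests

    params = {
        "base_repository": "mozilla-central",
        "new_repository": "autoland",
        "new_revision": "deadbeef1234",  # push to compare (placeholder revision)
        "base_revision": "cafef00d5678",  # optional; omit to compare against a time window
        "framework": 1,  # performance framework id
        "interval": 86400,  # seconds; only used when base_revision is absent
        "no_subtests": True,
        "replicates": False,
    }
    resp = requests.get(
        "https://treeherder.mozilla.org/api/perfcompare/results/", params=params
    )
    rows = resp.json()  # one row per (header_name, platform) with data on either side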