def __call__()

in esrally/metrics.py

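Aggregates benchmark metrics into a single GlobalStats object: first per-task request metrics (throughput, latency, service time, processing time, error rate, and duration), then cluster-wide system metrics covering indexing, merges, refreshes, flushes, garbage collection, segment memory, transforms, ingest pipelines, and disk usage.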

    def __call__(self):
        result = GlobalStats()

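        # Request metrics: walk every task in the challenge schedule.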
        for tasks in self.challenge.schedule:
            for task in tasks:
                t = task.name
                op_type = task.operation.type
                error_rate = self.error_rate(t, op_type)
                duration = self.duration(t)
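                # Tasks excluded from reporting are still reported if they produced errors.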
                if task.operation.include_in_reporting or error_rate > 0:
                    self.logger.debug("Gathering request metrics for [%s].", t)
                    result.add_op_metrics(
                        t,
                        task.operation.name,
                        self.summary_stats("throughput", t, op_type),
                        self.single_latency(t, op_type),
                        self.single_latency(t, op_type, metric_name="service_time"),
                        self.single_latency(t, op_type, metric_name="processing_time"),
                        error_rate,
                        duration,
                        self.merge(self.track.meta_data, self.challenge.meta_data, task.operation.meta_data, task.meta_data),
                    )
        self.logger.debug("Gathering indexing metrics.")
        result.total_time = self.sum("indexing_total_time")
        result.total_time_per_shard = self.shard_stats("indexing_total_time")
        result.indexing_throttle_time = self.sum("indexing_throttle_time")
        result.indexing_throttle_time_per_shard = self.shard_stats("indexing_throttle_time")
        result.merge_time = self.sum("merges_total_time")
        result.merge_time_per_shard = self.shard_stats("merges_total_time")
        result.merge_count = self.sum("merges_total_count")
        result.refresh_time = self.sum("refresh_total_time")
        result.refresh_time_per_shard = self.shard_stats("refresh_total_time")
        result.refresh_count = self.sum("refresh_total_count")
        result.flush_time = self.sum("flush_total_time")
        result.flush_time_per_shard = self.shard_stats("flush_total_time")
        result.flush_count = self.sum("flush_total_count")
        result.merge_throttle_time = self.sum("merges_total_throttled_time")
        result.merge_throttle_time_per_shard = self.shard_stats("merges_total_throttled_time")

        self.logger.debug("Gathering ML max processing times.")
        result.ml_processing_time = self.ml_processing_time_stats()

        self.logger.debug("Gathering garbage collection metrics.")
        result.young_gc_time = self.sum("node_total_young_gen_gc_time")
        result.young_gc_count = self.sum("node_total_young_gen_gc_count")
        result.old_gc_time = self.sum("node_total_old_gen_gc_time")
        result.old_gc_count = self.sum("node_total_old_gen_gc_count")
        result.zgc_cycles_gc_time = self.sum("node_total_zgc_cycles_gc_time")
        result.zgc_cycles_gc_count = self.sum("node_total_zgc_cycles_gc_count")
        result.zgc_pauses_gc_time = self.sum("node_total_zgc_pauses_gc_time")
        result.zgc_pauses_gc_count = self.sum("node_total_zgc_pauses_gc_count")

        self.logger.debug("Gathering segment memory metrics.")
        result.memory_segments = self.median("segments_memory_in_bytes")
        result.memory_doc_values = self.median("segments_doc_values_memory_in_bytes")
        result.memory_terms = self.median("segments_terms_memory_in_bytes")
        result.memory_norms = self.median("segments_norms_memory_in_bytes")
        result.memory_points = self.median("segments_points_memory_in_bytes")
        result.memory_stored_fields = self.median("segments_stored_fields_memory_in_bytes")
        result.dataset_size = self.sum("dataset_size_in_bytes")
        result.store_size = self.sum("store_size_in_bytes")
        result.translog_size = self.sum("translog_size_in_bytes")

        # Convert to int; fractional segment counts are meaningless.
        median_segment_count = self.median("segments_count")
        result.segment_count = int(median_segment_count) if median_segment_count is not None else median_segment_count

        self.logger.debug("Gathering transform processing times.")
        result.total_transform_processing_times = self.total_transform_metric("total_transform_processing_time")
        result.total_transform_index_times = self.total_transform_metric("total_transform_index_time")
        result.total_transform_search_times = self.total_transform_metric("total_transform_search_time")
        result.total_transform_throughput = self.total_transform_metric("total_transform_throughput")

        self.logger.debug("Gathering Ingest Pipeline metrics.")
        result.ingest_pipeline_cluster_count = self.sum("ingest_pipeline_cluster_count")
        result.ingest_pipeline_cluster_time = self.sum("ingest_pipeline_cluster_time")
        result.ingest_pipeline_cluster_failed = self.sum("ingest_pipeline_cluster_failed")

        self.logger.debug("Gathering disk usage metrics.")
        result.disk_usage_total = self.disk_usage("disk_usage_total")
        result.disk_usage_inverted_index = self.disk_usage("disk_usage_inverted_index")
        result.disk_usage_stored_fields = self.disk_usage("disk_usage_stored_fields")
        result.disk_usage_doc_values = self.disk_usage("disk_usage_doc_values")
        result.disk_usage_points = self.disk_usage("disk_usage_points")
        result.disk_usage_norms = self.disk_usage("disk_usage_norms")
        result.disk_usage_term_vectors = self.disk_usage("disk_usage_term_vectors")

        return result
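
The calculator is invoked like a function via __call__. A minimal usage sketch follows; the constructor arguments are an assumption inferred from the attributes this method reads (self.track, self.challenge, and the metrics store behind self.sum() and self.median()), not a confirmed Rally API.

    # Hypothetical construction; the real signature in esrally/metrics.py may differ.
    calc = GlobalStatsCalculator(store, track, challenge)
    stats = calc()  # __call__ makes the calculator directly callable
    print(stats.segment_count, stats.young_gc_time)  # GlobalStats fields populated above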