in osbenchmark/metrics.py [0:0]
def __call__(self):
    """Assemble a GlobalStats result from the metrics store.

    Collects per-task request metrics (throughput, latency, service time,
    processing time, error rate, duration) for every task in the test
    procedure's schedule, then node- and index-level metrics (indexing,
    merge, refresh, flush, GC, segment memory, transforms).

    :return: a fully populated ``GlobalStats`` instance.
    """
    stats = GlobalStats()

    # Per-task request metrics. Error rate and duration are always computed;
    # a task is published either when explicitly included or when it had errors.
    for task_group in self.test_procedure.schedule:
        for task in task_group:
            task_name = task.name
            operation_type = task.operation.type
            err_rate = self.error_rate(task_name, operation_type)
            task_duration = self.duration(task_name)
            if not task.operation.include_in_results_publishing and err_rate <= 0:
                continue
            self.logger.debug("Gathering request metrics for [%s].", task_name)
            stats.add_op_metrics(
                task_name,
                task.operation.name,
                self.summary_stats("throughput", task_name, operation_type),
                self.single_latency(task_name, operation_type),
                self.single_latency(task_name, operation_type, metric_name="service_time"),
                self.single_latency(task_name, operation_type, metric_name="processing_time"),
                err_rate,
                task_duration,
                # meta-data layers merge in increasing specificity: workload -> procedure -> operation -> task
                self.merge(
                    self.workload.meta_data,
                    self.test_procedure.meta_data,
                    task.operation.meta_data,
                    task.meta_data)
            )

    self.logger.debug("Gathering indexing metrics.")
    # (result attribute, store metric name, whether a per-shard breakdown exists)
    indexing_metrics = [
        ("total_time", "indexing_total_time", True),
        ("indexing_throttle_time", "indexing_throttle_time", True),
        ("merge_time", "merges_total_time", True),
        ("merge_count", "merges_total_count", False),
        ("refresh_time", "refresh_total_time", True),
        ("refresh_count", "refresh_total_count", False),
        ("flush_time", "flush_total_time", True),
        ("flush_count", "flush_total_count", False),
        ("merge_throttle_time", "merges_total_throttled_time", True),
    ]
    for attr, metric, has_per_shard in indexing_metrics:
        setattr(stats, attr, self.sum(metric))
        if has_per_shard:
            setattr(stats, attr + "_per_shard", self.shard_stats(metric))

    self.logger.debug("Gathering ML max processing times.")
    stats.ml_processing_time = self.ml_processing_time_stats()

    self.logger.debug("Gathering garbage collection metrics.")
    gc_metrics = [
        ("young_gc_time", "node_total_young_gen_gc_time"),
        ("young_gc_count", "node_total_young_gen_gc_count"),
        ("old_gc_time", "node_total_old_gen_gc_time"),
        ("old_gc_count", "node_total_old_gen_gc_count"),
    ]
    for attr, metric in gc_metrics:
        setattr(stats, attr, self.sum(metric))

    self.logger.debug("Gathering segment memory metrics.")
    segment_memory_metrics = [
        ("memory_segments", "segments_memory_in_bytes"),
        ("memory_doc_values", "segments_doc_values_memory_in_bytes"),
        ("memory_terms", "segments_terms_memory_in_bytes"),
        ("memory_norms", "segments_norms_memory_in_bytes"),
        ("memory_points", "segments_points_memory_in_bytes"),
        ("memory_stored_fields", "segments_stored_fields_memory_in_bytes"),
    ]
    for attr, metric in segment_memory_metrics:
        setattr(stats, attr, self.median(metric))
    stats.store_size = self.sum("store_size_in_bytes")
    stats.translog_size = self.sum("translog_size_in_bytes")

    # convert to int, fraction counts are senseless
    segment_count = self.median("segments_count")
    stats.segment_count = segment_count if segment_count is None else int(segment_count)

    self.logger.debug("Gathering transform processing times.")
    transform_metrics = [
        ("total_transform_processing_times", "total_transform_processing_time"),
        ("total_transform_index_times", "total_transform_index_time"),
        ("total_transform_search_times", "total_transform_search_time"),
        ("total_transform_throughput", "total_transform_throughput"),
    ]
    for attr, metric in transform_metrics:
        setattr(stats, attr, self.total_transform_metric(metric))

    return stats