def _AnalyzeAsadmLatencyResults()

in perfkitbenchmarker/traces/asadm.py [0:0]


def _AnalyzeAsadmLatencyResults(metadata, output, samples):
  """Parse asadm result.

  Args:
    metadata: metadata of the sample.
    output: the output of the stress-ng benchmark.
    samples: list of samples to return.
  """
  output_lines = output.splitlines()
  timestamps_in_ms = []
  read_iops = []
  read_lat_1_ms = []
  read_lat_8_ms = []
  read_lat_64_ms = []
  write_iops = []
  write_lat_1_ms = []
  write_lat_8_ms = []
  write_lat_64_ms = []
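  # Lines containing a bracketed '[...]' token are parsed as timestamps;
  # 7-column pipe-delimited rows are parsed as data; everything else is skipped.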
  for line in output_lines:
    if not line:  # Skip if the line is empty.
      continue
    if re.search(r'\[.*\]', line):
      timestamps_in_ms.append(ParseTimestamp(line))
      continue
    line_split = line.split('|')
    if len(line_split) != 7:
      continue
    name = line_split[0].upper().strip()
    op_str = line_split[1].upper().strip()
    op_per_sec_str = line_split[3].strip()
    lat_1_ms_str = line_split[4].strip()
    lat_8_ms_str = line_split[5].strip()
    lat_64_ms_str = line_split[6].strip()
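    # Only rows whose name column is 'test' (case-insensitive) are recorded.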
    if name != 'TEST':
      continue
    if op_str == 'READ':
      read_iops.append(float(op_per_sec_str))
      read_lat_1_ms.append(float(lat_1_ms_str))
      read_lat_8_ms.append(float(lat_8_ms_str))
      read_lat_64_ms.append(float(lat_64_ms_str))
    elif op_str == 'WRITE':
      write_iops.append(float(op_per_sec_str))
      write_lat_1_ms.append(float(lat_1_ms_str))
      write_lat_8_ms.append(float(lat_8_ms_str))
      write_lat_64_ms.append(float(lat_64_ms_str))

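  # The parsed series can end up with different lengths (e.g. if collection
  # stopped mid-interval), so compute the longest prefix that is safe to emit.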
  effective_read_length = len(timestamps_in_ms)
  effective_write_length = len(timestamps_in_ms)
  if not (
      len(timestamps_in_ms)
      == len(read_iops)
      == len(read_lat_1_ms)
      == len(read_lat_8_ms)
      == len(read_lat_64_ms)
  ):
    logging.warning(
        'Lists are not the same length: timestamps[%d], read_iops[%d],'
        ' read_lat_1_ms[%d], read_lat_8_ms[%d], read_lat_64_ms[%d]',
        len(timestamps_in_ms),
        len(read_iops),
        len(read_lat_1_ms),
        len(read_lat_8_ms),
        len(read_lat_64_ms),
    )
    effective_read_length = min(
        len(timestamps_in_ms),
        len(read_iops),
        len(read_lat_1_ms),
        len(read_lat_8_ms),
        len(read_lat_64_ms),
    )
  if not (
      len(timestamps_in_ms)
      == len(write_iops)
      == len(write_lat_1_ms)
      == len(write_lat_8_ms)
      == len(write_lat_64_ms)
  ):
    logging.warning(
        'Lists are not the same length: timestamps[%d], write_iops[%d],'
        ' write_lat_1_ms[%d], write_lat_8_ms[%d], write_lat_64_ms[%d]',
        len(timestamps_in_ms),
        len(write_iops),
        len(write_lat_1_ms),
        len(write_lat_8_ms),
        len(write_lat_64_ms),
    )
    effective_write_length = min(
        len(timestamps_in_ms),
        len(write_iops),
        len(write_lat_1_ms),
        len(write_lat_8_ms),
        len(write_lat_64_ms),
    )
  effective_metric_length = min(effective_read_length, effective_write_length)
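  # Emit one time-series sample per metric, truncated to the common length.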
  samples.extend([
      sample.CreateTimeSeriesSample(
          values=read_iops[:effective_metric_length],
          timestamps=timestamps_in_ms[:effective_metric_length],
          metric=READ_IOPS_METRIC,
          units='ops/sec',
          interval=metadata['interval'],
      ),
      sample.CreateTimeSeriesSample(
          values=read_lat_1_ms[:effective_metric_length],
          timestamps=timestamps_in_ms[:effective_metric_length],
          metric=READ_LATENCY_OVER_1_MS_METRIC,
          units='%',
          interval=metadata['interval'],
      ),
      sample.CreateTimeSeriesSample(
          values=read_lat_8_ms[:effective_metric_length],
          timestamps=timestamps_in_ms[:effective_metric_length],
          metric=READ_LATENCY_OVER_8_MS_METRIC,
          units='%',
          interval=metadata['interval'],
      ),
      sample.CreateTimeSeriesSample(
          values=read_lat_64_ms[:effective_metric_length],
          timestamps=timestamps_in_ms[:effective_metric_length],
          metric=READ_LATENCY_OVER_64_MS_METRIC,
          units='%',
          interval=metadata['interval'],
      ),
      sample.CreateTimeSeriesSample(
          values=write_iops[:effective_metric_length],
          timestamps=timestamps_in_ms[:effective_metric_length],
          metric=WRITE_IOPS_METRIC,
          units='ops/sec',
          interval=metadata['interval'],
      ),
      sample.CreateTimeSeriesSample(
          values=write_lat_1_ms[:effective_metric_length],
          timestamps=timestamps_in_ms[:effective_metric_length],
          metric=WRITE_LATENCY_OVER_1_MS_METRIC,
          units='%',
          interval=metadata['interval'],
      ),
      sample.CreateTimeSeriesSample(
          values=write_lat_8_ms[:effective_metric_length],
          timestamps=timestamps_in_ms[:effective_metric_length],
          metric=WRITE_LATENCY_OVER_8_MS_METRIC,
          units='%',
          interval=metadata['interval'],
      ),
      sample.CreateTimeSeriesSample(
          values=write_lat_64_ms[:effective_metric_length],
          timestamps=timestamps_in_ms[:effective_metric_length],
          metric=WRITE_LATENCY_OVER_64_MS_METRIC,
          units='%',
          interval=metadata['interval'],
      ),
  ])
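
For reference, a minimal standalone sketch of the row-parsing step above, using a made-up pipe-delimited row in the 7-column shape this parser expects (illustrative values only, not verbatim asadm output):

example_row = 'test | read | - | 4567.8 | 1.2 | 0.3 | 0.0'  # hypothetical row
columns = [col.strip() for col in example_row.split('|')]
assert len(columns) == 7
namespace = columns[0].upper()   # 'TEST'
operation = columns[1].upper()   # 'READ' or 'WRITE'
ops_per_sec = float(columns[3])  # throughput column
# Columns 4-6: percentage of operations slower than 1 ms, 8 ms, and 64 ms.
lat_over_1_ms = float(columns[4])
lat_over_8_ms = float(columns[5])
lat_over_64_ms = float(columns[6])
print(namespace, operation, ops_per_sec, lat_over_1_ms, lat_over_8_ms, lat_over_64_ms)

Each kept row contributes one point per metric; the bracketed timestamps provide the time axis for the resulting time-series samples.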