void fillLatency()

in src/envoy/http/service_control/handler_utils.cc [184:233]


// Populates `latency` from the stream's timing data and records the resulting
// request/backend/overhead latencies into `filter_stats`.
//
// - request_time_ms: total wall time from request start to completion.
// - backend_time_ms: time between the first byte sent upstream and the last
//   byte received from upstream.
// - overhead_time_ms: the remainder attributed to filter/proxy overhead.
void fillLatency(const Envoy::StreamInfo::StreamInfo& stream_info,
                 LatencyInfo& latency,
                 ServiceControlFilterStats& filter_stats) {
  // Total request duration, available only once the request has completed.
  const auto request_complete = stream_info.requestComplete();
  if (request_complete) {
    latency.request_time_ms = convertNsToMs(request_complete.value());
  }

  Envoy::StreamInfo::TimingUtility timing(stream_info);
  const auto first_upstream_tx = timing.firstUpstreamTxByteSent();
  const auto last_upstream_rx = timing.lastUpstreamRxByteReceived();
  if (first_upstream_tx && last_upstream_rx) {
    ENVOY_BUG(last_upstream_rx.value() >= first_upstream_tx.value(),
              "End time < start time");
    latency.backend_time_ms =
        convertNsToMs(last_upstream_rx.value() - first_upstream_tx.value());
  } else if (!first_upstream_tx) {
    // The request never reached the backend (rejected at a filter), so no
    // time was spent there.
    latency.backend_time_ms = 0;
  }

  if (latency.request_time_ms >= 0) {
    if (latency.backend_time_ms >= 0) {
      // Overhead is whatever part of the total was not spent in the backend.
      latency.overhead_time_ms =
          latency.request_time_ms - latency.backend_time_ms;
    } else if (first_upstream_tx) {
      // Backend response timing is missing: approximate overhead as the time
      // before the first upstream byte was sent, and attribute the remainder
      // to the backend. Less accurate than the branch above.
      latency.overhead_time_ms = convertNsToMs(first_upstream_tx.value());
      latency.backend_time_ms =
          latency.request_time_ms - latency.overhead_time_ms;
    }
  }

  // FIXME(https://github.com/envoyproxy/envoy/issues/16684): When stats are
  // reported correctly, remove this hack. It results in under-reporting
  // overhead latency and over-reporting backend latency.
  if (!first_upstream_tx && stream_info.responseCodeDetails()) {
    const auto& code_details = Envoy::StreamInfo::ResponseCodeDetails::get();
    if (stream_info.responseCodeDetails().value() ==
            code_details.ResponseTimeout ||
        stream_info.responseCodeDetails().value() ==
            code_details.UpstreamPerTryTimeout) {
      latency.backend_time_ms = latency.overhead_time_ms;
      latency.overhead_time_ms = 0;
    }
  }

  filter_stats.filter_.request_time_.recordValue(latency.request_time_ms);
  filter_stats.filter_.backend_time_.recordValue(latency.backend_time_ms);
  filter_stats.filter_.overhead_time_.recordValue(latency.overhead_time_ms);
}