def _chat_completion_wrapper()

in instrumentation/elastic-opentelemetry-instrumentation-openai/src/opentelemetry/instrumentation/openai/__init__.py [0:0]


    def _chat_completion_wrapper(self, wrapped, instance, args, kwargs):
        """Trace a chat-completions call (streaming or not).

        Starts a CLIENT span around ``wrapped(*args, **kwargs)``, emits log
        events for the request messages and the response choices, and records
        token-usage and operation-duration metrics.

        Returns the wrapped call's result unchanged; streaming results are
        wrapped in a ``StreamWrapper`` that takes ownership of the still-open
        span and ends it when the stream is consumed.
        """
        # Lazy %-style args: avoid formatting kwargs when debug is disabled.
        logger.debug("%s kwargs: %s", wrapped, kwargs)

        span_attributes = _get_attributes_from_wrapper(instance, kwargs)
        event_attributes = _get_event_attributes()

        span_name = _span_name_from_attributes(span_attributes)
        with self.tracer.start_as_current_span(
            name=span_name,
            kind=SpanKind.CLIENT,
            attributes=span_attributes,
            # end_on_exit=False: for streaming responses the span must outlive
            # this method; we (or StreamWrapper) end it explicitly.
            end_on_exit=False,
        ) as span:
            messages = kwargs.get("messages", [])
            _send_log_events_from_messages(
                self.event_logger,
                messages=messages,
                attributes=event_attributes,
                capture_message_content=self.capture_message_content,
            )

            start_time = default_timer()
            try:
                result = wrapped(*args, **kwargs)
            except Exception as exc:
                # The call itself failed: mark the span, close it, record the
                # duration with the error type, and propagate to the caller.
                span.set_status(StatusCode.ERROR, str(exc))
                span.set_attribute(ERROR_TYPE, exc.__class__.__qualname__)
                span.end()
                error_attributes = {**span_attributes, ERROR_TYPE: exc.__class__.__qualname__}
                _record_operation_duration_metric(self.operation_duration_metric, error_attributes, start_time)
                raise

            if kwargs.get("stream"):
                # Hand the still-open span to the stream wrapper, which is
                # responsible for ending it and recording metrics.
                return StreamWrapper(
                    stream=result,
                    span=span,
                    span_attributes=span_attributes,
                    capture_message_content=self.capture_message_content,
                    event_attributes=event_attributes,
                    event_logger=self.event_logger,
                    start_time=start_time,
                    token_usage_metric=self.token_usage_metric,
                    operation_duration_metric=self.operation_duration_metric,
                )

            logger.debug("openai.resources.chat.completions.Completions.create result: %s", result)

            try:
                # If the caller used with_raw_response, parse the raw output
                # to get the response class we expect below.
                is_raw_response = _is_raw_response(result)
                if is_raw_response:
                    result = result.parse()
                response_attributes = _get_attributes_from_response(
                    result.id, result.model, result.choices, result.usage, getattr(result, "service_tier", None)
                )
                if span.is_recording():
                    for k, v in response_attributes.items():
                        span.set_attribute(k, v)

                metrics_attributes = {**span_attributes, **response_attributes}
                _record_token_usage_metrics(self.token_usage_metric, metrics_attributes, result.usage)
                _record_operation_duration_metric(self.operation_duration_metric, metrics_attributes, start_time)

                _send_log_events_from_choices(
                    self.event_logger,
                    choices=result.choices,
                    attributes=event_attributes,
                    capture_message_content=self.capture_message_content,
                )
            finally:
                # end_on_exit=False means the context manager will NOT close
                # the span for us: end it unconditionally so it cannot leak if
                # response parsing, metrics, or event emission raises.
                span.end()

            return result