def call_pipe()

in api_inference_community/routes.py [0:0]


def call_pipe(pipe: Any, inputs, params: Dict, start: float, accept: str) -> Response:
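    """Run the pipeline on the inputs, collect any warnings emitted during
    inference, and wrap the result in a response formatted for the current task.
    """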
    root_logger = logging.getLogger()
    warnings = set()

    class RequestsHandler(logging.Handler):
        def emit(self, record):
            """Send the log records (created by loggers) to
            the appropriate destination.
            """
            warnings.add(record.getMessage())

    handler = RequestsHandler()
    handler.setLevel(logging.WARNING)
    root_logger.addHandler(handler)
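    # Attach the collector to every logger registered so far; entries in
    # loggerDict may be PlaceHolder objects without addHandler, hence the
    # broad try/except.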
    for _logger in logging.root.manager.loggerDict.values():  # type: ignore
        try:
            _logger.addHandler(handler)
        except Exception:
            pass

    status_code = 200
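    # In DEBUG mode, invoke the pipeline once outside the try block so any
    # exception propagates with its full traceback (the call is repeated below).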
    if os.getenv("DEBUG", "0") in {"1", "true"}:
        outputs = pipe(inputs, **params)
    try:
        outputs = pipe(inputs, **params)
        task = os.getenv("TASK")
        metrics = get_metric(inputs, task, pipe)
    except (AssertionError, ValueError, TypeError) as e:
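        # Validation-style failures are reported back to the caller as a 400.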
        outputs = {"error": str(e)}
        status_code = 400
    except Exception as e:
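        # Anything else is an opaque 500; the details are only logged server-side.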
        outputs = {"error": "unknown error"}
        status_code = 500
        logger.error(f"There was an inference error: {e}")
        logger.exception(e)

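    # Surface captured warnings alongside a dict payload.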
    if warnings and isinstance(outputs, dict):
        outputs["warnings"] = list(sorted(warnings))

    compute_type = COMPUTE_TYPE
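    # Always report compute time and compute type, and expose those headers so
    # browser fetch() callers can read them.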
    headers = {
        HF_HEADER_COMPUTE_TIME: "{:.3f}".format(time.time() - start),
        HF_HEADER_COMPUTE_TYPE: compute_type,
        # https://stackoverflow.com/questions/43344819/reading-response-headers-with-fetch-api/44816592#44816592
        "access-control-expose-headers": f"{HF_HEADER_COMPUTE_TYPE}, {HF_HEADER_COMPUTE_TIME}",
    }

    if status_code == 200:
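        # On success, expose the pipeline metrics as headers and shape the body
        # according to the task.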
        headers.update({k: str(v) for k, v in metrics.items()})
        task = os.getenv("TASK")
        if task == "text-to-speech":
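            # Encode the generated waveform with ffmpeg into the negotiated
            # audio format and return the raw bytes.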
            waveform, sampling_rate = outputs
            audio_format = parse_accept(accept, AUDIO)
            data = ffmpeg_convert(waveform, sampling_rate, audio_format)
            headers["content-type"] = f"audio/{audio_format}"
            return Response(data, headers=headers, status_code=status_code)
        elif task == "audio-to-audio":
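            # Return one labelled, base64-encoded blob per output waveform,
            # packed into a JSON list.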
            waveforms, sampling_rate, labels = outputs
            items = []
            headers["content-type"] = "application/json"

            audio_format = parse_accept(accept, AUDIO)

            for waveform, label in zip(waveforms, labels):
                data = ffmpeg_convert(waveform, sampling_rate, audio_format)
                items.append(
                    {
                        "label": label,
                        "blob": base64.b64encode(data).decode("utf-8"),
                        "content-type": f"audio/{audio_format}",
                    }
                )
            return JSONResponse(items, headers=headers, status_code=status_code)
        elif task in IMAGE_OUTPUTS:
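            # Serialize the output image (a PIL Image) into the negotiated format.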
            image = outputs
            image_format = parse_accept(accept, IMAGE)
            buffer = io.BytesIO()
            image.save(buffer, format=image_format.upper())
            buffer.seek(0)
            img_bytes = buffer.read()
            return Response(
                img_bytes,
                headers=headers,
                status_code=status_code,
                media_type=f"image/{image_format}",
            )

    return JSONResponse(
        outputs,
        headers=headers,
        status_code=status_code,
    )
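

Usage sketch (not part of routes.py): one way call_pipe could be wired into a
Starlette route. The load_pipeline helper and the payload shape are assumptions
for illustration only; the real routing and payload normalization in
api_inference_community differ.

import time

from starlette.applications import Starlette
from starlette.requests import Request
from starlette.responses import Response
from starlette.routing import Route

from api_inference_community.routes import call_pipe


def load_pipeline():
    # Placeholder: a real server would build the task pipeline once at startup
    # (e.g. selected by the TASK environment variable).
    raise NotImplementedError


async def pipeline_route(request: Request) -> Response:
    # Record the start time so call_pipe can report the compute-time header.
    start = time.time()
    payload = await request.json()
    pipe = load_pipeline()
    return call_pipe(
        pipe,
        payload.get("inputs"),
        payload.get("parameters", {}),
        start,
        request.headers.get("accept", ""),
    )


app = Starlette(routes=[Route("/", pipeline_route, methods=["POST"])])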