in src/sagemaker_huggingface_inference_toolkit/transformers_utils.py [0:0]
def wrap_conversation_pipeline(pipeline):
    """Adapt a Hugging Face conversational pipeline to a dict-in/dict-out interface.

    The returned callable builds a ``Conversation`` object from a JSON-style
    request dict (``text`` plus optional ``past_user_inputs`` /
    ``generated_responses`` history), runs the wrapped ``pipeline`` on it, and
    serializes the prediction back into a plain dict so it can be returned as
    a JSON response.

    Args:
        pipeline: a ``transformers`` conversational pipeline instance.

    Returns:
        A callable with signature ``(inputs, *args, **kwargs) -> dict``
        containing ``generated_text`` (the latest model reply) and the full
        ``conversation`` history.
    """

    def wrapped_pipeline(inputs, *args, **kwargs):
        # Rehydrate the conversation state from the request payload;
        # missing history fields default to empty lists.
        conversation = Conversation(
            inputs["text"],
            past_user_inputs=inputs.get("past_user_inputs", []),
            generated_responses=inputs.get("generated_responses", []),
        )
        result = pipeline(conversation, *args, **kwargs)
        responses = result.generated_responses
        # The newest model reply is the last generated response.
        return {
            "generated_text": responses[-1],
            "conversation": {
                "past_user_inputs": result.past_user_inputs,
                "generated_responses": responses,
            },
        }

    return wrapped_pipeline