in src/smolagents/gradio_ui.py [0:0]
def _process_action_step(step_log: ActionStep, skip_model_outputs: bool = False) -> Generator:
    """
    Process an [`ActionStep`] and yield appropriate Gradio ChatMessage objects.

    Args:
        step_log ([`ActionStep`]): ActionStep to process.
        skip_model_outputs (`bool`): Whether to skip the model's own outputs
            (the step header and the thought/reasoning text), e.g. when they
            are already streamed elsewhere.

    Yields:
        `gradio.ChatMessage`: Gradio ChatMessages representing the action step.
    """
    import gradio as gr

    # Output the step number.
    step_number = f"Step {step_log.step_number}"
    if not skip_model_outputs:
        yield gr.ChatMessage(role=MessageRole.ASSISTANT, content=f"**{step_number}**", metadata={"status": "done"})

    # First yield the thought/reasoning from the LLM, if present.
    if not skip_model_outputs and getattr(step_log, "model_output", ""):
        model_output = _clean_model_output(step_log.model_output)
        yield gr.ChatMessage(role=MessageRole.ASSISTANT, content=model_output, metadata={"status": "done"})

    # For tool calls, create a parent message describing the first call.
    if getattr(step_log, "tool_calls", []):
        first_tool_call = step_log.tool_calls[0]
        used_code = first_tool_call.name == "python_interpreter"

        # Arguments may be a dict (prefer its "answer" key, falling back to the
        # whole dict's repr) or any other value (stringified and stripped).
        args = first_tool_call.arguments
        if isinstance(args, dict):
            content = str(args.get("answer", str(args)))
        else:
            content = str(args).strip()

        # Format code content if needed (python_interpreter payloads get a code fence).
        if used_code:
            content = _format_code_content(content)

        # Create the tool call message.
        parent_message_tool = gr.ChatMessage(
            role=MessageRole.ASSISTANT,
            content=content,
            metadata={
                "title": f"🛠️ Used tool {first_tool_call.name}",
                "status": "done",
            },
        )
        yield parent_message_tool

    # Display execution logs if they exist. `observations` may be missing, None,
    # or whitespace-only; a single strip + truthiness check covers all cases
    # (the original performed the same strip/check three times).
    log_content = (getattr(step_log, "observations", "") or "").strip()
    if log_content:
        # Drop the redundant "Execution logs:" prefix added upstream.
        log_content = re.sub(r"^Execution logs:\s*", "", log_content)
        # NOTE(review): the ```bash fence is deliberately left unclosed, matching the
        # original runtime string — presumably the markdown renderer tolerates it;
        # confirm before appending a closing fence.
        yield gr.ChatMessage(
            role=MessageRole.ASSISTANT,
            content=f"```bash\n{log_content}\n",
            metadata={"title": "📝 Execution Logs", "status": "done"},
        )

    # Display any images captured in observations.
    if getattr(step_log, "observations_images", []):
        for image in step_log.observations_images:
            path_image = AgentImage(image).to_string()
            yield gr.ChatMessage(
                role=MessageRole.ASSISTANT,
                content={"path": path_image, "mime_type": f"image/{path_image.split('.')[-1]}"},
                metadata={"title": "🖼️ Output Image", "status": "done"},
            )

    # Surface any error recorded on the step.
    if getattr(step_log, "error", None):
        yield gr.ChatMessage(
            role=MessageRole.ASSISTANT, content=str(step_log.error), metadata={"title": "💥 Error", "status": "done"}
        )

    # Add step footnote and separator.
    yield gr.ChatMessage(
        role=MessageRole.ASSISTANT,
        content=get_step_footnote_content(step_log, step_number),
        metadata={"status": "done"},
    )
    yield gr.ChatMessage(role=MessageRole.ASSISTANT, content="-----", metadata={"status": "done"})