in src/smolagents/agents.py [0:0]
def provide_final_answer(self, task: str, images: list["PIL.Image.Image"] | None = None) -> ChatMessage:
    """
    Provide the final answer to the task, based on the logs of the agent's interactions.

    Args:
        task (`str`): Task to perform.
        images (`list[PIL.Image.Image]`, *optional*): Image(s) objects.

    Returns:
        `ChatMessage`: The model's final-answer message. If generation fails, an
        assistant-role message whose content describes the error is returned instead.
    """
    # System message framing the "produce a final answer" request.
    messages = [
        ChatMessage(
            role=MessageRole.SYSTEM,
            content=[
                {
                    "type": "text",
                    "text": self.prompt_templates["final_answer"]["pre_messages"],
                }
            ],
        )
    ]
    if images:
        # Attach any provided images to the system message's content list.
        messages[0].content += [{"type": "image", "image": image} for image in images]
    # Replay the agent's memory, skipping its first message (the agent's own
    # system prompt) since we just supplied a final-answer-specific one.
    messages += self.write_memory_to_messages()[1:]
    messages.append(
        ChatMessage(
            role=MessageRole.USER,
            content=[
                {
                    "type": "text",
                    "text": populate_template(
                        self.prompt_templates["final_answer"]["post_messages"], variables={"task": task}
                    ),
                }
            ],
        )
    )
    try:
        chat_message: ChatMessage = self.model.generate(messages)
        return chat_message
    except Exception as e:
        # Deliberate best-effort fallback: surface the generation error to the
        # caller inside a ChatMessage rather than raising.
        return ChatMessage(role=MessageRole.ASSISTANT, content=f"Error in generating final LLM output:\n{e}")