in devai-cli/src/devai/commands/prompts.py [0:0]
def execute_prompt(path: str, input: Optional[str], output_format: Optional[str]):
    """Execute a prompt template with Gemini.

    Loads a YAML prompt template, assembles the full prompt (optionally
    appending user-supplied input), generates a response with the
    configured Gemini model, and prints it — pretty-printed when JSON
    output is requested and the response parses as JSON.

    Args:
        path: Path (or template identifier) resolved via find_prompt_file().
        input: Optional extra input appended to the prompt. (Parameter name
            kept for CLI backward compatibility even though it shadows the
            builtin.)
        output_format: Optional override for the template's
            configuration.output_format (e.g. 'json').
    """
    prompt_file, _is_user_override = find_prompt_file(path)
    if not prompt_file.exists():
        click.echo(f"Error: Prompt template not found at {path}", err=True)
        return
    try:
        template = _load_template(prompt_file)

        # Template configuration; the CLI flag overrides the file's setting.
        config = template.get('configuration', {})
        if output_format:
            config['output_format'] = output_format

        full_prompt = _build_prompt(template, input)

        # Generate under the telemetry context so the request is attributed
        # to this tool. (Note: a previously constructed ClientInfo was dead
        # code — the context manager takes USER_AGENT directly.)
        with telemetry.tool_context_manager(USER_AGENT):
            model = GenerativeModel(MODEL_NAME)
            response = model.generate_content(
                full_prompt,
                generation_config={
                    'temperature': config.get('temperature', 0.7),
                    'max_output_tokens': config.get('max_tokens', 1024),
                },
            )

        _emit_response(response.text, config.get('output_format'))
    except Exception as e:
        # CLI boundary: surface any failure as a user-facing error message
        # rather than a traceback.
        click.echo(f"Error executing prompt: {str(e)}", err=True)


def _load_template(prompt_file) -> dict:
    """Read and parse the YAML prompt template.

    Returns {} for an empty file (yaml.safe_load yields None there, which
    would otherwise break the .get() calls downstream).
    """
    with open(prompt_file, 'r', encoding='utf-8') as f:
        return yaml.safe_load(f) or {}


def _build_prompt(template: dict, user_input: Optional[str]) -> str:
    """Join system_context, instruction, and optional user input into one prompt."""
    prompt_data = template.get('prompt', {})
    system_context = prompt_data.get('system_context', '')
    instruction = prompt_data.get('instruction', '')
    full_prompt = f"{system_context}\n\n{instruction}"
    if user_input:
        full_prompt += f"\n\nInput:\n{user_input}"
    return full_prompt


def _emit_response(text: str, output_format: Optional[str]) -> None:
    """Print the model response; pretty-print as JSON when requested.

    Parsing is only attempted when the payload looks like a JSON object or
    array; anything else (including unparseable text) is echoed verbatim.
    """
    if output_format == 'json':
        import json
        stripped = text.strip()
        if stripped.startswith(('{', '[')):
            try:
                click.echo(json.dumps(json.loads(text), indent=2))
                return
            except json.JSONDecodeError:
                pass
        click.echo(text)
    else:
        click.echo(text)