in src/dispatch/ai/service.py [0:0]
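# NOTE: this excerpt relies on names imported or defined elsewhere in service.py
# (e.g. json, log, plugin_service, signal_service, GenAIException, Case, Session,
# generate_case_signal_historical_context, num_tokens_from_string,
# get_model_token_limit, truncate_prompt); they are not repeated here.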
def generate_case_signal_summary(case: Case, db_session: Session) -> dict[str, str]:
"""
Generate an analysis summary of a case stemming from a signal.
Args:
case (Case): The case object for which the analysis summary is being generated.
db_session (Session): The database session used for querying related data.
Returns:
dict: A dictionary containing the analysis summary, or an error message if the summary generation fails.
"""
    # we generate the historical context
    try:
        historical_context = generate_case_signal_historical_context(
            case=case, db_session=db_session
        )
    except GenAIException as e:
        log.warning(f"Error generating GenAI historical context for {case.name}: {str(e)}")
        raise e

    # we fetch the artificial intelligence plugin
    genai_plugin = plugin_service.get_active_instance(
        db_session=db_session, project_id=case.project.id, plugin_type="artificial-intelligence"
    )

    # we check if the artificial intelligence plugin is enabled
    if not genai_plugin:
        message = (
            "Unable to generate GenAI signal analysis. No artificial-intelligence plugin enabled."
        )
        log.warning(message)
        raise GenAIException(message)
    # we fetch the first instance id and signal
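    # NOTE (assumption): the case is expected to have at least one associated signal
    # instance; if it does not, .first() returns None and the unpacking below raises a
    # TypeError rather than a GenAIException.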
    (first_instance_id, first_instance_signal) = signal_service.get_instances_in_case(
        db_session=db_session, case_id=case.id
    ).first()

    signal_instance = signal_service.get_signal_instance(
        db_session=db_session, signal_instance_id=first_instance_id
    )

    # we check if the signal instance is valid
    if not signal_instance:
        message = "Unable to generate GenAI signal analysis. Signal instance not found."
        log.warning(message)
        raise GenAIException(message)

    # we check if the signal is valid
    if not signal_instance.signal:
        message = "Unable to generate GenAI signal analysis. Signal not found."
        log.warning(message)
        raise GenAIException(message)

    # we check if GenAI is enabled for the signal
    if not signal_instance.signal.genai_enabled:
        message = f"Unable to generate GenAI signal analysis. GenAI feature not enabled for {signal_instance.signal.name}."
        log.warning(message)
        raise GenAIException(message)

    # we check if the signal has a prompt defined
    if not signal_instance.signal.genai_prompt:
        message = f"Unable to generate GenAI signal analysis. No GenAI prompt defined for {signal_instance.signal.name}."
        log.warning(message)
        raise GenAIException(message)
    # we generate the prompt
    prompt = f"""
    <prompt>
    {signal_instance.signal.genai_prompt}
    </prompt>
    <current_event>
    {str(signal_instance.raw)}
    </current_event>
    <runbook>
    {signal_instance.signal.runbook}
    </runbook>
    <historical_context>
    {historical_context}
    </historical_context>
    """
    tokenized_prompt, num_tokens, encoding = num_tokens_from_string(
        prompt, genai_plugin.instance.configuration.chat_completion_model
    )

    # we check if the prompt exceeds the model's token limit and truncate it if needed
    model_token_limit = get_model_token_limit(
        genai_plugin.instance.configuration.chat_completion_model
    )
    if num_tokens > model_token_limit:
        prompt = truncate_prompt(tokenized_prompt, num_tokens, encoding, model_token_limit)

    # we generate the analysis
    response = genai_plugin.instance.chat_completion(prompt=prompt)
    try:
        # the plugin may wrap the JSON payload in markdown code fences, so we strip them
        # before parsing
        summary = json.loads(response.replace("```json", "").replace("```", "").strip())

        # we check if the summary is empty
        if not summary:
            message = "Unable to generate GenAI signal analysis. We received an empty response from the artificial-intelligence plugin."
            log.warning(message)
            raise GenAIException(message)

        return summary
    except json.JSONDecodeError as e:
        message = "Unable to generate GenAI signal analysis. Error decoding response from the artificial-intelligence plugin."
        log.warning(message)
        raise GenAIException(message) from e
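

# Example usage (illustrative sketch only, not part of the original module): a caller
# such as a signal-processing or case-creation flow might invoke the summary and log
# the result. The post_signal_analysis name below is hypothetical.
#
# def post_signal_analysis(case: Case, db_session: Session) -> None:
#     try:
#         summary = generate_case_signal_summary(case=case, db_session=db_session)
#     except GenAIException as e:
#         # the helper already logs the specific reason before raising
#         log.warning(f"Skipping GenAI analysis for {case.name}: {e}")
#         return
#     # per the return type hint, summary maps section names to generated text
#     for section, text in summary.items():
#         log.info(f"GenAI {section} for {case.name}: {text}")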