in agora/cerebral_api/src/app.py [0:0]
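# --- Hedged sketch: module-level setup this handler appears to assume. ---
# Only the imports below are standard library / Flask-SocketIO; names such as
# llm, rag_assistant, influx_handler, sql_handler, VERBOSE, USE_LOCAL_LLM,
# CHROMA_AVAILABLE, and API_VERSION are defined elsewhere in app.py. The
# handler itself is presumably registered with @socketio.on('process_question').
import logging
import re
import threading
import uuid

from flask import request          # Flask-SocketIO attaches .sid to the request context
from flask_socketio import emit

logger = logging.getLogger(__name__)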
def handle_process_question(data):
    """Handle process_question via WebSocket with streaming responses and special cases."""
    request_id = str(uuid.uuid4())
    try:
        logger.info(f"[{request_id}] Processing question request")

        # Extract request parameters
        question = data.get('question')
        industry = data.get('industry')
        role = data.get('role')
        session_id = request.sid  # Socket.IO session ID for this client

        if VERBOSE:
            logger.debug(f"[{request_id}] Question: {question}")
            logger.debug(f"[{request_id}] Industry: {industry}")
            logger.debug(f"[{request_id}] Role: {role}")
            logger.debug(f"[{request_id}] Session ID: {session_id}")

        if not all([question, industry, role]):
            logger.warning(f"[{request_id}] Missing required parameters")
            emit('error', {'error': 'Question, industry, and role parameters are required'})
            return

        # Detect greetings/help requests with word-boundary matching; plain
        # substring checks would misfire on words like "this" or "shipping"
        greeting_patterns = [
            'hello', 'hi', 'hey', 'greetings', 'good morning', 'good afternoon',
            'good evening', 'hola', 'help', 'what can you do'
        ]
        if any(re.search(rf"\b{re.escape(pattern)}\b", question.lower())
               for pattern in greeting_patterns):
            logger.info(f"[{request_id}] Detected greeting/help request, using chat_hello")
            response = llm.chat_hello(industry, role, question, session_id)
            emit('result', {'result': response})
            emit('complete')
            return
        if question.lower() == "$version":
            logger.info(f"[{request_id}] Version request, sending API version")
            emit('result', {'result': API_VERSION})
            emit('complete')
            return
        # Step 1: Classify the question
        logger.info(f"[{request_id}] Classifying question")
        category = llm.classify_question(question, industry, role)
        logger.info(f"[{request_id}] Question classified as: {category}")
        emit('classification', {'category': category})

        # "unknown" and "greetings" both fall back to a general chat response
        if category in ("unknown", "greetings"):
            logger.info(f"[{request_id}] '{category}' category, using chat_hello for general response")
            response = llm.chat_hello(industry, role, question, session_id)
            emit('result', {'result': response})
            emit('complete')
            return
        # Handle normal categories
        if category == 'documentation':
            if USE_LOCAL_LLM:
                if not CHROMA_AVAILABLE:
                    logger.warning(f"[{request_id}] ChromaDB not available")
                    emit('error', {'error': 'RAG functionality is currently unavailable'})
                    return
                logger.info(f"[{request_id}] Using local RAG Assistant")
                # Run the local RAG pipeline on a daemon thread so this
                # handler returns without blocking the Socket.IO worker
                thread = threading.Thread(
                    target=rag_assistant.generate_response_slm,
                    args=(question, industry, role, request.sid)
                )
                thread.daemon = True
                thread.start()
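                # NOTE (assumption): generate_response_slm is presumed to emit
                # its streamed chunks and a final 'complete' event to this
                # client itself via the captured session id, since the handler
                # returns here without emitting a result.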
            else:
                logger.info(f"[{request_id}] Using remote Azure OpenAI")
                response = llm.chat_llm(question, industry, role)
                emit('result', {'result': response})
                emit('complete')
        elif category == 'data':
            logger.info(f"[{request_id}] Processing data query")
            influx_query = llm.convert_question_query_influx(question, industry, role)
            emit('query', {'query': influx_query})
            query_result = influx_handler.execute_query_and_return_data(influx_query)
            html_formatted = llm.format_results_to_html(query_result, "influx", industry, role)
            recommendations = llm.generate_recommendations(
                question, influx_query, query_result, industry, role
            )
            emit('recommendations', {'recommendations': recommendations})
            emit('result', {'result': html_formatted})
            emit('complete')
        elif category == 'relational':
            logger.info(f"[{request_id}] Processing relational query")
            sql_query = llm.convert_question_query_sql(question, industry, role)
            emit('query', {'query': sql_query})
            query_result = sql_handler.test_data(sql_query)
            html_formatted = llm.format_results_to_html(query_result, "sql", industry, role)
            recommendations = llm.generate_recommendations(
                question, sql_query, query_result, industry, role
            )
            emit('recommendations', {'recommendations': recommendations})
            emit('result', {'result': html_formatted})
            emit('complete')
        else:
            # Unrecognized category: report it instead of leaving the client waiting
            logger.warning(f"[{request_id}] Unhandled category: {category}")
            emit('error', {'error': f'Unhandled question category: {category}'})
    except Exception as e:
        logger.error(f"[{request_id}] Error in process_question: {str(e)}")
        if VERBOSE:
            import traceback
            logger.debug(f"[{request_id}] Full error traceback:")
            logger.debug(traceback.format_exc())
        emit('error', {'error': str(e)})
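
# --- Hedged usage sketch (not part of app.py): a minimal python-socketio ---
# client exercising the event flow above. The server URL and the payload
# values are assumptions for illustration.
import socketio

sio = socketio.Client()

@sio.on('classification')
def on_classification(data):
    print('category:', data['category'])

@sio.on('query')
def on_query(data):
    print('generated query:', data['query'])

@sio.on('result')
def on_result(data):
    print('result:', data['result'])

@sio.on('error')
def on_error(data):
    print('error:', data['error'])

@sio.on('complete')
def on_complete():
    print('done')
    sio.disconnect()

sio.connect('http://localhost:5000')   # assumed host/port
sio.emit('process_question', {
    'question': "What were yesterday's sales?",
    'industry': 'retail',
    'role': 'store manager',
})
sio.wait()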