in src/chat_request.py [0:0]
import os
import pathlib

from promptflow.core import AzureOpenAIModelConfiguration, Prompty


def get_response(question, chat_history):
    print("inputs:", question)

    # Embed the question, then retrieve grounding documents for it.
    embedding = get_embedding(question)
    context = get_context(question, embedding)
    print("context:", context)
    print("getting result...")

    # Point the prompty at the Azure OpenAI chat deployment configured
    # in the environment.
    configuration = AzureOpenAIModelConfiguration(
        azure_deployment=os.getenv("AZURE_OPENAI_CHAT_DEPLOYMENT", ""),
        api_version=os.getenv("AZURE_OPENAI_API_VERSION", ""),
        azure_endpoint=os.getenv("AZURE_OPENAI_ENDPOINT", ""),
    )
    override_model = {
        "configuration": configuration,
        "parameters": {"max_tokens": 512},
    }

    # Load chat.prompty from this module's directory and run it with the
    # question and the retrieved documents.
    data_path = os.path.join(pathlib.Path(__file__).parent.resolve(), "chat.prompty")
    prompty_obj = Prompty.load(data_path, model=override_model)
    result = prompty_obj(question=question, documents=context)

    print("result:", result)
    return {"answer": result, "context": context}
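get_embedding and get_context are called above but defined elsewhere in the module. A minimal sketch of plausible implementations, assuming an Azure OpenAI embeddings deployment and an Azure AI Search vector index; the environment variable names, index name, and field names here are assumptions, not taken from this file:

# Hypothetical sketch of the retrieval helpers used by get_response.
# Assumes AZURE_OPENAI_API_KEY, AZURE_OPENAI_EMBEDDING_DEPLOYMENT, and
# AZURE_SEARCH_ENDPOINT variables plus the "products" index and its
# "contentVector" / "content" fields; none of these come from this file.
from azure.identity import DefaultAzureCredential
from azure.search.documents import SearchClient
from azure.search.documents.models import VectorizedQuery
from openai import AzureOpenAI


def get_embedding(question: str) -> list[float]:
    client = AzureOpenAI(
        azure_endpoint=os.getenv("AZURE_OPENAI_ENDPOINT", ""),
        api_version=os.getenv("AZURE_OPENAI_API_VERSION", ""),
        api_key=os.getenv("AZURE_OPENAI_API_KEY", ""),
    )
    response = client.embeddings.create(
        input=question,
        model=os.getenv("AZURE_OPENAI_EMBEDDING_DEPLOYMENT", ""),
    )
    return response.data[0].embedding


def get_context(question: str, embedding: list[float]) -> list[dict]:
    search_client = SearchClient(
        endpoint=os.getenv("AZURE_SEARCH_ENDPOINT", ""),
        index_name="products",  # assumed index name
        credential=DefaultAzureCredential(),
    )
    # Hybrid query: keyword search plus nearest-neighbor vector search.
    vector_query = VectorizedQuery(
        vector=embedding, k_nearest_neighbors=3, fields="contentVector"
    )
    results = search_client.search(search_text=question, vector_queries=[vector_query])
    return [{"id": doc["id"], "content": doc["content"]} for doc in results]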
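Prompty.load expects a .prompty file: YAML frontmatter describing the model and inputs, followed by the prompt template with role markers and Jinja2 placeholders. A minimal sketch of what chat.prompty might contain; the wording and parameter values are illustrative, not copied from the repo:

---
name: chat
description: Grounded chat over retrieved documents
model:
  api: chat
  configuration:
    type: azure_openai
  parameters:
    max_tokens: 256
inputs:
  question:
    type: string
  documents:
    type: object
---
system:
You are a helpful assistant. Answer using only the documents below.

# Documents
{{documents}}

user:
{{question}}

Note that the override_model dict passed to Prompty.load in get_response replaces this frontmatter's configuration and max_tokens at load time, which is how the code targets the deployment named in the environment.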
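A minimal smoke test for the function, assuming the three AZURE_OPENAI_* variables referenced above are exported; the sample question is made up:

if __name__ == "__main__":
    # Requires AZURE_OPENAI_CHAT_DEPLOYMENT, AZURE_OPENAI_API_VERSION,
    # and AZURE_OPENAI_ENDPOINT in the environment.
    response = get_response("What tents do you sell?", chat_history=[])
    print(response["answer"])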