# src/chat_request.py
import os
import pathlib

# Prompty and model-configuration helpers from the promptflow SDK.
from promptflow.core import AzureOpenAIModelConfiguration, Prompty

# get_embedding, get_context, and azure_config are defined elsewhere in
# chat_request.py (not shown in this excerpt).


def get_response(question, chat_history):
    print("inputs:", question)
    # Embed the question, then retrieve matching documents from the search index.
    embedding = get_embedding(question)
    context = get_context(question, embedding)
    print("context:", context)
    print("getting result...")
    deployment_name = os.environ["AZURE_OPENAI_CHAT_DEPLOYMENT"]
    configuration = AzureOpenAIModelConfiguration(
        azure_deployment=deployment_name,
        api_version=azure_config.aoai_api_version,
        azure_endpoint=azure_config.aoai_endpoint,
    )
    # Override the model settings declared in the .prompty file at load time.
    override_model = {
        "configuration": configuration,
        "parameters": {"max_tokens": 512},
    }
    # Resolve chat.prompty relative to this file so the flow works from any cwd.
    data_path = os.path.join(pathlib.Path(__file__).parent.resolve(), "chat.prompty")
    prompty_obj = Prompty.load(data_path, model=override_model)
    # Run the prompty with the retrieved documents as grounding context.
    result = prompty_obj(question=question, documents=context)
    print("result: ", result)
    return {"answer": result, "context": context}