in bot/code/state_management_bot.py [0:0]
def call_llm(self, question, chat_history=None):
    """Send *question* (plus optional chat history) to the configured LLM endpoint.

    Builds a JSON payload from the question, history, and the bot's configured
    categories/organization fields, then POSTs it to ``self.llm_endpoint``
    authenticated with ``self.llm_api_key``.

    Parameters
    ----------
    question : str
        The user's question to forward to the model.
    chat_history : list, optional
        Prior conversation turns. Defaults to an empty list; ``None`` is used
        as the sentinel to avoid the shared mutable-default-argument pitfall.

    Returns
    -------
    bytes or None
        The raw response body from the endpoint, or ``None`` if the HTTP call
        failed (diagnostics are printed in that case).

    Raises
    ------
    Exception
        If no API key is configured for the endpoint.
    """
    # Replace this with the primary/secondary key or AMLToken for the endpoint.
    # Validate up front so we fail fast before building the request payload.
    api_key = self.llm_api_key
    if not api_key:
        raise Exception("A key should be provided to invoke the endpoint")

    payload = {
        "question": question,
        "chat_history": [] if chat_history is None else chat_history,
        "categories": self.categories,
        "organization_urls": self.organization_urls,
        "organization": self.organization,
    }
    body = json.dumps(payload).encode()

    # The azureml-model-deployment header will force the request to go to a
    # specific deployment. Remove this header to have the request observe the
    # endpoint traffic rules.
    headers = {
        'Content-Type': 'application/json',
        'Authorization': 'Bearer ' + api_key,
        'azureml-model-deployment': 'chat-with-website-1',
    }
    req = urllib.request.Request(self.llm_endpoint, body, headers)
    try:
        # Context manager guarantees the underlying connection is closed
        # even if read() raises.
        with urllib.request.urlopen(req) as response:
            return response.read()
    except urllib.error.HTTPError as error:
        # Best-effort diagnostics; callers receive None on failure
        # (preserves the original fall-through behavior).
        print("The request failed with status code: " + str(error.code))
        # Print the headers - they include the request ID and the timestamp,
        # which are useful for debugging the failure.
        print(error.info())
        print(error.read().decode("utf8", 'ignore'))