in packages/blueprints/gen-ai-chatbot/static-assets/chatbot-genai-components/backend/python/app/bedrock.py [0:0]
def get_bedrock_response(args: dict) -> dict:
    """Invoke a Bedrock text model and return its response body.

    Joins the text parts of ``args["messages"]`` into one prompt, wraps it
    in Mistral's ``<s>[INST] ... [/INST]`` format when the model id starts
    with ``"mistral"``, and invokes the model with or without streaming
    depending on ``args["stream"]``.

    Args:
        args: Dict with keys "messages" (each message's ``content[0]``
            carrying ``{"type": "text", "text": ...}``), "model",
            "max_tokens", "temperature", "top_p", "top_k", and
            "stream" (bool).

    Returns:
        For ``stream=False``, the parsed JSON response body; for
        ``stream=True``, the raw streaming response object. In both
        cases an ``"amazon-bedrock-invocationMetrics"`` entry is attached.

    Raises:
        Exception: any error raised by the Bedrock client invocation is
            propagated after being logged.
    """
    client = get_bedrock_client()
    prompt = "\n".join(
        message["content"][0]["text"]
        for message in args["messages"]
        if message["content"][0]["type"] == "text"
    )

    model_id = args["model"]
    # Mistral models expect the instruction-wrapped prompt format.
    if model_id.startswith("mistral"):
        prompt = f"<s>[INST] {prompt} [/INST]"
    logger.info(f"Final Prompt: {prompt}")

    body = json.dumps(
        {
            "prompt": prompt,
            "max_tokens": args["max_tokens"],
            "temperature": args["temperature"],
            "top_p": args["top_p"],
            "top_k": args["top_k"],
        }
    )
    logger.info(f"The args before invoke bedrock: {args}")

    if args["stream"]:
        # Ref: https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/bedrock-runtime/client/invoke_model_with_response_stream.html
        try:
            response = client.invoke_model_with_response_stream(
                modelId=model_id,
                body=body,
            )
        except Exception:
            # The original code logged and swallowed the error, then
            # crashed on the undefined `response` with a NameError that
            # masked the real failure. Log with traceback and re-raise
            # so callers see the actual exception.
            logger.exception("Bedrock streaming invocation failed")
            raise
        response_body = response
    else:
        # Ref: https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/bedrock-runtime/client/invoke_model.html
        response = client.invoke_model(
            modelId=model_id,
            body=body,
        )
        response_body = json.loads(response.get("body").read())

    # NOTE(review): the x-amzn-bedrock-*-token-count headers are documented
    # for invoke_model; confirm they are also present on the streaming
    # response, otherwise this raises KeyError when stream=True.
    headers = response["ResponseMetadata"]["HTTPHeaders"]
    invocation_metrics = InvocationMetrics(
        input_tokens=headers["x-amzn-bedrock-input-token-count"],
        output_tokens=headers["x-amzn-bedrock-output-token-count"],
    )
    response_body["amazon-bedrock-invocationMetrics"] = invocation_metrics
    return response_body