in use-cases/model-fine-tuning-pipeline/data-preparation/gemma-it/src/dataprep.py
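import logging

import openai

# The names below are referenced by generate_content() but defined elsewhere in
# the original module; the concrete values here are illustrative placeholders
# only (e.g. the real MODEL_ID and num_questions would come from configuration).
logger = logging.getLogger(__name__)
MODEL_ID = "gemma"  # placeholder model identifier
num_questions = 3  # placeholder count interpolated into the prompt below
client = openai.OpenAI()  # assumes API key / base URL are set via environment
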
def generate_content(context):
    try:
        # Generation settings for the chat completion request.
        max_tokens = 200
        temperature = 0.7
        sys_prompt = "This is dialogue for online shopping experiences between an agent and a user."
        prompt = f"Generate {num_questions} Search Queries in conversational tone and Answers for this product:\n{context}. Return the result without any formatting in a single line as Question : Answer ;"
        messages = [
            {
                "role": "system",
                "content": [
                    {"text": sys_prompt, "type": "text"},
                ],
            },
            {
                "role": "user",
                "content": [
                    {"text": prompt, "type": "text"},
                ],
            },
        ]
        logger.debug(messages)
        response = client.chat.completions.create(
            model=MODEL_ID,
            messages=messages,
            max_tokens=max_tokens,
            temperature=temperature,
        )
        logger.debug(response)
    except openai.BadRequestError as e:
        # Handle error 400
        logger.error(f"Error 400: {e}")
        raise
    except openai.AuthenticationError as e:
        # Handle error 401
        logger.error(f"Error 401: {e}")
        raise
    except openai.PermissionDeniedError as e:
        # Handle error 403
        logger.error(f"Error 403: {e}")
        raise
    except openai.NotFoundError as e:
        # Handle error 404
        logger.error(f"Error 404: {e}")
        raise
    except openai.UnprocessableEntityError as e:
        # Handle error 422
        logger.error(f"Error 422: {e}")
        raise
    except openai.RateLimitError as e:
        # Handle error 429
        logger.error(f"Error 429: {e}")
        logger.error("A 429 status code was received; we should back off a bit.")
        raise
    except openai.InternalServerError as e:
        # Handle error >=500
        logger.error(f"Error >=500: {e}")
        raise
    except openai.APIConnectionError as e:
        # Handle API connection error
        logger.error("The server could not be reached")
        logger.error(
            e.__cause__
        )  # an underlying Exception, likely raised within httpx.
        raise
    except openai.APIStatusError as e:
        # Catch-all for any other non-200-range status code.
        logger.error("Another non-200-range status code was received")
        logger.error(e.status_code)
        logger.error(e.response)
        raise
    return response.choices[0].message.content
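

# Illustrative usage sketch (not part of the original file): generate Q&A pairs
# for one product description, backing off and retrying on 429s as the
# rate-limit handler above suggests. The product text is a made-up placeholder.
if __name__ == "__main__":
    import time

    product = "Classic leather wallet with eight card slots and RFID protection."
    for attempt in range(3):
        try:
            print(generate_content(product))
            break
        except openai.RateLimitError:
            if attempt == 2:
                raise
            # Exponential backoff before retrying: 1s, then 2s.
            time.sleep(2**attempt)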