ml/eval/alpaca.py
import os

import openai


def judge_responses(response1, response2, prompt):
    """
    Use the OpenAI GPT-4 API to judge two model responses.
    Returns "A" if response1 is better, "B" if response2 is better, or "tie".
    """
    # Legacy (pre-1.0) openai SDK interface; the key is read from the environment.
    openai.api_key = os.getenv("OPENAI_API_KEY")
    prompt_text = f"""
Given the user prompt: "{prompt}"
Response A: "{response1}"
Response B: "{response2}"
Which response is better? Reply with 'A', 'B', or 'tie'.
"""
    try:
        response = openai.ChatCompletion.create(
            model="gpt-4",
            messages=[
                {"role": "system", "content": "You are an expert evaluator."},
                {"role": "user", "content": prompt_text},
            ],
            max_tokens=5,  # a one-word verdict is all we need
        )
        result = response["choices"][0]["message"]["content"].strip().lower()
        # Normalize the verdict to match the documented return values;
        # treat anything unexpected as a tie.
        if result == "a":
            return "A"
        if result == "b":
            return "B"
        return "tie"
    except Exception as e:
        print(f"Error in OpenAI API call: {e}")
        return "tie"
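

# A minimal usage sketch (not part of the original file): aggregate pairwise
# verdicts over a batch of comparisons into win rates. The `pairs` input
# format and the tallying helper below are illustrative assumptions.
from collections import Counter


def win_rates(pairs):
    """Tally judge verdicts for an iterable of (prompt, response_a, response_b)."""
    tally = Counter(judge_responses(a, b, prompt) for prompt, a, b in pairs)
    total = sum(tally.values()) or 1  # guard against an empty input
    return {verdict: count / total for verdict, count in tally.items()}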
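

# Note: openai.ChatCompletion was removed in openai>=1.0. A hedged sketch of
# the equivalent call with the current client is below; it assumes the same
# model and message layout and is an illustration, not the project's code.
# from openai import OpenAI
#
# client = OpenAI()  # reads OPENAI_API_KEY from the environment by default
# completion = client.chat.completions.create(
#     model="gpt-4",
#     messages=[
#         {"role": "system", "content": "You are an expert evaluator."},
#         {"role": "user", "content": prompt_text},
#     ],
#     max_tokens=5,
# )
# verdict = completion.choices[0].message.content.strip().lower()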