# ml/eval/bt.py
import torch
def bradley_terry_comparison(old_rewards, new_rewards):
    """
    Perform a Bradley-Terry comparison between two sets of model generations.

    Args:
        old_rewards (list): List of dicts with the OLD model's generations and rewards.
        new_rewards (list): List of dicts with the NEW model's generations and rewards.

    Returns:
        tuple: (results, metrics), where results is a list of per-example comparison
            dicts (preferred output and probability) and metrics is a summary dict
            (percentage preferred and average probability).
    """
    results = []
    new_preferred_count = 0
    old_preferred_count = 0
    probabilities = []
    for ix in range(len(old_rewards)):
        old = old_rewards[ix]
        new = new_rewards[ix]
        # Ensure prompts match
        assert old['prompt'] == new['prompt'], f"ERROR: Prompts at index {ix} do not match."
        # Compute the Bradley-Terry probability that the NEW output is preferred:
        # P(new > old) = sigmoid(new_reward - old_reward)
        old_reward = torch.tensor(old['reward'], dtype=torch.float32)
        new_reward = torch.tensor(new['reward'], dtype=torch.float32)
        prob_new_preferred = torch.sigmoid(new_reward - old_reward).item()
        probabilities.append(prob_new_preferred)
        preferred_model = 'new' if prob_new_preferred > 0.5 else 'old'
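        # Worked example with hypothetical numbers: if new_reward = 1.5 and
        # old_reward = 0.8, then sigmoid(1.5 - 0.8) = sigmoid(0.7) ~ 0.67, so
        # the new output is preferred with probability ~67%.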
        # Count preferences
        if preferred_model == 'new':
            new_preferred_count += 1
        else:
            old_preferred_count += 1
        # Log results
        bt_result = {
            'prompt': old['prompt'],
            'old_output': old['output'],
            'new_output': new['output'],
            'old_reward': old['reward'],
            'new_reward': new['reward'],
            'preferred': preferred_model,
            'prob_new_preferred': prob_new_preferred
        }
        results.append(bt_result)
    # Calculate metrics
    total_examples = len(old_rewards)
    metrics = {
        'total_examples': total_examples,
        'new_preferred_percentage': 100 * new_preferred_count / total_examples,
        'old_preferred_percentage': 100 * old_preferred_count / total_examples,
        'avg_probability_new_preferred': sum(probabilities) / total_examples
    }
    return results, metrics
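

# Minimal usage sketch with hypothetical data. Each entry is assumed to be a
# dict with 'prompt', 'output', and a scalar 'reward' (e.g. from a reward
# model), matching the fields accessed above; the prompts and rewards below
# are made up for illustration.
if __name__ == "__main__":
    old_rewards = [
        {'prompt': 'Explain gravity.', 'output': 'Gravity pulls things down.', 'reward': 0.8},
        {'prompt': 'Define entropy.', 'output': 'Entropy is disorder.', 'reward': 1.2},
    ]
    new_rewards = [
        {'prompt': 'Explain gravity.', 'output': 'Gravity is the mutual attraction between masses.', 'reward': 1.5},
        {'prompt': 'Define entropy.', 'output': 'Entropy quantifies uncertainty in a system.', 'reward': 1.0},
    ]
    results, metrics = bradley_terry_comparison(old_rewards, new_rewards)
    print(metrics)  # e.g. {'total_examples': 2, 'new_preferred_percentage': 50.0, ...}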