def run()

in evals/elsuite/bluff/eval.py [0:0]


    def run(self, recorder: evals.record.Recorder) -> dict[str, Union[float, int]]:
        """Evaluate every sample and summarise the recorded bluff-game metrics.

        Each recorded game contributes a "wins" list (per-round winner, compared
        against 0 and 1 here) and a "who_called_bluff" list (presumably the
        player index that called bluff that round — inferred from usage).
        Games that blew past the context window never reach the recorder, so
        aggregate counts use ``len(metrics)`` rather than ``self.n_samples``.

        Returns a flat dict of win counts, per-round win tallies, an OLS trend
        test on player 0's per-round win ratio, and a breakdown of how rounds
        ended from player 0's perspective.

        NOTE(review): divides by ``round_cnt`` and ``num_games`` and indexes
        ``metrics[0]`` — raises if no game produced a valid result; confirm
        that is acceptable for this eval.
        """
        self.eval_all_samples(recorder, list(range(self.n_samples)))
        metrics = recorder.get_metrics()

        #   1.  Per-round winrate
        per_game_winners = [game["wins"] for game in metrics]
        flat_winners = [winner for game in per_game_winners for winner in game]
        player_0_wins = flat_winners.count(0)
        player_1_wins = flat_winners.count(1)
        round_cnt = player_0_wins + player_1_wins

        #   2.  Per-round-ix winrate (e.g. did we learn from early rounds?)
        #   Note: we don't use self.n_samples because some games might have
        #   exceeded the context window length
        num_games = len(metrics)

        player_0_per_round_wins = [0] * self.num_rounds
        player_1_per_round_wins = [0] * self.num_rounds
        for game in per_game_winners:
            for round_ix, winner in enumerate(game):
                if winner == 0:
                    player_0_per_round_wins[round_ix] += 1
                elif winner == 1:
                    player_1_per_round_wins[round_ix] += 1

        player_0_per_round_win_ratio = [
            wins / num_games for wins in player_0_per_round_wins
        ]

        #   3.  Linear trend test: does player 0's win ratio move with round index?
        #   (Frame construction kept as rows + positional index, then transposed,
        #   so the data handed to statsmodels is identical.)
        frame_rows = [list(range(self.num_rounds)), player_0_per_round_win_ratio]
        data = pd.DataFrame(frame_rows, ["round_ix", "wins"]).transpose()

        results = smf.ols("wins ~ round_ix", data=data).fit()
        print(results.summary())

        #   4.  Additional data - how rounds ended, keyed by
        #   (round winner, player who called bluff); any other combination
        #   (e.g. a winner value that is neither 0 nor 1) is ignored.
        outcome_tally = {(0, 0): 0, (0, 1): 0, (1, 0): 0, (1, 1): 0}
        for game_data in metrics:
            for outcome in zip(game_data["wins"], game_data["who_called_bluff"]):
                if outcome in outcome_tally:
                    outcome_tally[outcome] += 1

        return {
            "valid_samples": num_games,
            "too_long_games": self.n_samples - num_games,
            "player_0": metrics[0]["player_0"],
            "player_1": metrics[0]["player_1"],
            "player_0_wins": player_0_wins,
            "player_1_wins": player_1_wins,
            "player_0_win_ratio": player_0_wins / round_cnt,
            "player_0_per_round_wins": player_0_per_round_wins,
            "player_1_per_round_wins": player_1_per_round_wins,
            "player_0_round_ix_coef": results.params["round_ix"],
            "player_0_round_ix_pvalue": results.pvalues["round_ix"],
            "player_0_bid_won": outcome_tally[(0, 1)],
            "player_0_bid_lost": outcome_tally[(1, 1)],
            "player_0_called_bluff_won": outcome_tally[(0, 0)],
            "player_0_called_bluff_lost": outcome_tally[(1, 0)],
        }