def do_evals()

in MTRF/r3l/r3l/r3l_agents/softlearning/evaluation_scripts/phased_evals_midair_slotted.py [0:0]
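
Evaluates a training run's saved policy checkpoints on the midair-slotted reposition task. For each (subsampled) checkpoint directory under seed_dir, the checkpoint's policy is rolled out for a fixed number of fixed-length episodes; the function records per-checkpoint success rates (final object-to-target distance below 0.2), mean returns, and the final observation/reward dicts, optionally saving one rollout video per checkpoint.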


import glob
import os
from pathlib import Path

import numpy as np

# Project-specific helpers; the exact import paths below are assumptions and
# may need adjusting to the r3l package layout:
# from softlearning.environments.adapters.gym_adapter import GymAdapter
# from r3l.utils.video import save_video
# from r3l.r3l_agents.softlearning.policies import load_policy_from_checkpoint

def do_evals(seed_dir, checkpoints_to_eval=None):
    print(seed_dir, "\n")
    path = Path(seed_dir)
    # Collect the checkpoint_<N> subdirectories and sort them by checkpoint index.
    checkpoint_dirs = [d for d in glob.glob(str(path / "*")) if 'checkpoint' in d and os.path.isdir(d)]
    checkpoint_dirs.sort(key=lambda s: int(s.split("_")[-1]))
    # Optionally restrict evaluation to an explicit set of checkpoint indices.
    if checkpoints_to_eval is not None:
        checkpoint_dirs = [d for d in checkpoint_dirs if int(d.split("_")[-1]) in checkpoints_to_eval]

    N_EVAL_EPISODES = 5     # episodes per checkpoint
    T = 125                 # steps per episode (fixed horizon)
    EVAL_EVERY_N = 2        # evaluate every 2nd checkpoint
    should_save_video = True

    # Evaluation env: force a full reset before every episode
    # (reset_every_n_episodes=1), even though the task variant is reset-free.
    env = GymAdapter(
        "SawyerDhandInHandValve3", "RepositionMidairSlottedResetFree-v0",
        reset_every_n_episodes=1,
    )
    env.reset()

    success_rates = []
    ckpt_numbers = []
    obs_dicts_per_policy = []
    rew_dicts_per_policy = []
    returns_per_policy = []
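    # Walk the checkpoints newest-first, subsampled to every EVAL_EVERY_N-th one.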
    for ckpt_dir in reversed(checkpoint_dirs[::EVAL_EVERY_N]):
        ckpt_number = ckpt_dir.split("_")[-1]

        print("EVALUATING CHECKPOINT: ", ckpt_number)

        policy = load_policy_from_checkpoint(ckpt_dir, env)

        successes = []
        obs_dicts = []
        rew_dicts = []
        returns = []
        frames = []

        for ep in range(N_EVAL_EPISODES):
            env.reset()

            ret = 0
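            # Fixed-horizon rollout: the policy acts on the raw observation dict
            # from the underlying env; episodes always run T steps (done is unused).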
            for t in range(T):
                _, rew, done, info = env.step(policy(env.active_env.get_obs_dict()))
                if should_save_video:
                    frames.append(env.render(mode="rgb_array", width=480, height=480))
                ret += rew

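            # Score the episode from its final state only: success means the object
            # ends within 0.2 (distance units, presumably meters) of the target.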
            obs_dict = env.get_obs_dict()
            rew_dict = env.get_reward_dict(None, obs_dict)
            success = obs_dict["object_to_target_xyz_distance"] < 0.2
            successes.append(success)
            returns.append(ret)
            obs_dicts.append(obs_dict)
            rew_dicts.append(rew_dict)

        if should_save_video:
            save_video(f"./{ckpt_number}.mp4", np.asarray(frames))

        ckpt_numbers.append(ckpt_number)
        success_rate = np.array(successes).astype(int).mean()
        print("success rate = ", success_rate)
        success_rates.append(success_rate)
        obs_dicts_per_policy.append(obs_dicts)
        rew_dicts_per_policy.append(rew_dicts)
        returns_per_policy.append(np.mean(returns))

    return {
        "iters": ckpt_numbers,
        "success": success_rates,
        "obs": obs_dicts_per_policy,
        "rew": rew_dicts_per_policy,
        "returns": returns_per_policy,
    }
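
A minimal usage sketch, assuming the module-level imports above resolve and that seed_dir points at a run directory containing checkpoint_<N> subfolders; the path and checkpoint indices below are hypothetical:

if __name__ == "__main__":
    # Hypothetical run directory and checkpoint indices; adjust to a real run.
    results = do_evals(
        "/tmp/r3l_runs/seed_0",
        checkpoints_to_eval=[100, 200],
    )

    # "iters" holds checkpoint ids as strings; success/returns are numpy scalars.
    for ckpt, rate, ret in zip(results["iters"], results["success"], results["returns"]):
        print(f"checkpoint {ckpt}: success rate {float(rate):.2f}, mean return {float(ret):.1f}")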