qlearn/atari/train_bootstrapped_agent.py [195:243]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
            num_updates += 1
            # Sample a head uniformly at random from the ensemble.
            k = random.randrange(args.nheads)

        # Update target network.
        if num_iters > args.learning_starts and num_iters % args.target_update_freq == 0:
            agent.update_target_net()

        if start_time is not None:
            steps_per_iter.update(num_iters - start_steps)
            iteration_time_est.update(time.time() - start_time)
        start_time, start_steps = time.time(), num_iters

        if num_iters > args.num_steps:
            break

        if done and num_episodes % args.print_freq == 0 and num_episodes >= args.print_freq:
            steps_left = args.num_steps - num_iters
            completion = np.round(100 * num_iters / args.num_steps, 1)
            mean_100ep_reward = round(np.mean(episode_rewards[-101:-1]), 1)
            logger.record_tabular("% completion", completion)
            logger.record_tabular("total steps", num_iters)
            logger.record_tabular("episodes", num_episodes)
            logger.record_tabular("reward (100 epi mean)", mean_100ep_reward)

            # FPS is unknown until at least one timed iteration has completed.
            fps_estimate = (float(steps_per_iter) / (float(iteration_time_est) + 1e-6)
                            if steps_per_iter._value is not None else None)
            logger.record_tabular("FPS", fps_estimate if fps_estimate else "calculating...")
            logger.dump_tabular()
            logger.log()
            if fps_estimate:
                # Only report an ETA once a real FPS estimate exists.
                logger.log("ETA: " + pretty_eta(int(steps_left / fps_estimate)))
            logger.log()

            with open(save_dir + '/reward_monitor.csv', "a") as monitor_file:
                monitor = csv.writer(monitor_file)
                monitor.writerow([num_iters, mean_100ep_reward])

            # if len(td_errors_list) > 0:
            #     with open(save_dir + '/error_monitor.csv', "a") as monitor_file:
            #         monitor = csv.writer(monitor_file)
            #         monitor.writerow([num_updates, round(np.mean(td_errors_list), 4)])

            if best_score is None or mean_100ep_reward > best_score:
                logger.log("Saving model due to mean reward increase: {} -> {}".format(
                               best_score, mean_100ep_reward))
                best_score = mean_100ep_reward
                torch.save(agent.online_net.state_dict(), log_dir + '/best_model.torch')

            torch.save(agent.online_net.state_dict(), save_dir + '/current_model.torch')
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
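
Note: the excerpt calls agent.update_target_net(), but the agent class sits outside the shown range [195:243]. In DQN-style trainers this call is conventionally a hard copy of the online network's weights into the target network. A minimal sketch under that assumption follows; the class name is illustrative, and only the online_net attribute is confirmed by the torch.save calls above.

import torch.nn as nn


class BootstrappedAgentSketch:
    """Illustrative only: mirrors the agent interface used in the excerpt."""

    def __init__(self, online_net: nn.Module, target_net: nn.Module):
        self.online_net = online_net
        self.target_net = target_net
        # Freeze the target net; it is only written to via update_target_net().
        for p in self.target_net.parameters():
            p.requires_grad_(False)

    def update_target_net(self):
        # Hard update: copy the online weights into the target network so that
        # TD targets stay fixed between syncs (standard DQN practice).
        self.target_net.load_state_dict(self.online_net.state_dict())

With this interface, checkpointing agent.online_net.state_dict(), as the excerpt does, saves only the learnable network.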



qlearn/atari/train_prior_bootstrapped_agent.py [195:243]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
            num_updates += 1
            # Sample a head uniformly at random from the ensemble.
            k = random.randrange(args.nheads)

        # Update target network.
        if num_iters > args.learning_starts and num_iters % args.target_update_freq == 0:
            agent.update_target_net()

        if start_time is not None:
            steps_per_iter.update(num_iters - start_steps)
            iteration_time_est.update(time.time() - start_time)
        start_time, start_steps = time.time(), num_iters

        if num_iters > args.num_steps:
            break

        if done and num_episodes % args.print_freq == 0 and num_episodes >= args.print_freq:
            steps_left = args.num_steps - num_iters
            completion = np.round(100 * num_iters / args.num_steps, 1)
            mean_100ep_reward = round(np.mean(episode_rewards[-101:-1]), 1)
            logger.record_tabular("% completion", completion)
            logger.record_tabular("total steps", num_iters)
            logger.record_tabular("episodes", num_episodes)
            logger.record_tabular("reward (100 epi mean)", mean_100ep_reward)

            # FPS is unknown until at least one timed iteration has completed.
            fps_estimate = (float(steps_per_iter) / (float(iteration_time_est) + 1e-6)
                            if steps_per_iter._value is not None else None)
            logger.record_tabular("FPS", fps_estimate if fps_estimate else "calculating...")
            logger.dump_tabular()
            logger.log()
            if fps_estimate:
                # Only report an ETA once a real FPS estimate exists.
                logger.log("ETA: " + pretty_eta(int(steps_left / fps_estimate)))
            logger.log()

            with open(save_dir + '/reward_monitor.csv', "a") as monitor_file:
                monitor = csv.writer(monitor_file)
                monitor.writerow([num_iters, mean_100ep_reward])

            # if len(td_errors_list) > 0:
            #     with open(save_dir + '/error_monitor.csv', "a") as monitor_file:
            #         monitor = csv.writer(monitor_file)
            #         monitor.writerow([num_updates, round(np.mean(td_errors_list), 4)])

            if best_score is None or mean_100ep_reward > best_score:
                logger.log("Saving model due to mean reward increase: {} -> {}".format(
                               best_score, mean_100ep_reward))
                best_score = mean_100ep_reward
                torch.save(agent.online_net.state_dict(), log_dir + '/best_model.torch')

            torch.save(agent.online_net.state_dict(), save_dir + '/current_model.torch')
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
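
The two excerpts are identical line for line, so the periodic logging and checkpointing step is a natural candidate for a shared helper that both training scripts call. A sketch of one way to factor it out; the function name, parameter grouping, and return convention are hypothetical, not the repository's actual structure:

import csv

import numpy as np
import torch


def log_and_checkpoint(logger, pretty_eta, agent, args, num_iters, num_episodes,
                       episode_rewards, fps_estimate, save_dir, log_dir, best_score):
    """Periodic logging/checkpointing shared by both trainers (illustrative)."""
    steps_left = args.num_steps - num_iters
    mean_100ep_reward = round(np.mean(episode_rewards[-101:-1]), 1)
    logger.record_tabular("% completion", np.round(100 * num_iters / args.num_steps, 1))
    logger.record_tabular("total steps", num_iters)
    logger.record_tabular("episodes", num_episodes)
    logger.record_tabular("reward (100 epi mean)", mean_100ep_reward)
    logger.record_tabular("FPS", fps_estimate if fps_estimate else "calculating...")
    logger.dump_tabular()
    if fps_estimate:
        logger.log("ETA: " + pretty_eta(int(steps_left / fps_estimate)))

    # Append the running reward curve for offline plotting.
    with open(save_dir + '/reward_monitor.csv', "a") as monitor_file:
        csv.writer(monitor_file).writerow([num_iters, mean_100ep_reward])

    # Keep both a best-so-far snapshot and a rolling "current" checkpoint.
    if best_score is None or mean_100ep_reward > best_score:
        logger.log("Saving model due to mean reward increase: {} -> {}".format(
            best_score, mean_100ep_reward))
        best_score = mean_100ep_reward
        torch.save(agent.online_net.state_dict(), log_dir + '/best_model.torch')
    torch.save(agent.online_net.state_dict(), save_dir + '/current_model.torch')
    return best_score

Each script would then replace its copy of the block with a single call such as best_score = log_and_checkpoint(...), leaving only the head sampling and target-network sync inline.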



