qlearn/atari/train_bootstrapped_agent.py [94:139]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    return parser.parse_args()

# Script entry point: resolve the run configuration, create run directories,
# set up logging/monitoring files, and build a fully seeded Atari environment.
# NOTE(review): this block is byte-identical to the setup in
# train_noisy_agent.py — a shared setup helper would remove the duplication.
if __name__ == '__main__':
    args = parse_args()
    # An explicit run index selects a predefined (seed, env) pair from RUN_ID,
    # so a single integer (e.g. a cluster array-job id) picks the configuration.
    if args.run_index is not None:
        args.seed, args.env = RUN_ID[args.run_index]

    # Echo every parsed option, indented under a header, into the run's stdout log.
    print(' ' * 26 + 'Options')
    for k, v in vars(args).items():
        print(' ' * 26 + k + ': ' + str(v))

    # Ensure the top-level save directory exists.
    # NOTE(review): the exists()/mkdir pair is racy and fails for nested paths;
    # os.makedirs(args.save_dir, exist_ok=True) would be more robust.
    if not os.path.exists(args.save_dir):
        os.mkdir(args.save_dir)
    # Per-run, timestamped directory names: <base>/<env>-<agent>-seed-<seed>-<date>
    date = time.strftime('%Y-%m-%d.%H%M')
    log_dir = '{}/{}-{}-seed-{}-{}'.format(args.log_dir, args.env, args.agent, args.seed, date)
    save_dir = '{}/{}-{}-seed-{}-{}'.format(args.save_dir, args.env, args.agent, args.seed, date)

    # Event writer for scalar/metric logging during training (presumably
    # TensorBoard's SummaryWriter — confirm against the file's imports).
    log = SummaryWriter(log_dir)
    print('Writing logs to {}'.format(log_dir))

    if not os.path.exists(save_dir):
        os.mkdir(save_dir)

    # Disabled TD-error monitor, kept for reference.
    # with open(save_dir + '/error_monitor.csv', "wt") as monitor_file:
    #     monitor = csv.writer(monitor_file)
    #     monitor.writerow(['update', 'error', str(int(args.num_steps / args.learning_freq))])

    # Write only the CSV header for per-epoch reward monitoring; the file is
    # closed here, so the training loop presumably re-opens it in append
    # mode — TODO confirm against the rest of the script.
    with open(save_dir + '/reward_monitor.csv', "wt") as monitor_file:
        monitor = csv.writer(monitor_file)
        monitor.writerow(['epoch', 'reward', str(args.num_steps)])

    # Snapshot the full argument namespace so the run can be reproduced later.
    with open(save_dir + "/params.pkl", 'wb') as f:
        pickle.dump(args, f)

    # Create and seed the env.
    # DeepMind-style Atari preprocessing, but with raw (unclipped) rewards and
    # no episode-on-life-loss termination; frames are stacked, pixels not scaled.
    env = make_atari(args.env)
    env = wrap_deepmind(env, episode_life=False, clip_rewards=False, frame_stack=True, scale=False)
    # Seed every RNG in play (gym env, CUDA, torch CPU, numpy, stdlib random)
    # so runs with the same seed are reproducible.
    env.seed(args.seed)
    torch.cuda.manual_seed(args.seed)
    torch.manual_seed(args.seed)
    np.random.seed(args.seed)
    random.seed(args.seed)

    # TODO
    num_actions = env.action_space.n
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -



qlearn/atari/train_noisy_agent.py [92:137]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    return parser.parse_args()

# Script entry point: resolve the run configuration, create run directories,
# set up logging/monitoring files, and build a fully seeded Atari environment.
# NOTE(review): this block is byte-identical to the setup in
# train_bootstrapped_agent.py — a shared setup helper would remove the duplication.
if __name__ == '__main__':
    args = parse_args()
    # An explicit run index selects a predefined (seed, env) pair from RUN_ID,
    # so a single integer (e.g. a cluster array-job id) picks the configuration.
    if args.run_index is not None:
        args.seed, args.env = RUN_ID[args.run_index]

    # Echo every parsed option, indented under a header, into the run's stdout log.
    print(' ' * 26 + 'Options')
    for k, v in vars(args).items():
        print(' ' * 26 + k + ': ' + str(v))

    # Ensure the top-level save directory exists.
    # NOTE(review): the exists()/mkdir pair is racy and fails for nested paths;
    # os.makedirs(args.save_dir, exist_ok=True) would be more robust.
    if not os.path.exists(args.save_dir):
        os.mkdir(args.save_dir)
    # Per-run, timestamped directory names: <base>/<env>-<agent>-seed-<seed>-<date>
    date = time.strftime('%Y-%m-%d.%H%M')
    log_dir = '{}/{}-{}-seed-{}-{}'.format(args.log_dir, args.env, args.agent, args.seed, date)
    save_dir = '{}/{}-{}-seed-{}-{}'.format(args.save_dir, args.env, args.agent, args.seed, date)

    # Event writer for scalar/metric logging during training (presumably
    # TensorBoard's SummaryWriter — confirm against the file's imports).
    log = SummaryWriter(log_dir)
    print('Writing logs to {}'.format(log_dir))

    if not os.path.exists(save_dir):
        os.mkdir(save_dir)

    # Disabled TD-error monitor, kept for reference.
    # with open(save_dir + '/error_monitor.csv', "wt") as monitor_file:
    #     monitor = csv.writer(monitor_file)
    #     monitor.writerow(['update', 'error', str(int(args.num_steps / args.learning_freq))])

    # Write only the CSV header for per-epoch reward monitoring; the file is
    # closed here, so the training loop presumably re-opens it in append
    # mode — TODO confirm against the rest of the script.
    with open(save_dir + '/reward_monitor.csv', "wt") as monitor_file:
        monitor = csv.writer(monitor_file)
        monitor.writerow(['epoch', 'reward', str(args.num_steps)])

    # Snapshot the full argument namespace so the run can be reproduced later.
    with open(save_dir + "/params.pkl", 'wb') as f:
        pickle.dump(args, f)

    # Create and seed the env.
    # DeepMind-style Atari preprocessing, but with raw (unclipped) rewards and
    # no episode-on-life-loss termination; frames are stacked, pixels not scaled.
    env = make_atari(args.env)
    env = wrap_deepmind(env, episode_life=False, clip_rewards=False, frame_stack=True, scale=False)
    # Seed every RNG in play (gym env, CUDA, torch CPU, numpy, stdlib random)
    # so runs with the same seed are reproducible.
    env.seed(args.seed)
    torch.cuda.manual_seed(args.seed)
    torch.manual_seed(args.seed)
    np.random.seed(args.seed)
    random.seed(args.seed)

    # TODO
    num_actions = env.action_space.n
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -



