def evaluate_saved_model()

in test.py

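Imports used by this function (the standard-library and third-party imports
are evident from the code; the repo-local helpers are listed as comments since
their exact module paths are not shown on this page):

import logging
import os
import sys

import numpy as np
import torch
from tqdm import tqdm

# Repo-local helpers whose import paths are not shown here:
# make_lr_venv, LevelSampler, model_for_env_name, evaluate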

def evaluate_saved_model(
    args,
    result_dir,
    xpid,
    num_episodes=10,
    seeds=None,
    deterministic=False,
    verbose=False,
    progressbar=False,
    num_processes=1):
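    """Load the checkpoint saved under result_dir/xpid (or result_dir/latest
    when xpid is None) and evaluate it for num_episodes episodes on
    sequentially sampled level seeds.

    Returns:
        Tuple of (mean_return, median_return) over the evaluation episodes.
    """
    # Resolve the evaluation device, honoring the --no_cuda flag.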
    args.cuda = not args.no_cuda and torch.cuda.is_available()
    device = torch.device("cuda:0" if args.cuda else "cpu")
    if 'cuda' in device.type:
        print('Using CUDA\n')

    if verbose:
        logging.basicConfig(stream=sys.stdout, level=logging.INFO)

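    # Resolve the checkpoint path from the experiment id; fall back to the
    # "latest" run directory when no xpid is given.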
    if xpid is None:
        checkpointpath = os.path.expandvars(
            os.path.expanduser(os.path.join(result_dir, "latest", "model.tar"))
        )
    else:
        checkpointpath = os.path.expandvars(
            os.path.expanduser(os.path.join(result_dir, xpid, "model.tar"))
        )

    # Set up level sampler
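    # Without explicit seeds, draw one random 32-bit level seed per episode.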
    if seeds is None:
        seeds = [int.from_bytes(os.urandom(4), byteorder="little") for _ in range(num_episodes)]

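    # Build a dummy vectorized env only to expose the observation and action
    # spaces needed by the LevelSampler and model constructors below.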
    dummy_env, _ = make_lr_venv(
        num_envs=num_processes, env_name=args.env_name,
        seeds=None, device=device,
        num_levels=1, start_level=1,
        no_ret_normalization=args.no_ret_normalization,
        distribution_mode=args.distribution_mode,
        paint_vel_info=args.paint_vel_info)

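    # A sequential sampling strategy steps through `seeds` in order, pairing
    # each evaluation episode with the next level seed.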
    level_sampler = LevelSampler(
        seeds, 
        dummy_env.observation_space, dummy_env.action_space,
        strategy='sequential')

    model = model_for_env_name(args, dummy_env)

    pbar = None
    if progressbar:
        pbar = tqdm(total=num_episodes)

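    # Map checkpoint tensors onto the evaluation device; keying on args.cuda
    # keeps this consistent with the --no_cuda flag handled above.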
    if args.cuda:
        map_location = lambda storage, loc: storage.cuda()
    else:
        map_location = 'cpu'

    checkpoint = torch.load(checkpointpath, map_location=map_location)
    model.load_state_dict(checkpoint["model_state_dict"])

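    # Never launch more eval workers than there are episodes to run.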
    num_processes = min(num_processes, num_episodes)
    eval_episode_rewards = evaluate(
        args, model, num_episodes,
        device=device,
        num_processes=num_processes,
        level_sampler=level_sampler,
        progressbar=pbar)

    mean_return = np.mean(eval_episode_rewards)
    median_return = np.median(eval_episode_rewards)

    logging.info(
        "Mean return over %i episodes: %.2f", num_episodes, mean_return
    )
    logging.info(
        "Median return over %i episodes: %.2f", num_episodes, median_return
    )

    return mean_return, median_return
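
Example usage (a sketch, not from the repo: the argparse flags mirror the
attributes this function reads off args, and the result_dir and env values
are hypothetical):

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--env_name', default='bigfish')
parser.add_argument('--xpid', default=None)
parser.add_argument('--no_cuda', action='store_true')
parser.add_argument('--no_ret_normalization', action='store_true')
parser.add_argument('--distribution_mode', default='easy')
parser.add_argument('--paint_vel_info', action='store_true')
args = parser.parse_args()

mean_return, median_return = evaluate_saved_model(
    args,
    result_dir='results',
    xpid=args.xpid,
    num_episodes=10,
    progressbar=True)
print('mean: %.2f, median: %.2f' % (mean_return, median_return))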