def benchmark()

in scripts/benchmark_framework.py


import time

import textworld
import textworld.agents


def benchmark(gamefile, args):
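    # Request additional game information only when the selected settings need it.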
    infos = textworld.EnvInfos()
    if args.activate_state_tracking or args.mode == "random-cmd":
        infos.admissible_commands = True

    if args.compute_intermediate_reward:
        infos.intermediate_reward = True

    env = textworld.start(gamefile, infos)
    print("Using {}".format(env))

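    # Instantiate the agent that matches the requested benchmark mode.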
    if args.mode == "random":
        agent = textworld.agents.NaiveAgent()
    elif args.mode == "random-cmd":
        agent = textworld.agents.RandomCommandAgent(seed=args.agent_seed)
    elif args.mode == "walkthrough":
        agent = textworld.agents.WalkthroughAgent()
    else:
        raise ValueError("Unknown mode: {}".format(args.mode))

    agent.reset(env)
    game_state = env.reset()

    if args.verbose:
        env.render()

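    # Play for a fixed number of steps, timing the full run.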
    reward = 0
    done = False
    nb_resets = 1
    start_time = time.time()
    for _ in range(args.max_steps):
        command = agent.act(game_state, reward, done)
        game_state, reward, done = env.step(command)

        if done:
            # Restart the game and keep stepping until the step budget is spent.
            game_state = env.reset()
            nb_resets += 1
            done = False

        if args.verbose:
            env.render()

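    # Report throughput as environment steps per second over the whole run.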
    duration = time.time() - start_time
    speed = args.max_steps / duration
    msg = "Done {:,} steps in {:.2f} secs ({:,.1f} steps/sec) with {} resets."
    print(msg.format(args.max_steps, duration, speed, nb_resets))
    return speed
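
As a rough usage sketch (not the script's actual CLI), the function could be driven from an argparse namespace that provides the fields read above; the argument names and defaults below are assumptions for illustration:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument("gamefile")  # path to a TextWorld game file
parser.add_argument("--mode", default="random",
                    choices=["random", "random-cmd", "walkthrough"])
parser.add_argument("--max-steps", type=int, default=1000)
parser.add_argument("--agent-seed", type=int, default=1234)
parser.add_argument("--activate-state-tracking", action="store_true")
parser.add_argument("--compute-intermediate-reward", action="store_true")
parser.add_argument("--verbose", action="store_true")
args = parser.parse_args()

# argparse maps "--max-steps" to args.max_steps, "--agent-seed" to args.agent_seed, etc.
speed = benchmark(args.gamefile, args)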