def test_run()

in train/pantheon_env.py [0:0]
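
Runs one Pantheon test job on the calling thread, then analyzes the results and copies the summary plot into the top-level log directory.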


def test_run(flags, jobs, thread_id):
    """
    Thread i runs jobs[i % len(jobs)], repeating it flags.test_runs_per_job times.
    """
    job_id = thread_id % len(jobs)
    cmd_tmpl = jobs[job_id]["cmd_tmpl"]

    # Expand data_dir in cmd template
    data_dir = path.join(flags.logdir, "test_expt{}".format(job_id))
    cmd = utils.safe_format(cmd_tmpl, {"data_dir": data_dir})
    cmd = update_cmd(cmd, flags, thread_id)

    # Run tests
    logging.info(
        "Test run: thread {} -> job {}, cmd: {}".format(
            thread_id, job_id, " ".join(cmd)
        )
    )
    pantheon_env = get_pantheon_env(flags)
    p = subprocess.Popen(cmd, env=pantheon_env)
    p.wait()
    assert p.returncode == 0, "Pantheon script exited with error code {}".format(
        p.returncode
    )

    # Run analysis
    analysis_cmd = [utils.meta["analyze_path"], "--data-dir={}".format(data_dir)]
    logging.info(
        "Thread {}, job {}: Running analysis on {}, cmd: {}".format(
            thread_id, job_id, data_dir, " ".join(analysis_cmd)
        )
    )
    p = subprocess.Popen(analysis_cmd, env=pantheon_env)
    p.wait()
    assert p.returncode == 0, "Analysis script exited with error code {}".format(
        p.returncode
    )

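    # Copy the summary plot up into the top-level logdir, named after the job.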
    shutil.copyfile(
        path.join(data_dir, "pantheon_summary_mean.pdf"),
        path.join(flags.logdir, "test_expt{}.pdf".format(job_id)),
    )
    logging.info(
        "Test run finished for thread {}, job {}. Results in {}.".format(
            thread_id, job_id, data_dir
        )
    )
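
For context, a dispatcher along these lines would exercise the round-robin thread-to-job mapping described in the docstring. This is a minimal sketch; run_tests and num_threads are hypothetical names, not part of the module:

import threading

def run_tests(flags, jobs, num_threads):
    # Hypothetical dispatcher: one worker thread per test run;
    # test_run itself picks jobs[thread_id % len(jobs)] round-robin.
    threads = [
        threading.Thread(target=test_run, args=(flags, jobs, i))
        for i in range(num_threads)
    ]
    for t in threads:
        t.start()
    for t in threads:
        t.join()

utils.safe_format is assumed here to fill in only the keys it is given (data_dir) while leaving any other brace placeholders in the command template untouched. A minimal sketch of such a helper, under that assumption rather than the repo's actual implementation:

def safe_format(template, mapping):
    # Substitute only the keys present in `mapping`; any other
    # {placeholder} in the template is left verbatim.
    class _SafeDict(dict):
        def __missing__(self, key):
            return "{" + key + "}"
    return template.format_map(_SafeDict(mapping))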