in spinup/utils/run_utils.py
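# The module-level imports below are what this excerpt relies on; they are
# reconstructed here for readability and may not match run_utils.py verbatim.
# (setup_logger_kwargs and DIV_LINE_WIDTH are defined elsewhere in this module.)
import base64
import cloudpickle
import json
import os
import os.path as osp
import psutil
import subprocess
import sys
import zlib
from subprocess import CalledProcessError
from textwrap import dedent

from spinup.utils.logx import colorize
from spinup.utils.mpi_tools import mpi_fork
from spinup.utils.serialization_utils import convert_json
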
def call_experiment(exp_name, thunk, seed=0, num_cpu=1, data_dir=None,
                    datestamp=False, **kwargs):
"""
Run a function (thunk) with hyperparameters (kwargs), plus configuration.
This wraps a few pieces of functionality which are useful when you want
to run many experiments in sequence, including logger configuration and
splitting into multiple processes for MPI.
There's also a SpinningUp-specific convenience added into executing the
thunk: if ``env_name`` is one of the kwargs passed to call_experiment, it's
assumed that the thunk accepts an argument called ``env_fn``, and that
the ``env_fn`` should make a gym environment with the given ``env_name``.
The way the experiment is actually executed is slightly complicated: the
function is serialized to a string, and then ``run_entrypoint.py`` is
executed in a subprocess call with the serialized string as an argument.
``run_entrypoint.py`` unserializes the function call and executes it.
We choose to do it this way---instead of just calling the function
directly here---to avoid leaking state between successive experiments.
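
    Roughly, the entrypoint script decodes and runs the thunk like this (a
    sketch of the idea, not the verbatim contents of ``run_entrypoint.py``)::

        import base64, pickle, sys, zlib
        thunk = pickle.loads(zlib.decompress(base64.b64decode(sys.argv[1])))
        thunk()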

    Args:
        exp_name (string): Name for experiment.

        thunk (callable): A Python function.

        seed (int): Seed for random number generators.

        num_cpu (int): Number of MPI processes to split into. Also accepts
            'auto', which will set up as many procs as there are physical
            cores on the machine.

        data_dir (string): Used in configuring the logger, to decide where
            to store experiment results. Note: if left as None, data_dir will
            default to ``DEFAULT_DATA_DIR`` from ``spinup/user_config.py``.

        datestamp (bool): Whether to include a date and timestamp in the
            name of the save directory.

        **kwargs: All kwargs to pass to thunk.
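
    Example:
        A minimal usage sketch (assumes ``ppo`` is importable from ``spinup``
        and accepts the kwargs shown)::

            from spinup import ppo
            from spinup.utils.run_utils import call_experiment

            call_experiment('ppo-cartpole', ppo, seed=0, num_cpu=2,
                            env_name='CartPole-v1', steps_per_epoch=4000)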
"""

    # Determine number of CPU cores to run on
    num_cpu = psutil.cpu_count(logical=False) if num_cpu=='auto' else num_cpu

    # Send random seed to thunk
    kwargs['seed'] = seed

    # Be friendly and print out your kwargs, so we all know what's up
    print(colorize('Running experiment:\n', color='cyan', bold=True))
    print(exp_name + '\n')
    print(colorize('with kwargs:\n', color='cyan', bold=True))
    kwargs_json = convert_json(kwargs)
    print(json.dumps(kwargs_json, separators=(',',':\t'), indent=4, sort_keys=True))
    print('\n')

    # Set up logger output directory
    if 'logger_kwargs' not in kwargs:
        kwargs['logger_kwargs'] = setup_logger_kwargs(exp_name, seed, data_dir, datestamp)
    else:
        print('Note: Call experiment is not handling logger_kwargs.\n')

    def thunk_plus():
        # Make 'env_fn' from 'env_name'
        if 'env_name' in kwargs:
            import gym
            env_name = kwargs['env_name']
            kwargs['env_fn'] = lambda : gym.make(env_name)
            del kwargs['env_name']

        # Fork into multiple processes
        mpi_fork(num_cpu)

        # Run thunk
        thunk(**kwargs)

    # Prepare to launch a script to run the experiment
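    # (cloudpickle, rather than the standard-library pickle, is used because it
    # can serialize the thunk_plus closure, which captures thunk, kwargs, and
    # num_cpu from the enclosing scope.)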
    pickled_thunk = cloudpickle.dumps(thunk_plus)
    encoded_thunk = base64.b64encode(zlib.compress(pickled_thunk)).decode('utf-8')

    entrypoint = osp.join(osp.abspath(osp.dirname(__file__)),'run_entrypoint.py')
    cmd = [sys.executable if sys.executable else 'python', entrypoint, encoded_thunk]
    try:
        subprocess.check_call(cmd, env=os.environ)
    except CalledProcessError:
        err_msg = '\n'*3 + '='*DIV_LINE_WIDTH + '\n' + dedent("""