in training/train.py [0:0]
def main(args) -> None:
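    """Compose the training config, dump it to the experiment log directory, and
    launch training either through submitit on a SLURM cluster or locally."""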
    cfg = compose(config_name=args.config)
    if cfg.launcher.experiment_log_dir is None:
        cfg.launcher.experiment_log_dir = os.path.join(
            os.getcwd(), "sam2_logs", args.config
        )
print("###################### Train App Config ####################")
print(OmegaConf.to_yaml(cfg))
print("############################################################")
    add_pythonpath_to_sys_path()
    makedir(cfg.launcher.experiment_log_dir)
    with g_pathmgr.open(
        os.path.join(cfg.launcher.experiment_log_dir, "config.yaml"), "w"
    ) as f:
        f.write(OmegaConf.to_yaml(cfg))
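    # Also write a fully resolved copy of the config (interpolations evaluated) for reproducibility.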
    cfg_resolved = OmegaConf.to_container(cfg, resolve=False)
    cfg_resolved = OmegaConf.create(cfg_resolved)
    with g_pathmgr.open(
        os.path.join(cfg.launcher.experiment_log_dir, "config_resolved.yaml"), "w"
    ) as f:
        f.write(OmegaConf.to_yaml(cfg_resolved, resolve=True))
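    # A submitit section is required; it is used both for cluster and for local launches.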
    submitit_conf = cfg.get("submitit", None)
    assert submitit_conf is not None, "Missing submitit config"
    submitit_dir = cfg.launcher.experiment_log_dir
    submitit_dir = os.path.join(submitit_dir, "submitit_logs")
    # Prioritize command-line args over the config values
    cfg.launcher.gpus_per_node = (
        args.num_gpus if args.num_gpus is not None else cfg.launcher.gpus_per_node
    )
    cfg.launcher.num_nodes = (
        args.num_nodes if args.num_nodes is not None else cfg.launcher.num_nodes
    )
    submitit_conf.use_cluster = (
        args.use_cluster if args.use_cluster is not None else submitit_conf.use_cluster
    )
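    # Cluster path: build a submitit AutoExecutor and submit the trainer as a SLURM job.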
    if submitit_conf.use_cluster:
        executor = submitit.AutoExecutor(folder=submitit_dir)
        submitit_conf.partition = (
            args.partition
            if args.partition is not None
            else submitit_conf.get("partition", None)
        )
        submitit_conf.account = (
            args.account
            if args.account is not None
            else submitit_conf.get("account", None)
        )
        submitit_conf.qos = (
            args.qos if args.qos is not None else submitit_conf.get("qos", None)
        )
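        # Map the submitit config onto submitit executor / SLURM parameters.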
        job_kwargs = {
            "timeout_min": 60 * submitit_conf.timeout_hour,
            "name": (
                submitit_conf.name if hasattr(submitit_conf, "name") else args.config
            ),
            "slurm_partition": submitit_conf.partition,
            "gpus_per_node": cfg.launcher.gpus_per_node,
            "tasks_per_node": cfg.launcher.gpus_per_node,  # one task per GPU
            "cpus_per_task": submitit_conf.cpus_per_task,
            "nodes": cfg.launcher.num_nodes,
            "slurm_additional_parameters": {
                "exclude": " ".join(submitit_conf.get("exclude_nodes", [])),
            },
        }
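        # Optionally restrict the job to an explicit set of nodes.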
if "include_nodes" in submitit_conf:
assert (
len(submitit_conf["include_nodes"]) >= cfg.launcher.num_nodes
), "Not enough nodes"
job_kwargs["slurm_additional_parameters"]["nodelist"] = " ".join(
submitit_conf["include_nodes"]
)
        if submitit_conf.account is not None:
            job_kwargs["slurm_additional_parameters"]["account"] = submitit_conf.account
        if submitit_conf.qos is not None:
            job_kwargs["slurm_additional_parameters"]["qos"] = submitit_conf.qos
if submitit_conf.get("mem_gb", None) is not None:
job_kwargs["mem_gb"] = submitit_conf.mem_gb
elif submitit_conf.get("mem", None) is not None:
job_kwargs["slurm_mem"] = submitit_conf.mem
if submitit_conf.get("constraints", None) is not None:
job_kwargs["slurm_constraint"] = submitit_conf.constraints
if submitit_conf.get("comment", None) is not None:
job_kwargs["slurm_comment"] = submitit_conf.comment
        # Only the cpu-bind option is supported within srun_args; new options can be added here
        if submitit_conf.get("srun_args", None) is not None:
            job_kwargs["slurm_srun_args"] = []
            if submitit_conf.srun_args.get("cpu_bind", None) is not None:
                job_kwargs["slurm_srun_args"].extend(
                    ["--cpu-bind", submitit_conf.srun_args.cpu_bind]
                )
print("###################### SLURM Config ####################")
print(job_kwargs)
print("##########################################")
        executor.update_parameters(**job_kwargs)
        main_port = random.randint(
            submitit_conf.port_range[0], submitit_conf.port_range[1]
        )
        runner = SubmititRunner(main_port, cfg)
        job = executor.submit(runner)
        print(f"Submitit Job ID: {job.job_id}")
        runner.setup_job_info(job.job_id, rank=0)
    else:
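        # Local path: run on a single node in this process, without going through SLURM.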
        cfg.launcher.num_nodes = 1
        main_port = random.randint(
            submitit_conf.port_range[0], submitit_conf.port_range[1]
        )
        single_node_runner(cfg, main_port)