# def main()
# in MaskRCNN/pytorch/tools/train_net.py [0:0]


def main():
    """Entry point for Mask R-CNN training.

    Parses command-line arguments, initializes distributed training (when
    WORLD_SIZE > 1), merges the YACS config from file/CLI overrides, sets up
    logging (maskrcnn logger, DLLogger, TensorBoard), then runs training and
    optionally evaluation.

    Side effects: creates the output directory, initializes the NCCL process
    group, freezes the global ``cfg``, and writes log files.
    """
    parser = argparse.ArgumentParser(description="PyTorch Object Detection Training")
    parser.add_argument(
        "--config-file",
        default="",
        metavar="FILE",
        help="path to config file",
        type=str,
    )
    parser.add_argument("--local_rank", type=int, default=os.getenv('LOCAL_RANK', 0))
    parser.add_argument("--max_steps", type=int, default=0, help="Override number of training steps in the config")
    parser.add_argument("--skip-test", dest="skip_test", help="Do not test the final model",
                        action="store_true",)
    parser.add_argument("--fp16", help="Mixed precision training", action="store_true")
    parser.add_argument("--amp", help="Mixed precision training", action="store_true")
    parser.add_argument('--skip_checkpoint', default=False, action='store_true', help="Whether to save checkpoints")
    parser.add_argument("--json-summary", help="Out file for DLLogger", default="dllogger.out",
                        type=str,
                        )
    # NOTE(review): distutils is deprecated (PEP 632) and removed in Python
    # 3.12 — replace with a small str->bool parser when upgrading.
    parser.add_argument("--debug", type=distutils.util.strtobool, default=False, help="debug")
    parser.add_argument("--eval-loss", action="store_true", default=False, help="evaluate loss(very buggy)")

    parser.add_argument("--print-freq", type=int, default=100, help="print freq for tensorboard")
    parser.add_argument(
        "opts",
        help="Modify config options using the command-line",
        default=None,
        nargs=argparse.REMAINDER,
    )
    args = parser.parse_args()
    # --fp16 and --amp are synonyms; either flag enables mixed precision.
    args.fp16 = args.fp16 or args.amp

    # WORLD_SIZE is set by torchrun/torch.distributed.launch; absent means
    # single-process training.
    num_gpus = int(os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1
    args.distributed = num_gpus > 1

    if args.distributed:
        torch.cuda.set_device(args.local_rank)
        torch.distributed.init_process_group(
            backend="nccl", init_method="env://"
        )
        synchronize()

    # File config first, then command-line "opts" overrides on top.
    cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)

    if args.debug:
        cfg.DEBUG = args.debug

    # Command-line --max_steps overrides the config's iteration budget.
    if args.max_steps > 0:
        cfg.SOLVER.MAX_ITER = args.max_steps

    if args.skip_checkpoint:
        cfg.SAVE_CHECKPOINT = False

    cfg.freeze()

    output_dir = cfg.OUTPUT_DIR
    if output_dir:
        mkdir(output_dir)

    args.log_dir = save_path_formatter(args, cfg)

    logger = setup_logger("maskrcnn_benchmark", output_dir, get_rank())
    # Only rank 0 writes DLLogger output; other ranks get a no-op logger.
    if is_main_process():
        dllogger.init(backends=[dllogger.JSONStreamBackend(verbosity=dllogger.Verbosity.VERBOSE,
                                filename=args.json_summary),
                                dllogger.StdOutBackend(verbosity=dllogger.Verbosity.VERBOSE, step_format=format_step)])
    else:
        dllogger.init(backends=[])

    dllogger.log(step="PARAMETER", data={"gpu_count": num_gpus})
    dllogger.log(step="PARAMETER", data={"config_file": args.config_file})

    # FIX: config_str was previously read but never used (dead code); log the
    # raw config file so the run's exact configuration is captured.
    with open(args.config_file, "r") as cf:
        config_str = "\n" + cf.read()
    logger.info(config_str)

    dllogger.log(step="PARAMETER", data={"config": cfg})

    if is_main_process():
        args.writer = SummaryWriter(args.log_dir, flush_secs=30)

    # FIX: collapsed redundant if/else that merely copied args.fp16.
    fp16 = args.fp16

    model, iters_per_epoch = train(cfg, args.local_rank, args.distributed, fp16, dllogger, args)

    if not args.skip_test:
        test_model(cfg, model, args.distributed, iters_per_epoch, dllogger, args)