def load_config()

in src/evaluate.py
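Loads an experiment's config.ini, resolves the output directory, fills in the default evaluation list, and returns a status code (0 = ok, 1 = error, 2 = already evaluated at the optimal epoch) together with an initialized TrainConfig, or None when no evaluation should run.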


def load_config(data_path, device_id, path, evaluations, skip, cl_out_dir=None, skip_if_already_done_once=True):
    c_file = os.path.join(path, "config.ini")

    experiment = ""
    orig_path = os.path.join(path, '')

    if path.endswith("-D") or path.endswith(f"-D{os.path.sep}"):
        print("diff and flip of depth not yet supported")
        return 1, None

    # strip the last 2 folders (and a possible trailing separator) from the log path;
    # the first stripped folder name is the experiment name
    ctr = 0
    while ctr < 2:
        path, tail = os.path.split(path)
        if tail != '':
            ctr += 1
            if ctr == 1:
                experiment = tail

    print(f"Evaluating {experiment}")

    if not os.path.exists(c_file):
        print(f"No config.ini found!")
        return 1, None

    # get current optimal epoch
    try:
        optimal_epoch = get_optimal_epoch(orig_path)
    except FileNotFoundError:
        print(f"No optimal epoch found - using latest weights")
        optimal_epoch = None

    # add evaluations to the list if none have been passed to the script
    if len(evaluations) == 0:
        opt_filenames = ["_opt.mp4", "_opt_0.mp4", "_opt_1.mp4"]
        opt_files_exist = any(os.path.exists(os.path.join(orig_path, filename)) for filename in opt_filenames)

        if not opt_files_exist and "opt" not in skip:
            evaluations.append("opt")

        eval_categories = ["complexity", "images", "videos", "output_images", "output_videos", "export"]

        for e in eval_categories:
            if e not in skip:
                evaluations.append(e)

    # parse the experiment's config file
    try:
        config = Config.init(c_file, only_known_args=True)
        if isinstance(config, configargparse.ArgParser):
            config, unknown = config.parse_known_args(['-c', c_file])
    except SystemExit:
        print(f"Errors in config file!")
        return 1, None

    # replace paths with command line values
    config.data = data_path
    config.logDir = path

    dataset_name = os.path.basename(os.path.normpath(config.data))
    experiment_name = os.path.basename(os.path.normpath(orig_path))
    out_dir = orig_path
    if cl_out_dir is not None:
        out_dir = os.path.join(cl_out_dir, dataset_name, experiment_name)

    os.makedirs(out_dir, exist_ok=True)

    # look for previous eval
    try:
        evaluated_epoch = get_optimal_epoch(os.path.join(out_dir, "eval"))
    except FileNotFoundError:
        print(f"No previous evaluation found - continuing")
        evaluated_epoch = None

    if evaluated_epoch is not None and optimal_epoch is not None:
        if optimal_epoch == evaluated_epoch and (len(evaluations) == 0 or skip_if_already_done_once):
            print(f"Evaluation already performed for this optimal epoch!")
            return 2, None

    # replace device id in config with command line device id
    config.device = device_id

    # fallback for missing lossWeights: pad with weight 1 so every loss has a weight
    while len(config.lossWeights) < len(config.losses):
        config.lossWeights.append(1)

    # initialize config and load optimal or latest weights
    train_config = TrainConfig()
    train_config.initialize(config, log_path=orig_path, training=False)

    if cl_out_dir is not None:
        train_config.outDir = out_dir
    else:
        train_config.outDir = train_config.logDir

    # the final output layer must produce 3 or 4 feature channels
    if train_config.f_out[-1].n_feat not in (3, 4):
        print("Output features not 3 or 4!")
        return 1, None

    # find checkpoint files that match the configured checkpoint name
    checkpoints = [os.path.join(config.logDir, f) for f in sorted(os.listdir(config.logDir))
                   if config.checkPointName in f]

    if len(checkpoints) == 0:
        # no matching checkpoint found - fall back to the latest weights
        train_config.load_latest_weights()
    else:
        train_config.load_specific_weights(config.checkPointName)

    return 0, train_config
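
A minimal usage sketch of how a caller might consume the returned status code and TrainConfig. The paths, device id, skip list, and the run_evaluations() helper below are hypothetical placeholders, not taken from the script:

# Hypothetical example values - not from the repository.
status, train_config = load_config(
    data_path="/data/scene01",               # dataset root
    device_id=0,                              # GPU index to evaluate on
    path="/logs/scene01/experiment-a/",       # experiment log directory
    evaluations=[],                           # empty -> default categories minus the skip list
    skip=["export"],                          # evaluation categories to leave out
)

if status == 0:
    run_evaluations(train_config)             # hypothetical downstream step
elif status == 2:
    print("already evaluated at the optimal epoch - nothing to do")
else:
    print("config could not be loaded")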