def main()

in src/evaluate.py

# imports this entry point needs; load_config, load_reference_video and
# evaluate are assumed to be defined elsewhere in this module
import gc
import os
import platform

import configargparse
import torch


def main():
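    """Parse command-line arguments and run the evaluation over one or more log directories."""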
    p = configargparse.ArgParser()
    p.add_argument('-d', '--device', default=0, type=int, help="CUDA device to run the evaluation on")
    p.add_argument('-data', '--data', required=True, type=str)
    p.add_argument('-log', '--logDir', default=[], action='append', type=str,
                   help="log directories of the nets to be evaluated; if the first entry does not"
                        " contain 'config.ini', all of its subdirectories are added as paths instead")
    p.add_argument('-e', '--eval', default=[], action='append', type=str,
                   choices=["complexity", "opt", "images", "videos", "output_images", "output_videos", "debug", "export"],
                   help="evaluations to run again even if their results already exist")
    p.add_argument('-s', '--skip', default=[], action='append', type=str,
                   choices=["complexity", "opt", "images", "videos", "output_images", "output_videos", "flip", "psnr", "ssim", "export"],
                   help="evaluations to skip")
    p.add_argument('-o', '--outDir', default=None, type=str,
                   help="output base directory; defaults to the same directory as the input logDir")
    p.add_argument('--skipIfAlreadyDone', default=True, action="store_false",
                   help="skip the full evaluation if one was already completed (opt.txt exists);"
                        " pass this flag to disable skipping")
    p.add_argument('--camPath', default=[], action='append', type=str, help="cam path that is used for output_videos")
    p.add_argument('--inferenceChunkSize', default=4096, type=int,
                   help="chunk size used for network inference")
    cl = p.parse_args()

    device_id = cl.device
    data_path = cl.data
    skip = cl.skip

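    # derive the dataset name from the data path, handling a trailing path separator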
    data_set_path, data_set = os.path.split(data_path)
    if data_set == '':
        _, data_set = os.path.split(data_set_path)

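    # if the first logDir entry is not itself a run (no config.ini), treat it as
    # a parent directory and evaluate every subdirectory inside it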
    if not os.path.exists(os.path.join(cl.logDir[0], "config.ini")):
        paths = []
        for subdir in sorted(os.listdir(cl.logDir[0])):
            path = os.path.join(cl.logDir[0], subdir)
            if os.path.isdir(path):
                paths.append(path)
    else:
        paths = cl.logDir

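    # video evaluations need a ground-truth reference video to compare against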
    reference_video = load_reference_video(data_path)
    if reference_video is None:
        skip.append("videos")

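    # counters for the summary printed after the loop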
    skipped = 0
    prev_done = 0
    eval_performed = 0

    # NOTE: can be removed once pyrtools is available on Windows or another image pyramid library is used
    if platform.system() == 'Linux':
        metrics = ["flip", "psnr", "ssim"]
    else:
        # ssim depends on pyrtools and is therefore only computed on Linux
        metrics = ["flip", "psnr"]
        print("Warning: complexity calculation can lead to overflows if not done on Linux!")

    for idx, path in enumerate(paths):
        if len(paths) > 1:
            print(f"{idx+1} of {len(paths)}")

        evaluations = list(cl.eval)  # fresh copy per run so appended metrics do not accumulate
        state, train_config = load_config(data_path, device_id, path, evaluations, skip, cl_out_dir=cl.outDir,
                                          skip_if_already_done_once=cl.skipIfAlreadyDone)

        if train_config is None:
            print(f"Skipping {path}...")

            if state == 1:
                skipped += 1
            elif state == 2:
                prev_done += 1
        else:
            # warn if the dataset name does not appear in the run's path
            if data_set not in path:
                print(f"Warning: did not find dataset name {data_set} in path {path}!")

            for m in metrics:
                if m not in skip:
                    evaluations.append(m)

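            # apply the command-line override for the inference chunk size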
            if cl.inferenceChunkSize is not None:
                train_config.config_file.inferenceChunkSize = cl.inferenceChunkSize

            train_config.evaluation_cam_path = cl.camPath

            evaluate(train_config, reference_video, evaluations)

            eval_performed += 1

            with torch.cuda.device(train_config.device):
                torch.cuda.empty_cache()

            # free allocated memory to be able to run all tests successively
            del train_config
            gc.collect()

    if len(paths) > 1:
        print(f"Performed evaluation on {eval_performed} of {len(paths)} folders")
        print(f"\tSkipped {skipped} folders because of loading issues")
        print(f"\tPrevious completed evaluation found on {prev_done} folders")