def prepare_parser()

in data_utils/make_hdf5.py [0:0]


from argparse import ArgumentParser


def prepare_parser():
    usage = "Parser for ImageNet HDF5 scripts."
    parser = ArgumentParser(description=usage)
    parser.add_argument(
        "--resolution",
        type=int,
        default=128,
        help="Which Dataset resolution to train on, out of 64, 128, 256 (default: %(default)s)",
    )
    parser.add_argument(
        "--split",
        type=str,
        default="train",
        help="Which Dataset to convert: train, val (default: %(default)s)",
    )
    parser.add_argument(
        "--data_root",
        type=str,
        default="data",
        help="Default location where data is stored (default: %(default)s)",
    )
    parser.add_argument(
        "--out_path",
        type=str,
        default="data",
        help="Default location where data in hdf5 format will be stored (default: %(default)s)",
    )
    parser.add_argument(
        "--pretrained_model_path",
        type=str,
        default="",
        help="Location where the pretrained model (to extract features) can be found (default: %(default)s)",
    )
    parser.add_argument(
        "--save_features_only",
        action="store_true",
        default=False,
        help="Only save features in hdf5 file.",
    )
    parser.add_argument(
        "--save_images_only",
        action="store_true",
        default=False,
        help="Only save images and their labels in hdf5 file.",
    )
    parser.add_argument(
        "--feature_augmentation",
        action="store_true",
        default=False,
        help="Additioally store instance features with horizontally flipped input images.",
    )
    parser.add_argument(
        "--feature_extractor",
        type=str,
        default="classification",
        choices=["classification", "selfsupervised"],
        help="Choice of feature extractor",
    )
    parser.add_argument(
        "--backbone_feature_extractor",
        type=str,
        default="resnet50",
        choices=["resnet50"],
        help="Choice of feature extractor backbone",
    )
    parser.add_argument(
        "--which_dataset", type=str, default="imagenet", help="Dataset choice."
    )
    parser.add_argument(
        "--instance_json",
        type=str,
        default="",
        help="Path to JSON containing instance segmentations for COCO_Stuff",
    )
    parser.add_argument(
        "--stuff_json",
        type=str,
        default="",
        help="Path to JSON containing instance segmentations for COCO_Stuff",
    )
    parser.add_argument(
        "--batch_size",
        type=int,
        default=256,
        help="Default overall batchsize (default: %(default)s)",
    )
    parser.add_argument(
        "--num_workers",
        type=int,
        default=16,
        help="Number of dataloader workers (default: %(default)s)",
    )
    parser.add_argument(
        "--chunk_size",
        type=int,
        default=500,
        help="Default overall batchsize (default: %(default)s)",
    )
    parser.add_argument(
        "--compression",
        action="store_true",
        default=False,
        help="Use LZF compression? (default: %(default)s)",
    )
    return parser
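
A minimal usage sketch, assuming the parser is consumed by a main entry point elsewhere in make_hdf5.py; the run(config) call below is a hypothetical placeholder for the actual conversion routine:

if __name__ == "__main__":
    # Parse the CLI flags into a plain dict of options.
    parser = prepare_parser()
    config = vars(parser.parse_args())
    # Hand the options to the (hypothetical) HDF5 conversion routine.
    # run(config)
    print(config)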