def create_parser()

in scripts/extract_saliency.py


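# Relies on module-level `import argparse`, `import pathlib`, and an MMCIF_PATH constant,
# all assumed to be provided elsewhere in scripts/extract_saliency.py.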
def create_parser():
    parser = argparse.ArgumentParser()

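    # Required inputs: the PDB entry to analyze and the output file for saliency results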
    parser.add_argument("--pdb_name", type=str, required=True)
    parser.add_argument("--results_file", type=pathlib.Path, required=True)

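    # Optional model checkpoint and mmCIF cache directory (MMCIF_PATH is assumed to be a module-level constant)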
    parser.add_argument("--model_file", type=pathlib.Path, default=None)
    parser.add_argument(
        "--cache_dir",
        type=pathlib.Path,
        default=pathlib.Path(MMCIF_PATH + "/mmCIF/"),
    )

    # Transformer arguments
    parser.add_argument(
        "--encoder_layers",
        type=int,
        default=6,
        help="number of layers to apply the transformer on",
    )
    parser.add_argument("--dropout", type=float, default=0.0, help="chance of dropping out a unit")
    parser.add_argument(
        "--relu_dropout", type=float, default=0.0, help="chance of dropping out a relu unit"
    )
    parser.add_argument(
        "--encoder_normalize_after",
        action="store_false",
        dest="encoder_normalize_before",
        help="whether to normalize outputs before",
    )
    parser.add_argument(
        "--encoder_attention_heads",
        type=int,
        default=8,
        help="number of heads of attention to use",
    )
    parser.add_argument(
        "--attention_dropout", type=float, default=0.0, help="dropout rate of attention"
    )
    parser.add_argument(
        "--encoder_ffn_embed_dim",
        type=int,
        default=1024,
        help="hidden dimension to use in transformer",
    )
    parser.add_argument(
        "--encoder_embed_dim", type=int, default=256, help="original embed dimension of element"
    )
    parser.add_argument("--max_size", type=int, default=64, help="maximum size of time series")

    return parser
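
A minimal usage sketch, assuming the script calls create_parser() from a __main__ block; the file names and PDB identifier shown (saliency_model.pt, results.pkl, 1abc) are hypothetical:

if __name__ == "__main__":
    # Parse command-line arguments, e.g.
    #   python scripts/extract_saliency.py --pdb_name 1abc \
    #       --results_file results.pkl --model_file saliency_model.pt
    args = create_parser().parse_args()
    print(args.pdb_name, args.encoder_layers, args.encoder_embed_dim)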