# main()
#
# in metrics/bert_score.py [0:0]

def str_to_bool(value):
    """Parse a command-line string into a bool for argparse.

    Accepts common truthy spellings ("true", "t", "yes", "y", "1") and
    falsy ones ("false", "f", "no", "n", "0"), case-insensitively.
    A bool passes through unchanged so argparse defaults keep working.

    Raises:
        argparse.ArgumentTypeError: if the value is not a recognized boolean.
    """
    if isinstance(value, bool):
        return value
    lowered = str(value).strip().lower()
    if lowered in ("true", "t", "yes", "y", "1"):
        return True
    if lowered in ("false", "f", "no", "n", "0"):
        return False
    raise argparse.ArgumentTypeError("Expected a boolean value, got %r" % (value,))


def main():
    """Parse CLI args, load a pretrained model/tokenizer, and run scoring.

    Selects the model/config/tokenizer classes via --model_type from
    MODEL_CLASSES, optionally rebuilds the tokenizer vocabulary from
    --vocab_file, optionally halves the model weights (--fp16), moves the
    model to GPU when available, runs run_score, and prints elapsed time.
    """
    start_time = time.time()
    parser = argparse.ArgumentParser()
    # Required parameters
    # NOTE(review): despite the help text, a None model_name_or_path will
    # fail below in from_pretrained — this script effectively requires it.
    parser.add_argument(
        "--model_name_or_path",
        default=None,
        type=str,
        help="The model checkpoint for weights initialization. Leave None if you want to train a model from scratch.",
    )
    # BUG FIX: the original declared --fp16 with default=True and no type,
    # so any value given on the command line arrived as a non-empty (truthy)
    # string — "--fp16 False" still enabled fp16. str_to_bool makes
    # "False"/"0"/"no" actually disable it.
    parser.add_argument('--fp16', type=str_to_bool, default=True,
                        help='Run in pseudo-fp16 mode (fp16 storage fp32 math).')

    parser.add_argument(
        "--model_type", type=str, default='bert', help="The model architecture to be trained or fine-tuned.",
    )

    parser.add_argument(
        "--vocab_file",
        type=str,
        required=True,
        help="The vocab file.",
    )

    parser.add_argument(
        "--event_type",
        type=str,
        required=True,
        help="The event type.",
        choices=['magenta', 'newevent']
    )

    parser.add_argument(
        "--len_tokens_evaluated",
        type=int,
        default=2048,
        help="Total max number of tokens to be evaluated.",
    )

    args = parser.parse_args()

    # Resolve the (config, model, tokenizer) class triple for this architecture.
    config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
    config = config_class.from_pretrained(args.model_name_or_path, cache_dir=None)
    tokenizer = tokenizer_class.from_pretrained(args.model_name_or_path, cache_dir=None)

    # --vocab_file is required=True above, so this branch always runs; kept
    # as a guard in case the requirement is ever relaxed.
    if args.vocab_file:
        tokenizer.build_vocab_file(args.vocab_file, event_type=args.event_type)

    model = model_class.from_pretrained(
        args.model_name_or_path,
        # `in` already yields a bool; the original bool(...) wrapper was redundant.
        from_tf=".ckpt" in args.model_name_or_path,
        config=config,
        cache_dir=None,
    )

    # Halve weight storage; math still runs in fp32 where ops upcast.
    if args.fp16:
        model = model.half()

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model.to(device)

    run_score(model, tokenizer, args.len_tokens_evaluated)
    print("--- %s seconds ---" % (time.time() - start_time))