# inference.py
def model_fn(model_dir):
    """Load the sentence-embedding model and tokenizer for inference.

    SageMaker-style model loader: called once at container startup.

    Args:
        model_dir: Path to the model artifacts directory supplied by the
            hosting environment.
            NOTE(review): currently unused — weights are pulled from the
            Hugging Face hub instead (the commented-out local load suggests
            loading from ``model_dir + '/transformer/'`` was once intended).
            Confirm which source is desired.

    Returns:
        dict with keys ``'model'`` (the transformer on GPU if available,
        else CPU) and ``'tokenizer'``.
    """
    logger.info('model_fn')
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    logger.info(model_dir)
    tokenizer = AutoTokenizer.from_pretrained("sentence-transformers/bert-base-nli-mean-tokens")
    nlp_model = AutoModel.from_pretrained("sentence-transformers/bert-base-nli-mean-tokens")
    nlp_model.to(device)
    # Inference-time fix: disable dropout / freeze batch-norm statistics so
    # repeated calls on the same input produce deterministic embeddings.
    nlp_model.eval()
    return {'model': nlp_model, 'tokenizer': tokenizer}