in scripts/inference.py [0:0]
def model_fn(model_dir):
    """Load the DistilBERT question-answering model and tokenizer for serving.

    SageMaker-style model loader: called once at container startup; the
    returned object is passed to the predict function on every request.

    Args:
        model_dir: Path to the unpacked model artifacts.
            NOTE(review): currently unused — the model is downloaded from the
            Hugging Face hub by its hard-coded name instead of being loaded
            from this directory. Confirm whether artifacts in ``model_dir``
            should take precedence (``from_pretrained(model_dir)``).

    Returns:
        dict: ``{'model': DistilBertForQuestionAnswering, 'tokenizer':
        DistilBertTokenizer}`` with the model moved to GPU when available.
    """
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model_name = 'distilbert-base-uncased-distilled-squad'
    nlpmodel = DistilBertForQuestionAnswering.from_pretrained(model_name)
    tokenizer = DistilBertTokenizer.from_pretrained(model_name)
    nlpmodel.to(device)
    # Serving path: disable dropout/batch-norm training behavior.
    nlpmodel.eval()
    model = {'model': nlpmodel, 'tokenizer': tokenizer}
    return model