# model_fn — extracted from code/inference.py

def model_fn(model_dir):
    """SageMaker model-loading entry point.

    Loads a Hugging Face tokenizer and model from ``model_dir`` and moves the
    model to GPU when one is available.

    Args:
        model_dir: Path to the directory containing the saved model artifacts
            (as produced by ``save_pretrained``).

    Returns:
        dict: ``{'model': nlp_model, 'tokenizer': tokenizer}`` — the loaded
        model (on the selected device, in eval mode) and its tokenizer.
    """
    logger.info('model_fn')
    # Prefer GPU if present; fall back to CPU.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    logger.info(model_dir)
    tokenizer = AutoTokenizer.from_pretrained(model_dir)
    nlp_model = AutoModel.from_pretrained(model_dir)
    nlp_model.to(device)
    # Fix: models load in training mode by default; switch to eval mode so
    # dropout/batch-norm behave deterministically during inference.
    nlp_model.eval()

    return {'model': nlp_model, 'tokenizer': tokenizer}