def lambda_handler(event, context)

in ml-images/oci/app.py [0:0]
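The handler depends on module-level state (json, torch, logger, and the two tokenizer/model pairs) that is created once when the container starts, outside the function below. What follows is a minimal sketch of that setup, assuming standard Hugging Face transformers extractive-QA models; the model paths are placeholders, not the actual artifacts baked into the image.

import json
import logging

import torch
from transformers import AutoModelForQuestionAnswering, AutoTokenizer

logger = logging.getLogger()
logger.setLevel(logging.INFO)

# Placeholder locations; the real model artifacts live inside the container image.
MODEL1_PATH = '/opt/ml/model_v1'
MODEL2_PATH = '/opt/ml/model_v2'

# Load both model versions at import time so warm invocations reuse them.
tokenizer1 = AutoTokenizer.from_pretrained(MODEL1_PATH)
model1 = AutoModelForQuestionAnswering.from_pretrained(MODEL1_PATH)
tokenizer2 = AutoTokenizer.from_pretrained(MODEL2_PATH)
model2 = AutoModelForQuestionAnswering.from_pretrained(MODEL2_PATH)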


def lambda_handler(event, context):
    body = json.loads(event['body'])
    model_type = body['model_type']
    question = body['question']
    # Passage text to search for the answer (kept distinct from the Lambda context argument).
    qa_context = body['context']
    
    # Select the requested model version; both versions are loaded once at module import.
    if model_type == 'nlp1':
        logger.info('NLP Model Version 1 loaded')
        tokenizer = tokenizer1
        model = model1
    elif model_type == 'nlp2':
        logger.info('NLP Model Version 2 loaded')
        tokenizer = tokenizer2
        model = model2
    else:
        logger.info('Unrecognized or missing model_type, defaulting to version 1.')
        tokenizer = tokenizer1
        model = model1
        model_type = 'nlp1'

    # Tokenize the question/context pair into model inputs.
    inputs = tokenizer.encode_plus(question, qa_context, add_special_tokens=True, return_tensors="pt")
    input_ids = inputs["input_ids"].tolist()[0]

    # Run extractive QA: the model scores every token as a possible answer start and end.
    output = model(**inputs)
    answer_start_scores = output.start_logits
    answer_end_scores = output.end_logits

    # Take the most likely start and end positions (+1 because the slice end is exclusive).
    answer_start = torch.argmax(answer_start_scores)
    answer_end = torch.argmax(answer_end_scores) + 1

    # Convert the selected token span back into a readable answer string.
    answer = tokenizer.convert_tokens_to_string(tokenizer.convert_ids_to_tokens(input_ids[answer_start:answer_end]))

    logger.info('Question: {0}, Answer: {1}'.format(question, answer))
    return {
        'statusCode': 200,
        'headers': {'Content-Type': 'application/json'},
        'body': json.dumps({
            'Model_Type': model_type,
            'Question': question,
            'Answer': answer
        })
    }
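
For local testing, the handler can be driven with a hand-built API Gateway-style event. The snippet below is illustrative and assumes the module-level setup sketched above; the question and context values are made up.

if __name__ == '__main__':
    # Illustrative event payload; API Gateway delivers the request body as a JSON string.
    test_event = {
        'body': json.dumps({
            'model_type': 'nlp1',
            'question': 'What service runs the handler?',
            'context': 'The handler is packaged in a container image and run on AWS Lambda.'
        })
    }
    print(lambda_handler(test_event, None))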