def lambda_handler()

in ml-images/s3/app.py [0:0]


import json

import torch


def lambda_handler(event, context):
    body = json.loads(event['body'])

    question = body['question']
    passage = body['context']  # renamed locally so it does not shadow the Lambda context argument

    # loaded_model_tokenizer holds the model at index 0 and the tokenizer at index 1
    model = loaded_model_tokenizer[0]
    tokenizer = loaded_model_tokenizer[1]

    # Tokenize the question/passage pair
    inputs = tokenizer.encode_plus(question, passage, add_special_tokens=True, return_tensors="pt")
    input_ids = inputs["input_ids"].tolist()[0]

    # Perform the inference
    output = model(**inputs)
    answer_start_scores = output.start_logits
    answer_end_scores = output.end_logits

    # Most likely start and end token positions of the answer span (end is exclusive)
    answer_start = torch.argmax(answer_start_scores)
    answer_end = torch.argmax(answer_end_scores) + 1

    # Convert the predicted token span back into a string
    answer = tokenizer.convert_tokens_to_string(
        tokenizer.convert_ids_to_tokens(input_ids[answer_start:answer_end])
    )

    print('Question: {0}, Answer: {1}'.format(question, answer))
    return {
        'statusCode': 200,
        'headers': {'Content-Type': 'application/json'},
        'body': json.dumps({
            'Question': question,
            'Answer': answer
        })
    }
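
The handler relies on a module-level loaded_model_tokenizer pair that is created outside the function shown above, so the (potentially large) model is loaded once per Lambda container rather than on every request. That setup is not part of this excerpt; below is a minimal sketch of what it might look like, assuming a Hugging Face AutoModelForQuestionAnswering checkpoint available at a local path and building on the imports shown above. The model directory and the load_model_and_tokenizer helper are illustrative placeholders, not the repository's actual configuration.

from transformers import AutoModelForQuestionAnswering, AutoTokenizer

# Hypothetical checkpoint location, e.g. baked into the container image or
# downloaded from S3 during cold start; the real project may differ.
MODEL_DIR = "/opt/ml/model"

def load_model_and_tokenizer(model_dir=MODEL_DIR):
    """Return a (model, tokenizer) pair matching the indexing used by lambda_handler."""
    model = AutoModelForQuestionAnswering.from_pretrained(model_dir)
    tokenizer = AutoTokenizer.from_pretrained(model_dir)
    model.eval()  # inference only, no training
    return (model, tokenizer)

# Index 0 is the model and index 1 is the tokenizer, as lambda_handler expects.
loaded_model_tokenizer = load_model_and_tokenizer()

With that in place, the handler can be smoke-tested locally with an event shaped like the one API Gateway would deliver; the field names below are inferred from the handler's body parsing, and the sample text is purely illustrative.

if __name__ == "__main__":
    sample_event = {
        "body": json.dumps({
            "question": "What does the handler return?",
            "context": "The Lambda handler returns a JSON body containing the question and the answer."
        })
    }
    print(lambda_handler(sample_event, None))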