def handler()

in lib/analyzer-lambda/analyzer-lambda.py [0:0]


def handler(event, context):
    log.getLogger().setLevel(log.INFO)
    global aws_request_id

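    # Correlation id propagated from the upstream Lambda and the MQTT topic
    # (carried in the Lambda client context) that triggered this invocation.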
    aws_request_id = event['upstream-request-id']
    topic = context.client_context.custom['subject']

    log.info('Received message from topic: {0} with payload:\n {1}'.format(
        topic, json.dumps(event, indent=4)))

    log.info('The upstream request id is: {}'.format(aws_request_id))

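    # The device id is the last segment of the topic; it doubles as the Redis
    # key prefix for this device's metrics.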
    deviceId = topic.split('/')[-1]
    key_prefix = deviceId

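    # Metric fields stored for each device sample.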
    metrics_list = ['timestamp', 'temperature', 'pressure', 'humidity']

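    # Query window: the most recent hour, in epoch seconds.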
    end_time = int(time.time())
    start_time = end_time - 3600

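    # calculated_offset is the number of seconds end_time sits past the last
    # 10-second boundary, as a pandas offset string (e.g. '7S'); together with
    # min_resolution_seconds it is presumably intended for the resampling
    # exercise below.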
    calculated_offset = '{}S'.format(end_time % 10)
    min_resolution_seconds = 10

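    # Fetch this device's metric values for the window from Redis as one flat list.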
    redis_results = select_metrics(
        key_prefix, aws_request_id, metrics_list, start_time)

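    # Re-group the flat result list into rows of len(metrics_list) values each.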
    metrics = [redis_results[i * len(metrics_list):(i + 1) * len(metrics_list)]
               for i in range((len(redis_results) + len(metrics_list) - 1) // len(metrics_list))]

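    # Build a numeric DataFrame indexed by timestamp, converting the epoch
    # seconds to a DatetimeIndex.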
    raw_df = pd.DataFrame(metrics, columns=metrics_list).set_index('timestamp')
    raw_df = raw_df.apply(pd.to_numeric)
    raw_df.index = pd.to_datetime(raw_df.index, unit='s')

    # Code exercise for filling missing metrics...
    # --------------Enter code below this line--------------
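    # A possible sketch, not the official solution: put the samples on a regular
    # grid of min_resolution_seconds aligned by calculated_offset, then
    # interpolate the gaps (assumes pandas >= 1.1 for the offset keyword; linear
    # interpolation is an assumption about how gaps should be filled).
    raw_df = raw_df.resample(
        '{}S'.format(min_resolution_seconds),
        offset=calculated_offset).mean()
    raw_df = raw_df.interpolate(method='linear').ffill().bfill()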

    # ----------------------------------------------------------

    # Code exercise for rolling statistical calculation...
    # --------------Enter code below this line--------------
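    # A possible sketch, not the official solution: compute rolling statistics
    # over a time-based window; the 5-minute window and the mean/std pair are
    # assumptions, and the new column names are illustrative.
    for column in ['temperature', 'pressure', 'humidity']:
        raw_df['{}_rolling_mean'.format(column)] = (
            raw_df[column].rolling('300s', min_periods=1).mean())
        raw_df['{}_rolling_std'.format(column)] = (
            raw_df[column].rolling('300s', min_periods=1).std())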

    # ----------------------------------------------------------

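    # Publish the processed metrics to the downstream Lambda on the
    # metrics/filled/<deviceId> topic as a column-oriented JSON document.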
    topic = 'metrics/filled/{}'.format(deviceId)
    message = json.loads(raw_df.to_json(orient='columns'))
    callDownstreamLambda(topic, message)