# def handler()
#
# in src/SimpleReplay/cloudformation/LambdaReplay.py [0:0]


def handler(event, context):
    """Lambda entry point that dispatches one Simple Replay task.

    Expects ``event['Input']`` to be a dict carrying an ``action`` name plus
    whatever parameters that action needs (cluster ids, S3 buckets, SSM
    command id, Data API sql id, ...). Presumably invoked by a Step Functions
    state machine — each call performs one step and returns a small dict.

    Returns:
        dict with one of the keys ``status``, ``command_id``, or ``sql_id``,
        depending on the action.

    Raises:
        ValueError: for an unknown (or missing) action.
        Exception: any failure from the underlying helpers is logged with a
            traceback and re-raised so the caller sees the step as failed.
    """
    print(event)

    # Hoist the input payload once instead of re-indexing event 13 times.
    params = event['Input']
    action = params.get('action')
    instance_id = params.get('instance_id')
    extract_bucket = params.get('extract_bucket')
    replay_bucket = params.get('replay_bucket')
    command_id = params.get('command_id')
    iam_role = params.get('iam_role')
    security_group_id = params.get('security_group_id')
    subnet_group = params.get('subnet_group')
    extract_account = params.get('extract_account')
    db = params.get('db')
    user = params.get('user')
    node_type = params.get('node_type')
    number_of_nodes = params.get('number_of_nodes')

    parameter_group_name = params.get('parameter_group_name')
    sql_id = params.get('sql_id')
    source_clusterid = params.get('source_clusterid')
    cluster_type = params.get('cluster_type')
    # Derived identifiers: the replay cluster is named after the source
    # cluster (optionally suffixed by type), and the snapshot name follows
    # a fixed convention used elsewhere in the tooling.
    clusterid = source_clusterid + "-" + cluster_type if cluster_type else source_clusterid
    snapshotid = "ra3-migration-evaluation-snapshot-" + source_clusterid if source_clusterid else ""

    try:
        client = boto3.client('redshift')
        # Extract-run metadata written by the extract phase; fall back to
        # empty values when no extract bucket was supplied.
        extract_prefix = json.loads(get_config_from_s3(extract_bucket, 'config/extract_prefix.json')) if extract_bucket else {'prefix': '', 'extract_output': ''}
        prefix = extract_prefix['prefix']
        extract_output = extract_prefix['extract_output']

        if action == "cluster_status":
            res = {'status': cluster_status(client, clusterid)}
        elif action == "update_parameter_group":
            res = {'status': update_parameter_group(client, parameter_group_name, extract_bucket)}
        elif action == "create_cluster":
            res = {
                'status': create_cluster(client, clusterid, snapshotid, extract_bucket, iam_role, parameter_group_name,
                                         subnet_group, security_group_id, extract_account, node_type, number_of_nodes)}
        elif action == "classic_resize_cluster":
            res = {'status': classic_resize_cluster(client, clusterid, node_type, number_of_nodes)}
        elif action == "resume_cluster":
            # Redshift resume/pause are asynchronous; callers poll via
            # the "cluster_status" action.
            client.resume_cluster(ClusterIdentifier=clusterid)
            res = {'status': 'initiated'}
        elif action == "pause_cluster":
            client.pause_cluster(ClusterIdentifier=clusterid)
            res = {'status': 'initiated'}
        elif action == "setup_redshift_objects":
            res = {'sql_id': setup_redshift_objects(replay_bucket, clusterid, db, user)}
        elif action == "run_replay":
            command_id = run_replay(client, replay_bucket, instance_id, clusterid, cluster_type, prefix, extract_output)
            res = {'command_id': command_id}
        elif action == "replay_status":
            res = {'status': replay_status(command_id, instance_id)}
        elif action == "unload_stats":
            # NOTE(review): SQL assembled by interpolation; `prefix` comes
            # from a config file in S3, not end-user input, but confirm it
            # is trusted before reuse.
            script = "call unload_detailed_query_stats('" + prefix + "')"
            sql_id = run_sql(clusterid, db, user, script)
            res = {'sql_id': sql_id}
        elif action == "load_stats":
            script = "call load_detailed_query_stats('" + prefix + "')"
            sql_id = run_sql(clusterid, db, user, script, False, 'async')
            res = {'sql_id': sql_id}
        elif action == "sql_status":
            res = {'status': sql_status(sql_id)}
        else:
            # f-string (vs "+" concatenation) so a missing action raises the
            # intended ValueError rather than TypeError on str + None.
            raise ValueError(f"Invalid Task: {action}")
    except Exception as e:
        print(e)
        print(traceback.format_exc())
        raise
    print(res)
    return res