aws_emr_blog_v2/code/launch-cluster/cremr.py [545:600]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
                }
            })
        cluster_id = client.run_job_flow(**cluster_parameters)

        physical_resource_id = cluster_id["JobFlowId"]
        response_data = {
            "ClusterID": cluster_id["JobFlowId"]
        }
        return physical_resource_id, response_data

    except Exception as E:
        raise


def update(event, context):
    """Handle CloudFormation Update events.

    No in-place update of the EMR cluster is performed: the existing
    physical resource id is echoed back unchanged with empty response
    data. Raising an exception here would report a failure to
    CloudFormation Events.
    """
    return event["PhysicalResourceId"], {}


def delete(event, context):
    """Handle CloudFormation Delete events by terminating the EMR cluster.

    Issues a terminate request for the cluster recorded as the stack's
    physical resource id, then reads back the cluster state so it can be
    reported to CloudFormation.

    Returns:
        dict: {"ClusterStatus": <cluster state observed after the
        terminate call>}.
    """
    client = boto3.client("emr", region_name=event["ResourceProperties"]["StackRegion"])

    # Termination is asynchronous; we do not wait for it to complete.
    client.terminate_job_flows(
        JobFlowIds=[event["PhysicalResourceId"]]
    )

    # State observed immediately after the call (typically a terminating state).
    response = client.describe_cluster(
        ClusterId=event["PhysicalResourceId"]
    )
    status = response["Cluster"]["Status"]["State"]

    return {"ClusterStatus": status}


def handler(event, context):
    """Lambda entry point; delegates all work to crhelper's cfn_handler.

    The module-level logger is refreshed with event context before the
    create/update/delete handlers are dispatched.
    """
    global logger
    logger = crhelper.log_config(event)
    return crhelper.cfn_handler(
        event, context, create, update, delete, logger, init_failed
    )
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -



aws_emr_blog_v3/code/launch-cluster/cremr.py [403:523]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
            }
        })

        # if isPrestoAppRequested:
        #     if event["ResourceProperties"]["UseAWSGlueForHiveMetastore"] == "false":
        #         cluster_parameters['BootstrapActions'].append(
        #             {
        #                 "Name": "Setup Presto Kerberos",
        #                 "ScriptBootstrapAction": {
        #                     "Path": "s3://" + event["ResourceProperties"]["S3ArtifactBucket"] + "/" + event["ResourceProperties"][
        #                         "S3ArtifactKey"] + "/" + event["ResourceProperties"][
        #                                 "ProjectVersion"] + "/scripts/configure_presto_kerberos_ba.sh",
        #                     "Args": [
        #                         "s3://" + event["ResourceProperties"]["S3ArtifactBucket"] + "/" + event["ResourceProperties"][
        #                             "S3ArtifactKey"] + "/" + event["ResourceProperties"][
        #                             "ProjectVersion"],
        #                         event["ResourceProperties"]["KdcAdminPassword"]
        #                     ]
        #                 }
        #             })
        #     if event["ResourceProperties"]["UseAWSGlueForHiveMetastore"] == "true":
        #         if prestoEngineRequested == "PrestoSQL":
        #             cluster_parameters['Configurations'].append(
        #                 {
        #                     "Classification": "prestosql-connector-hive",
        #                     "Properties": {
        #                         "hive.metastore": "glue"
        #                     }
        #                 });
        #         else:
        #             cluster_parameters['Configurations'].append(
        #                 {
        #                     "Classification": "presto-connector-hive",
        #                     "Properties": {
        #                         "hive.metastore": "glue"
        #                     }
        #                 });
        # if isSparkAppRequested and event["ResourceProperties"]["UseAWSGlueForHiveMetastore"] == "true":
        #     cluster_parameters['Configurations'].append(
        #         {
        #             "Classification": "spark-hive-site",
        #             "Properties": {
        #                 "hive.server2.enable.doAs": "true",
        #                 "hive.metastore.client.factory.class": "com.amazonaws.glue.catalog.metastore.AWSGlueDataCatalogHiveClientFactory"
        #             }
        #         });

        # if isPrestoAppRequested and event["ResourceProperties"]["InstallPrestoPlugin"] == "true":
        #     cluster_parameters['Steps'].append({
        #         "Name": "InstallRangerPrestoPlugin",
        #         "ActionOnFailure": "CONTINUE",
        #         "HadoopJarStep": {
        #             "Jar": scriptRunnerJar,
        #             "Args": [
        #                 "/mnt/tmp/aws-blog-emr-ranger/scripts/emr-steps/install-presto-ranger-plugin.sh",
        #                 event["ResourceProperties"]["RangerHostname"],
        #                 event["ResourceProperties"]["RangerVersion"],
        #                 "s3://" + event["ResourceProperties"]["S3ArtifactBucket"] + "/" + event["ResourceProperties"]["S3ArtifactKey"],
        #                 event["ResourceProperties"][
        #                     "ProjectVersion"],
        #                 event["ResourceProperties"]["emrReleaseLabel"],
        #                 prestoEngineRequested,
        #                 event["ResourceProperties"]["RangerHttpProtocol"],
        #                 event["ResourceProperties"]["InstallCloudWatchAgentForAudit"]
        #             ]
        #         }
        #     })
        cluster_id = client.run_job_flow(**cluster_parameters)

        physical_resource_id = cluster_id["JobFlowId"]
        response_data = {
            "ClusterID": cluster_id["JobFlowId"]
        }
        return physical_resource_id, response_data

    except Exception as E:
        raise


def update(event, context):
    """Handle CloudFormation Update events.

    No in-place update of the EMR cluster is performed: the existing
    physical resource id is echoed back unchanged with empty response
    data. Raising an exception here would report a failure to
    CloudFormation Events.
    """
    return event["PhysicalResourceId"], {}


def delete(event, context):
    """Handle CloudFormation Delete events by terminating the EMR cluster.

    Issues a terminate request for the cluster recorded as the stack's
    physical resource id, then reads back the cluster state so it can be
    reported to CloudFormation.

    Returns:
        dict: {"ClusterStatus": <cluster state observed after the
        terminate call>}.
    """
    client = boto3.client("emr", region_name=event["ResourceProperties"]["StackRegion"])

    # Termination is asynchronous; we do not wait for it to complete.
    client.terminate_job_flows(
        JobFlowIds=[event["PhysicalResourceId"]]
    )

    # State observed immediately after the call (typically a terminating state).
    response = client.describe_cluster(
        ClusterId=event["PhysicalResourceId"]
    )
    status = response["Cluster"]["Status"]["State"]

    return {"ClusterStatus": status}


def handler(event, context):
    """Lambda entry point; delegates all work to crhelper's cfn_handler.

    The module-level logger is refreshed with event context before the
    create/update/delete handlers are dispatched.
    """
    global logger
    logger = crhelper.log_config(event)
    return crhelper.cfn_handler(
        event, context, create, update, delete, logger, init_failed
    )
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -



