def deploy_dms()

in static/Reliability/300_Testing_for_Resiliency_of_EC2_RDS_and_S3/Code/Python/DMSLambda/deploy_dms_lambda.py
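
This excerpt relies on module-level imports (boto3, os, sys, traceback), a module logger, the AWS_REGION_* default constants, and the helpers find_in_outputs and get_db_password, all defined elsewhere in deploy_dms_lambda.py.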


def deploy_dms(event):
    logger.debug("Running function deploy_dms")
    try:
        dms_deploy_region = event['secondary_region_name']
        source_db_region = event['region_name']
        cfn_s3_source_region = event['cfn_region']
        bucket = event['cfn_bucket']
        key_prefix = event['folder']
    except Exception:
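        # Fall back to the module-level defaults when the event does not supply
        # explicit region, bucket, and prefix values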
        dms_deploy_region = AWS_REGION_DMS_DEPLOY
        source_db_region = os.environ.get('AWS_REGION', AWS_REGION_SOURCE_DB)
        cfn_s3_source_region = os.environ.get('AWS_REGION', AWS_REGION_CFN_TEMPLATE)
        bucket = "aws-well-architected-labs-ohio",
        key_prefix = "Reliability/"
    # Create CloudFormation client
    client = boto3.client('cloudformation', region_name=dms_deploy_region)

    # Get the outputs of the VPC stack
    vpc_stack = event['vpc']['stackname']
    try:
        stack_response = client.describe_stacks(StackName=vpc_stack)
        stack_list = stack_response['Stacks']
        if (len(stack_list) < 1):
            logger.debug("Cannot find stack named " + vpc_stack + ", so cannot parse outputs as inputs")
            sys.exit(1)
    except Exception:
        logger.debug("Cannot find stack named " + vpc_stack + ", so cannot parse outputs as inputs")
        sys.exit(1)
    vpc_outputs = stack_list[0]['Outputs']

    # Create the list of subnets to pass
    private_subnets = find_in_outputs(vpc_outputs, 'PrivateSubnets')
    subnet_list = private_subnets.split(',')
    if (len(subnet_list) < 2):
        logger.debug("Cannot find enough subnets in " + vpc_stack + ", so cannot deploy Multi-AZ")
        sys.exit(1)
    dms_subnet_list = subnet_list[0] + ',' + subnet_list[1]

    # Create the list of security groups to pass
    dms_sg = find_in_outputs(vpc_outputs, 'WebSecurityGroup')

    # Find the source DB endpoint and server name
    rds_replica_stack = event['rr']['stackname']
    try:
        stack_response = client.describe_stacks(StackName=rds_replica_stack)
        stack_list = stack_response['Stacks']
        if (len(stack_list) < 1):
            logger.debug("Cannot find stack named " + rds_replica_stack + ", so cannot parse outputs as inputs")
            sys.exit(1)
    except Exception:
        logger.debug("Cannot find stack named " + rds_replica_stack + ", so cannot parse outputs as inputs")
        sys.exit(1)
    rds_replica_outputs = stack_list[0]['Outputs']
    source_db_address = find_in_outputs(rds_replica_outputs, 'DBAddress')
    address_parsed = source_db_address.split('.')
    if (len(address_parsed) < 1):
        logger.debug("Cannot get the read replica (source) server from " + source_db_address)
        sys.exit(1)

    # Find the dest DB id
    rds_stack = event['rds']['stackname']
    try:
        stack_response = client.describe_stacks(StackName=rds_stack)
        stack_list = stack_response['Stacks']
        if (len(stack_list) < 1):
            logger.debug("Cannot find stack named " + rds_stack + ", so cannot parse outputs as inputs")
            sys.exit(1)
    except Exception:
        logger.debug("Cannot find stack named " + rds_stack + ", so cannot parse outputs as inputs")
        sys.exit(1)
    rds_outputs = stack_list[0]['Outputs']
    dest_db_address = find_in_outputs(rds_outputs, 'DBAddress')
    address_parsed = dest_db_address.split('.')
    if (len(address_parsed) < 1):
        logger.debug("Cannot get the destination RDS server from " + dest_db_address)
        sys.exit(1)

    # Get workshop name
    try:
        workshop_name = event['workshop']
    except Exception:
        logger.debug("Unexpected error! (when parsing workshop name)\n Stack Trace:", traceback.format_exc())
        workshop_name = 'UnknownWorkshop'

    # Get password for source database
    # The password for the Read Replica in region 2 is the same as that for the Primary in region 1
    source_db_password = get_db_password(source_db_region, workshop_name)

    # Get password for destination database
    # (we would prefer to use CloudFormation dynamic references directly,
    # but they are not supported for AWS::DMS::Endpoint:
    # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/dynamic-references.html)
    dest_db_password = get_db_password(dms_deploy_region, workshop_name)


    # Get DB instance type only if it was specified (it is optional)
    try:
        if 'db_instance_class' in event:
            db_instance_class = event['db_instance_class']
        else:
            db_instance_class = None
    except Exception:
        logger.debug("Unexpected error! (when parsing DB instance class)\n Stack Trace: %s", traceback.format_exc())
        db_instance_class = None

    # Prepare the stack parameters
    dms_parameters = []
    dms_parameters.append({'ParameterKey': 'SourceDatabaseServer', 'ParameterValue': source_db_address, 'UsePreviousValue': True})
    dms_parameters.append({'ParameterKey': 'DestDatabaseServer', 'ParameterValue': dest_db_address, 'UsePreviousValue': True})
    dms_parameters.append({'ParameterKey': 'DatabaseName', 'ParameterValue': 'iptracker', 'UsePreviousValue': True})
    dms_parameters.append({'ParameterKey': 'MigrationSubnetIds', 'ParameterValue': dms_subnet_list, 'UsePreviousValue': True})
    dms_parameters.append({'ParameterKey': 'MigrationSecurityGroups', 'ParameterValue': dms_sg, 'UsePreviousValue': True})
    dms_parameters.append({'ParameterKey': 'SourceDBUser', 'ParameterValue': 'admin', 'UsePreviousValue': True})
    dms_parameters.append({'ParameterKey': 'DestDBPassword', 'ParameterValue': dest_db_password, 'UsePreviousValue': True})
    dms_parameters.append({'ParameterKey': 'SourceDBPassword', 'ParameterValue': source_db_password, 'UsePreviousValue': True})
    dms_parameters.append({'ParameterKey': 'DestDBUser', 'ParameterValue': 'admin', 'UsePreviousValue': True})
    dms_parameters.append({'ParameterKey': 'WorkshopName', 'ParameterValue': workshop_name, 'UsePreviousValue': True})
    # If DB instance class supplied then use it, otherwise CloudFormation template will use Parameter default
    if db_instance_class is not None:
        dms_parameters.append({'ParameterKey': 'MigrationInstanceClass', 'ParameterValue': db_instance_class, 'UsePreviousValue': True})

    stack_tags = []
    stack_tags.append({'Key': 'Workshop', 'Value': 'AWSWellArchitectedReliability' + workshop_name})
    capabilities = []
    capabilities.append('CAPABILITY_NAMED_IAM')
    dms_template_s3_url = "https://s3." + cfn_s3_source_region + ".amazonaws.com/" + bucket + "/" + key_prefix + "dms.json"
    client.create_stack(
        StackName=stackname,
        TemplateURL=dms_template_s3_url,
        Parameters=dms_parameters,
        DisableRollback=False,
        TimeoutInMinutes=30,
        Capabilities=capabilities,
        Tags=stack_tags
    )
    return_dict = {'stackname': stackname}
    return return_dict
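
The listing above calls two helpers, find_in_outputs and get_db_password, that are defined elsewhere in the module. A minimal sketch of each, inferred from the call sites above, is shown below; the Secrets Manager SecretId used in get_db_password is a hypothetical placeholder, not taken from the source.

import boto3


def find_in_outputs(outputs, output_key):
    # 'outputs' is the Outputs list returned by describe_stacks:
    # [{'OutputKey': ..., 'OutputValue': ...}, ...]
    for output in outputs:
        if output['OutputKey'] == output_key:
            return output['OutputValue']
    return None


def get_db_password(region, workshop_name):
    # Assumes the database password is stored in AWS Secrets Manager in the
    # given region; the SecretId below is illustrative only.
    client = boto3.client('secretsmanager', region_name=region)
    secret = client.get_secret_value(SecretId=workshop_name + '-DBPassword')
    return secret['SecretString']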