def deploy_rds()

in static/Reliability/300_Testing_for_Resiliency_of_EC2_RDS_and_S3/Code/Python/RDSLambda/deploy_rds_lambda.py [0:0]
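
This excerpt references module-level names that are defined elsewhere in deploy_rds_lambda.py (logger, AWS_REGION, find_in_outputs). A minimal sketch of that preamble, assuming a default region constant and a simple key lookup over CloudFormation stack outputs, would be:

import logging
import os
import sys
import traceback

import boto3

# Fallback region used when neither the event nor the environment supplies one (assumed default)
AWS_REGION = 'us-east-2'

logger = logging.getLogger()
logger.setLevel(logging.DEBUG)


def find_in_outputs(outputs, key):
    # Sketch of the helper this excerpt calls: return the OutputValue whose OutputKey
    # matches the supplied key, or None if the key is not present in the stack outputs.
    for output in outputs:
        if output['OutputKey'] == key:
            return output['OutputValue']
    return None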


def deploy_rds(event):
    logger.debug("Running function deploy_rds")
    try:
        region = event['region_name']
        cfn_region = event['cfn_region']
        bucket = event['cfn_bucket']
        key_prefix = event['folder']
    except Exception:
        logger.error("Unexpected error!\n Stack Trace:", traceback.format_exc())
        region = os.environ.get('AWS_REGION', AWS_REGION)
        cfn_region = os.environ.get('AWS_REGION', AWS_REGION)
        bucket = "aws-well-architected-labs-ohio",
        key_prefix = "/"
    # Create CloudFormation client
    client = boto3.client('cloudformation', region_name=region)

    # Get the outputs of the VPC stack
    vpc_stack = event['vpc']['stackname']
    try:
        stack_response = client.describe_stacks(StackName=vpc_stack)
        stack_list = stack_response['Stacks']
        if (len(stack_list) < 1):
            logger.debug("Cannot find stack named " + vpc_stack + ", so cannot parse outputs as inputs")
            sys.exit(1)
    except Exception:
        logger.error("Unexpected error!\n Stack Trace:", traceback.format_exc())
        logger.error("Cannot find stack named " + vpc_stack + ", so cannot parse outputs as inputs")
        sys.exit(1)
    vpc_outputs = stack_list[0]['Outputs']

    # Create the list of subnets to pass
    private_subnets = find_in_outputs(vpc_outputs, 'PrivateSubnets')
    subnet_list = private_subnets.split(',')
    if (len(subnet_list) < 2):
        logger.debug("Cannot find enough subnets in " + vpc_stack + ", so cannot deploy Multi-AZ")
        sys.exit(1)
    rds_subnet_list = subnet_list[0] + ',' + subnet_list[1]

    # Create the list of security groups to pass
    rds_sg = find_in_outputs(vpc_outputs, 'MySQLSecurityGroup')

    # Get workshop name
    try:
        workshop_name = event['workshop']
    except Exception:
        logger.debug("Unexpected error! (when parsing workshop name)\n Stack Trace:", traceback.format_exc())
        workshop_name = 'UnknownWorkshop'

    # Get DB instance type only if it was specified (it is optional)
    try:
        if 'db_instance_class' in event:
            db_instance_class = event['db_instance_class']
        else:
            db_instance_class = None
    except Exception:
        logger.debug("Unexpected error! (when parsing DB instance class)\n Stack Trace: %s", traceback.format_exc())
        db_instance_class = None

    # Prepare the stack parameters
    rds_parameters = []
    rds_parameters.append({'ParameterKey': 'DBSubnetIds', 'ParameterValue': rds_subnet_list, 'UsePreviousValue': True})
    rds_parameters.append({'ParameterKey': 'DBSecurityGroups', 'ParameterValue': rds_sg, 'UsePreviousValue': True})
    rds_parameters.append({'ParameterKey': 'DBUser', 'ParameterValue': 'admin', 'UsePreviousValue': True})
    rds_parameters.append({'ParameterKey': 'WorkshopName', 'ParameterValue': workshop_name, 'UsePreviousValue': True})
    # If DB instance class supplied then use it, otherwise CloudFormation template will use Parameter default
    if (db_instance_class is not None):
        rds_parameters.append({'ParameterKey': 'DBInstanceClass', 'ParameterValue': db_instance_class, 'UsePreviousValue': True})
    stack_tags = []

    stack_tags.append({'Key': 'Workshop', 'Value': 'AWSWellArchitectedReliability' + workshop_name})
    rds_template_s3_url = "https://s3." + cfn_region + ".amazonaws.com/" + bucket + "/" + key_prefix + "mySQL_rds.json"
    # Name of the RDS stack to create (assumed here; stackname is defined elsewhere in the original module)
    stackname = 'MySQLforResiliencyTesting'
    client.create_stack(
        StackName=stackname,
        TemplateURL=rds_template_s3_url,
        Parameters=rds_parameters,
        DisableRollback=False,
        TimeoutInMinutes=30,
        Tags=stack_tags
    )
    return_dict = {'stackname': stackname}
    return return_dict