in TerraformScripts/sc_terraform_wrapper/__main__.py [0:0]
def run(cleanups, args, request, config, s3, response_poster):
    """Execute a single CloudFormation custom-resource request with Terraform.

    Sets up an isolated workspace, downloads the Terraform artifact, injects
    backend/provider/variable configuration, runs the Terraform command for the
    request type, then tags resources and manages the resource group before
    posting the result back to CloudFormation.

    Args:
        cleanups: list of (description, callable) pairs; cleanup actions are
            appended here for the caller to run after the request finishes.
        args: parsed CLI arguments; only ``args.external_id`` is read.
        request: CloudFormation custom-resource request dict (must contain
            'ResourceProperties', 'RequestId', 'RequestType', 'StackId',
            'PhysicalResourceId', and 'ResponseURL').
        config: wrapper configuration dict ('root-workspace-path', 'bucket').
        s3: boto3 S3 client used to fetch the artifact and parse state.
        response_poster: helper that posts SUCCESS/timeout responses back to
            the CloudFormation response URL.

    Side effects: creates/removes directories, changes the process CWD, sets
    TF_IN_AUTOMATION, installs a SIGALRM handler, and posts HTTP responses.
    """
    resource_properties = request['ResourceProperties']
    resource_group_name = terraform_resource_group.construct_resource_group_name(request)
    assume_role_arn = resource_properties['LaunchRoleArn']
    session_name = 'TerraformAssumeRoleSession-{}'.format(request['RequestId'])
    assume_role_input = AssumeRoleInput(assume_role_arn, args.external_id, session_name)

    request_type = request['RequestType']
    # The user doesn't have the option to specify a dry-run argument (or any argument)
    # for delete requests, so dry-runs are not supported for delete requests.
    request_is_dryrun = bool(resource_properties.get('DryRunId')) and request_type != 'Delete'

    # Try to retrieve the tags at the beginning. In case getting tags fails, it's
    # better to fail before Terraform creates all the resources rather than after.
    user_tags = None
    stack_arn = Arn(request['StackId'])
    if not request_is_dryrun and request_type in ('Create', 'Update'):
        user_tags = terraform_tag.retrieve_user_tags_from_cfn(stack_arn, assume_role_input)
        user_tags['CfnStackId'] = request['StackId']
        user_tags['TfResourceGroupName'] = resource_group_name

    # Set up directory of execution
    print('Creating workspace')
    root_workspace_path = os.path.expanduser(config['root-workspace-path'])
    physical_resource_id = request['PhysicalResourceId']
    workspace_path = os.path.join(root_workspace_path, physical_resource_id)
    os.makedirs(workspace_path)
    cleanups.append(('Remove workspace', lambda: shutil.rmtree(workspace_path)))

    artifact_url = resource_properties['TerraformArtifactUrl']
    artifact_file_local_path = os.path.join(root_workspace_path, physical_resource_id + '-file')
    download_artifact(s3, artifact_url, artifact_file_local_path, workspace_path, cleanups)

    print('Writing backend configuration to file')
    state_file_location = inject_backend_config(workspace_path, config, physical_resource_id)
    print('Creating AWS provider override file')
    stack_region = stack_arn.region
    inject_aws_provider_override(workspace_path, stack_region, assume_role_input)
    print('Writing variables to file')
    inject_variables(workspace_path, resource_properties)

    print('Starting Terraform execution')
    os.chdir(workspace_path)
    os.environ['TF_IN_AUTOMATION'] = 'true'
    executor = TerraformExecutor(request_type)
    executor.init_workspace()

    if request_is_dryrun:
        executor.plan()
        # Even if dry run has error, mark success so user can try again.
        response_poster.post_response_with_expiration_check('SUCCESS')
        return

    proc = executor.start_request_command()
    # If the presigned response URL is about to expire, post a timeout response
    # (and kill the Terraform process) before CloudFormation gives up on us.
    signal_handler = lambda signum, frame: response_poster.post_timeout_response(proc.pid)
    signal.signal(signal.SIGALRM, signal_handler)
    # save additional time before the presigned s3 url timeout
    signal.alarm(seconds_until_expiry(request['ResponseURL']))
    executor.finish_request_command(proc)

    # Parse the terraform state file and add resource group tags
    state = terraform_state.parse(s3, workspace_path, config['bucket'], physical_resource_id, stack_arn)
    arns = state.arns
    if request_type in ('Create', 'Update'):
        print('Tagging resources with tags: ' + str(user_tags))
        terraform_tag.tag_resources(arns, user_tags, stack_region, assume_role_input)
        print('Creating resource group if not exist')
        terraform_resource_group.create_resource_group_if_not_exist(user_tags, assume_role_input)
    elif request_type == 'Delete':
        terraform_resource_group.delete_resource_group(resource_group_name, stack_region, assume_role_input)

    response_poster.post_response_with_expiration_check('SUCCESS',
                                                        arns=arns,
                                                        output_variables=state.outputs,
                                                        state_file_location=state_file_location)
    signal.alarm(0)  # Disable the alarm