in static/Cost/300_Optimization_Data_Collection/Code/source/ecs/ecs.py [0:0]
import json
import logging
import os
from datetime import date

import boto3

# assume_role, lits_regions and start_crawler are helper functions expected to be
# defined elsewhere in this module.


def lambda_handler(event, context):
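    """Collect ECS cluster and service tag data for the account in each incoming record,
    write it to /tmp as JSON lines, upload the file to S3 and start the crawler."""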
    bucket = os.environ[
        "BUCKET_NAME"
    ]  # The S3 bucket to upload to, taken from the Lambda environment variables
DestinationPrefix = os.environ["PREFIX"]
try:
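        # Each record body is expected to carry the account_id to collect from (e.g. an SQS message)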
for record in event['Records']:
body = json.loads(record["body"])
account_id = body["account_id"]
print(account_id)
            list_region = lits_regions()  # Helper (defined elsewhere in this module) returning the regions to collect from
            with open(
                "/tmp/data.json", "w"
            ) as f:  # Write to the Lambda's temporary /tmp storage
for region in list_region:
client = assume_role(account_id, "ecs", region)
                    paginator = client.get_paginator(
                        "list_clusters"
                    )  # Paginate in case an account has a large number of clusters
response_iterator = paginator.paginate()
try:
for response in response_iterator: # extracts the needed info
for cluster in response["clusterArns"]:
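                                # Cluster ARNs look like arn:aws:ecs:<region>:<account>:cluster/<name>,
                                # so split("/")[1] is the cluster name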
listservices = client.list_services(
cluster=cluster.split("/")[1], maxResults=100
)
for i in listservices["serviceArns"]:
                                    # Service ARNs look like arn:aws:ecs:<region>:<account>:service/<cluster>/<service>
services = client.describe_services(
cluster=cluster.split("/")[1],
services=[
i.split("/")[2],
],
include=[
"TAGS",
],
)
for service in services["services"]:
data = {
"cluster": cluster.split("/")[1],
"service": service.get("serviceName"),
"servicesARN": i, #.split("/")[2]
"tags": service.get("tags"),
"account_id":account_id
}
print(data)
                                        jsondata = json.dumps(
                                            data
                                        )  # Serialize the record as a single JSON line
f.write(jsondata)
f.write("\n")
                    except Exception as e:
                        print(e)
print("respose gathered")
today = date.today()
year = today.year
month = today.month
client = boto3.client("s3")
client.upload_file(
"/tmp/data.json",
bucket,
f"{DestinationPrefix}-data/year={year}/month={month}/{DestinationPrefix}-{account_id}.json",
) # uploading the file with the data to s3
print(f"Data in s3 - {DestinationPrefix}-data/year={year}/month={month}")
start_crawler()
except Exception as e:
print(e)
        logging.warning(f"{e}")