in src/lib/scripts/cloudwatch_uploader.py [0:0]
def run(self):
"""Run the cloudwatch uploader. It monitors the log file, uploads the content to cloudwatch.
"""
for log_line_splits in self._monitor_log_file():
try:
            # Printing the raw log lines is convenient for developers who want all logs in one place,
            # but in production it would duplicate every RoboMaker and SageMaker log into the default
            # CloudWatch logs, increasing computation and cost, so the print statement is commented out.
            # print(log_line_splits)  # for logging in main sageonly job
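            # Convert each non-empty log line into CloudWatch log event entries; a single line may
            # produce more than one entry, hence the flattening step below.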
cw_batch = [CloudWatchUploader._get_cw_data_format(line) for line in log_line_splits if line]
# Flatten the nested lists of log messages
cw_batch = list(itertools.chain(*cw_batch))
            # Skip logging if the CloudWatch batch is empty.
            # CloudWatch enforces hard limits on both the total byte size of a batch and the number of events in it.
if cw_batch and getsizeof(json.dumps(cw_batch)) < MAX_CLOUDWATCH_PUT_LOGS_BATCH_SIZE_BYTES and \
len(cw_batch) < MAX_CLOUDWATCH_LOG_EVENT_BATCH_LENGTH:
self._upload_logs_to_cloudwatch(cw_batch)
else:
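                # The batch exceeds a CloudWatch limit, so split it into chunks that each stay under
                # both the byte-size and event-count limits and upload each chunk separately.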
start_index, total_bytes = 0, 0
for i, log in enumerate(cw_batch):
log_size = getsizeof(json.dumps(log))
if total_bytes + log_size > MAX_CLOUDWATCH_PUT_LOGS_BATCH_SIZE_BYTES or \
(i - start_index) >= MAX_CLOUDWATCH_LOG_EVENT_BATCH_LENGTH:
self._upload_logs_to_cloudwatch(cw_batch[start_index:i])
start_index = i
total_bytes = 0
total_bytes += log_size
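                # Upload whatever remains after the last split point.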
self._upload_logs_to_cloudwatch(cw_batch[start_index:])
except Exception as ex:
            # Ignore any exception raised while uploading to CloudWatch and keep monitoring the logs.
            # The exception reason is already logged in cloudwatch_client.py, but it is logged again
            # here in case the exception came from one of the other functions.
print("Failed to upload logs to cloudwatch: {}".format(ex))