# redshift_monitoring.py
def monitor_cluster(config_sources):
    """Collect Redshift cluster statistics and publish them to CloudWatch.

    Resolves connection settings from ``config_sources`` via the module's
    ``get_config_value`` helper, obtains the database password from .pgpass,
    a plaintext config value, or a KMS-encrypted config value (in that
    order), connects to the cluster with pg8000, gathers table /
    service-class / externally configured metrics, and pushes them to
    CloudWatch in batches of 20 (the put_metric_data per-call limit).

    :param config_sources: configuration source(s) searched by
        ``get_config_value`` — assumed to be whatever that helper accepts
        (TODO confirm against its definition elsewhere in this file).
    :raises ValueError: if no password can be resolved from any source.
    """
    aws_region = get_config_value(['AWS_REGION'], config_sources)

    set_debug = get_config_value(['DEBUG', 'debug', ], config_sources)
    # Accept either a boolean True or the (case-insensitive) string 'true'.
    # str() guards against a non-string, non-bool config value, which would
    # otherwise raise AttributeError on .upper().
    if set_debug is not None and ((isinstance(set_debug, bool) and set_debug)
                                  or str(set_debug).upper() == 'TRUE'):
        global debug
        debug = True

    kms = boto3.client('kms', region_name=aws_region)
    cw = boto3.client('cloudwatch', region_name=aws_region)

    if debug:
        print("Connected to AWS KMS & CloudWatch in %s" % aws_region)

    user = get_config_value(['DbUser', 'db_user', 'dbUser'], config_sources)
    host = get_config_value(['HostName', 'cluster_endpoint', 'dbHost', 'db_host'], config_sources)
    port = int(get_config_value(['HostPort', 'db_port', 'dbPort'], config_sources))
    database = get_config_value(['DatabaseName', 'db_name', 'db'], config_sources)
    cluster = get_config_value(['ClusterName', 'cluster_name', 'clusterName'], config_sources)
    global interval
    interval = get_config_value(['AggregationInterval', 'agg_interval', 'aggregtionInterval'], config_sources)

    # Password resolution order: .pgpass file, plaintext config, KMS-encrypted config.
    pwd = None
    try:
        pwd = pgpasslib.getpass(host, port, database, user)
    except pgpasslib.FileNotFound:
        # No .pgpass available - fall through to the other sources.
        pass

    # check if unencrypted password exists if no pgpasslib
    if pwd is None:
        pwd = get_config_value(['db_pwd'], config_sources)

    # check for encrypted password if the above two don't exist
    if pwd is None:
        enc_password = get_config_value(['EncryptedPassword', 'encrypted_password', 'encrypted_pwd', 'dbPassword'],
                                        config_sources)
        if enc_password is None:
            # Fail fast with a clear message instead of the confusing
            # TypeError that base64.b64decode(None) would raise below.
            raise ValueError('No database password found in .pgpass, db_pwd, or encrypted configuration')

        # resolve the authorisation context, if there is one, and decrypt the password
        auth_context = get_config_value('kms_auth_context', config_sources)
        if auth_context is not None:
            auth_context = json.loads(auth_context)

        try:
            if auth_context is None:
                pwd = kms.decrypt(CiphertextBlob=base64.b64decode(enc_password))[
                    'Plaintext']
            else:
                pwd = kms.decrypt(CiphertextBlob=base64.b64decode(enc_password),
                                  EncryptionContext=auth_context)['Plaintext']
        except Exception:
            # Narrowed from a bare except so SystemExit/KeyboardInterrupt
            # propagate untouched; diagnostics are printed before re-raising.
            print('KMS access failed: exception %s' % sys.exc_info()[1])
            print('Encrypted Password: %s' % enc_password)
            print('Encryption Context %s' % auth_context)
            raise

    # Connect to the cluster
    try:
        if debug:
            print('Connecting to Redshift: %s' % host)
        conn = pg8000.connect(database=database, user=user, password=pwd, host=host, port=port, ssl=ssl)
        conn.autocommit = True
    except Exception:
        print('Redshift Connection Failed: exception %s' % sys.exc_info()[1])
        raise

    if debug:
        print('Successfully Connected to Cluster')

    # create a new cursor for methods to run through
    cursor = conn.cursor()
    try:
        # set application name
        set_name = "set application_name to 'RedshiftAdvancedMonitoring-v%s'" % __version__
        if debug:
            print(set_name)
        cursor.execute(set_name)

        # collect table statistics
        put_metrics = gather_table_stats(cursor, cluster)
        # collect service class statistics
        put_metrics.extend(gather_service_class_stats(cursor, cluster))
        # run the externally configured commands and append their values onto the put metrics
        put_metrics.extend(run_external_commands('Redshift Diagnostic', 'monitoring-queries.json', cursor, cluster))
        # run the supplied user commands and append their values onto the put metrics
        put_metrics.extend(run_external_commands('User Configured', 'user-queries.json', cursor, cluster))

        # add a metric for how many metrics we're exporting (whoa inception)
        put_metrics.extend([{
            'MetricName': 'CloudwatchMetricsExported',
            'Dimensions': [
                {'Name': 'ClusterIdentifier', 'Value': cluster}
            ],
            'Timestamp': datetime.datetime.utcnow(),
            'Value': len(put_metrics),
            'Unit': 'Count'
        }])

        # CloudWatch put_metric_data accepts at most 20 datapoints per call.
        max_metrics = 20
        group = 0
        print("Publishing %s CloudWatch Metrics" % (len(put_metrics)))

        for x in range(0, len(put_metrics), max_metrics):
            group += 1
            # slice the metrics into blocks of 20 or just the remaining metrics
            put = put_metrics[x:(x + max_metrics)]
            if debug:
                print("Metrics group %s: %s Datapoints" % (group, len(put)))
                print(put)
            try:
                cw.put_metric_data(
                    Namespace='Redshift',
                    MetricData=put
                )
            except Exception:
                print('Pushing metrics to CloudWatch failed: exception %s' % sys.exc_info()[1])
                raise
    finally:
        # Release database resources even when metric collection or the
        # CloudWatch publish raises — the original leaked the connection here.
        cursor.close()
        conn.close()