in src/SystemTablePersistence/snapshot_system_stats.py [0:0]
def snapshot(config_sources):
    """Snapshot Redshift system tables into persistent history tables.

    Resolves connection credentials (from config, .pgpass, or KMS-encrypted
    config values), connects to the cluster, creates the history schema
    objects if needed, copies system-table stats into the history tables,
    optionally unloads them to S3, and optionally cleans up old snapshots.

    :param config_sources: ordered configuration sources consulted by
        ``get_config_value`` (e.g. event payload, environment, config file).
    :return: dict ``{"inserted": <rowcounts>, "deleted": <rowcounts or None>}``
    :raises Exception: on missing/undecryptable password, failed connection,
        or a non-integer cleanup-days configuration value.
    """
    aws_region = get_config_value(['AWS_REGION'], config_sources)

    set_debug = get_config_value(['DEBUG', 'debug', ], config_sources)
    # Accept either boolean True or the string 'true' (any case). The
    # str(...) guard prevents an AttributeError when the value is a bool
    # (False.upper() would crash) and stops arbitrary truthy strings such
    # as "false" from enabling debug mode.
    if set_debug is not None and (set_debug is True or str(set_debug).upper() == 'TRUE'):
        global debug
        debug = True

    kms = boto3.client('kms', region_name=aws_region)

    if debug:
        print("Connected to AWS KMS & CloudWatch in %s" % aws_region)

    user = get_config_value(['DbUser', 'db_user', 'dbUser'], config_sources)
    host = get_config_value(['HostName', 'cluster_endpoint', 'dbHost', 'db_host'], config_sources)
    port = int(get_config_value(['HostPort', 'db_port', 'dbPort'], config_sources))
    database = get_config_value(['DatabaseName', 'db_name', 'db'], config_sources)
    cluster_name = get_config_value([config_constants.CLUSTER_NAME], config_sources)
    unload_s3_location = get_config_value([config_constants.S3_UNLOAD_LOCATION], config_sources)
    unload_role_arn = get_config_value([config_constants.S3_UNLOAD_ROLE_ARN], config_sources)

    if unload_s3_location is not None and unload_role_arn is None:
        raise Exception("If you configure S3 unload then you must also provide the UnloadRoleARN")

    # we may have been passed the password in the configuration, so extract it if we can
    pwd = get_config_value(['db_pwd'], config_sources)

    # override the password with the contents of .pgpass or environment variables
    try:
        pg_pwd = pgpasslib.getpass(host, port, database, user)
        if pg_pwd:
            pwd = pg_pwd
    except pgpasslib.FileNotFound:
        # no .pgpass file is fine - fall back to configured/encrypted password
        pass

    if pwd is None:
        enc_password = get_config_value(['EncryptedPassword', 'encrypted_password', 'encrypted_pwd', 'dbPassword'],
                                        config_sources)
        # fail fast with a clear message instead of letting
        # base64.b64decode(None) raise an opaque TypeError below
        if enc_password is None:
            raise Exception("Unable to resolve a database password from configuration, .pgpass or KMS")

        # resolve the authorisation context, if there is one, and decrypt the password
        auth_context = get_config_value('kms_auth_context', config_sources)
        if auth_context is not None:
            auth_context = json.loads(auth_context)

        try:
            if auth_context is None:
                pwd = kms.decrypt(CiphertextBlob=base64.b64decode(enc_password))['Plaintext']
            else:
                pwd = kms.decrypt(CiphertextBlob=base64.b64decode(enc_password),
                                  EncryptionContext=auth_context)['Plaintext']
        except Exception:
            # log the context that failed, then propagate - the caller cannot
            # proceed without a password
            print('KMS access failed: exception %s' % sys.exc_info()[1])
            print('Encrypted Password: %s' % enc_password)
            print('Encryption Context %s' % auth_context)
            raise

    # Connect to the cluster
    try:
        if debug:
            print('Connecting to Redshift: %s' % host)
        conn = pg8000.connect(database=database, user=user, password=pwd, host=host, port=port, ssl=ssl)
        conn.autocommit = True
    except Exception:
        print('Redshift Connection Failed: exception %s' % sys.exc_info()[1])
        raise

    if debug:
        print('Successfully Connected to Cluster')

    # create a new cursor for methods to run through
    cursor = conn.cursor()

    # set application name
    set_name = "set application_name to 'RedshiftSystemTablePersistence-v%s'" % __version__
    if debug:
        print(set_name)
    cursor.execute(set_name)

    # load the table configuration (with-block ensures the file is closed)
    with open(os.path.join(os.path.dirname(__file__), 'lib', 'history_table_config.json'), 'r') as config_file:
        table_config = json.load(config_file)

    # create the dependent objects if we need to
    create_schema_objects(cursor, conn)

    # snapshot stats into history tables
    insert_rowcounts = snapshot_system_tables(cursor, conn, table_config)

    # export the data to s3 if configured; this is best-effort and must not
    # prevent the cleanup step below. NOTE: the original 'except e:' was a
    # bug - it raised NameError instead of catching the unload failure.
    try:
        if unload_s3_location is not None:
            unload_stats(cursor, table_config, cluster_name, unload_s3_location, unload_role_arn)
    except Exception:
        print("Exception during System Table Detail unload to S3. This will not prevent automated cleanup.")
        print(traceback.format_exc())

    # cleanup history tables if requested in the configuration
    delete_rowcounts = None
    cleanup_after_days = get_config_value([config_constants.SYSTABLE_CLEANUP_AFTER_DAYS], config_sources)
    if cleanup_after_days is not None:
        try:
            cleanup_after_days = int(cleanup_after_days)
        except ValueError:
            print("Configuration value '%s' must be an integer" % config_constants.SYSTABLE_CLEANUP_AFTER_DAYS)
            raise
        if cleanup_after_days > 0:
            delete_rowcounts = cleanup_snapshots(cursor, conn, cleanup_after_days, table_config)

    cursor.close()
    conn.close()

    return {"inserted": insert_rowcounts, "deleted": delete_rowcounts}