in global-clusters-automation/failover_and_convert_to_global.py [0:0]
def get_cluster_details(cluster):
    """Fetch provisioning details for a DocumentDB cluster identified by its ARN.

    Args:
        cluster: Full cluster ARN, e.g.
            ``arn:aws:rds:<region>:<account>:cluster:<cluster-id>``.
            The region (index 3) and cluster id (last segment) are parsed
            out of the colon-separated ARN.

    Returns:
        dict: Settings needed to recreate the cluster as a secondary in a
        new global cluster — region, a timestamped secondary cluster id,
        instance count, subnet group, security group ids, KMS key, backup
        retention, parameter group, backup/maintenance windows, storage
        encryption, and deletion protection.

    Raises:
        ClientError: Re-raised unchanged if the DescribeDBClusters call fails.

    NOTE(review): relies on module-level ``session`` (boto3 session) and
    ``dt_string`` (run timestamp) — assumed to be defined earlier in the file.
    """
    try:
        # Split the ARN once; segment 3 is the region, the last segment is
        # the cluster identifier.
        arn_parts = cluster.split(":")
        cluster_id = arn_parts[-1]
        region = arn_parts[3]
        client = session.client('docdb', region_name=region)
        response = client.describe_db_clusters(
            DBClusterIdentifier=cluster
        )
        cluster_response = response['DBClusters'][0]
        vpc_group_ids = [sg['VpcSecurityGroupId']
                         for sg in cluster_response['VpcSecurityGroups']]
        # When converting the cluster to a global cluster and adding clusters
        # from the prior global cluster, we append the timestamp to keep the
        # cluster ID unique. This is needed so that the function does not wait
        # for the older clusters to be deleted, and it helps differentiate
        # clusters created by this script.
        cluster_details = {"region": region,
                           "secondary_cluster_id": cluster_id + "-" + dt_string,
                           "number_of_instances": len(cluster_response['DBClusterMembers']),
                           "subnet_group": cluster_response['DBSubnetGroup'],
                           "security_group_id": vpc_group_ids,
                           "kms_key_id": cluster_response['KmsKeyId'],
                           "backup_retention_period": cluster_response['BackupRetentionPeriod'],
                           "cluster_parameter_group": cluster_response['DBClusterParameterGroup'],
                           "preferred_back_up_window": cluster_response['PreferredBackupWindow'],
                           "preferred_maintenance_window": cluster_response['PreferredMaintenanceWindow'],
                           "storage_encryption": cluster_response['StorageEncrypted'],
                           "deletion_protection": cluster_response['DeletionProtection']}
        return cluster_details
    except ClientError as e:
        print('ERROR OCCURRED WHILE PROCESSING: ', e)
        print('PROCESSING WILL STOP')
        # BUGFIX: the original `raise ClientError` re-raised the class with no
        # constructor args; ClientError requires (error_response,
        # operation_name), so that line itself crashed with TypeError and
        # destroyed the original traceback. A bare `raise` re-raises the
        # caught exception intact for callers that handle ClientError.
        raise