in scripts/scaling/aws_openshift_quickstart/scaler.py [0:0]
def scale_inventory_groups(ocp_version='3.7'):
"""
Processes pending scaling activities for the cluster groups.
- Fires off the required ansible playbooks, if needed.
- Prunes the ansible inventory of instances that have scaled down / terminated.
:param ocp_version: Version of the OpenShift Container Platform in use. Defaults to 3.7.
"""
InventoryConfig.ip_to_id_map = {v: k for (k, v) in InventoryConfig.id_to_ip_map.items()}
# First, we just make sure that there's *something* to add/remove.
api_state = False
attempts = 0
total_scaled_nodes = []
log.info("Verifying that the API reflects the scaling events properly")
while api_state is False:
for group in ClusterGroups.groups:
total_scaled_nodes += group.scale_in_progress_instances['terminate']
total_scaled_nodes += group.scale_in_progress_instances['launch']
if attempts > 12:
log.info("No scaling events were populated. 2 minute timer expired. Moving on...")
break
if len(total_scaled_nodes) == 0:
time.sleep(10)
ClusterGroups.setup(ocp_version)
attempts += 1
else:
log.info("Great! The API contains scaling events that we need to process!")
api_state = True
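# InventoryScaling holds the per-group lists of nodes queued for addition / removal.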
_is = InventoryScaling
scaleup_needed = False
scaledown_needed = False
for group in ClusterGroups.groups:
if (not group.scale_override) and (not group.scaling_events):
continue
# Add the group's instance IDs to the termination and launch lists.
_is.nodes_to_remove[group.logical_name] += group.scale_in_progress_instances['terminate']
_is.nodes_to_add[group.logical_name] += group.scale_in_progress_instances['launch']
# Mirror the per-group lists into the combined lists as well.
_is.nodes_to_add['combined'] += _is.nodes_to_add[group.logical_name]
_is.nodes_to_remove['combined'] += _is.nodes_to_remove[group.logical_name]
if _is.nodes_to_remove['combined']:
scaledown_needed = True
if _is.nodes_to_add['combined']:
scaleup_needed = True
# We wait for the API to populate with the new instance IDs.
_is.wait_for_api()
# Now we convert the IDs in each list to IP Addresses.
for e in _is.nodes_to_add.keys():
_templist = []
for instance_id in _is.nodes_to_add[e]:
_templist.append(InventoryConfig.id_to_ip_map[instance_id])
_is.nodes_to_add[e] = _templist
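# Removals are resolved via known_instances (the entry recorded when the instance
# joined the inventory); instance IDs that were never registered are skipped.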
for e in _is.nodes_to_remove.keys():
_templist = []
for instance_id in _is.nodes_to_remove[e]:
try:
_templist.append(InventoryConfig.known_instances[instance_id])
except KeyError:
continue
_is.nodes_to_remove[e] = _templist
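# Extra vars handed to the pre-scaledown / pre-scaleup playbooks below.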
scaledown_extra_args = {
'etcdremove': _is.nodes_to_remove['etcd'],
'noderemove': _is.nodes_to_remove['nodes'],
'masterremove': _is.nodes_to_remove['masters']
}
scaleup_extra_args = {
'etcdadd': _is.nodes_to_add['etcd'],
'nodeadd': _is.nodes_to_add['nodes'],
'masteradd': _is.nodes_to_add['masters']
}
log.debug("scaleup_needed: %s" % scaleup_needed)
log.debug("scaledown_needed: %s" % scaledown_needed)
log.debug("scaleup_extra_args: %s" % scaleup_extra_args)
log.debug("scaledown_extra_args: %s" % scaledown_extra_args)
if scaledown_needed:
# Housekeeping.
# TODO: YAML Config
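# Masters are also nodes, so fold them into the node-removal list for the scaledown play.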
if _is.nodes_to_remove['masters']:
_is.nodes_to_remove['nodes'] += _is.nodes_to_remove['masters']
log.info("Performing pre-scaledown tasks.")
run_ansible_playbook(category='pre_scaledown_tasks',
playbook=InventoryConfig.pre_scaledown_playbook,
extra_args=scaledown_extra_args)
if scaleup_needed:
# Housekeeping.
# TODO: YAML Config
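# Mirror masters into the node-add list for the pre-scaleup play; they are weeded back out further down.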
if _is.nodes_to_add['masters']:
_is.nodes_to_add['nodes'] += _is.nodes_to_add['masters']
log.info("We've detected that we need to run ansible playbooks to scale up the cluster!")
log.info("Performing pre-scaleup tasks.")
run_ansible_playbook(category='pre_scaleup_tasks',
playbook=InventoryConfig.pre_scaleup_playbook,
extra_args=scaleup_extra_args)
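# Process the queued scaling changes and refresh the on-disk ansible inventory.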
_is.process_pipeline()
InventoryConfig.write_ansible_inventory_file()
# Masters were mirrored into the node-add list for the pre-scaleup play above; weed them
# back out here so they aren't provisioned a second time as plain nodes.
# Housekeeping.
# TODO: YAML Config
_masters = _is.nodes_to_add['masters']
_is.nodes_to_add['nodes'] = [host for host in _is.nodes_to_add['nodes'] if host not in _masters]
ansible_commands = {}
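# Build one ansible-playbook command per inventory category that has nodes waiting to be provisioned.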
for category in InventoryConfig.inventory_node_skel.keys():
if category == 'provision':
continue
if category in ['etcd', 'glusterfs']:
_is_cat_name = category
else:
_is_cat_name = "{}{}".format(category, 's')
# categories are plural in the nodes_to_add dict, singular in everything else.
if len(_is.nodes_to_add[_is_cat_name]) == 0:
continue
provisioning_category = InventoryConfig.inventory_categories['provision'][0]
svars = {"target": provisioning_category, "scaling_category": category}
if ocp_version != '3.7':
svars['scale_prefix'] = '/usr/share/ansible/openshift-ansible/playbooks'
_extra_vars = '{}"{}"'.format('--extra-vars=', str(svars))
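# Yields a flag of the form: --extra-vars="{'target': '<provision category>', 'scaling_category': 'nodes'}"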
_ansible_cmd = "{} {} {}".format(
"ansible-playbook",
InventoryConfig.ansible_playbook_wrapper,
_extra_vars
)
log.info("We will run the following ansible command:")
log.info(_ansible_cmd)
ansible_commands[_is_cat_name] = _ansible_cmd
if ansible_commands:
run_ansible_playbook(prepared_commands=ansible_commands)
InventoryConfig.write_ansible_inventory_file()
_is.summarize_playbook_results()
InventoryConfig.write_ansible_inventory_file()
if scaleup_needed:
log.info("Performing post-scaleup tasks.")
run_ansible_playbook(category='post_scaleup_tasks',
playbook=InventoryConfig.post_scaleup_playbook)
if scaledown_needed:
log.info("Performing post-scaledown tasks.")
run_ansible_playbook(category='post_scaledown_tasks',
playbook=InventoryConfig.post_scaledown_playbook)