in eksupdate/starter.py [0:0]
def actual_update(cluster_name, asg_iter, to_update, regionName, max_retry, forced):
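    """Roll the worker nodes of one Auto Scaling group onto the latest EKS-optimized AMI.

    Attaches an updated launch configuration when the current one is outdated, then replaces
    each outdated instance in turn: scale out, wait for the replacement to become ready,
    cordon/drain/delete the old node, and terminate its EC2 instance. Returns False when the
    instance type cannot be determined, True otherwise.
    """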
    presentversion = "1.1 eks update"
    instance_type, image_to_search = get_ami_name(cluster_name, asg_iter, presentversion, regionName)
    print("The Image Type Detected = ", instance_type)
    if instance_type == "NAN":
        return False
    if isinstance(image_to_search, str) and "Windows_Server" in image_to_search:
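        # EKS-optimized Windows AMI names end with a Kubernetes-version/date suffix;
        # keep only the leading family name so the AMI lookup is not pinned to one release.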
        image_to_search = image_to_search[:46]
    latest_ami = get_latestami(to_update, instance_type, image_to_search, regionName)
    print("The Latest AMI Recommended = {image}".format(image=latest_ami))
    logs_pusher(regionName=regionName, cluster_name=cluster_name, msg="The Latest AMI Image = {image}".format(image=latest_ami))
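    # When the ASG's launch configuration still references an outdated AMI, attach a new one built on the recommended image.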
    if get_outdated_Asg(asg_iter, latest_ami, regionName):
        addAutoScaling(asg_iter, latest_ami, regionName)
        print("New Launch Configuration Added to = {ast} With EKS AMI = {ami}".format(ast=asg_iter, ami=latest_ami))
    outdated_instances = outdated_lt(asg_iter, regionName)
    if len(outdated_instances) == 0:
        return True
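    # Replace each outdated instance one at a time: scale out, wait for the replacement node,
    # cordon and drain the old node, then terminate its instance.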
    try:
        # Track instances that have already been terminated so the running count stays accurate.
        terminated_ids = []
        logs_pusher(regionName=regionName, cluster_name=cluster_name, msg="The Outdated Instances Found Are = {instan}".format(instan=outdated_instances))
        for instance in outdated_instances:
            before_count = get_num_of_instances(asg_iter, terminated_ids, regionName)
            logs_pusher(regionName=regionName, cluster_name=cluster_name, msg="Total Instance count = {count}".format(count=before_count))
            add_time = datetime.datetime.now(datetime.timezone.utc)
            # Add a fresh node unless the group already holds one extra instance for every outdated one.
            if abs(before_count - len(outdated_instances)) != len(outdated_instances):
                add_node(asg_iter, regionName)
                time.sleep(45)
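            # Pick up the instance the ASG launched after add_time; this is the replacement worker.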
            latest_instance = get_latest_instance(asg_name=asg_iter, add_time=add_time, regionName=regionName)
            logs_pusher(regionName=regionName, cluster_name=cluster_name, msg="The Instance Created = {instan}".format(instan=latest_instance))
            print(latest_instance, "is Created and waiting for it to be ready")
            time.sleep(30)
            wait_for_ready(latest_instance, regionName)
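            # Resolve the outdated EC2 instance to its Kubernetes node name ("NAN" when it is not registered in the cluster).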
            old_pod_id = find_node(cluster_name=cluster_name, instance_id=instance, op="find", regionName=regionName)
            if old_pod_id != "NAN":
                retry = 0
                flag = 0
                while retry <= max_retry:
                    # Re-check that the instance still resolves to a node in this cluster.
                    if find_node(cluster_name=cluster_name, instance_id=instance, op="find", regionName=regionName) != "NAN":
                        flag = 1
                        break
                    retry += 1
                    time.sleep(10)
                if flag == 0:
                    # The instance never resolved to a node of this cluster; terminate it and abort.
                    worker_terminate(instance, regionName=regionName)
                    raise Exception("Instance does not correspond to a node in the cluster's node group")
print("Unshceduling The worker Node ={wn} ".format(wn=old_pod_id))
unschedule_old_nodes(
ClusterName=cluster_name, Nodename=old_pod_id,regionName=regionName)
logs_pusher(regionName=regionName,cluster_name=cluster_name,msg="The Node is Unscheduled = {instan}".format(instan=old_pod_id))
print("Worker Node Drained = {instan}".format(instan=old_pod_id))
drain_nodes(cluster_name=cluster_name, Nodename=old_pod_id,forced=forced,regionName=regionName)
logs_pusher(regionName=regionName,cluster_name=cluster_name,msg="The Worker Node is Drained = {instan}".format(instan=old_pod_id))
print("Deleting worker Node Started ={op} ".format(op=old_pod_id))
delete_node(cluster_name=cluster_name, NodeName=old_pod_id,regionName=regionName)
logs_pusher(regionName=regionName,cluster_name=cluster_name,msg="The Worker Node is Deleted = {instan}".format(instan=old_pod_id))
print("Terminating Worker Node {wn}".format(wn=instance))
worker_terminate(instance,regionName=regionName)
terminated_ids.append(instance)
logs_pusher(regionName=regionName,cluster_name=cluster_name,msg="The Worker Node instance is Terminated = {instan}".format(instan=instance))
        return True
    except Exception as e:
        raise e