in source/compute_plane/python/lambda/drainer/handler.py [0:0]
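
The handler below leans on module-level setup that this excerpt omits. Here is a minimal sketch of what that preamble plausibly contains; every name in it (KUBE_FILEPATH, region, logger, the boto3 clients) is inferred from its use in the handler, not taken from the repo:

# --- Assumed module-level setup (not shown in this excerpt) ---
import os
import logging

import boto3
from kubernetes.client.rest import ApiException  # raised by the CoreV1Api calls below

KUBE_FILEPATH = '/tmp/kubeconfig'   # /tmp is the only writable path in Lambda
region = os.environ['AWS_REGION']   # injected automatically by Lambda

logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)

s3 = boto3.client('s3')
eks = boto3.client('eks')
ec2 = boto3.client('ec2')
asg = boto3.client('autoscaling')

# The triggering EventBridge payload for a terminate lifecycle hook has the
# shape the handler indexes into:
# {
#   "detail-type": "EC2 Instance-terminate Lifecycle Action",
#   "detail": {
#     "LifecycleHookName": "...",
#     "AutoScalingGroupName": "...",
#     "EC2InstanceId": "i-0123456789abcdef0",
#     "LifecycleTransition": "autoscaling:EC2_INSTANCE_TERMINATING"
#   }
# }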
def _lambda_handler(env, k8s_config, k8s_client, event):
    kube_config_bucket = env['kube_config_bucket']
    cluster_name = env['cluster_name']
    # Ensure a kubeconfig exists: download it from S3 if a bucket is
    # configured, otherwise generate one from the EKS API.
    if not os.path.exists(KUBE_FILEPATH):
        if kube_config_bucket:
            logger.info('No kubeconfig file found. Downloading...')
            s3.download_file(kube_config_bucket, env['kube_config_object'], KUBE_FILEPATH)
        else:
            logger.info('No kubeconfig file found. Generating...')
            create_kube_config(eks, cluster_name)

    # Pull the terminating instance out of the lifecycle event and map it to
    # its Kubernetes node name (the instance's private DNS name on EKS).
    lifecycle_hook_name = event['detail']['LifecycleHookName']
    auto_scaling_group_name = event['detail']['AutoScalingGroupName']
    instance_id = event['detail']['EC2InstanceId']
    logger.info('Instance ID: %s', instance_id)
    instance = ec2.describe_instances(InstanceIds=[instance_id])['Reservations'][0]['Instances'][0]
    node_name = instance['PrivateDnsName']
    logger.info('Node name: %s', node_name)

    # Configure the Kubernetes client. A generated kubeconfig (no S3 bucket)
    # carries no credentials, so inject a freshly minted bearer token.
    k8s_config.load_kube_config(KUBE_FILEPATH)
    configuration = k8s_client.Configuration()
    if not kube_config_bucket:
        configuration.api_key['authorization'] = get_bearer_token(cluster_name, region)
        configuration.api_key_prefix['authorization'] = 'Bearer'

    # Build the CoreV1Api client used for the node and pod operations below.
    api = k8s_client.ApiClient(configuration)
    v1 = k8s_client.CoreV1Api(api)

    try:
        if not node_exists(v1, node_name):
            logger.error('Node not found.')
            abandon_lifecycle_action(asg, auto_scaling_group_name, lifecycle_hook_name, instance_id)
            return
        # Cordon the node so nothing new is scheduled, then evict its pods.
        cordon_node(v1, node_name)
        remove_all_pods(v1, node_name)
        logger.info('All pods terminated.')
        # Let the Auto Scaling group proceed with terminating the instance.
        asg.complete_lifecycle_action(LifecycleHookName=lifecycle_hook_name,
                                      AutoScalingGroupName=auto_scaling_group_name,
                                      LifecycleActionResult='CONTINUE',
                                      InstanceId=instance_id)
        logger.info('Lifecycle action completed.')
    except ApiException:
        logger.exception('There was an error removing the pods from node %s', node_name)
        abandon_lifecycle_action(asg, auto_scaling_group_name, lifecycle_hook_name, instance_id)
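
create_kube_config is referenced but not shown. A plausible sketch, assuming it writes a credential-less kubeconfig to KUBE_FILEPATH from DescribeCluster (the handler injects the bearer token afterwards); the repo's actual helper may differ:

import yaml  # assumes PyYAML is packaged with the function

def create_kube_config(eks, cluster_name):
    # Build a minimal kubeconfig from the cluster's endpoint and CA bundle.
    cluster = eks.describe_cluster(name=cluster_name)['cluster']
    kubeconfig = {
        'apiVersion': 'v1',
        'kind': 'Config',
        'clusters': [{
            'name': cluster_name,
            'cluster': {
                'server': cluster['endpoint'],
                'certificate-authority-data': cluster['certificateAuthority']['data'],
            },
        }],
        'contexts': [{
            'name': cluster_name,
            'context': {'cluster': cluster_name, 'user': 'lambda'},
        }],
        'current-context': cluster_name,
        # No credentials here: the handler sets the bearer token on the
        # client Configuration instead.
        'users': [{'name': 'lambda', 'user': {}}],
    }
    with open(KUBE_FILEPATH, 'w') as f:
        yaml.safe_dump(kubeconfig, f)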
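
get_bearer_token is also external to this excerpt. EKS accepts tokens minted by presigning an STS GetCallerIdentity request with the cluster name bound in the x-k8s-aws-id header; below is a sketch of that well-known technique (again, the repo's helper may differ in detail):

import base64
import boto3
from botocore.signers import RequestSigner

def get_bearer_token(cluster_name, region):
    # Presign an STS GetCallerIdentity call scoped to the cluster and wrap
    # it in the token format the EKS API server expects.
    session = boto3.session.Session()
    sts = session.client('sts', region_name=region)
    signer = RequestSigner(sts.meta.service_model.service_id,
                           region, 'sts', 'v4',
                           session.get_credentials(), session.events)
    params = {
        'method': 'GET',
        'url': 'https://sts.{}.amazonaws.com/'
               '?Action=GetCallerIdentity&Version=2011-06-15'.format(region),
        'body': {},
        'headers': {'x-k8s-aws-id': cluster_name},
        'context': {},
    }
    signed_url = signer.generate_presigned_url(params, region_name=region,
                                               expires_in=60, operation_name='')
    token = base64.urlsafe_b64encode(signed_url.encode('utf-8')).decode('utf-8')
    return 'k8s-aws-v1.' + token.rstrip('=')  # strip base64 padding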
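
The Kubernetes-side helpers (node_exists, cordon_node, remove_all_pods) could look roughly like this: cordon via a node patch, drain via the Eviction subresource, skipping pods that eviction cannot permanently remove. A sketch, not the repo's implementation:

import time
from kubernetes import client as k8s  # assumed available to the helpers

def node_exists(v1, node_name):
    # True if a node with this name is registered with the cluster.
    return any(n.metadata.name == node_name for n in v1.list_node().items)

def cordon_node(v1, node_name):
    # Mark the node unschedulable so no new pods land on it.
    v1.patch_node(node_name, {'spec': {'unschedulable': True}})

def _pod_is_evictable(pod):
    # Mirror pods and DaemonSet pods reappear after eviction; skip them.
    annotations = pod.metadata.annotations or {}
    if 'kubernetes.io/config.mirror' in annotations:
        return False
    for ref in pod.metadata.owner_references or []:
        if ref.kind == 'DaemonSet':
            return False
    return True

def remove_all_pods(v1, node_name):
    # Evict every evictable pod on the node, then poll until they are gone.
    selector = 'spec.nodeName=' + node_name
    pods = v1.list_pod_for_all_namespaces(field_selector=selector).items
    for pod in filter(_pod_is_evictable, pods):
        eviction = k8s.V1Eviction(  # V1beta1Eviction on older client versions
            metadata=k8s.V1ObjectMeta(name=pod.metadata.name,
                                      namespace=pod.metadata.namespace))
        v1.create_namespaced_pod_eviction(name=pod.metadata.name,
                                          namespace=pod.metadata.namespace,
                                          body=eviction)
    # Naive wait; in practice the Lambda timeout bounds this loop.
    while any(_pod_is_evictable(p) for p in
              v1.list_pod_for_all_namespaces(field_selector=selector).items):
        time.sleep(5)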
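
Finally, abandon_lifecycle_action mirrors the CONTINUE call in the handler; a sketch matching the call sites above:

def abandon_lifecycle_action(asg_client, auto_scaling_group_name,
                             lifecycle_hook_name, instance_id):
    # Report ABANDON so the Auto Scaling group stops waiting on the hook and
    # terminates the instance even though the drain did not succeed.
    asg_client.complete_lifecycle_action(LifecycleHookName=lifecycle_hook_name,
                                         AutoScalingGroupName=auto_scaling_group_name,
                                         LifecycleActionResult='ABANDON',
                                         InstanceId=instance_id)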