in pkg/node/node.go [123:177]
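// CordonAndDrain cordons the named node and then drains it by evicting (or
// deleting) the pods scheduled there; with the dry-run flag set it only logs
// the action it would have taken.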
func (n Node) CordonAndDrain(nodeName string, reason string, recorder recorderInterface) error {
if n.nthConfig.DryRun {
log.Info().Str("node_name", nodeName).Str("reason", reason).Msg("Node would have been cordoned and drained, but dry-run flag was set.")
return nil
}
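	// Optionally take the node out of load-balancer rotation before cordoning it
	// ("Maybe" reflects that this only happens when the behavior is enabled).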
err := n.MaybeMarkForExclusionFromLoadBalancers(nodeName)
if err != nil {
return err
}
err = n.Cordon(nodeName, reason)
if err != nil {
return err
}
// Be very careful here: in tests, nodeName and node.Name can be different, as
// fetchKubernetesNode does some translation using the kubernetes.io/hostname label
node, err := n.fetchKubernetesNode(nodeName)
if err != nil {
return err
}
var pods *corev1.PodList
	// Drain the node: evict (or delete) every eligible pod running on it
log.Info().Str("node_name", nodeName).Msg("Draining the node")
// Emit events for all pods that will be evicted
if recorder != nil {
pods, err = n.fetchAllPods(node.Name)
if err == nil {
for _, pod := range pods.Items {
podRef := &corev1.ObjectReference{
Kind: "Pod",
Name: pod.Name,
Namespace: pod.Namespace,
}
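			// Annotate the event with the node name plus all of the pod's labels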
annotations := make(map[string]string)
annotations["node"] = nodeName
for k, v := range pod.GetLabels() {
annotations[k] = v
}
recorder.AnnotatedEventf(podRef, annotations, corev1.EventTypeNormal, PodEvictReason, PodEvictMsgFmt, nodeName)
}
}
}
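	// Two drain strategies: reuse the pod list fetched above and evict those pods
	// directly, or fall back to kubectl's drain helper, which lists the node's pods itself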
if n.nthConfig.UseAPIServerCacheToListPods {
if pods != nil {
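			// Skip DaemonSet-managed pods; their controller would immediately reschedule them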
pods = n.FilterOutDaemonSetPods(pods)
err = n.drainHelper.DeleteOrEvictPods(pods.Items)
}
} else {
// RunNodeDrain does an etcd quorum-read to list all pods on this node
err = drain.RunNodeDrain(n.drainHelper, node.Name)
}
if err != nil {
return err
}
return nil
}
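
For orientation, a minimal caller sketch (not from node.go). The package name, helper name, and reason string are illustrative, and it assumes the unexported recorderInterface parameter is satisfied by a client-go record.EventRecorder, since AnnotatedEventf is the only recorder method used above.

package interruption

import (
	"fmt"

	"github.com/aws/aws-node-termination-handler/pkg/node"
	"k8s.io/client-go/tools/record"
)

// drainForInterruption is a hypothetical helper: it cordons and drains the
// named node and wraps any failure so the caller can decide whether to retry.
func drainForInterruption(n node.Node, nodeName string, recorder record.EventRecorder) error {
	// The reason string is passed through to Cordon (and logged in dry-run mode).
	if err := n.CordonAndDrain(nodeName, "Spot interruption notice received", recorder); err != nil {
		return fmt.Errorf("cordon and drain of node %q failed: %w", nodeName, err)
	}
	return nil
}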