in cmd/routed-eni-cni-plugin/cni.go [326:487]
// del handles a CNI DEL request: it asks the local IPAM daemon (ipamd) to
// release the pod's IP allocation, tears down the pod's network
// (routes/rules or branch-ENI plumbing), and — when network policy is
// enabled — notifies the network policy agent to clean up its state.
//
// CNI DEL is best-effort by design: kubelet is not guaranteed to retry a
// failed delete, and returning an error would prevent the container runtime
// from cleaning up resources. Most failure paths therefore log and return
// nil, falling back to teardown via the previous CNI result where possible.
func del(args *skel.CmdArgs, cniTypes typeswrapper.CNITYPES, grpcClient grpcwrapper.GRPC, rpcClient rpcwrapper.RPC,
	driverClient driver.NetworkAPIs) error {
	conf, log, err := LoadNetConf(args.StdinData)
	// BUG FIX: the error must be checked before conf/log are used. On failure
	// LoadNetConf may return nil values, and the previous code dereferenced
	// both in a Debugf call ahead of this check, which could panic.
	if err != nil {
		return errors.Wrap(err, "del cmd: error loading config from args")
	}
	log.Debugf("Prev Result: %v\n", conf.PrevResult)

	log.Infof("Received CNI del request: ContainerID(%s) Netns(%s) IfName(%s) Args(%s) Path(%s) argsStdinData(%s)",
		args.ContainerID, args.Netns, args.IfName, args.Args, args.Path, args.StdinData)

	var k8sArgs K8sArgs
	if err := cniTypes.LoadArgs(args.Args, &k8sArgs); err != nil {
		log.Errorf("Failed to load k8s config from args: %v", err)
		return errors.Wrap(err, "del cmd: failed to load k8s config from args")
	}

	// For pods using branch ENI, try to delete using previous result
	handled, err := tryDelWithPrevResult(driverClient, conf, k8sArgs, args.IfName, args.Netns, log)
	if err != nil {
		return errors.Wrap(err, "del cmd: failed to delete with prevResult")
	}
	if handled {
		log.Infof("Handled CNI del request with prevResult: ContainerID(%s) Netns(%s) IfName(%s) PodNamespace(%s) PodName(%s)",
			args.ContainerID, args.Netns, args.IfName, string(k8sArgs.K8S_POD_NAMESPACE), string(k8sArgs.K8S_POD_NAME))
		return nil
	}

	// notify local IP address manager to free secondary IP
	// Set up a connection to the server.
	conn, err := grpcClient.Dial(ipamdAddress, grpc.WithInsecure())
	if err != nil {
		log.Errorf("Failed to connect to backend server for container %s: %v",
			args.ContainerID, err)

		// When IPAMD is unreachable, try to teardown pod network using previous result. This action prevents rules from leaking while IPAMD is unreachable.
		// Note that no error is returned to kubelet as there is no guarantee that kubelet will retry delete, and returning an error would prevent container runtime
		// from cleaning up resources. When IPAMD again becomes responsive, it is responsible for reclaiming IP.
		if teardownPodNetworkWithPrevResult(driverClient, conf, k8sArgs, args.IfName, log) {
			log.Infof("Handled pod teardown using prevResult: ContainerID(%s) Netns(%s) IfName(%s) PodNamespace(%s) PodName(%s)",
				args.ContainerID, args.Netns, args.IfName, string(k8sArgs.K8S_POD_NAMESPACE), string(k8sArgs.K8S_POD_NAME))
		} else {
			log.Infof("Could not teardown pod using prevResult: ContainerID(%s) Netns(%s) IfName(%s) PodNamespace(%s) PodName(%s)",
				args.ContainerID, args.Netns, args.IfName, string(k8sArgs.K8S_POD_NAMESPACE), string(k8sArgs.K8S_POD_NAME))
		}
		return nil
	}
	defer conn.Close()

	c := rpcClient.NewCNIBackendClient(conn)

	r, err := c.DelNetwork(context.Background(), &pb.DelNetworkRequest{
		ClientVersion:              version,
		K8S_POD_NAME:               string(k8sArgs.K8S_POD_NAME),
		K8S_POD_NAMESPACE:          string(k8sArgs.K8S_POD_NAMESPACE),
		K8S_POD_INFRA_CONTAINER_ID: string(k8sArgs.K8S_POD_INFRA_CONTAINER_ID),
		NetworkName:                conf.Name,
		ContainerID:                args.ContainerID,
		IfName:                     args.IfName,
		Reason:                     "PodDeleted",
	})

	if err != nil {
		if strings.Contains(err.Error(), datastore.ErrUnknownPod.Error()) {
			// Plugins should generally complete a DEL action without error even if some resources are missing. For example,
			// an IPAM plugin should generally release an IP allocation and return success even if the container network
			// namespace no longer exists, unless that network namespace is critical for IPAM management
			log.Infof("Container %s not found", args.ContainerID)
			return nil
		}
		log.Errorf("Error received from DelNetwork gRPC call for container %s: %v", args.ContainerID, err)

		// DelNetworkRequest may return a connection error, so try to delete using PrevResult whenever an error is returned. As with the case above, do
		// not return error to kubelet, as there is no guarantee that delete is retried.
		if teardownPodNetworkWithPrevResult(driverClient, conf, k8sArgs, args.IfName, log) {
			log.Infof("Handled pod teardown using prevResult: ContainerID(%s) Netns(%s) IfName(%s) PodNamespace(%s) PodName(%s)",
				args.ContainerID, args.Netns, args.IfName, string(k8sArgs.K8S_POD_NAMESPACE), string(k8sArgs.K8S_POD_NAME))
		} else {
			log.Infof("Could not teardown pod using prevResult: ContainerID(%s) Netns(%s) IfName(%s) PodNamespace(%s) PodName(%s)",
				args.ContainerID, args.Netns, args.IfName, string(k8sArgs.K8S_POD_NAMESPACE), string(k8sArgs.K8S_POD_NAME))
		}
		return nil
	}

	if !r.Success {
		log.Errorf("Failed to process delete request for container %s: Success == false",
			args.ContainerID)
		return errors.New("del cmd: failed to process delete request")
	}

	log.Infof("Received del network response from ipamd for pod %s namespace %s sandbox %s: %+v", string(k8sArgs.K8S_POD_NAME),
		string(k8sArgs.K8S_POD_NAMESPACE), string(k8sArgs.K8S_POD_INFRA_CONTAINER_ID), r)

	// Determine which address family ipamd released so the teardown uses the
	// matching host-route mask (/32 for IPv4, /128 for IPv6).
	var deletedPodIP net.IP
	var maskLen int
	if r.IPv4Addr != "" {
		deletedPodIP = net.ParseIP(r.IPv4Addr)
		maskLen = 32
	} else if r.IPv6Addr != "" {
		deletedPodIP = net.ParseIP(r.IPv6Addr)
		maskLen = 128
	}

	if deletedPodIP != nil {
		addr := &net.IPNet{
			IP:   deletedPodIP,
			Mask: net.CIDRMask(maskLen, maskLen),
		}

		// vlanID != 0 means pod using security group
		if r.PodVlanId != 0 {
			if isNetnsEmpty(args.Netns) {
				log.Infof("Ignoring TeardownPodENI as Netns is empty for SG pod:%s namespace: %s containerID:%s", k8sArgs.K8S_POD_NAME, k8sArgs.K8S_POD_NAMESPACE, k8sArgs.K8S_POD_INFRA_CONTAINER_ID)
				return nil
			}
			err = driverClient.TeardownBranchENIPodNetwork(addr, int(r.PodVlanId), conf.PodSGEnforcingMode, log)
		} else {
			err = driverClient.TeardownPodNetwork(addr, int(r.DeviceNumber), log)
		}

		if err != nil {
			log.Errorf("Failed on TeardownPodNetwork for container ID %s: %v",
				args.ContainerID, err)
			return errors.Wrap(err, "del cmd: failed on tear down pod network")
		}
	} else {
		log.Warnf("Container %s did not have a valid IP %s", args.ContainerID, r.IPv4Addr)
	}

	if r.NetworkPolicyMode == "" {
		log.Infof("NETWORK_POLICY_ENFORCING_MODE is not set")
		return nil
	}

	// Set up a connection to the network policy agent
	ctx, cancel := context.WithTimeout(context.Background(), npAgentConnTimeout*time.Second) // Set timeout
	defer cancel()
	npConn, err := grpcClient.DialContext(ctx, npAgentAddress, grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithBlock())
	if err != nil {
		log.Errorf("Failed to connect to network policy agent: %v. Network Policy agent might not be running", err)
		return errors.Wrap(err, "del cmd: failed to connect to network policy agent")
	}
	defer npConn.Close()

	//Make a GRPC call for network policy agent
	npc := rpcClient.NewNPBackendClient(npConn)
	npr, err := npc.DeletePodNp(context.Background(),
		&pb.DeleteNpRequest{
			K8S_POD_NAME:      string(k8sArgs.K8S_POD_NAME),
			K8S_POD_NAMESPACE: string(k8sArgs.K8S_POD_NAMESPACE),
		})

	// NP agent will never return an error if it is not able to delete ebpf probes
	if err != nil || !npr.Success {
		log.Errorf("Failed to delete pod network policy for Pod Name %s and NameSpace %s: GRPC returned - %v Network policy agent returned - %v",
			string(k8sArgs.K8S_POD_NAME), string(k8sArgs.K8S_POD_NAMESPACE), err, npr)
		// BUG FIX: when err != nil, npr is nil and the previously unconditional
		// npr.Success dereference below would panic. Return here instead; DEL is
		// best-effort, so no error is propagated to the runtime.
		return nil
	}
	log.Debugf("Network Policy agent for DeletePodNp returned Success : %v", npr.Success)
	return nil
}