in pkg/rpc/rpc_handler.go [53:132]
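// EnforceNpToPod handles an EnforceNpRequest for a pod: it attaches the eBPF probes for the pod
// and programs the pod's firewall or pod_state maps based on the locally cached policies and the
// requested network policy enforcement mode.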
func (s *server) EnforceNpToPod(ctx context.Context, in *rpc.EnforceNpRequest) (*rpc.EnforceNpReply, error) {
	if s.policyReconciler == nil || s.policyReconciler.GeteBPFClient() == nil {
		s.log.Info("Network policy is disabled, returning success")
		success := rpc.EnforceNpReply{
			Success: true,
		}
		return &success, nil
	}
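	// Log the request and reject unsupported enforcement modes before touching any eBPF state.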
s.log.Info("Received Enforce Network Policy Request for Pod", "Name", in.K8S_POD_NAME, "Namespace", in.K8S_POD_NAMESPACE, "Mode", in.NETWORK_POLICY_MODE)
var err error
if !utils.IsValidNetworkPolicyEnforcingMode(in.NETWORK_POLICY_MODE) {
err = errors.New("Invalid Network Policy Mode")
s.log.Error(err, "Network Policy Mode validation failed ", in.NETWORK_POLICY_MODE)
return nil, err
}
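	// Record the enforcement mode, derive the pod identifier (shared by replicas of the same pod spec),
	// and attach the eBPF probes for this pod. The firewall maps referenced by the probes are shared
	// across local pods with the same pod identifier.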
	s.policyReconciler.SetNetworkPolicyMode(in.NETWORK_POLICY_MODE)
	podIdentifier := utils.GetPodIdentifier(in.K8S_POD_NAME, in.K8S_POD_NAMESPACE, s.log)
	isFirstPodInPodIdentifier := s.policyReconciler.GeteBPFClient().IsFirstPodInPodIdentifier(podIdentifier)
	err = s.policyReconciler.GeteBPFClient().AttacheBPFProbes(types.NamespacedName{Name: in.K8S_POD_NAME, Namespace: in.K8S_POD_NAMESPACE},
		podIdentifier)
	if err != nil {
		s.log.Error(err, "Attaching eBPF probes failed", "pod", in.K8S_POD_NAME, "namespace", in.K8S_POD_NAMESPACE)
		return nil, err
	}
	// Attempt to program eBPF firewall map entries for this pod if the local agent already knows the
	// policies configured against it. For example, if this is a new replica of an existing pod/deployment,
	// the local node agent will already have the policy information available. If not, the pod is left in
	// default-allow or default-deny state (based on the network policy mode) until the Network Policy
	// controller reconciles existing policies against it.
	// Also check whether other pods on the local node already share the eBPF firewall maps with the newly
	// launched pod; if so, the map update can be skipped.
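	// In summary:
	//   - policies cached locally && first pod for this identifier -> derive rules and program the firewall maps
	//   - no policies cached locally                                -> set pod_state to default_deny (strict mode) or default_allow (otherwise)
	//   - policies cached locally && maps already programmed        -> nothing to do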
	policiesAvailableInLocalCache := s.policyReconciler.ArePoliciesAvailableInLocalCache(podIdentifier)
	if policiesAvailableInLocalCache && isFirstPodInPodIdentifier {
		// If we're here, the local agent knows the list of active policies that apply to this pod and
		// this is the first pod of its type to land on the local node.
		s.log.Info("Active policies present against this pod and this is a new Pod to the local node, configuring firewall rules...")
		// Derive ingress and egress firewall rules and update the relevant eBPF maps.
		ingressRules, egressRules, _ :=
			s.policyReconciler.DeriveFireWallRulesPerPodIdentifier(podIdentifier, in.K8S_POD_NAMESPACE)
		err = s.policyReconciler.GeteBPFClient().UpdateEbpfMaps(podIdentifier, ingressRules, egressRules)
		if err != nil {
			s.log.Error(err, "Map update(s) failed for", "podIdentifier", podIdentifier)
			return nil, err
		}
	} else {
		// No active policies known for this pod identifier: set pod_state to default_allow or default_deny.
		if !policiesAvailableInLocalCache {
			s.log.Info("No active policies present for", "podIdentifier", podIdentifier)
			podState := DEFAULT_ALLOW
			if utils.IsStrictMode(in.NETWORK_POLICY_MODE) {
				podState = DEFAULT_DENY
				s.log.Info("Updating pod_state map to default_deny for", "podIdentifier", podIdentifier)
			} else {
				s.log.Info("Updating pod_state map to default_allow for", "podIdentifier", podIdentifier)
			}
			err = s.policyReconciler.GeteBPFClient().UpdatePodStateEbpfMaps(podIdentifier, podState, true, true)
			if err != nil {
				s.log.Error(err, "Map update(s) failed for", "podIdentifier", podIdentifier)
				return nil, err
			}
		} else {
			s.log.Info("Pod shares the eBPF firewall maps with other local pods. No map update required")
		}
	}
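	// All failure paths above return early, so err is nil here and the reply reports success.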
	resp := rpc.EnforceNpReply{
		Success: err == nil,
	}
	return &resp, nil
}
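For context, a minimal caller sketch follows. It assumes the generated gRPC client for this service is rpc.NewNPBackendClient, that the stubs live at github.com/aws/amazon-vpc-cni-k8s/rpc, and that the agent listens on 127.0.0.1:50052 with a "standard" enforcement mode; all of these are illustrative assumptions rather than values confirmed by this file (only the EnforceNpToPod method and the request/reply field names appear in the handler above).

// Caller sketch, not part of rpc_handler.go. Client constructor name, import path,
// listen address, and mode value are assumptions for illustration only.
package main

import (
	"context"
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"

	// Assumed import path for the generated rpc stubs; adjust to the module that owns rpc.proto.
	"github.com/aws/amazon-vpc-cni-k8s/rpc"
)

func main() {
	// Connect to the locally running node agent (address is an assumption).
	conn, err := grpc.Dial("127.0.0.1:50052", grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatalf("dial failed: %v", err)
	}
	defer conn.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	// Assumed generated client constructor name.
	client := rpc.NewNPBackendClient(conn)
	reply, err := client.EnforceNpToPod(ctx, &rpc.EnforceNpRequest{
		K8S_POD_NAME:        "nginx-7d9c6b8c5f-abcde",
		K8S_POD_NAMESPACE:   "default",
		NETWORK_POLICY_MODE: "standard", // must pass utils.IsValidNetworkPolicyEnforcingMode on the server
	})
	if err != nil {
		log.Fatalf("EnforceNpToPod failed: %v", err)
	}
	log.Printf("enforce success: %v", reply.Success)
}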