in cns/restserver/util.go [141:251]
func (service *HTTPRestService) saveNetworkContainerGoalState(req cns.CreateNetworkContainerRequest) (types.ResponseCode, string) { //nolint // legacy
	// we don't want to overwrite what other calls may have written
	service.Lock()
	defer service.Unlock()
	var (
		hostVersion                string
		existingSecondaryIPConfigs map[string]cns.SecondaryIPConfig // uuid is key
		vfpUpdateComplete          bool
	)
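	// Initialize the ContainerStatus map on first use so the write below does not panic on a nil map.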
	if service.state.ContainerStatus == nil {
		service.state.ContainerStatus = make(map[string]containerstatus)
	}
	existingNCStatus, ok := service.state.ContainerStatus[req.NetworkContainerid]
	if ok {
		hostVersion = existingNCStatus.HostVersion
		existingSecondaryIPConfigs = existingNCStatus.CreateNetworkContainerRequest.SecondaryIPConfigs
		vfpUpdateComplete = existingNCStatus.VfpUpdateComplete
	}
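	// For the node-subnet NC, use the fixed node-subnet host version and treat the VFP update as already complete.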
	if req.NetworkContainerid == nodesubnet.NodeSubnetNCID {
		hostVersion = nodesubnet.NodeSubnetHostVersion
		vfpUpdateComplete = true
	}
	if hostVersion == "" {
		// Host version is the NC version as reported by NMAgent; set it to -1 to indicate no result from NMAgent yet.
		// TODO: query NMAgent with an aggressive timeout and assign the latest host version.
		hostVersion = "-1"
	}
	// Remove the auth token before saving the containerStatus to the CNS json file.
	createNetworkContainerRequest := req
	createNetworkContainerRequest.AuthorizationToken = ""
	service.state.ContainerStatus[req.NetworkContainerid] = containerstatus{
		ID:                            req.NetworkContainerid,
		VMVersion:                     req.Version,
		CreateNetworkContainerRequest: createNetworkContainerRequest,
		HostVersion:                   hostVersion,
		VfpUpdateComplete:             vfpUpdateComplete,
	}
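	// Record orchestrator-specific bookkeeping for the supported network container types.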
	switch req.NetworkContainerType {
	case cns.AzureContainerInstance:
		fallthrough
	case cns.Docker:
		fallthrough
	case cns.Kubernetes:
		fallthrough
	case cns.Basic:
		fallthrough
	case cns.JobObject:
		fallthrough
	case cns.COW, cns.BackendNICNC, cns.WebApps:
		switch service.state.OrchestratorType {
		case cns.Kubernetes:
			fallthrough
		case cns.ServiceFabric:
			fallthrough
		case cns.Batch:
			fallthrough
		case cns.DBforPostgreSQL:
			fallthrough
		case cns.AzureFirstParty:
			fallthrough
		case cns.WebApps, cns.BackendNICNC: // TODO: is WebApps an OrchestratorType or a ContainerType?
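			// Key the NC by orchestrator context (pod name + namespace) so later requests for this pod can resolve its NCs.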
			podInfo, err := cns.UnmarshalPodInfo(req.OrchestratorContext)
			if err != nil {
				errBuf := fmt.Sprintf("Unmarshalling %s failed with error %v", req.NetworkContainerType, err)
				return types.UnexpectedError, errBuf
			}
			orchestratorContext := podInfo.Name() + podInfo.Namespace()
			if service.state.ContainerIDByOrchestratorContext == nil {
				service.state.ContainerIDByOrchestratorContext = make(map[string]*ncList)
			}
			if _, ok := service.state.ContainerIDByOrchestratorContext[orchestratorContext]; !ok {
				service.state.ContainerIDByOrchestratorContext[orchestratorContext] = new(ncList)
			}
			ncs := service.state.ContainerIDByOrchestratorContext[orchestratorContext]
			ncs.Add(req.NetworkContainerid)
			logger.Printf("service.state.ContainerIDByOrchestratorContext[%s] is %+v", orchestratorContext, *service.state.ContainerIDByOrchestratorContext[orchestratorContext])
		case cns.KubernetesCRD:
			// Validate and update the SecondaryIPConfig state.
			returnCode, returnMessage := service.updateIPConfigsStateUntransacted(req, existingSecondaryIPConfigs, hostVersion)
			if returnCode != 0 {
				return returnCode, returnMessage
			}
		default:
			errMsg := fmt.Sprintf("Unsupported orchestrator type: %s", service.state.OrchestratorType)
			logger.Errorf(errMsg)
			return types.UnsupportedOrchestratorType, errMsg
		}
	default:
		errMsg := fmt.Sprintf("Unsupported network container type %s", req.NetworkContainerType)
		logger.Errorf(errMsg)
		return types.UnsupportedNetworkContainerType, errMsg
	}
	service.saveState()
	return 0, ""
}
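For readers tracing the control flow, the two switch statements above amount to nested allow-lists: the outer fallthrough chain accepts a fixed set of network container types, and the inner chain accepts a fixed set of orchestrator types before branching on cns.KubernetesCRD. A minimal, self-contained sketch of the same shape (using hypothetical string constants and stub bodies rather than the real cns types) might look like this:

package main

import "fmt"

// Hypothetical stand-ins for a few of the cns constants; the real values live in the cns package.
const (
	Docker        = "Docker"
	Kubernetes    = "Kubernetes"
	KubernetesCRD = "KubernetesCRD"
	Batch         = "Batch"
)

// saveGoalState mirrors the shape of saveNetworkContainerGoalState's switches:
// an outer allow-list on container type and an inner allow-list on orchestrator type.
func saveGoalState(containerType, orchestratorType string) error {
	switch containerType {
	case Docker, Kubernetes: // a multi-value case behaves like the fallthrough chain above
		switch orchestratorType {
		case Kubernetes, Batch:
			// record the pod-to-NC mapping keyed by orchestrator context
			return nil
		case KubernetesCRD:
			// validate and update the secondary IP config state
			return nil
		default:
			return fmt.Errorf("unsupported orchestrator type: %s", orchestratorType)
		}
	default:
		return fmt.Errorf("unsupported network container type: %s", containerType)
	}
}

func main() {
	fmt.Println(saveGoalState(Docker, KubernetesCRD)) // <nil>
	fmt.Println(saveGoalState(Docker, "Unknown"))     // unsupported orchestrator type: Unknown
}

The multi-value case is interchangeable with the fallthrough chain here only because every case in the original chain falls straight through without a body of its own.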