func()

in agent/engine/docker_task_engine.go [1790:2147]


// createContainer resolves the full Docker create-time configuration for the
// given container (host config, log driver overrides, secrets, credentialspecs,
// env files, exec agent, metadata mounts), registers a name mapping in engine
// state, and finally calls CreateContainer on the Docker client. All failures
// are reported through the returned DockerContainerMetadata.Error; the method
// never returns a partial metadata with a Docker ID on error.
func (engine *DockerTaskEngine) createContainer(task *apitask.Task, container *apicontainer.Container) dockerapi.DockerContainerMetadata {
	logger.Info("Creating container", logger.Fields{
		field.TaskID:    task.GetID(),
		field.Container: container.Name,
	})
	client := engine.client
	if container.DockerConfig.Version != nil {
		minVersion := dockerclient.GetSupportedDockerAPIVersion(dockerclient.DockerVersion(*container.DockerConfig.Version))
		logger.Debug("CreateContainer: overriding docker API version in task payload", logger.Fields{
			field.TaskID:              task.GetID(),
			field.Container:           container.Name,
			"usingDockerAPIVersion":   minVersion,
			"payloadDockerAPIVersion": *container.DockerConfig.Version,
		})
		// Error in creating versioned client is dealt with later when client.APIVersion() is called
		client, _ = client.WithVersion(minVersion)
	}

	dockerContainerName := ""
	containerMap, ok := engine.state.ContainerMapByArn(task.Arn)
	if !ok {
		containerMap = make(map[string]*apicontainer.DockerContainer)
	} else {
		// looking for container that has docker name but not created
		for _, v := range containerMap {
			if v.Container.Name == container.Name {
				dockerContainerName = v.DockerName
				break
			}
		}
	}

	// Resolve HostConfig
	// we have to do this in create, not start, because docker no longer handles
	// merging create config with start host-config the same; e.g. memory limits get lost
	dockerClientVersion, versionErr := client.APIVersion()
	if versionErr != nil {
		return dockerapi.DockerContainerMetadata{Error: CannotGetDockerClientVersionError{versionErr}}
	}
	hostConfig, hcerr := task.DockerHostConfig(container, containerMap, dockerClientVersion, engine.cfg)
	if hcerr != nil {
		return dockerapi.DockerContainerMetadata{Error: apierrors.NamedError(hcerr)}
	}

	// Add Service Connect modifications if needed
	if task.IsServiceConnectEnabled() {
		err := engine.serviceconnectManager.AugmentTaskContainer(
			task, container, hostConfig, engine.cfg.InstanceIPCompatibility)
		if err != nil {
			return dockerapi.DockerContainerMetadata{Error: apierrors.NewNamedError(err)}
		}
	}
	if container.Type == apicontainer.ContainerServiceConnectRelay {
		err := engine.serviceconnectManager.AugmentInstanceContainer(task, container, hostConfig)
		if err != nil {
			return dockerapi.DockerContainerMetadata{Error: apierrors.NewNamedError(err)}
		}
	}

	if container.AWSLogAuthExecutionRole() {
		err := task.ApplyExecutionRoleLogsAuth(hostConfig, engine.credentialsManager)
		if err != nil {
			return dockerapi.DockerContainerMetadata{Error: apierrors.NamedError(err)}
		}
	}

	firelensConfig := container.GetFirelensConfig()
	if firelensConfig != nil {
		err := task.AddFirelensContainerBindMounts(firelensConfig, hostConfig, engine.cfg)
		if err != nil {
			return dockerapi.DockerContainerMetadata{Error: apierrors.NamedError(err)}
		}

		cerr := task.PopulateSecretLogOptionsToFirelensContainer(container)
		if cerr != nil {
			return dockerapi.DockerContainerMetadata{Error: apierrors.NamedError(cerr)}
		}

		if firelensConfig.Type == firelens.FirelensConfigTypeFluentd {
			// For fluentd router, needs to specify FLUENT_UID to root in order for the fluentd process to access
			// the socket created by Docker.
			container.MergeEnvironmentVariables(map[string]string{
				"FLUENT_UID": "0",
			})
		}
	}

	// If the container is using a special log driver type "awsfirelens", it means the container wants to use
	// the firelens container to send logs. In this case, override the log driver type to be fluentd
	// and specify appropriate tag and fluentd-address, so that the logs are sent to and routed by the firelens container.
	// Update the environment variables FLUENT_HOST and FLUENT_PORT depending on the supported network modes - bridge
	// and awsvpc. For reference - https://docs.docker.com/config/containers/logging/fluentd/.
	if hostConfig.LogConfig.Type == logDriverTypeFirelens {
		// We need the Docker server version in order to generate the appropriate firelens log config
		dockerServerVersion, err := engine.Version()
		if err != nil {
			logger.Error("Failed to determine Docker server version for Firelens log config generation", logger.Fields{
				field.TaskID:    task.GetID(),
				field.Container: container.Name,
				field.Error:     err,
			})
			// Wrap err (the Version() failure); versionErr is guaranteed nil here
			// since it was checked above, and wrapping it would yield a nil error.
			return dockerapi.DockerContainerMetadata{
				Error: dockerapi.CannotCreateContainerError{FromError: errors.Wrapf(err,
					"failed to create container - container uses awsfirelens log driver and we failed to "+
						"determine the Docker server version for Firelens log config generation")},
			}
		}
		logConfig, err := getFirelensLogConfig(task, container, hostConfig, engine.cfg, dockerServerVersion)
		if err != nil {
			logger.Error("Failed to generate the Firelens log config", logger.Fields{
				field.TaskID:    task.GetID(),
				field.Container: container.Name,
				field.Error:     err,
			})
			return dockerapi.DockerContainerMetadata{
				Error: dockerapi.CannotCreateContainerError{FromError: errors.Wrapf(err,
					"failed to create container - container uses awsfirelens log driver and we failed to "+
						"generate the Firelens log config")},
			}
		}
		hostConfig.LogConfig = logConfig

		if task.IsNetworkModeAWSVPC() {
			container.MergeEnvironmentVariables(map[string]string{
				fluentNetworkHost: FluentAWSVPCHostValue,
				fluentNetworkPort: FluentNetworkPortValue,
			})
		} else if container.GetNetworkModeFromHostConfig() == "" || container.GetNetworkModeFromHostConfig() == apitask.BridgeNetworkMode {
			targetContainer := task.GetFirelensContainer()
			// For bridge-mode ServiceConnect-enabled tasks, we inject pause container for each application container
			// including the firelens container. Therefore, when resolving the container IP, we should be checking that
			// of the associated pause container.
			if task.IsServiceConnectEnabled() {
				var err error
				targetContainer, err = task.GetBridgeModePauseContainerForTaskContainer(targetContainer)
				if err != nil {
					logger.Error("Failed to create container", logger.Fields{
						field.TaskID:    task.GetID(),
						field.Container: container.Name,
						field.Error: errors.Errorf(
							"container uses awsfirelens log driver but we failed to resolve Firelens bridge IP: %v", err),
					})
					return dockerapi.DockerContainerMetadata{
						Error: dockerapi.CannotCreateContainerError{FromError: errors.Errorf(
							"failed to create container - container uses awsfirelens log driver but we failed to "+
								"resolve Firelens bridge IP: %v", err)},
					}
				}
			}
			ipAddress, ok := getContainerHostIP(targetContainer.GetNetworkSettings())
			if !ok {
				err := apierrors.DockerClientConfigError{Msg: "unable to get BridgeIP for task in bridge mode"}
				return dockerapi.DockerContainerMetadata{Error: apierrors.NamedError(&err)}
			}
			container.MergeEnvironmentVariables(map[string]string{
				fluentNetworkHost: ipAddress,
				fluentNetworkPort: FluentNetworkPortValue,
			})
		}
	}

	// This is a short term solution only for specific regions
	if hostConfig.LogConfig.Type == logDriverTypeAwslogs {
		if engine.cfg.InstanceIPCompatibility.IsIPv6Only() {
			engine.setAWSLogsDualStackEndpoint(task, container, hostConfig)
		} else {
			region := engine.cfg.AWSRegion
			if _, ok := unresolvedIsolatedRegions[region]; ok {
				endpoint := ""
				dnsSuffix := ""
				partition, ok := ep.PartitionForRegion(ep.DefaultPartitions(), region)
				if !ok {
					logger.Warn("No partition resolved for region. Using AWS default", logger.Fields{
						"region":           region,
						"defaultDNSSuffix": ep.AwsPartition().DNSSuffix(),
					})
					dnsSuffix = ep.AwsPartition().DNSSuffix()
				} else {
					resolvedEndpoint, err := partition.EndpointFor("logs", region)
					if err == nil {
						endpoint = resolvedEndpoint.URL
					} else {
						dnsSuffix = partition.DNSSuffix()
					}
				}
				if endpoint == "" {
					endpoint = fmt.Sprintf("https://logs.%s.%s", region, dnsSuffix)
				}
				hostConfig.LogConfig.Config[awsLogsEndpointKey] = endpoint
			}
		}
	}

	// If the container has mode=non-blocking set, and the max-buffer-size field is
	// unset, then default to maxBufferSizeDefault.
	if mode, ok := hostConfig.LogConfig.Config[logDriverMode]; ok {
		_, hasBufferSize := hostConfig.LogConfig.Config[logDriverBufferSize]
		if mode == string(dockercontainer.LogModeNonBlock) && !hasBufferSize {
			hostConfig.LogConfig.Config[logDriverBufferSize] = maxBufferSizeDefault
		}
	}

	// Apply the log driver secret into container's LogConfig and Env secrets to container.Environment
	hasSecretAsEnvOrLogDriver := func(s apicontainer.Secret) bool {
		return s.Type == apicontainer.SecretTypeEnv || s.Target == apicontainer.SecretTargetLogDriver
	}
	if container.HasSecret(hasSecretAsEnvOrLogDriver) {
		err := task.PopulateSecrets(hostConfig, container)

		if err != nil {
			return dockerapi.DockerContainerMetadata{Error: apierrors.NamedError(err)}
		}
	}

	// Populate credentialspec resource
	if container.RequiresAnyCredentialSpec() {
		logger.Debug("Obtained container with credentialspec resource requirement for task", logger.Fields{
			field.TaskID:    task.GetID(),
			field.Container: container.Name,
		})
		var credSpecResource *credentialspec.CredentialSpecResource
		resource, ok := task.GetCredentialSpecResource()
		if !ok || len(resource) <= 0 {
			resMissingErr := &apierrors.DockerClientConfigError{Msg: "unable to fetch task resource credentialspec"}
			return dockerapi.DockerContainerMetadata{Error: apierrors.NamedError(resMissingErr)}
		}
		credSpecResource = resource[0].(*credentialspec.CredentialSpecResource)

		containerCredSpec, err := container.GetCredentialSpec()
		if err == nil && containerCredSpec != "" {
			// on windows CredentialSpec mapping: input := credentialspec:file://test.json, output := credentialspec=file://test.json
			// on linux CredentialSpec mapping: input := ssm/asm arn, output := /var/credentials-fetcher/krbdir/123456/ccname_webapp01_xyz
			desiredCredSpecInjection, err := credSpecResource.GetTargetMapping(containerCredSpec)
			if err != nil || desiredCredSpecInjection == "" {
				missingErr := &apierrors.DockerClientConfigError{Msg: "unable to fetch valid credentialspec mapping"}
				return dockerapi.DockerContainerMetadata{Error: apierrors.NamedError(missingErr)}
			}
			engine.updateCredentialSpecMapping(task.GetID(), container.Name, desiredCredSpecInjection, hostConfig)
		} else {
			emptyErr := &apierrors.DockerClientConfigError{Msg: "unable to fetch valid credentialspec: " + err.Error()}
			return dockerapi.DockerContainerMetadata{Error: apierrors.NamedError(emptyErr)}
		}
	}

	if container.ShouldCreateWithEnvFiles() {
		err := task.MergeEnvVarsFromEnvfiles(container)
		if err != nil {
			logger.Error("Error populating environment variables from specified files into container", logger.Fields{
				field.TaskID:    task.GetID(),
				field.Container: container.Name,
				field.Error:     err,
			})
			return dockerapi.DockerContainerMetadata{Error: apierrors.NamedError(err)}
		}
	}

	if execcmd.IsExecEnabledContainer(container) {
		tID := task.GetID()
		err := engine.execCmdMgr.InitializeContainer(tID, container, hostConfig)
		if err != nil {
			logger.Warn("Error initializing ExecCommandAgent; proceeding to start container without exec feature", logger.Fields{
				field.TaskID:    task.GetID(),
				field.Container: container.Name,
				field.Error:     err,
			})
			// Emit a managedagent state change event if exec agent initialization fails
			engine.tasksLock.RLock()
			mTask, ok := engine.managedTasks[task.Arn]
			engine.tasksLock.RUnlock()
			if ok {
				mTask.emitManagedAgentEvent(mTask.Task, container, execcmd.ExecuteCommandAgentName, fmt.Sprintf("ExecuteCommandAgent Initialization failed - %v", err))
			} else {
				logger.Error("Failed to update status of ExecCommandAgent Process for container", logger.Fields{
					field.TaskID:    task.GetID(),
					field.Container: container.Name,
					field.Error:     "managed task not found",
				})
			}
		}
	}

	config, err := task.DockerConfig(container, dockerClientVersion)
	if err != nil {
		return dockerapi.DockerContainerMetadata{Error: apierrors.NamedError(err)}
	}

	// Augment labels with some metadata from the agent. Explicitly do this last
	// such that it will always override duplicates in the provided raw config
	// data.
	config.Labels[labelTaskARN] = task.Arn
	config.Labels[labelContainerName] = container.Name
	config.Labels[labelTaskDefinitionFamily] = task.Family
	config.Labels[labelTaskDefinitionVersion] = task.Version
	config.Labels[labelCluster] = engine.cfg.Cluster

	if dockerContainerName == "" {
		// only alphanumeric and hyphen characters are allowed
		reInvalidChars := regexp.MustCompile("[^A-Za-z0-9-]+")
		name := reInvalidChars.ReplaceAllString(container.Name, "")

		dockerContainerName = "ecs-" + task.Family + "-" + task.Version + "-" + name + "-" + utils.RandHex()

		// Pre-add the container in case we stop before the next, more useful,
		// AddContainer call. This ensures we have a way to get the container if
		// we die before 'createContainer' returns because we can inspect by
		// name
		engine.state.AddContainer(&apicontainer.DockerContainer{
			DockerName: dockerContainerName,
			Container:  container,
		}, task)
		logger.Info("Created container name mapping for task", logger.Fields{
			field.TaskID:          task.GetID(),
			field.Container:       container.Name,
			"dockerContainerName": dockerContainerName,
		})
	}

	// Create metadata directory and file then populate it with common metadata of all containers of this task
	// Afterwards add this directory to the container's mounts if file creation was successful
	if engine.cfg.ContainerMetadataEnabled.Enabled() && !container.IsInternal() {
		info, infoErr := engine.client.Info(engine.ctx, dockerclient.InfoTimeout)
		if infoErr != nil {
			// Best-effort: proceed with the zero-value info (empty SecurityOptions)
			logger.Warn("Unable to get docker info", logger.Fields{
				field.TaskID:    task.GetID(),
				field.Container: container.Name,
				field.Error:     infoErr,
			})
		}
		mderr := engine.metadataManager.Create(config, hostConfig, task, container.Name, info.SecurityOptions)
		if mderr != nil {
			logger.Warn("Unable to create metadata for container", logger.Fields{
				field.TaskID:    task.GetID(),
				field.Container: container.Name,
				field.Error:     mderr,
			})
		}
	}

	createContainerBegin := time.Now()
	metadata := client.CreateContainer(engine.ctx, config, hostConfig,
		dockerContainerName, engine.cfg.ContainerCreateTimeout)
	if metadata.DockerID != "" {
		dockerContainer := &apicontainer.DockerContainer{DockerID: metadata.DockerID,
			DockerName: dockerContainerName,
			Container:  container}
		engine.state.AddContainer(dockerContainer, task)
		engine.saveDockerContainerData(dockerContainer)
	}
	container.SetLabels(config.Labels)
	logger.Info("Created docker container for task", logger.Fields{
		field.TaskID:    task.GetID(),
		field.Container: container.Name,
		field.DockerId:  metadata.DockerID,
		field.Elapsed:   time.Since(createContainerBegin),
	})
	container.SetRuntimeID(metadata.DockerID)
	return metadata
}