func startContainer()

in agent/engine/docker_task_engine.go [1301:1412]


func (engine *DockerTaskEngine) startContainer(task *apitask.Task, container *apicontainer.Container) dockerapi.DockerContainerMetadata {
	seelog.Infof("Task engine [%s]: starting container: %s (Runtime ID: %s)", task.Arn, container.Name, container.GetRuntimeID())
	client := engine.client
	if container.DockerConfig.Version != nil {
		client = client.WithVersion(dockerclient.DockerVersion(*container.DockerConfig.Version))
	}

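	// Look up the Docker ID that was recorded for this container when it was created;
	// the start call needs Docker's container ID, not the ECS container name.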
	dockerID, err := engine.getDockerID(task, container)
	if err != nil {
		return dockerapi.DockerContainerMetadata{
			Error: dockerapi.CannotStartContainerError{
				FromError: err,
			},
		}
	}

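	// Start the container through the Docker API and record how long the call takes.
	// ContainerStartTimeout bounds how long we wait for the start call to complete.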
	startContainerBegin := time.Now()
	dockerContainerMD := client.StartContainer(engine.ctx, dockerID, engine.cfg.ContainerStartTimeout)
	if dockerContainerMD.Error != nil {
		return dockerContainerMD
	}

	seelog.Infof("Task engine [%s]: started docker container for task: %s -> %s, took %s",
		task.Arn, container.Name, dockerContainerMD.DockerID, time.Since(startContainerBegin))

	// Get metadata through container inspection and the available task information, then write it to the metadata file.
	// This is done in the background to avoid delaying container start.
	// TODO: Add a state to apicontainer.Container for the status of the metadata file (whether it needs an update), and
	// add logic to engine state restoration to update the metadata file for containers that are running after an agent restart.
	if engine.cfg.ContainerMetadataEnabled.Enabled() && !container.IsInternal() {
		go func() {
			err := engine.metadataManager.Update(engine.ctx, dockerID, task, container.Name)
			if err != nil {
				seelog.Warnf("Task engine [%s]: failed to update metadata file for container %s: %v",
					task.Arn, container.Name, err)
				return
			}
			container.SetMetadataFileUpdated()
			seelog.Debugf("Task engine [%s]: updated metadata file for container %s",
				task.Arn, container.Name)
		}()
	}

	// If the container is a firelens container, the fluent host needs to be added to the task's environment variables.
	// Of the supported network modes, bridge and awsvpc, awsvpc uses 127.0.0.1 as the host, but in bridge mode we must
	// wait for the container's IP to be available before a container that uses firelens can be created.
	if container.GetFirelensConfig() != nil {
		if !task.IsNetworkModeAWSVPC() && (container.GetNetworkModeFromHostConfig() == "" || container.GetNetworkModeFromHostConfig() == apitask.BridgeNetworkMode) {
			_, gotContainerIP := getContainerHostIP(dockerContainerMD.NetworkSettings)
			if !gotContainerIP {
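				// The bridge IP may not be assigned immediately after start, so poll InspectContainer
				// with exponential backoff for up to a minute until the IP shows up.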
				getIPBridgeBackoff := retry.NewExponentialBackoff(minGetIPBridgeTimeout, maxGetIPBridgeTimeout, getIPBridgeRetryJitterMultiplier, getIPBridgeRetryDelayMultiplier)
				contextWithTimeout, cancel := context.WithTimeout(engine.ctx, time.Minute)
				defer cancel()
				err := retry.RetryWithBackoffCtx(contextWithTimeout, getIPBridgeBackoff, func() error {
					inspectOutput, err := engine.client.InspectContainer(engine.ctx, dockerContainerMD.DockerID,
						dockerclient.InspectContainerTimeout)
					if err != nil {
						return err
					}
					_, gotIPBridge := getContainerHostIP(inspectOutput.NetworkSettings)
					if !gotIPBridge {
						return errors.New("Bridge IP not available to use for firelens")
					}
					dockerContainerMD.NetworkSettings = inspectOutput.NetworkSettings
					return nil
				})
				if err != nil {
					return dockerapi.DockerContainerMetadata{
						Error: dockerapi.CannotStartContainerError{FromError: err},
					}
				}
			}
		}
	}
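	// If ECS Exec is enabled for this container, start the ExecuteCommandAgent process
	// (unless its initialization already failed) and report the outcome as a managed agent event.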
	if execcmd.IsExecEnabledContainer(container) {
		if ma, _ := container.GetManagedAgentByName(execcmd.ExecuteCommandAgentName); !ma.InitFailed {
			reason := "ExecuteCommandAgent started"
			if err := engine.execCmdMgr.StartAgent(engine.ctx, engine.client, task, container, dockerID); err != nil {
				reason = err.Error()
				seelog.Errorf("Task engine [%s]: Failed to start ExecCommandAgent Process for container [%s]: %v", task.Arn, container.Name, err)
			}

			engine.tasksLock.RLock()
			mTask, ok := engine.managedTasks[task.Arn]
			engine.tasksLock.RUnlock()
			// Whether the agent started or failed to start, we want to emit a managed agent state change event.
			// Redundant state change events such as RUNNING->RUNNING are allowed.
			if ok {
				mTask.emitManagedAgentEvent(mTask.Task, container, execcmd.ExecuteCommandAgentName, reason)
			} else {
				seelog.Errorf("Task engine [%s]: Failed to update status of ExecCommandAgent Process for container [%s]: managed task not found", task.Arn, container.Name)
			}
		}
	}

	// On Windows, we need to invoke CNI plugins for all containers.
	// invokePluginsForContainer is a no-op and returns nil on other platforms.
	if dockerContainerMD.Error == nil && task.IsNetworkModeAWSVPC() && !container.IsInternal() {
		err := engine.invokePluginsForContainer(task, container)
		if err != nil {
			return dockerapi.DockerContainerMetadata{
				Error: ContainerNetworkingError{
					fromError: errors.Wrapf(err, "startContainer: cni plugin invocation failed"),
				},
			}
		}
	}

	return dockerContainerMD
}
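
For context, getContainerHostIP is referenced above but not included in this excerpt. A minimal sketch of that helper, assuming it only reads the container's IP out of Docker's NetworkSettings (the *types.NetworkSettings parameter and field names are assumptions based on the Docker API types, not the agent's actual implementation), might look like:

// Sketch only, not the agent's implementation: assumes *types.NetworkSettings from
// github.com/docker/docker/api/types and that the helper returns (hostIP, ok).
func getContainerHostIP(networkSettings *types.NetworkSettings) (string, bool) {
	if networkSettings == nil {
		return "", false
	}
	if networkSettings.IPAddress != "" {
		return networkSettings.IPAddress, true
	}
	// In bridge mode the IP may be recorded per network rather than at the top level.
	for _, network := range networkSettings.Networks {
		if network != nil && network.IPAddress != "" {
			return network.IPAddress, true
		}
	}
	return "", false
}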