startContainer()

in agent/engine/docker_task_engine.go [2209:2395]


func (engine *DockerTaskEngine) startContainer(task *apitask.Task, container *apicontainer.Container) dockerapi.DockerContainerMetadata {
	logger.Info("Starting container", logger.Fields{
		field.TaskID:    task.GetID(),
		field.Container: container.Name,
		field.RuntimeID: container.GetRuntimeID(),
	})
	client := engine.client
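	// If the task payload pins a Docker API version for this container, switch to a client that speaks at least that version.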
	if container.DockerConfig.Version != nil {
		minVersion := dockerclient.GetSupportedDockerAPIVersion(dockerclient.DockerVersion(*container.DockerConfig.Version))
		logger.Debug("StartContainer: overriding docker API version in task payload", logger.Fields{
			field.TaskID:              task.GetID(),
			field.Container:           container.Name,
			"usingDockerAPIVersion":   minVersion,
			"payloadDockerAPIVersion": *container.DockerConfig.Version,
		})
		// Error in creating versioned client is dealt with later when client.StartContainer() is called
		client, _ = client.WithVersion(minVersion)
	}

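	// Resolve the Docker ID recorded for this container at creation time; without it the container cannot be started.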
	dockerID, err := engine.getDockerID(task, container)
	if err != nil {
		return dockerapi.DockerContainerMetadata{
			Error: dockerapi.CannotStartContainerError{
				FromError: err,
			},
		}
	}

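	// Start the container, timing the call so the elapsed duration can be logged below.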
	startContainerBegin := time.Now()
	dockerContainerMD := client.StartContainer(engine.ctx, dockerID, engine.cfg.ContainerStartTimeout)
	if dockerContainerMD.Error != nil {
		return dockerContainerMD
	}

	logger.Info("Started container", logger.Fields{
		field.TaskID:    task.GetID(),
		field.Container: container.Name,
		field.RuntimeID: container.GetRuntimeID(),
		field.Elapsed:   time.Since(startContainerBegin),
	})

	// Get metadata through container inspection and available task information, then write it to the metadata file.
	// This is done in the background to avoid delaying container start.
	// TODO: Add a state to the apicontainer.Container for the status of the metadata file (whether it needs an update) and
	// add logic to engine state restoration to do a metadata update for containers that are running after the agent was restarted.
	if engine.cfg.ContainerMetadataEnabled.Enabled() && !container.IsInternal() {
		go func() {
			err := engine.metadataManager.Update(engine.ctx, dockerID, task, container.Name)
			if err != nil {
				logger.Warn("Failed to update metadata file for container", logger.Fields{
					field.TaskID:    task.GetID(),
					field.Container: container.Name,
					field.Error:     err,
				})
				return
			}
			container.SetMetadataFileUpdated()
			logger.Debug("Updated metadata file for container", logger.Fields{
				field.TaskID:    task.GetID(),
				field.Container: container.Name,
			})
		}()
	}

	// If the container is a firelens container, the fluent host needs to be added to the task's environment variables.
	// Of the supported network modes - bridge and awsvpc - awsvpc uses the host 127.0.0.1, but in bridge mode we need
	// to wait for the container's IP to be available before any container that uses firelens can be created.
	//
	// For bridge-mode ServiceConnect-enabled tasks, we inject a pause container for each application container,
	// including the firelens container. Therefore, when resolving the container IP, we should check that of the
	// associated pause container. In that case, the firelens container has network mode "container" since it's
	// launched into its pause container's network namespace.
	if container.GetFirelensConfig() != nil && task.IsNetworkModeBridge() {
		_, gotContainerIP := getContainerHostIP(dockerContainerMD.NetworkSettings)
		if task.IsServiceConnectEnabled() {
			targetContainer, err := task.GetBridgeModePauseContainerForTaskContainer(container)
			if err != nil {
				logger.Error("Failed to start Firelens container", logger.Fields{
					field.TaskID:    task.GetID(),
					field.Container: container.Name,
					field.Error:     err,
				})
				return dockerapi.DockerContainerMetadata{
					Error: dockerapi.CannotStartContainerError{FromError: fmt.Errorf(
						"failed to start firelens container: %v", err)},
				}
			}
			_, gotContainerIP = getContainerHostIP(targetContainer.GetNetworkSettings())
		}

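		// The IP may not be available immediately after start; retry with exponential backoff for up to a minute.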
		if !gotContainerIP {
			getIPBridgeBackoff := retry.NewExponentialBackoff(minGetIPBridgeTimeout, maxGetIPBridgeTimeout, getIPBridgeRetryJitterMultiplier, getIPBridgeRetryDelayMultiplier)
			contextWithTimeout, cancel := context.WithTimeout(engine.ctx, time.Minute)
			defer cancel()
			err := retry.RetryWithBackoffCtx(contextWithTimeout, getIPBridgeBackoff, func() error {
				gotIPBridge := false
				if task.IsServiceConnectEnabled() {
					targetContainer, err := task.GetBridgeModePauseContainerForTaskContainer(container)
					if err != nil {
						return err
					}
					_, gotIPBridge = getContainerHostIP(targetContainer.GetNetworkSettings())
					if gotIPBridge {
						return nil
					}
				} else {
					inspectOutput, err := engine.client.InspectContainer(engine.ctx, dockerContainerMD.DockerID,
						dockerclient.InspectContainerTimeout)
					if err != nil {
						return err
					}
					_, gotIPBridge = getContainerHostIP(inspectOutput.NetworkSettings)
					if gotIPBridge {
						dockerContainerMD.NetworkSettings = inspectOutput.NetworkSettings
						return nil
					}
				}
				return errors.New("bridge IP not available to use for firelens")
			})
			if err != nil {
				logger.Error("Failed to start Firelens container", logger.Fields{
					field.TaskID:    task.GetID(),
					field.Container: container.Name,
					field.Error:     err,
				})
				return dockerapi.DockerContainerMetadata{
					Error: dockerapi.CannotStartContainerError{FromError: err},
				}
			}
		}
	}
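	// If ECS Exec is enabled for this container, start the ExecuteCommandAgent process inside it (unless the agent
	// failed to initialize) and emit a managed agent state change for the result.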
	if execcmd.IsExecEnabledContainer(container) {
		if ma, _ := container.GetManagedAgentByName(execcmd.ExecuteCommandAgentName); !ma.InitFailed {
			reason := "ExecuteCommandAgent started"
			if err := engine.execCmdMgr.StartAgent(engine.ctx, engine.client, task, container, dockerID); err != nil {
				reason = err.Error()
				logger.Error("Failed to start ExecCommandAgent Process for container", logger.Fields{
					field.TaskID:    task.GetID(),
					field.Container: container.Name,
					field.Error:     err,
				})
			}

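			// Look up the managed task so the managed agent state change can be emitted on its event stream.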
			engine.tasksLock.RLock()
			mTask, ok := engine.managedTasks[task.Arn]
			engine.tasksLock.RUnlock()
			// whether we started or failed to start, we'll want to emit a state change event
			// redundant state change events like RUNNING->RUNNING are allowed
			if ok {
				mTask.emitManagedAgentEvent(mTask.Task, container, execcmd.ExecuteCommandAgentName, reason)
			} else {
				logger.Error("Failed to update status of ExecCommandAgent Process for container", logger.Fields{
					field.TaskID:    task.GetID(),
					field.Container: container.Name,
					field.Error:     "managed task not found",
				})
			}
		}
	}

	// On Windows, we need to invoke CNI plugins for all containers
	// invokePluginsForContainer will return nil for other platforms
	if dockerContainerMD.Error == nil && task.IsNetworkModeAWSVPC() && !container.IsInternal() {
		err := engine.invokePluginsForContainer(task, container)
		if err != nil {
			return dockerapi.DockerContainerMetadata{
				Error: ContainerNetworkingError{
					fromError: fmt.Errorf("startContainer: cni plugin invocation failed: %+v", err),
				},
			}
		}
	}

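	// For bridge-mode Service Connect tasks, the Service Connect pause container's IP is needed to populate the
	// task's Service Connect network config, so resolve it from the container's network settings here.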
	if task.IsServiceConnectEnabled() && task.IsNetworkModeBridge() && task.IsContainerServiceConnectPause(container.Name) {
		ipv4Addr, ipv6Addr := getBridgeModeContainerIP(dockerContainerMD.NetworkSettings)
		if ipv4Addr == "" && ipv6Addr == "" {
			return dockerapi.DockerContainerMetadata{
				Error: ContainerNetworkingError{
					fromError: fmt.Errorf("startContainer: failed to resolve container IP for SC bridge mode pause container"),
				},
			}
		}
		task.PopulateServiceConnectNetworkConfig(ipv4Addr, ipv6Addr)
	}

	return dockerContainerMD
}