in pkg/cmd/run/run.go [252:427]
// Run validates the `kubectl run` flag combination, generates a pod from the
// command-line parameters (and, with --expose, a companion service), prints
// the created object(s), and — when --attach is set — attaches to the pod,
// optionally removes it afterwards (--rm), and propagates the container's
// exit code to the caller.
func (o *RunOptions) Run(f cmdutil.Factory, cmd *cobra.Command, args []string) error {
	// Let kubectl run follow rules for `--`, see #13004 issue
	if len(args) == 0 || o.ArgsLenAtDash == 0 {
		return cmdutil.UsageErrorf(cmd, "NAME is required for run")
	}

	timeout, err := cmdutil.GetPodRunningTimeoutFlag(cmd)
	if err != nil {
		return cmdutil.UsageErrorf(cmd, "%v", err)
	}

	// validate image name
	if o.Image == "" {
		return fmt.Errorf("--image is required")
	}
	if !reference.ReferenceRegexp.MatchString(o.Image) {
		// Lowercase message per Go error-string convention (staticcheck ST1005);
		// wrap the sentinel with %w so callers can match it via errors.Is.
		return fmt.Errorf("invalid image name %q: %w", o.Image, reference.ErrReferenceInvalidFormat)
	}

	// A TTY only makes sense with stdin attached.
	if o.TTY && !o.Interactive {
		return cmdutil.UsageErrorf(cmd, "-i/--stdin is required for containers with -t/--tty=true")
	}
	if o.Expose && len(o.Port) == 0 {
		return cmdutil.UsageErrorf(cmd, "--port must be set when exposing a service")
	}

	o.Namespace, o.EnforceNamespace, err = f.ToRawKubeConfigLoader().Namespace()
	if err != nil {
		return err
	}

	// Interactive sessions default to a different restart policy; see getRestartPolicy.
	restartPolicy, err := getRestartPolicy(cmd, o.Interactive)
	if err != nil {
		return err
	}

	// --rm is only meaningful when we stick around (attached) to do the removal,
	// and attach is incompatible with dry-run since no real pod exists to attach to.
	remove := cmdutil.GetFlagBool(cmd, "rm")
	if !o.Attach && remove {
		return cmdutil.UsageErrorf(cmd, "--rm should only be used for attached containers")
	}
	if o.Attach && o.DryRunStrategy != cmdutil.DryRunNone {
		return cmdutil.UsageErrorf(cmd, "--dry-run=[server|client] can't be used with attached containers options (--attach, --stdin, or --tty)")
	}
	if err := verifyImagePullPolicy(cmd); err != nil {
		return err
	}

	// Build the pod spec through the versioned "run" generator, feeding it the
	// flag values it declares as parameters.
	generators := generateversioned.GeneratorFn("run")
	generator, found := generators[generateversioned.RunPodV1GeneratorName]
	if !found {
		return cmdutil.UsageErrorf(cmd, "generator %q not found", generateversioned.RunPodV1GeneratorName)
	}
	names := generator.ParamNames()
	params := generate.MakeParams(cmd, names)
	params["name"] = args[0]
	if len(args) > 1 {
		// Everything after NAME becomes the container's args.
		params["args"] = args[1:]
	}

	params["annotations"] = cmdutil.GetFlagStringArray(cmd, "annotations")
	params["env"] = cmdutil.GetFlagStringArray(cmd, "env")

	// TODO(eddiezane): These flags will be removed for 1.24
	// https://github.com/kubernetes/kubectl/issues/1101#issuecomment-916149516
	delete(params, "serviceaccount")
	delete(params, "hostport")
	delete(params, "requests")
	delete(params, "limits")

	// Track every object we create so --rm can clean all of them up.
	var createdObjects = []*RunObject{}
	runObject, err := o.createGeneratedObject(f, cmd, generator, names, params, o.NewOverrider(&corev1.Pod{}))
	if err != nil {
		return err
	}
	createdObjects = append(createdObjects, runObject)

	// Service generation failures are collected rather than fatal: the pod was
	// already created, so we still want to print it and report errors at the end.
	allErrs := []error{}
	if o.Expose {
		serviceRunObject, err := o.generateService(f, cmd, params)
		if err != nil {
			allErrs = append(allErrs, err)
		} else {
			createdObjects = append(createdObjects, serviceRunObject)
		}
	}

	if o.Attach {
		if remove {
			// Deferred so the created objects are removed regardless of how
			// the attach session ends below.
			defer o.removeCreatedObjects(f, createdObjects)
		}
		opts := &attach.AttachOptions{
			StreamOptions: exec.StreamOptions{
				IOStreams:   o.IOStreams,
				Stdin:       o.Interactive,
				TTY:         o.TTY,
				Quiet:       o.Quiet,
			},
			GetPodTimeout: timeout,
			CommandName:   cmd.Parent().CommandPath() + " attach",

			Attach: &attach.DefaultRemoteAttach{},
		}
		config, err := f.ToRESTConfig()
		if err != nil {
			return err
		}
		opts.Config = config
		opts.AttachFunc = attach.DefaultAttachFunc

		clientset, err := kubernetes.NewForConfig(config)
		if err != nil {
			return err
		}

		// Resolve the created object down to a concrete, attachable pod.
		attachablePod, err := polymorphichelpers.AttachablePodForObjectFn(f, runObject.Object, opts.GetPodTimeout)
		if err != nil {
			return err
		}
		err = handleAttachPod(f, clientset.CoreV1(), attachablePod.Namespace, attachablePod.Name, opts)
		if err != nil {
			return err
		}

		var pod *corev1.Pod
		waitForExitCode := !o.LeaveStdinOpen && (restartPolicy == corev1.RestartPolicyNever || restartPolicy == corev1.RestartPolicyOnFailure)
		if waitForExitCode {
			// we need different exit condition depending on restart policy
			// for Never it can either fail or succeed, for OnFailure only
			// success matters
			exitCondition := podCompleted
			if restartPolicy == corev1.RestartPolicyOnFailure {
				exitCondition = podSucceeded
			}
			pod, err = waitForPod(clientset.CoreV1(), attachablePod.Namespace, attachablePod.Name, opts.GetPodTimeout, exitCondition)
			if err != nil {
				return err
			}
		} else {
			// after removal is done, return successfully if we are not interested in the exit code
			return nil
		}

		// Mirror the container's result: success, a propagated non-zero exit
		// code, or an error describing an indeterminate terminal state.
		switch pod.Status.Phase {
		case corev1.PodSucceeded:
			return nil
		case corev1.PodFailed:
			unknownRcErr := fmt.Errorf("pod %s/%s failed with unknown exit code", pod.Namespace, pod.Name)
			if len(pod.Status.ContainerStatuses) == 0 || pod.Status.ContainerStatuses[0].State.Terminated == nil {
				return unknownRcErr
			}
			// assume here that we have at most one status because kubectl-run only creates one container per pod
			rc := pod.Status.ContainerStatuses[0].State.Terminated.ExitCode
			if rc == 0 {
				return unknownRcErr
			}
			return uexec.CodeExitError{
				Err:  fmt.Errorf("pod %s/%s terminated (%s)\n%s", pod.Namespace, pod.Name, pod.Status.ContainerStatuses[0].State.Terminated.Reason, pod.Status.ContainerStatuses[0].State.Terminated.Message),
				Code: int(rc),
			}
		default:
			return fmt.Errorf("pod %s/%s left in phase %s", pod.Namespace, pod.Name, pod.Status.Phase)
		}
	}

	// Non-attached path: print the created pod in the requested output format.
	if runObject != nil {
		if err := o.PrintObj(runObject.Object); err != nil {
			return err
		}
	}

	return utilerrors.NewAggregate(allErrs)
}