Excerpt: method `_train_and_save` from core/src/autogluon/core/trainer/abstract_trainer.py


    def _train_and_save(self, X, y, model: AbstractModel, X_val=None, y_val=None, stack_name='core', level=1, **model_fit_kwargs) -> List[str]:
        """
        Trains model and saves it to disk, returning a list with a single element: The name of the model, or no elements if training failed.
        If the model name is returned:
            The model can be accessed via self.load_model(model.name).
            The model will have metadata information stored in self.model_graph.
            The model's name will be appended to self.models_level[stack_name][level]
            The model will be accessible and usable through any Trainer function that takes as input 'model' or 'model_name'.
        Note: self._train_and_save should not be used outside of self._train_single_full

        Parameters
        ----------
        X, y : training features and labels (presumably pandas objects, given the
            `pd.concat` / `.columns.equals` usage below — confirm with callers).
        model : AbstractModel
            The unfitted model to train. May be rebound to the fitted model
            returned by self._train_single.
        X_val, y_val : optional validation data. When both are provided, they are
            used to compute `model.val_score` after fitting.
        stack_name : str, default 'core'
            Stack under which the trained model is registered via self._add_model.
        level : int, default 1
            Stack level under which the trained model is registered.
        **model_fit_kwargs
            Forwarded to self._train_single. Keys also read (and in some cases
            removed) here: 'time_limit', 'X_pseudo', 'y_pseudo',
            'sample_weight', 'sample_weight_val'.

        Returns
        -------
        List[str]
            [model.name] on success, [] if training was skipped or failed.
            Most training failures are logged and swallowed rather than raised.
        """
        # Optional pseudolabeled data supplied by the caller; may be merged into
        # the training data below for non-bagged models.
        X_pseudo = model_fit_kwargs.get('X_pseudo', None)
        y_pseudo = model_fit_kwargs.get('y_pseudo', None)
        fit_start_time = time.time()
        time_limit = model_fit_kwargs.get('time_limit', None)
        # Stays empty unless the success (`else`) path appends model.name.
        model_names_trained = []
        try:
            fit_log_message = f'Fitting model: {model.name} ...'
            if time_limit is not None:
                # No time left for this model: skip it entirely (returns []).
                if time_limit <= 0:
                    logger.log(15, f'Skipping {model.name} due to lack of time remaining.')
                    return model_names_trained
                # Total remaining time across the whole fit, for logging only;
                # falls back to this model's own time_limit if unknown.
                if self._time_limit is not None and self._time_train_start is not None:
                    time_left_total = self._time_limit - (fit_start_time - self._time_train_start)
                else:
                    time_left_total = time_limit
                fit_log_message += f' Training model for up to {round(time_limit, 2)}s of the {round(time_left_total, 2)}s of remaining time.'
            logger.log(20, fit_log_message)

            # If model is not bagged model and not stacked then pseudolabeled data needs to be incorporated at this level
            # Bagged model does validation on the fit level whereas single models do it separately. Hence this if statement
            # is required
            # The `.columns.equals` guard skips merging when the pseudo data's
            # schema does not match X (e.g. stacked features were added).
            if not isinstance(model, BaggedEnsembleModel) and X_pseudo is not None and y_pseudo is not None and X_pseudo.columns.equals(X.columns):
                X_w_pseudo = pd.concat([X, X_pseudo])
                y_w_pseudo = pd.concat([y, y_pseudo])
                # Already merged into the training data above, so remove the
                # kwargs before forwarding to _train_single.
                model_fit_kwargs.pop('X_pseudo')
                model_fit_kwargs.pop('y_pseudo')
                logger.log(15, f'{len(X_pseudo)} extra rows of pseudolabeled data added to training set for {model.name}')
                model = self._train_single(X_w_pseudo, y_w_pseudo, model, X_val, y_val, **model_fit_kwargs)
            else:
                model = self._train_single(X, y, model, X_val, y_val, **model_fit_kwargs)

            fit_end_time = time.time()
            # Sample weights are only applied to scoring when weighted evaluation
            # is enabled on the trainer.
            if self.weight_evaluation:
                w = model_fit_kwargs.get('sample_weight', None)
                w_val = model_fit_kwargs.get('sample_weight_val', None)
            else:
                w = None
                w_val = None
            # Compute validation score: prefer held-out (X_val, y_val); bagged
            # models without held-out data can score on their out-of-fold preds.
            if isinstance(model, BaggedEnsembleModel):
                if X_val is not None and y_val is not None:
                    score = model.score(X=X_val, y=y_val, sample_weight=w_val)
                elif model.is_valid_oof() or isinstance(model, WeightedEnsembleModel):
                    score = model.score_with_oof(y=y, sample_weight=w)
                else:
                    score = None
            else:
                if X_val is not None and y_val is not None:
                    score = model.score(X=X_val, y=y_val, sample_weight=w_val)
                else:
                    score = None
            pred_end_time = time.time()
            # Only fill in timing metadata the model did not record itself.
            if model.fit_time is None:
                model.fit_time = fit_end_time - fit_start_time
            if model.predict_time is None:
                if score is None:
                    model.predict_time = None
                else:
                    # Scoring was the only work between these two timestamps.
                    model.predict_time = pred_end_time - fit_end_time
            model.val_score = score
            # TODO: Add recursive=True to avoid repeatedly loading models each time this is called for bagged ensembles (especially during repeated bagging)
            self.save_model(model=model)
        # Each handler below swallows the failure (logging it) so the caller
        # receives [] instead of an exception, and drops the local reference to
        # free memory.
        except TimeLimitExceeded:
            logger.log(20, f'\tTime limit exceeded... Skipping {model.name}.')
            # logger.log(20, '\tTime wasted: ' + str(time.time() - fit_start_time))
            del model
        except NotEnoughMemoryError:
            logger.warning(f'\tNot enough memory to train {model.name}... Skipping this model.')
            del model
        except NoValidFeatures:
            logger.warning(f'\tNo valid features to train {model.name}... Skipping this model.')
            del model
        except NoGPUError:
            logger.warning(f'\tNo GPUs available to train {model.name}... Skipping this model.')
            del model
        except NotEnoughCudaMemoryError:
            logger.warning(f'\tNot enough CUDA memory available to train {model.name}... Skipping this model.')
            del model
        except ImportError as err:
            logger.error(f'\tWarning: Exception caused {model.name} to fail during training (ImportError)... Skipping this model.')
            logger.error(f'\t\t{err}')
            # ImportError tracebacks are only shown at elevated verbosity.
            if self.verbosity > 2:
                logger.exception('Detailed Traceback:')
            # NOTE(review): unlike the other handlers, `model` is not deleted
            # here — confirm this asymmetry is intentional.
        except Exception as err:
            logger.error(f'\tWarning: Exception caused {model.name} to fail during training... Skipping this model.')
            logger.error(f'\t\t{err}')
            if self.verbosity > 0:
                logger.exception('Detailed Traceback:')
            del model
        else:
            # Success path: register the model in the trainer's graph so it is
            # reachable by name, then report it to the caller.
            self._add_model(model=model, stack_name=stack_name, level=level)
            model_names_trained.append(model.name)
            if self.low_memory:
                # The model was saved to disk above; drop the in-memory copy.
                del model
        return model_names_trained