ignite/engine/__init__.py [200:249]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    model: torch.nn.Module,
    optimizer: torch.optim.Optimizer,
    loss_fn: Union[Callable, torch.nn.Module],
    device: Optional[Union[str, torch.device]] = None,
    non_blocking: bool = False,
    prepare_batch: Callable = _prepare_batch,
    output_transform: Callable[[Any, Any, Any, torch.Tensor], Any] = lambda x, y, y_pred, loss: loss.item(),
    gradient_accumulation_steps: int = 1,
) -> Callable:
    """Factory function for supervised training using apex.

    Args:
        model: the model to train.
        optimizer: the optimizer to use.
        loss_fn: the loss function to use.
        device: device type specification (default: None).
            Applies to batches after starting the engine. Model *will not* be moved.
            Device can be CPU or GPU.
        non_blocking: if True and this copy is between CPU and GPU, the copy may occur asynchronously
            with respect to the host. For other cases, this argument has no effect.
        prepare_batch: function that receives `batch`, `device`, `non_blocking` and outputs
            tuple of tensors `(batch_x, batch_y)`.
        output_transform: function that receives 'x', 'y', 'y_pred', 'loss' and returns value
            to be assigned to engine's state.output after each iteration. Default is returning `loss.item()`.
        gradient_accumulation_steps: number of steps the gradients should be accumulated across
            (default: 1, meaning no gradient accumulation).

    Returns:
        Callable: update function.

    Examples:
        .. code-block:: python

            from ignite.engine import Engine, supervised_training_step_apex

            model = ...
            optimizer = ...
            loss_fn = ...

            update_fn = supervised_training_step_apex(model, optimizer, loss_fn, 'cuda')
            trainer = Engine(update_fn)

    .. versionadded:: 0.4.5
    .. versionchanged:: 0.5.0
        Added Gradient Accumulation.
    """

    try:
        from apex import amp as apex_amp
    except ModuleNotFoundError:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
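
For orientation, here is a minimal sketch of the update function such a factory could
return, assuming the standard apex ``amp.scale_loss`` context manager and Ignite's
1-based ``engine.state.iteration`` counter; the elided body above may differ in detail:

    def update(engine, batch):
        # hypothetical sketch, not the verbatim library implementation
        # zero gradients only at the start of each accumulation window
        if (engine.state.iteration - 1) % gradient_accumulation_steps == 0:
            optimizer.zero_grad()
        model.train()
        x, y = prepare_batch(batch, device=device, non_blocking=non_blocking)
        y_pred = model(x)
        loss = loss_fn(y_pred, y)
        if gradient_accumulation_steps > 1:
            # scale down so the accumulated gradient matches a full-batch step
            loss = loss / gradient_accumulation_steps
        # apex scales the loss for mixed-precision backprop
        with apex_amp.scale_loss(loss, optimizer) as scaled_loss:
            scaled_loss.backward()
        # step the optimizer once per accumulation window
        if engine.state.iteration % gradient_accumulation_steps == 0:
            optimizer.step()
        return output_transform(x, y, y_pred, loss)

With ``gradient_accumulation_steps=4``, ``optimizer.step()`` runs every fourth
iteration and each per-iteration loss is divided by 4, emulating a 4x larger batch.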



ignite/engine/__init__.py [276:324]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    model: torch.nn.Module,
    optimizer: torch.optim.Optimizer,
    loss_fn: Union[Callable, torch.nn.Module],
    device: Optional[Union[str, torch.device]] = None,
    non_blocking: bool = False,
    prepare_batch: Callable = _prepare_batch,
    output_transform: Callable[[Any, Any, Any, torch.Tensor], Any] = lambda x, y, y_pred, loss: loss.item(),
    gradient_accumulation_steps: int = 1,
) -> Callable:
    """Factory function for supervised training using ``torch_xla``.

    Args:
        model: the model to train.
        optimizer: the optimizer to use.
        loss_fn: the loss function to use.
        device: device type specification (default: None).
            Applies to batches after starting the engine. Model *will not* be moved.
            Device can be CPU or TPU.
        non_blocking: if True and this copy is between CPU and GPU, the copy may occur asynchronously
            with respect to the host. For other cases, this argument has no effect.
        prepare_batch: function that receives `batch`, `device`, `non_blocking` and outputs
            tuple of tensors `(batch_x, batch_y)`.
        output_transform: function that receives 'x', 'y', 'y_pred', 'loss' and returns value
            to be assigned to engine's state.output after each iteration. Default is returning `loss.item()`.
        gradient_accumulation_steps: number of steps the gradients should be accumulated across
            (default: 1, meaning no gradient accumulation).

    Returns:
        Callable: update function.

    Examples:
        .. code-block:: python

            from ignite.engine import Engine, supervised_training_step_tpu

            model = ...
            optimizer = ...
            loss_fn = ...

            update_fn = supervised_training_step_tpu(model, optimizer, loss_fn, 'xla')
            trainer = Engine(update_fn)

    .. versionadded:: 0.4.5
    .. versionchanged:: 0.5.0
        Added Gradient Accumulation argument for all supervised training methods.
    """
    try:
        import torch_xla.core.xla_model as xm
    except ModuleNotFoundError:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
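
For comparison, a minimal sketch of the update function the TPU factory could return;
it mirrors the apex variant above but defers the parameter update to
``xm.optimizer_step``, which marks the step for the XLA runtime (a sketch under the
same assumptions, not the verbatim implementation):

    def update(engine, batch):
        # hypothetical sketch, not the verbatim library implementation
        if (engine.state.iteration - 1) % gradient_accumulation_steps == 0:
            optimizer.zero_grad()
        model.train()
        x, y = prepare_batch(batch, device=device, non_blocking=non_blocking)
        y_pred = model(x)
        loss = loss_fn(y_pred, y)
        if gradient_accumulation_steps > 1:
            loss = loss / gradient_accumulation_steps
        loss.backward()
        if engine.state.iteration % gradient_accumulation_steps == 0:
            # barrier=True forces execution of the pending XLA graph
            xm.optimizer_step(optimizer, barrier=True)
        return output_transform(x, y, y_pred, loss)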



