sing/ae/trainer.py [17:25]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    def _train_batch(self, batch):
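        # Same training step as the base implementation documented in
        # sing/fondation/trainer.py below.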
        rebuilt, target = self._get_rebuilt_target(batch)
        self.optimizer.zero_grad()
        loss = self.train_loss(rebuilt, target)
        loss.backward()
        self.optimizer.step()
        return loss.item()

    def _get_rebuilt_target(self, batch):
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -



sing/fondation/trainer.py [135:152]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    def _train_batch(self, batch):
        """
        Given a batch, call :meth:`_get_rebuilt_target`
        to obtain the `rebuilt` and `target` tensors, evaluate
        :attr:`train_loss` on them, compute the gradients and perform
        one optimizer step.

        This method can be overridden in subclasses.
        """
        rebuilt, target = self._get_rebuilt_target(batch)
        self.optimizer.zero_grad()
        loss = self.train_loss(rebuilt, target)
        loss.backward()
        self.optimizer.step()

        return loss.item()

    def _get_rebuilt_target(self, batch):
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
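
Both excerpts implement the same training step, and the base-class docstring
states the contract: `_get_rebuilt_target` must return the `rebuilt` and
`target` tensors that `train_loss` is evaluated on. A minimal, self-contained
sketch of that contract for an autoencoder-style model follows; the
`ToyAETrainer` class and its `model`/optimizer wiring are illustrative
assumptions, not the repository's API.

    import torch
    from torch import nn


    class ToyAETrainer:
        """Hypothetical trainer illustrating the `_get_rebuilt_target` contract."""

        def __init__(self, model, train_loss, lr=1e-3):
            self.model = model            # autoencoder: input -> reconstruction
            self.train_loss = train_loss  # e.g. nn.MSELoss()
            self.optimizer = torch.optim.Adam(model.parameters(), lr=lr)

        def _get_rebuilt_target(self, batch):
            # For a plain autoencoder the batch is both the model input and
            # the reconstruction target.
            return self.model(batch), batch

        def _train_batch(self, batch):
            # Same step as in the excerpts above: compute the loss between
            # the rebuilt signal and the target, then take one optimizer step.
            rebuilt, target = self._get_rebuilt_target(batch)
            self.optimizer.zero_grad()
            loss = self.train_loss(rebuilt, target)
            loss.backward()
            self.optimizer.step()
            return loss.item()

For example, `ToyAETrainer(nn.Sequential(nn.Linear(8, 4), nn.Linear(4, 8)),
nn.MSELoss())._train_batch(torch.randn(16, 8))` runs one gradient step and
returns the scalar loss.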



