pytext/metric_reporters/mask_compositional.py [149:183]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    def add_batch_stats(
        self, n_batches, preds, targets, scores, loss, m_input, **context
    ):
        """
        Record one batch of statistics.

        Delegates the standard bookkeeping to the parent reporter (with the
        loss masked out, since losses are tracked separately here), then
        appends the total loss and every custom loss component to
        ``self.all_loss``.

        ``loss`` is expected to be a pair: ``loss[0]`` is the scalar total
        loss and ``loss[1]`` maps custom-loss names to their values.
        """
        super().add_batch_stats(
            n_batches, preds, targets, scores, None, m_input, **context
        )
        self.all_loss["all_loss"].append(float(loss[0]))

        for name, value in loss[1].items():
            # samplewise losses are stored as multi-element tensors, so need to separate cases
            if "samplewise" in name:
                self.all_loss[name].append(value.data.cpu().numpy())
            else:
                self.all_loss[name].append(float(value))

    def calculate_loss(self):
        """
        Average each aggregated batch loss, weighted by batch size.

        Samplewise losses are not averaged: their per-batch arrays are
        concatenated into one flat array and stashed under
        ``self.all_context["losses"]`` instead of being returned.

        Returns:
            dict mapping each non-samplewise loss name to its weighted
            average over all aggregated batches.
        """
        averaged = {}
        for name, values in self.all_loss.items():
            if "samplewise" in name:
                # flatten the collected per-batch arrays into one 1-D array
                losses_ctx = self.all_context.setdefault("losses", {})
                losses_ctx[name] = np.concatenate(values, axis=None)
            else:
                averaged[name] = np.average(values, weights=self.batch_size)

        return averaged

    def calculate_metric(self):
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -



pytext/metric_reporters/mask_seq2seq_topk.py [80:114]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    def add_batch_stats(
        self, n_batches, preds, targets, scores, loss, m_input, **context
    ):
        """
        Record one batch of statistics.

        The parent reporter handles the generic bookkeeping (loss is passed
        as ``None`` because it is aggregated locally). The scalar total loss
        and each entry of the custom-loss dict are appended to
        ``self.all_loss``.

        ``loss`` is a pair: ``loss[0]`` is the scalar total loss and
        ``loss[1]`` maps custom-loss names to their values.
        """
        super().add_batch_stats(
            n_batches, preds, targets, scores, None, m_input, **context
        )
        self.all_loss["all_loss"].append(float(loss[0]))

        for name, value in loss[1].items():
            # samplewise losses are stored as multi-element tensors, so need to separate cases
            if "samplewise" in name:
                self.all_loss[name].append(value.data.cpu().numpy())
            else:
                self.all_loss[name].append(float(value))

    def calculate_loss(self):
        """
        Compute weighted-average losses over all aggregated batches.

        Non-samplewise losses are averaged with ``self.batch_size`` as
        weights and returned. Samplewise losses are instead concatenated
        into flat arrays and recorded in ``self.all_context["losses"]``.

        Returns:
            dict of loss name -> weighted average for every loss that is
            not samplewise.
        """
        result = {}
        for key in self.all_loss:
            series = self.all_loss[key]
            if "samplewise" in key:
                # keep the flattened per-sample values in the context instead
                self.all_context.setdefault("losses", {})[key] = np.concatenate(
                    series, axis=None
                )
            else:
                result[key] = np.average(series, weights=self.batch_size)

        return result

    def calculate_metric(self):
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -



