classy_vision/meters/precision_meter.py [131:158]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    def update(self, model_output, target, **kwargs):
        """
        Args:
            model_output: tensor of shape (B, C) where each value is
                          either a logit or a class probability.
            target:       tensor of shape (B, C), one-hot / multi-label
                          encoded, or tensor of shape (B) / (B, 1),
                          integer encoded.
        """
        # Convert target to 0/1 encoding if it isn't already
        target = maybe_convert_to_one_hot(target, model_output)

        # If PyTorch AMP is being used, model outputs are probably fp16.
        # Since .topk() is not compatible with fp16, we promote the model
        # outputs to full precision.
        _, pred_classes = model_output.float().topk(
            max(self._topk), dim=1, largest=True, sorted=True
        )
        pred_mask_tensor = torch.zeros(target.size())
        for i, k in enumerate(self._topk):
            pred_mask_tensor.zero_()
            self._curr_correct_predictions_k[i] += torch.sum(
                # torch.min is used to simulate AND between binary
                # tensors. If tensors are not binary, this will fail.
                torch.min(
                    pred_mask_tensor.scatter_(1, pred_classes[:, :k], 1.0),
                    target.float(),
                )
            ).item()
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
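
A minimal standalone sketch of the counting trick used above, with hypothetical toy tensors (the meter class itself is not involved): `scatter_` builds a 0/1 mask of the top-k predicted classes, and the elementwise `torch.min` acts as a logical AND against the one-hot target, so the sum counts label hits inside the top-k predictions.

import torch

# Toy example: 3 samples, 4 classes, counting top-2 correct predictions.
model_output = torch.tensor(
    [[0.04, 0.70, 0.20, 0.06],   # top-2 predictions: classes 1, 2
     [0.30, 0.20, 0.40, 0.10],   # top-2 predictions: classes 2, 0
     [0.03, 0.10, 0.80, 0.07]]   # top-2 predictions: classes 2, 1
)
target = torch.tensor(
    [[0, 1, 0, 0],   # true class 1 -> hit
     [1, 0, 0, 0],   # true class 0 -> hit
     [0, 0, 0, 1]]   # true class 3 -> miss
)

k = 2
_, pred_classes = model_output.topk(k, dim=1, largest=True, sorted=True)
pred_mask = torch.zeros(target.size())
pred_mask.scatter_(1, pred_classes, 1.0)                # 1.0 at each top-k predicted class
correct = torch.sum(torch.min(pred_mask, target.float())).item()  # elementwise AND, then count
print(correct)  # 2.0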



classy_vision/meters/recall_meter.py [130:157]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    def update(self, model_output, target, **kwargs):
        """
        Args:
            model_output: tensor of shape (B, C) where each value is
                          either a logit or a class probability.
            target:       tensor of shape (B, C), one-hot / multi-label
                          encoded, or tensor of shape (B) / (B, 1),
                          integer encoded.
        """
        # Convert target to 0/1 encoding if it isn't already
        target = maybe_convert_to_one_hot(target, model_output)

        # If PyTorch AMP is being used, model outputs are probably fp16.
        # Since .topk() is not compatible with fp16, we promote the model
        # outputs to full precision.
        _, pred_classes = model_output.float().topk(
            max(self._topk), dim=1, largest=True, sorted=True
        )
        pred_mask_tensor = torch.zeros(target.size())
        for i, k in enumerate(self._topk):
            pred_mask_tensor.zero_()
            self._curr_correct_predictions_k[i] += torch.sum(
                # torch.min is used to simulate AND between binary
                # tensors. If tensors are not binary, this will fail.
                torch.min(
                    pred_mask_tensor.scatter_(1, pred_classes[:, :k], 1.0),
                    target.float(),
                )
            ).item()
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
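
The two update methods are identical; the meters presumably differ only in how they normalize the accumulated correct-prediction counts when the value is computed. A rough usage sketch, assuming both classes are importable from classy_vision.meters and can be constructed directly with a topk list (verify against the actual constructors before relying on this):

import torch
from classy_vision.meters import PrecisionAtKMeter, RecallAtKMeter

# Assumption: both meters accept a list of k values at construction time.
precision = PrecisionAtKMeter(topk=[1, 2])
recall = RecallAtKMeter(topk=[1, 2])

model_output = torch.randn(8, 10)       # (B, C) logits
target = torch.randint(0, 10, (8,))     # (B,) integer labels

# Both meters take the same (model_output, target) pair; the integer target
# is converted to one-hot internally by maybe_convert_to_one_hot().
precision.update(model_output, target)
recall.update(model_output, target)

# Assumption: value is exposed as a property returning per-k results.
print(precision.value, recall.value)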



