anticipation/anticipation/models/recognizers/future_recognizer.py [205:218]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
        pred = torch.sigmoid(self.fc(x))  # per-class probabilities for multi-label prediction
        losses['cls_loss'] = F.binary_cross_entropy(pred, kwargs['labels'])
        losses.update(self.eval_mAP(pred, kwargs['labels'], kwargs['label_mask']))
        losses.update(self.eval_ratio_mAP(pred, kwargs['labels'], kwargs['label_mask'], kwargs['ratio_idx']))

        return losses

    def forward_test(self, num_modalities, img_meta, **kwargs):
        lfb = kwargs['lfb']                 # long-term feature bank: (nB, nL, nC)
        nB, nL, nC = lfb.size()
        x = lfb.permute(0, 2, 1)            # (nB, nC, nL): channels first
        x = x.reshape((nB, nC, nL, 1, 1))   # add 1x1 spatial dims for the 3D backbone
        x = self.backbone(x)                # output: (nB, nC, nL/K, 1, 1)
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
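
The `forward_test` excerpt above converts the long-term feature bank `lfb` from (nB, nL, nC) into the (N, C, T, H, W) layout a 3D backbone expects, treating each clip feature as a 1x1 spatial map. A minimal sketch of that transformation, using a dummy tensor and illustrative sizes in place of the real feature bank:

```python
import torch

nB, nL, nC = 2, 16, 2048           # batch, temporal length, channels (illustrative sizes)
lfb = torch.randn(nB, nL, nC)      # stand-in for kwargs['lfb']

x = lfb.permute(0, 2, 1)           # (nB, nC, nL): channels first, time last
x = x.reshape(nB, nC, nL, 1, 1)    # (N, C, T, H, W) with a 1x1 "spatial" grid

print(x.shape)                     # torch.Size([2, 2048, 16, 1, 1])
```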



anticipation/anticipation/models/recognizers/future_recognizer.py [239:252]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
        pred = torch.sigmoid(self.fc(x))  # per-class probabilities for multi-label prediction
        losses['cls_loss'] = F.binary_cross_entropy(pred, kwargs['labels'])
        losses.update(self.eval_mAP(pred, kwargs['labels'], kwargs['label_mask']))
        losses.update(self.eval_ratio_mAP(pred, kwargs['labels'], kwargs['label_mask'], kwargs['ratio_idx']))

        return losses

    def forward_test(self, num_modalities, img_meta, **kwargs):
        lfb = kwargs['lfb']                 # long-term feature bank: (nB, nL, nC)
        nB, nL, nC = lfb.size()
        x = lfb.permute(0, 2, 1)            # (nB, nC, nL): channels first
        x = x.reshape((nB, nC, nL, 1, 1))   # add 1x1 spatial dims for the 3D backbone
        x = self.backbone(x)                # output: (nB, nC, nL/K, 1, 1)
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
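
Both excerpts compute the multi-label loss by applying a sigmoid to the head output and passing the resulting probabilities to `F.binary_cross_entropy`. A minimal sketch of that pattern with made-up shapes (the class count and batch size below are illustrative, not taken from the repository); `F.binary_cross_entropy_with_logits` is the numerically safer equivalent when starting from raw logits:

```python
import torch
import torch.nn.functional as F

num_classes = 20                                          # illustrative class count
logits = torch.randn(4, num_classes)                      # stand-in for self.fc(x)
labels = torch.randint(0, 2, (4, num_classes)).float()    # multi-hot targets

pred = torch.sigmoid(logits)                              # per-class probabilities in [0, 1]
cls_loss = F.binary_cross_entropy(pred, labels)           # as in the excerpts above

# Equivalent result, computed directly from logits for better numerical stability:
cls_loss_stable = F.binary_cross_entropy_with_logits(logits, labels)
```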



