anticipation/anticipation/models/recognizers/future_recognizer.py [168:179]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
        x = self.backbone(x)
        # x = x[:, -1, :]
        # multi-label head: sigmoid probabilities + binary cross-entropy
        pred = torch.sigmoid(self.fc(x))
        losses['cls_loss'] = F.binary_cross_entropy(pred, kwargs['labels'])
        # evaluation metrics returned alongside the loss
        losses.update(self.eval_mAP(pred, kwargs['labels'], kwargs['label_mask']))
        losses.update(self.eval_ratio_mAP(pred, kwargs['labels'], kwargs['label_mask'], kwargs['ratio_idx']))

        return losses

    def forward_test(self, num_modalities, img_meta, **kwargs):
        lfb = kwargs['lfb']
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
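
A minimal, self-contained sketch of the multi-label training step shown in this excerpt (sigmoid over per-class logits, element-wise binary cross-entropy against multi-hot labels). The feature dimension, class count, and fc layer below are illustrative assumptions, not the repository's actual configuration:

    import torch
    import torch.nn as nn
    import torch.nn.functional as F

    # Assumed shapes: features (batch, feat_dim), multi-hot labels (batch, num_classes).
    feat_dim, num_classes = 2048, 20
    fc = nn.Linear(feat_dim, num_classes)

    feats = torch.randn(4, feat_dim)                   # stand-in for self.backbone(x) output
    labels = torch.randint(0, 2, (4, num_classes)).float()

    pred = torch.sigmoid(fc(feats))                    # independent per-class probabilities in (0, 1)
    cls_loss = F.binary_cross_entropy(pred, labels)    # averaged over batch and classes
    cls_loss.backward()                                # gradients reach fc (and the backbone in the real model)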



anticipation/anticipation/models/recognizers/future_recognizer.py [237:248]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
        x = self.backbone(x)  # nB, nC, nL / K, nL, 1
        # multi-label head: sigmoid probabilities + binary cross-entropy
        pred = torch.sigmoid(self.fc(x))
        losses['cls_loss'] = F.binary_cross_entropy(pred, kwargs['labels'])
        # evaluation metrics returned alongside the loss
        losses.update(self.eval_mAP(pred, kwargs['labels'], kwargs['label_mask']))
        losses.update(self.eval_ratio_mAP(pred, kwargs['labels'], kwargs['label_mask'], kwargs['ratio_idx']))

        return losses

    def forward_test(self, num_modalities, img_meta, **kwargs):
        lfb = kwargs['lfb']
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
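
The eval_mAP and eval_ratio_mAP helpers called in both excerpts are defined elsewhere in the repository and are not shown here. As an illustration only, the sketch below shows one plausible way a mask-aware mean average precision could be computed; the masking semantics and the helper name masked_mAP are assumptions, not taken from the source:

    import numpy as np
    from sklearn.metrics import average_precision_score

    def masked_mAP(pred, labels, label_mask):
        """Mean AP over classes, restricted to entries where label_mask is set (assumed semantics)."""
        aps = []
        for c in range(labels.shape[1]):
            valid = label_mask[:, c] > 0
            if valid.sum() == 0 or labels[valid, c].max() == 0:
                continue  # AP is undefined without at least one valid positive
            aps.append(average_precision_score(labels[valid, c], pred[valid, c]))
        return float(np.mean(aps)) if aps else 0.0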



