def forward_for_single_feature_map()

in evaluation/tiny_benchmark/maskrcnn_benchmark/modeling/rpn/cascade_fcos/inference.py [0:0]


    def forward_for_single_feature_map(
            self, locations, box_cls_set,
            box_regression, centerness,
            image_sizes, show_box_cls):
        """
        Select high-scoring locations on one feature level and decode
        them into per-image candidate boxes.

        Arguments:
            locations: tensor of size H*W, 2 -- (x, y) points of this level
            box_cls_set: dict of per-stage classification maps, each a
                tensor of size N, C, H, W (stage index 2 is used below)
            box_regression: tensor of size N, 4, H, W -- (l, t, r, b) distances
            centerness: tensor of size N, 1, H, W (currently disabled, see note)
            image_sizes: per-image (h, w) sizes
            show_box_cls: visualization callback invoked when self.vis_labels

        Returns:
            list[BoxList]: one BoxList of thresholded, top-k-capped
            detections per image (pre-NMS).
        """
        # Pick the cascade stage(s) whose scores are fused. Plain list
        # indexing replaces the former np.array(...)[[2]] trick: converting
        # torch tensors through numpy fails for CUDA/grad tensors and at
        # best forces a host copy; only stage 2 is selected either way.
        selected_stages = [list(box_cls_set.values())[2]]
        box_prob_set = []
        for _box_cls in selected_stages:
            N, C, H, W = _box_cls.shape
            # N, C, H, W -> N, H*W, C with per-class sigmoid probabilities
            _box_cls = _box_cls.permute(0, 2, 3, 1)
            box_prob_set.append(_box_cls.reshape(N, -1, C).sigmoid())
        # Geometric mean over the selected stages (identity for one stage).
        box_cls = torch.exp(torch.log(torch.stack(box_prob_set)).mean(dim=0))
        # NOTE(review): centerness is deliberately disabled here; the guarded
        # branches below are kept so it can be re-enabled by removing this line.
        centerness = None

        # put box_regression in the same layout as locations: N, H*W, 4
        box_regression = box_regression.view(N, 4, H, W).permute(0, 2, 3, 1)
        box_regression = box_regression.reshape(N, -1, 4)
        if centerness is not None:
            centerness = centerness.view(N, 1, H, W).permute(0, 2, 3, 1)
            centerness = centerness.reshape(N, -1).sigmoid()

        if self.vis_labels:
            show_box_cls(box_prob_set, N, H, W, C, self.pre_nms_thresh)

        # Keep only locations whose class probability clears the threshold,
        # capped at pre_nms_top_n candidates per image.
        candidate_inds = box_cls > self.pre_nms_thresh
        pre_nms_top_n = candidate_inds.view(N, -1).sum(1)
        pre_nms_top_n = pre_nms_top_n.clamp(max=self.pre_nms_top_n)

        # multiply the classification scores with centerness scores
        if centerness is not None:
            box_cls = (box_cls * centerness[:, :, None])

        results = []
        for i in range(N):
            per_box_cls = box_cls[i]
            per_candidate_inds = candidate_inds[i]
            per_box_cls = per_box_cls[per_candidate_inds]

            per_candidate_nonzeros = per_candidate_inds.nonzero()
            per_box_loc = per_candidate_nonzeros[:, 0]
            # class 0 is background, so labels are shifted to 1-based
            per_class = per_candidate_nonzeros[:, 1] + 1

            per_box_regression = box_regression[i]
            per_box_regression = per_box_regression[per_box_loc]
            per_locations = locations[per_box_loc]

            per_pre_nms_top_n = pre_nms_top_n[i]

            if per_candidate_inds.sum().item() > per_pre_nms_top_n.item():
                # .item(): newer PyTorch requires k to be a Python int
                per_box_cls, top_k_indices = \
                    per_box_cls.topk(per_pre_nms_top_n.item(), sorted=False)
                per_class = per_class[top_k_indices]
                per_box_regression = per_box_regression[top_k_indices]
                per_locations = per_locations[top_k_indices]

            # Decode (l, t, r, b) distances around each location into xyxy.
            detections = torch.stack([
                per_locations[:, 0] - per_box_regression[:, 0],
                per_locations[:, 1] - per_box_regression[:, 1],
                per_locations[:, 0] + per_box_regression[:, 2],
                per_locations[:, 1] + per_box_regression[:, 3],
            ], dim=1)

            h, w = image_sizes[i]
            boxlist = BoxList(detections, (int(w), int(h)), mode="xyxy")
            boxlist.add_field("labels", per_class)
            boxlist.add_field("scores", per_box_cls)
            boxlist.add_field("det_locations", per_locations)
            boxlist = boxlist.clip_to_image(remove_empty=False)
            boxlist = remove_small_boxes(boxlist, self.min_size)
            results.append(boxlist)

        return results