def __init__()

in models/vision/detection/awsdet/models/anchor_heads/retina_head.py [0:0]


    def __init__(self,
                 num_classes,
                 stacked_convs=4,
                 feat_channels=256,
                 octave_base_scale=4,
                 scales_per_octave=3,
                 anchor_ratios=None,
                 anchor_strides=None,
                 target_means=None,
                 target_stds=None,
                 pos_iou_thr=0.5,
                 neg_iou_thr=0.4,
                 allow_low_quality_matches=False,
                 alpha=0.25,
                 gamma=2.0,
                 label_smoothing=0.0,
                 num_pre_nms=1000,
                 min_confidence=0.005,
                 nms_threshold=0.5,
                 max_instances=100,
                 soft_nms_sigma=0.5,
                 weight_decay=1e-5
                 ):
        '''
        Anchor head as described in `RetinaNet <https://arxiv.org/pdf/1708.02002.pdf>`_.
        The head contains two subnetworks. The first classifies anchor boxes and
        the second regresses deltas for the anchors.

        Attributes
        ---
            num_classes: int. Number of object classes predicted by the head.
            stacked_convs: int. Number of conv layers stacked in each subnet.
            feat_channels: int. Channel count of the subnet feature maps.
            octave_base_scale: Base anchor scale per octave (combined with
                scales_per_octave by the parent head to derive anchor sizes).
            scales_per_octave: Number of anchor scales per octave.
            anchor_ratios: 1D array of anchor ratios of width/height.
                Defaults to [0.5, 1.0, 2.0].
            anchor_strides: Stride of the feature map relative
                to the image in pixels. Defaults to [8, 16, 32, 64, 128].
            target_means: [4] Bounding box refinement mean.
            target_stds: [4] Bounding box refinement standard deviation.
            pos_iou_thr: float. IoU above which an anchor is matched positive.
            neg_iou_thr: float. IoU below which an anchor is matched negative.
            allow_low_quality_matches: bool. Passed through to the anchor
                target generator.
            alpha, gamma: floats. Focal loss balancing/focusing parameters.
            label_smoothing: float. Label smoothing factor for the class loss.
            num_pre_nms: int. Number of bboxes to keep before NMS is applied.
            min_confidence: float. Minimum score to keep a detection
                (presumably applied before/during NMS — confirm in callers).
            nms_threshold: float. Non-maximum suppression IoU threshold.
            max_instances: int. Max bboxes kept after non-maximum suppression.
            soft_nms_sigma: float. Sigma for soft-NMS score decay.
            weight_decay: float. L2 regularization factor for head weights.
        '''
        # Use the None-sentinel idiom instead of mutable list defaults so the
        # default lists are not shared across instances (classic Python pitfall).
        if anchor_ratios is None:
            anchor_ratios = [0.5, 1.0, 2.0]
        if anchor_strides is None:
            anchor_strides = [8, 16, 32, 64, 128]
        if target_means is None:
            target_means = [.0, .0, .0, .0]
        if target_stds is None:
            target_stds = [1.0, 1.0, 1.0, 1.0]
        self.num_classes = num_classes
        self.weight_decay = weight_decay
        self.stacked_convs = stacked_convs
        # Parent head builds the anchor generators from scales/ratios/strides
        # and stores target_means/target_stds (read back below).
        super(RetinaHead, self).__init__(
                self.num_classes,
                feat_channels=feat_channels,
                octave_base_scale=octave_base_scale,
                scales_per_octave=scales_per_octave,
                anchor_ratios=anchor_ratios,
                anchor_strides=anchor_strides,
                target_means=target_means,
                target_stds=target_stds
                )
        self.anchor_target = anchor_target.AnchorTarget(
            target_means=self.target_means,
            target_stds=self.target_stds,
            positive_fraction=1.0, # no sampling TODO: pass sampler as arg into anchor target generator
            pos_iou_thr=pos_iou_thr,
            neg_iou_thr=neg_iou_thr,
            allow_low_quality_matches=allow_low_quality_matches)
        #TODO make losses package common to all models
        # Bind focal-loss hyperparameters once so the loss is called uniformly.
        self.class_loss = functools.partial(losses.focal_loss, alpha=alpha, gamma=gamma, label_smoothing=label_smoothing)
        self.bbox_loss = losses.retinanet_bbox_loss
        # Retina head being single stage head is the final box predictor stage, so it needs NMS specific parameters
        self.num_pre_nms = num_pre_nms
        self.nms_threshold = nms_threshold
        self.min_confidence = min_confidence
        self.max_instances = max_instances
        self.soft_nms_sigma = soft_nms_sigma