def __init__()

in src/sagemaker_defect_detection/models/ddn.py [0:0]


    def __init__(self, backbone: str):
        """
        Implementation of the MFN (multilevel-feature fusion network) model as described in

        Yu He, Kechen Song, Qinggang Meng, Yunhui Yan,
        “An End-to-end Steel Surface Defect Detection Approach via Fusing Multiple Hierarchical Features,”
        IEEE Transactions on Instrumentation and Measurement, 2020, 69(4), 1493-1504.

        Parameters
        ----------
        backbone : str
            Either `resnet34` or `resnet50`
        """
        super().__init__()
        self.backbone = get_backbone(backbone)
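        # `get_backbone` is defined elsewhere in the repository; a minimal
        # sketch of what it is assumed to do (the torchvision call and the
        # `pretrained` flag are assumptions, not the actual implementation):
        #
        #   import torchvision.models as models
        #
        #   def get_backbone(name: str) -> nn.Module:
        #       if name not in ("resnet34", "resnet50"):
        #           raise ValueError(f"unsupported backbone: {name}")
        #       return getattr(models, name)(pretrained=True)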
        # input 224x224 -> conv1 output size 112x112
        self.start_layer = nn.Sequential(
            self.backbone.conv1,  # type: ignore
            self.backbone.bn1,  # type: ignore
            self.backbone.relu,  # type: ignore
            self.backbone.maxpool,  # type: ignore
        )
        self.r2 = self.backbone.layer1  # 64/256x56x56 <- (resnet34/resnet50)
        self.r3 = self.backbone.layer2  # 128/512x28x28
        self.r4 = self.backbone.layer3  # 256/1024x14x14
        self.r5 = self.backbone.layer4  # 512/2048x7x7
        in_channel = 64 if backbone == "resnet34" else 256
        self.b2 = nn.Sequential(
            nn.Conv2d(
                in_channel, in_channel, kernel_size=3, padding=1, stride=2
            ),  # 56 -> 28; whether ReLU/BatchNorm should follow here is not specified in the paper
            nn.BatchNorm2d(in_channel),
            nn.ReLU(inplace=True),
            nn.Conv2d(in_channel, in_channel, kernel_size=3, padding=1, stride=2),  # 28 -> 14
            nn.BatchNorm2d(in_channel),
            nn.ReLU(inplace=True),
            nn.Conv2d(in_channel, in_channel * 2, kernel_size=1, padding=0),
            nn.BatchNorm2d(in_channel * 2),
            nn.ReLU(inplace=True),
        ).apply(init_weights)  # after r2: 128/512x14x14  <-
        self.b3 = nn.MaxPool2d(2)  # after r3: 128/512x14x14  <-
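        # Shape arithmetic for the two branches above (worked out for the
        # default 224x224 input): each stride-2, 3x3 conv with padding 1 maps
        # H to floor((H + 2*1 - 3) / 2) + 1, i.e. 56 -> 28 -> 14 in b2, while
        # MaxPool2d(2) halves r3's 28x28 output to 14x14 in b3, so both
        # branches match r4's 14x14 spatial size before fusion.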
        in_channel *= 2  # 128/512
        self.b4 = nn.Sequential(
            nn.Conv2d(in_channel * 2, in_channel, kernel_size=1, padding=0),
            nn.BatchNorm2d(in_channel),
            nn.ReLU(inplace=True),
        ).apply(init_weights)  # after r4: 128/512x14x14
        in_channel *= 4  # 512 / 2048
        self.b5 = nn.Sequential(
            nn.ConvTranspose2d(
                in_channel, in_channel, kernel_size=3, stride=2, padding=1, output_padding=1
            ),  # <- after r5: 512/2048x7x7 -> 512/2048x14x14
            nn.BatchNorm2d(in_channel),
            nn.ReLU(inplace=True),
            nn.Conv2d(in_channel, in_channel // 4, kernel_size=1, padding=0),
            nn.BatchNorm2d(in_channel // 4),
            nn.ReLU(inplace=True),
        ).apply(init_weights)
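        # The transposed conv above maps r5's 7x7 grid to 14x14
        # ((7 - 1) * 2 - 2 * 1 + 3 + 1 = 14) and the 1x1 conv reduces its
        # channels to in_channel // 4 (128 for resnet34, 512 for resnet50).
        # All four branches b2-b5 therefore end at 128/512 channels on a
        # 14x14 grid; channel-wise concatenation (assumed to happen in the
        # forward pass, which is not shown here) yields the 512/2048 channels
        # recorded in `self.out_channels` below.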

        self.out_channels = 512 if backbone == "resnet34" else 2048  # required for FasterRCNN
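
        # A minimal usage sketch (assumptions: the enclosing class is `MFN` in
        # src/sagemaker_defect_detection/models/ddn.py and its forward pass
        # returns the fused 14x14 feature map; torchvision's FasterRCNN expects
        # a backbone exposing `out_channels`, which is why it is set above):
        #
        #   import torch
        #   from sagemaker_defect_detection.models.ddn import MFN
        #
        #   mfn = MFN("resnet50")
        #   feats = mfn(torch.randn(1, 3, 224, 224))
        #   # expected: feats.shape == (1, mfn.out_channels, 14, 14)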