src/modeling/res_encoder.py [49:76]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
        self.mlp_2 = nn.Linear(mlp_d, mlp_d)
        self.sm = nn.Linear(mlp_d, self.num_labels)

        # Assemble the MLP classifier head: one or two hidden layers with
        # ReLU + dropout, followed by a projection to the label space.
        if n_layers == 1:
            self.classifier = nn.Sequential(self.mlp_1, nn.ReLU(), nn.Dropout(dropout_r),
                                            self.sm)
        elif n_layers == 2:
            self.classifier = nn.Sequential(self.mlp_1, nn.ReLU(), nn.Dropout(dropout_r),
                                            self.mlp_2, nn.ReLU(), nn.Dropout(dropout_r),
                                            self.sm)
        else:
            raise ValueError(f"Unsupported n_layers: {n_layers} (expected 1 or 2)")

    def init_embedding(self, embedding):
        # Tie this model's embedding table to an externally initialized
        # (e.g., pretrained) nn.Embedding so both share the same parameters.
        self.Embd.weight = embedding.weight

    def forward(self, input_ids, attention_mask, labels=None):
        # (The old s1/s2 truncation-to-self.max_l logic from the previous
        # two-sentence interface was commented out here.)
        # Per-example sequence lengths, recovered from the attention mask.
        batch_l_1 = torch.sum(attention_mask, dim=1)

        # Look up token embeddings for the padded id sequences.
        embedding_1 = self.Embd(input_ids)
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
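
For context, a minimal standalone sketch of the classifier-head pattern shown above; the hyperparameter values (mlp_d, num_labels, dropout_r, n_layers) are illustrative assumptions, not taken from the repo's configuration:

import torch
import torch.nn as nn

# Illustrative hyperparameters; these values are assumptions, not the repo's config.
mlp_d, num_labels, dropout_r, n_layers = 1600, 3, 0.1, 2

mlp_1 = nn.Linear(mlp_d, mlp_d)
mlp_2 = nn.Linear(mlp_d, mlp_d)
sm = nn.Linear(mlp_d, num_labels)

if n_layers == 1:
    classifier = nn.Sequential(mlp_1, nn.ReLU(), nn.Dropout(dropout_r), sm)
elif n_layers == 2:
    classifier = nn.Sequential(mlp_1, nn.ReLU(), nn.Dropout(dropout_r),
                               mlp_2, nn.ReLU(), nn.Dropout(dropout_r), sm)
else:
    raise ValueError(f"Unsupported n_layers: {n_layers}")

# A batch of 4 pooled feature vectors -> per-class logits.
pooled = torch.randn(4, mlp_d)
logits = classifier(pooled)
print(logits.shape)  # torch.Size([4, 3])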



src/modeling/res_encoder.py [136:163]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
        self.mlp_2 = nn.Linear(mlp_d, mlp_d)
        self.sm = nn.Linear(mlp_d, self.num_labels)

        # Assemble the MLP classifier head: one or two hidden layers with
        # ReLU + dropout, followed by a projection to the label space.
        if n_layers == 1:
            self.classifier = nn.Sequential(self.mlp_1, nn.ReLU(), nn.Dropout(dropout_r),
                                            self.sm)
        elif n_layers == 2:
            self.classifier = nn.Sequential(self.mlp_1, nn.ReLU(), nn.Dropout(dropout_r),
                                            self.mlp_2, nn.ReLU(), nn.Dropout(dropout_r),
                                            self.sm)
        else:
            raise ValueError(f"Unsupported n_layers: {n_layers} (expected 1 or 2)")

    def init_embedding(self, embedding):
        # Tie this model's embedding table to an externally initialized
        # (e.g., pretrained) nn.Embedding so both share the same parameters.
        self.Embd.weight = embedding.weight

    def forward(self, input_ids, attention_mask, labels=None):
        # (The old s1/s2 truncation-to-self.max_l logic from the previous
        # two-sentence interface was commented out here.)
        # Per-example sequence lengths, recovered from the attention mask.
        batch_l_1 = torch.sum(attention_mask, dim=1)

        # Look up token embeddings for the padded id sequences.
        embedding_1 = self.Embd(input_ids)
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
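
A hedged sketch of how the forward-path fragment above consumes its inputs: the attention mask yields per-example lengths, and the ids are mapped through the (optionally tied) embedding table. The vocabulary size, embedding dimension, and pad id below are assumptions for illustration:

import torch
import torch.nn as nn

vocab_size, embed_d = 100, 300                    # assumed sizes, for illustration only

Embd = nn.Embedding(vocab_size, embed_d)
pretrained = nn.Embedding(vocab_size, embed_d)    # stand-in for pretrained word vectors
Embd.weight = pretrained.weight                   # weight tying, as in init_embedding above

# Two padded id sequences; pad id 0 is an assumption of this sketch.
input_ids = torch.tensor([[5, 9, 2, 0],
                          [7, 3, 0, 0]])
attention_mask = (input_ids != 0).long()

batch_l_1 = torch.sum(attention_mask, dim=1)      # per-example lengths: tensor([3, 2])
embedding_1 = Embd(input_ids)                     # shape: (2, 4, embed_d)
print(batch_l_1, embedding_1.shape)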



