def __init__()

in src/modeling/res_encoder.py [0:0]


    def __init__(self, h_size=(1024, 1024, 1024), v_size=10, embd_dim=300, mlp_d=1024,
                 dropout_r=0.1, k=3, n_layers=1, num_labels=3):
        """Residual-stacked BiLSTM sentence encoder with an MLP classifier head.

        Three single-layer bidirectional LSTMs are stacked; each layer after the
        first receives the word embeddings concatenated with the previous
        layer's bidirectional output (a residual-style shortcut). The final
        representation is classified by a 1- or 2-hidden-layer MLP.

        Args:
            h_size: hidden sizes for the three LSTM layers (indexed 0..2).
                Default is a tuple (not a list) to avoid the shared
                mutable-default pitfall; it is only ever indexed, so callers
                passing lists still work.
            v_size: vocabulary size for the embedding table.
            embd_dim: word-embedding dimensionality.
            mlp_d: hidden width of the classifier MLP.
            dropout_r: dropout probability between classifier layers.
            k: stored on the instance; presumably a pooling window used by
               other methods — not used here.
            n_layers: number of hidden layers in the classifier MLP (1 or 2).
            num_labels: number of output classes.

        Raises:
            ValueError: if ``n_layers`` is not 1 or 2.
        """
        super(ResEncoder, self).__init__()
        self.Embd = nn.Embedding(v_size, embd_dim)
        self.num_labels = num_labels

        # Layer 0: consumes raw embeddings.
        self.lstm = nn.LSTM(input_size=embd_dim, hidden_size=h_size[0],
                            num_layers=1, bidirectional=True)

        # Layer 1: embeddings + layer-0 bidirectional output (h_size[0] * 2).
        self.lstm_1 = nn.LSTM(input_size=(embd_dim + h_size[0] * 2), hidden_size=h_size[1],
                              num_layers=1, bidirectional=True)

        # Layer 2: NOTE(review) input_size uses h_size[0], which is correct
        # only when h_size[0] == h_size[1] (true for the defaults). Presumably
        # h_size[1] was intended — confirm before passing heterogeneous sizes.
        self.lstm_2 = nn.LSTM(input_size=(embd_dim + h_size[0] * 2), hidden_size=h_size[2],
                              num_layers=1, bidirectional=True)

        self.h_size = h_size
        self.k = k

        # Classifier head: final bidirectional output (h_size[2] * 2) -> labels.
        self.mlp_1 = nn.Linear(h_size[2] * 2, mlp_d)
        self.mlp_2 = nn.Linear(mlp_d, mlp_d)
        self.sm = nn.Linear(mlp_d, self.num_labels)

        if n_layers == 1:
            self.classifier = nn.Sequential(self.mlp_1, nn.ReLU(), nn.Dropout(dropout_r),
                                            self.sm)
        elif n_layers == 2:
            self.classifier = nn.Sequential(self.mlp_1, nn.ReLU(), nn.Dropout(dropout_r),
                                            self.mlp_2, nn.ReLU(), nn.Dropout(dropout_r),
                                            self.sm)
        else:
            # Fail fast: the original printed an error and silently left
            # self.classifier undefined, deferring the failure to forward().
            raise ValueError("n_layers must be 1 or 2, got {}".format(n_layers))