codes/models.py [73:84]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
        self.norm = nn.LayerNorm(d_model)
        self.encoder_layer = nn.TransformerEncoderLayer(
                         d_model=d_model, nhead=nhead,
                         dim_feedforward=dim_ffn,
                         batch_first=True,dropout=drop, layer_norm_eps=LNM)
        self.encoder = nn.TransformerEncoder(
                         self.encoder_layer,
                         num_layers=num_layer)

    def forward(self, x):
        """Apply the input LayerNorm, then run the transformer encoder stack.

        Args:
            x: input tensor, batch-first (assumed (batch, seq, d_model) —
               TODO confirm against caller).

        Returns:
            The encoder stack's output for the normalized input.
        """
        normalized = self.norm(x)
        return self.encoder(normalized)
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -



codes/models.py [89:100]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
        self.norm = nn.LayerNorm(d_model)
        self.encoder_layer = nn.TransformerEncoderLayer(
                         d_model=d_model, nhead=nhead,
                         dim_feedforward=dim_ffn,
                         batch_first=True,dropout=drop, layer_norm_eps=LNM)
        self.encoder = nn.TransformerEncoder(
                         self.encoder_layer,
                         num_layers=num_layer)

    def forward(self, x):
        """Normalize `x` and pass it through the transformer encoder.

        Args:
            x: batch-first input tensor (presumably (batch, seq, d_model);
               verify against caller).

        Returns:
            Output of the encoder applied to the LayerNorm-ed input.
        """
        return self.encoder(self.norm(x))
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -



