NMT/src/model/attention.py [55:71]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
        if self.share_lang_emb:
            logger.info("Sharing encoder input embeddings")
            layer_0 = nn.Embedding(self.n_words[0], self.emb_dim, padding_idx=self.pad_index)
            nn.init.normal_(layer_0.weight, 0, 0.1)
            nn.init.constant_(layer_0.weight[self.pad_index], 0)
            embeddings = [layer_0 for _ in range(self.n_langs)]
        else:
            embeddings = []
            for n_words in self.n_words:
                layer_i = nn.Embedding(n_words, self.emb_dim, padding_idx=self.pad_index)
                nn.init.normal_(layer_i.weight, 0, 0.1)
                nn.init.constant_(layer_i.weight[self.pad_index], 0)
                embeddings.append(layer_i)
        self.embeddings = nn.ModuleList(embeddings)

        # LSTM layers / shared layers
        lstm = [
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
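
A minimal, self-contained sketch (not code from the repo) of the sharing pattern in the excerpt above: reusing one nn.Embedding instance for every language makes all ModuleList entries point at the same parameter tensor, so gradients from any language pair update the shared weights. Note the shared branch sizes the table from self.n_words[0], which presupposes a common (e.g. joint) vocabulary across languages. The toy values for n_words, n_langs, emb_dim and pad_index below are assumptions for illustration only.

import torch
import torch.nn as nn

n_words, n_langs, emb_dim, pad_index = 100, 2, 8, 0

# Build one embedding table and reuse it for every language, as in the excerpt.
shared = nn.Embedding(n_words, emb_dim, padding_idx=pad_index)
nn.init.normal_(shared.weight, 0, 0.1)
nn.init.constant_(shared.weight[pad_index], 0)  # re-zero the pad row after normal_ init

embeddings = nn.ModuleList([shared for _ in range(n_langs)])

# Every "language" sees the exact same parameter tensor.
assert embeddings[0].weight is embeddings[1].weight

# A backward pass through language 0's embedding also populates language 1's grad,
# because both entries alias the same weight.
ids = torch.tensor([[1, 2, 3]])
embeddings[0](ids).sum().backward()
print(embeddings[1].weight.grad is not None)  # True: gradients are shared too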



NMT/src/model/seq2seq.py [64:82]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
        if self.share_lang_emb:
            logger.info("Sharing encoder input embeddings")
            layer_0 = nn.Embedding(self.n_words[0], self.emb_dim, padding_idx=self.pad_index)
            nn.init.normal_(layer_0.weight, 0, 0.1)
            nn.init.constant_(layer_0.weight[self.pad_index], 0)

            embeddings = [layer_0 for _ in range(self.n_langs)]
        else:
            embeddings = []
            for n_words in self.n_words:
                layer_i = nn.Embedding(n_words, self.emb_dim, padding_idx=self.pad_index)
                nn.init.normal_(layer_i.weight, 0, 0.1)
                nn.init.constant_(layer_i.weight[self.pad_index], 0)

                embeddings.append(layer_i)
        self.embeddings = nn.ModuleList(embeddings)

        # LSTM layers / shared layers
        lstm = [
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
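
For contrast, a minimal sketch (again not repo code) of the non-shared branch that both excerpts fall back to: one nn.Embedding per language, so vocabulary sizes may differ and the parameter tensors stay independent. n_words_per_lang, emb_dim and pad_index are illustrative values only.

import torch.nn as nn

n_words_per_lang = [100, 120]   # e.g. different vocab sizes per language
emb_dim, pad_index = 8, 0

embeddings = []
for n_words in n_words_per_lang:
    emb = nn.Embedding(n_words, emb_dim, padding_idx=pad_index)
    nn.init.normal_(emb.weight, 0, 0.1)          # same init scheme as the excerpts
    nn.init.constant_(emb.weight[pad_index], 0)  # keep the pad row at zero
    embeddings.append(emb)
embeddings = nn.ModuleList(embeddings)

# Separate parameter tensors: training one language's embeddings
# does not touch the other's.
assert embeddings[0].weight is not embeddings[1].weight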



