code/src/model/attention.py [189:221]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
        if self.lstm_proj:
            self.lstm_proj_layer = nn.Linear(self.hidden_dim, self.emb_dim)
            proj_output_dim = self.emb_dim
        else:
            self.lstm_proj_layer = None
            proj_output_dim = self.hidden_dim

        # projection layers
        proj = nn.Linear(proj_output_dim, self.n_words)
        if self.share_decpro_emb:
            logger.info("Sharing input embeddings and projection matrix in the decoder")
            proj.weight = self.embeddings.weight
        self.proj = proj

    def get_bos_attr(self, attr):
        """
        Generate beginning of sentence attribute embedding.
        """
        if self.bos_attr == 'avg':
            return self.bos_attr_embeddings(attr).mean(1)
        if self.bos_attr == 'cross':
            return self.bos_attr_embeddings(((attr - self.attr_offset[None]) * self.attr_shifts[None]).sum(1))
        assert False

    def get_bias_attr(self, attr):
        """
        Generate attribute bias.
        """
        if self.bias_attr == 'avg':
            return self.bias_attr_embeddings(attr).mean(1)
        if self.bias_attr == 'cross':
            return self.bias_attr_embeddings(((attr - self.attr_offset[None]) * self.attr_shifts[None]).sum(1))
        assert False
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
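
Note on the projection setup above: assigning proj.weight = self.embeddings.weight only works when the projection's input dimension equals the embedding dimension, which is presumably why the optional lstm_proj_layer maps hidden states back to emb_dim when hidden_dim differs from it. A minimal standalone sketch of that weight-tying pattern follows; the dimensions and variable names are illustrative assumptions, not values taken from the repository.

import torch
from torch import nn

# Illustrative sizes (assumptions for the sketch).
n_words, emb_dim, hidden_dim = 1000, 512, 1024

embeddings = nn.Embedding(n_words, emb_dim)

# When the decoder hidden size differs from the embedding size, an extra
# linear layer brings hidden states back to emb_dim so the output projection
# can share its (n_words, emb_dim) weight matrix with the input embeddings.
lstm_proj_layer = nn.Linear(hidden_dim, emb_dim)
proj = nn.Linear(emb_dim, n_words)
proj.weight = embeddings.weight  # weight tying: both parameters are (n_words, emb_dim)

hidden = torch.randn(8, hidden_dim)      # fake decoder hidden states
scores = proj(lstm_proj_layer(hidden))   # (8, n_words) vocabulary logits
assert scores.shape == (8, n_words)
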



code/src/model/seq2seq.py [177:209]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
        if self.lstm_proj:
            self.lstm_proj_layer = nn.Linear(self.hidden_dim, self.emb_dim)
            proj_output_dim = self.emb_dim
        else:
            self.lstm_proj_layer = None
            proj_output_dim = self.hidden_dim

        # projection layers
        proj = nn.Linear(proj_output_dim, self.n_words)
        if self.share_decpro_emb:
            logger.info("Sharing input embeddings and projection matrix in the decoder")
            proj.weight = self.embeddings.weight
        self.proj = proj

    def get_bos_attr(self, attr):
        """
        Generate beginning of sentence attribute embedding.
        """
        if self.bos_attr == 'avg':
            return self.bos_attr_embeddings(attr).mean(1)
        if self.bos_attr == 'cross':
            return self.bos_attr_embeddings(((attr - self.attr_offset[None]) * self.attr_shifts[None]).sum(1))
        assert False

    def get_bias_attr(self, attr):
        """
        Generate attribute bias.
        """
        if self.bias_attr == 'avg':
            return self.bias_attr_embeddings(attr).mean(1)
        if self.bias_attr == 'cross':
            return self.bias_attr_embeddings(((attr - self.attr_offset[None]) * self.attr_shifts[None]).sum(1))
        assert False
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
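
The 'cross' branch of get_bos_attr / get_bias_attr looks up a single embedding per combination of attribute values: attr_offset turns global attribute-value IDs into per-attribute local indices, and attr_shifts act as strides that flatten those local indices into one index over the cross product of all attribute values. Below is a small sketch under assumed shapes; the attribute counts, offsets, and strides are invented for illustration and are not the repository's actual values.

import torch

# Assumed setup: two attributes, e.g. 3 sentiment values and 2 gender values,
# giving 3 * 2 = 6 cross-product combinations (rows of the 'cross' embedding).
# Values are numbered consecutively across attributes: 0..2, then 3..4.
attr_offset = torch.tensor([0, 3])   # global ID of each attribute's first value
attr_shifts = torch.tensor([2, 1])   # row-major strides: index = a0 * 2 + a1 * 1

# A batch of two sentences with global attribute-value IDs
# (sentiment=2, gender=4) and (sentiment=0, gender=3).
attr = torch.tensor([[2, 4], [0, 3]])

cross_index = ((attr - attr_offset[None]) * attr_shifts[None]).sum(1)
print(cross_index)  # tensor([5, 0]) -> rows of the 6-entry cross embedding table

By contrast, the 'avg' branch embeds each attribute value separately and averages the resulting vectors, so its embedding table only needs one row per attribute value rather than one per combination.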



