tensorflow_addons/seq2seq/attention_wrapper.py [739:760]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
        self.attention_g = None
        self.attention_b = None
        super().__init__(
            memory=memory,
            memory_sequence_length=memory_sequence_length,
            query_layer=query_layer,
            memory_layer=memory_layer,
            probability_fn=wrapped_probability_fn,
            name=name,
            dtype=dtype,
            **kwargs,
        )

    def build(self, input_shape):
        super().build(input_shape)
        # `attention_v` is the score vector `v` of the Bahdanau energy term;
        # it is created lazily here if it has not been built yet.
        if self.attention_v is None:
            self.attention_v = self.add_weight(
                "attention_v",
                [self.units],
                dtype=self.dtype,
                initializer=self.kernel_initializer,
            )
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
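
The slice above appears to be the tail of BahdanauAttention.__init__ followed by the start of its build() method. A minimal usage sketch, assuming the public tfa.seq2seq API and purely illustrative shapes: when memory is supplied at construction, the base class sets the memory up immediately, so build() runs and the attention_v weight exists right after construction.

import tensorflow as tf
import tensorflow_addons as tfa

# Illustrative encoder outputs: [batch, max_time, depth].
memory = tf.random.normal([4, 7, 16])
memory_sequence_length = tf.fill([4], 7)

# Passing `memory` here sets the memory up right away, which builds the
# layer and lazily creates the [units]-sized attention_v weight.
attention_mechanism = tfa.seq2seq.BahdanauAttention(
    units=32,
    memory=memory,
    memory_sequence_length=memory_sequence_length,
)
print(attention_mechanism.attention_v.shape)  # (32,)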



tensorflow_addons/seq2seq/attention_wrapper.py [1106:1127]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
        self.attention_g = None
        self.attention_b = None
        super().__init__(
            memory=memory,
            memory_sequence_length=memory_sequence_length,
            query_layer=query_layer,
            memory_layer=memory_layer,
            probability_fn=wrapped_probability_fn,
            name=name,
            dtype=dtype,
            **kwargs,
        )

    def build(self, input_shape):
        super().build(input_shape)
        # `attention_v` is the score vector `v` of the Bahdanau energy term;
        # it is created lazily here if it has not been built yet.
        if self.attention_v is None:
            self.attention_v = self.add_weight(
                "attention_v",
                [self.units],
                dtype=self.dtype,
                initializer=self.kernel_initializer,
            )
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
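
The slice at [1106:1127] is identical to the one at [739:760] and appears to belong to BahdanauMonotonicAttention, which shares the same lazy creation of attention_v. A short sketch under the same assumptions as above (illustrative shapes, public tfa.seq2seq API); with the default normalize=False, attention_g and attention_b are expected to stay None:

import tensorflow as tf
import tensorflow_addons as tfa

memory = tf.random.normal([4, 7, 16])  # [batch, max_time, depth]

# Same pattern as the non-monotonic variant: providing memory at
# construction builds the layer, so attention_v already exists afterwards.
monotonic = tfa.seq2seq.BahdanauMonotonicAttention(
    units=32,
    memory=memory,
    memory_sequence_length=tf.fill([4], 7),
)
print(monotonic.attention_v.shape)                    # (32,)
print(monotonic.attention_g, monotonic.attention_b)   # None None

Because the two spans match exactly, the shared lazy attention_v creation is a natural candidate for a common helper used by both classes; that is a design observation, not something the excerpts themselves establish.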



