pytext/models/representations/augmented_lstm.py [438:460]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
        self,
        inputs: PackedSequence,
        states: Optional[Tuple[torch.Tensor, torch.Tensor]],
    ):
        output_sequence = inputs
        final_h = []
        final_c = []

        if not states:
            hidden_states = [None] * self.num_layers
        elif states[0].size()[0] != self.num_layers:
            raise RuntimeError(
                "Initial states were passed to forward() but the number of "
                "initial states does not match the number of layers."
            )
        else:
            hidden_states = list(  # noqa
                zip(
                    states[0].chunk(self.num_layers, 0),
                    states[1].chunk(self.num_layers, 0),
                )
            )
        for i, state in enumerate(hidden_states):
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -



pytext/models/representations/augmented_lstm.py [497:520]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
        self,
        inputs: PackedSequence,
        states: Optional[Tuple[torch.Tensor, torch.Tensor]],
    ):
        output_sequence = inputs
        final_h = []
        final_c = []

        if not states:
            hidden_states = [None] * self.num_layers
        elif states[0].size()[0] != self.num_layers:
            raise RuntimeError(
                "Initial states were passed to forward() but the number of "
                "initial states does not match the number of layers."
            )
        else:
            hidden_states = list(  # noqa
                zip(
                    states[0].chunk(self.num_layers, 0),
                    states[1].chunk(self.num_layers, 0),
                )
            )

        for i, state in enumerate(hidden_states):
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
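
Both excerpts above open with an identical state-initialization preamble before their per-layer loops. A minimal sketch of how that shared logic could be factored out follows; the helper name _unpack_initial_states and its standalone-function form are illustrative assumptions, not part of the PyText API.

from typing import List, Optional, Tuple

import torch


def _unpack_initial_states(
    states: Optional[Tuple[torch.Tensor, torch.Tensor]],
    num_layers: int,
) -> List[Optional[Tuple[torch.Tensor, torch.Tensor]]]:
    # Illustrative helper (not in pytext): split stacked (h, c) tensors
    # into one (h_i, c_i) pair per layer, or return all-None placeholders
    # when no initial states were provided.
    if not states:
        return [None] * num_layers
    if states[0].size()[0] != num_layers:
        raise RuntimeError(
            "Initial states were passed to forward() but the number of "
            "initial states does not match the number of layers."
        )
    return list(
        zip(
            states[0].chunk(num_layers, 0),
            states[1].chunk(num_layers, 0),
        )
    )

Each forward() body could then begin with hidden_states = _unpack_initial_states(states, self.num_layers) and keep its per-layer loop unchanged, removing the duplicated preamble from both spans.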
