optimum/executorch/modeling.py [490:507]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
            self.stats.on_sampling_end()
            if not first_token_generated:
                self.stats.on_first_token()
                first_token_generated = True

            # Get next token
            next_token = torch.argmax(logits[:, -1, :], dim=-1).item()
            generated_ids.append(next_token)
            self.stats.set_num_generated_tokens(len(generated_ids) - 1)  # Don't count decoder_start_token

            # Update input for next iteration
            decoder_input_ids = torch.tensor([[next_token]], dtype=torch.long)

            # Check if EOS token
            if next_token == self.eos_token_id:
                break

        return generated_ids
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -



optimum/executorch/modeling.py [1032:1048]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
            self.stats.on_sampling_end()
            if not first_token_generated:
                self.stats.on_first_token()
                first_token_generated = True
            # Get next token
            next_token = torch.argmax(logits[:, -1, :], dim=-1).item()
            generated_ids.append(next_token)
            self.stats.set_num_generated_tokens(len(generated_ids) - 1)  # Don't count decoder_start_token

            # Update input for next iteration
            decoder_input_ids = torch.tensor([[next_token]], dtype=torch.long)

            # Check if EOS token
            if next_token == self.eos_token_id:
                break

        return generated_ids
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
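Both excerpts above are the same greedy decode step of the seq2seq generation loop. As a minimal sketch only (not the library's actual API), the shared logic could be pulled into one helper; the name `greedy_decode_step` and the bare `stats` / `eos_token_id` parameters are assumptions standing in for the `self.*` attributes used in the excerpts, and the caller is assumed to own the surrounding loop plus a `generated_ids` list whose first entry is the decoder_start_token.

import torch

def greedy_decode_step(logits, generated_ids, stats, eos_token_id):
    """Hypothetical helper factoring out the duplicated decode step shown above."""
    # Greedily pick the highest-probability token at the last sequence position
    next_token = torch.argmax(logits[:, -1, :], dim=-1).item()
    generated_ids.append(next_token)
    stats.set_num_generated_tokens(len(generated_ids) - 1)  # don't count decoder_start_token

    # The new token becomes the decoder input for the next iteration
    decoder_input_ids = torch.tensor([[next_token]], dtype=torch.long)

    # Tell the caller whether generation should stop (EOS reached)
    return decoder_input_ids, next_token == eos_token_id

# Hypothetical call site inside either generation loop:
#     decoder_input_ids, done = greedy_decode_step(logits, generated_ids, self.stats, self.eos_token_id)
#     if done:
#         break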



