def get_completion()

in experimental/piranha_playground/rule_inference/piranha_chat.py [0:0]


    def get_completion(self, n_samples: int = 1) -> Optional[List[str]]:
        """
        Attempts to generate a new GPT model prediction based on the internal message list.

        Retries up to three times on transient OpenAI API failures (rate limits,
        timeouts, API errors), sleeping with exponential backoff between attempts.

        :param n_samples: int: Number of samples to generate from the model.
        :return List[str]: A list of generated messages, one per requested sample.
        :raises PiranhaChatException: If all three attempts to get a completion fail.
        """
        last_exception: Optional[Exception] = None
        for attempt in range(3):
            try:
                logger.debug("Attempting to get completion from GPT.")
                response = openai.ChatCompletion.create(
                    model=self.model,
                    messages=self.messages,
                    temperature=self.temperature,  # this is the degree of randomness of the model's output
                    n=n_samples,
                )
                # One generated message per requested sample.
                return [choice.message.content for choice in response.choices]
            except (
                openai.error.RateLimitError,
                openai.error.Timeout,
                openai.error.APIError,
            ) as e:
                last_exception = e
                logger.error(e)
                # Exponential backoff: 0.5s, 1s, 2s — gives rate limits time to reset.
                sleep_time = 0.5 * (2**attempt)
                logger.error(f"Transient OpenAI API error. Sleeping for {sleep_time}s.")
                time.sleep(sleep_time)
        # Chain the last API error so the root cause survives in the traceback.
        raise PiranhaChatException("Failed to get completion from GPT.") from last_exception