def create_chats()

in experimental/piranha_playground/rule_inference/piranha_agent.py [0:0]


    def create_chats(self, rules: str) -> List[PiranhaGPTChat]:
        """
        Prepare the data for interaction with the AI model.

        :param rules: Statically inferred rules in TOML format
        :type rules: str
        :return: List of chat interactions carrying the information needed by the AI model.
        :rtype: List[PiranhaGPTChat]
        """


        parser = TemplateParser(self.language)

        source_tree = parser.get_tree_from_code(self.source_code, remove_comments=True)
        target_tree = parser.get_tree_from_code(self.target_code, remove_comments=True)

        source_tree_sexpr = NodeUtils.generate_sexpr(source_tree.root_node, 0)
        target_tree_sexpr = NodeUtils.generate_sexpr(target_tree.root_node, 0)
        # Create diff between source and target code using difflib
        diff = list(
            difflib.unified_diff(
                self.source_code.splitlines(), self.target_code.splitlines()
            )
        )
        diff = "\n".join(diff)
        # diff = self.append_diff_information(diff, source_tree, target_tree)
        # Fill the prompt template holes with the inference context
        prompt_holes = {
            "source_code": self.source_code,
            "source_tree": source_tree_sexpr,
            "target_tree": target_tree_sexpr,
            "diff": diff,
            "rules": rules,
            "hints": self.hints,
        }
        # Number of Chat interactions to have with the model
        n_samples = 5
        chat_interactions = [
            PiranhaGPTChat(holes=prompt_holes) for _ in range(n_samples)
        ]
        try:
            first_round = chat_interactions[0].get_completion(n_samples=n_samples)
        except PiranhaChatException as e:
            logger.debug(
                f"Chat completion failed with {e}. Trying again with a new chat...\n"
            )
            return []
        for i, response in enumerate(first_round):
            # The prompt is identical for all samples, so instead of running it
            # once per chat, sample the OpenAI API once with n_samples
            # completions (cheaper) and hand one response to each chat.
            chat_interactions[i].append_system_message(response)
        return chat_interactions
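
A minimal usage sketch, for context only: the `PiranhaAgent` constructor arguments below are assumptions based on the attributes this method reads (`source_code`, `target_code`, `language`, `hints`), not the documented API; only `create_chats` itself is shown above.

    # Hypothetical usage; the constructor signature is an assumption.
    agent = PiranhaAgent(
        source_code="foo(x);",   # code before the refactoring
        target_code="bar(x);",   # code after the refactoring
        language="java",         # language handed to TemplateParser
        hints="",                # optional free-form hints for the model
    )
    toml_rules = "..."  # statically inferred rules, produced elsewhere
    chats = agent.create_chats(toml_rules)
    if not chats:
        # An empty list signals that the first completion failed; the caller
        # may retry with fresh chats.
        print("Chat completion failed; consider retrying create_chats()")
    else:
        # Each returned chat already carries one sampled model response,
        # appended as a system message, so the caller can continue from there.
        print(f"Prepared {len(chats)} chat interactions")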