src/co_op_translator/core/llm/providers/azure/markdown_translator.py [44:62]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
            )
        )
        return kernel

    async def _run_prompt(self, prompt: str, index: int, total: int) -> str:
        """
        Execute a single translation prompt using Azure OpenAI.

        Args:
            prompt: The translation prompt
            index: Current chunk index
            total: Total number of chunks

        Returns:
            str: Translated text
        """
        try:
            # Initialize prompt execution settings (shared across all prompts)
            req_settings = self.kernel.get_prompt_execution_settings_from_service_id(
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
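Both excerpts stop at line 62 of their source files, mid-call into get_prompt_execution_settings_from_service_id, so the remainder of the method is not shown here. For orientation only, below is a minimal sketch of how such a method could be completed with Semantic Kernel's invoke_prompt API; the "chat" service id, the logger, and the error handling are illustrative assumptions, not the repository's actual implementation.

import logging

from semantic_kernel import Kernel
from semantic_kernel.functions import KernelArguments

logger = logging.getLogger(__name__)


async def run_prompt_sketch(kernel: Kernel, prompt: str, index: int, total: int) -> str:
    """Illustrative stand-in for _run_prompt; not the repository's actual code."""
    try:
        # Fetch the execution settings registered under the assumed "chat" service id.
        req_settings = kernel.get_prompt_execution_settings_from_service_id(
            service_id="chat"
        )
        # Invoke the prompt through the kernel and return its plain-text result.
        result = await kernel.invoke_prompt(
            prompt=prompt,
            arguments=KernelArguments(settings=req_settings),
        )
        return str(result)
    except Exception:
        logger.warning("Translation of chunk %d/%d failed", index, total)
        raise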



src/co_op_translator/core/llm/providers/openai/markdown_translator.py [44:62]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
            )
        )
        return kernel

    async def _run_prompt(self, prompt: str, index: int, total: int) -> str:
        """
        Execute a single translation prompt using OpenAI.

        Args:
            prompt: The translation prompt
            index: Current chunk index
            total: Total number of chunks

        Returns:
            str: Translated text
        """
        try:
            # Initialize prompt execution settings (shared across all prompts)
            req_settings = self.kernel.get_prompt_execution_settings_from_service_id(
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
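The index and total parameters in both docstrings imply that the caller splits a markdown document into chunks and issues one prompt per chunk. A hedged sketch of such a driver follows, assuming a translator object that exposes _run_prompt and a hypothetical build_prompt helper (neither name is taken from the repository).

import asyncio


def build_prompt(chunk: str, language: str) -> str:
    """Placeholder prompt assembly; the real prompt template lives in the repository."""
    return f"Translate the following markdown into {language}:\n\n{chunk}"


async def translate_chunks(translator, chunks: list[str], language: str) -> str:
    """Hypothetical driver: translate every chunk concurrently and preserve order."""
    total = len(chunks)
    tasks = [
        translator._run_prompt(build_prompt(chunk, language), index, total)
        for index, chunk in enumerate(chunks, start=1)
    ]
    translated = await asyncio.gather(*tasks)
    return "\n".join(translated)

Because asyncio.gather returns results in the order the tasks were submitted, the translated pieces can be re-joined directly without any extra bookkeeping.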



