chunking/chunkers/nl2sql_chunker.py [30:42]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    def __init__(self, data, max_chunk_size=None, token_overlap=None):
        """
        Initialize the NL2SQLChunker and resolve chunking parameters.

        Args:
            data (str): The JSON content to be chunked.
            max_chunk_size (int, optional): Maximum number of tokens per chunk.
                Defaults to the NUM_TOKENS environment variable, or 2048 if
                that variable is unset.
            token_overlap (int, optional): Number of tokens shared between
                consecutive chunks. Defaults to 100.
        """
        super().__init__(data)
        # Explicit None checks so a legitimate 0 (e.g. zero overlap) is not
        # silently replaced by the default, as `or` would do.
        if max_chunk_size is None:
            max_chunk_size = int(os.getenv("NUM_TOKENS", "2048"))
        self.max_chunk_size = max_chunk_size
        self.token_overlap = 100 if token_overlap is None else token_overlap

    def get_chunks(self):
        chunks = []
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -



chunking/chunkers/transcription_chunker.py [46:58]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    def __init__(self, data, max_chunk_size=None, token_overlap=None):
        """
        Initialize the TranscriptionChunker and resolve chunking parameters.

        Args:
            data (str): The document content to be chunked.
            max_chunk_size (int, optional): Maximum number of tokens per chunk.
                Defaults to the NUM_TOKENS environment variable, or 2048 if
                that variable is unset.
            token_overlap (int, optional): Number of tokens shared between
                consecutive chunks. Defaults to 100.
        """
        super().__init__(data)
        # Explicit None checks so a legitimate 0 (e.g. zero overlap) is not
        # silently replaced by the default, as `or` would do.
        if max_chunk_size is None:
            max_chunk_size = int(os.getenv("NUM_TOKENS", "2048"))
        self.max_chunk_size = max_chunk_size
        self.token_overlap = 100 if token_overlap is None else token_overlap

    def get_chunks(self):           
        chunks = [] 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -



