def _read_in_arrow()

in pyspark_huggingface/compat/datasource.py [0:0]


    from typing import TYPE_CHECKING, Iterator
    if TYPE_CHECKING:
        from pyarrow import RecordBatch

    def _read_in_arrow(batches: Iterator["RecordBatch"], arrow_pickler, hf_reader) -> Iterator["RecordBatch"]:
        # Each row of an incoming batch is one pickled partition descriptor:
        # deserialize it, then stream the partition's data back as Arrow batches.
        for batch in batches:
            for record in batch.to_pylist():
                partition = arrow_pickler.loads(record)
                yield from hf_reader.read(partition)
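
A minimal sketch of how this adapter can be exercised in isolation, outside Spark. `DictPickler` and `FakeReader` are hypothetical stand-ins for the compat layer's real `arrow_pickler` and `hf_reader`, assuming the pickler's `loads` accepts the row dicts produced by `RecordBatch.to_pylist()`:

    import pickle

    import pyarrow as pa

    class DictPickler:
        # Hypothetical pickler: unpickles the single "partition" field of a row dict.
        @staticmethod
        def loads(record):
            return pickle.loads(record["partition"])

    class FakeReader:
        # Hypothetical reader: expands a partition (here just an int) into one batch.
        def read(self, partition):
            yield pa.RecordBatch.from_pydict({"value": list(range(partition))})

    batch = pa.RecordBatch.from_pydict({"partition": [pickle.dumps(n) for n in (2, 3)]})
    for out in _read_in_arrow(iter([batch]), DictPickler(), FakeReader()):
        print(out.to_pylist())  # a 2-row batch, then a 3-row batch

In the compat layer itself, a generator like this is typically fed by Spark's `DataFrame.mapInArrow`, which supplies the iterator of record batches on each executor.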