in libcloud/utils/files.py
from libcloud.utils.py3 import b

CHUNK_SIZE = 8096  # module-level default chunk size


def read_in_chunks(iterator, chunk_size=None, fill_size=False, yield_empty=False):
"""
Return a generator which yields data in chunks.
:param iterator: An object which implements an iterator interface
or a File like object with read method.
:type iterator: :class:`object` which implements iterator interface.
:param chunk_size: Optional chunk size (defaults to CHUNK_SIZE)
:type chunk_size: ``int``
:param fill_size: If True, make sure chunks are exactly chunk_size in
length (except for last chunk).
:type fill_size: ``bool``
:param yield_empty: If true and iterator returned no data, only yield empty
bytes object
:type yield_empty: ``bool``
TODO: At some point in the future we could use byte arrays here if version
>= Python 3. This should speed things up a bit and reduce memory usage.
"""
    chunk_size = chunk_size or CHUNK_SIZE

    # Prefer the file-like read() interface; fall back to treating the input
    # as a plain iterator
    try:
        get_data = iterator.read
        args = (chunk_size,)
    except AttributeError:
        get_data = next
        args = (iterator,)

    data = b("")
    empty = False

    # Keep looping until the source is exhausted and the buffer is drained
    while not empty or len(data) > 0:
        if not empty:
            try:
                chunk = b(get_data(*args))
                if len(chunk) > 0:
                    data += chunk
                else:
                    empty = True
            except StopIteration:
                empty = True

        if len(data) == 0:
            # Nothing buffered and the source is exhausted: optionally yield
            # a single empty bytes object, then stop
            if empty and yield_empty:
                yield b("")

            return

        if fill_size:
            if empty or len(data) >= chunk_size:
                # Yield a full chunk or, once the source is exhausted, the
                # last chunk, which may be shorter than chunk_size
                yield data[:chunk_size]
                data = data[chunk_size:]
        else:
            yield data
            data = b("")
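
A minimal usage sketch (not part of the source file; the payload, sizes, and
variable names are illustrative). A file-like input goes through the read()
path, a plain iterator goes through next(), and fill_size=True repacks
unevenly sized pieces into fixed-size chunks:

import io

from libcloud.utils.files import read_in_chunks

payload = b"x" * 10000  # illustrative sample data

# File-like object: read(chunk_size) is used under the hood
for chunk in read_in_chunks(io.BytesIO(payload), chunk_size=4096, fill_size=True):
    print(len(chunk))  # 4096, 4096, 1808

# Plain iterator: next() is used, and uneven pieces are repacked
pieces = iter([b"ab", b"cdef", b"g"])
print(list(read_in_chunks(pieces, chunk_size=4, fill_size=True)))
# [b'abcd', b'efg']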