in tensorflow_serving/util/net_http/compression/gzip_zlib.cc [368:420]
// Compresses up to *sourceLen bytes from `source` into `dest`.
// On the first chunk of a run it also emits the gzip header and resets the
// running CRC and uncompressed-byte count. On return:
//   *destLen   = number of bytes actually written to `dest` by THIS call
//   *sourceLen = number of input bytes NOT consumed (0 when fully consumed)
// Returns Z_OK on success, Z_BUF_ERROR when `dest` ran out of space (caller
// should retry with the unconsumed input), or another zlib error code.
int ZLib::CompressAtMostOrAll(Bytef *dest, uLongf *destLen, const Bytef *source,
uLong *sourceLen,
int flush_mode) { // Z_SYNC_FLUSH (incremental) or Z_FINISH (see comment below)
int err;
// CompressInit points comp_stream_ at (source, *sourceLen) / (dest, *destLen).
if ((err = CompressInit(dest, destLen, source, sourceLen)) != Z_OK)
return err;
// Snapshot total_out so we can compute how many bytes we wrote *this chunk*.
uint64_t compressed_size = comp_stream_.total_out;
// Some setup happens only for the first chunk we compress in a run
if (first_chunk_) {
if ((err = WriteGzipHeader()) != Z_OK) return err;
// The gzip header is written outside comp_stream_, so total_out does not
// include it; subtracting here makes the final delta (total_out - snapshot)
// count the header bytes too.
compressed_size -= sizeof(GZIP_HEADER) - 1; // -= is right: adds to size
crc_ = crc32(0, nullptr, 0); // initialize
uncompressed_size_ = 0;
first_chunk_ = false; // so we don't do this again
}
// flush_mode is Z_FINISH for all mode, Z_SYNC_FLUSH for incremental
// compression.
err = deflate(&comp_stream_, flush_mode);
// deflate() decremented avail_in as it consumed input; report the leftover
// back to the caller through *sourceLen.
const uLong source_bytes_consumed = *sourceLen - comp_stream_.avail_in;
*sourceLen = comp_stream_.avail_in;
// Branch order matters below: classify the (err, avail_in, avail_out)
// combination from most-specific to least.
if ((err == Z_STREAM_END || err == Z_OK) && comp_stream_.avail_in == 0 &&
comp_stream_.avail_out != 0) {
// we processed everything ok and the output buffer was large enough.
} else if (err == Z_STREAM_END && comp_stream_.avail_in > 0) {
return Z_BUF_ERROR; // should never happen
} else if (err != Z_OK && err != Z_STREAM_END && err != Z_BUF_ERROR) {
// an error happened
CompressErrorInit();
return err;
} else if (comp_stream_.avail_out == 0) { // not enough space
err = Z_BUF_ERROR;
}
assert(err == Z_OK || err == Z_STREAM_END || err == Z_BUF_ERROR);
// Z_STREAM_END just means the stream finished cleanly; report success.
if (err == Z_STREAM_END) err = Z_OK;
// update the crc and other metadata
uncompressed_size_ += source_bytes_consumed;
compressed_size = comp_stream_.total_out - compressed_size; // delta
*destLen = compressed_size;
// CRC covers only the bytes deflate() actually consumed this call.
crc_ = crc32(crc_, source, source_bytes_consumed);
return err;
}