in src/aws-cpp-sdk-core/source/client/RequestCompression.cpp [81:197]
iostream_outcome Aws::Client::RequestCompression::compress(std::shared_ptr<Aws::IOStream> input,
                                                            const CompressionAlgorithm &algorithm) const
{
#ifdef ENABLED_ZLIB_REQUEST_COMPRESSION
  if (algorithm == CompressionAlgorithm::GZIP)
  {
    // Calculate the stream size
    input->seekg(0, input->end);
    size_t streamSize = input->tellg();
    input->seekg(0, input->beg);
    AWS_LOGSTREAM_TRACE(AWS_REQUEST_COMPRESSION_LOG_TAG, "Compressing request of " << streamSize << " bytes.");
    // Prepare the output stream
    std::shared_ptr<Aws::IOStream> output = Aws::MakeShared<Aws::StringStream>(AWS_REQUEST_COMPRESSION_ALLOCATION_TAG);
    if (!output)
    {
      AWS_LOGSTREAM_ERROR(AWS_REQUEST_COMPRESSION_LOG_TAG, "Failed to allocate output stream while compressing");
      return false;
    }
    // Prepare zlib to compress
    int ret = Z_NULL;
    int flush = Z_NO_FLUSH;
    z_stream strm = {};
    auto in = Aws::MakeUniqueArray<unsigned char>(ZLIB_CHUNK, AWS_REQUEST_COMPRESSION_ALLOCATION_TAG);
    if (!in)
    {
      AWS_LOGSTREAM_ERROR(AWS_REQUEST_COMPRESSION_LOG_TAG, "Failed to allocate input buffer while compressing");
      return false;
    }
    auto out = Aws::MakeUniqueArray<unsigned char>(ZLIB_CHUNK, AWS_REQUEST_COMPRESSION_ALLOCATION_TAG);
    if (!out)
    {
      AWS_LOGSTREAM_ERROR(AWS_REQUEST_COMPRESSION_LOG_TAG, "Failed to allocate output buffer while compressing");
      return false;
    }
    // Prepare allocators
#ifdef USE_AWS_MEMORY_MANAGEMENT
    strm.zalloc = (void *(*)(void *, unsigned, unsigned)) aws_zalloc;
    strm.zfree = (void (*)(void *, void *)) aws_zfree;
#else
    strm.zalloc = Z_NULL;
    strm.zfree = Z_NULL;
#endif
    strm.opaque = Z_NULL;
    const int MAX_WINDOW_GZIP = 31; // windowBits 31 = 15 (max window) + 16 (gzip wrapper)
    const int DEFAULT_MEM_LEVEL_USAGE = 8;
    ret = deflateInit2(&strm, Z_DEFAULT_COMPRESSION, Z_DEFLATED, MAX_WINDOW_GZIP, DEFAULT_MEM_LEVEL_USAGE, Z_DEFAULT_STRATEGY);
    if (ret != Z_OK)
    {
      AWS_LOGSTREAM_ERROR(AWS_REQUEST_COMPRESSION_LOG_TAG, "Failed to initialize zlib for compression");
      return false;
    }
    // Add one to the stream size so the final read attempt hits EOF and triggers the flush.
    streamSize++;
    size_t toRead = 0;
    // Compress
    do {
      toRead = std::min(streamSize, ZLIB_CHUNK);
      // Fill the input buffer
      if (!input->read(reinterpret_cast<char *>(in.get()), toRead))
      {
        if (input->eof())
        {
          // Last read: flush the remaining data when exiting the loop
          flush = Z_FINISH;
        }
        else
        {
          AWS_LOGSTREAM_ERROR(AWS_REQUEST_COMPRESSION_LOG_TAG, "Compress request failed to read from input stream");
          deflateEnd(&strm);
          return false;
        }
      }
      assert(streamSize >= toRead);
      streamSize -= toRead; // left to read
      strm.avail_in = static_cast<uInt>((flush == Z_FINISH) ? toRead - 1 : toRead); // skip the extra EOF byte if included
      strm.next_in = in.get();
      do
      {
        // Run deflate on the buffers
        strm.avail_out = ZLIB_CHUNK;
        strm.next_out = out.get();
        ret = deflate(&strm, flush);
        // Write the produced output
        assert(ZLIB_CHUNK >= strm.avail_out);
        unsigned output_size = ZLIB_CHUNK - strm.avail_out;
        if (!output->write(reinterpret_cast<char *>(out.get()), output_size))
        {
          AWS_LOGSTREAM_ERROR(AWS_REQUEST_COMPRESSION_LOG_TAG, "Compressed request failed to write to output stream");
          deflateEnd(&strm);
          return false;
        }
      } while (strm.avail_out == 0);
      assert(strm.avail_in == 0); // All input was consumed
    } while (flush != Z_FINISH);
    assert(ret == Z_STREAM_END); // Stream completed
    AWS_LOGSTREAM_TRACE(AWS_REQUEST_COMPRESSION_LOG_TAG, "Compressed request to: " << strm.total_out << " bytes");
    deflateEnd(&strm);
    return output;
  }
  else
  {
    AWS_LOGSTREAM_ERROR(AWS_REQUEST_COMPRESSION_LOG_TAG, "Compression requested for an unsupported algorithm: " << GetCompressionAlgorithmId(algorithm));
    return false;
  }
#else
  // Compression support was not compiled in; always fail calls to this method.
  AWS_LOGSTREAM_ERROR(AWS_REQUEST_COMPRESSION_LOG_TAG, "Compress request requested in runtime without support: " << GetCompressionAlgorithmId(algorithm));
  AWS_UNREFERENCED_PARAM(input); // silencing unused parameter warning
  return false;
#endif
}
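
For reference, the same chunked gzip-deflate pattern can be exercised outside the SDK. The sketch below is a hypothetical, self-contained equivalent built only on plain zlib and standard streams; the gzip_compress name, the 16 KiB chunk size, and the std::istream/std::ostream interfaces are illustrative assumptions, not SDK APIs. It mirrors the structure above: deflateInit2 with windowBits 31 for a gzip wrapper, an outer loop that reads input chunks and switches to Z_FINISH at EOF, and an inner loop that drains deflate output until it no longer fills the output buffer.

// Minimal standalone sketch of the chunked gzip-deflate loop (assumptions noted above).
#include <zlib.h>
#include <cassert>
#include <istream>
#include <ostream>

static bool gzip_compress(std::istream &input, std::ostream &output)
{
  static const size_t CHUNK = 16384;
  unsigned char in[CHUNK];
  unsigned char out[CHUNK];

  z_stream strm = {};
  if (deflateInit2(&strm, Z_DEFAULT_COMPRESSION, Z_DEFLATED, 31 /* gzip */, 8,
                   Z_DEFAULT_STRATEGY) != Z_OK)
  {
    return false;
  }

  int flush = Z_NO_FLUSH;
  int ret = Z_OK;
  do
  {
    // Read a chunk; hitting EOF on the last read switches the flush mode to Z_FINISH.
    input.read(reinterpret_cast<char *>(in), CHUNK);
    if (input.bad())
    {
      deflateEnd(&strm);
      return false;
    }
    flush = input.eof() ? Z_FINISH : Z_NO_FLUSH;
    strm.avail_in = static_cast<uInt>(input.gcount());
    strm.next_in = in;

    // Drain deflate output until it stops filling the whole output buffer.
    do
    {
      strm.avail_out = CHUNK;
      strm.next_out = out;
      ret = deflate(&strm, flush);
      output.write(reinterpret_cast<char *>(out), CHUNK - strm.avail_out);
    } while (strm.avail_out == 0);
  } while (flush != Z_FINISH);

  assert(ret == Z_STREAM_END); // deflate reports the gzip stream is complete
  deflateEnd(&strm);
  return output.good();
}

A caller could, for example, feed a std::stringstream holding the request payload through gzip_compress and compare the compressed size against a minimum-size threshold before deciding to send the body compressed.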