in modules/proxy/mod_proxy_http.c [1064:1915]
int ap_proxy_http_process_response(proxy_http_req_t *req)
{
apr_pool_t *p = req->p;
request_rec *r = req->r;
conn_rec *c = r->connection;
proxy_worker *worker = req->worker;
proxy_conn_rec *backend = req->backend;
conn_rec *origin = req->origin;
int do_100_continue = req->do_100_continue;
int status;
char *buffer;
char fixed_buffer[HUGE_STRING_LEN];
const char *buf;
char keepchar;
apr_bucket *e;
apr_bucket_brigade *bb;
apr_bucket_brigade *pass_bb;
int len, backasswards;
int interim_response = 0; /* non-zero whilst interim 1xx responses
* are being read. */
apr_size_t response_field_size = 0;
int pread_len = 0;
apr_table_t *save_table;
int backend_broke = 0;
static const char *hop_by_hop_hdrs[] =
{"Keep-Alive", "Proxy-Authenticate", "TE", "Trailer", "Upgrade", NULL};
int i;
const char *te = NULL;
int original_status = r->status;
const char *original_status_line = r->status_line;
const char *proxy_status_line = NULL;
apr_interval_time_t old_timeout = 0;
proxy_dir_conf *dconf = req->dconf;
int proxy_status = OK;
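/* bb holds data as read from the backend; in the body-copy loop below its
* buckets are moved into pass_bb (with their lifetime switched to the
* client connection) before being sent down the output filters.
*/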
bb = apr_brigade_create(p, c->bucket_alloc);
pass_bb = apr_brigade_create(p, c->bucket_alloc);
/* Only use dynamically sized buffer if user specifies ResponseFieldSize */
if (backend->worker->s->response_field_size_set) {
response_field_size = backend->worker->s->response_field_size;
if (response_field_size != HUGE_STRING_LEN)
buffer = apr_pcalloc(p, response_field_size);
else
buffer = fixed_buffer;
}
else {
response_field_size = HUGE_STRING_LEN;
buffer = fixed_buffer;
}
/* Setup for 100-Continue timeout if appropriate */
if (do_100_continue && worker->s->ping_timeout_set) {
apr_socket_timeout_get(backend->sock, &old_timeout);
if (worker->s->ping_timeout != old_timeout) {
apr_status_t rc;
rc = apr_socket_timeout_set(backend->sock, worker->s->ping_timeout);
if (rc != APR_SUCCESS) {
ap_log_rerror(APLOG_MARK, APLOG_ERR, rc, r, APLOGNO(01101)
"could not set 100-Continue timeout");
}
}
}
/* Get response from the remote server, and pass it up the
* filter chain
*/
backend->r = make_fake_req(origin, r);
/* In case anyone needs to know, this is a fake request that is really a
* response.
*/
backend->r->proxyreq = PROXYREQ_RESPONSE;
apr_table_setn(r->notes, "proxy-source-port", apr_psprintf(r->pool, "%hu",
origin->local_addr->port));
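/* Read responses from the backend until the final one: interim (1xx)
* responses are forwarded (or suppressed per policy) and the loop reads
* again, up to AP_MAX_INTERIM_RESPONSES times.
*/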
do {
apr_status_t rc;
const char *upgrade = NULL;
int major = 0, minor = 0;
int toclose = 0;
apr_brigade_cleanup(bb);
rc = ap_proxygetline(backend->tmp_bb, buffer, response_field_size,
backend->r, 0, &len);
if (len == 0) {
/* handle one potential stray CRLF */
rc = ap_proxygetline(backend->tmp_bb, buffer, response_field_size,
backend->r, 0, &len);
}
if (len <= 0) {
ap_log_rerror(APLOG_MARK, APLOG_ERR, rc, r, APLOGNO(01102)
"error reading status line from remote "
"server %s:%d", backend->hostname, backend->port);
if (APR_STATUS_IS_TIMEUP(rc)) {
apr_table_setn(r->notes, "proxy_timedout", "1");
ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01103) "read timeout");
if (do_100_continue) {
return ap_proxyerror(r, HTTP_SERVICE_UNAVAILABLE,
"Timeout on 100-Continue");
}
}
/*
* If this is a reverse proxy request, shut down the connection
* WITHOUT ANY response to trigger a retry by the client
* if allowed (as for idempotent requests).
* BUT currently we should not do this if the request is the
* first request on a keepalive connection, as browsers like
* seamonkey only display an empty page in this case and do
* not retry. We should also not do this on a connection
* which timed out; instead handle it as we normally handle
* timeouts.
*/
if (r->proxyreq == PROXYREQ_REVERSE && c->keepalives &&
!APR_STATUS_IS_TIMEUP(rc)) {
ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01104)
"Closing connection to client because"
" reading from backend server %s:%d failed."
" Number of keepalives %i", backend->hostname,
backend->port, c->keepalives);
ap_proxy_fill_error_brigade(r, HTTP_BAD_GATEWAY, bb, 1);
ap_pass_brigade(r->output_filters, bb);
/* Mark the backend connection for closing */
backend->close = 1;
if (origin->keepalives) {
/* We already had a request on this backend connection and
* might just have run into a keepalive race. Hence we
* think positive and assume that the backend is fine and
* we do not need to signal an error on backend side.
*/
return OK;
}
/*
* This happened on our first request on this connection to the
* backend. This indicates something fishy with the backend.
* Return HTTP_INTERNAL_SERVER_ERROR to signal an unrecoverable
* server error. We do not worry about r->status code and a
* possible error response here as the ap_http_outerror_filter
* will fix all of this for us.
*/
return HTTP_INTERNAL_SERVER_ERROR;
}
if (!c->keepalives) {
ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01105)
"NOT Closing connection to client"
" although reading from backend server %s:%d"
" failed.",
backend->hostname, backend->port);
}
return ap_proxyerror(r, HTTP_BAD_GATEWAY,
"Error reading from remote server");
}
/* XXX: Is this a real header length sent by the remote? */
backend->worker->s->read += len;
/* Is it an HTTP/1 response?
* This is buggy if we ever see an HTTP/1.10
*/
if (apr_date_checkmask(buffer, "HTTP/#.# ###*")) {
major = buffer[5] - '0';
minor = buffer[7] - '0';
/* If not an HTTP/1 message or
* if the status line exceeded the response field size
*/
if ((major != 1) || (len >= response_field_size - 1)) {
return ap_proxyerror(r, HTTP_BAD_GATEWAY,
apr_pstrcat(p, "Corrupt status line returned "
"by remote server: ", buffer, NULL));
}
backasswards = 0;
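/* The status line matched "HTTP/#.# ###*": the 3-digit status code starts
* at offset 9, so temporarily NUL-terminate at offset 12 to parse it.
*/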
keepchar = buffer[12];
buffer[12] = '\0';
proxy_status = atoi(&buffer[9]);
apr_table_setn(r->notes, "proxy-status",
apr_pstrdup(r->pool, &buffer[9]));
if (keepchar != '\0') {
buffer[12] = keepchar;
} else {
/* 2616 requires the space in Status-Line; the origin
* server may have sent one but ap_rgetline will
* have stripped it. */
buffer[12] = ' ';
buffer[13] = '\0';
}
proxy_status_line = apr_pstrdup(p, &buffer[9]);
/* The status out of the front is the same as the status coming in
* from the back, until further notice.
*/
r->status = proxy_status;
r->status_line = proxy_status_line;
ap_log_rerror(APLOG_MARK, APLOG_TRACE3, 0, r,
"Status from backend: %d", proxy_status);
/* read the headers. */
/* N.B. for HTTP/1.0 clients, we have to fold line-wrapped headers */
/* Also, take care with headers with multiple occurrences. */
/* First, tuck away all already existing cookies */
save_table = apr_table_make(r->pool, 2);
apr_table_do(addit_dammit, save_table, r->headers_out,
"Set-Cookie", NULL);
/* shove the headers direct into r->headers_out */
rc = ap_proxy_read_headers(r, backend->r, buffer, response_field_size,
origin, &pread_len);
if (rc != APR_SUCCESS || r->headers_out == NULL) {
ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0, r, APLOGNO(01106)
"bad HTTP/%d.%d header returned by %s (%s)",
major, minor, r->uri, r->method);
backend->close = 1;
/*
* ap_send_error relies on headers_out being present. We are
* in a bad position here, so force everything we send out to
* have nothing to do with the incoming packet.
*/
r->headers_out = apr_table_make(r->pool,1);
r->status = HTTP_BAD_GATEWAY;
r->status_line = "bad gateway";
return r->status;
}
/* Now, add in the just read cookies */
apr_table_do(addit_dammit, save_table, r->headers_out,
"Set-Cookie", NULL);
/* and now load 'em all in */
if (!apr_is_empty_table(save_table)) {
apr_table_unset(r->headers_out, "Set-Cookie");
r->headers_out = apr_table_overlay(r->pool,
r->headers_out,
save_table);
}
/*
* Save a possible Transfer-Encoding header as we need it later for
* ap_http_filter to know where to end.
*/
te = apr_table_get(r->headers_out, "Transfer-Encoding");
/* can't have both Content-Length and Transfer-Encoding */
if (te && apr_table_get(r->headers_out, "Content-Length")) {
/*
* 2616 section 4.4, point 3: "if both Transfer-Encoding
* and Content-Length are received, the latter MUST be
* ignored";
*
* To help mitigate HTTP Splitting, unset Content-Length
* and shut down the backend server connection
* XXX: We ought to treat such a response as uncacheable
*/
apr_table_unset(r->headers_out, "Content-Length");
ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01107)
"server %s:%d returned Transfer-Encoding"
" and Content-Length",
backend->hostname, backend->port);
backend->close = 1;
}
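/* A 101 (Switching Protocols) response is only acceptable if the backend's
* Upgrade header matches the protocol this request asked to upgrade to;
* anything else is reported as a bad gateway.
*/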
upgrade = apr_table_get(r->headers_out, "Upgrade");
if (proxy_status == HTTP_SWITCHING_PROTOCOLS) {
if (!upgrade || !req->upgrade || (strcasecmp(req->upgrade,
upgrade) != 0)) {
return ap_proxyerror(r, HTTP_BAD_GATEWAY,
apr_pstrcat(p, "Unexpected Upgrade: ",
upgrade ? upgrade : "n/a",
" (expecting ",
req->upgrade ? req->upgrade
: "n/a", ")",
NULL));
}
backend->close = 1;
}
/* strip connection listed hop-by-hop headers from response */
toclose = ap_proxy_clear_connection_fn(r, r->headers_out);
if (toclose) {
backend->close = 1;
if (toclose < 0) {
return ap_proxyerror(r, HTTP_BAD_GATEWAY,
"Malformed connection header");
}
}
if ((buf = apr_table_get(r->headers_out, "Content-Type"))) {
ap_set_content_type(r, apr_pstrdup(p, buf));
}
if (!ap_is_HTTP_INFO(proxy_status)) {
ap_proxy_pre_http_request(origin, backend->r);
}
/* Clear hop-by-hop headers */
for (i=0; hop_by_hop_hdrs[i]; ++i) {
apr_table_unset(r->headers_out, hop_by_hop_hdrs[i]);
}
/* Delete warnings with wrong date */
r->headers_out = ap_proxy_clean_warnings(p, r->headers_out);
/* handle Via header in response */
if (req->sconf->viaopt != via_off
&& req->sconf->viaopt != via_block) {
const char *server_name = ap_get_server_name(r);
/* If USE_CANONICAL_NAME_OFF was configured for the proxy virtual host,
* then the server name returned by ap_get_server_name() is the
* origin server name (which does not make much sense in Via: headers),
* so we use the proxy vhost's name instead.
*/
if (server_name == r->hostname)
server_name = r->server->server_hostname;
/* create a "Via:" response header entry and merge it */
apr_table_addn(r->headers_out, "Via",
(req->sconf->viaopt == via_full)
? apr_psprintf(p, "%d.%d %s%s (%s)",
HTTP_VERSION_MAJOR(r->proto_num),
HTTP_VERSION_MINOR(r->proto_num),
server_name,
req->server_portstr,
AP_SERVER_BASEVERSION)
: apr_psprintf(p, "%d.%d %s%s",
HTTP_VERSION_MAJOR(r->proto_num),
HTTP_VERSION_MINOR(r->proto_num),
server_name,
req->server_portstr)
);
}
/* cancel keepalive if HTTP/1.0 or less */
if ((major < 1) || (minor < 1)) {
backend->close = 1;
origin->keepalive = AP_CONN_CLOSE;
}
else {
/*
* Keep track of the number of keepalives we processed on this
* connection.
*/
origin->keepalives++;
}
} else {
/* an http/0.9 response */
backasswards = 1;
r->status = proxy_status = 200;
r->status_line = "200 OK";
backend->close = 1;
}
if (ap_is_HTTP_INFO(proxy_status)) {
const char *policy = NULL;
/* RFC2616 tells us to forward this.
*
* OTOH, an interim response here may mean the backend
* is playing sillybuggers. The Client didn't ask for
* it within the defined HTTP/1.1 mechanisms, and if
* it's an extension, it may also be unsupported by us.
*
* There's also the possibility that changing existing
* behaviour here might break something.
*
* So let's make it configurable.
*
* We need to force "r->expecting_100 = 1" for RFC behaviour
* otherwise ap_send_interim_response() does nothing when
* the client did not ask for 100-continue.
*
* 101 Switching Protocols has its own configuration, which
* should not be interfered with by "proxy-interim-response".
*/
if (proxy_status != HTTP_SWITCHING_PROTOCOLS) {
policy = apr_table_get(r->subprocess_env,
"proxy-interim-response");
}
ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, r,
"HTTP: received interim %d response (policy: %s)",
r->status, policy ? policy : "n/a");
if (!policy
|| (!strcasecmp(policy, "RFC")
&& (proxy_status != HTTP_CONTINUE
|| (r->expecting_100 = 1)))) {
switch (proxy_status) {
case HTTP_SWITCHING_PROTOCOLS:
AP_DEBUG_ASSERT(upgrade != NULL);
apr_table_setn(r->headers_out, "Connection", "Upgrade");
apr_table_setn(r->headers_out, "Upgrade",
apr_pstrdup(p, upgrade));
break;
}
ap_send_interim_response(r, 1);
}
/* FIXME: refine this to be able to specify per-response-status
* policies and maybe also add option to bail out with 502
*/
else if (strcasecmp(policy, "Suppress")) {
ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0, r, APLOGNO(01108)
"undefined proxy interim response policy");
}
interim_response++;
}
else {
interim_response = 0;
}
/* If we still do 100-continue (end-to-end or ping), either the
* current response is the expected "100 Continue" and we are done
* with this mode, or this is another interim response and we'll wait
* for the next one, or this is a final response and hence the backend
* did not honor our expectation.
*/
if (do_100_continue && (!interim_response
|| proxy_status == HTTP_CONTINUE)) {
/* RFC 7231 - Section 5.1.1 - Expect - Requirement for servers
* A server that responds with a final status code before
* reading the entire message body SHOULD indicate in that
* response whether it intends to close the connection or
* continue reading and discarding the request message.
*
* So, if this response is not an interim 100 Continue, we can
* avoid sending the request body if the backend responded with
* "Connection: close" or HTTP < 1.1, and either let the core
* discard it or the caller try another balancer member with the
* same body (given status 503, though not implemented yet).
*/
int do_send_body = (proxy_status == HTTP_CONTINUE
|| (!toclose && major > 0 && minor > 0));
/* Reset to old timeout iff we've adjusted it. */
if (worker->s->ping_timeout_set) {
apr_socket_timeout_set(backend->sock, old_timeout);
}
ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(10153)
"HTTP: %s100 continue sent by %pI (%s): "
"%ssending body (response: HTTP/%i.%i %s)",
proxy_status != HTTP_CONTINUE ? "no " : "",
backend->addr,
backend->hostname ? backend->hostname : "",
do_send_body ? "" : "not ",
major, minor, proxy_status_line);
if (do_send_body) {
status = send_continue_body(req);
if (status != OK) {
return status;
}
}
else {
/* If we don't read the client connection any further, the
* pending data means it should be "Connection: close"d to
* prevent reuse. We don't set c->keepalive = AP_CONN_CLOSE
* outright here though, because error_override or a potential
* retry on another backend could finally read that data and
* finalize the request processing, making keep-alive possible.
* So we leave r->expecting_100 alone; ap_set_keepalive() will
* do the right thing according to the final response and any
* later update of r->expecting_100.
*/
}
/* Once only! */
do_100_continue = 0;
}
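/* The backend agreed to switch protocols: stop speaking HTTP on both
* sides and splice the client and backend connections into an opaque
* tunnel, either asynchronously (scheduled by the MPM when idle) or
* synchronously within this thread.
*/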
if (proxy_status == HTTP_SWITCHING_PROTOCOLS) {
apr_status_t rv;
/* If we didn't send the full body yet, do it now */
if (do_100_continue) {
r->expecting_100 = 0;
status = send_continue_body(req);
if (status != OK) {
return status;
}
}
ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(10239)
"HTTP: tunneling protocol %s", upgrade);
rv = ap_proxy_tunnel_create(&req->tunnel, r, origin, upgrade);
if (rv != APR_SUCCESS) {
ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(10240)
"can't create tunnel for %s", upgrade);
return HTTP_INTERNAL_SERVER_ERROR;
}
r->status = HTTP_SWITCHING_PROTOCOLS;
req->proto = upgrade;
if (req->can_go_async) {
/* Let the MPM schedule the work when idle */
req->state = PROXY_HTTP_TUNNELING;
req->tunnel->timeout = dconf->async_delay;
proxy_http_async_cb(req);
return SUSPENDED;
}
/* Let proxy tunnel forward everything within this thread */
req->tunnel->timeout = req->idle_timeout;
status = ap_proxy_tunnel_run(req->tunnel);
/* Report bytes exchanged by the backend */
backend->worker->s->read +=
ap_proxy_tunnel_conn_bytes_in(req->tunnel->origin);
backend->worker->s->transferred +=
ap_proxy_tunnel_conn_bytes_out(req->tunnel->origin);
/* We are done with both connections */
r->connection->keepalive = AP_CONN_CLOSE;
backend->close = 1;
return DONE;
}
if (interim_response) {
/* Already forwarded above, read next response */
continue;
}
/* Moved the fixups of Date headers and those affected by
* ProxyPassReverse/etc from here to ap_proxy_read_headers
*/
/* PR 41646: get HEAD right with ProxyErrorOverride */
if (ap_proxy_should_override(dconf, proxy_status)) {
if (proxy_status == HTTP_UNAUTHORIZED) {
const char *buf;
const char *wa = "WWW-Authenticate";
if ((buf = apr_table_get(r->headers_out, wa))) {
apr_table_set(r->err_headers_out, wa, buf);
} else {
ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01109)
"origin server sent 401 without "
"WWW-Authenticate header");
}
}
/* clear r->status for override error, otherwise ErrorDocument
* thinks that this is a recursive error, and doesn't find the
* custom error page
*/
r->status = HTTP_OK;
/* Discard body, if one is expected (not HEAD request) */
if (!r->header_only) {
const char *tmp;
/* Add the minimal headers needed to let the http_in filter
* detect the end of the body without waiting for a timeout. */
if ((tmp = apr_table_get(r->headers_out, "Transfer-Encoding"))) {
apr_table_set(backend->r->headers_in, "Transfer-Encoding", tmp);
}
else if ((tmp = apr_table_get(r->headers_out, "Content-Length"))) {
apr_table_set(backend->r->headers_in, "Content-Length", tmp);
}
else if (te) {
apr_table_set(backend->r->headers_in, "Transfer-Encoding", te);
}
ap_discard_request_body(backend->r);
}
/*
* prevent proxy_handler() from treating this as an
* internal error.
*/
apr_table_setn(r->notes, "proxy-error-override", "1");
return proxy_status;
}
/* Forward the Upgrade header back if it matches the configured one(s);
* this may be an HTTP_UPGRADE_REQUIRED response or some other status
* where Upgrade makes sense to negotiate the protocol by other means.
*/
if (upgrade && ap_proxy_worker_can_upgrade(p, worker, upgrade,
(*req->proto == 'w')
? "WebSocket" : NULL)) {
apr_table_setn(r->headers_out, "Connection", "Upgrade");
apr_table_setn(r->headers_out, "Upgrade", apr_pstrdup(p, upgrade));
}
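/* Flag that from here on what we send counts as response body. */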
r->sent_bodyct = 1;
/*
* Is it an HTTP/0.9 response or did we maybe preread the 1st line of
* the response? If so, load the extra data. These are 2 mutually
* exclusive possibilities that just happen to require very
* similar behavior.
*/
if (backasswards || pread_len) {
apr_ssize_t cntr = (apr_ssize_t)pread_len;
if (backasswards) {
/*@@@FIXME:
* At this point in response processing of a 0.9 response,
* we don't know yet whether data is binary or not.
* mod_charset_lite will get control later on, so it cannot
* decide on the conversion of this buffer full of data.
* However, chances are that we are not really talking to an
* HTTP/0.9 server, but to some different protocol, therefore
* the best guess IMHO is to always treat the buffer as "text/x":
*/
ap_xlate_proto_to_ascii(buffer, len);
cntr = (apr_ssize_t)len;
}
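/* Hand any prefetched bytes (the HTTP/0.9 "status line" that is really
* body, or data overread past the headers) to the brigade so they are
* sent ahead of whatever is read from the backend next.
*/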
e = apr_bucket_heap_create(buffer, cntr, NULL, c->bucket_alloc);
APR_BRIGADE_INSERT_TAIL(bb, e);
}
/* send body - but only if a body is expected */
if (!r->header_only && !AP_STATUS_IS_HEADER_ONLY(proxy_status)) {
apr_read_type_e mode;
int finish;
/* We need to copy the output headers and treat them as input
* headers as well. BUT we need to do this before we remove the
* Transfer-Encoding header, so that it is preserved for
* ap_http_filter to know where the body ends.
*/
backend->r->headers_in = apr_table_clone(backend->r->pool, r->headers_out);
/*
* Restore Transfer-Encoding header from response if we saved
* one before and there is none left. We need it for the
* ap_http_filter. See above.
*/
if (te && !apr_table_get(backend->r->headers_in, "Transfer-Encoding")) {
apr_table_add(backend->r->headers_in, "Transfer-Encoding", te);
}
apr_table_unset(r->headers_out,"Transfer-Encoding");
ap_log_rerror(APLOG_MARK, APLOG_TRACE3, 0, r, "start body send");
/* read the body, pass it to the output filters */
/* Handle the case where the error document is itself reverse
* proxied and was successful. We must maintain any previous
* error status so that an underlying error (eg HTTP_NOT_FOUND)
* doesn't become an HTTP_OK.
*/
if (ap_proxy_should_override(dconf, original_status)) {
r->status = original_status;
r->status_line = original_status_line;
}
mode = APR_NONBLOCK_READ;
finish = FALSE;
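/* Copy the response body to the client: start with non-blocking reads
* from the backend and, whenever a read would block, flush what we have
* to the client and retry that read in blocking mode before switching
* back to non-blocking.
*/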
do {
apr_off_t readbytes;
apr_status_t rv;
rv = ap_get_brigade(backend->r->input_filters, bb,
AP_MODE_READBYTES, mode,
req->sconf->io_buffer_size);
/* ap_get_brigade will return success with an empty brigade
* for a non-blocking read which would block: */
if (mode == APR_NONBLOCK_READ
&& (APR_STATUS_IS_EAGAIN(rv)
|| (rv == APR_SUCCESS && APR_BRIGADE_EMPTY(bb)))) {
/* flush to the client and switch to blocking mode */
e = apr_bucket_flush_create(c->bucket_alloc);
APR_BRIGADE_INSERT_TAIL(bb, e);
if (ap_pass_brigade(r->output_filters, bb)
|| c->aborted) {
backend->close = 1;
break;
}
apr_brigade_cleanup(bb);
mode = APR_BLOCK_READ;
continue;
}
if (rv == APR_EOF) {
backend->close = 1;
break;
}
if (rv != APR_SUCCESS || APR_BRIGADE_EMPTY(bb)) {
int error_status = HTTP_BAD_GATEWAY;
if (rv == APR_ENOSPC) {
ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(02475)
"Response chunk/line was too large to parse");
}
else if (rv == APR_ENOTIMPL) {
ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(02476)
"Response Transfer-Encoding was not recognised");
}
else if (rv != APR_SUCCESS) {
ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01110)
"Network error reading response");
}
else {
ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(10293)
"Unexpected empty data reading response");
error_status = HTTP_INTERNAL_SERVER_ERROR;
}
/* In this case, we are in real trouble because
* our backend bailed on us. Given we're half way
* through a response, our only option is to
* disconnect the client too.
*/
apr_brigade_cleanup(bb);
ap_proxy_fill_error_brigade(r, error_status, bb, 1);
ap_pass_brigade(r->output_filters, bb);
backend_broke = 1;
backend->close = 1;
break;
}
/* next time try a non-blocking read */
mode = APR_NONBLOCK_READ;
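/* Propagate any trailers received from the backend (e.g. with chunked
* encoding) to the client-facing response.
*/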
if (!apr_is_empty_table(backend->r->trailers_in)) {
apr_table_do(add_trailers, r->trailers_out,
backend->r->trailers_in, NULL);
apr_table_clear(backend->r->trailers_in);
}
apr_brigade_length(bb, 0, &readbytes);
backend->worker->s->read += readbytes;
#if DEBUGGING
{
ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01111)
"readbytes: %#x", readbytes);
}
#endif
/* Switch the allocator lifetime of the buckets */
rv = ap_proxy_buckets_lifetime_transform(r, bb, pass_bb);
if (rv != APR_SUCCESS) {
/* Likewise, halfway through a response our only option is
* to notify the output filters and then disconnect the
* client and backend.
*/
if (!APR_BRIGADE_EMPTY(pass_bb)) {
/* Pass what we have still */
ap_pass_brigade(r->output_filters, pass_bb);
apr_brigade_cleanup(pass_bb);
}
ap_proxy_fill_error_brigade(r, HTTP_INTERNAL_SERVER_ERROR,
pass_bb, 1);
ap_pass_brigade(r->output_filters, pass_bb);
apr_brigade_cleanup(pass_bb);
backend_broke = 1;
backend->close = 1;
break;
}
/* found the last brigade? */
if (APR_BUCKET_IS_EOS(APR_BRIGADE_LAST(pass_bb))) {
/* signal that we must leave */
finish = TRUE;
/* the brigade may contain transient buckets that contain
* data that lives only as long as the backend connection.
* Force a setaside so these transient buckets become heap
* buckets that live as long as the request.
*/
for (e = APR_BRIGADE_FIRST(pass_bb); e
!= APR_BRIGADE_SENTINEL(pass_bb); e
= APR_BUCKET_NEXT(e)) {
apr_bucket_setaside(e, r->pool);
}
/* finally it is safe to clean up the brigade from the
* connection pool, as we have forced a setaside on all
* buckets.
*/
apr_brigade_cleanup(bb);
/* make sure we release the backend connection as soon
* as we know we are done, so that the backend isn't
* left waiting for a slow client to eventually
* acknowledge the data.
*/
proxy_run_detach_backend(r, backend);
ap_proxy_release_connection(backend->worker->s->scheme,
backend, r->server);
/* Ensure that the backend is not reused */
req->backend = NULL;
}
/* try send what we read */
if (ap_pass_brigade(r->output_filters, pass_bb) != APR_SUCCESS
|| c->aborted) {
/* Ack! Phbtt! Die! User aborted! */
/* Only close backend if we haven't got all from the
* backend. Furthermore if req->backend is NULL it is no
* longer safe to fiddle around with backend as it might
* be already in use by another thread.
*/
if (req->backend) {
/* this causes socket close below */
req->backend->close = 1;
}
finish = TRUE;
}
/* make sure we always clean up after ourselves */
apr_brigade_cleanup(pass_bb);
apr_brigade_cleanup(bb);
} while (!finish);
ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, r, "end body send");
}
else {
ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, r, "header only");
/* make sure we release the backend connection as soon
* as we know we are done, so that the backend isn't
* left waiting for a slow client to eventually
* acknowledge the data.
*/
proxy_run_detach_backend(r, backend);
ap_proxy_release_connection(backend->worker->s->scheme,
backend, r->server);
/* Ensure that the backend is not reused */
req->backend = NULL;
/* Pass EOS bucket down the filter chain. */
e = apr_bucket_eos_create(c->bucket_alloc);
APR_BRIGADE_INSERT_TAIL(bb, e);
ap_pass_brigade(r->output_filters, bb);
apr_brigade_cleanup(bb);
}
} while (interim_response && (interim_response < AP_MAX_INTERIM_RESPONSES));
/* We have to clean up the bb brigade, because buckets inserted into it
* could have been created from the scpool and that pool can be freed
* before this brigade. */
apr_brigade_cleanup(bb);
/* See define of AP_MAX_INTERIM_RESPONSES for why */
if (interim_response >= AP_MAX_INTERIM_RESPONSES) {
return ap_proxyerror(r, HTTP_BAD_GATEWAY,
apr_psprintf(p,
"Too many (%d) interim responses from origin server",
interim_response));
}
/* If our connection with the client is to be aborted, return DONE. */
if (c->aborted || backend_broke) {
return DONE;
}
return OK;
}