in modules/proxy/mod_proxy_http.c [1926:2189]
static int proxy_http_handler(request_rec *r, proxy_worker *worker,
                              proxy_server_conf *conf,
                              char *url, const char *proxyname,
                              apr_port_t proxyport)
{
    int status;
    const char *scheme;
    const char *u = url;
    proxy_http_req_t *req = NULL;
    proxy_conn_rec *backend = NULL;
    apr_bucket_brigade *input_brigade = NULL;
    int mpm_can_poll = 0;
    int is_ssl = 0;
    conn_rec *c = r->connection;
    proxy_dir_conf *dconf;
    int retry = 0;
    char *locurl = url;
    int toclose = 0;
    /*
     * Allocate from the request pool so that everything created here
     * is released when the request completes (and cannot leak into a
     * longer-lived pool).
     */
    apr_pool_t *p = r->pool;
    apr_uri_t *uri;
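
    /* Parse the scheme off the URL; get_url_scheme() also reports whether
     * TLS is needed on the backend side (https/wss) and leaves 'u' pointing
     * past the "scheme:" prefix, which is why "//" is checked just below.
     * A plain "ftp:" URL is only accepted when forwarding through a remote
     * proxy (proxyname set), since the request is then relayed to that
     * proxy over HTTP rather than spoken as FTP by this module.
     */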
    scheme = get_url_scheme(&u, &is_ssl);
    if (!scheme && proxyname && strncasecmp(url, "ftp:", 4) == 0) {
        u = url + 4;
        scheme = "ftp";
        is_ssl = 0;
    }
    if (!scheme || u[0] != '/' || u[1] != '/' || u[2] == '\0') {
        ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01113)
                      "HTTP: declining URL %s", url);
        return DECLINED; /* only interested in HTTP, WS or FTP via proxy */
    }
    if (is_ssl && !ap_ssl_has_outgoing_handlers()) {
        ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01112)
                      "HTTP: declining URL %s (mod_ssl not configured?)", url);
        return DECLINED;
    }
    ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, r, "HTTP: serving URL %s", url);

    /* create space for state information */
    if ((status = ap_proxy_acquire_connection(scheme, &backend,
                                              worker, r->server)) != OK) {
        return status;
    }
    backend->is_ssl = is_ssl;

    dconf = ap_get_module_config(r->per_dir_config, &proxy_module);
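
    /* Ask the MPM whether it can poll suspended connections (true for the
     * event MPM); this is a prerequisite for handing the request off
     * asynchronously (see can_go_async below).
     */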
    ap_mpm_query(AP_MPMQ_CAN_POLL, &mpm_can_poll);

    req = apr_pcalloc(p, sizeof(*req));
    req->p = p;
    req->r = r;
    req->sconf = conf;
    req->dconf = dconf;
    req->worker = worker;
    req->backend = backend;
    req->proto = scheme;
    req->bucket_alloc = c->bucket_alloc;
    req->can_go_async = (mpm_can_poll &&
                         dconf->async_delay_set &&
                         dconf->async_delay >= 0);
    req->state = PROXY_HTTP_REQ_HAVE_HEADER;
    req->rb_method = RB_INIT;
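
    /* "force-proxy-request-1.0" (settable via SetEnv) downgrades the request
     * sent to the backend to HTTP/1.0; as a consequence neither Expect/100-
     * continue nor Upgrade handling applies (both are checked below only
     * when force10 is unset).
     */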
    if (apr_table_get(r->subprocess_env, "force-proxy-request-1.0")) {
        req->force10 = 1;
    }
    else if (*worker->s->upgrade || *req->proto == 'w') {
        /* Forward Upgrade header if it matches the configured one(s),
         * the default being "WebSocket" for ws[s] schemes.
         */
        const char *upgrade = apr_table_get(r->headers_in, "Upgrade");
        if (upgrade && ap_proxy_worker_can_upgrade(p, worker, upgrade,
                                                   (*req->proto == 'w')
                                                   ? "WebSocket" : NULL)) {
            req->upgrade = upgrade;
        }
    }
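
    /* Pick the idle timeout used while tunneling or waiting asynchronously:
     * the most specific configured value wins (ProxyAsyncIdleTimeout, then
     * the worker's timeout, then ProxyTimeout, then the server's Timeout).
     */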
    if (req->can_go_async || req->upgrade) {
        /* If ProxyAsyncIdleTimeout is not set, use backend timeout */
        if (req->can_go_async && dconf->async_idle_timeout_set) {
            req->idle_timeout = dconf->async_idle_timeout;
        }
        else if (worker->s->timeout_set) {
            req->idle_timeout = worker->s->timeout;
        }
        else if (conf->timeout_set) {
            req->idle_timeout = conf->timeout;
        }
        else {
            req->idle_timeout = r->server->timeout;
        }
    }
    /* We possibly reuse input data prefetched in previous call(s), e.g. for a
     * balancer fallback scenario, and in this case the 100-continue settings
     * should be consistent between balancer members. If they are not, we need
     * to ignore a Proxy100Continue on=>off switch once we have prefetched
     * already, otherwise the HTTP_IN filter won't send a 100 Continue for us
     * anymore, and we might deadlock, each side waiting for the other. Note
     * that off=>on is not an issue, because in that case r->expecting_100 is
     * false (the 100 Continue went out already), but we do make sure that the
     * prefetch will be nonblocking to avoid spending more time there.
     */
    apr_pool_userdata_get((void **)&input_brigade, "proxy-req-input", p);
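
    /* A non-NULL input_brigade here means an earlier pass through this
     * handler (same request, e.g. another balancer member) has already
     * prefetched part or all of the request body.
     */
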
    /* Should we handle end-to-end or ping 100-continue? */
    if (!req->force10
        && ((r->expecting_100 && (dconf->forward_100_continue || input_brigade))
            || PROXY_SHOULD_PING_100_CONTINUE(worker, r))) {
        /* Tell ap_proxy_create_hdrbrgd() to preserve/add the Expect header */
        apr_table_setn(r->notes, "proxy-100-continue", "1");
        req->do_100_continue = 1;
    }

    /* Should we block while prefetching the body, or try nonblocking and
     * flush data to the backend ASAP?
     */
    if (input_brigade
        || req->can_go_async
        || req->do_100_continue
        || apr_table_get(r->subprocess_env,
                         "proxy-prefetch-nonblocking")) {
        req->prefetch_nonblocking = 1;
    }
    /*
     * When handling a reverse proxy request that did NOT arrive on an
     * already kept-alive client connection, do NOT reuse a pooled
     * connection to the backend, because we could not forward a failure
     * to a client that does not expect one in this situation.
     * Yes, this creates a performance penalty.
     */
    if ((r->proxyreq == PROXYREQ_REVERSE) && (!c->keepalives)
        && (apr_table_get(r->subprocess_env, "proxy-initial-not-pooled"))) {
        backend->close = 1;
    }
    /* Step One: Determine Who To Connect To */
    uri = apr_palloc(p, sizeof(*uri));
    if ((status = ap_proxy_determine_connection(p, r, conf, worker, backend,
                                                uri, &locurl, proxyname,
                                                proxyport, req->server_portstr,
                                                sizeof(req->server_portstr))))
        goto cleanup;

    /* The header is always (re-)built since it depends on worker settings,
     * but the body can be fetched only once (even partially), so it's saved
     * in between proxy_http_handler() calls should we come back here.
     */
    req->header_brigade = apr_brigade_create(p, req->bucket_alloc);
    if (input_brigade == NULL) {
        input_brigade = apr_brigade_create(p, req->bucket_alloc);
        apr_pool_userdata_setn(input_brigade, "proxy-req-input", NULL, p);
    }
    req->input_brigade = input_brigade;
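
    /* The brigade was registered above as pool userdata with no cleanup
     * function (hence apr_pool_userdata_setn() with NULL), so a later call
     * into this handler for the same request, e.g. after a balancer
     * failover, will find the already-prefetched body via the lookup above.
     */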
    /* Prefetch (nonblocking) the request body so as to increase the chance
     * to get the whole (or enough) body and determine Content-Length vs
     * chunked or spooled. By doing this before connecting or reusing the
     * backend, we want to minimize the delay between the connection being
     * considered alive and the first bytes sent (should the client's link
     * be slow or some input filter retain the data). This is a best effort
     * to prevent the backend from closing (from under us) what it thinks is
     * an idle connection, hence to reduce the unavoidable local
     * is_socket_connected() vs remote keepalive race condition to a minimum.
     */
    if ((status = ap_proxy_http_prefetch(req, uri, locurl)) != OK)
        goto cleanup;
    /* We need to reset backend->close now: ap_proxy_http_prefetch() sets it
     * to disable reuse of the connection *after* this request (no keep-
     * alive), not to close any reusable connection *before* this request.
     * Save the requested behavior in a local flag so we can still do the
     * right thing once ap_proxy_connect_backend() below has provided the
     * connection to close.
     */
    toclose = backend->close;
    backend->close = 0;

    while (retry < 2) {
        if (retry) {
            char *newurl = url;

            /* Step One (again): (Re)Determine Who To Connect To */
            if ((status = ap_proxy_determine_connection(p, r, conf, worker,
                            backend, uri, &newurl, proxyname, proxyport,
                            req->server_portstr, sizeof(req->server_portstr))))
                break;

            /* The code assumes locurl is not changed during the loop, or
             * ap_proxy_http_prefetch() would have to be called every time,
             * and header_brigade be changed accordingly...
             */
            AP_DEBUG_ASSERT(strcmp(newurl, locurl) == 0);
        }
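
        /* Note: a pooled backend connection may be stale or have pending
         * data, in which case it is not reusable; below we only fail (503)
         * when both the liveness check and a fresh (re)connect fail.
         */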
        /* Step Two: Make the Connection */
        if (ap_proxy_check_connection(scheme, backend, r->server, 1,
                                      PROXY_CHECK_CONN_EMPTY)
            && ap_proxy_connect_backend(scheme, backend, worker,
                                        r->server)) {
            ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01114)
                          "HTTP: failed to make connection to backend: %s",
                          backend->hostname);
            status = HTTP_SERVICE_UNAVAILABLE;
            break;
        }

        /* Step Three: Create conn_rec */
        if ((status = ap_proxy_connection_create_ex(scheme, backend, r)) != OK)
            break;
        req->origin = backend->connection;

        /* Don't recycle the connection if prefetch (above) told not to do so */
        if (toclose) {
            backend->close = 1;
            req->origin->keepalive = AP_CONN_CLOSE;
        }
        /* Step Four: Send the Request
         * On the off-chance that we forced a 100-Continue as a kind of
         * HTTP ping test, allow for retries
         */
        status = ap_proxy_http_request(req);
        if (status != OK) {
            proxy_run_detach_backend(r, backend);
            if (req->do_100_continue && status == HTTP_SERVICE_UNAVAILABLE) {
                ap_log_rerror(APLOG_MARK, APLOG_INFO, status, r, APLOGNO(01115)
                              "HTTP: 100-Continue failed to %pI (%s:%d)",
                              backend->addr, backend->hostname, backend->port);
                backend->close = 1;
                retry++;
                continue;
            }
            break;
        }
        /* Step Five: Receive the Response... Fall thru to cleanup */
        status = ap_proxy_http_process_response(req);
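
        /* SUSPENDED means the connection has been handed off to the MPM for
         * asynchronous handling; the backend must not be detached or released
         * here (the early return below bypasses both), the tunnel's own
         * completion path takes care of that.
         */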
        if (status == SUSPENDED) {
            return SUSPENDED;
        }
        if (req->backend) {
            proxy_run_detach_backend(r, req->backend);
        }
        break;
    }

    /* Step Six: Clean Up */
cleanup:
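    /* On error, flag the connection for close so a possibly broken backend
     * connection is not returned to the pool; ap_proxy_release_connection()
     * then closes it instead of recycling it.
     */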
    if (req->backend) {
        if (status != OK)
            req->backend->close = 1;
        ap_proxy_release_connection(scheme, req->backend, r->server);
    }
    return status;
}