static apr_status_t read_aggregate()

in buckets/aggregate_buckets.c [218:332]


static apr_status_t read_aggregate(serf_bucket_t *bucket,
                                   apr_size_t requested,
                                   int vecs_size, struct iovec *vecs,
                                   int *vecs_used)
{
    aggregate_context_t *ctx = bucket->data;
    int cur_vecs_used;
    apr_status_t status;

    *vecs_used = 0;

    if (!ctx->list) {
        if (ctx->hold_open) {
            return ctx->hold_open(ctx->hold_open_baton, bucket);
        }
        else {
            return APR_EOF;
        }
    }

    status = APR_SUCCESS;
    while (requested) {
        serf_bucket_t *head = ctx->list->bucket;

        status = serf_bucket_read_iovec(head, requested, vecs_size, vecs,
                                        &cur_vecs_used);

        if (SERF_BUCKET_READ_ERROR(status))
            return status;

        /* Add the number of vecs we read to our running total. */
        *vecs_used += cur_vecs_used;

        if (cur_vecs_used > 0 || status) {
            bucket_list_t *next_list;

            /* If we got SUCCESS (w/bytes), EAGAIN or WAIT_CONN, we want to
             * return now as it isn't safe to read more without returning to
             * our caller.
             */
            if (!status || APR_STATUS_IS_EAGAIN(status) || status == SERF_ERROR_WAIT_CONN) {
                return status;
            }

            /* However, if we read EOF, we can stash this bucket in a
             * to-be-freed list and move on to the next bucket.  This ensures
             * that the bucket stays alive (so as not to violate our read
             * semantics).  We'll destroy this list of buckets the next time
             * we are asked to perform a read operation - thus ensuring the
             * proper read lifetime.
             */
            if (cur_vecs_used > 0) {
                next_list = ctx->list->next;
                ctx->list->next = ctx->done;
                ctx->done = ctx->list;
                ctx->list = next_list;
            }
            else {
                /* This bucket didn't add a single byte.
                   We can destroy it directly */
                next_list = ctx->list;
                ctx->list = next_list->next;
                serf_bucket_destroy(next_list->bucket);
                serf_bucket_mem_free(bucket->allocator, next_list);
            }

            /* If we have no more in our list, defer to hold_open or return EOF. */
            if (!ctx->list) {
                ctx->last = NULL;

                if (ctx->hold_open) {
                    status = ctx->hold_open(ctx->hold_open_baton, bucket);
                    if (status || !ctx->list)
                        return status;
                    /* Wow, we 'magically' refilled! */
                }
                else {
                    return APR_EOF;
                }
            }

            /* At this point, it is safe to read the next bucket - if we can. */

            /* If the caller doesn't want ALL_AVAIL, decrement the size
             * of the items we just read from the list.
             */
            if (requested != SERF_READ_ALL_AVAIL) {
                int i;

                for (i = 0; i < cur_vecs_used; i++)
                    requested -= vecs[i].iov_len;
            }

            /* Adjust our vecs to account for what we just read. */
            vecs_size -= cur_vecs_used;
            vecs += cur_vecs_used;

            /* We reached our max.  Oh well. */
            if (!requested || !vecs_size) {
                return APR_SUCCESS;
            }
        }
        else if (!status) {
            /* Success and no data. Let's return what we have.
               Better luck next time.

               This scenario is triggered by test_split_buckets(),
               in a case where EAGAIN is really not what we want.
             */

            return APR_SUCCESS;
        }
    }

    return status;
}
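
The sketch below is not part of the serf tree; it is a minimal illustration of how a caller might exercise this path. Each serf_bucket_read_iovec() call on an aggregate bucket dispatches to read_aggregate() above, which walks the child list and fills as many iovecs as it safely can before returning. The sketch assumes the public serf/APR API (serf_bucket_aggregate_create, serf_bucket_aggregate_append, serf_bucket_simple_create) and omits build details and most error handling; simple in-memory buckets never return EAGAIN, so the loop runs until EOF.

#include <stdio.h>

#define APR_WANT_IOVEC
#include <apr_want.h>
#include <apr_general.h>
#include <apr_pools.h>

#include "serf.h"

int main(void)
{
    apr_pool_t *pool;
    serf_bucket_alloc_t *alloc;
    serf_bucket_t *agg;
    apr_status_t status;

    apr_initialize();
    apr_pool_create(&pool, NULL);
    alloc = serf_bucket_allocator_create(pool, NULL, NULL);

    /* Build an aggregate out of two simple (in-memory) buckets. */
    agg = serf_bucket_aggregate_create(alloc);
    serf_bucket_aggregate_append(
        agg, serf_bucket_simple_create("Hello, ", 7, NULL, NULL, alloc));
    serf_bucket_aggregate_append(
        agg, serf_bucket_simple_create("world\n", 6, NULL, NULL, alloc));

    /* Each read_iovec call lands in read_aggregate(), which fills as
     * many of the 8 iovecs as it safely can before returning. */
    do {
        struct iovec vecs[8];
        int vecs_used, i;

        status = serf_bucket_read_iovec(agg, SERF_READ_ALL_AVAIL,
                                        8, vecs, &vecs_used);
        if (SERF_BUCKET_READ_ERROR(status))
            break;

        for (i = 0; i < vecs_used; i++)
            fwrite(vecs[i].iov_base, 1, vecs[i].iov_len, stdout);

        /* The iovecs stay valid only until the next read or destroy;
         * copy the data out here if it must live longer. */
    } while (!APR_STATUS_IS_EOF(status));

    serf_bucket_destroy(agg);
    apr_pool_destroy(pool);
    apr_terminate();
    return 0;
}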