static int s_stream_write_chunk()

in source/h1_stream.c [143:239]


static int s_stream_write_chunk(struct aws_http_stream *stream_base, const struct aws_http1_chunk_options *options) {
    AWS_PRECONDITION(stream_base);
    AWS_PRECONDITION(options);
    struct aws_h1_stream *stream = AWS_CONTAINER_OF(stream_base, struct aws_h1_stream, base);

    if (options->chunk_data == NULL && options->chunk_data_size > 0) {
        AWS_LOGF_ERROR(
            AWS_LS_HTTP_STREAM, "id=%p: Chunk data cannot be NULL if data size is non-zero", (void *)stream_base);
        return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
    }

    struct aws_h1_chunk *chunk = aws_h1_chunk_new(stream_base->alloc, options);
    if (AWS_UNLIKELY(NULL == chunk)) {
        AWS_LOGF_ERROR(
            AWS_LS_HTTP_STREAM,
            "id=%p: Failed to initialize streamed chunk, error %d (%s).",
            (void *)stream_base,
            aws_last_error(),
            aws_error_name(aws_last_error()));
        return AWS_OP_ERR;
    }

    int error_code = 0;
    bool should_schedule_task = false;

    { /* BEGIN CRITICAL SECTION */
        s_stream_lock_synced_data(stream);

        /* Can only add chunks while stream is active. */
        if (stream->synced_data.api_state != AWS_H1_STREAM_API_STATE_ACTIVE) {
            error_code = (stream->synced_data.api_state == AWS_H1_STREAM_API_STATE_INIT)
                             ? AWS_ERROR_HTTP_STREAM_NOT_ACTIVATED
                             : AWS_ERROR_HTTP_STREAM_HAS_COMPLETED;
            goto unlock;
        }

        /* Prevent the user from submitting chunks without having set the required headers.
         * This check also prevents a server-user from submitting chunks before the response has been submitted. */
        if (!stream->synced_data.using_chunked_encoding) {
            AWS_LOGF_ERROR(
                AWS_LS_HTTP_STREAM,
                "id=%p: Cannot write chunks without 'transfer-encoding: chunked' header.",
                (void *)stream_base);
            error_code = AWS_ERROR_INVALID_STATE;
            goto unlock;
        }

        if (stream->synced_data.has_final_chunk) {
            AWS_LOGF_ERROR(
                AWS_LS_HTTP_STREAM, "id=%p: Cannot write additional chunk after final chunk.", (void *)stream_base);
            error_code = AWS_ERROR_INVALID_STATE;
            goto unlock;
        }

        /* Success. A zero-length chunk marks the final chunk of the body. */
        if (chunk->data_size == 0) {
            stream->synced_data.has_final_chunk = true;
        }
        aws_linked_list_push_back(&stream->synced_data.pending_chunk_list, &chunk->node);
        should_schedule_task = !stream->synced_data.is_cross_thread_work_task_scheduled;
        stream->synced_data.is_cross_thread_work_task_scheduled = true;

    unlock:
        s_stream_unlock_synced_data(stream);
    } /* END CRITICAL SECTION */

    if (error_code) {
        AWS_LOGF_ERROR(
            AWS_LS_HTTP_STREAM,
            "id=%p: Failed to add chunk, error %d (%s)",
            (void *)stream_base,
            error_code,
            aws_error_name(error_code));

        /* The chunk was never queued, so this function still owns it and must free it here. */
        aws_h1_chunk_destroy(chunk);
        return aws_raise_error(error_code);
    }

    AWS_LOGF_TRACE(
        AWS_LS_HTTP_STREAM,
        "id=%p: Adding chunk with size %" PRIu64 " to stream",
        (void *)stream,
        options->chunk_data_size);

    if (should_schedule_task) {
        /* Keep stream alive until task completes */
        aws_atomic_fetch_add(&stream->base.refcount, 1);
        AWS_LOGF_TRACE(AWS_LS_HTTP_STREAM, "id=%p: Scheduling stream cross-thread work task.", (void *)stream_base);
        aws_channel_schedule_task_now(
            stream->base.owning_connection->channel_slot->channel, &stream->cross_thread_work_task);
    } else {
        AWS_LOGF_TRACE(
            AWS_LS_HTTP_STREAM, "id=%p: Stream cross-thread work task was already scheduled.", (void *)stream_base);
    }

    return AWS_OP_SUCCESS;
}
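
A minimal caller-side sketch of how this path is reached through the public API (a sketch, assuming aws_http1_stream_write_chunk() from <aws/http/request_response.h> and aws_input_stream_new_from_cursor() from <aws/io/stream.h>; the helper names below are hypothetical and error handling is abbreviated). The request or response must already carry the "transfer-encoding: chunked" header and the stream must be activated, otherwise the checks above fail with AWS_ERROR_INVALID_STATE or AWS_ERROR_HTTP_STREAM_NOT_ACTIVATED.

#include <aws/common/byte_buf.h>
#include <aws/common/zero.h>
#include <aws/http/request_response.h>
#include <aws/io/stream.h>

/* Hypothetical helper: queue one body chunk backed by an in-memory cursor. */
static int s_send_body_chunk(
    struct aws_http_stream *stream,
    struct aws_allocator *allocator,
    struct aws_byte_cursor data) {

    struct aws_input_stream *body = aws_input_stream_new_from_cursor(allocator, &data);
    if (body == NULL) {
        return AWS_OP_ERR;
    }

    struct aws_http1_chunk_options options;
    AWS_ZERO_STRUCT(options);
    options.chunk_data = body;
    options.chunk_data_size = data.len;
    /* options.on_complete / options.user_data can be wired up to release `body`
     * once the chunk has been written; omitted here for brevity. */

    return aws_http1_stream_write_chunk(stream, &options);
}

/* Hypothetical helper: a zero-length chunk terminates the body.
 * s_stream_write_chunk() marks has_final_chunk, and any later chunk
 * submission fails with AWS_ERROR_INVALID_STATE. */
static int s_finish_body(struct aws_http_stream *stream) {
    struct aws_http1_chunk_options options;
    AWS_ZERO_STRUCT(options);
    return aws_http1_stream_write_chunk(stream, &options);
}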