in source/ring_buffer.c [128:229]
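/*
 * Acquire between `minimum_size` and `requested_size` bytes of ring memory.
 * On success, *dest is set to an empty aws_byte_buf backed by that region;
 * on failure AWS_ERROR_INVALID_ARGUMENT or AWS_ERROR_OOM is raised.
 */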
int aws_ring_buffer_acquire_up_to(
struct aws_ring_buffer *ring_buf,
size_t minimum_size,
size_t requested_size,
struct aws_byte_buf *dest) {
AWS_PRECONDITION(requested_size >= minimum_size);
AWS_PRECONDITION(aws_ring_buffer_is_valid(ring_buf));
AWS_PRECONDITION(aws_byte_buf_is_valid(dest));
if (requested_size == 0 || minimum_size == 0 || !ring_buf || !dest) {
AWS_POSTCONDITION(aws_ring_buffer_is_valid(ring_buf));
AWS_POSTCONDITION(aws_byte_buf_is_valid(dest));
return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
}
uint8_t *tail_cpy;
uint8_t *head_cpy;
AWS_ATOMIC_LOAD_TAIL_PTR(ring_buf, tail_cpy);
AWS_ATOMIC_LOAD_HEAD_PTR(ring_buf, head_cpy);
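/* work from a single snapshot of head and tail; every decision below is made against these copies. */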
/* this branch means there are no vended buffers outstanding. */
if (head_cpy == tail_cpy) {
size_t ring_space = ring_buf->allocation_end == NULL ? 0 : ring_buf->allocation_end - ring_buf->allocation;
size_t allocation_size = ring_space > requested_size ? requested_size : ring_space;
if (allocation_size < minimum_size) {
AWS_POSTCONDITION(aws_ring_buffer_is_valid(ring_buf));
AWS_POSTCONDITION(aws_byte_buf_is_valid(dest));
return aws_raise_error(AWS_ERROR_OOM);
}
/* go as big as we can. */
/* we don't have any vended buffers, so resetting both pointers is safe. */
AWS_ATOMIC_STORE_HEAD_PTR(ring_buf, ring_buf->allocation + allocation_size);
AWS_ATOMIC_STORE_TAIL_PTR(ring_buf, ring_buf->allocation);
*dest = aws_byte_buf_from_empty_array(ring_buf->allocation, allocation_size);
AWS_POSTCONDITION(aws_ring_buffer_is_valid(ring_buf));
AWS_POSTCONDITION(aws_byte_buf_is_valid(dest));
return AWS_OP_SUCCESS;
}
/* you'll constantly bounce between the next two branches as the ring buffer is traversed. */
/* after N + 1 wraps */
if (tail_cpy > head_cpy) {
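/* head has wrapped one more time than tail; the only free region is the contiguous span [head_cpy, tail_cpy). */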
size_t space = tail_cpy - head_cpy;
/* space == 0 shouldn't be possible here; the head == tail case was handled above. */
AWS_ASSERT(space);
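/* leave one byte unused so the new head can never land exactly on tail; head == tail is reserved to mean "no vended buffers". */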
space -= 1;
size_t returnable_size = space > requested_size ? requested_size : space;
if (returnable_size >= minimum_size) {
AWS_ATOMIC_STORE_HEAD_PTR(ring_buf, head_cpy + returnable_size);
*dest = aws_byte_buf_from_empty_array(head_cpy, returnable_size);
AWS_POSTCONDITION(aws_ring_buffer_is_valid(ring_buf));
AWS_POSTCONDITION(aws_byte_buf_is_valid(dest));
return AWS_OP_SUCCESS;
}
/* after N wraps */
} else if (tail_cpy < head_cpy) {
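/* head is ahead of tail: free space is split between [head_cpy, allocation_end) and [allocation, tail_cpy). */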
size_t head_space = ring_buf->allocation_end - head_cpy;
size_t tail_space = tail_cpy - ring_buf->allocation;
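/* writes restarted at the front of the allocation must keep the new head strictly before tail (head == tail reads as empty), hence the strict comparisons against tail_space below. */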
/* if you can vend the whole requested size, do it. Prefer head space to tail space. */
if (head_space >= requested_size) {
AWS_ATOMIC_STORE_HEAD_PTR(ring_buf, head_cpy + requested_size);
*dest = aws_byte_buf_from_empty_array(head_cpy, requested_size);
AWS_POSTCONDITION(aws_ring_buffer_is_valid(ring_buf));
AWS_POSTCONDITION(aws_byte_buf_is_valid(dest));
return AWS_OP_SUCCESS;
}
if (tail_space > requested_size) {
AWS_ATOMIC_STORE_HEAD_PTR(ring_buf, ring_buf->allocation + requested_size);
*dest = aws_byte_buf_from_empty_array(ring_buf->allocation, requested_size);
AWS_POSTCONDITION(aws_ring_buffer_is_valid(ring_buf));
AWS_POSTCONDITION(aws_byte_buf_is_valid(dest));
return AWS_OP_SUCCESS;
}
/* now vend as much as possible, once again preferring head space. */
if (head_space >= minimum_size && head_space >= tail_space) {
AWS_ATOMIC_STORE_HEAD_PTR(ring_buf, head_cpy + head_space);
*dest = aws_byte_buf_from_empty_array(head_cpy, head_space);
AWS_POSTCONDITION(aws_ring_buffer_is_valid(ring_buf));
AWS_POSTCONDITION(aws_byte_buf_is_valid(dest));
return AWS_OP_SUCCESS;
}
if (tail_space > minimum_size) {
AWS_ATOMIC_STORE_HEAD_PTR(ring_buf, ring_buf->allocation + tail_space - 1);
*dest = aws_byte_buf_from_empty_array(ring_buf->allocation, tail_space - 1);
AWS_POSTCONDITION(aws_ring_buffer_is_valid(ring_buf));
AWS_POSTCONDITION(aws_byte_buf_is_valid(dest));
return AWS_OP_SUCCESS;
}
}
AWS_POSTCONDITION(aws_ring_buffer_is_valid(ring_buf));
AWS_POSTCONDITION(aws_byte_buf_is_valid(dest));
return aws_raise_error(AWS_ERROR_OOM);
}
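
A minimal caller-side sketch (not from this file): it assumes the usual aws-c-common entry points aws_ring_buffer_init, aws_ring_buffer_release, and aws_ring_buffer_clean_up, and the helper name vend_example is illustrative only.

#include <aws/common/ring_buffer.h>

static int vend_example(struct aws_allocator *allocator) {
    struct aws_ring_buffer ring_buf;
    /* back the ring with 1 KiB of memory. */
    if (aws_ring_buffer_init(&ring_buf, allocator, 1024)) {
        return AWS_OP_ERR;
    }
    struct aws_byte_buf buf;
    /* ask for up to 256 bytes, accept as few as 64. */
    if (aws_ring_buffer_acquire_up_to(&ring_buf, 64, 256, &buf) == AWS_OP_SUCCESS) {
        /* write into buf.buffer (up to buf.capacity), then hand the region back. */
        aws_ring_buffer_release(&ring_buf, &buf);
    }
    aws_ring_buffer_clean_up(&ring_buf);
    return AWS_OP_SUCCESS;
}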