in source/windows/secure_channel_tls_handler.c [1232:1339]
/* Channel read callback for the SChannel TLS handler.
 *
 * When `message` is non-NULL, its ciphertext is appended into the handler's
 * staging buffer (buffered_read_in_data_buf) and fed to the current
 * connection-state function (handshake step or decrypt). SSPI forces manual
 * management of incomplete TLS records, so leftover bytes from a decrypt
 * (read_extra) are shifted to the front of the staging buffer before the next
 * chunk is appended. On success the message is released and AWS_OP_SUCCESS is
 * returned; on failure the channel is shut down with the raised error and
 * AWS_OP_ERR is returned (the message is intentionally NOT released on the
 * error path — shutdown handles teardown).
 *
 * When `message` is NULL, this is a request to flush any already-decrypted
 * data sitting in buffered_read_out_data_buf downstream. */
static int s_process_read_message(
    struct aws_channel_handler *handler,
    struct aws_channel_slot *slot,
    struct aws_io_message *message) {
    struct secure_channel_handler *sc_handler = handler->impl;

    if (message) {
        /* note, most of these functions log internally, so the log messages in this function are sparse. */
        AWS_LOGF_TRACE(
            AWS_LS_IO_TLS,
            "id=%p: processing incoming message of size %zu",
            (void *)handler,
            message->message_data.len);

        struct aws_byte_cursor message_cursor = aws_byte_cursor_from_buf(&message->message_data);

        /* The SSPI interface forces us to manage incomplete records manually. So when we had extra after
           the previous read, it needs to be shifted to the beginning of the current read, then the current
           read data is appended to it. If we had an incomplete record, we don't need to shift anything but
           we do need to append the current read data to the end of the incomplete record from the previous read.
           Keep going until we've processed everything in the message we were just passed.
         */
        int err = AWS_OP_SUCCESS;
        while (!err && message_cursor.len) {
            /* Copy as much of the incoming message as fits into the staging buffer. */
            size_t available_buffer_space =
                sc_handler->buffered_read_in_data_buf.capacity - sc_handler->buffered_read_in_data_buf.len;
            size_t available_message_len = message_cursor.len;
            size_t amount_to_move_to_buffer =
                available_buffer_space > available_message_len ? available_message_len : available_buffer_space;

            memcpy(
                sc_handler->buffered_read_in_data_buf.buffer + sc_handler->buffered_read_in_data_buf.len,
                message_cursor.ptr,
                amount_to_move_to_buffer);
            sc_handler->buffered_read_in_data_buf.len += amount_to_move_to_buffer;

            /* Run the current state (handshake negotiation or record decrypt). */
            err = sc_handler->s_connection_state_fn(handler);

            if (err && aws_last_error() == AWS_IO_READ_WOULD_BLOCK) {
                if (sc_handler->buffered_read_in_data_buf.len == sc_handler->buffered_read_in_data_buf.capacity) {
                    /* Staging buffer is completely full yet SSPI still reports an incomplete
                       record — a legitimate record can never exceed the buffer, so treat this
                       as a protocol error and let the shutdown path below report it. */
                    aws_raise_error(AWS_IO_TLS_ERROR_WRITE_FAILURE);
                } else {
                    /* Flush whatever plaintext we do have before waiting for more ciphertext. */
                    if (sc_handler->buffered_read_out_data_buf.len) {
                        err = s_process_pending_output_messages(handler);
                        if (err) {
                            break;
                        }
                    }
                    /* prevent a deadlock due to downstream handlers wanting more data, but we have an incomplete
                       record, and the amount they're requesting is less than the size of a tls record. */
                    size_t window_size = slot->window_size;
                    if (!window_size &&
                        aws_channel_slot_increment_read_window(slot, sc_handler->estimated_incomplete_size)) {
                        err = AWS_OP_ERR;
                    } else {
                        sc_handler->estimated_incomplete_size = 0;
                        err = AWS_OP_SUCCESS;
                    }
                }
                aws_byte_cursor_advance(&message_cursor, amount_to_move_to_buffer);
                continue;
            } else if (err) {
                break;
            }

            /* handle any left over extra data from the decrypt operation here: shift the unconsumed
               tail of the staging buffer back to its start so the next chunk can be appended to it. */
            if (sc_handler->read_extra) {
                size_t move_pos = sc_handler->buffered_read_in_data_buf.len - sc_handler->read_extra;
                memmove(
                    sc_handler->buffered_read_in_data_buf.buffer,
                    sc_handler->buffered_read_in_data_buf.buffer + move_pos,
                    sc_handler->read_extra);
                sc_handler->buffered_read_in_data_buf.len = sc_handler->read_extra;
                sc_handler->read_extra = 0;
            } else {
                sc_handler->buffered_read_in_data_buf.len = 0;
            }

            /* Push decrypted plaintext downstream before processing the next chunk. */
            if (sc_handler->buffered_read_out_data_buf.len) {
                err = s_process_pending_output_messages(handler);
                if (err) {
                    break;
                }
            }
            aws_byte_cursor_advance(&message_cursor, amount_to_move_to_buffer);
        }

        if (!err) {
            aws_mem_release(message->allocator, message);
            return AWS_OP_SUCCESS;
        }

        aws_channel_shutdown(slot->channel, aws_last_error());
        return AWS_OP_ERR;
    }

    /* message == NULL: flush-only invocation. */
    if (sc_handler->buffered_read_out_data_buf.len) {
        if (s_process_pending_output_messages(handler)) {
            return AWS_OP_ERR;
        }
        /* Fixed: the previous code called aws_mem_release(message->allocator, message)
           here, but this path is only reachable when message is NULL — that was a
           guaranteed NULL-pointer dereference. There is no message to release here. */
    }

    return AWS_OP_SUCCESS;
}