in source/memtrace.c [293:418]
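/*
 * Logs a report of all outstanding allocations tracked by the tracer: a summary
 * line, each leak in allocation order, and (at AWS_MEMTRACE_STACKS level) the
 * leaking call stacks aggregated by bytes leaked and by allocation count.
 * Output goes to the AWS_LS_COMMON_MEMTRACE log subject at TRACE level.
 */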
void aws_mem_tracer_dump(struct aws_allocator *trace_allocator) {
    struct alloc_tracer *tracer = trace_allocator->impl;
    if (tracer->level == AWS_MEMTRACE_NONE || aws_atomic_load_int(&tracer->allocated) == 0) {
        return;
    }

    aws_mutex_lock(&tracer->mutex);

    size_t num_allocs = aws_hash_table_get_entry_count(&tracer->allocs);
    AWS_LOGF_TRACE(
        AWS_LS_COMMON_MEMTRACE, "################################################################################\n");
    AWS_LOGF_TRACE(
        AWS_LS_COMMON_MEMTRACE, "#                              BEGIN MEMTRACE DUMP                             #\n");
    AWS_LOGF_TRACE(
        AWS_LS_COMMON_MEMTRACE, "################################################################################\n");
    AWS_LOGF_TRACE(
        AWS_LS_COMMON_MEMTRACE,
        "tracer: %zu bytes still allocated in %zu allocations\n",
        aws_atomic_load_int(&tracer->allocated),
        num_allocs);
    /* convert stacks from pointers -> symbols */
    struct aws_hash_table stack_info;
    AWS_ZERO_STRUCT(stack_info);
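    /* stack_info maps a stack id to a stack_metadata entry (aggregate byte and
     * allocation counts plus a symbolized trace string); entry values are
     * released via s_stack_info_destroy when the table is cleaned up. */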
    if (tracer->level == AWS_MEMTRACE_STACKS) {
        AWS_FATAL_ASSERT(
            AWS_OP_SUCCESS ==
            aws_hash_table_init(
                &stack_info, aws_default_allocator(), 64, aws_hash_ptr, aws_ptr_eq, NULL, s_stack_info_destroy));
        /* collect active stacks, tally up sizes and counts */
        aws_hash_table_foreach(&tracer->allocs, s_collect_stack_stats, &stack_info);
        /* collect stack traces for active stacks */
        aws_hash_table_foreach(&stack_info, s_collect_stack_trace, tracer);
    }
    /* sort allocs by time */
    struct aws_priority_queue allocs;
    AWS_FATAL_ASSERT(
        AWS_OP_SUCCESS ==
        aws_priority_queue_init_dynamic(
            &allocs, aws_default_allocator(), num_allocs, sizeof(struct alloc_info *), s_alloc_compare));
    aws_hash_table_foreach(&tracer->allocs, s_insert_allocs, &allocs);

    /* dump allocs by time */
    AWS_LOGF_TRACE(
        AWS_LS_COMMON_MEMTRACE, "################################################################################\n");
    AWS_LOGF_TRACE(AWS_LS_COMMON_MEMTRACE, "Leaks in order of allocation:\n");
    AWS_LOGF_TRACE(
        AWS_LS_COMMON_MEMTRACE, "################################################################################\n");
    while (aws_priority_queue_size(&allocs)) {
        struct alloc_info *alloc = NULL;
        aws_priority_queue_pop(&allocs, &alloc);
        AWS_LOGF_TRACE(AWS_LS_COMMON_MEMTRACE, "ALLOC %zu bytes\n", alloc->size);
        if (alloc->stack) {
            struct aws_hash_element *item = NULL;
            AWS_FATAL_ASSERT(
                AWS_OP_SUCCESS == aws_hash_table_find(&stack_info, (void *)(uintptr_t)alloc->stack, &item));
            struct stack_metadata *stack = item->value;
            AWS_LOGF_TRACE(AWS_LS_COMMON_MEMTRACE, " stacktrace:\n%s\n", (const char *)aws_string_bytes(stack->trace));
        }
    }
    aws_priority_queue_clean_up(&allocs);
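
    /* At AWS_MEMTRACE_STACKS level, additionally report per-callsite aggregates,
     * sorted two ways: by total bytes leaked, then by number of allocations. */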
    if (tracer->level == AWS_MEMTRACE_STACKS) {
        size_t num_stacks = aws_hash_table_get_entry_count(&stack_info);
        /* sort stacks by total size leaked */
        struct aws_priority_queue stacks_by_size;
        AWS_FATAL_ASSERT(
            AWS_OP_SUCCESS == aws_priority_queue_init_dynamic(
                                  &stacks_by_size,
                                  aws_default_allocator(),
                                  num_stacks,
                                  sizeof(struct stack_metadata *),
                                  s_stack_info_compare_size));
        aws_hash_table_foreach(&stack_info, s_insert_stacks, &stacks_by_size);
        AWS_LOGF_TRACE(
            AWS_LS_COMMON_MEMTRACE,
            "################################################################################\n");
        AWS_LOGF_TRACE(AWS_LS_COMMON_MEMTRACE, "Stacks by bytes leaked:\n");
        AWS_LOGF_TRACE(
            AWS_LS_COMMON_MEMTRACE,
            "################################################################################\n");
        while (aws_priority_queue_size(&stacks_by_size) > 0) {
            struct stack_metadata *stack = NULL;
            aws_priority_queue_pop(&stacks_by_size, &stack);
            AWS_LOGF_TRACE(AWS_LS_COMMON_MEMTRACE, "%zu bytes in %zu allocations:\n", stack->size, stack->count);
            AWS_LOGF_TRACE(AWS_LS_COMMON_MEMTRACE, "%s\n", (const char *)aws_string_bytes(stack->trace));
        }
        aws_priority_queue_clean_up(&stacks_by_size);

        /* sort stacks by number of leaks */
        struct aws_priority_queue stacks_by_count;
        AWS_FATAL_ASSERT(
            AWS_OP_SUCCESS == aws_priority_queue_init_dynamic(
                                  &stacks_by_count,
                                  aws_default_allocator(),
                                  num_stacks,
                                  sizeof(struct stack_metadata *),
                                  s_stack_info_compare_count));
        AWS_LOGF_TRACE(
            AWS_LS_COMMON_MEMTRACE,
            "################################################################################\n");
        AWS_LOGF_TRACE(AWS_LS_COMMON_MEMTRACE, "Stacks by number of leaks:\n");
        AWS_LOGF_TRACE(
            AWS_LS_COMMON_MEMTRACE,
            "################################################################################\n");
        aws_hash_table_foreach(&stack_info, s_insert_stacks, &stacks_by_count);
        while (aws_priority_queue_size(&stacks_by_count) > 0) {
            struct stack_metadata *stack = NULL;
            aws_priority_queue_pop(&stacks_by_count, &stack);
            AWS_LOGF_TRACE(AWS_LS_COMMON_MEMTRACE, "%zu allocations leaking %zu bytes:\n", stack->count, stack->size);
            AWS_LOGF_TRACE(AWS_LS_COMMON_MEMTRACE, "%s\n", (const char *)aws_string_bytes(stack->trace));
        }
        aws_priority_queue_clean_up(&stacks_by_count);

        aws_hash_table_clean_up(&stack_info);
    }
    AWS_LOGF_TRACE(
        AWS_LS_COMMON_MEMTRACE, "################################################################################\n");
    AWS_LOGF_TRACE(
        AWS_LS_COMMON_MEMTRACE, "#                               END MEMTRACE DUMP                              #\n");
    AWS_LOGF_TRACE(
        AWS_LS_COMMON_MEMTRACE, "################################################################################\n");

    aws_mutex_unlock(&tracer->mutex);
}
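
/*
 * Usage sketch (illustrative, not part of this file): the dump above only
 * produces output if a logger is installed at TRACE level for
 * AWS_LS_COMMON_MEMTRACE. The calls below are the public memtrace API
 * (declared in aws/common/allocator.h in current aws-c-common); the leaked
 * 64-byte buffer is a made-up example allocation.
 */
#include <aws/common/allocator.h>

static void s_example_leak_report(void) {
    /* Wrap the default allocator; AWS_MEMTRACE_STACKS also captures up to
     * 8 frames of backtrace per allocation so leaks can be grouped by site. */
    struct aws_allocator *tracer =
        aws_mem_tracer_new(aws_default_allocator(), NULL, AWS_MEMTRACE_STACKS, 8);

    void *leaked = aws_mem_acquire(tracer, 64); /* never freed: appears in the dump */
    (void)leaked;

    aws_mem_tracer_dump(tracer);    /* runs the report logic defined above */
    aws_mem_tracer_destroy(tracer); /* tears down the tracer, returns the wrapped allocator */
}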