def _print_report()

in evalbench/evaluator/progress_reporter.py [0:0]


def _print_report(progress_reporting, tmp_buffer):
    # Snapshot the shared counters up front so every bar is drawn from one
    # consistent reading, even while worker processes keep incrementing them.
    setup_i = progress_reporting["setup_i"].value
    prompt_i = progress_reporting["prompt_i"].value
    gen_i = progress_reporting["gen_i"].value
    exec_i = progress_reporting["exec_i"].value
    score_i = progress_reporting["score_i"].value
    dataset_len = progress_reporting["total"]
    databases = progress_reporting["total_dbs"]

    if tmp_buffer:
        # Drain any output captured from workers since the last repaint.
        buffer_content = tmp_buffer.getvalue()
        tmp_buffer.seek(0)
        tmp_buffer.truncate(0)
        if buffer_content:
            # Print the captured text above the progress area, then pad with
            # blank lines so the bars can be redrawn below it.
            _ORIGINAL_STDOUT.write("\n")
            _ORIGINAL_STDOUT.write(buffer_content)
            _ORIGINAL_STDOUT.write("\n" * (_NUM_LINES_FOR_PROGRESS + 1))

    _ORIGINAL_STDOUT.write("\033[F\033[K" * _NUM_LINES_FOR_PROGRESS)

    # Repaint one bar per pipeline stage, each scaled to a 50-column bar.
    report_progress(
        setup_i, databases, prefix="DBs Setup:", suffix="Complete", length=50
    )
    report_progress(
        prompt_i, dataset_len, prefix="Prompts:  ", suffix="Complete", length=50
    )
    report_progress(
        gen_i, dataset_len, prefix="SQLGen:   ", suffix="Complete", length=50
    )
    report_progress(
        exec_i, dataset_len, prefix="SQLExec:  ", suffix="Complete", length=50
    )
    report_progress(
        score_i, dataset_len, prefix="Scoring:  ", suffix="Complete", length=50
    )
    # Push the repainted report out immediately; it is redrawn frequently.
    _ORIGINAL_STDOUT.flush()
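
`report_progress` is defined elsewhere in the module; only its call signature
is visible here. A minimal sketch of a compatible renderer, assuming it writes
exactly one newline-terminated line per call (so the "\033[F\033[K"
repositioning above lines up) -- the body below is an illustration, not the
module's actual implementation:

def report_progress(iteration, total, prefix="", suffix="", length=50):
    # Render one fixed-width progress bar line on the real stdout.
    total = max(total, 1)  # guard against a zero-item stage
    filled = int(length * iteration // total)
    bar = "#" * filled + "-" * (length - filled)
    percent = 100.0 * iteration / total
    _ORIGINAL_STDOUT.write(f"{prefix} |{bar}| {percent:5.1f}% {suffix}\n")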
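
For context, a hedged sketch of how a caller might assemble
`progress_reporting` and drive the repaint loop. It assumes the counters are
multiprocessing.Value objects incremented by worker processes and that stray
prints are redirected into `tmp_buffer`; the function and its arguments are
hypothetical, only the dict keys match `_print_report` above:

import io
import sys
import time
from multiprocessing import Value

def run_with_progress(dataset_len, num_dbs, done):
    # done: a callable that returns True once all workers have finished.
    progress_reporting = {
        "setup_i": Value("i", 0),
        "prompt_i": Value("i", 0),
        "gen_i": Value("i", 0),
        "exec_i": Value("i", 0),
        "score_i": Value("i", 0),
        "total": dataset_len,
        "total_dbs": num_dbs,
    }
    tmp_buffer = io.StringIO()
    sys.stdout = tmp_buffer  # capture stray prints away from the bars
    _ORIGINAL_STDOUT.write("\n" * _NUM_LINES_FOR_PROGRESS)  # reserve bar area
    try:
        while not done():
            _print_report(progress_reporting, tmp_buffer)
            time.sleep(0.5)  # repaint interval
        _print_report(progress_reporting, tmp_buffer)  # draw the final state
    finally:
        sys.stdout = _ORIGINAL_STDOUT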