# eval_wer() — from eval/eval_WER.py

def eval_wer(loader,
             criterion,
             lm_weight,
             index2letter,
             n_processes=32):
    """Compute the mean word error rate (WER) of *criterion* over *loader*.

    Runs the letter classifier on every batch (GPU forward pass), then hands
    each per-utterance log-probability matrix to a pool of worker processes
    that perform the (CPU-heavy) decoding and WER computation.

    Args:
        loader: iterable of batches; each batch is indexable as
            ``(seq, ..., labels, label_lengths)`` compatible with
            ``prepare_data`` — seq is assumed to be (batch, ...) — TODO confirm.
        criterion: model wrapper exposing ``letter_classifier``; put into
            eval mode for the duration of the call.
        lm_weight: language-model weight forwarded to each ``Worker``.
        index2letter: index-to-character mapping forwarded to each ``Worker``.
        n_processes: number of decoding worker processes to spawn.

    Returns:
        float: mean WER over all utterances seen in *loader*.

    Raises:
        RuntimeError: if *loader* yields no utterances (mean is undefined).
    """
    criterion.eval()

    bar = progressbar.ProgressBar(len(loader))
    bar.start()

    # task_q carries (log_probs, labels) pairs to the workers; result_q
    # carries back one WER value per pair.
    task_q, result_q = mp.JoinableQueue(), mp.Queue()
    processes = []
    for _ in range(n_processes):
        # NOTE(review): Worker(...) is called here, so its return value (a
        # callable, presumably) is the process target — verify Worker's API.
        p = mp.Process(target=Worker(
            lm_weight, index2letter, task_q, result_q))
        p.start()
        processes.append(p)

    results = 0        # number of per-utterance WER values collected
    mean_wer = 0.0     # running sum of WER values (normalized at the end)

    for index, data in enumerate(loader):
        bar.update(index)
        batch_size = data[0].size(0)

        with torch.no_grad():
            seq, seq_lengths, labels, label_lengths = prepare_data(
                data, put_on_cuda=False)
            seq = seq.cuda()

            # Forward pass on GPU, then move log-probs back to CPU so they
            # can be pickled cheaply to the worker processes.
            predictions = criterion.letter_classifier(
                seq).log_softmax(dim=-1).cpu()

        for k in range(batch_size):
            p_ = predictions[k, :, :]
            labels_ = labels[k, :label_lengths[k]]
            task_q.put((p_, labels_))

        # Wait until every task of this batch has been processed, then drain
        # exactly batch_size results with blocking get(). This replaces the
        # former `while not result_q.empty()` loop: Queue.empty() can return
        # True while results are still in the feeder-thread pipeline, which
        # made the old count assertion racy.
        task_q.join()
        for _ in range(batch_size):
            mean_wer += result_q.get()
            results += 1
    bar.finish()

    # One sentinel per worker signals shutdown.
    for _ in processes:
        task_q.put(None)

    for p in processes:
        p.join()

    if results == 0:
        raise RuntimeError("eval_wer: loader produced no utterances; "
                           "mean WER is undefined")
    return mean_wer / results