recipes/lexicon_free/utilities/compute_lower_ppl_convlm.py [187:211]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
        )

        if n % 10 == 0:
            print(
                "Evaluated",
                n,
                "sentences among",
                n_logging,
                "upper limit perplexity",
                numpy.exp(-ppl / n_words),
                "lower limit perplexity",
                numpy.exp(-ppl_lower / n_words),
                "number of words",
                n_words,
                flush=True,
            )

    print("Final loss", ppl, "loss lower", ppl_lower)
    print("Upper limit on perplexity:", numpy.exp(-ppl / n_words))
    print("Lower limit on perplexity:", numpy.exp(-ppl_lower / n_words))
    print("Total number of words:", n_words, "unknown words:", unk_n_words)


if __name__ == "__main__":
    parser = argparse.ArgumentParser(
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
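
Both utilities report the bounds by exponentiating the averaged negative log-probability, as in the numpy.exp(-ppl / n_words) calls above. A minimal standalone sketch of that arithmetic, assuming ppl and ppl_lower accumulate natural-log word probabilities (the helper name below is hypothetical, not part of the recipe):

import numpy

def perplexity_from_logprob_sum(logprob_sum, n_words):
    # Perplexity = exp(-(1/N) * sum_i log p(w_i | context)); logprob_sum is the
    # running total of natural-log word probabilities, n_words the word count.
    return numpy.exp(-logprob_sum / n_words)

# Toy usage: three words whose natural-log probabilities sum to -6.0
print("upper limit perplexity", perplexity_from_logprob_sum(-6.0, 3))  # exp(2.0) ~ 7.39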



recipes/lexicon_free/utilities/compute_lower_ppl_kenlm.py [126:150]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
        )

        if n % 10 == 0:
            print(
                "Evaluated",
                n,
                "sentences among",
                n_logging,
                "upper limit perplexity",
                numpy.exp(-ppl / n_words),
                "lower limit perplexity",
                numpy.exp(-ppl_lower / n_words),
                "number of words",
                n_words,
                flush=True,
            )

    print("Final loss", ppl, "loss lower", ppl_lower)
    print("Upper limit on perplexity:", numpy.exp(-ppl / n_words))
    print("Lower limit on perplexity:", numpy.exp(-ppl_lower / n_words))
    print("Total number of words:", n_words, "unknown words:", unk_n_words)


if __name__ == "__main__":
    parser = argparse.ArgumentParser(
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
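
The KenLM variant differs only in how the per-word scores are obtained; the reporting block is identical to the ConvLM one. A hedged sketch of feeding KenLM scores into the same formula via the kenlm Python module (model path and sentence are placeholders; the recipe's own accumulation of ppl / ppl_lower may differ, e.g. in how unknown words are counted):

import kenlm
import numpy

model = kenlm.Model("lm.arpa")  # placeholder path to an ARPA/binary LM
loss, n_words = 0.0, 0
for log10_prob, ngram_len, oov in model.full_scores("the cat sat", bos=True, eos=True):
    loss += log10_prob * numpy.log(10)  # kenlm returns log10 scores; convert to natural log
    n_words += 1
print("upper limit perplexity", numpy.exp(-loss / n_words))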



