custom/gpt2/run_gpt2.py [153:162]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    }

    # Sum each statistic, which will be normalized by the number of sentences in `aggregate_logging_outputs`.
    stats = defaultdict(float)
    for tok_list in pred_toks.cpu().tolist():
        ms = ngram_metrics(tok_list)
        for k, v in ms.items():
            stats[k] += v
    for k, v in stats.items():
        logging_output[k] = v
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
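In this excerpt, `ngram_metrics` is called once per decoded sequence and its returned statistics are summed into `stats`. The function itself is defined elsewhere in the repo; the sketch below is only an assumption about the kind of per-sequence repetition statistics it could return (fraction of repeated n-grams). The signature, the `pad` default, and the stat key names are illustrative, not the actual implementation.

    from collections import Counter

    def ngram_metrics(token_list, pad=1):
        # Hypothetical sketch only: the real ngram_metrics lives elsewhere in
        # this repo; names and signature here are assumptions for illustration.
        if pad in token_list:
            token_list = token_list[:token_list.index(pad)]  # strip padding tokens
        stats = {}
        for n in (1, 2, 3, 4):
            ngrams = [tuple(token_list[i:i + n]) for i in range(len(token_list) - n + 1)]
            if not ngrams:
                continue
            # fraction of n-gram occurrences that repeat an earlier n-gram
            stats['pct_repeat_%dgrams' % n] = 1.0 - len(Counter(ngrams)) / len(ngrams)
        return stats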



custom/sequence_penalty_loss.py [59:68]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
        }

        # Sum each statistic, which will be normalized by the number of sentences in `aggregate_logging_outputs`.
        stats = defaultdict(float)
        for tok_list in pred_toks.cpu().tolist():
            ms = ngram_metrics(tok_list)
            for k, v in ms.items():
                stats[k] += v
        for k, v in stats.items():
            logging_output[k] = v
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
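Both excerpts only accumulate raw sums into `logging_output`; per the comment, the division by the number of sentences happens later in `aggregate_logging_outputs`. A minimal sketch of that normalization step is given below, assuming the usual fairseq-style `nsentences` logging key and the hypothetical stat names from the sketch above; the repo's actual aggregation code may differ.

    def aggregate_logging_outputs(logging_outputs):
        # Hedged sketch: 'nsentences' follows common fairseq conventions and the
        # stat key names are assumptions, not the repo's actual keys.
        nsentences = sum(log.get('nsentences', 0) for log in logging_outputs)
        agg = {}
        for key in ('pct_repeat_1grams', 'pct_repeat_4grams'):  # hypothetical names
            total = sum(log.get(key, 0.0) for log in logging_outputs)
            agg[key] = total / max(nsentences, 1)  # average per sentence
        return agg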



