preprocess/sm_inference_asum.py [446:480]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
                        # Run generation on the current batch of source lines. The wrapper
                        # returns parallel lists; the asserts below confirm they stay
                        # aligned with each other and with the inputs.
                        hypotheses_batch, score_batch, unnormalized_score_batch, pos_score_batch, tokens_batch = \
                            _sample_wrapper(
                                bart,
                                sentences=slines,
                                beam=args.beam,
                                lenpen=1.0,
                                max_len_b=args.max_len,
                                min_len=args.min_len,
                                sampling=args.sampling,
                                sampling_topk=args.sampling_topk,
                                sampling_topp=args.sampling_topp,
                                return_all=args.return_all,
                                input_is_bpe=False,
                                return_token_scores=args.return_token_scores,
                                diverse_beam_groups=args.diverse_beam_groups,
                                diverse_beam_strength=args.diverse_beam_strength,
                            )
                        # Sanity check: one score per hypothesis, one hypothesis entry per input line.
                        assert len(hypotheses_batch) == len(score_batch) == len(unnormalized_score_batch), \
                            "lens not equal: {} and {} and {}".format(
                            len(hypotheses_batch), len(score_batch), len(unnormalized_score_batch)
                        )
                        assert len(hypotheses_batch) == len(slines), "slines={}, generated_score length={}".format(
                            slines, len(hypotheses_batch)
                        )
                        if args.return_token_scores:
                            # Also carry per-position scores and token ids for each hypothesis.
                            for t, s, unnormalized_s, pos_s, toks in zip(hypotheses_batch, score_batch,
                                                                         unnormalized_score_batch,
                                                                         pos_score_batch, tokens_batch):
                                all_hypos.append((t, s, unnormalized_s, pos_s, toks))
                        else:
                            for t, s, unnormalized_s in zip(hypotheses_batch, score_batch, unnormalized_score_batch):
                                all_hypos.append((t, s, unnormalized_s))

                        # extend() replaces the manual append loop, which also shadowed
                        # the builtin `id` with its loop variable.
                        all_ids.extend(pa_ids)
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -



preprocess/sm_inference_asum.py [487:522]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
                    # Run generation on the current batch of source lines. The wrapper
                    # returns parallel lists; the asserts below confirm they stay
                    # aligned with each other and with the inputs.
                    hypotheses_batch, score_batch, unnormalized_score_batch, pos_score_batch, tokens_batch = \
                        _sample_wrapper(
                            bart,
                            sentences=slines,
                            beam=args.beam,
                            lenpen=1.0,
                            max_len_b=args.max_len,
                            min_len=args.min_len,
                            sampling=args.sampling,
                            sampling_topk=args.sampling_topk,
                            sampling_topp=args.sampling_topp,
                            return_all=args.return_all,
                            input_is_bpe=False,
                            return_token_scores=args.return_token_scores,
                            diverse_beam_groups=args.diverse_beam_groups,
                            diverse_beam_strength=args.diverse_beam_strength,
                        )
                    # Sanity check: one score per hypothesis, one hypothesis entry per input line.
                    assert len(hypotheses_batch) == len(score_batch) == len(unnormalized_score_batch), \
                        "lens not equal: {} and {} and {}".format(
                            len(hypotheses_batch), len(score_batch), len(unnormalized_score_batch)
                        )
                    assert len(hypotheses_batch) == len(slines), "slines={}, generated_score length={}".format(
                        slines, len(hypotheses_batch)
                    )

                    if args.return_token_scores:
                        # Also carry per-position scores and token ids for each hypothesis.
                        for t, s, unnormalized_s, pos_s, toks in zip(hypotheses_batch, score_batch,
                                                                     unnormalized_score_batch,
                                                                     pos_score_batch, tokens_batch):
                            all_hypos.append((t, s, unnormalized_s, pos_s, toks))
                    else:
                        for t, s, unnormalized_s in zip(hypotheses_batch, score_batch, unnormalized_score_batch):
                            all_hypos.append((t, s, unnormalized_s))

                    # extend() replaces the manual append loop, which also shadowed
                    # the builtin `id` with its loop variable.
                    all_ids.extend(pa_ids)
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -



