pytorch_translate/beam_search_and_decode_v2.py [554:608]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
        )

        return outputs

    @classmethod
    def build_from_checkpoints(
        cls,
        checkpoint_filenames,
        src_dict_filename,
        dst_dict_filename,
        beam_size,
        length_penalty,
        nbest,
        word_reward=0,
        unk_reward=0,
        lexical_dict_paths=None,
    ):
        length = 10
        models, _, tgt_dict = load_models_from_checkpoints(
            checkpoint_filenames,
            src_dict_filename,
            dst_dict_filename,
            lexical_dict_paths,
        )
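        # Fixed-length placeholder inputs (all ones, length 10): presumably
        # example source tokens/lengths used only to construct (and, if
        # applicable, trace) the exported module, not real data.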
        src_tokens = torch.LongTensor(np.ones((length, 1), dtype="int64"))
        src_lengths = torch.IntTensor(np.array([length], dtype="int32"))
        eos_token_id = tgt_dict.eos()

        return cls(
            models,
            tgt_dict,
            src_tokens,
            src_lengths,
            eos_token_id,
            length_penalty=length_penalty,
            nbest=nbest,
            beam_size=beam_size,
            stop_at_eos=True,
            word_reward=word_reward,
            unk_reward=unk_reward,
            quantize=True,
        )

    def save_to_pytorch(self, output_path):
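        # Submodules that define _pack/_unpack (e.g. quantized layers) are
        # packed around torch.jit.save, so weights are serialized in packed
        # form and the in-memory module is restored to a usable state after.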
        def pack(s):
            if hasattr(s, "_pack"):
                s._pack()

        def unpack(s):
            if hasattr(s, "_unpack"):
                s._unpack()

        self.apply(pack)
        torch.jit.save(self, output_path)
        self.apply(unpack)
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
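
Taken together, build_from_checkpoints loads the ensemble and target dictionary from the given checkpoints, constructs the module around fixed-length placeholder inputs with quantize=True, and save_to_pytorch serializes the result via torch.jit.save. A minimal usage sketch follows; the BeamSearchAndDecodeV2 class name, the file paths, and the decoding parameters are illustrative assumptions, not values taken from the excerpt above:

    from pytorch_translate.beam_search_and_decode_v2 import BeamSearchAndDecodeV2

    # Note: class name, paths, and parameter values are assumed for illustration.
    # Build a quantized beam-search module from trained checkpoints.
    exported = BeamSearchAndDecodeV2.build_from_checkpoints(
        checkpoint_filenames=["averaged_checkpoint.pt"],
        src_dict_filename="dictionary-src.txt",
        dst_dict_filename="dictionary-tgt.txt",
        beam_size=6,
        length_penalty=1.0,
        nbest=1,
        word_reward=0.25,
        unk_reward=-0.5,
    )

    # Serialize to TorchScript; weights are packed for saving and unpacked afterwards.
    exported.save_to_pytorch("beam_search_and_decode.pt")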



pytorch_translate/ensemble_export.py [1525:1579]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
        )

        return outputs

    @classmethod
    def build_from_checkpoints(
        cls,
        checkpoint_filenames,
        src_dict_filename,
        dst_dict_filename,
        beam_size,
        length_penalty,
        nbest,
        word_reward=0,
        unk_reward=0,
        lexical_dict_paths=None,
    ):
        length = 10
        models, _, tgt_dict = load_models_from_checkpoints(
            checkpoint_filenames,
            src_dict_filename,
            dst_dict_filename,
            lexical_dict_paths,
        )
        src_tokens = torch.LongTensor(np.ones((length, 1), dtype="int64"))
        src_lengths = torch.IntTensor(np.array([length], dtype="int32"))
        eos_token_id = tgt_dict.eos()

        return cls(
            models,
            tgt_dict,
            src_tokens,
            src_lengths,
            eos_token_id,
            length_penalty=length_penalty,
            nbest=nbest,
            beam_size=beam_size,
            stop_at_eos=True,
            word_reward=word_reward,
            unk_reward=unk_reward,
            quantize=True,
        )

    def save_to_pytorch(self, output_path):
        def pack(s):
            if hasattr(s, "_pack"):
                s._pack()

        def unpack(s):
            if hasattr(s, "_unpack"):
                s._unpack()

        self.apply(pack)
        torch.jit.save(self, output_path)
        self.apply(unpack)
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -



