def _postprocessing()

in templates/inference-endpoints/postprocessing/1/model.py


    def _postprocessing(self, tokens_batch):
        # tokens_batch holds one list of generated token IDs per beam, per request.
        outputs = []
        for beam_tokens in tokens_batch:
            # Iterate over the beams produced for a single request.
            for tokens in beam_tokens:
                # Decode the token IDs to text and store it as a UTF-8 byte string.
                output = self.tokenizer.decode(tokens)
                outputs.append(output.encode('utf8'))
        return outputs
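
For context, here is a minimal standalone sketch of the same decoding loop, assuming a Hugging Face tokenizer ("gpt2" is only an illustrative choice, not necessarily what this template loads). tokens_batch carries one list of token IDs per beam, per request, and each decoded beam becomes one UTF-8 encoded byte string in a flat output list.

    from transformers import AutoTokenizer

    # Illustrative tokenizer; the actual template loads whatever model it serves.
    tokenizer = AutoTokenizer.from_pretrained("gpt2")

    # tokens_batch mirrors the method's input: [batch_size][beam_width][seq_len].
    tokens_batch = [
        [tokenizer.encode("Hello world"), tokenizer.encode("Hello there")],  # request 0, two beams
    ]

    # The same flattening _postprocessing performs.
    outputs = []
    for beam_tokens in tokens_batch:
        for tokens in beam_tokens:
            outputs.append(tokenizer.decode(tokens).encode('utf8'))

    print(outputs)  # e.g. [b'Hello world', b'Hello there']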