# embed_benchmark_value()
#
# From: Synthesis_incorporation/models/prediction_model.py [0:0]


    def embed_benchmark_value(self, example):
        """Build a flat float embedding for one benchmark example.

        Concatenates per-input embeddings — padded out to exactly three
        input slots — with the output embedding.

        Args:
            example: mapping with 'inputs' (list of input values; an entry
                equal to 0 marks an empty slot) and 'output' (an object
                whose ``.value`` holds the output data).

        Returns:
            A 1-D float tensor: the three input-slot embeddings followed by
            the output embedding, flattened.
        """
        # Width of one padded embedding slot. This is loop-invariant, so
        # compute it once instead of re-deriving it per entry (the original
        # repeated this computation in two places).
        slot_width = self.embedding_size
        if self.use_shape_encoding:
            slot_width += self.shape_embedding_size
        if self.use_type_encoding:
            slot_width += 2
        slot_width += 1  # +1 for the trailing presence/padding flag

        it_pad = []
        for input_tensor in example['inputs']:
            if input_tensor == 0:
                # Empty input slot: all-zero embedding (flag stays 0,
                # distinguishing it from trailing padding below).
                it_pad.append(torch.zeros(slot_width))
            else:
                if input_tensor.is_tensor:
                    input_tensor = input_tensor.value
                else:
                    # The original had an identical elif/else pair here
                    # (sequence-of-non-tensors vs everything else), merged
                    # since both did the same thing. NOTE(review): a
                    # sequence whose elements are tensors may need
                    # torch.stack instead -- confirm against upstream types.
                    input_tensor = torch.tensor(input_tensor.value)
                it_pad.append(self.tensor_flatten_pad(input_tensor))

        # Pad out to exactly 3 input slots; padding slots are marked with
        # -1 in the final position so they differ from empty inputs.
        while len(it_pad) < 3:
            pad = torch.zeros(slot_width)
            pad[-1] = -1
            it_pad.append(pad)

        output_tensor = example['output'].value
        if not isinstance(output_tensor, torch.Tensor):
            # Bug fix: the original called torch.tensor(output_tensor.value),
            # but .value was already unwrapped above, so any non-tensor
            # output raised AttributeError here.
            output_tensor = torch.tensor(output_tensor)
        ot_pad = self.tensor_flatten_pad(output_tensor)

        # Only the first three input slots are used, matching the fixed
        # 3-input layout (inputs beyond three are ignored, as before).
        domain_embedding = torch.flatten(
            torch.stack((it_pad[0], it_pad[1], it_pad[2], ot_pad)))
        return domain_embedding.float()