in ludwig/models/ecd.py [0:0]
def call(self, inputs, training=None, mask=None):
    # inputs is either a dict feature_name -> tensor / ndarray,
    # or a tuple (inputs, targets) where
    #   inputs is a dict feature_name -> tensor / ndarray
    #   targets is a dict feature_name -> tensor / ndarray
    if isinstance(inputs, tuple):
        inputs, targets = inputs
    else:
        targets = None
    assert inputs.keys() == self.input_features.keys()

    # encode each input feature with its own encoder
    encoder_outputs = {}
    for input_feature_name, input_values in inputs.items():
        encoder = self.input_features[input_feature_name]
        encoder_output = encoder(input_values, training=training,
                                 mask=mask)
        encoder_outputs[input_feature_name] = encoder_output

    # combine the encoder outputs into a single representation
    combiner_outputs = self.combiner(encoder_outputs)

    # decode each output feature, passing the last hidden states of
    # previously decoded features along as dependencies
    output_logits = {}
    output_last_hidden = {}
    for output_feature_name, decoder in self.output_features.items():
        # use presence or absence of targets
        # to signal training or prediction
        decoder_inputs = (combiner_outputs, copy.copy(output_last_hidden))
        if targets is not None:
            # targets are only used during training,
            # during prediction they are omitted
            decoder_inputs = (decoder_inputs, targets[output_feature_name])

        decoder_outputs = decoder(
            decoder_inputs,
            training=training,
            mask=mask
        )
        output_logits[output_feature_name] = decoder_outputs

        # output_logits[output_feature_name][LOGITS] = decoder_logits
        # output_logits[output_feature_name][
        #     LAST_HIDDEN] = decoder_last_hidden

        # todo Piero: not sure this is needed; if the combiner has lengths
        # and the decoder wants to return them, the decoder should do it,
        # otherwise this can override the decoder outputs
        # if LENGTHS in combiner_outputs:
        #     output_logits[output_feature_name][LENGTHS] = \
        #         combiner_outputs[LENGTHS]

        output_last_hidden[output_feature_name] = decoder_outputs[
            'last_hidden']

    return output_logits
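
A minimal sketch of how this calling convention can be exercised from the caller's side. The feature names, shapes, and the `model` instance below are hypothetical placeholders; only the dict-of-tensors inputs, the optional (inputs, targets) tuple, and the 'last_hidden' key in each decoder's output dict come from the code above.

import numpy as np

# `model` is assumed to be an already constructed ECD instance
# (construction omitted here); its input / output feature names
# below are made up for the example
inputs = {
    'text_feature': np.random.randint(0, 100, size=(32, 20)),       # token ids
    'numerical_feature': np.random.rand(32, 1).astype(np.float32),  # scalars
}
targets = {
    'category_feature': np.random.randint(0, 5, size=(32,)),
}

# training-style call: pass (inputs, targets) so decoders can
# condition on the ground-truth targets
train_outputs = model((inputs, targets), training=True)

# prediction-style call: pass only inputs; targets default to None
predict_outputs = model(inputs, training=False)

# each entry of the returned dict is the corresponding decoder's
# output dict, which includes at least a 'last_hidden' tensor
last_hidden = predict_outputs['category_feature']['last_hidden']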