# Extracted method: Linear.build(self, input_shape)
# From: tensorflow_lattice/python/linear_layer.py

  def build(self, input_shape):
    """Standard Keras build() method.

    Creates the kernel weight (and optional bias), wiring in constraints and
    combined regularizers, and precomputes per-dimension input clipping
    bounds from ``input_min``/``input_max``.

    Args:
      input_shape: Must be: (batch_size, num_input_dims) if units == 1, or
        (batch_size, units, num_input_dims) if units > 1.

    Raises:
      ValueError: If shape is invalid.
    """
    linear_lib.verify_hyperparameters(
        num_input_dims=self.num_input_dims,
        units=self.units,
        input_shape=input_shape)

    # Attach a constraint object only if at least one constraint is actually
    # configured; otherwise leave the kernel unconstrained.
    if (any(self.monotonicities) or self.monotonic_dominances or
        self.range_dominances or self.normalization_order):
      constraints = LinearConstraints(
          monotonicities=self.monotonicities,
          monotonic_dominances=self.monotonic_dominances,
          range_dominances=self.range_dominances,
          input_min=self.input_min,
          input_max=self.input_max,
          normalization_order=self.normalization_order)
    else:
      constraints = None

    def _combine_regularizers(regularizers):
      """Collapses a list of regularizers into the single one Keras expects.

      Returns None for an empty list, the sole element for a single-item
      list, and otherwise a callable that sums all regularization losses
      (the Keras interface assumes only one regularizer).
      """
      if not regularizers:
        return None
      if len(regularizers) == 1:
        return regularizers[0]
      return lambda x: tf.add_n([r(x) for r in regularizers])

    self.kernel = self.add_weight(
        LINEAR_LAYER_KERNEL_NAME,
        # 1 column matrix rather than vector for matrix multiplication.
        shape=[self.num_input_dims, self.units],
        initializer=self.kernel_initializer,
        regularizer=_combine_regularizers(self.kernel_regularizer),
        constraint=constraints,
        dtype=self.dtype)

    if self.use_bias:
      self.bias = self.add_weight(
          LINEAR_LAYER_BIAS_NAME,
          # Scalar bias when units == 1, one bias per unit otherwise.
          shape=[] if self.units == 1 else [self.units],
          initializer=self.bias_initializer,
          regularizer=_combine_regularizers(self.bias_regularizer),
          constraint=None,
          dtype=self.dtype)

    input_min = utils.canonicalize_input_bounds(self.input_min)
    input_max = utils.canonicalize_input_bounds(self.input_max)
    # Precompute clipping tensors only when at least one finite bound is set.
    # Unset per-dimension bounds become +/-inf so they never clip.
    if ((input_min and input_min.count(None) < len(input_min)) or
        (input_max and input_max.count(None) < len(input_max))):
      lower_bounds = [val if val is not None else -np.inf
                      for val in input_min or [None] * self.num_input_dims]
      upper_bounds = [val if val is not None else np.inf
                      for val in input_max or [None] * self.num_input_dims]
      self.clip_value_min = tf.constant(lower_bounds, dtype=self.dtype)
      self.clip_value_max = tf.constant(upper_bounds, dtype=self.dtype)
    else:
      self.clip_value_min = None
      self.clip_value_max = None

    super(Linear, self).build(input_shape)