def __init__()

in tensorflow_ranking/python/keras/layers.py


  def __init__(self,
               example_feature_num: int,
               example_hidden_layer_dims: List[int],
               context_feature_num: Optional[int] = None,
               context_hidden_layer_dims: Optional[List[int]] = None,
               activation: Optional[Callable[..., tf.Tensor]] = None,
               use_batch_norm: bool = True,
               batch_norm_moment: float = 0.999,
               dropout: float = 0.5,
               name: Optional[str] = None,
               **kwargs: Dict[Any, Any]):
    """Initializes the layer.

    Args:
      example_feature_num: Number of example features.
      example_hidden_layer_dims: Iterable of numbers of hidden units for a
        tower. Each example feature will have an identical tower.
      context_feature_num: Number of context features. If `None` or 0, no
        context weighting will be applied; otherwise `context_hidden_layer_dims`
        is required.
      context_hidden_layer_dims: Iterable of numbers of hidden units for a
        tower. Each context feature (if any) will have an identical tower.
        Required if `context_feature_num` is specified.
      activation: Activation function applied to each layer. If `None`, the
        identity activation is used.
      use_batch_norm: Whether to use batch normalization after each hidden
        layer.
      batch_norm_moment: Momentum for the moving average in batch normalization.
      dropout: When not `None`, the dropout probability for the dropout layer
        in each tower.
      name: Name of the Keras layer.
      **kwargs: Keyword arguments.
    """

    super().__init__(name=name, **kwargs)
    self._example_feature_num = example_feature_num
    self._context_feature_num = context_feature_num
    self._example_hidden_layer_dims = example_hidden_layer_dims
    self._context_hidden_layer_dims = context_hidden_layer_dims
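    # `tf.keras.activations.get(None)` resolves to the linear (identity)
    # activation, which is what the docstring promises for `activation=None`.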
    self._activation = tf.keras.activations.get(activation)
    self._use_batch_norm = use_batch_norm
    self._batch_norm_moment = batch_norm_moment
    self._dropout = dropout

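    # One independent scoring tower per example feature: each tower consumes a
    # single feature and emits a scalar sub-logit (`output_units=1`), giving
    # the additive per-feature structure of a generalized additive model.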
    self._example_towers = []
    for i in range(self._example_feature_num):
      self._example_towers.append(
          create_tower(
              hidden_layer_dims=self._example_hidden_layer_dims,
              output_units=1,
              activation=self._activation,
              use_batch_norm=self._use_batch_norm,
              batch_norm_moment=self._batch_norm_moment,
              dropout=self._dropout,
              name='{}_example_tower_{}'.format(name, i)))

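    # Context towers are optional. Each one outputs `example_feature_num`
    # units, i.e. one weight per example sub-logit, so context features can
    # re-weight the per-feature scores.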
    self._context_towers = None
    if context_feature_num and context_feature_num > 0:
      if not context_hidden_layer_dims:
        raise ValueError(
            'When `context_feature_num` > 0, `context_hidden_layer_dims` is '
            'required! Currently `context_feature_num` is {}, but '
            '`context_hidden_layer_dims` is {}'.format(
                context_feature_num, context_hidden_layer_dims))
      self._context_towers = []
      for i in range(self._context_feature_num):
        self._context_towers.append(
            create_tower(
                hidden_layer_dims=self._context_hidden_layer_dims,
                output_units=self._example_feature_num,
                activation=self._activation,
                use_batch_norm=self._use_batch_norm,
                batch_norm_moment=self._batch_norm_moment,
                dropout=self._dropout,
                name='{}_context_tower_{}'.format(name, i)))