in tensorflow_ranking/python/keras/canned/gam.py [0:0]
def __init__(self,
             context_feature_columns=None,
             example_feature_columns=None,
             context_hidden_layer_dims=None,
             example_hidden_layer_dims=None,
             activation=None,
             use_batch_norm=True,
             batch_norm_moment=0.999,
             dropout=0.5,
             name='gam_ranking_model',
             **kwargs):
  """Initializes an instance of `GAMRankingNetwork`.

  Args:
    context_feature_columns: A dict containing all the context feature columns
      used by the network. Keys are feature names, and values are instances of
      classes derived from `_FeatureColumn`.
    example_feature_columns: A dict containing all the example feature columns
      used by the network. Keys are feature names, and values are instances of
      classes derived from `_FeatureColumn`.
    context_hidden_layer_dims: Iterable of number hidden units per layer for
      context features. See `example_hidden_layer_dims`.
    example_hidden_layer_dims: Iterable of number hidden units per layer for
      example features. All layers are fully connected. Ex. `[64, 32]` means
      first layer has 64 nodes and second one has 32.
    activation: Activation function applied to each layer. If `None`, will use
      an identity activation, which is default behavior in Keras activations.
    use_batch_norm: Whether to use batch normalization after each hidden
      layer.
    batch_norm_moment: Momentum for the moving average in batch normalization.
    dropout: When not `None`, the probability we will drop out a given
      coordinate.
    name: name of the keras network.
    **kwargs: Keyword arguments.

  Raises:
    ValueError: If `example_feature_columns` or `example_hidden_layer_dims`
      is empty.
  """
  if not example_feature_columns or not example_hidden_layer_dims:
    raise ValueError('example_feature_columns or example_hidden_layer_dims '
                     'must not be empty.')
  super(GAMRankingNetwork, self).__init__(
      context_feature_columns=context_feature_columns,
      example_feature_columns=example_feature_columns,
      name=name,
      **kwargs)
  # Context tower dims are optional; normalize to a (possibly empty) list of
  # ints so downstream layer construction is uniform.
  context_hidden_layer_dims = context_hidden_layer_dims or []
  self._context_hidden_layer_dims = [
      int(d) for d in context_hidden_layer_dims
  ]
  self._example_hidden_layer_dims = [
      int(d) for d in example_hidden_layer_dims
  ]
  self._num_features = len(self.example_feature_columns)
  self._activation = activation
  self._use_batch_norm = use_batch_norm
  self._batch_norm_moment = batch_norm_moment
  self._dropout = dropout

  # One tower per context feature. Each context tower outputs `_num_features`
  # units — presumably one weight per example feature; confirm against the
  # network's `score` implementation.
  # NOTE: loop variable renamed from `name` to avoid shadowing the `name`
  # constructor parameter.
  self._per_context_feature_layers = {}
  for feature_name in self._context_feature_columns:
    self._per_context_feature_layers[feature_name] = _make_tower_layers(
        hidden_layer_dims=self._context_hidden_layer_dims,
        output_units=self._num_features,
        activation=self._activation,
        use_batch_norm=self._use_batch_norm,
        batch_norm_moment=self._batch_norm_moment,
        dropout=self._dropout)

  # One tower per example feature, each producing a single scalar
  # sub-score, in keeping with the additive structure of a GAM.
  self._per_example_feature_layers = {}
  for feature_name in self._example_feature_columns:
    self._per_example_feature_layers[feature_name] = _make_tower_layers(
        hidden_layer_dims=self._example_hidden_layer_dims,
        output_units=1,
        activation=self._activation,
        use_batch_norm=self._use_batch_norm,
        batch_norm_moment=self._batch_norm_moment,
        dropout=self._dropout)