tensorflow_lattice/python/lattice_layer.py
def __init__(self,
lattice_sizes,
units=1,
monotonicities=None,
unimodalities=None,
edgeworth_trusts=None,
trapezoid_trusts=None,
monotonic_dominances=None,
range_dominances=None,
joint_monotonicities=None,
joint_unimodalities=None,
output_min=None,
output_max=None,
num_projection_iterations=10,
monotonic_at_every_step=True,
clip_inputs=True,
interpolation="hypercube",
kernel_initializer="random_uniform_or_linear_initializer",
kernel_regularizer=None,
**kwargs):
# pyformat: disable
"""Initializes an instance of `Lattice`.
Args:
lattice_sizes: List or tuple of length d of integers which represents the
number of lattice vertices per dimension (minimum is 2). The second
dimension of the input shape must match the number of elements in
lattice_sizes.
units: Output dimension of the layer. See class comments for details.
monotonicities: None or list or tuple of same length as lattice_sizes of
{'none', 'increasing', 0, 1} which specifies if the model output should
be monotonic in the corresponding feature, using 'increasing' or 1 to
indicate increasing monotonicity and 'none' or 0 to indicate no
monotonicity constraints.
unimodalities: None or list or tuple of same length as lattice_sizes of
{'none', 'valley', 'peak', 0, 1, -1} which specifies if the model output
should be unimodal in the corresponding feature, using 'valley' or 1 to
indicate that the function first decreases then increases, 'peak' or -1
to indicate that the function first increases then decreases, and 'none'
or 0 to indicate no unimodality constraints.
edgeworth_trusts: None or three-element tuple or iterable of three-element
tuples. First element is the index of the main (monotonic) feature.
Second element is the index of the conditional feature. Third element is
the direction of trust: 'positive' or 1 if higher values of the
conditional feature should increase trust in the main feature and
'negative' or -1 otherwise.
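For example, `(0, 2, 'positive')` means that higher values of feature 2
should increase trust in feature 0.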
trapezoid_trusts: None or three-element tuple or iterable of three-element
tuples. First element is the index of the main (monotonic) feature.
Second element is the index of the conditional feature. Third element is
the direction of trust: 'positive' or 1 if higher values of the
conditional feature should increase trust in the main feature and
'negative' or -1 otherwise.
monotonic_dominances: None or two-element tuple or iterable of two-element
tuples. First element is the index of the dominant feature. Second
element is the index of the weak feature.
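For example, `(0, 1)` declares feature 0 dominant over feature 1.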
range_dominances: None or two-element tuple or iterable of two-element
tuples. First element is the index of the dominant feature. Second
element is the index of the weak feature.
joint_monotonicities: None or two-element tuple or iterable of two-element
tuples which represents indices of two features requiring joint
monotonicity.
joint_unimodalities: None or tuple or iterable of tuples. Each tuple
contains 2 elements: an iterable of indices of a single group of jointly
unimodal features followed by the string 'valley' or 'peak', using
'valley' to indicate that the function first decreases then increases
and 'peak' to indicate that the function first increases then decreases.
For example: ([0, 3, 4], 'valley').
output_min: None or lower bound of the output.
output_max: None or upper bound of the output.
num_projection_iterations: Number of iterations of the Dykstra projection
algorithm. Projection updates will be closer to a true projection (with
respect to the L2 norm) with a higher number of iterations, with
diminishing returns on projection precision; an infinite number of
iterations would yield a perfect projection. Increasing this number
might slightly improve convergence at the cost of slightly longer
running time. Most likely you want this number to be proportional to
the number of lattice vertices in the largest constrained dimension.
monotonic_at_every_step: Whether to strictly enforce monotonicity and
trust constraints after every gradient update by applying a final
imprecise projection. Setting this parameter to True together with a
small num_projection_iterations is likely to hurt convergence.
clip_inputs: Whether inputs should be clipped to the input range of the
lattice.
interpolation: One of 'hypercube' or 'simplex' interpolation. For a
d-dimensional lattice, 'hypercube' interpolates 2^d parameters, whereas
'simplex' uses d+1 parameters and thus scales better. For details see
`tfl.lattice_lib.evaluate_with_simplex_interpolation` and
`tfl.lattice_lib.evaluate_with_hypercube_interpolation`.
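For example, for a 4-dimensional lattice each 'hypercube' evaluation
interpolates 2^4 = 16 vertex parameters, whereas 'simplex' interpolates
only 4 + 1 = 5.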
kernel_initializer: None or one of:
- `'linear_initializer'`: initialize parameters to form a linear
function with positive and equal coefficients for monotonic dimensions
and 0.0 coefficients for other dimensions. The linear function is such
that the minimum possible output equals output_min and the maximum
possible output equals output_max. See
`tfl.lattice_layer.LinearInitializer` class docstring for more
details.
- `'random_monotonic_initializer'`: initialize parameters uniformly at
random such that all parameters are monotonically increasing for each
input. Parameters will be sampled uniformly at random from the range
`[output_min, output_max]`. See
`tfl.lattice_layer.RandomMonotonicInitializer` class docstring for
more details.
- `'random_uniform_or_linear_initializer'`: if the lattice has a single
joint unimodality constraint group encompassing all features, then use
the Keras 'random_uniform' initializer; otherwise, use TFL's
'linear_initializer'.
- Any Keras initializer object.
kernel_regularizer: None or a single element or a list of any of the
following:
- Tuple `('torsion', l1, l2)` where l1 and l2 represent the corresponding
regularization amounts for the graph Torsion regularizer. l1 and l2 can
either be single floats or lists of floats to specify a different
regularization amount for every dimension.
- Tuple `('laplacian', l1, l2)` where l1 and l2 represent the corresponding
regularization amounts for the graph Laplacian regularizer. l1 and l2 can
either be single floats or lists of floats to specify a different
regularization amount for every dimension.
- Any Keras regularizer object.
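For example, `kernel_regularizer=[('torsion', 0.0, 1e-4),
('laplacian', 1e-4, 0.0)]` applies both a Torsion and a Laplacian
regularizer (the weights here are illustrative).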
**kwargs: Other args passed to `tf.keras.layers.Layer` initializer.
Raises:
ValueError: If layer hyperparameters are invalid.
"""
# pyformat: enable
lattice_lib.verify_hyperparameters(
lattice_sizes=lattice_sizes,
monotonicities=monotonicities,
unimodalities=unimodalities,
interpolation=interpolation)
super(Lattice, self).__init__(**kwargs)
self.lattice_sizes = lattice_sizes
self.units = units
self.monotonicities = monotonicities
self.unimodalities = unimodalities
# Check if inputs are a single tuple of ints (vs an iterable of tuples)
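# If so, wrap the single constraint tuple in a list so that the code below
# can always iterate over a list of constraint tuples.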
if (isinstance(edgeworth_trusts, tuple) and
isinstance(edgeworth_trusts[0], int)):
self.edgeworth_trusts = [edgeworth_trusts]
else:
self.edgeworth_trusts = edgeworth_trusts
if (isinstance(trapezoid_trusts, tuple) and
isinstance(trapezoid_trusts[0], int)):
self.trapezoid_trusts = [trapezoid_trusts]
else:
self.trapezoid_trusts = trapezoid_trusts
if (isinstance(monotonic_dominances, tuple) and
isinstance(monotonic_dominances[0], int)):
self.monotonic_dominances = [monotonic_dominances]
else:
self.monotonic_dominances = monotonic_dominances
if (isinstance(range_dominances, tuple) and
isinstance(range_dominances[0], int)):
self.range_dominances = [range_dominances]
else:
self.range_dominances = range_dominances
if (isinstance(joint_monotonicities, tuple) and
isinstance(joint_monotonicities[0], int)):
self.joint_monotonicities = [joint_monotonicities]
else:
self.joint_monotonicities = joint_monotonicities
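# joint_unimodalities needs a different check: a single group is a 2-tuple
# (indices, 'valley'/'peak') whose second element is a string, not an int.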
if (isinstance(joint_unimodalities, tuple) and
len(joint_unimodalities) == 2 and
isinstance(joint_unimodalities[1], six.string_types)):
self.joint_unimodalities = [joint_unimodalities]
else:
self.joint_unimodalities = joint_unimodalities
self.output_min = output_min
self.output_max = output_max
self.num_projection_iterations = num_projection_iterations
self.monotonic_at_every_step = monotonic_at_every_step
self.clip_inputs = clip_inputs
self.interpolation = interpolation
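# Resolve the initializer spec (e.g. the default
# 'random_uniform_or_linear_initializer') into a concrete initializer,
# taking the constraints and output bounds into account.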
self.kernel_initializer = create_kernel_initializer(
kernel_initializer, self.lattice_sizes, self.monotonicities,
self.output_min, self.output_max, self.unimodalities,
self.joint_unimodalities)
self.kernel_regularizer = []
if kernel_regularizer:
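# A single regularizer, given either as a callable or as a ('name', l1, l2)
# tuple, is wrapped in a list so the loop below handles both forms uniformly.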
if (callable(kernel_regularizer) or
(isinstance(kernel_regularizer, tuple) and
isinstance(kernel_regularizer[0], six.string_types))):
kernel_regularizer = [kernel_regularizer]
for regularizer in kernel_regularizer:
if isinstance(regularizer, tuple):
(name, l1, l2) = regularizer
if name.lower() == "torsion":
self.kernel_regularizer.append(
TorsionRegularizer(
lattice_sizes=self.lattice_sizes, l1=l1, l2=l2))
elif name.lower() == "laplacian":
self.kernel_regularizer.append(
LaplacianRegularizer(
lattice_sizes=self.lattice_sizes, l1=l1, l2=l2))
else:
raise ValueError("Unknown custom lattice regularizer: %s" %
regularizer)
else:
# This is needed for Keras deserialization logic to be aware of our
# custom objects.
with keras.utils.custom_object_scope({
"TorsionRegularizer": TorsionRegularizer,
"LaplacianRegularizer": LaplacianRegularizer,
}):
self.kernel_regularizer.append(keras.regularizers.get(regularizer))