in tensorflow_model_optimization/python/core/common/keras/compression/internal/optimize.py [0:0]
from typing import List

import tensorflow as tf


def _map_to_training_weights(
    algorithm,
    layer,
    compressible_weights: List[tf.Variable]) -> List[tf.Tensor]:
"""Construct the training weight values from the layer's pretrained weights.
The weight values have the same structure as the output of
`tf.keras.layers.Layer.get_weights`.
Args:
algorithm: weight compression algorithm
layer: layer
compressible_weights: weight attributes of layer that should be compressed
Returns:
Initial weight values for training.
"""
  # Relies on knowing each built-in layer's weight ordering, e.g. that
  # `kernel` is the first weight and `bias` the second.
  # TODO(tfmot): consider the implications for custom Keras layers. The
  # user would have to pass in the information that 'kernel' is the first
  # variable, 'bias' is the second variable, and so on.
  # TODO(tfmot): see if Keras can introduce changes to simplify this.
  original_weights = []
  training_weights = []
  if isinstance(layer, (tf.keras.layers.Conv2D, tf.keras.layers.Dense)):
    for weight in layer.weights:
      # Only weights selected for compression are replaced by their
      # training representations; all other weights pass through unchanged.
      if _find(weight, compressible_weights):
        algorithm.weight_reprs = []
        algorithm.init_training_weights(weight)
        for weight_repr in algorithm.weight_reprs:
          # Assumes the initializer is tf.keras.initializers.Constant, so
          # calling it with `shape=None` returns the stored value.
          # TODO(tfmot): add a check for this assumption.
          # TODO(tfmot): the documentation for
          # tf.keras.initializers.Constant(value) suggests that `value`
          # must be a single scalar, not a tensor of arbitrary shape.
          # Passing a tensor works in this implementation - verify this.
          training_weights.append(weight_repr.kwargs['initializer'](
              shape=None, dtype=weight_repr.kwargs['dtype']))
      else:
        original_weights.append(weight)

  return training_weights + original_weights
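

# A quick, hedged check of the Constant-initializer assumption discussed in
# the TODO above: under TF 2.x, a Constant built from an array value and
# called with `shape=None` returns a tensor with the value's own shape,
# which is exactly what `_map_to_training_weights` relies on. The documented
# contract only promises scalar `value`, so this is observed behavior, not
# a guarantee.
const_init = tf.keras.initializers.Constant(tf.ones([3, 2]).numpy())
restored = const_init(shape=None, dtype=tf.float32)
assert restored.shape == (3, 2)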
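

# --- Illustrative usage: a minimal sketch, not part of the library. ---
# The `_DemoWeightRepr` and `_DemoSVDAlgorithm` names below are hypothetical
# stand-ins; they only mimic the contract `_map_to_training_weights` relies
# on: `init_training_weights(weight)` appends entries to `weight_reprs`
# (reset by the caller above), each holding a
# `tf.keras.initializers.Constant` under `kwargs['initializer']` and a
# dtype under `kwargs['dtype']`. The sketch also assumes `_find`, defined
# elsewhere in this module, checks membership of `weight` in
# `compressible_weights`.
import collections

_DemoWeightRepr = collections.namedtuple('_DemoWeightRepr', ['kwargs'])


class _DemoSVDAlgorithm:
  """Stand-in algorithm: splits a kernel into two rank-1 SVD factors."""

  def init_training_weights(self, pretrained_weight):
    s, u, v = tf.linalg.svd(pretrained_weight)
    rank = 1
    for factor in (u[:, :rank] * s[:rank], v[:, :rank]):
      self.weight_reprs.append(
          _DemoWeightRepr(kwargs={
              'initializer': tf.keras.initializers.Constant(factor.numpy()),
              'dtype': pretrained_weight.dtype,
          }))


demo_layer = tf.keras.layers.Dense(4)
demo_layer.build(input_shape=(None, 8))

# Compress only the kernel; the bias passes through as an original weight.
initial_values = _map_to_training_weights(
    _DemoSVDAlgorithm(), demo_layer, compressible_weights=[demo_layer.kernel])
# `initial_values` holds the two SVD factors followed by the original bias,
# mirroring the flat-list structure of `demo_layer.get_weights()`.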