research/gam/gam/models/wide_resnet.py
def _get_encoding(self, inputs, is_train, update_batch_stats, **kwargs):
  """Creates the model's hidden representation for a batch of inputs.

  For this model, the hidden representation is the output of the last layer
  before the logit computation: the activations after global average pooling.

  Args:
    inputs: A tensor containing the model inputs. The first dimension is the
      batch size.
    is_train: A boolean placeholder specifying whether the model is run in
      training mode (True) or evaluation mode (False).
    update_batch_stats: Boolean specifying whether to update the batch norm
      statistics.
    **kwargs: Other keyword arguments.

  Returns:
    A tensor containing the encoded batch of samples. The first dimension
    corresponds to the batch size.
  """
  # Helper functions.

  def _conv(name, x, filter_size, in_filters, out_filters, strides):
    """Convolution with He (MSRA) weight initialization."""
    with tf.variable_scope(name):
      n = filter_size * filter_size * out_filters
      kernel = tf.get_variable(
          "DW",
          [filter_size, filter_size, in_filters, out_filters],
          tf.float32,
          initializer=tf.random_normal_initializer(stddev=np.sqrt(2.0 / n)),
      )
      return tf.nn.conv2d(x, kernel, strides, padding="SAME")
  def _relu(x, leakiness=0.0):
    """ReLU, with optional leaky support."""
    return tf.where(tf.less(x, 0.0), leakiness * x, x, name="leaky_relu")
  def _residual(x,
                in_filter,
                out_filter,
                stride,
                activate_before_residual=False):
    """Residual unit with 2 sub layers."""
    if activate_before_residual:
      with tf.variable_scope("shared_activation"):
        x = tf.layers.batch_normalization(
            x, axis=1, scale=True, training=is_train)
        x = _relu(x, self.lrelu_leakiness)
        orig_x = x
    else:
      with tf.variable_scope("residual_only_activation"):
        orig_x = x
        x = tf.layers.batch_normalization(
            x, axis=1, scale=True, training=is_train)
        x = _relu(x, self.lrelu_leakiness)

    with tf.variable_scope("sub1"):
      x = _conv("conv1", x, 3, in_filter, out_filter, stride)

    with tf.variable_scope("sub2"):
      x = tf.layers.batch_normalization(
          x, axis=1, scale=True, training=is_train)
      x = _relu(x, self.lrelu_leakiness)
      x = _conv("conv2", x, 3, out_filter, out_filter, [1, 1, 1, 1])

    with tf.variable_scope("sub_add"):
      # When the number of filters changes, project the shortcut with a 1x1
      # convolution so the residual addition is shape-compatible.
      if in_filter != out_filter:
        orig_x = _conv("conv1x1", orig_x, 1, in_filter, out_filter, stride)
      x += orig_x
    return x
  x = inputs
  tf.summary.image("images_in_net", x)

  # Input augmentation / perturbation, applied only in training mode.
  if self.horizontal_flip:
    x = fast_flip(x, is_training=is_train)
  if self.random_translation:
    raise NotImplementedError("Random translations are not implemented yet.")
  if self.gaussian_noise:
    x = tf.cond(is_train, lambda: x + tf.random_normal(tf.shape(x)) * 0.15,
                lambda: x)

  x = _conv("init_conv", x, 3, 3, 16, [1, 1, 1, 1])

  activate_before_residual = [True, False, False]
  res_func = _residual
  # Channel widths for the initial convolution and the three residual groups.
  filters = [16, 16 * self.width, 32 * self.width, 64 * self.width]

  # Group 1: keeps the spatial resolution.
  with tf.variable_scope("unit_1_0"):
    x = res_func(x, filters[0], filters[1], [1, 1, 1, 1],
                 activate_before_residual[0])
  for i in range(1, self.num_residual_units):
    with tf.variable_scope("unit_1_%d" % i):
      x = res_func(x, filters[1], filters[1], [1, 1, 1, 1], False)

  # Group 2: downsamples by 2 in the first unit.
  with tf.variable_scope("unit_2_0"):
    x = res_func(x, filters[1], filters[2], [1, 2, 2, 1],
                 activate_before_residual[1])
  for i in range(1, self.num_residual_units):
    with tf.variable_scope("unit_2_%d" % i):
      x = res_func(x, filters[2], filters[2], [1, 1, 1, 1], False)

  # Group 3: downsamples by 2 in the first unit.
  with tf.variable_scope("unit_3_0"):
    x = res_func(x, filters[2], filters[3], [1, 2, 2, 1],
                 activate_before_residual[2])
  for i in range(1, self.num_residual_units):
    with tf.variable_scope("unit_3_%d" % i):
      x = res_func(x, filters[3], filters[3], [1, 1, 1, 1], False)

  with tf.variable_scope("unit_last"):
    x = tf.layers.batch_normalization(
        x, axis=1, scale=True, training=is_train)
    x = _relu(x, self.lrelu_leakiness)
    # Global average pooling over the spatial dimensions.
    x = tf.reduce_mean(x, [1, 2])

  return x
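
The encoding returned above is the input to the model's logit layer. As a rough orientation only, the sketch below shows how such an encoding is typically wired to a classifier in a TF1-style graph, including the UPDATE_OPS dependency that tf.layers.batch_normalization needs so its moving statistics are refreshed during training. Nothing in it is taken from wide_resnet.py: the toy_encoder stand-in, layer sizes, optimizer, and class count are illustrative assumptions.

# Usage sketch (hypothetical, not part of wide_resnet.py).
import numpy as np
import tensorflow.compat.v1 as tf  # TF1-style API, as in the repo's era.

tf.disable_v2_behavior()

def toy_encoder(x, is_train):
  """Stand-in for _get_encoding: conv -> batch norm -> ReLU -> global pool."""
  h = tf.layers.conv2d(x, 16, 3, padding="same")
  h = tf.layers.batch_normalization(h, training=is_train)
  h = tf.nn.relu(h)
  # Like the WRN encoding: average over spatial dims -> (batch, channels).
  return tf.reduce_mean(h, [1, 2])

inputs = tf.placeholder(tf.float32, [None, 32, 32, 3])
labels = tf.placeholder(tf.int64, [None])
is_train = tf.placeholder(tf.bool, [])

encoding = toy_encoder(inputs, is_train)
logits = tf.layers.dense(encoding, 10)  # unnormalized class scores
loss = tf.reduce_mean(
    tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels,
                                                   logits=logits))

# tf.layers.batch_normalization registers its moving-average updates in
# UPDATE_OPS; they must run alongside the train op.
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
  train_op = tf.train.AdamOptimizer(1e-3).minimize(loss)

with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  x_batch = np.random.rand(8, 32, 32, 3).astype(np.float32)
  y_batch = np.random.randint(0, 10, size=8).astype(np.int64)
  sess.run(train_op, {inputs: x_batch, labels: y_batch, is_train: True})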