moonlight/glyphs/neural.py
def __init__(self,
             input_placeholder,
             hidden_layer,
             reconstruction_layer=None,
             autoencoder_vars=None,
             labels_placeholder=None,
             prediction_layer=None,
             prediction_vars=None):
"""Builds the NeuralNetworkGlyphClassifier that holds the TensorFlow model.
Args:
input_placeholder: A tf.placeholder representing the input staffline
image. Dtype float32 and shape (batch_size, target_height, None).
hidden_layer: An inner layer in the model. Should be the last layer in the
autoencoder model before reconstructing the input, and/or an
intermediate layer in the prediction network. self is intended to be the
last common ancestor of the reconstruction_layer output and the
prediction_layer output, if both are present.
reconstruction_layer: The reconstruction of the input, for an autoencoder
model. If non-None, should have the same shape as input_placeholder.
autoencoder_vars: The variables for the autoencoder model (parameters
affecting hidden_layer and reconstruction_layer), or None. If non-None,
a dict mapping variable name to tf.Variable object.
labels_placeholder: The labels tensor. A placeholder will be created if
None is given. Dtype int32 and shape (batch_size, width). Values are
between 0 and NUM_GLYPHS - 1 (where each value is the Glyph.Type enum
value minus one, to skip UNKNOWN_TYPE).
prediction_layer: The logit probability of each glyph for each column.
Must be able to be passed to tf.nn.softmax to produce the probability of
each glyph. 2D (width, NUM_GLYPHS). May be None if the model is not
being used for classification.
prediction_vars: The variables for the classification model (parameters
affecting hidden_layer and prediction_layer), or None. If non-None, a
dict mapping variable name to tf.Variable object.
"""
  self.input_placeholder = input_placeholder
  self.hidden_layer = hidden_layer
  self.reconstruction_layer = reconstruction_layer
  self.autoencoder_vars = autoencoder_vars or {}
  # Calculate the loss that will be minimized for the autoencoder model.
  self.autoencoder_loss = None
  if self.reconstruction_layer is not None:
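    # Pixelwise mean squared error between the input and its
    # reconstruction, averaged over every pixel in the batch.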
    self.autoencoder_loss = tf.reduce_mean(
        tf.squared_difference(self.input_placeholder,
                              self.reconstruction_layer))
  self.prediction_layer = prediction_layer
  self.prediction_vars = prediction_vars or {}
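  # Default to a placeholder whose batch size and width are both dynamic.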
  self.labels_placeholder = (
      labels_placeholder if labels_placeholder is not None else
      tf.placeholder(tf.int32, (None, None)))
  # Calculate the loss that will be minimized for the prediction model.
  self.prediction_loss = None
  # The probabilities of each glyph for each column.
  self.prediction = None
  if self.prediction_layer is not None:
    self.prediction_loss = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(
            logits=self.prediction_layer,
            labels=tf.one_hot(self.labels_placeholder, NUM_GLYPHS)))
    # Guarded by the None check above, so that a reconstruction-only model
    # does not call tf.nn.softmax on None.
    self.prediction = tf.nn.softmax(self.prediction_layer)
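

# A minimal usage sketch, not the library's own setup code: it assumes
# TensorFlow 1.x and wires a toy 1-D convolutional model into the
# constructor above. `images`, `hidden`, `reconstruction`, `logits`, and
# `TARGET_HEIGHT` are illustrative names chosen here, not Moonlight API.
import tensorflow as tf

TARGET_HEIGHT = 18  # Stand-in staffline window height.

images = tf.placeholder(tf.float32, (None, TARGET_HEIGHT, None))
# Put the height axis last so it serves as the channel dimension for 1-D
# convolutions over the (dynamic) width axis.
columns = tf.transpose(images, [0, 2, 1])  # (batch, width, height)
hidden = tf.layers.conv1d(
    columns, filters=32, kernel_size=3, padding='same',
    activation=tf.nn.relu)
# Reconstruct the height channels, then restore the original axis order so
# the result has the same shape as input_placeholder.
reconstruction = tf.transpose(
    tf.layers.conv1d(hidden, filters=TARGET_HEIGHT, kernel_size=3,
                     padding='same'),
    [0, 2, 1])
# Per-column glyph logits, shaped (batch, width, NUM_GLYPHS); the
# constructor's cross-entropy loss and softmax treat the last axis as the
# classes. NUM_GLYPHS is the module-level constant used above.
logits = tf.layers.conv1d(hidden, filters=NUM_GLYPHS, kernel_size=1)

classifier = NeuralNetworkGlyphClassifier(
    input_placeholder=images,
    hidden_layer=hidden,
    reconstruction_layer=reconstruction,
    prediction_layer=logits)
# Either loss can then be minimized, e.g.:
#   train_op = tf.train.AdamOptimizer().minimize(classifier.autoencoder_loss)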