in example_zoo/tensorflow/probability/logistic_regression/trainer/logistic_regression.py [0:0]
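# NOTE: assumed imports for this excerpt; the full module defines its own
# preamble. FLAGS values and the helpers `toy_logistic_data`,
# `build_input_pipeline`, and `visualize_decision` are defined elsewhere
# in the file.
import os

from absl import flags

import tensorflow as tf
import tensorflow_probability as tfp

tfd = tfp.distributions
FLAGS = flags.FLAGS
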
def main(argv):
  del argv  # unused
  if tf.io.gfile.exists(FLAGS.model_dir):
    tf.compat.v1.logging.warning(
        "Warning: deleting old log directory at {}".format(FLAGS.model_dir))
    tf.io.gfile.rmtree(FLAGS.model_dir)
  tf.io.gfile.makedirs(FLAGS.model_dir)

  # Generate (and visualize) a toy classification dataset.
  w_true, b_true, x, y = toy_logistic_data(FLAGS.num_examples, 2)
  features, labels = build_input_pipeline(x, y, FLAGS.batch_size)
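  # Each toy point has 2 input features; `build_input_pipeline` is assumed
  # to yield batched `features`/`labels` tensors that deliver a fresh batch
  # each time the graph is evaluated.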

  # Define a logistic regression model as a Bernoulli distribution
  # parameterized by logits from a single linear layer. We use the Flipout
  # Monte Carlo estimator for the layer: this enables lower variance
  # stochastic gradients than naive reparameterization.
  with tf.compat.v1.name_scope("logistic_regression", values=[features]):
    layer = tfp.layers.DenseFlipout(
        units=1,
        activation=None,
        kernel_posterior_fn=tfp.layers.default_mean_field_normal_fn(),
        bias_posterior_fn=tfp.layers.default_mean_field_normal_fn())
    logits = layer(features)
    labels_distribution = tfd.Bernoulli(logits=logits)
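
  # Calling the layer above also records, in `layer.losses`, the KL
  # divergence between each mean-field posterior and its prior; the ELBO
  # computation below picks these terms up.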
  # Compute the negative ELBO as the loss: the negative log-likelihood is
  # averaged over the batch, and the KL term is scaled by 1/num_examples so
  # that both are per-example quantities.
  neg_log_likelihood = -tf.reduce_mean(
      input_tensor=labels_distribution.log_prob(labels))
  kl = sum(layer.losses) / FLAGS.num_examples
  elbo_loss = neg_log_likelihood + kl
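  # In symbols, with batch size B and N = num_examples:
  #   loss = -(1/B) * sum_i log p(y_i | x_i, w) + (1/N) * KL(q(w) || p(w))
  # which is a per-example estimate of the negative evidence lower bound.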

  # Build metrics for evaluation. Predictions are formed from a single
  # forward pass of the probabilistic layers, so they are cheap but noisy.
  # Thresholding the logits at zero is the same as thresholding the
  # predicted probability sigmoid(logits) at 0.5.
  predictions = tf.cast(logits > 0, dtype=tf.int32)
  accuracy, accuracy_update_op = tf.compat.v1.metrics.accuracy(
      labels=labels, predictions=predictions)

  with tf.compat.v1.name_scope("train"):
    optimizer = tf.compat.v1.train.AdamOptimizer(
        learning_rate=FLAGS.learning_rate)
    train_op = optimizer.minimize(elbo_loss)

  init_op = tf.group(tf.compat.v1.global_variables_initializer(),
                     tf.compat.v1.local_variables_initializer())
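  # `tf.compat.v1.metrics.accuracy` creates local (non-trainable) counter
  # variables, which is why the local-variables initializer is needed too.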

  with tf.compat.v1.Session() as sess:
    sess.run(init_op)

    # Fit the model to data.
    for step in range(FLAGS.max_steps):
      _ = sess.run([train_op, accuracy_update_op])
      if step % 100 == 0:
        loss_value, accuracy_value = sess.run([elbo_loss, accuracy])
        print("Step: {:>3d} Loss: {:.3f} Accuracy: {:.3f}".format(
            step, loss_value, accuracy_value))
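    # `accuracy` is the running average maintained by the metric's internal
    # counters (advanced by `accuracy_update_op`), not just the last batch.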

    # Visualize some draws from the weights posterior. Each `sess.run`
    # re-executes the sampling ops, so every iteration draws a fresh (w, b)
    # from the fitted posterior.
    w_draw = layer.kernel_posterior.sample()
    b_draw = layer.bias_posterior.sample()
    candidate_w_bs = []
    for _ in range(FLAGS.num_monte_carlo):
      w, b = sess.run((w_draw, b_draw))
      candidate_w_bs.append((w, b))
    visualize_decision(x, y, (w_true, b_true),
                       candidate_w_bs,
                       fname=os.path.join(FLAGS.model_dir,
                                          "weights_inferred.png"))