Experiments/PolicyManagers.py
def update_plots(self, counter, viz_dict):
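    """Log training diagnostics to TensorBoard via self.tf_logger.

    Scalar losses are written on every call (discriminator terms once
    training_phase > 1); heavier visualizations (embedding projections,
    trajectory reconstructions, correspondence metrics) are only written
    every self.args.display_freq iterations.
    """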
    # VAE Losses.
    self.tf_logger.scalar_summary('Policy LogLikelihood', self.likelihood_loss, counter)
    self.tf_logger.scalar_summary('Discriminability Loss', self.discriminability_loss, counter)
    self.tf_logger.scalar_summary('Encoder KL', self.encoder_KL, counter)
    self.tf_logger.scalar_summary('VAE Loss', self.VAE_loss, counter)
    self.tf_logger.scalar_summary('Total VAE Loss', self.total_VAE_loss, counter)
    self.tf_logger.scalar_summary('Domain', viz_dict['domain'], counter)
    # Plot discriminator values after we've started training it.
    if self.training_phase > 1:
        # Discriminator loss.
        self.tf_logger.scalar_summary('Discriminator Loss', self.discriminator_loss, counter)
        # Log the discriminator's probability of the correct label (precomputed and passed in via viz_dict).
        self.tf_logger.scalar_summary('Discriminator Probability', viz_dict['discriminator_probs'], counter)
    # If we are displaying things:
    if counter % self.args.display_freq == 0:

        self.gt_gif_list = []
        self.rollout_gif_list = []

        # Now using both TSNE and PCA.
        # Plot source, target, and shared embeddings via TSNE.
        tsne_source_embedding, tsne_target_embedding, tsne_combined_embeddings, tsne_combined_traj_embeddings = self.get_embeddings(projection='tsne')

        # Now actually plot the images.
        self.tf_logger.image_summary("TSNE Source Embedding", [tsne_source_embedding], counter)
        self.tf_logger.image_summary("TSNE Target Embedding", [tsne_target_embedding], counter)
        self.tf_logger.image_summary("TSNE Combined Embeddings", [tsne_combined_embeddings], counter)
        # Plot source, target, and shared embeddings via PCA.
        pca_source_embedding, pca_target_embedding, pca_combined_embeddings, pca_combined_traj_embeddings = self.get_embeddings(projection='pca')

        # Now actually plot the images.
        self.tf_logger.image_summary("PCA Source Embedding", [pca_source_embedding], counter)
        self.tf_logger.image_summary("PCA Target Embedding", [pca_target_embedding], counter)
        self.tf_logger.image_summary("PCA Combined Embeddings", [pca_combined_embeddings], counter)

        if self.args.source_domain == 'ContinuousNonZero' and self.args.target_domain == 'ContinuousNonZero':
            self.tf_logger.image_summary("PCA Combined Trajectory Embeddings", [pca_combined_traj_embeddings], counter)
            self.tf_logger.image_summary("TSNE Combined Trajectory Embeddings", [tsne_combined_traj_embeddings], counter)
        # We are also going to log ground truth trajectories and their reconstructions in each of the domains, to make sure our networks are learning.
        # Should be able to use the policy manager's functions to do this.
        source_trajectory, source_reconstruction, target_trajectory, target_reconstruction = self.get_trajectory_visuals()
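        # get_trajectory_visuals() may return None, so only log when visuals are available.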
        if source_trajectory is not None:

            # Log trajectories as static images for the ContinuousNonZero domain, and as GIFs otherwise.
            if self.args.source_domain == 'ContinuousNonZero':
                self.tf_logger.image_summary("Source Trajectory", [source_trajectory], counter)
                self.tf_logger.image_summary("Source Reconstruction", [source_reconstruction], counter)
            else:
                self.tf_logger.gif_summary("Source Trajectory", [source_trajectory], counter)
                self.tf_logger.gif_summary("Source Reconstruction", [source_reconstruction], counter)

            if self.args.target_domain == 'ContinuousNonZero':
                self.tf_logger.image_summary("Target Trajectory", [target_trajectory], counter)
                self.tf_logger.image_summary("Target Reconstruction", [target_reconstruction], counter)
            else:
                self.tf_logger.gif_summary("Target Trajectory", [target_trajectory], counter)
                self.tf_logger.gif_summary("Target Reconstruction", [target_reconstruction], counter)
        if self.args.source_domain == 'ContinuousNonZero' and self.args.target_domain == 'ContinuousNonZero':

            # Evaluate correspondence metrics and plot them.
            # The trajectory and latent sets have most likely already been computed by this
            # point, so call with the defaults rather than computed_sets=False:
            # self.evaluate_correspondence_metrics(computed_sets=False)
            self.evaluate_correspondence_metrics()

            self.tf_logger.scalar_summary('Source To Target Trajectory Distance', self.source_target_trajectory_distance, counter)
            self.tf_logger.scalar_summary('Target To Source Trajectory Distance', self.target_source_trajectory_distance, counter)
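

# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original repository): update_plots()
# above only assumes that self.tf_logger exposes scalar_summary,
# image_summary, and gif_summary. A minimal stand-in built on
# torch.utils.tensorboard is shown below; the actual tf_logger wrapper used
# by this codebase may differ in signature and behavior.
# ---------------------------------------------------------------------------

import numpy as np
import torch
from torch.utils.tensorboard import SummaryWriter


class MinimalTFLogger:

    def __init__(self, log_dir):
        self.writer = SummaryWriter(log_dir=log_dir)

    def scalar_summary(self, tag, value, step):
        # Log a single scalar, e.g. one loss term.
        self.writer.add_scalar(tag, value, step)

    def image_summary(self, tag, images, step):
        # 'images' is a list of HxWxC uint8 arrays (e.g. rendered matplotlib figures).
        for i, image in enumerate(images):
            self.writer.add_image("{0}/{1}".format(tag, i), image, step, dataformats='HWC')

    def gif_summary(self, tag, gifs, step, fps=4):
        # 'gifs' is a list of TxHxWxC uint8 frame stacks; add_video expects NxTxCxHxW
        # (and requires moviepy to be installed).
        for i, gif in enumerate(gifs):
            video = torch.from_numpy(np.asarray(gif)).permute(0, 3, 1, 2).unsqueeze(0)
            self.writer.add_video("{0}/{1}".format(tag, i), video, step, fps=fps)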