# evaluate() — excerpt from Experiments/PolicyManagers.py


	def evaluate(self, model):
		"""Run the evaluation suite for this policy manager.

		Performs metric evaluation on a small test set, optionally visualizes
		the learned latent space on robot datasets, optionally sets up
		evaluation against a pretrained encoder, and zeroes epsilon for
		deterministic downstream evaluation.

		Args:
			model: Checkpoint path/identifier to load before evaluating;
				falsy to evaluate whatever models are currently in memory.
		"""
		self.set_epoch(0)

		# Restore saved weights when a checkpoint was supplied.
		if model:
			self.load_all_models(model)

		np.set_printoptions(suppress=True, precision=2)

		print("Running Evaluation of State Distances on small test set.")
		self.evaluate_metrics()

		# Visualize the latent space on robot datasets, but only when the
		# subpolicy itself was trained (fix_subpolicy == 0).
		robot_datasets = ('MIME', 'Roboturk', 'OrigRoboturk', 'FullRoboturk', 'Mocap')
		if self.args.data in robot_datasets and self.args.fix_subpolicy == 0:
			print("Running Visualization on Robot Data.")
			self.pretrain_policy_manager = PolicyManager_Pretrain(self.args.number_policies, self.dataset, self.args)
			self.pretrain_policy_manager.setup()
			self.pretrain_policy_manager.load_all_models(model, only_policy=True)
			self.pretrain_policy_manager.visualize_robot_data()

		# Compare against a separately trained encoder when one is provided.
		if self.args.subpolicy_model:
			print("Loading encoder.")
			self.setup_eval_against_encoder()

		# Evaluate NLL and (potentially Expected Value Difference) on
		# Validation / Test Datasets; epsilon=0 makes the policy deterministic.
		self.epsilon = 0.

		# Drop into an interactive shell for manual inspection in debug mode.
		if self.args.debug:
			embed()