rlkit/torch/sac/bear.py [294:337]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
            ))
            self.eval_statistics.update(create_stats_ordered_dict(
                'MMD Loss',
                ptu.get_numpy(mmd_loss)
            ))
            self.eval_statistics.update(create_stats_ordered_dict(
                'Action Divergence',
                ptu.get_numpy(action_divergence)
            ))
            self.eval_statistics.update(create_stats_ordered_dict(
                'Raw Action Divergence',
                ptu.get_numpy(raw_action_divergence)
            ))
            if self.mode == 'auto':
                self.eval_statistics['Alpha'] = self.log_alpha.exp().item()
        
        self._n_train_steps_total += 1
    
    def get_diagnostics(self):
        return self.eval_statistics

    def end_epoch(self, epoch):
        self._need_to_update_eval_statistics = True

    @property
    def networks(self):
        return [
            self.policy,
            self.qf1,
            self.qf2,
            self.target_qf1,
            self.target_qf2,
            self.vae
        ]

    def get_snapshot(self):
        return dict(
            policy=self.policy,
            qf1=self.qf1,
            qf2=self.qf2,
            target_qf1=self.target_qf1,
            target_qf2=self.target_qf2,
            vae=self.vae,
        )
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
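The methods in this excerpt form the diagnostics/checkpointing surface that rlkit's outer training algorithms consume: get_diagnostics() feeds the logger, end_epoch() re-arms per-epoch statistics collection, and networks / get_snapshot() expose every learned module for device placement and saving. The sketch below shows one way such a loop might use them; it is illustrative only, and the names trainer, logger, and epoch are assumed to come from the surrounding training script, not from these files.

    # Illustrative sketch, not rlkit's actual training loop.
    # `trainer` is a BEAR/UWAC trainer instance; `logger` and `epoch`
    # are assumed to exist in the surrounding script.
    for net in trainer.networks:            # every learned module in one place
        net.train(True)                     # switch modules to training mode

    # ... gradient steps via trainer.train(batch) ...

    logger.record_dict(trainer.get_diagnostics(), prefix='trainer/')
    trainer.end_epoch(epoch)                # resets _need_to_update_eval_statistics
    snapshot = trainer.get_snapshot()       # dict of modules, ready for torch.save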



rlkit/torch/sac/uwac_dropout.py [332:375]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
            ))
            self.eval_statistics.update(create_stats_ordered_dict(
                'MMD Loss',
                ptu.get_numpy(mmd_loss)
            ))
            self.eval_statistics.update(create_stats_ordered_dict(
                'Action Divergence',
                ptu.get_numpy(action_divergence)
            ))
            self.eval_statistics.update(create_stats_ordered_dict(
                'Raw Action Divergence',
                ptu.get_numpy(raw_action_divergence)
            ))
            if self.mode == 'auto':
                self.eval_statistics['Alpha'] = self.log_alpha.exp().item()
        
        self._n_train_steps_total += 1
    
    def get_diagnostics(self):
        return self.eval_statistics

    def end_epoch(self, epoch):
        self._need_to_update_eval_statistics = True

    @property
    def networks(self):
        return [
            self.policy,
            self.qf1,
            self.qf2,
            self.target_qf1,
            self.target_qf2,
            self.vae
        ]

    def get_snapshot(self):
        return dict(
            policy=self.policy,
            qf1=self.qf1,
            qf2=self.qf2,
            target_qf1=self.target_qf1,
            target_qf2=self.target_qf2,
            vae=self.vae,
        )
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
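The two excerpts above are byte-for-byte identical, so the shared statistics/snapshot tail is a natural candidate for extraction into a common base class or mixin. The sketch below is only illustrative: the class name MMDPolicyTrainerMixin and the helper _record_common_stats are hypothetical, not rlkit API; the body reuses only attributes and helpers that already appear in both files.

    # Hypothetical deduplication sketch; names are illustrative, not rlkit API.
    from rlkit.core.eval_util import create_stats_ordered_dict
    import rlkit.torch.pytorch_util as ptu


    class MMDPolicyTrainerMixin:
        def _record_common_stats(self, mmd_loss, action_divergence,
                                 raw_action_divergence):
            # Shared eval-statistics block currently duplicated in both trainers.
            self.eval_statistics.update(create_stats_ordered_dict(
                'MMD Loss', ptu.get_numpy(mmd_loss)))
            self.eval_statistics.update(create_stats_ordered_dict(
                'Action Divergence', ptu.get_numpy(action_divergence)))
            self.eval_statistics.update(create_stats_ordered_dict(
                'Raw Action Divergence', ptu.get_numpy(raw_action_divergence)))
            if self.mode == 'auto':
                self.eval_statistics['Alpha'] = self.log_alpha.exp().item()

        def get_diagnostics(self):
            return self.eval_statistics

        def end_epoch(self, epoch):
            self._need_to_update_eval_statistics = True

        @property
        def networks(self):
            return [self.policy, self.qf1, self.qf2,
                    self.target_qf1, self.target_qf2, self.vae]

        def get_snapshot(self):
            return dict(policy=self.policy, qf1=self.qf1, qf2=self.qf2,
                        target_qf1=self.target_qf1, target_qf2=self.target_qf2,
                        vae=self.vae)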