def on_phase_end()

in vissl/hooks/tensorboard_hook.py [0:0]


    def on_phase_end(self, task: "tasks.ClassyTask") -> None:
        """
        Called at the end of every epoch if the tensorboard hook is
        enabled.
        Logs train/test meter values, and model parameters and/or parameter
        gradients as set by the user in the tensorboard configuration.
        Also resets the CUDA memory counters.
        """
        # Log the train/test meter values (e.g. accuracy)
        if is_primary():
            phase_type = "Training" if task.train else "Testing"
            for meter in task.meters:
                # meter.value maps each metric name to a dict of
                # per-output values (output index -> value)
                for metric_name, metric_vals in meter.value.items():
                    for i, val in metric_vals.items():
                        tag_name = f"{phase_type}/{meter.name}_{metric_name}_Output_{i}"
                        self.tb_writer.add_scalar(
                            tag=tag_name,
                            scalar_value=round(val, 5),
                            global_step=task.train_phase_idx,
                        )
        if not (self.log_params or self.log_params_gradients):
            return

        if is_primary() and task.train:
            # Log the weights and bias at the end of the epoch
            if self.log_params:
                for name, parameter in task.base_model.named_parameters():
                    self.tb_writer.add_histogram(
                        f"Parameters/{name}",
                        parameter,
                        global_step=task.train_phase_idx,
                    )
            # Log the parameter gradients at the end of the epoch
            if self.log_params_gradients:
                for name, parameter in task.base_model.named_parameters():
                    if parameter.grad is not None:
                        try:
                            self.tb_writer.add_histogram(
                                f"Gradients/{name}",
                                parameter.grad,
                                global_step=task.train_phase_idx,
                            )
                        except ValueError:
                            logging.info(
                                f"Gradient histogram empty for {name}, "
                                f"iteration {task.iteration}. Unable to "
                                f"log gradient."
                            )

            # Reset the GPU Memory counter
            if torch.cuda.is_available():
                torch.cuda.reset_max_memory_allocated()
                torch.cuda.reset_max_memory_cached()
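
For context, the same logging pattern can be reproduced outside the hook with a plain
torch.utils.tensorboard.SummaryWriter. The sketch below is illustrative only: the toy
model, loss, scalar tag, and phase_idx stand in for task.base_model, the task meters,
and task.train_phase_idx, and the log directory is an arbitrary choice.

    import torch
    from torch import nn
    from torch.utils.tensorboard import SummaryWriter

    # Toy model and batch stand in for task.base_model and one training phase.
    model = nn.Linear(10, 2)
    inputs, targets = torch.randn(32, 10), torch.randint(0, 2, (32,))
    loss = nn.functional.cross_entropy(model(inputs), targets)
    loss.backward()

    writer = SummaryWriter(log_dir="./tb_logs")  # hypothetical log directory
    phase_idx = 0  # stands in for task.train_phase_idx

    # Scalar metric, one tag per phase/meter/metric/output (mirrors the hook's tag scheme).
    writer.add_scalar(
        tag="Training/accuracy_top_1_Output_0",
        scalar_value=0.73125,
        global_step=phase_idx,
    )

    # Parameter and gradient histograms, as the hook does when self.log_params /
    # self.log_params_gradients are enabled.
    for name, parameter in model.named_parameters():
        writer.add_histogram(f"Parameters/{name}", parameter, global_step=phase_idx)
        if parameter.grad is not None:
            writer.add_histogram(f"Gradients/{name}", parameter.grad, global_step=phase_idx)

    # Reset the CUDA peak-memory counters so the next phase starts a fresh measurement
    # (newer PyTorch versions prefer torch.cuda.reset_peak_memory_stats()).
    if torch.cuda.is_available():
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_max_memory_cached()

    writer.close()

Note on the try/except in the hook: add_histogram can raise ValueError when it cannot
build histogram bins (e.g. when a gradient tensor contains only NaN/Inf values), so the
hook catches that case and logs an informational message instead of failing the phase.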