in quant/utils/visualization.py [0:0]
def hook(self, split: str, metrics: Dict[str, Metric],
         epoch: int, global_step: int, log_interval: int = 10,
         values_dict: Optional[Dict[str, float]] = None, **kwargs: Any) -> None:
    """
    Provide a training / test loop-compatible hook for logging evaluation metrics.

    Non-train splits are logged on every call with ``epoch`` as the x-axis;
    the train split is logged only when ``global_step`` is a multiple of
    ``log_interval``, with ``global_step`` as the x-axis.

    Args:
        split: The split to visualize, e.g. train or test
        metrics: Dictionary mapping metric names to Metric objects
        epoch: Training epoch
        global_step: Unique incrementing integer across all epochs indicating the step
        log_interval: Frequency (in steps) for logging train metrics
        values_dict: Dictionary mapping names to values
            for other non-metric values to log
    """
    if values_dict is None:
        values_dict = {}
    # Pick the x-axis for this call; skip off-interval train steps entirely.
    if split != 'train':
        step = epoch
    elif global_step % log_interval == 0:
        step = global_step
    else:
        return
    # Spaces are normalized to underscores so TensorBoard tags stay
    # well-formed ('/' is reserved as the tag hierarchy separator).
    for name, metric in metrics.items():
        tag = f"{name.replace(' ', '_')}/{split}"
        self.writer.add_scalar(tag, metric.compute(), step)
    for name, value in values_dict.items():
        tag = f"{name.replace(' ', '_')}/{split}"
        self.writer.add_scalar(tag, value, step)