in common/log.py [0:0]
# Module-level imports this method relies on:
import datetime
import time

import torch


def log_every(self, iterable, print_freq, header=None):
    """Yield items from `iterable`, logging progress every `print_freq` iterations."""
    i = 0
    if not header:
        header = ''
    start_time = time.time()
    end = time.time()
    # Pad the iteration counter to the width of len(iterable), e.g. ':4d' for 1000 items.
    space_fmt = ':' + str(len(str(len(iterable)))) + 'd'
    if torch.cuda.is_available():
        log_msg = self.delimiter.join([
            header, '[{0' + space_fmt + '}/{1}]', 'eta: {eta}', '{meters}',
            'time: {time}', 'data: {data}', 'max mem: {memory:.0f}'
        ])
    else:
        log_msg = self.delimiter.join([
            header, '[{0' + space_fmt + '}/{1}]', 'eta: {eta}', '{meters}',
            'time: {time}', 'data: {data}'
        ])
    MB = 1024.0 * 1024.0
    for obj in iterable:
        # Time spent fetching the next item (data loading).
        self.meters["data_time"].update(time.time() - end)
        yield obj
        # Full iteration time: data loading plus the caller's work between yields.
        self.meters["iter_time"].update(time.time() - end)
        if i % print_freq == 0:
            self._write_meters()
            # Estimate remaining time from the running-average iteration time.
            eta_seconds = self.meters["iter_time"].global_avg * (
                len(iterable) - i)
            eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
            if torch.cuda.is_available():
                self.logger(
                    log_msg.format(
                        i,
                        len(iterable),
                        eta=eta_string,
                        meters=str(self),
                        time=str(self.meters["iter_time"]),
                        data=str(self.meters["data_time"]),
                        memory=torch.cuda.max_memory_allocated() / MB))
            else:
                self.logger(
                    log_msg.format(i,
                                   len(iterable),
                                   eta=eta_string,
                                   meters=str(self),
                                   time=str(self.meters["iter_time"]),
                                   data=str(self.meters["data_time"])))
        i += 1
        end = time.time()
    total_time = time.time() - start_time
    total_time_str = str(datetime.timedelta(seconds=int(total_time)))
    self.logger('{} Total time: {}'.format(header, total_time_str))
    self._write_epoch(total_time_str)
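
A minimal usage sketch, assuming `log_every` is a method on a MetricLogger-style class whose `delimiter`, `meters` dict of smoothed-value trackers, `logger` callable, and `_write_meters`/`_write_epoch` hooks are defined elsewhere in common/log.py; the `MetricLogger`, `train_loader`, `model`, and `optimizer` names below are hypothetical:

def train_one_epoch(model, optimizer, train_loader, metric_logger, epoch):
    # Wrap the data loader: log_every yields each batch unchanged while
    # timing data loading and the per-iteration work done in this loop body.
    for images, targets in metric_logger.log_every(
            train_loader, print_freq=10, header='Epoch: [{}]'.format(epoch)):
        loss = model(images, targets)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()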