in ocr/paragraph_segmentation_dcnn.py [0:0]
def run_epoch(e, network, dataloader, loss_function, trainer, log_dir, print_name, is_train):
    # Relies on module-level names defined/imported elsewhere in this file:
    # ctx (mxnet context), send_image_every_n, nd and autograd (from mxnet),
    # SummaryWriter (from mxboard), and draw_box_on_image.
    total_loss = nd.zeros(1, ctx)
    for i, (data, label) in enumerate(dataloader):
        data = data.as_in_context(ctx)
        label = label.as_in_context(ctx)

        # Record the computation graph only when training so backward() is valid.
        with autograd.record(train_mode=is_train):
            output = network(data)
            loss_i = loss_function(output, label)
        if is_train:
            loss_i.backward()
            trainer.step(data.shape[0])

        total_loss += loss_i.mean()
        # Keep one annotated batch per logging epoch for visualisation.
        if e % send_image_every_n == 0 and e > 0 and i == 0:
            output_image = draw_box_on_image(output.asnumpy(), label.asnumpy(), data.asnumpy())

    epoch_loss = float(total_loss.asscalar()) / len(dataloader)

    with SummaryWriter(logdir=log_dir, verbose=False, flush_secs=5) as sw:
        sw.add_scalar('loss', {print_name: epoch_loss}, global_step=e)
        if e % send_image_every_n == 0 and e > 0:
            # Clip pixel values to [0, 1] before logging the image.
            output_image[output_image < 0] = 0
            output_image[output_image > 1] = 1
            sw.add_image('bb_{}_image'.format(print_name), output_image, global_step=e)
    return epoch_loss
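
A minimal sketch of how run_epoch could be driven from a training loop. It assumes a Gluon network `net`, DataLoaders `train_data`/`test_data`, an epoch count `epochs`, and the module-level globals (`ctx`, `send_image_every_n`) already set up as in this file; the Adam optimizer and L2 loss below are illustrative placeholders, not necessarily what this file actually uses.

# Sketch only; `net`, `train_data`, `test_data`, and `epochs` are assumed to exist.
from mxnet import gluon

loss_function = gluon.loss.L2Loss()  # placeholder loss for the bounding-box regression
trainer = gluon.Trainer(net.collect_params(), 'adam', {'learning_rate': 0.001})

for e in range(epochs):
    train_loss = run_epoch(e, net, train_data, loss_function, trainer,
                           log_dir='./logs', print_name='train', is_train=True)
    test_loss = run_epoch(e, net, test_data, loss_function, trainer,
                          log_dir='./logs', print_name='test', is_train=False)
    print("Epoch {}: train loss {:.4f}, test loss {:.4f}".format(e, train_loss, test_loss))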