# empose/eval/helpers.py
def evaluate(data_loader, net, preprocess_fn, metrics_engine, window_size=None):
    """
    Evaluate the model on a hold-out dataset.

    :param data_loader: The DataLoader object to loop over the validation set.
    :param net: The model.
    :param preprocess_fn: A function that preprocesses a batch.
    :param metrics_engine: Metrics engine to compute additional metrics other than the loss.
    :param window_size: Test batches are too big to be evaluated in one go, so we use sliding windows.
    :return: All losses averaged per sample over the entire validation set (dict: loss name -> value).
    """
    # Put the model in evaluation mode.
    net.eval()
    loss_vals_agg = collections.defaultdict(float)
    n_samples = 0
    metrics_engine.reset()
    with torch.no_grad():
        for b, abatch in enumerate(data_loader):
            # We normalize here before we split into chunks because the normalization
            # might be sequence dependent.
            abatch = preprocess_fn(abatch, mode='normalize_only')
            first_shape_hat = None
            loss_vals_seq = collections.defaultdict(float)
            # Count chunks explicitly instead of relying on the loop variable `i`
            # leaking out of the loop (which raises NameError on an empty generator).
            n_chunks = 0
            batch_gpu = None
            for i, achunk in enumerate(window_generator(abatch, window_size)):
                n_chunks += 1
                # Move data to GPU.
                batch_gpu = achunk.to_gpu()
                # Preprocess. Reset the RNG only for the very first chunk of the very
                # first batch (i + b == 0 iff i == 0 and b == 0).
                batch_gpu = preprocess_fn(batch_gpu, mode='after_normalize', reset_rng=(i + b == 0))
                # Get the predictions. Tell the model when a new sequence starts so it
                # can reset any internal state.
                model_out = net(batch_gpu, is_new_sequence=(i == 0))
                # Compute the loss.
                _, loss_vals = net.backward(batch_gpu, model_out)
                for k in loss_vals:
                    loss_vals_seq[k] += loss_vals[k]
                # Update the metrics. Fall back to the ground truth if the model did not
                # predict a pose.
                pose_hat = model_out['pose_hat'] if model_out['pose_hat'] is not None else batch_gpu.poses_body
                # If we have several chunks, we take the shape of the first chunk for the
                # entire sequence.
                if i == 0:
                    shape_hat = model_out['shape_hat'][:, 0] if model_out['shape_hat'] is not None else None
                    first_shape_hat = shape_hat
                else:
                    shape_hat = first_shape_hat
                metrics_engine.compute(batch_gpu.poses_body, batch_gpu.shapes, pose_hat,
                                       shape_hat, batch_gpu.seq_lengths,
                                       batch_gpu.poses_root, model_out['root_ori_hat'],
                                       frame_mask=batch_gpu.marker_masks)
            if n_chunks == 0:
                # The window generator produced no chunks for this batch; nothing to aggregate.
                continue
            # Average the per-chunk losses and weight by batch size so the final division
            # by n_samples yields a per-sample average over the whole validation set.
            for k in loss_vals_seq:
                loss_vals_agg[k] += loss_vals_seq[k] / n_chunks * batch_gpu.batch_size
            n_samples += batch_gpu.batch_size
    # Guard against an empty validation set (would otherwise raise ZeroDivisionError).
    if n_samples > 0:
        for k in loss_vals_agg:
            loss_vals_agg[k] /= n_samples
    return loss_vals_agg