in c3dm/c3dpo.py [0:0]
def visualize( self, visdom_env, trainmode, \
               preds, stats, clear_env=False ):
    """
    Plot the predictions for one batch to a visdom server.

    For the first image of the batch this shows:
      (a) the predicted 2d keypoint reprojections together with the
          ground-truth keypoint locations, overlaid on the input image;
      (b) the reconstructed 3d point cloud and, when ground-truth 3d
          keypoints are available, the gt cloud aligned for comparison.

    Args:
        visdom_env: Name of the visdom environment to plot into.
        trainmode: Key into `stats.it` selecting the iteration counter
            of the current phase (e.g. train / val).
        preds: Dict of per-batch predictions (torch tensors) produced by
            the forward pass; indexed here at the first batch element.
        stats: Stats object providing `visdom_server`, `visdom_port`,
            `epoch` and the per-phase iteration counters `it`.
        clear_env: If True, close all windows in `visdom_env` first.

    Returns:
        None. Silently skips plotting when no visdom server is reachable.
    """
    viz = get_visdom_connection(server=stats.visdom_server,
                                port=stats.visdom_port)
    if not viz.check_connection():
        print("no visdom server! -> skipping batch vis")
        return

    if clear_env:  # clear visualisations
        print(" ... clearing visdom environment")
        viz.close(env=visdom_env, win=None)

    print('vis into env:\n %s' % visdom_env)

    # only the first element of the batch gets visualized
    idx_image = 0
    it = stats.it[trainmode]
    epoch = stats.epoch
    # reuse the locals instead of re-reading stats (was duplicated before)
    title = "e%d_it%d_im%d" % (epoch, it, idx_image)

    # get the connectivity pattern of the skeleton (None when unknown)
    sticks = STICKS[self.connectivity_setup] \
        if self.connectivity_setup in STICKS else None

    # show reprojections: stack predicted reprojections with gt locations
    var_kp = {'orthographic': 'kp_reprojected_image',
              'perspective':  'kp_reprojected_image_uncal'}[self.projection_type]
    p = np.stack([preds[k][idx_image].detach().cpu().numpy()
                  for k in (var_kp, 'kp_loc')])
    v = preds['kp_vis'][idx_image].detach().cpu().numpy()
    show_projections(viz, visdom_env, p, v=v,
                     title=title, cmap__='gist_ncar',
                     markersize=50, sticks=sticks,
                     stickwidth=1, plot_point_order=True,
                     image_path=preds['image_path'][idx_image],
                     win='projections')

    # show 3d reconstruction (the dead `if True:` wrapper was removed)
    var3d = {'orthographic': 'shape_image_coord',
             'perspective':  'shape_image_coord_cal'}[self.projection_type]
    pcl = {'pred': preds[var3d][idx_image].detach().cpu().numpy().copy()}
    if 'kp_loc_3d' in preds:
        pcl['gt'] = preds['kp_loc_3d'][idx_image].detach().cpu().numpy().copy()
        if self.projection_type == 'perspective':
            # for perspective projections, we dont know the scale
            # so we estimate it here ...
            scale = argmin_scale(torch.from_numpy(pcl['pred'][None]),
                                 torch.from_numpy(pcl['gt'][None]))
            pcl['pred'] = pcl['pred'] * float(scale)
        elif self.projection_type == 'orthographic':
            # center the prediction on its mean over keypoints;
            # keepdims is needed so the subtraction broadcasts —
            # plain .mean(1) yields shape (3,), which does not broadcast
            # against an assumed (3, n_kp) cloud unless n_kp == 3
            # NOTE(review): assumes (3, n_kp) layout — confirm upstream
            pcl['pred'] = pcl['pred'] - pcl['pred'].mean(1, keepdims=True)
    visdom_plot_pointclouds(viz, pcl, visdom_env, title,
                            plot_legend=False, markersize=20,
                            sticks=sticks, win='3d')