def visualize()

in model.py [0:0]

Pushes visualisations of the current batch to a Visdom server: the 2D keypoint reprojections overlaid on the input image, and the predicted (and, when ground truth is available, the ground-truth) 3D keypoint cloud.

    def visualize(self, visdom_env, trainmode,
                  preds, stats, clear_env=False):
        viz = get_visdom_connection(server=stats.visdom_server,
                                    port=stats.visdom_port)
        if not viz.check_connection():
            print("no visdom server! -> skipping batch vis")
            return

        if clear_env:  # clear visualisations
            print("  ... clearing visdom environment")
            viz.close(env=visdom_env, win=None)

        print('vis into env:\n   %s' % visdom_env)

        it = stats.it[trainmode]
        epoch = stats.epoch
        idx_image = 0

        title = "e%d_it%d_im%d" % (epoch, it, idx_image)

        # get the connectivity pattern
        sticks = STICKS.get(self.connectivity_setup, None)

        var_kp = {'orthographic': 'kp_reprojected_image',
                  'perspective':  'kp_reprojected_image_uncal'
                  }[self.projection_type]

        # show reprojections
        p = np.stack(
            [preds[k][idx_image].detach().cpu().numpy()
             for k in (var_kp, 'kp_loc')])
        v = preds['kp_vis'][idx_image].detach().cpu().numpy()

        show_projections(p, visdom_env=visdom_env, v=v,
                         title='projections_'+title, cmap__='gist_ncar',
                         markersize=50, sticks=sticks,
                         stickwidth=1, plot_point_order=False,
                         image_path=preds['image_path'][idx_image],
                         visdom_win='projections')

        # show 3d reconstruction
        var3d = {'orthographic': 'shape_image_coord',
                 'perspective': 'shape_image_coord_cal'
                 }[self.projection_type]
        pcl = {'pred': preds[var3d][idx_image].detach().cpu().numpy().copy()}
        if 'kp_loc_3d' in preds:
            pcl['gt'] = (preds['kp_loc_3d'][idx_image]
                         .detach().cpu().numpy().copy())
            if self.projection_type == 'perspective':
                # for perspective projections the scale of the reconstruction
                # is unknown, so estimate the scale that best aligns the
                # prediction with the ground truth
                scale = argmin_scale(torch.from_numpy(pcl['pred'][None]),
                                     torch.from_numpy(pcl['gt'][None]))
                pcl['pred'] = pcl['pred'] * float(scale)
            elif self.projection_type == 'orthographic':
                # depth-center the ground truth and the predictions
                # (subtract the mean z-coordinate from each cloud)
                for k in ('pred', 'gt'):
                    pcl_ = pcl[k].copy()
                    meanz = pcl_.mean(1) * np.array([0., 0., 1.])
                    pcl[k] = pcl_ - meanz[:, None]
            else:
                raise ValueError(self.projection_type)

        visdom_plot_pointclouds(viz, pcl, visdom_env, '3d_'+title,
                                plot_legend=False, markersize=20,
                                sticks=sticks, win='3d')
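
The sticks connectivity looked up near the top of the method comes from the module-level STICKS table, which is defined elsewhere in the repository and not shown here. A plausible shape for one entry, assuming the common convention of keypoint-index pairs that the plotting helpers draw as line segments, is sketched below; the key and the indices are illustrative only.

    # illustrative only -- the real STICKS table and its keys live elsewhere
    STICKS_EXAMPLE = {
        'some_dataset': [(0, 1), (1, 2), (2, 3),    # one limb chain
                         (0, 4), (4, 5), (5, 6)],   # another limb chain
    }
    sticks = STICKS_EXAMPLE.get('some_dataset', None)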
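argmin_scale is imported from elsewhere in the repository. A least-squares scale alignment of this kind typically solves min_s ||s * pred - gt||^2, which has the closed form s = <pred, gt> / <pred, pred>. A minimal sketch of that computation, under the assumption that the inputs are batched (B, 3, K) tensors:

    import torch

    def argmin_scale_sketch(pred, gt):
        # pred, gt: (B, 3, K) batches of 3D keypoint clouds
        # returns the per-batch scalar s minimising || s * pred - gt ||^2,
        # i.e. s = <pred, gt> / <pred, pred>
        num = (pred * gt).sum(dim=(1, 2))
        den = (pred * pred).sum(dim=(1, 2)).clamp(min=1e-8)
        return num / den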
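Both show_projections and visdom_plot_pointclouds are helpers from this repository. At their core, pushing a 3D point cloud to a Visdom window boils down to visdom's scatter API; a minimal, self-contained sketch follows, with the server address, environment and window names as placeholders.

    import numpy as np
    from visdom import Visdom

    viz = Visdom(server='http://localhost', port=8097)
    if viz.check_connection():
        pts = np.random.randn(38, 3)            # a (K, 3) point cloud
        viz.scatter(X=pts,
                    env='c3dpo_demo', win='3d_demo',
                    opts=dict(title='3d_demo', markersize=4))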
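Finally, a hedged sketch of how this method might be driven from an evaluation loop. Only the visualize(visdom_env, trainmode, preds, stats, clear_env=False) signature is taken from this file; model, stats, eval_loader and vis_interval are assumed names, and the structure of the forward pass is not shown in this excerpt.

    # hypothetical driver code -- model, stats, eval_loader and vis_interval
    # are assumptions, not part of this file
    for it, batch in enumerate(eval_loader):
        preds = model(**batch)                  # forward pass -> dict of predictions
        if it % vis_interval == 0:
            model.visualize('c3dpo_eval',       # visdom environment name
                            'val',              # trainmode key into stats.it
                            preds, stats,
                            clear_env=(it == 0))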