def get_floorplans()

in utils/callbacks.py
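Back-projects batched RGB-D trajectories into per-scene voxel volumes, removes the upper half of each volume (the ceiling), and reads out the first occupied voxel seen from above to produce top-down floorplan images.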


def get_floorplans(data, voxel_res, voxel_size, floorplan_res, batch_size=8):
    B, T, C, H, W = data['rgb'].shape
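    # B: scenes per batch, T: frames per trajectory, C/H/W: image channels, height, width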

    volume_length = voxel_res * voxel_size  # recalculate original length of volume
    voxel_size = volume_length / floorplan_res  # scale to new resolution

    if data['depth'].shape[3] != H:
        data['depth'] = resize_trajectory(x=data['depth'], size=H)

    floorplans = []
    for i in range(0, B, batch_size):
        with torch.no_grad():
            volume = backproject(
                voxel_dim=(floorplan_res, floorplan_res, floorplan_res),
                voxel_size=voxel_size,  # should be roughly equivalent to (self.opt.far * 2 / vd)
                world_center=(0, 0, 0),
                Rt=collapse_trajectory_dim(data['Rt'][i : i + batch_size]),
                K=collapse_trajectory_dim(data['K'][i : i + batch_size]),
                features=collapse_trajectory_dim(data['rgb'][i : i + batch_size]),
                depth=collapse_trajectory_dim(data['depth'][i : i + batch_size]),
            )

            volume = expand_trajectory_dim(volume, T=T)
            volume = exclusive_mean(volume, dim=1)  # merge along trajectory dim

            # remove the top half of the scene (i.e. the ceiling)
            height = volume.shape[3]
            volume = volume[:, :, :, : height // 2, :]

            # take the first nonzero voxel along the height axis, as if looking down from a bird's-eye view
            depth_idx = torch.argmax((volume > 0).float(), dim=3, keepdim=True)
            floorplan = torch.gather(volume, 3, depth_idx).squeeze(3)
            floorplans.append(floorplan.cpu())

    floorplans = torch.cat(floorplans, dim=0)
    floorplans = floorplans.permute(0, 1, 3, 2)  # swap height and width dimensions
    return floorplans
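
A minimal usage sketch (not taken from the repository): it assumes `get_floorplans` and the trajectory helpers it calls (`backproject`, `collapse_trajectory_dim`, etc.) are importable from `utils.callbacks`, and it fabricates random RGB-D trajectories with made-up shapes and camera parameters purely to illustrate the expected layout of `data` and of the returned floorplans.

import torch

from utils.callbacks import get_floorplans  # import path taken from the header above

# Hypothetical input: 4 scenes, 20 frames each, 64x64 RGB-D with poses and intrinsics.
B, T, H, W = 4, 20, 64, 64
K = torch.tensor([[32.0,  0.0, 32.0],
                  [ 0.0, 32.0, 32.0],
                  [ 0.0,  0.0,  1.0]])
data = {
    'rgb': torch.rand(B, T, 3, H, W),           # colors used as voxel features
    'depth': torch.rand(B, T, 1, H, W) * 10.0,  # fabricated metric depth
    'Rt': torch.eye(4).repeat(B, T, 1, 1),      # camera extrinsics, identity for the sketch
    'K': K.repeat(B, T, 1, 1),                  # pinhole intrinsics
}

floorplans = get_floorplans(
    data,
    voxel_res=128,      # resolution of the original voxel grid
    voxel_size=0.05,    # metres per voxel in the original grid
    floorplan_res=256,  # resolution of the output bird's-eye-view grid
)
print(floorplans.shape)  # e.g. (4, 3, 256, 256): one top-down RGB floorplan per scene

Each chunk of `batch_size` scenes is back-projected under `torch.no_grad()` and moved to the CPU before concatenation, so peak GPU memory is bounded by a single chunk rather than the whole batch.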