tensorflow_graphics/projects/points_to_3Dobjects/models/centernet_vid.py
def more_things(self, output, sample, training=True, apply_sigmoid=True):
"""Helper function."""
batch_size = output['centers'].shape[0]
# Get shape-voxel-grid from shape-id.
shape_logits = output['shapes'] # (BS, 128, 128, 300)
indices = sample['indices']
groundtruth_k = tf.shape(indices)[-1]
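# At inference time, optionally convert the center logits to probabilities,
# keep only local maxima via NMS, and extract the top-k scoring peaks.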
if not training:
centers = output['centers']
if apply_sigmoid:
centers = tf.math.sigmoid(centers)
output['centers_sigmoid'] = centers
centers = self.nms(centers)
output['centers_nms'] = centers
b, h, w, c = centers.shape
assert b == 1, 'Heatmap decoding at inference assumes batch size 1.'
centers = tf.reshape(centers, [1, h, w, c])
# The decoded top-k peaks are currently unused; the ground-truth `indices`
# from the sample drive the rest of the decoding.
_, _, _, _, _ = self._top_scores_heatmaps(centers, self.k)
# NOTE: per-peak offsets, 2D coordinates and class ids could be recovered
# from the heatmap here (see `_top_scores_heatmaps`), but they are not
# needed by the decoding below.
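# Gather the per-object shape logits at the center indices and decode the
# corresponding 3D box parameters (size, translation, rotation).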
top_shape_logits = centernet_utils.get_heatmap_values(shape_logits, indices)
sizes_3d, translations_3d, rotations_3d = \
centernet_utils.decode_box_3d(output, indices, self.rotation_svd)
# NOTE: the 2D center coordinates and their sub-pixel offsets could be
# reconstructed here from `indices`, `output['offset']` and
# `self.output_stride`; only the decoded 3D box parameters are used below.
output['sizes_3d'] = sizes_3d
output['translations_3d'] = translations_3d
output['rotations_3d'] = rotations_3d
# Get ground truth point cloud
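# `self.shape_pointclouds` is assumed to hold one canonical 512x3 point
# cloud per shape id; gather the cloud of every annotated object.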
groundtruth_pointclouds = tf.gather(self.shape_pointclouds,
tf.expand_dims(sample['shapes'],
axis=-1))
groundtruth_pointclouds = tf.reshape(groundtruth_pointclouds,
[batch_size, -1, 512, 3])
# Transform ground truth point cloud using ground truth pose
groundtruth_pointclouds_groundtruth_transformed = \
centernet_utils.transform_pointcloud(
groundtruth_pointclouds / 2.0,
sample['sizes_3d'],
sample['rotations_3d'],
sample['translations_3d'])
sample['pose_groundtruth_pointclouds'] = \
groundtruth_pointclouds_groundtruth_transformed
# Transform ground truth point cloud using predicted pose
groundtruth_pointclouds_predicted_transformed = \
centernet_utils.transform_pointcloud(
groundtruth_pointclouds / 2.0,
sample['sizes_3d'],
output['rotations_3d'],
output['translations_3d'])
output['pose_groundtruth_pointclouds'] = \
groundtruth_pointclouds_predicted_transformed
# Get predicted SDFs
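# Soft-select an SDF grid per object from the shape codebook: the shape
# logits are turned into softargmax weights (sharpened by `self.beta`), so
# the selection stays differentiable.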
predicted_sdfs = centernet_utils.softargmax(top_shape_logits,
self.shape_sdfs,
self.beta)
# predicted_sdfs = self.softargmax_sdf(top_shape_logits)
predicted_sdfs = tf.reshape(predicted_sdfs, [batch_size, -1, 32, 32, 32])
output['sdfs'] = predicted_sdfs
# Get predicted pointclouds
predicted_pointclouds = centernet_utils.softargmax(top_shape_logits,
self.shape_pointclouds,
self.beta)
predicted_pointclouds = tf.reshape(predicted_pointclouds,
[batch_size, -1, 512, 3])
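# Look up the ground-truth SDF grid for each annotated shape id.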
groundtruth_sdfs = tf.squeeze(tf.gather(self.shape_sdfs,
tf.expand_dims(sample['shapes'],
axis=-1)),
axis=2)
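# Pack (SDF grids, point clouds, box parameters) so a collision/intersection
# penalty can be computed downstream, with both predicted and ground-truth
# shapes.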
output['collisions'] = (predicted_sdfs, predicted_pointclouds, sizes_3d,
translations_3d, rotations_3d)
output['collisions_gt_shapes'] = (groundtruth_sdfs, groundtruth_pointclouds,
sample['sizes_3d'],
translations_3d, rotations_3d)
# predicted_pointclouds =
# (predicted_pointclouds * (29.0/32.0) / 2.0 + 0.5) * 32.0 - 0.5
# sdf_values = trilinear.interpolate(tf.expand_dims(predicted_sdfs, -1),
# predicted_pointclouds)
output['groundtruth_sdfs'] = groundtruth_sdfs
output['predicted_sdfs'] = predicted_sdfs
output['groundtruth_pointclouds'] = groundtruth_pointclouds
output['predicted_pointclouds'] = predicted_pointclouds
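# Flattened SDF copies for the SDF losses; note that this overwrites the
# grid-shaped 'sdfs' entry stored above.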
output['sdfs'] = tf.reshape(predicted_sdfs, [batch_size, -1, 32**3, 1])
sample['sdfs'] = tf.reshape(groundtruth_sdfs, [batch_size, -1, 32**3, 1])
# Transform predicted point cloud
transformed_pointclouds = \
centernet_utils.transform_pointcloud(predicted_pointclouds / 2.0,
sizes_3d, rotations_3d,
translations_3d)
transformed_pointclouds = \
tf.concat([transformed_pointclouds,
tf.ones([batch_size, groundtruth_k, 512, 1])], axis=-1)
transformed_pointclouds = tf.transpose(transformed_pointclouds,
[0, 1, 3, 2])  # (BS, K, 4, 512)
output['transformed_pointclouds'] = transformed_pointclouds
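# Project the transformed point clouds into the image: apply the per-view
# intrinsics and extrinsics (K @ [R|t]) to the homogeneous points, then
# divide by the last coordinate.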
intrinsics = tf.reshape(sample['k'], [batch_size, 1, 3, 3])  # (BS, 1, 3, 3)
intrinsics = tf.tile(intrinsics, [1, groundtruth_k, 1, 1])  # (BS, K, 3, 3)
extrinsics = tf.expand_dims(sample['rt'], axis=1)  # (BS, 1, 3, 4)
extrinsics = tf.tile(extrinsics, [1, groundtruth_k, 1, 1])  # (BS, K, 3, 4)
projected_pointclouds = intrinsics @ extrinsics @ transformed_pointclouds
projected_pointclouds = tf.transpose(projected_pointclouds, [0, 1, 3, 2])
projected_pointclouds = \
projected_pointclouds / projected_pointclouds[:, :, :, -1:]
output['projected_pointclouds'] = projected_pointclouds
# 2D Loss preparation
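# Project the ground-truth shapes twice: once under the ground-truth pose
# (stored in `sample`) and once under the predicted pose (stored in
# `output`), so a 2D reprojection loss can compare them in image space.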
pointcloud = groundtruth_pointclouds_groundtruth_transformed
pointcloud = tf.concat([pointcloud,
tf.ones([batch_size, groundtruth_k, 512, 1])],
axis=-1)
pointcloud = tf.transpose(pointcloud, [0, 1, 3, 2])  # (BS, K, 4, 512)
pointcloud = intrinsics @ extrinsics @ pointcloud
pointcloud = tf.transpose(pointcloud, [0, 1, 3, 2])
sample['projected_gt_shapes'] = \
(pointcloud / pointcloud[:, :, :, -1:])[:, :, :, 0:2]
pointcloud = groundtruth_pointclouds_predicted_transformed
pointcloud = tf.concat([pointcloud,
tf.ones([batch_size, groundtruth_k, 512, 1])],
axis=-1)
pointcloud = tf.transpose(pointcloud, [0, 1, 3, 2])  # (BS, K, 4, 512)
pointcloud = intrinsics @ extrinsics @ pointcloud
pointcloud = tf.transpose(pointcloud, [0, 1, 3, 2])
output['projected_gt_shapes'] = \
(pointcloud / pointcloud[:, :, :, -1:])[:, :, :, 0:2]
return output