def get()

in visualization/POF/data/BaseReader.py [0:0]

Builds the queue-based TensorFlow input pipeline: the method projects the 3D annotations into the camera frame, reads and crops the image and mask, optionally applies rotation and blur augmentation, renders the Gaussian score maps (and PAFs), and returns a dictionary of batched tensors.

    def get(self, withPAF=True, PAF_normalize3d=True, read_image=True, imw=1920, imh=1080, bbox2d=0):
        # bbox2d: 0 = compute the 2D crop from the projected 3D bounding box, 1 = compute it from OpenPose detections
        assert bbox2d in (0, 1)
        assert isinstance(withPAF, bool)

        # produce data from slice_input_producer
        flow_list = tf.train.slice_input_producer(list(self.tensor_dict.values()), shuffle=self.shuffle)
        flow_dict = {key: flow_list[ik] for ik, key in enumerate(self.tensor_dict.keys())}

        # build data dictionary
        data_dict = {}
        data_dict['img_dir'] = flow_dict['img_dirs']
        data_dict['K'] = flow_dict['K']

        # rotate and project to camera frame
        if self.objtype == 0:
            body2d, body3d = self.project_tf(flow_dict['body'], flow_dict['K'], flow_dict['R'], flow_dict['t'], flow_dict['distCoef'])
            body3d = tf.cast(body3d, tf.float32)
            body2d = tf.cast(body2d, tf.float32)
            data_dict['keypoint_xyz_origin'] = body3d
            data_dict['keypoint_uv_origin'] = body2d
            data_dict['body_valid'] = flow_dict['body_valid']
        elif self.objtype == 1:
            cond_left = tf.reduce_any(tf.cast(flow_dict['left_hand_valid'], dtype=tf.bool))  # True if any left-hand keypoint is valid; False means use the right hand
            hand3d = tf.cond(cond_left, lambda: flow_dict['left_hand'], lambda: flow_dict['right_hand'])  # in world coordinate
            hand2d, hand3d = self.project_tf(hand3d, flow_dict['K'], flow_dict['R'], flow_dict['t'], flow_dict['distCoef'])  # in camera coordinate
            hand3d = tf.cast(hand3d, tf.float32)
            hand2d = tf.cast(hand2d, tf.float32)
            data_dict['keypoint_xyz_origin'] = hand3d
            data_dict['keypoint_uv_origin'] = hand2d
            data_dict['cond_left'] = cond_left
            data_dict['left_hand_valid'] = flow_dict['left_hand_valid']
            data_dict['right_hand_valid'] = flow_dict['right_hand_valid']
        elif self.objtype == 2:
            body2d, body3d = self.project_tf(flow_dict['body'], flow_dict['K'], flow_dict['R'], flow_dict['t'], flow_dict['distCoef'])
            lhand2d, lhand3d = self.project_tf(flow_dict['left_hand'], flow_dict['K'], flow_dict['R'], flow_dict['t'], flow_dict['distCoef'])
            rhand2d, rhand3d = self.project_tf(flow_dict['right_hand'], flow_dict['K'], flow_dict['R'], flow_dict['t'], flow_dict['distCoef'])
            data_dict['body_xyz_origin'] = body3d
            data_dict['body_uv_origin'] = body2d
            data_dict['lhand_xyz_origin'] = lhand3d
            data_dict['lhand_uv_origin'] = lhand2d
            data_dict['rhand_xyz_origin'] = rhand3d
            data_dict['rhand_uv_origin'] = rhand2d
            data_dict['body_valid'] = flow_dict['body_valid']
            data_dict['left_hand_valid'] = flow_dict['left_hand_valid']
            data_dict['right_hand_valid'] = flow_dict['right_hand_valid']

        # read image
        if read_image:
            img_file = tf.read_file(flow_dict['img_dirs'])
            image = tf.image.decode_image(img_file, channels=3)
            image = tf.image.pad_to_bounding_box(image, 0, 0, imh, imw)
            image.set_shape((imh, imw, 3))
            image = tf.cast(image, tf.float32) / 255.0 - 0.5
            data_dict['image'] = image
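        # read the segmentation mask if one is provided; otherwise fall back to an all-ones mask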
        if 'mask_dirs' in flow_dict:
            mask_file = tf.read_file(flow_dict['mask_dirs'])
            mask = tf.image.decode_image(mask_file, channels=3)
            mask = tf.image.pad_to_bounding_box(mask, 0, 0, imh, imw)
            mask.set_shape((imh, imw, 3))
            mask = mask[:, :, 0]
            mask = tf.cast(mask, tf.float32)
        else:
            mask = tf.ones((imh, imw), dtype=tf.float32)
        data_dict['mask'] = mask

        # calculate crop size
        if self.objtype in (0, 1):
            if self.objtype == 0:
                keypoints = body3d
                valid = flow_dict['body_valid']
            elif self.objtype == 1:
                keypoints = hand3d
                valid = tf.cond(cond_left, lambda: flow_dict['left_hand_valid'], lambda: flow_dict['right_hand_valid'])
                data_dict['hand_valid'] = valid
            crop_center3d, scale3d, crop_center2d, scale2d = self.calc_crop_scale(keypoints, flow_dict['K'], flow_dict['distCoef'], valid)
            data_dict['crop_center2d'], data_dict['scale2d'] = crop_center2d, scale2d
            data_dict['crop_center3d'], data_dict['scale3d'] = crop_center3d, scale3d

            # do cropping
            if self.objtype == 1:
                # reuse the body2d / body3d names for the hand keypoints so the cropping code below is shared
                body2d = hand2d
                body3d = hand3d
            if self.rotate_augmentation:
                print('using rotation augmentation')
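                # rotate the 2D/3D keypoints about the crop center by a random angle in [-40, 40] degrees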
                rotate_angle = tf.random_uniform([], minval=-np.pi * 40 / 180, maxval=np.pi * 40 / 180)
                R2 = tf.reshape(tf.stack([tf.cos(rotate_angle), -tf.sin(rotate_angle), tf.sin(rotate_angle), tf.cos(rotate_angle)]), [2, 2])
                R3 = tf.reshape(tf.stack([tf.cos(rotate_angle), -tf.sin(rotate_angle), 0, tf.sin(rotate_angle), tf.cos(rotate_angle), 0, 0, 0, 1]), [3, 3])
                body2d = tf.matmul((body2d - crop_center2d), R2) + crop_center2d
                body3d = tf.matmul((body3d - crop_center3d), R3) + crop_center3d
                data_dict['keypoint_xyz_origin'] = body3d  # note: after rotation, the projection of the 3D keypoints may no longer align with the 2D keypoints
                data_dict['keypoint_uv_origin'] = body2d
            body2d_local = self.update_keypoint2d(body2d, crop_center2d, scale2d)
            data_dict['keypoint_uv_local'] = body2d_local
            if read_image:
                image_crop = self.crop_image(image, crop_center2d, scale2d)
                data_dict['image_crop'] = image_crop
            mask_crop = self.crop_image(tf.stack([mask] * 3, axis=2), crop_center2d, scale2d)
            data_dict['mask_crop'] = mask_crop[:, :, 0]
            if self.rotate_augmentation:
                data_dict['image_crop'] = tf.contrib.image.rotate(data_dict['image_crop'], rotate_angle)
                data_dict['mask_crop'] = tf.contrib.image.rotate(data_dict['mask_crop'], rotate_angle)
            if self.blur_augmentation:
                print('using blur augmentation')
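                # simulate blur by downscaling the crop by a random factor and resizing back to crop_size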
                rescale_factor = tf.random_uniform([], minval=0.1, maxval=1.0)
                rescale = tf.cast(rescale_factor * self.crop_size, tf.int32)
                resized_image = tf.image.resize_images(data_dict['image_crop'], [rescale, rescale])
                data_dict['image_crop'] = tf.image.resize_images(resized_image, [self.crop_size, self.crop_size])

            # create 2D gaussian map
            scoremap2d = self.create_multiple_gaussian_map(body2d_local[:, ::-1], (self.crop_size, self.crop_size), self.sigma, valid_vec=valid, extra=True)  # coord_hw, imsize_hw
            data_dict['scoremap2d'] = scoremap2d

            if withPAF:
                from utils.PAF import createPAF
                data_dict['PAF'] = createPAF(body2d_local, body3d, self.objtype, (self.crop_size, self.crop_size), PAF_normalize3d, valid_vec=valid)
                data_dict['PAF_type'] = tf.ones([], dtype=bool)  # PAF_type: 0 = 2D PAF, 1 = 3D PAF (always 3D here)

            # 3D keypoints in local (crop) coordinates; the 3D score map generation is left commented out
            body3d_local = self.update_keypoint3d(body3d, crop_center3d, scale3d)
            data_dict['keypoint_xyz_local'] = body3d_local
            # scoremap3d = self.create_multiple_gaussian_map_3d(body3d_local, self.grid_size, self.sigma3d, valid_vec=valid, extra=True)
            # data_dict['scoremap3d'] = scoremap3d

            if self.objtype == 1:  # hand data: flip right hands horizontally so the network always sees left hands
                data_dict['image_crop'] = tf.cond(cond_left, lambda: data_dict['image_crop'], lambda: data_dict['image_crop'][:, ::-1, :])
                data_dict['mask_crop'] = tf.cond(cond_left, lambda: data_dict['mask_crop'], lambda: data_dict['mask_crop'][:, ::-1])
                data_dict['scoremap2d'] = tf.cond(cond_left, lambda: data_dict['scoremap2d'], lambda: data_dict['scoremap2d'][:, ::-1, :])
                data_dict['keypoint_uv_local'] = tf.cond(cond_left, lambda: data_dict['keypoint_uv_local'],
                                                         lambda: tf.constant([self.crop_size, 0], tf.float32) + tf.constant([-1, 1], tf.float32) * data_dict['keypoint_uv_local'])
                if withPAF:
                    data_dict['PAF'] = tf.cond(cond_left, lambda: data_dict['PAF'],
                                               lambda: (data_dict['PAF'][:, ::-1, :]) * tf.constant([-1, 1, 1] * (data_dict['PAF'].get_shape().as_list()[2] // 3), dtype=tf.float32))

        elif self.objtype == 2:  # full body plus both hands, each with its own crop, score maps and local keypoints
            bcrop_center3d, bscale3d, bcrop_center2d, bscale2d = self.calc_crop_scale(body3d, flow_dict['K'], flow_dict['distCoef'], flow_dict['body_valid'])
            lcrop_center3d, lscale3d, lcrop_center2d, lscale2d = self.calc_crop_scale(lhand3d, flow_dict['K'], flow_dict['distCoef'], flow_dict['left_hand_valid'])
            rcrop_center3d, rscale3d, rcrop_center2d, rscale2d = self.calc_crop_scale(rhand3d, flow_dict['K'], flow_dict['distCoef'], flow_dict['right_hand_valid'])

            body3d_local = self.update_keypoint3d(body3d, bcrop_center3d, bscale3d)
            lhand3d_local = self.update_keypoint3d(lhand3d, lcrop_center3d, lscale3d)
            rhand3d_local = self.update_keypoint3d(rhand3d, rcrop_center3d, rscale3d)
            bscoremap3d = self.create_multiple_gaussian_map_3d(body3d_local, self.grid_size, self.sigma3d,
                                                               valid_vec=flow_dict['body_valid'], extra=True)  # coord_hw, imsize_hw
            lscoremap3d = self.create_multiple_gaussian_map_3d(lhand3d_local, self.grid_size, self.sigma3d,
                                                               valid_vec=flow_dict['left_hand_valid'], extra=True)  # coord_hw, imsize_hw
            rscoremap3d = self.create_multiple_gaussian_map_3d(rhand3d_local, self.grid_size, self.sigma3d,
                                                               valid_vec=flow_dict['right_hand_valid'], extra=True)  # coord_hw, imsize_hw
            data_dict['bscoremap3d'] = bscoremap3d
            data_dict['lscoremap3d'] = lscoremap3d
            data_dict['rscoremap3d'] = rscoremap3d

            data_dict['body_xyz_local'] = body3d_local
            data_dict['lhand_xyz_local'] = lhand3d_local
            data_dict['rhand_xyz_local'] = rhand3d_local

            # 2D keypoints and cropped images
            if bbox2d == 1:
                # compute the 2D crops from OpenPose keypoints instead of the projected 3D keypoints
                body2d = flow_dict['openpose_body']
                lhand2d = flow_dict['openpose_lhand']
                rhand2d = flow_dict['openpose_rhand']

                bvalid = tf.logical_and(tf.not_equal(body2d[:, 0], 0.0), tf.not_equal(body2d[:, 1], 0.0))
                lvalid = tf.logical_and(tf.not_equal(lhand2d[:, 0], 0.0), tf.not_equal(lhand2d[:, 1], 0.0))
                rvalid = tf.logical_and(tf.not_equal(rhand2d[:, 0], 0.0), tf.not_equal(rhand2d[:, 1], 0.0))

                data_dict['body_valid'] = bvalid
                data_dict['left_hand_valid'] = lvalid
                data_dict['right_hand_valid'] = rvalid

                if 'openpose_foot' in flow_dict:
                    data_dict['openpose_foot'] = flow_dict['openpose_foot']

                bcrop_center2d, bscale2d = self.calc_crop_scale2d(body2d, bvalid)
                lcrop_center2d, lscale2d = self.calc_crop_scale2d(lhand2d, lvalid)
                rcrop_center2d, rscale2d = self.calc_crop_scale2d(rhand2d, rvalid)

            body2d_local = self.update_keypoint2d(body2d, bcrop_center2d, bscale2d)
            lhand2d_local = self.update_keypoint2d(lhand2d, lcrop_center2d, lscale2d)
            rhand2d_local = self.update_keypoint2d(rhand2d, rcrop_center2d, rscale2d)

            data_dict['body_uv_local'] = body2d_local
            data_dict['lhand_uv_local'] = lhand2d_local
            data_dict['rhand_uv_local'] = rhand2d_local
            data_dict['bcrop_center2d'] = bcrop_center2d
            data_dict['lcrop_center2d'] = lcrop_center2d
            data_dict['rcrop_center2d'] = rcrop_center2d
            data_dict['bscale2d'] = bscale2d
            data_dict['lscale2d'] = lscale2d
            data_dict['rscale2d'] = rscale2d

            if read_image:
                bimage_crop = self.crop_image(image, bcrop_center2d, bscale2d)
                limage_crop = self.crop_image(image, lcrop_center2d, lscale2d)
                rimage_crop = self.crop_image(image, rcrop_center2d, rscale2d)
                data_dict['bimage_crop'] = bimage_crop
                data_dict['limage_crop'] = limage_crop
                data_dict['rimage_crop'] = rimage_crop

            bscoremap2d = self.create_multiple_gaussian_map(body2d_local[:, ::-1], (self.crop_size, self.crop_size), self.sigma,
                                                            valid_vec=flow_dict['body_valid'], extra=True)  # coord_hw, imsize_hw
            lscoremap2d = self.create_multiple_gaussian_map(lhand2d_local[:, ::-1], (self.crop_size, self.crop_size), self.sigma,
                                                            valid_vec=flow_dict['left_hand_valid'], extra=True)  # coord_hw, imsize_hw
            rscoremap2d = self.create_multiple_gaussian_map(rhand2d_local[:, ::-1], (self.crop_size, self.crop_size), self.sigma,
                                                            valid_vec=flow_dict['right_hand_valid'], extra=True)  # coord_hw, imsize_hw
            data_dict['bscoremap2d'] = bscoremap2d
            data_dict['lscoremap2d'] = lscoremap2d
            data_dict['rscoremap2d'] = rscoremap2d

            # pass any OpenPose entries through to the output unchanged
            for key, val in flow_dict.items():
                if 'openpose' not in key:
                    continue
                data_dict[key] = val

        # split data_dict into parallel name / tensor lists so they can be re-zipped after batching
        names, tensors = zip(*data_dict.items())

        if self.shuffle:
            tensors = tf.train.shuffle_batch_join([tensors],
                                                  batch_size=self.batch_size,
                                                  capacity=100,
                                                  min_after_dequeue=20,
                                                  enqueue_many=False)
        else:
            tensors = tf.train.batch_join([tensors],
                                          batch_size=self.batch_size,
                                          capacity=20,
                                          enqueue_many=False)

        return dict(zip(names, tensors))
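
Usage sketch: the pipeline is built on TF1 queue runners (slice_input_producer / batch_join / shuffle_batch_join), so the returned tensors must be evaluated inside a session with the queue threads started. This is a minimal sketch assuming TF 1.x; the reader construction is a placeholder, and only get() and the standard TF1 session/queue calls are taken from the code above.

    import tensorflow as tf

    reader = ...  # a concrete BaseReader subclass, constructed elsewhere
    data = reader.get(withPAF=True, read_image=True)  # dict of batched tensors

    with tf.Session() as sess:
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)
        batch = sess.run(data)  # dict of numpy arrays, batch dimension first
        coord.request_stop()
        coord.join(threads)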