src/parsers/bvh_converter.py [75:104]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
            # Map this BVH joint into the keyframe layout: find its column in the
            # raw BVH arrays, then copy position (rescaled by self.scale) and
            # rotation into the converted arrays at the keyframe slot.
            id_bvh = list(self.joints.keys()).index(bvh_joint_name)
            p_converted[:, id_keyframe] = p[:, id_bvh] / self.scale
            r_converted[:, id_keyframe] = r[:, id_bvh]

        # Recover per-joint local rotations from the converted rotations via the
        # torch helper, then drop back to numpy for assembly below.
        r_local = self.Keyframe.batch_torch_recover_local_rotations(self.Skeleton,
                                                                    torch.from_numpy(r_converted)).numpy()

        # Result layout per (frame, joint): [0:3] position, [3:6] rotation as
        # converted above, [6:9] recovered local rotation, [9] extra channel.
        # Channel 9 of joint 0 carries the frame timestamp in seconds; for other
        # joints it is incremented below when the joint is flagged as occluded.
        result = np.zeros((len(p), len(self.KeyframeIdx.all), 10), dtype=np.float32)
        result[:, :, :3] = p_converted
        result[:, :, 3:6] = r_converted
        result[:, :, 6:9] = r_local
        result[:, 0, 9] = np.linspace(0.0, len(p) / float(self.fps), len(p))

        # Fold the optional "out of view" / "too close" per-frame masks into the
        # occlusion mask. NOTE(review): both switches are hard-coded True here —
        # consider lifting them to parameters/config if they are meant to vary.
        use_out_of_view = True
        use_too_close = True
        # occ_mask: presumably one integer bitmask per frame, one bit per entry
        # of self.file_occlusions_joints — TODO confirm against the file format.
        occ_mask = np.array(occlusions)
        if use_out_of_view and len(occlusions) > 0:
            occ_mask |= np.array(out_of_views)
        if use_too_close and len(occlusions) > 0:
            occ_mask |= np.array(too_close)

        if len(occlusions) > 0:
            for occlusion_joint_idx, joint_name in enumerate(self.file_occlusions_joints):
                # Skip occlusion entries that have no slot in the keyframe layout.
                if joint_name not in self.KeyframeIdx.all:
                    continue

                joint_bit_mask = 1 << occlusion_joint_idx
                result_joint_index = self.KeyframeIdx.all[joint_name]
                # Python's `&` binds tighter than `==` (unlike C), so this parses
                # as (occ_mask & joint_bit_mask) == joint_bit_mask: a boolean mask
                # selecting the frames whose bit for this joint is set; channel 9
                # of those frames gets +1.
                result[:, result_joint_index, 9][occ_mask & joint_bit_mask == joint_bit_mask] += 1.
        return result
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -



src/parsers/unoc_parser.py [226:255]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
                # Map this BVH joint into the keyframe layout: find its column in
                # the raw BVH arrays, then copy position (rescaled by self.scale)
                # and rotation into the converted arrays at the keyframe slot.
                id_bvh = list(self.joints.keys()).index(bvh_joint_name)
                p_converted[:, id_keyframe] = p[:, id_bvh] / self.scale
                r_converted[:, id_keyframe] = r[:, id_bvh]

        # Recover per-joint local rotations from the converted rotations via the
        # torch helper, then drop back to numpy for assembly below.
        # NOTE(review): this whole tail duplicates bvh_converter.py [75:104];
        # consider extracting a shared helper so the two parsers cannot drift.
        r_local = self.Keyframe.batch_torch_recover_local_rotations(self.Skeleton,
                                                                    torch.from_numpy(r_converted)).numpy()

        # Result layout per (frame, joint): [0:3] position, [3:6] rotation as
        # converted above, [6:9] recovered local rotation, [9] extra channel.
        # Channel 9 of joint 0 carries the frame timestamp in seconds; for other
        # joints it is incremented below when the joint is flagged as occluded.
        result = np.zeros((len(p), len(self.KeyframeIdx.all), 10), dtype=np.float32)
        result[:, :, :3] = p_converted
        result[:, :, 3:6] = r_converted
        result[:, :, 6:9] = r_local
        result[:, 0, 9] = np.linspace(0.0, len(p) / float(self.fps), len(p))

        # Fold the optional "out of view" / "too close" per-frame masks into the
        # occlusion mask. NOTE(review): both switches are hard-coded True here —
        # consider lifting them to parameters/config if they are meant to vary.
        use_out_of_view = True
        use_too_close = True
        # occ_mask: presumably one integer bitmask per frame, one bit per entry
        # of self.file_occlusions_joints — TODO confirm against the file format.
        occ_mask = np.array(occlusions)
        if use_out_of_view and len(occlusions) > 0:
            occ_mask |= np.array(out_of_views)
        if use_too_close and len(occlusions) > 0:
            occ_mask |= np.array(too_close)

        if len(occlusions) > 0:
            for occlusion_joint_idx, joint_name in enumerate(self.file_occlusions_joints):
                # Skip occlusion entries that have no slot in the keyframe layout.
                if joint_name not in self.KeyframeIdx.all:
                    continue

                joint_bit_mask = 1 << occlusion_joint_idx
                result_joint_index = self.KeyframeIdx.all[joint_name]
                # Python's `&` binds tighter than `==` (unlike C), so this parses
                # as (occ_mask & joint_bit_mask) == joint_bit_mask: a boolean mask
                # selecting the frames whose bit for this joint is set; channel 9
                # of those frames gets +1.
                result[:, result_joint_index, 9][occ_mask & joint_bit_mask == joint_bit_mask] += 1.
        return result
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -



