get_item(self, index)

from lib/data/EvalWPoseDataset.py


    def get_item(self, index):
        """Load the image at *index* plus its OpenPose keypoint JSON, pick one
        person, crop around them, and return network-ready tensors together
        with calibration matrices.

        Args:
            index (int): index into ``self.img_files``.

        Returns:
            dict with keys:
                'name'        (str): image basename without extension.
                'img'         (Tensor, 1xCx load_size x load_size).
                'img_512'     (Tensor, 1xCx512x512).
                'calib'       (Tensor, 1x4x4): flip-y projection matrix.
                'calib_world' (Tensor, 1x4x4): crop-to-NDC transform.
                'b_min', 'b_max' (np.ndarray): unit-cube sampling bounds.

        Raises:
            IOError: if the keypoint file contains no detected people.
        """
        img_path = self.img_files[index]
        # Keypoint JSON sits next to the image: foo.png -> foo_keypoints.json
        joint_path = self.img_files[index].replace('.%s' % (self.img_files[index].split('.')[-1]), '_keypoints.json')
        # Name
        img_name = os.path.splitext(os.path.basename(img_path))[0]
        # Calib
        with open(joint_path) as json_file:
            data = json.load(json_file)
            if len(data['people']) == 0:
                raise IOError('non human found!!')
            
            # if True, the person with the largest height will be chosen. 
            # set to False for multi-person processing
            if True:
                selected_data = data['people'][0]
                height = 0
                if len(data['people']) != 1:
                    for i in range(len(data['people'])):
                        tmp = data['people'][i]
                        # Keypoints come flattened as [x0, y0, conf0, x1, ...]
                        keypoints = np.array(tmp['pose_keypoints_2d']).reshape(-1,3)

                        # Keep only confident joints (threshold tuned per detector)
                        flags = keypoints[:,2] > 0.5 #openpose
                        # flags = keypoints[:,2] > 0.2  #detectron
                        if sum(flags) == 0:
                            continue
                        bbox = keypoints[flags]
                        bbox_max = bbox.max(0)
                        bbox_min = bbox.min(0)

                        # Track the person with the tallest keypoint bounding box
                        if height < bbox_max[1] - bbox_min[1]:
                            height = bbox_max[1] - bbox_min[1]
                            selected_data = tmp
            else:
                # Multi-person path: pick by configured id, clamped to range
                pid = min(len(data['people'])-1, self.person_id)
                selected_data = data['people'][pid]

            keypoints = np.array(selected_data['pose_keypoints_2d']).reshape(-1,3)

            flags = keypoints[:,2] > 0.5   #openpose
            # flags = keypoints[:,2] > 0.2    #detectron

            # nflag: nose visible; mflag: neck visible (OpenPose BODY_25 ordering
            # assumed — joints 0 and 1; verify against the keypoint producer)
            nflag = flags[0]
            mflag = flags[1]

            # Shoulders (2, 5) and eyes/ears (15-18): sanity check the subject
            # is front-facing enough for the requested crop type
            check_id = [2, 5, 15, 16, 17, 18]
            cnt = sum(flags[check_id])
            if self.opt.crop_type == 'face' and (not (nflag and cnt > 3)):
                print('Warning: face should not be backfacing.')
            if self.opt.crop_type == 'upperbody' and (not (mflag and nflag and cnt > 3)):
                print('Warning: upperbody should not be backfacing.')
            if self.opt.crop_type == 'fullbody' and sum(flags) < 15:
                print('Warning: not sufficient keypoints.')

        im = cv2.imread(img_path, cv2.IMREAD_UNCHANGED)
        if im.shape[2] == 4:
            # RGBA input: un-premultiply by alpha, then composite onto a
            # mid-gray (0.5) background before converting back to uint8
            im = im / 255.0
            im[:,:,:3] /= im[:,:,3:] + 1e-8
            im = im[:,:,3:] * im[:,:,:3] + 0.5 * (1.0 - im[:,:,3:])
            im = (255.0 * im).astype(np.uint8)
        h, w = im.shape[:2]
        
        intrinsic = np.identity(4)

        # Build the transform that maps the keypoint-derived crop rectangle
        # (x, y, w, h) into normalized device coordinates
        trans_mat = np.identity(4)
        rect = self.crop_func(keypoints)

        im = crop_image(im, rect)

        scale_im2ndc = 1.0 / float(w // 2)
        scale = w / rect[2]
        trans_mat *= scale
        trans_mat[3,3] = 1.0
        # Translate so the crop center maps to the NDC origin (y axis flipped)
        trans_mat[0, 3] = -scale*(rect[0] + rect[2]//2 - w//2) * scale_im2ndc
        trans_mat[1, 3] = scale*(rect[1] + rect[3]//2 - h//2) * scale_im2ndc
        
        intrinsic = np.matmul(trans_mat, intrinsic)
        im_512 = cv2.resize(im, (512, 512))
        im = cv2.resize(im, (self.load_size, self.load_size))

        # OpenCV is BGR; reverse the channel axis for PIL's RGB convention
        image_512 = Image.fromarray(im_512[:,:,::-1]).convert('RGB')
        image = Image.fromarray(im[:,:,::-1]).convert('RGB')
        
        # Sampling bounds: the unit cube in NDC space
        B_MIN = np.array([-1, -1, -1])
        B_MAX = np.array([1, 1, 1])
        projection_matrix = np.identity(4)
        projection_matrix[1, 1] = -1  # flip y: image coords grow downward
        calib = torch.Tensor(projection_matrix).float()

        calib_world = torch.Tensor(intrinsic).float()

        # image
        image_512 = self.to_tensor(image_512)
        image = self.to_tensor(image)
        return {
            'name': img_name,
            'img': image.unsqueeze(0),
            'img_512': image_512.unsqueeze(0),
            'calib': calib.unsqueeze(0),
            'calib_world': calib_world.unsqueeze(0),
            'b_min': B_MIN,
            'b_max': B_MAX,
        }