# __getitem__ — excerpt from generation/data/pickle_dataset.py

    def __getitem__(self, index):
        """Load one sample (label map, instance map, real image) by index.

        Returns a dict with keys:
            'label': label-map tensor (input A)
            'inst':  instance-map tensor, or 0 when ``opt.no_instance`` is set
            'image': real-image tensor (input B), or 0 outside training
            'feat':  precomputed feature tensor, or 0 when not loaded
            'path':  filesystem path of THIS sample's label map
            'random_image': (only for cvae++/bicycle models, train time) a
                randomly drawn real image used to train the discriminator;
                key is absent for other models, matching the original contract.

        Fixes vs. previous revision:
          * ``input_dict`` is now always constructed, so inference-time calls
            no longer raise ``NameError``.
          * The random-sample branch no longer clobbers ``bbox``/``A_path``,
            so 'path' always refers to the indexed sample's label map.
        """
        record = self.pickle_file[index]
        ### person bbox (used by all inputs)
        bbox = record['bbox']

        ### input A (label maps)
        A_path = os.path.join(self.dir_A, record['seg'])
        A = self.crop_person(Image.open(A_path), bbox)
        params = get_params(self.opt, A.size)
        if self.opt.label_nc == 0:
            transform_A = get_transform(self.opt, params)
            A_tensor = transform_A(A.convert('RGB'))
        else:
            # NEAREST keeps label ids intact; *255 undoes ToTensor's [0,1]
            # scaling so the tensor holds integer class ids again.
            transform_A = get_transform(self.opt, params, method=Image.NEAREST, normalize=False)
            A_tensor = transform_A(A) * 255.0

        random_B_tensor = B_tensor = inst_tensor = feat_tensor = 0

        ### if using instance maps
        if not self.opt.no_instance:
            # NOTE(review): the instance path is identical to the label path —
            # presumably the seg map doubles as the instance map; confirm.
            inst = self.crop_person(Image.open(A_path), bbox)
            inst_tensor = transform_A(inst)

            if self.opt.load_features:
                feat = Image.open(self.feat_paths[index]).convert('RGB')
                feat_tensor = normalize()(transform_A(feat))

        ### input B (real images) — only loaded during training
        if self.opt.isTrain:
            B_path = os.path.join(self.dir_B, record['filename'])
            B = Image.open(B_path).convert('RGB')
            B = self.crop_person(B, bbox)
            B = self.remove_background(B, A)

            if self.opt.color_mode == 'Lab':
                B = self.pil_rgb2lab(B)
                transform_B = get_transform(self.opt, params, labimg=True)
            else:
                transform_B = get_transform(self.opt, params)

            B_tensor = transform_B(B)

            # These models need a random real image to train the discriminator.
            if self.opt.model in ('cvae++pix2pixHD', 'bicycle-pix2pixHD'):
                rand_index = random.randint(0, self.dataset_size - 1)
                rand_record = self.pickle_file[rand_index]
                rand_bbox = rand_record['bbox']
                ### label map of the random sample, used only to strip its background
                rand_A = Image.open(os.path.join(self.dir_A, rand_record['seg']))
                rand_A = self.crop_person(rand_A, rand_bbox)
                ### the random real image itself
                rand_B = Image.open(os.path.join(self.dir_B, rand_record['filename'])).convert('RGB')
                rand_B = self.crop_person(rand_B, rand_bbox)
                rand_B = self.remove_background(rand_B, rand_A)
                if self.opt.color_mode == 'Lab':
                    rand_B = self.pil_rgb2lab(rand_B)

                random_B_tensor = transform_B(rand_B)

        input_dict = {'label': A_tensor, 'inst': inst_tensor, 'image': B_tensor,
                      'feat': feat_tensor, 'path': A_path}
        if self.opt.isTrain and self.opt.model in ('cvae++pix2pixHD', 'bicycle-pix2pixHD'):
            input_dict['random_image'] = random_B_tensor

        return input_dict