def load_data()

in experiments/grasp_stability/train.py [0:0]


    def load_data(self, K, i, batch_size=32, num_workers=12):
        """Build train/test DataLoaders for K-fold cross-validation.

        The sorted file list under ``rootDir`` (capped at ``args.N`` entries)
        is split into K contiguous folds; fold ``i`` is held out as the test
        set and the remaining files form the training set.

        Args:
            K: number of folds.
            i: 0-based index of the fold to hold out for testing.
            batch_size: DataLoader batch size (default 32, unchanged).
            num_workers: DataLoader worker processes (default 12, unchanged).

        Side effects:
            Sets ``self.trainLoader`` and ``self.testLoader``.
        """
        rootDir = "data/grasp/"
        fileNames = sorted(glob.glob(os.path.join(rootDir, "*")))[: args.N]

        # Contiguous K-fold split: fold i is the test fold. Integer division
        # means the trailing N % K files never appear in any test fold.
        N = len(fileNames)
        n = N // K

        idx = list(range(N))
        testIdx = idx[n * i : n * (i + 1)]
        # sorted() makes the training order deterministic; a bare set
        # difference has no guaranteed iteration order.
        trainIdx = sorted(set(idx) - set(testIdx))

        trainFileNames = [fileNames[j] for j in trainIdx]
        testFileNames = [fileNames[j] for j in testIdx]

        # Training-time augmentation: random 224-crop plus additive noise.
        trainTransform = transforms.Compose(
            [
                transforms.ToPILImage(),
                transforms.Resize(256),
                transforms.RandomCrop(224),
                transforms.ToTensor(),
                transforms.Normalize(mean=(0.5,), std=(0.5,)),
                AddGaussianNoise(0.0, 0.01),
            ]
        )

        # Depth frames use different normalization statistics than RGB.
        trainTransformDepth = transforms.Compose(
            [
                transforms.ToPILImage(),
                transforms.Resize(256),
                transforms.RandomCrop(224),
                transforms.ToTensor(),
                transforms.Normalize(mean=(0.1,), std=(0.2,)),
                AddGaussianNoise(0.0, 0.01),
            ]
        )

        # Training dataset and dataloader.
        trainDataset = GraspingDataset(
            trainFileNames,
            fields=self.fields,
            transform=trainTransform,
            transformDepth=trainTransformDepth,
        )
        # NOTE(review): shuffle=False preserves the original behavior, but
        # training data is normally shuffled — confirm this is intentional.
        trainLoader = torch.utils.data.DataLoader(
            trainDataset,
            batch_size=batch_size,
            shuffle=False,
            num_workers=num_workers,
            pin_memory=True,
        )

        # Test-time transforms: same crop pipeline but no noise injection.
        # NOTE(review): RandomCrop at test time makes evaluation
        # non-deterministic; CenterCrop(224) is the usual choice — confirm.
        testTransform = transforms.Compose(
            [
                transforms.ToPILImage(),
                transforms.Resize(256),
                transforms.RandomCrop(224),
                transforms.ToTensor(),
                transforms.Normalize(mean=(0.5,), std=(0.5,)),
            ]
        )

        testTransformDepth = transforms.Compose(
            [
                transforms.ToPILImage(),
                transforms.Resize(256),
                transforms.RandomCrop(224),
                transforms.ToTensor(),
                transforms.Normalize(mean=(0.1,), std=(0.2,)),
            ]
        )

        # Test dataset and dataloader.
        testDataset = GraspingDataset(
            testFileNames,
            fields=self.fields,
            transform=testTransform,
            transformDepth=testTransformDepth,
        )
        testLoader = torch.utils.data.DataLoader(
            testDataset,
            batch_size=batch_size,
            shuffle=False,
            num_workers=num_workers,
            pin_memory=True,
        )

        self.trainLoader, self.testLoader = trainLoader, testLoader