in datasets/cifar_.py [0:0]
def __init__(self, cfg):
    root = os.path.abspath(os.path.expanduser(cfg.DATASET.ROOT))
    self.dataset_dir = os.path.join(root, self.dataset_dir)
    self.image_dir = os.path.join(self.dataset_dir, self.dataset_name)
    self.preprocessed = os.path.join(self.image_dir, "preprocessed.pkl")
    self.split_fewshot_dir = os.path.join(self.image_dir, "split_fewshot")
    mkdir_if_missing(self.split_fewshot_dir)
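
    # Reuse the cached train/test split if it exists; otherwise build it and cache it.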
    if os.path.exists(self.preprocessed):
        with open(self.preprocessed, "rb") as f:
            preprocessed = pickle.load(f)
            train = preprocessed["train"]
            test = preprocessed["test"]
    else:
        classnames = self.load_classnames()
        train = self.read_data(classnames, "train")
        # Follow standard practice and evaluate on the test set.
        # The test set also serves as the val set (so the last-step model is evaluated).
        test = self.read_data(classnames, "test")
        preprocessed = {"train": train, "test": test}
        with open(self.preprocessed, "wb") as f:
            pickle.dump(preprocessed, f, protocol=pickle.HIGHEST_PROTOCOL)
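
    # Optionally reduce the training set to a cached k-shot subset, keyed by num_shots and seed.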
    num_shots = cfg.DATASET.NUM_SHOTS
    if num_shots >= 1:
        seed = cfg.SEED
        preprocessed = os.path.join(self.split_fewshot_dir, f"shot_{num_shots}-seed_{seed}.pkl")

        if os.path.exists(preprocessed):
            print(f"Loading preprocessed few-shot data from {preprocessed}")
            with open(preprocessed, "rb") as file:
                data = pickle.load(file)
                train = data["train"]
        else:
            train = self.generate_fewshot_dataset(train, num_shots=num_shots)
            data = {"train": train}
            print(f"Saving preprocessed few-shot data to {preprocessed}")
            with open(preprocessed, "wb") as file:
                pickle.dump(data, file, protocol=pickle.HIGHEST_PROTOCOL)
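
    # Subsample classes as configured (reusing the OxfordPets helper).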
    subsample = cfg.DATASET.SUBSAMPLE_CLASSES
    train, test = OxfordPets.subsample_classes(train, test, subsample=subsample)

    super().__init__(train_x=train, val=test, test=test)
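
A minimal usage sketch follows. It assumes a yacs-style CfgNode config and a concrete subclass (hypothetically named CIFAR100FewShot here) that supplies dataset_dir, dataset_name, load_classnames and read_data; only the config keys read in __init__ above come from the source, and the train_x/test properties are assumed from a Dassl-style DatasetBase.

from yacs.config import CfgNode as CN  # assumption: any attribute-style config with these keys would do

cfg = CN()
cfg.SEED = 1
cfg.DATASET = CN()
cfg.DATASET.ROOT = "~/data"            # expanded to an absolute path inside __init__
cfg.DATASET.NUM_SHOTS = 16             # >= 1 triggers the cached few-shot subsampling branch
cfg.DATASET.SUBSAMPLE_CLASSES = "all"  # value is an assumption; forwarded to OxfordPets.subsample_classes

dataset = CIFAR100FewShot(cfg)         # hypothetical subclass defined in cifar_.py
print(len(dataset.train_x), len(dataset.test))  # properties assumed from the Dassl-style base class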