in videoalignment/datasets.py [0:0]
def split_train_val(self):
    """Split videos and overlapping pairs into train/val folds by connected component.

    Components are partitioned into ``self.n_folds`` contiguous folds of size
    ``ncc // n_folds``; fold ``self.args.fold_index`` becomes the validation
    fold and everything else (including any remainder components when ``ncc``
    is not divisible by ``n_folds``) goes to train. Depending on
    ``self.phase`` ("train" / "val" / "all"), sets ``self.videos`` and
    ``self.overlapping_pairs`` accordingly.

    Reads: self.gt_all_videos, self.gt_all_overlapping_pairs (lists of dicts
    carrying a "label" key holding the component id), self.n_folds,
    self.args.fold_index, self.phase.
    Writes: self.videos, self.overlapping_pairs.
    """
    from collections import defaultdict

    ncc = self.annotate_connected_components()
    fold_size = ncc // self.n_folds  # hoisted loop-invariant
    lo = fold_size * self.args.fold_index
    hi = fold_size * (self.args.fold_index + 1)
    components = list(range(ncc))
    val_cc = components[lo:hi]
    # Remainder components beyond fold_size * n_folds always land in train.
    train_cc = components[:lo] + components[hi:]

    # Group once by component label (O(N)) instead of rescanning the full
    # lists for every component (O(ncc * N)); extend() in component order
    # reproduces the original output ordering exactly.
    videos_by_cc = defaultdict(list)
    for video in self.gt_all_videos:
        videos_by_cc[video["label"]].append(video)
    pairs_by_cc = defaultdict(list)
    for pair in self.gt_all_overlapping_pairs:
        pairs_by_cc[pair["label"]].append(pair)

    videos_train, videos_val = [], []
    pairs_train, pairs_val = [], []
    for comp in val_cc:
        videos_val.extend(videos_by_cc[comp])
        pairs_val.extend(pairs_by_cc[comp])
    for comp in train_cc:
        videos_train.extend(videos_by_cc[comp])
        pairs_train.extend(pairs_by_cc[comp])

    if self.phase == "train":
        self.videos = videos_train
        self.overlapping_pairs = pairs_train
    elif self.phase == "val":
        self.videos = videos_val
        self.overlapping_pairs = pairs_val
    elif self.phase == "all":
        self.videos = self.gt_all_videos
        self.overlapping_pairs = self.gt_all_overlapping_pairs
    # NOTE(review): any other phase value leaves self.videos /
    # self.overlapping_pairs untouched, matching the original behavior.