in src/parsers/bvh_converter.py [0:0]
def load_numpy(self, normalized=False):
    """Load previously converted skeleton/keyframe data from disk.

    Ensures the dataset has been converted (pickle + .npy files) first, then
    reads every keyframe folder returned by ``get_all_keyframe_folders``.

    Parameters
    ----------
    normalized : bool
        If True, load the ``_normalized`` variant of the converted files.

    Yields
    ------
    tuple
        If ``self.save_separately`` is true: one ``(_skeletons, _keyframes)``
        pair per numbered chunk file (first folder only — see note below).
        Otherwise: one ``(skeleton, keyframes)`` pair per loaded sample,
        accumulated across all folders.
    """
    # Check if the dataset was already converted; otherwise convert it now.
    self._check_and_convert(normalized)
    folders = self.__class__.get_all_keyframe_folders()
    skeletons = []
    keyframes = []
    _suffix = "_normalized" if normalized else ""
    for folder in folders:
        if self.save_separately:
            # Chunked layout: skeletons_<Keyframe><N><suffix>.pickle plus a
            # matching keyframes_<Keyframe><N><suffix>.npy, N = 0, 1, 2, ...
            file_count = 0
            suffix = f"{self.Keyframe.__name__}{file_count}{_suffix}"
            while os.path.exists(os.path.join(folder, f"skeletons_{suffix}.pickle")):
                try:
                    # "rb" (was "rb+"): loading needs read access only.
                    with open(os.path.join(folder, f"skeletons_{suffix}.pickle"), "rb") as infile:
                        _skeletons = pickle.load(infile)
                    _keyframes = np.load(os.path.join(folder, f"keyframes_{suffix}.npy"), allow_pickle=True)
                except (OSError, EOFError, pickle.UnpicklingError, ValueError):
                    # BUG FIX: a bare `except:` previously fell through and used
                    # an undefined (or stale) `_keyframes`; skip this chunk instead.
                    print(f"could not import {folder}{suffix}")
                else:
                    # Collapse a leading singleton batch axis before postprocessing.
                    if _keyframes.shape[0] == 1:
                        _keyframes = _keyframes.reshape(_keyframes.shape[1:])
                    _keyframes = self._apply_postprocessing(_keyframes)
                    # Re-wrap as a one-element list with the batch axis restored.
                    _keyframes = list(_keyframes.reshape((1, *_keyframes.shape)))
                    yield _skeletons, _keyframes
                file_count += 1
                suffix = f"{self.Keyframe.__name__}{file_count}{_suffix}"
            # NOTE(review): returning here means only the FIRST folder is ever
            # processed in separate mode, despite the `for folder` loop — looks
            # unintentional, but preserved; confirm against callers.
            return
        else:
            suffix = _suffix
            try:
                with open(os.path.join(folder, f"skeletons{suffix}.pickle"), "rb") as infile:
                    _skeletons = pickle.load(infile)
                _keyframes = np.load(os.path.join(folder, f"keyframes{suffix}.npy"), allow_pickle=True)
            except (OSError, EOFError, pickle.UnpicklingError, ValueError):
                # BUG FIX: on failure the old code continued with an undefined
                # `_keyframes` (NameError); skip the folder instead.
                print(f"could not import {folder}{suffix}")
                continue
            _keyframes = self._apply_postprocessing(_keyframes)
            skeletons.extend(_skeletons)
            keyframes.extend(list(_keyframes))
    if not self.save_separately:
        # BUG FIX: this function is a generator (it contains `yield`), so the
        # original `return zip(...)` only set StopIteration.value and callers
        # iterating the generator got nothing. Yield the pairs instead.
        yield from zip(skeletons, keyframes)