src/models.py [96:137]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
        for file in ckpts[:-10]:
            # and also keep the weights every 50k iterations
            epoch = int(file.split('.weights')[0].split('_')[-1])
            if epoch % 50000 == 0 and epoch > 0:
                continue
            os.remove(file)
            if optimizer is not None:
                os.remove(f"{file.split('.weights')[0]}.optimizer")

    def load_weights(self, path, device):
        """Load model parameters from a checkpoint file into this module.

        Args:
            path: filesystem path of the serialized checkpoint.
            device: ``map_location`` passed to ``torch.load`` (e.g. ``'cpu'``
                or a CUDA device index).
        """
        print('Reloading Checkpoint from', path)
        checkpoint = torch.load(path, map_location=device)
        # Checkpoints may contain either a bare state dict (dict/OrderedDict)
        # or a fully pickled module object; support both. isinstance() also
        # accepts plain dicts, which `type(...) == type(OrderedDict())` missed.
        if isinstance(checkpoint, dict):
            self.load_state_dict(checkpoint)
        else:
            self.load_state_dict(checkpoint.state_dict())

    def load_optimizer(self, path, optimizer, device):
        """Restore *optimizer*'s state from *path* if that checkpoint exists.

        Silently does nothing when the file is absent (fresh run).
        """
        if not os.path.exists(path):
            return
        print(f"Reloading optimizer checkpoint from {path}")
        saved_state = torch.load(path, map_location=device)
        optimizer.load_state_dict(saved_state)

    def load_specific_weights(self, path, checkpoint_name, optimizer=None, device=0):
        """Load the newest checkpoint in *path* matching *checkpoint_name*.

        A file matches when its name contains both *checkpoint_name* and this
        model's ``self.name``. Returns 1 on success, 0 when nothing matched.
        If *optimizer* is given, its companion ``.optimizer`` file is restored
        as well.
        """
        matches = []
        for fname in sorted(os.listdir(path)):
            if checkpoint_name in fname and self.name in fname:
                matches.append(os.path.join(path, fname))

        if not matches:
            print("no Checkpoints found")
            return 0

        # sorted() order means the last entry is the most recent match
        latest = matches[-1]
        self.load_weights(latest, device)

        if optimizer is not None:
            optim_path = f"{latest.split('.weights')[0]}.optimizer"
            self.load_optimizer(optim_path, optimizer, device)
        return 1

    def load_latest_weights(self, path, optimizer=None, device=0, config=None):
        ckpts = [os.path.join(path, f) for f in sorted(os.listdir(os.path.join(path))) if
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -



src/models.py [272:313]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
        for file in ckpts[:-10]:
            # and also keep the weights every 50k iterations
            epoch = int(file.split('.weights')[0].split('_')[-1])
            if epoch % 50000 == 0 and epoch > 0:
                continue
            os.remove(file)
            if optimizer is not None:
                os.remove(f"{file.split('.weights')[0]}.optimizer")

    def load_weights(self, path, device):
        """Load model parameters from a checkpoint file into this module.

        Args:
            path: filesystem path of the serialized checkpoint.
            device: ``map_location`` passed to ``torch.load`` (e.g. ``'cpu'``
                or a CUDA device index).
        """
        print('Reloading Checkpoint from', path)
        checkpoint = torch.load(path, map_location=device)
        # Checkpoints may contain either a bare state dict (dict/OrderedDict)
        # or a fully pickled module object; support both. isinstance() also
        # accepts plain dicts, which `type(...) == type(OrderedDict())` missed.
        if isinstance(checkpoint, dict):
            self.load_state_dict(checkpoint)
        else:
            self.load_state_dict(checkpoint.state_dict())

    def load_optimizer(self, path, optimizer, device):
        """Restore *optimizer*'s state from *path* if that checkpoint exists.

        Silently does nothing when the file is absent (fresh run).
        """
        if not os.path.exists(path):
            return
        print(f"Reloading optimizer checkpoint from {path}")
        saved_state = torch.load(path, map_location=device)
        optimizer.load_state_dict(saved_state)

    def load_specific_weights(self, path, checkpoint_name, optimizer=None, device=0):
        """Load the newest checkpoint in *path* matching *checkpoint_name*.

        A file matches when its name contains both *checkpoint_name* and this
        model's ``self.name``. Returns 1 on success, 0 when nothing matched.
        If *optimizer* is given, its companion ``.optimizer`` file is restored
        as well.
        """
        matches = []
        for fname in sorted(os.listdir(path)):
            if checkpoint_name in fname and self.name in fname:
                matches.append(os.path.join(path, fname))

        if not matches:
            print("no Checkpoints found")
            return 0

        # sorted() order means the last entry is the most recent match
        latest = matches[-1]
        self.load_weights(latest, device)

        if optimizer is not None:
            optim_path = f"{latest.split('.weights')[0]}.optimizer"
            self.load_optimizer(optim_path, optimizer, device)
        return 1

    def load_latest_weights(self, path, optimizer=None, device=0, config=None):
        ckpts = [os.path.join(path, f) for f in sorted(os.listdir(os.path.join(path))) if
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -



