PyTorchClassification/data_loader.py [28:165]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def show_image(input, bbox=None):

    import matplotlib.pyplot as plt

    # Convert the CHW tensor to an HWC numpy array for matplotlib
    img = np.transpose(input.data.numpy(), [1, 2, 0])
    plt.imshow(img)

    # bbox is expected as [x, y, width, height]; trace the box outline
    if bbox is not None:

        x = [bbox[0], bbox[0], bbox[0]+bbox[2], bbox[0]+bbox[2], bbox[0]]
        y = [bbox[1], bbox[1]+bbox[3], bbox[1]+bbox[3], bbox[1], bbox[1]]

        plt.plot(x, y)

    plt.show()


def deploy_model(filein, fileout):
    """
    Loads a model from a checkpoint, then re-saves only the fields needed for
    inference (epoch, state_dict, classnames, model_type), which is more
    practical for inference-only deployment.
    """

    print("=> deploying checkpoint '{}'".format(filein))

    checkpoint = torch.load(filein, map_location=lambda storage, loc: storage)

    deploy_checkpoint = {
            'epoch' : checkpoint['epoch'],
            'state_dict': checkpoint['state_dict'],
            'classnames' : checkpoint['classnames'],
            'model_type' : checkpoint['model_type']}

    torch.save(deploy_checkpoint, fileout)


def save_model(state, is_best, filename='checkpoint.pth.tar'):
    """
    Saves a model to a checkpoint. If it is the best model so far, also copies
    it to 'model_best.pth.tar'.
    """

    torch.save(state, filename)
    if is_best:
        print("\tSaving new best model")
        shutil.copyfile(filename, 'model_best.pth.tar')


def load_model(filename, useGPU=True):
    """
    Loads a model from a checkpoint.
    """

    if os.path.isfile(filename):
        print("=> loading checkpoint '{}'".format(filename))

        if useGPU:
            cuda_device = torch.cuda.current_device()
            checkpoint = torch.load(filename, map_location=lambda storage, loc: storage.cuda(cuda_device))
        else:
            checkpoint = torch.load(filename, map_location=lambda storage, loc: storage)

        start_epoch = checkpoint['epoch'] if 'epoch' in checkpoint else 0
        best_prec1 = checkpoint['best_prec1'] if 'best_prec1' in checkpoint else 0
        best_prec3 = checkpoint['best_prec3'] if 'best_prec3' in checkpoint else 0
        best_prec5 = checkpoint['best_prec5'] if 'best_prec5' in checkpoint else 0

        state_dict = checkpoint['state_dict']
        classnames = checkpoint['classnames']
        model_type = checkpoint['model_type']

        print('Loaded %d classes' % len(classnames))

        # Strip the 'module.' prefix added by nn.DataParallel, as well as any
        # '1.' or 'model.' prefixes from wrapper modules, so the weights can be
        # loaded into a bare model
        new_state_dict = OrderedDict()
        for k, v in state_dict.items():
            if k[:7] == 'module.':
                k = k[7:]
            if k[:2] == '1.':
                k = k[2:]
            if k[:6] == 'model.':
                k = k[6:]

            new_state_dict[k] = v

        model_dict = new_state_dict        
        optimizer_dict = checkpoint['optimizer'] if 'optimizer' in checkpoint else None

        print("=> loaded checkpoint '{}' (epoch {})"
                .format(filename, start_epoch))

        data.best_prec1 = best_prec1
        data.best_prec3 = best_prec3
        data.best_prec5 = best_prec5
        data.start_epoch = start_epoch
        data.classnames = classnames
        data.model_dict = model_dict
        data.optimizer_dict = optimizer_dict
        data.model_type = model_type

        return data 

    else:
        print("=> no checkpoint found at '{}'".format(filename))

# ...def load_model(filename, useGPU=True)


class ImageLoader():

    def __init__(self, image_sizes):
        # The largest image size is used as the target size in preprocessing;
        # scaling to the final input size is expected to happen inside the model
        self.im_size = [max(image_sizes), max(image_sizes)]
        self.mu_data = [0.5, 0.5, 0.5]
        self.std_data = [0.5, 0.5, 0.5]
        self.brightness = 0.4
        self.contrast = 0.4
        self.saturation = 0.4
        self.hue = 0.25

        # augmentations
        self.center_crop = transforms.CenterCrop((self.im_size[0], self.im_size[1]))
        self.random_crop = transforms.RandomCrop((self.im_size[0], self.im_size[1]), pad_if_needed=True)
        self.scale_aug = transforms.RandomResizedCrop(size=self.im_size[0])
        self.flip_aug = transforms.RandomHorizontalFlip()
        self.flip = transforms.RandomHorizontalFlip(1.0)
        self.color_aug = transforms.ColorJitter(self.brightness, self.contrast, self.saturation, self.hue)
        self.tensor_aug = transforms.ToTensor()
        self.norm_aug = transforms.Normalize(mean=self.mu_data, std=self.std_data)
        self.resize = transforms.Resize((self.im_size[0], self.im_size[1]))
        self.resize_for_crop = transforms.Resize((int(1.14 * self.im_size[0]), int(1.14 * self.im_size[1])))
        self.multi_crop = transforms.TenCrop((self.im_size[0], self.im_size[1]))

    def load_image(self, path):
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
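
A minimal usage sketch of the checkpoint helpers above. The model, optimizer,
metric values, class names, and file names below are placeholders rather than
code from this repository; the dict keys simply mirror the ones load_model()
reads back.

# Hypothetical driver script, assumed to run next to data_loader.py
import torch
import torch.nn as nn

from data_loader import save_model, deploy_model, load_model

model = nn.Linear(8, 2)                                   # placeholder model
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)  # placeholder optimizer

state = {
    'epoch': 1,
    'state_dict': model.state_dict(),
    'optimizer': optimizer.state_dict(),
    'best_prec1': 0.0,
    'best_prec3': 0.0,
    'best_prec5': 0.0,
    'classnames': ['empty', 'animal'],                    # placeholder classes
    'model_type': 'resnet50',                             # placeholder type
}

# Writes checkpoint.pth.tar and, because is_best=True, model_best.pth.tar
save_model(state, is_best=True, filename='checkpoint.pth.tar')

# Keep only the fields needed for inference, dropping the optimizer state
deploy_model('model_best.pth.tar', 'model_best_deploy.pth.tar')

# Reload on CPU; load_model() returns an object exposing start_epoch,
# classnames, model_dict, optimizer_dict, and the best_prec* values
data = load_model('checkpoint.pth.tar', useGPU=False)
print(data.start_epoch, data.model_type, len(data.classnames))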



PyTorchClassification/data_loader_cv.py [29:166]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def show_image(input, bbox=None):

    import matplotlib.pyplot as plt

    # Convert the CHW tensor to an HWC numpy array for matplotlib
    img = np.transpose(input.data.numpy(), [1, 2, 0])
    plt.imshow(img)

    # bbox is expected as [x, y, width, height]; trace the box outline
    if bbox is not None:

        x = [bbox[0], bbox[0], bbox[0]+bbox[2], bbox[0]+bbox[2], bbox[0]]
        y = [bbox[1], bbox[1]+bbox[3], bbox[1]+bbox[3], bbox[1], bbox[1]]

        plt.plot(x, y)

    plt.show()


def deploy_model(filein, fileout):
    """
    Loads a model from a checkpoint, then re-saves only the fields needed for
    inference (epoch, state_dict, classnames, model_type), which is more
    practical for inference-only deployment.
    """

    print("=> deploying checkpoint '{}'".format(filein))

    checkpoint = torch.load(filein, map_location=lambda storage, loc: storage)

    deploy_checkpoint = {
            'epoch' : checkpoint['epoch'],
            'state_dict': checkpoint['state_dict'],
            'classnames' : checkpoint['classnames'],
            'model_type' : checkpoint['model_type']}

    torch.save(deploy_checkpoint, fileout)


def save_model(state, is_best, filename='checkpoint.pth.tar'):
    """
    Saves a model to a checkpoint. If it is the best model so far, also copies
    it to 'model_best.pth.tar'.
    """

    torch.save(state, filename)
    if is_best:
        print("\tSaving new best model")
        shutil.copyfile(filename, 'model_best.pth.tar')


def load_model(filename, useGPU=True):
    """
    Loads a model from a checkpoint.
    """

    if os.path.isfile(filename):
        print("=> loading checkpoint '{}'".format(filename))

        if useGPU:
            cuda_device = torch.cuda.current_device()
            checkpoint = torch.load(filename, map_location=lambda storage, loc: storage.cuda(cuda_device))
        else:
            checkpoint = torch.load(filename, map_location=lambda storage, loc: storage)

        start_epoch = checkpoint['epoch'] if 'epoch' in checkpoint else 0
        best_prec1 = checkpoint['best_prec1'] if 'best_prec1' in checkpoint else 0
        best_prec3 = checkpoint['best_prec3'] if 'best_prec3' in checkpoint else 0
        best_prec5 = checkpoint['best_prec5'] if 'best_prec5' in checkpoint else 0

        state_dict = checkpoint['state_dict']
        classnames = checkpoint['classnames']
        model_type = checkpoint['model_type']

        print('Loaded %d classes' % len(classnames))

        # Strip the 'module.' prefix added by nn.DataParallel, as well as any
        # '1.' or 'model.' prefixes from wrapper modules, so the weights can be
        # loaded into a bare model
        new_state_dict = OrderedDict()
        for k, v in state_dict.items():
            if k[:7] == 'module.':
                k = k[7:]
            if k[:2] == '1.':
                k = k[2:]
            if k[:6] == 'model.':
                k = k[6:]

            new_state_dict[k] = v

        model_dict = new_state_dict        
        optimizer_dict = checkpoint['optimizer'] if 'optimizer' in checkpoint else None

        print("=> loaded checkpoint '{}' (epoch {})"
                .format(filename, start_epoch))

        data.best_prec1 = best_prec1
        data.best_prec3 = best_prec3
        data.best_prec5 = best_prec5
        data.start_epoch = start_epoch
        data.classnames = classnames
        data.model_dict = model_dict
        data.optimizer_dict = optimizer_dict
        data.model_type = model_type

        return data 

    else:
        print("=> no checkpoint found at '{}'".format(filename))

# ...def load_model(filename, useGPU=True)


class ImageLoader():

    def __init__(self, image_sizes):
        # The largest image size is used as the target size in preprocessing;
        # scaling to the final input size is expected to happen inside the model
        self.im_size = [max(image_sizes), max(image_sizes)]
        self.mu_data = [0.5, 0.5, 0.5]
        self.std_data = [0.5, 0.5, 0.5]
        self.brightness = 0.4
        self.contrast = 0.4
        self.saturation = 0.4
        self.hue = 0.25

        # augmentations
        self.center_crop = transforms.CenterCrop((self.im_size[0], self.im_size[1]))
        self.random_crop = transforms.RandomCrop((self.im_size[0], self.im_size[1]), pad_if_needed=True)
        self.scale_aug = transforms.RandomResizedCrop(size=self.im_size[0])
        self.flip_aug = transforms.RandomHorizontalFlip()
        self.flip = transforms.RandomHorizontalFlip(1.0)
        self.color_aug = transforms.ColorJitter(self.brightness, self.contrast, self.saturation, self.hue)
        self.tensor_aug = transforms.ToTensor()
        self.norm_aug = transforms.Normalize(mean=self.mu_data, std=self.std_data)
        self.resize = transforms.Resize((self.im_size[0], self.im_size[1]))
        self.resize_for_crop = transforms.Resize((int(1.14 * self.im_size[0]), int(1.14 * self.im_size[1])))
        self.multi_crop = transforms.TenCrop((self.im_size[0], self.im_size[1]))

    def load_image(self, path):
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
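
The body of load_image() is truncated in both excerpts, but the transforms
built in __init__ suggest a conventional train/eval split. The composition
below is only a sketch under that assumption; the ordering, image size, and
file path are illustrative and not taken from the repository's actual pipeline.

# Hypothetical driver script, assumed to run next to data_loader_cv.py
from PIL import Image
from torchvision import transforms

from data_loader_cv import ImageLoader

loader = ImageLoader(image_sizes=[224])

# Training-style pipeline: random resized crop, flip, color jitter, normalize
train_tf = transforms.Compose([
    loader.scale_aug,
    loader.flip_aug,
    loader.color_aug,
    loader.tensor_aug,
    loader.norm_aug,
])

# Evaluation-style pipeline: deterministic 1.14x resize, then center crop
eval_tf = transforms.Compose([
    loader.resize_for_crop,
    loader.center_crop,
    loader.tensor_aug,
    loader.norm_aug,
])

img = Image.open('example.jpg').convert('RGB')   # placeholder image path
train_tensor = train_tf(img)                     # shape [3, 224, 224]
eval_tensor = eval_tf(img)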



