FasterRCNNDetection/data/coco_camera_traps_dataset.py [131:211]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
        print("The dataset has {} images containing {} classes".format(
                  len(self.image_ids),
                  len(self.classes)))
        
        if max_images is not None:
            print('Selecting a subset of {} images from training and validation'.
                format(max_images))
            self.impaths = self.impaths[:max_images]
            self.image_ids = self.image_ids[:max_images]
            self.bboxes = self.bboxes[:max_images]
            self.labels = self.labels[:max_images]
            
        if opt.dataset == 'oneclass':
            print('Merging all classes to one category')
            for ll in self.labels:
                for l_idx in range(len(ll)):
                    ll[l_idx] = 0
        
                  
        # To make sure we loaded the bboxes correctly:        
        #self.validate_bboxes()
        #print("All checks passed")
            

    def __len__(self):

        return len(self.image_ids)
        

    def validate_bboxes(self):

        import ipdb
        import sys
        import traceback
        from tqdm import tqdm
        try:
            for idx in tqdm(range(len(self.image_ids))):
                img_file = os.path.join(self.root, self.impaths[idx])
                width, height = PIL.Image.open(img_file).size
                # Boxes are stored as (ymin, xmin, ymax, xmax) in pixel coordinates
                for bbox in self.bboxes[idx]:
                    assert bbox[1] <= width   # xmin within image width
                    assert bbox[3] <= width   # xmax within image width
                    assert bbox[0] <= height  # ymin within image height
                    assert bbox[2] <= height  # ymax within image height
                    assert bbox[3] > bbox[1]  # positive box width
                    assert bbox[2] > bbox[0]  # positive box height
                    # All coordinates must be non-negative
                    assert np.all(np.array(bbox) >= 0)
        except Exception:
            # Print the traceback and drop into the ipdb post-mortem debugger
            _, _, tb = sys.exc_info()
            traceback.print_exc()
            ipdb.post_mortem(tb)
            

    def get_class_count(self):

        if opt.dataset == 'oneclass':
            return 1
        else:
            # Labels start from 0, so the number of classes is the maximum label + 1
            return np.max(self.classes).tolist() + 1


    def get_example(self, i):
        """Returns the i-th example.

        Returns a color image and bounding boxes. The image is in CHW format.
        The returned image is RGB.

        Args:
            i (int): The index of the example.

        Returns:
            tuple of an image in CHW format, bounding boxes in
            (ymin, xmin, ymax, xmax) format, labels as int32
            starting from 0, and difficult_flag, which is usually 0.

        """

        img_file = os.path.join(self.root, self.impaths[i])
        img = read_image(img_file, color=True)
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
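
The get_example() docstring above documents the return format. As a minimal usage sketch (not part of the repository; the dataset variable and helper name are hypothetical), a caller could unpack the returned tuple like this:

import numpy as np

def describe_example(dataset, i=0):
    # Unpack the documented return format: CHW RGB image, (ymin, xmin, ymax, xmax)
    # boxes, int32 labels starting from 0, and per-box difficult flags.
    img, bbox, label, difficult = dataset.get_example(i)
    channels, height, width = img.shape
    print('image {}: {}x{} pixels, {} channels'.format(i, height, width, channels))
    for (ymin, xmin, ymax, xmax), lab in zip(np.asarray(bbox), np.asarray(label)):
        print('  class {}: box height {:.0f}, width {:.0f}'.format(
            int(lab), ymax - ymin, xmax - xmin))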



FasterRCNNDetection/data/iwildcam_dataset.py [139:216]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
        print("The dataset has {} images containing {} classes".format(
                  len(self.image_ids),
                  len(self.classes)))
        
        if max_images is not None:
            print('Selecting a subset of {} images from training and validation'.
                format(max_images))
            self.impaths = self.impaths[:max_images]
            self.image_ids = self.image_ids[:max_images]
            self.bboxes = self.bboxes[:max_images]
            self.labels = self.labels[:max_images]
            
        if opt.dataset == 'oneclass':
            print('Merging all classes to one category')
            for ll in self.labels:
                for l_idx in range(len(ll)):
                    ll[l_idx] = 0
                          
        # To make sure we loaded the bboxes correctly:        
        # self.validate_bboxes()
            

    def __len__(self):

        return len(self.image_ids)
        

    def validate_bboxes(self):

        import ipdb
        import sys
        import traceback
        from tqdm import tqdm
        try:
            for idx in tqdm(range(len(self.image_ids))):
                img_file = os.path.join(self.root, self.impaths[idx])
                width, height = PIL.Image.open(img_file).size
                # Boxes are stored as (ymin, xmin, ymax, xmax) in pixel coordinates
                for bbox in self.bboxes[idx]:
                    assert bbox[1] <= width   # xmin within image width
                    assert bbox[3] <= width   # xmax within image width
                    assert bbox[0] <= height  # ymin within image height
                    assert bbox[2] <= height  # ymax within image height
                    assert bbox[3] > bbox[1]  # positive box width
                    assert bbox[2] > bbox[0]  # positive box height
                    # All coordinates must be non-negative
                    assert np.all(np.array(bbox) >= 0)
        except Exception:
            # Print the traceback and drop into the ipdb post-mortem debugger
            _, _, tb = sys.exc_info()
            traceback.print_exc()
            ipdb.post_mortem(tb)
            

    def get_class_count(self):

        if opt.dataset == 'oneclass':
            return 1
        else:
            # Labels start from 0, so the number of classes is the maximum label + 1
            return np.max(self.classes).tolist() + 1


    def get_example(self, i):
        """Returns the i-th example.

        Returns a color image and bounding boxes. The image is in CHW format.
        The returned image is RGB.

        Args:
            i (int): The index of the example.

        Returns:
            tuple of an image in CHW format, bounding boxes in
            (ymin, xmin, ymax, xmax) format, labels as int32
            starting from 0, and difficult_flag, which is usually 0.
        """

        img_file = os.path.join(self.root, self.impaths[i])
        img = read_image(img_file, color=True)
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
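
The bounds checks in validate_bboxes() above encode the same (ymin, xmin, ymax, xmax) convention. As a standalone sketch of that check written as a pure function (an illustration under that assumption, not repository code):

def bbox_is_valid(bbox, width, height):
    # bbox is (ymin, xmin, ymax, xmax) in pixel coordinates of an image
    # that is `width` pixels wide and `height` pixels tall.
    ymin, xmin, ymax, xmax = bbox
    return (0 <= xmin < xmax <= width) and (0 <= ymin < ymax <= height)

# Example: bbox_is_valid((10, 20, 50, 80), width=640, height=480) -> True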



