in models/01_YoloV5/01_Pytorch/processing.py [0:0]
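# Note: this excerpt assumes the module-level imports `import numpy as np` and
# `import random as rnd`, which are referenced below but not shown here.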
def __init__(self, class_names, input_shape=(640, 640), threshold=0.5, iou_threshold=0.6, keep_ratio=True):
    """Constructor

    Args:
        class_names (list(str)): class names of the model
        input_shape (tuple(int, int)): model's input shape as (height, width); the actual input tensor has shape [1, 3, input_shape[0], input_shape[1]]
        threshold (float): detections with confidence scores below this value are discarded; a lower value may produce more detections
        iou_threshold (float): IOU threshold used for box filtering; a higher value suppresses fewer overlapping boxes and may increase post-processing time
        keep_ratio (bool): if True, non-square input images are made square (preserving their aspect ratio) before being resized to the input size
    """
    self.input_shape = np.array(input_shape)
    self.class_names = class_names
    self.class_count = len(class_names)
    self.feature_count = self.class_count + 5  # outputs per anchor (bbox, confidence and class probabilities)
    self.threshold = threshold
    self.iou_threshold = iou_threshold
    self.keep_ratio = keep_ratio
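    # Strides of the model's three detection layers and the corresponding cell
    # grids, precomputed once so outputs can be decoded without rebuilding them.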
    self.strides = np.array([8., 16., 32.])
    self.grids = [self.__make_grid__(int(self.input_shape[0]/x), int(self.input_shape[1]/x)) for x in self.strides]
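    # Default YOLOv5 anchor boxes as (width, height) pairs in input pixels,
    # one set of three anchors per detection layer.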
    anchors = np.array([
        [[10, 13], [16, 30], [33, 23]],
        [[30, 61], [62, 45], [59, 119]],
        [[116, 90], [156, 198], [373, 326]],
    ])
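    # Number of detection layers, plus the anchors reshaped to
    # (layers, anchors_per_layer, 2) and to a broadcastable grid shape so
    # box decoding can be done with vectorised numpy operations.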
    self.nl = len(anchors)
    a = anchors.copy().astype(np.float32)
    a = a.reshape(self.nl, -1, 2)
    self.anchors = a.copy()
    self.anchor_grid = a.copy().reshape(self.nl, 1, -1, 1, 1, 2)
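    # One random colour per class, used when drawing detections.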
    self.colors = [[rnd.randint(0, 255) for _ in range(3)] for _ in range(self.class_count)]
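
# Usage sketch (illustrative only, not part of the original file): the enclosing
# class name is not visible in this excerpt, so `Processing` below is a
# hypothetical stand-in for it.
#
#   names = ["person", "bicycle", "car"]  # one entry per trained class
#   proc = Processing(names, input_shape=(640, 640), threshold=0.25, iou_threshold=0.45)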