in src/chug/image/transforms_torch.py
def __call__(self, img):
    # Sample the max-filter kernel size (expected to be odd) for this call.
    kernel_size = self.get_params(self.scale)
    if isinstance(img, torch.Tensor):
        # Max pooling with stride 1 and symmetric padding acts as a grayscale
        # dilation (max filter); an odd kernel size preserves the spatial size.
        padding = kernel_size // 2
        img = torch.nn.functional.max_pool2d(
            img, kernel_size=kernel_size, stride=1, padding=padding)
    elif isinstance(img, Image.Image):
        # PIL equivalent of the max filter; MaxFilter requires an odd size.
        img = img.filter(ImageFilter.MaxFilter(kernel_size))
    return img
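
For context, here is a minimal, self-contained sketch of how such a transform might be wrapped into a class. The class name RandomMaxFilter, the meaning of scale as an inclusive (low, high) kernel-size range, and the odd-size sampling in get_params are assumptions for illustration, not chug's actual implementation.

import random

import torch
import torch.nn.functional as F
from PIL import Image, ImageFilter


class RandomMaxFilter:  # hypothetical name; not necessarily chug's class
    """Apply a max filter (grayscale dilation) with a randomly sampled kernel size."""

    def __init__(self, scale=(3, 7)):
        # `scale` is assumed to bound the sampled kernel size, inclusive.
        self.scale = scale

    def get_params(self, scale):
        # Sample an odd kernel size so spatial size is preserved by max_pool2d
        # and so PIL's MaxFilter accepts it.
        low, high = scale
        return random.randrange(low | 1, high + 1, 2)

    def __call__(self, img):
        kernel_size = self.get_params(self.scale)
        if isinstance(img, torch.Tensor):
            padding = kernel_size // 2
            img = F.max_pool2d(img, kernel_size=kernel_size, stride=1, padding=padding)
        elif isinstance(img, Image.Image):
            img = img.filter(ImageFilter.MaxFilter(kernel_size))
        return img


# Usage: the same transform handles a CHW tensor or a PIL image.
dilate = RandomMaxFilter(scale=(3, 7))
tensor_out = dilate(torch.rand(3, 64, 64))
pil_out = dilate(Image.new("RGB", (64, 64)))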