in vits.py
import torch.nn as nn
from timm.layers import to_2tuple  # in older timm versions: from timm.models.layers import to_2tuple


class ConvStem(nn.Module):
    """Convolutional patch-embedding stem (drop-in replacement for ViT's PatchEmbed),
    following the design in https://arxiv.org/abs/2106.14881."""

    def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768, norm_layer=None, flatten=True):
        super().__init__()
        assert patch_size == 16, 'ConvStem only supports patch size of 16'
        assert embed_dim % 8 == 0, 'Embed dimension must be divisible by 8 for ConvStem'

        img_size = to_2tuple(img_size)
        patch_size = to_2tuple(patch_size)
        self.img_size = img_size
        self.patch_size = patch_size
        self.grid_size = (img_size[0] // patch_size[0], img_size[1] // patch_size[1])
        self.num_patches = self.grid_size[0] * self.grid_size[1]
        self.flatten = flatten

        # Build the stem, similar to the design in https://arxiv.org/abs/2106.14881:
        # four stride-2 3x3 conv/BN/ReLU blocks (total stride 16, matching the patch size),
        # doubling the width each time from embed_dim // 8 up to embed_dim,
        # followed by a 1x1 conv projecting onto embed_dim.
        stem = []
        input_dim, output_dim = in_chans, embed_dim // 8  # use in_chans rather than a hard-coded 3
        for _ in range(4):
            stem.append(nn.Conv2d(input_dim, output_dim, kernel_size=3, stride=2, padding=1, bias=False))
            stem.append(nn.BatchNorm2d(output_dim))
            stem.append(nn.ReLU(inplace=True))
            input_dim = output_dim
            output_dim *= 2
        stem.append(nn.Conv2d(input_dim, embed_dim, kernel_size=1))
        self.proj = nn.Sequential(*stem)

        self.norm = norm_layer(embed_dim) if norm_layer else nn.Identity()
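
The excerpt above only shows the constructor. Below is a minimal forward-pass sketch, assuming the standard timm PatchEmbed-style BCHW -> BNC flatten/transpose that `self.flatten` and `self.norm` imply (the exact forward in vits.py may differ), followed by a hypothetical shape check.

    def forward(self, x):
        # Sketch of a PatchEmbed-style forward pass (assumed, not shown in the excerpt).
        _, _, H, W = x.shape
        assert H == self.img_size[0] and W == self.img_size[1], \
            f"Input size ({H}x{W}) doesn't match model ({self.img_size[0]}x{self.img_size[1]})."
        x = self.proj(x)                      # (B, embed_dim, H/16, W/16)
        if self.flatten:
            x = x.flatten(2).transpose(1, 2)  # (B, num_patches, embed_dim)
        return self.norm(x)


if __name__ == "__main__":
    # Hypothetical smoke test: a 224x224 image yields 14x14 = 196 tokens of width 768.
    import torch
    stem = ConvStem(img_size=224, patch_size=16, embed_dim=768)
    tokens = stem(torch.randn(1, 3, 224, 224))
    print(tokens.shape)  # expected: torch.Size([1, 196, 768])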