in models/encoderdecoder.py [0:0]
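# The excerpt below omits imports and the definitions of CollapseLayer,
# UnCollapseLayer, and SynthesisLoss, which live elsewhere in this repo. A
# minimal sketch of the two reshaping layers follows, assuming CollapseLayer
# flattens (B, C, H, W) to (B, C*H*W) and UnCollapseLayer inverts that to the
# (C, H, W) shape it is constructed with; the repo's definitions may differ.
import torch
import torch.nn as nn


class CollapseLayer(nn.Module):
    # Assumed behavior: flatten a conv feature map to one vector per sample.
    def forward(self, x):
        return x.view(x.size(0), -1)


class UnCollapseLayer(nn.Module):
    # Assumed behavior: reshape a flat vector back to a (c, h, w) feature map.
    def __init__(self, c, h, w):
        super().__init__()
        self.shape = (c, h, w)

    def forward(self, x):
        return x.view(x.size(0), *self.shape)
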
def __init__(self, opt):
    super().__init__()
    # Encoder: six stride-2 convs halve a 256x256 input down to 4x4 (the
    # trailing comments track the spatial size), then two fully connected
    # layers map the flattened 4*4*512 = 8192 features to a 4096-dim image
    # code. The Linear activations are (B, 4096), so BatchNorm1d is required
    # after them (BatchNorm2d rejects 2-D input).
    self.encoder = nn.Sequential(
        nn.Conv2d(3, 16, 3, 2, padding=1),
        nn.LeakyReLU(0.2),
        nn.BatchNorm2d(16),  # 128x128
        nn.Conv2d(16, 32, 3, 2, padding=1),
        nn.LeakyReLU(0.2),
        nn.BatchNorm2d(32),  # 64x64
        nn.Conv2d(32, 64, 3, 2, padding=1),
        nn.LeakyReLU(0.2),
        nn.BatchNorm2d(64),  # 32x32
        nn.Conv2d(64, 128, 3, 2, padding=1),
        nn.LeakyReLU(0.2),
        nn.BatchNorm2d(128),  # 16x16
        nn.Conv2d(128, 256, 3, 2, padding=1),
        nn.LeakyReLU(0.2),
        nn.BatchNorm2d(256),  # 8x8
        nn.Conv2d(256, 512, 3, 2, padding=1),
        nn.LeakyReLU(0.2),
        nn.BatchNorm2d(512),  # 4x4
        CollapseLayer(),  # (B, 512, 4, 4) -> (B, 8192)
        nn.Linear(8192, 4096),
        nn.LeakyReLU(0.2),
        nn.BatchNorm1d(4096),
        nn.Linear(4096, 4096),
        nn.LeakyReLU(0.2),
        nn.BatchNorm1d(4096),
    )
    # Decoder: consumes the 4096-dim image code concatenated with the 64-dim
    # angle code, reshapes to a 64x8x8 feature map, and five upsample+conv
    # blocks (nearest-neighbor by default) grow it back to 256x256. Tanh
    # bounds the output image to [-1, 1]. As in the encoder, the Linear
    # layers take BatchNorm1d.
    self.decoder = nn.Sequential(
        nn.Linear(4096 + 64, 4096),
        nn.LeakyReLU(0.2),
        nn.BatchNorm1d(4096),
        nn.Linear(4096, 4096),
        nn.LeakyReLU(0.2),
        nn.BatchNorm1d(4096),
        UnCollapseLayer(64, 8, 8),  # (B, 4096) -> (B, 64, 8, 8)
        nn.Conv2d(64, 256, 3, 1, padding=1),
        nn.ReLU(),
        nn.BatchNorm2d(256),
        nn.Upsample(scale_factor=2),  # 16x16
        nn.Conv2d(256, 128, 3, 1, padding=1),
        nn.ReLU(),
        nn.BatchNorm2d(128),
        nn.Upsample(scale_factor=2),  # 32x32
        nn.Conv2d(128, 64, 3, 1, padding=1),
        nn.ReLU(),
        nn.BatchNorm2d(64),
        nn.Upsample(scale_factor=2),  # 64x64
        nn.Conv2d(64, 32, 3, 1, padding=1),
        nn.ReLU(),
        nn.BatchNorm2d(32),
        nn.Upsample(scale_factor=2),  # 128x128
        nn.Conv2d(32, 16, 3, 1, padding=1),
        nn.ReLU(),
        nn.BatchNorm2d(16),
        nn.Upsample(scale_factor=2),  # 256x256
        nn.Conv2d(16, 3, 3, 1, padding=1),
        nn.Tanh(),
    )
    # Angle transformer: embeds the 12-dim viewpoint input into the 64-dim
    # code that is concatenated with the image code before decoding.
    self.angle_transformer = nn.Sequential(
        nn.Linear(12, 64),
        nn.LeakyReLU(0.2),
        nn.BatchNorm1d(64),
        nn.Linear(64, 64),
        nn.LeakyReLU(0.2),
        nn.BatchNorm1d(64),
    )
    # SynthesisLoss is defined elsewhere in the repo and configured from opt.
    self.loss_function = SynthesisLoss(opt=opt)
    self.opt = opt
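
# The forward pass is not part of this excerpt. A minimal, hypothetical sketch
# of how the submodules compose, inferred from the decoder's
# Linear(4096 + 64, 4096) input; the repo's actual forward may differ:
def forward(self, image, angles):
    z = self.encoder(image)             # (B, 3, 256, 256) -> (B, 4096)
    a = self.angle_transformer(angles)  # (B, 12) -> (B, 64)
    # Condition the image code on the encoded viewing angles, then decode.
    return self.decoder(torch.cat([z, a], dim=1))  # -> (B, 3, 256, 256)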