src/model.py
import numpy as np
import torch.nn as nn

# build_layers (the shared encoder/decoder block builder) is assumed to be
# defined earlier in this file.


class LatentDiscriminator(nn.Module):

    def __init__(self, params):
        super(LatentDiscriminator, self).__init__()

        # architecture hyper-parameters copied from params
        self.img_sz = params.img_sz
        self.img_fm = params.img_fm
        self.init_fm = params.init_fm
        self.max_fm = params.max_fm
        self.n_layers = params.n_layers
        self.n_skip = params.n_skip
        self.hid_dim = params.hid_dim
        self.dropout = params.lat_dis_dropout
        self.attr = params.attr
        self.n_attr = params.n_attr

        # number of conv layers needed to bring the image down to spatial size 1
        self.n_dis_layers = int(np.log2(self.img_sz))
        # spatial size and feature maps of the latent input fed to the discriminator
        # (integer division keeps the size an int)
        self.conv_in_sz = self.img_sz // (2 ** (self.n_layers - self.n_skip))
        self.conv_in_fm = min(self.init_fm * (2 ** (self.n_layers - self.n_skip - 1)), self.max_fm)
        # feature maps output by the last conv block, before projection
        self.conv_out_fm = min(self.init_fm * (2 ** (self.n_dis_layers - 1)), self.max_fm)

        # discriminator layers are identical to the encoder's, but keep convolving
        # until the spatial size is 1; only the blocks after the encoder's output
        # (layer n_layers - n_skip onwards) are kept
        enc_layers, _ = build_layers(self.img_sz, self.img_fm, self.init_fm, self.max_fm,
                                     self.n_dis_layers, self.n_attr, 0, 'convtranspose',
                                     False, self.dropout, 0)
        self.conv_layers = nn.Sequential(*(enc_layers[self.n_layers - self.n_skip:]))

        # project the flattened conv features onto the attribute predictions
        self.proj_layers = nn.Sequential(
            nn.Linear(self.conv_out_fm, self.hid_dim),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Linear(self.hid_dim, self.n_attr)
        )
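
    # A minimal forward-pass sketch (not necessarily the repository's exact code),
    # consistent with the constructor above. It assumes `x` is the encoder's latent
    # activation of shape (batch, conv_in_fm, conv_in_sz, conv_in_sz); the shared
    # conv blocks reduce it to spatial size 1x1, and proj_layers maps the flattened
    # features to n_attr attribute logits.
    def forward(self, x):
        bs = x.size(0)
        conv_output = self.conv_layers(x)              # (bs, conv_out_fm, 1, 1)
        flat = conv_output.view(bs, self.conv_out_fm)  # flatten the 1x1 feature maps
        return self.proj_layers(flat)                  # (bs, n_attr)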