in ar-cnn/model.py [0:0]
def build_model(self):
    """Construct and compile the encoder/decoder (U-Net-style) network.

    Stacks ``self.num_layers`` down-sampling stages, a two-convolution
    bottleneck, and a matching stack of up-sampling stages, each of which
    receives the corresponding encoder output as a skip connection. The
    head is a 1x1 convolution with a linear activation; judging by its
    name, the compiled loss (``Loss.built_in_softmax_kl_loss``) applies
    the softmax itself — confirm against the Loss implementation.

    Returns:
        The compiled Keras model, with pre-trained weights loaded when
        ``self.pre_trained`` is set.
    """
    inputs = Input(self.input_dim)

    # --- encoder: filter count grows by growth_factor per stage -----
    encoder_outputs = []
    stage_input = inputs
    filters = self.num_filters
    for depth in range(self.num_layers):
        conv_out, pooled = self.down_sampling(
            layer_input=stage_input,
            num_filters=filters,
            batch_normalization=self.batch_norm_encoder[depth],
            dropout_rate=self.dropout_rate_encoder[depth])
        encoder_outputs.append(conv_out)
        stage_input = pooled  # pooled output feeds the next stage down
        filters *= self.growth_factor

    # --- bottleneck: two 3x3 convolutions at the deepest level ------
    bottleneck = pooled
    for _ in range(2):
        bottleneck = Conv2D(filters, (3, 3),
                            activation='relu',
                            padding='same')(bottleneck)
    filters //= self.growth_factor

    # --- decoder: mirror the encoder, consuming skip connections ----
    decoder_outputs = []
    current = bottleneck
    for index, skip in enumerate(reversed(encoder_outputs)):
        current = self.up_sampling(
            layer_input=current,
            skip_input=skip,
            num_filters=filters,
            batch_normalization=self.batch_norm_decoder[index],
            dropout_rate=self.dropout_rate_decoder[index])
        decoder_outputs.append(current)
        filters //= self.growth_factor

    # 1x1 projection down to a single channel, linear activation.
    output = Conv2D(1, 1, activation='linear')(decoder_outputs[-1])

    model = Model(inputs=inputs, outputs=output)
    model.compile(
        optimizer=self.get_optimizer(self.optimizer_enum, self.learning_rate),
        loss=Loss.built_in_softmax_kl_loss)
    if self.pre_trained:
        # Warm-start from previously trained weights.
        model.load_weights(self.pre_trained)
    model.summary()
    return model