In src/speech_reps/models/bertphone.py:
# NOTE: the import and class statement below are reconstructed so the snippet is
# self-contained; the base class is assumed to be GluonNLP's PositionwiseFFN,
# which accepts the extra BERT keyword arguments forwarded in the super() call.
from gluonnlp.model.transformer import PositionwiseFFN


class BERTPositionwiseFFN(PositionwiseFFN):
    """Position-wise feed-forward block configured for BERT:
    GELU activation and BERT-style layer normalization."""

    def __init__(self, units=512, hidden_size=2048, dropout=0.0, use_residual=True,
                 weight_initializer=None, bias_initializer='zeros',
                 prefix=None, params=None, activation='gelu', layer_norm_eps=None):
        super(BERTPositionwiseFFN, self).__init__(
            units=units, hidden_size=hidden_size,
            dropout=dropout, use_residual=use_residual,
            weight_initializer=weight_initializer,
            bias_initializer=bias_initializer,
            prefix=prefix, params=params,
            # extra configurations for BERT
            activation=activation,
            use_bert_layer_norm=True,
            layer_norm_eps=layer_norm_eps)
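
For orientation, a minimal usage sketch (not part of bertphone.py): it assumes MXNet is installed and that BERTPositionwiseFFN behaves like GluonNLP's PositionwiseFFN, i.e. a HybridBlock that maps a (batch, length, units) tensor to a tensor of the same shape. The hyper-parameters and shapes below are illustrative only.

# Hypothetical usage sketch, under the assumptions stated above.
import mxnet as mx

ffn = BERTPositionwiseFFN(units=512, hidden_size=2048, dropout=0.1,
                          activation='gelu', layer_norm_eps=1e-12)
ffn.initialize(init=mx.init.Xavier())
x = mx.nd.random.normal(shape=(2, 10, 512))  # (batch, seq_len, units)
y = ffn(x)                                   # residual + layer norm applied inside
print(y.shape)                               # expected: (2, 10, 512)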