def __init__()

in optimum/bettertransformer/models/encoder_models.py


    def __init__(self, albert_layer, config):
        r"""
        A simple conversion of the ALBERT layer to its `BetterTransformer` implementation.

        Args:
            albert_layer (`torch.nn.Module`):
                The original ALBERT layer where the weights need to be retrieved.
            config (`transformers.PretrainedConfig`):
                The configuration of the original model.
        """
        # Initialize the BetterTransformer base layer first, then continue up the
        # MRO past `BetterTransformerBaseLayer` to run `nn.Module.__init__`
        super().__init__(config)
        super(BetterTransformerBaseLayer, self).__init__()
        # In_proj layer
        self.in_proj_weight = nn.Parameter(
            torch.cat(
                [
                    albert_layer.attention.query.weight,
                    albert_layer.attention.key.weight,
                    albert_layer.attention.value.weight,
                ]
            )
        )
        self.in_proj_bias = nn.Parameter(
            torch.cat(
                [
                    albert_layer.attention.query.bias,
                    albert_layer.attention.key.bias,
                    albert_layer.attention.value.bias,
                ]
            )
        )

        # Out proj layer
        self.out_proj_weight = albert_layer.attention.dense.weight
        self.out_proj_bias = albert_layer.attention.dense.bias

        # Linear layer 1
        self.linear1_weight = albert_layer.ffn.weight
        self.linear1_bias = albert_layer.ffn.bias

        # Linear layer 2
        self.linear2_weight = albert_layer.ffn_output.weight
        self.linear2_bias = albert_layer.ffn_output.bias

        # Layer norm 1
        self.norm1_eps = albert_layer.attention.LayerNorm.eps
        self.norm1_weight = albert_layer.attention.LayerNorm.weight
        self.norm1_bias = albert_layer.attention.LayerNorm.bias

        # Layer norm 2
        self.norm2_eps = albert_layer.full_layer_layer_norm.eps
        self.norm2_weight = albert_layer.full_layer_layer_norm.weight
        self.norm2_bias = albert_layer.full_layer_layer_norm.bias

        # Model hyper parameters
        self.num_heads = albert_layer.attention.num_attention_heads
        self.embed_dim = albert_layer.attention.all_head_size

        # Last step: set `is_last_layer` to `False` -> it will be set to `True` for the
        # final layer when converting the model
        self.is_last_layer = False

        self.original_layers_mapping = {
            "in_proj_weight": ["attention.query.weight", "attention.key.weight", "attention.value.weight"],
            "in_proj_bias": ["attention.query.bias", "attention.key.bias", "attention.value.bias"],
            "out_proj_weight": "attention.dense.weight",
            "out_proj_bias": "attention.dense.bias",
            "linear1_weight": "ffn.weight",
            "linear1_bias": "ffn.bias",
            "linear2_weight": "ffn_output.weight",
            "linear2_bias": "ffn_output.bias",
            "norm1_eps": "attention.LayerNorm.eps",
            "norm1_weight": "attention.LayerNorm.weight",
            "norm1_bias": "attention.LayerNorm.bias",
            "norm2_eps": "full_layer_layer_norm.eps",
            "norm2_weight": "full_layer_layer_norm.weight",
            "norm2_bias": "full_layer_layer_norm.bias",
        }
        # Per-head dimension, dropout probabilities and activation function, all
        # resolved from the config (`ACT2FN` maps the activation name to a callable)
        self.attention_head_size = config.hidden_size // config.num_attention_heads
        self.attention_probs_dropout_prob = config.attention_probs_dropout_prob
        self.hidden_dropout_prob = config.hidden_dropout_prob
        self.act_fn_callable = ACT2FN[self.act_fn]

        self.validate_bettertransformer()
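
The concatenation of the query, key and value weights into `in_proj_weight` / `in_proj_bias` is what lets a single matmul produce all three projections at once. A minimal, self-contained sketch of that equivalence, using hypothetical standalone `nn.Linear` layers in place of `albert_layer.attention.query`/`key`/`value`:

import torch
import torch.nn.functional as F
from torch import nn

hidden_size = 768
x = torch.randn(4, hidden_size)  # (sequence_length, hidden_size)

# Hypothetical stand-ins for albert_layer.attention.query / key / value
query = nn.Linear(hidden_size, hidden_size)
key = nn.Linear(hidden_size, hidden_size)
value = nn.Linear(hidden_size, hidden_size)

# Fused parameters, built the same way as in_proj_weight / in_proj_bias above
in_proj_weight = torch.cat([query.weight, key.weight, value.weight])
in_proj_bias = torch.cat([query.bias, key.bias, value.bias])

# A single linear call yields Q, K and V stacked along the feature dimension
q, k, v = F.linear(x, in_proj_weight, in_proj_bias).split(hidden_size, dim=-1)

assert torch.allclose(q, query(x), atol=1e-6)
assert torch.allclose(k, key(x), atol=1e-6)
assert torch.allclose(v, value(x), atol=1e-6)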
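
For context, this layer is not meant to be instantiated by hand: the conversion is driven by the `BetterTransformer.transform` entry point, which swaps each eligible ALBERT layer for this implementation. A minimal usage sketch, assuming the `albert-base-v2` checkpoint and that `optimum` and `transformers` are installed:

import torch
from transformers import AutoModel, AutoTokenizer
from optimum.bettertransformer import BetterTransformer

model = AutoModel.from_pretrained("albert-base-v2")
tokenizer = AutoTokenizer.from_pretrained("albert-base-v2")

# Each AlbertLayer is replaced by its BetterTransformer counterpart; the
# __init__ above runs once per converted layer
model = BetterTransformer.transform(model, keep_original_model=False)

inputs = tokenizer("Hello, world!", return_tensors="pt")
with torch.inference_mode():
    outputs = model(**inputs)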