sockeye/train.py [514:605]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
        model_size=decoder_transformer_model_size,
        attention_heads=args.transformer_attention_heads[1],
        feed_forward_num_hidden=args.transformer_feed_forward_num_hidden[1],
        act_type=args.transformer_activation_type[1],
        num_layers=decoder_num_layers,
        dropout_attention=args.transformer_dropout_attention[1],
        dropout_act=args.transformer_dropout_act[1],
        dropout_prepost=args.transformer_dropout_prepost[1],
        positional_embedding_type=args.transformer_positional_embedding_type,
        preprocess_sequence=decoder_transformer_preprocess,
        postprocess_sequence=decoder_transformer_postprocess,
        max_seq_len_source=max_seq_len_source,
        max_seq_len_target=max_seq_len_target,
        use_lhuc=args.lhuc is not None and (C.LHUC_DECODER in args.lhuc or C.LHUC_ALL in args.lhuc),
        depth_key_value=encoder_num_hidden,
        decoder_type=args.decoder,
        use_glu=args.transformer_feed_forward_use_glu)

    return config_decoder


def get_num_embed(args: argparse.Namespace) -> Tuple[int, int]:
    num_embed_source, num_embed_target = args.num_embed

    transformer_model_size_source = args.transformer_model_size[0]
    if not num_embed_source:
        logger.info("Source embedding size was not set it will automatically be adjusted to match the "
                    "Transformer source model size (%d).", transformer_model_size_source)
        num_embed_source = transformer_model_size_source
    else:
        check_condition(args.transformer_model_size[0] == num_embed_source,
                        "Source embedding size must match transformer model size: %s vs. %s"
                        % (args.transformer_model_size[0], num_embed_source))

    total_source_factor_size = 0
    for factor_combine, factor_size in zip(args.source_factors_combine, args.source_factors_num_embed):
        if factor_combine == C.FACTORS_COMBINE_CONCAT:
            total_source_factor_size += factor_size
    if total_source_factor_size > 0:
        adjusted_transformer_encoder_model_size = num_embed_source + total_source_factor_size
        check_condition(adjusted_transformer_encoder_model_size % 2 == 0 and
                        adjusted_transformer_encoder_model_size % args.transformer_attention_heads[0] == 0,
                        "Sum of source factor sizes, i.e. num-embed plus source-factors-num-embed, (%d) "
                        "has to be even and a multiple of encoder attention heads (%d)" % (
                            adjusted_transformer_encoder_model_size, args.transformer_attention_heads[0]))

    if not num_embed_source:
        num_embed_source = C.DEFAULT_NUM_EMBED

    transformer_model_size_target = args.transformer_model_size[1]
    total_target_factor_size = 0
    for factor_combine, factor_size in zip(args.target_factors_combine, args.target_factors_num_embed):
        if factor_combine == C.FACTORS_COMBINE_CONCAT:
            total_target_factor_size += factor_size

    if not num_embed_target:
        logger.info("Target embedding size was not set it will automatically be adjusted to match the "
                    "Transformer target model size (%d).", transformer_model_size_target)
        num_embed_target = transformer_model_size_target
    else:
        # Make sure that if the user sets num_embed, it matches the Transformer model size
        # (including the sizes of any concatenated target factor embeddings).
        check_condition(args.transformer_model_size[1] == num_embed_target + total_target_factor_size,
                        "Target embedding size plus target factor sizes must match the transformer "
                        "model size: %s vs. %s"
                        % (args.transformer_model_size[1], num_embed_target + total_target_factor_size))

    if total_target_factor_size > 0:
        adjusted_transformer_decoder_model_size = num_embed_target + total_target_factor_size
        check_condition(adjusted_transformer_decoder_model_size % 2 == 0 and
                        adjusted_transformer_decoder_model_size % args.transformer_attention_heads[1] == 0,
                        "Sum of num-embed and target-factors-num-embed (%d) "
                        "has to be even and a multiple of the number of decoder attention heads (%d)" % (
                            adjusted_transformer_decoder_model_size, args.transformer_attention_heads[1]))
        # Whenever the input embedding weight is reused for the output layer (weight tying), we cannot
        # use 'concat' as the method of combining target factor embeddings with the regular target
        # input embedding: num_embed_target + factor_sizes = transformer_model_size, so the output
        # layer receives inputs of size transformer_model_size while its tied parameters only have
        # size num_embed_target.
        check_condition(C.WEIGHT_TYING_SOFTMAX not in args.weight_tying_type,
                        "Cannot use weight tying of target input and output embeddings when target factors "
                        "are defined and to be combined via 'concat'. Use 'sum' instead or disable "
                        "weight tying")

    if not num_embed_target:
        num_embed_target = C.DEFAULT_NUM_EMBED

    return num_embed_source, num_embed_target


def create_model_config(args: argparse.Namespace,
                        source_vocab_sizes: List[int],
                        target_vocab_sizes: List[int],
                        max_seq_len_source: int,
                        max_seq_len_target: int,
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
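
For reference, the divisibility check that both copies of get_num_embed apply to concatenated source factors reduces to simple arithmetic: the effective encoder input size is num_embed_source plus the sizes of all factors combined via 'concat', and that sum must be even and divisible by the number of encoder attention heads. Below is a minimal sketch with hypothetical values (a 504-dimensional source embedding, one 8-dimensional concatenated factor, 8 encoder attention heads); the variable names are illustrative and not taken from either file:

    num_embed_source = 504          # --num-embed (source side), hypothetical value
    source_factor_sizes = [8]       # --source-factors-num-embed, combined via 'concat'
    encoder_attention_heads = 8     # --transformer-attention-heads (encoder side), hypothetical

    adjusted_encoder_model_size = num_embed_source + sum(source_factor_sizes)  # 512
    assert adjusted_encoder_model_size % 2 == 0
    assert adjusted_encoder_model_size % encoder_attention_heads == 0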



sockeye/train_pt.py [491:582]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
        model_size=decoder_transformer_model_size,
        attention_heads=args.transformer_attention_heads[1],
        feed_forward_num_hidden=args.transformer_feed_forward_num_hidden[1],
        act_type=args.transformer_activation_type[1],
        num_layers=decoder_num_layers,
        dropout_attention=args.transformer_dropout_attention[1],
        dropout_act=args.transformer_dropout_act[1],
        dropout_prepost=args.transformer_dropout_prepost[1],
        positional_embedding_type=args.transformer_positional_embedding_type,
        preprocess_sequence=decoder_transformer_preprocess,
        postprocess_sequence=decoder_transformer_postprocess,
        max_seq_len_source=max_seq_len_source,
        max_seq_len_target=max_seq_len_target,
        use_lhuc=args.lhuc is not None and (C.LHUC_DECODER in args.lhuc or C.LHUC_ALL in args.lhuc),
        depth_key_value=encoder_num_hidden,
        decoder_type=args.decoder,
        use_glu=args.transformer_feed_forward_use_glu)

    return config_decoder


def get_num_embed(args: argparse.Namespace) -> Tuple[int, int]:
    num_embed_source, num_embed_target = args.num_embed

    transformer_model_size_source = args.transformer_model_size[0]
    if not num_embed_source:
        logger.info("Source embedding size was not set it will automatically be adjusted to match the "
                    "Transformer source model size (%d).", transformer_model_size_source)
        num_embed_source = transformer_model_size_source
    else:
        check_condition(args.transformer_model_size[0] == num_embed_source,
                        "Source embedding size must match transformer model size: %s vs. %s"
                        % (args.transformer_model_size[0], num_embed_source))

    total_source_factor_size = 0
    for factor_combine, factor_size in zip(args.source_factors_combine, args.source_factors_num_embed):
        if factor_combine == C.FACTORS_COMBINE_CONCAT:
            total_source_factor_size += factor_size
    if total_source_factor_size > 0:
        adjusted_transformer_encoder_model_size = num_embed_source + total_source_factor_size
        check_condition(adjusted_transformer_encoder_model_size % 2 == 0 and
                        adjusted_transformer_encoder_model_size % args.transformer_attention_heads[0] == 0,
                        "Sum of source factor sizes, i.e. num-embed plus source-factors-num-embed, (%d) "
                        "has to be even and a multiple of encoder attention heads (%d)" % (
                            adjusted_transformer_encoder_model_size, args.transformer_attention_heads[0]))

    if not num_embed_source:
        num_embed_source = C.DEFAULT_NUM_EMBED

    transformer_model_size_target = args.transformer_model_size[1]
    total_target_factor_size = 0
    for factor_combine, factor_size in zip(args.target_factors_combine, args.target_factors_num_embed):
        if factor_combine == C.FACTORS_COMBINE_CONCAT:
            total_target_factor_size += factor_size

    if not num_embed_target:
        logger.info("Target embedding size was not set it will automatically be adjusted to match the "
                    "Transformer target model size (%d).", transformer_model_size_target)
        num_embed_target = transformer_model_size_target
    else:
        # Make sure that if the user sets num_embed, it matches the Transformer model size
        # (including the sizes of any concatenated target factor embeddings).
        check_condition(args.transformer_model_size[1] == num_embed_target + total_target_factor_size,
                        "Target embedding size plus target factor sizes must match the transformer "
                        "model size: %s vs. %s"
                        % (args.transformer_model_size[1], num_embed_target + total_target_factor_size))

    if total_target_factor_size > 0:
        adjusted_transformer_decoder_model_size = num_embed_target + total_target_factor_size
        check_condition(adjusted_transformer_decoder_model_size % 2 == 0 and
                        adjusted_transformer_decoder_model_size % args.transformer_attention_heads[1] == 0,
                        "Sum of num-embed and target-factors-num-embed (%d) "
                        "has to be even and a multiple of the number of decoder attention heads (%d)" % (
                            adjusted_transformer_decoder_model_size, args.transformer_attention_heads[1]))
        # Whenever the input embedding weight is reused for the output layer (weight tying), we cannot
        # use 'concat' as the method of combining target factor embeddings with the regular target
        # input embedding: num_embed_target + factor_sizes = transformer_model_size, so the output
        # layer receives inputs of size transformer_model_size while its tied parameters only have
        # size num_embed_target.
        check_condition(C.WEIGHT_TYING_SOFTMAX not in args.weight_tying_type,
                        "Cannot use weight tying of target input and output embeddings when target factors "
                        "are defined and to be combined via 'concat'. Use 'sum' instead or disable "
                        "weight tying")

    if not num_embed_target:
        num_embed_target = C.DEFAULT_NUM_EMBED

    return num_embed_source, num_embed_target


def create_model_config(args: argparse.Namespace,
                        source_vocab_sizes: List[int],
                        target_vocab_sizes: List[int],
                        max_seq_len_source: int,
                        max_seq_len_target: int,
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
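
The target-side branch duplicated in both files also encodes why 'concat' factor combination cannot be used together with tied input/output embeddings (C.WEIGHT_TYING_SOFTMAX): the decoder hands the output layer vectors of size transformer_model_size = num_embed_target + factor_sizes, while a tied output projection reuses the input embedding weight, whose vectors only have num_embed_target dimensions. A minimal sketch of that size mismatch with hypothetical values follows; the names are illustrative, not from either file:

    num_embed_target = 504                  # tied input/output embedding size, hypothetical value
    target_factor_sizes = [8]               # --target-factors-num-embed, combined via 'concat'
    model_size = num_embed_target + sum(target_factor_sizes)   # decoder model size: 512

    # The output layer consumes decoder states of size model_size (512), but the tied
    # embedding weight only provides num_embed_target (504) dimensions per vocabulary item,
    # so the shapes cannot line up and the check rejects this configuration.
    print(model_size == num_embed_target)   # False

Combining target factors via 'sum' keeps the embedding size at num_embed_target, so the tied weight and the decoder output size match again and weight tying remains possible, which is what the error message suggests.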



