def __init__()

in Model.py [0:0]


    def __init__(self, conf, problem, vocab_info, use_gpu):
        """

        Args:
            inputs: ['string1', 'string2']
            layer_archs:  The layers must produce tensors with similar shapes. The layers may be nested.
                [
                    {
                    'layer': Layer name,
                    'conf': {xxxx}
                    },
                    [
                        {
                        'layer': Layer name,
                        'conf': {},
                        },
                        {
                        'layer': Layer name,
                        'conf': {},
                        }
                    ]
                ]
            vocab_info:
                {
                    'word':  {
                        'vocab_size': xxx,
                        'init_weights': np matrix
                        }
                    'postag': {
                        'vocab_size': xxx,
                        'init_weights': None
                        }
                }
        """
        super(Model, self).__init__()

        inputs = conf.object_inputs_names
        layer_archs = conf.architecture
        target_num = problem.output_target_num()
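        # Illustrative only: a hypothetical conf.architecture shaped around the keys
        # this constructor reads. Entry 0 is the embedding layer ('layer', 'conf');
        # later entries use 'layer_id', 'layer', 'conf', 'inputs' and, for outputs,
        # 'output_layer_flag'. The layer names and dimensions below are made up.
        #   [
        #       {'layer': 'Embedding',
        #        'conf': {'word': {'dim': 300, 'fix_weight': False}}},
        #       {'layer_id': 'sent_encoder', 'layer': 'BiLSTM',
        #        'conf': {'hidden_dim': 128}, 'inputs': ['string1']},
        #       {'layer_id': 'output', 'layer': 'Linear',
        #        'conf': {'hidden_dim': [2]}, 'inputs': ['sent_encoder'],
        #        'output_layer_flag': True}
        #   ]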

        # correct the fixed lengths if begin/end-of-sentence tokens are added
        if conf.fixed_lengths:
            fixed_lengths_corrected = copy.deepcopy(conf.fixed_lengths)
            if problem.with_bos_eos:
                for seq in fixed_lengths_corrected:
                    fixed_lengths_corrected[seq] += 2
        else:
            fixed_lengths_corrected = None
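        # e.g. with a hypothetical conf.fixed_lengths of {'string1': 30} and
        # problem.with_bos_eos True, 'string1' is corrected to 32 (<bos> + 30 + <eos>)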

        self.use_gpu = use_gpu

        all_layer_configs = dict()
        self.layers = nn.ModuleDict()
        self.layer_inputs = dict()
        self.layer_dependencies = dict()
        self.layer_dependencies[EMBED_LAYER_ID] = set()
        # output_layer_id is a list so that multiple output layers are supported
        self.output_layer_id = []

        for layer_index, layer_arch in enumerate(layer_archs):
            output_layer_flag = layer_arch.get('output_layer_flag') is True
            succeed_embedding_flag = layer_index > 0 and 'inputs' in layer_arch and \
                    all(input_name in inputs for input_name in layer_arch['inputs'])
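            # succeed_embedding_flag is True when every name in layer_arch['inputs']
            # is a raw model input (e.g. 'string1'), i.e. the layer consumes the
            # embedding layer's output directly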

            if output_layer_flag:
                self.output_layer_id.append(layer_arch['layer_id'])

            if layer_index == 0:
                # embedding layer
                emb_conf = copy.deepcopy(vocab_info)
                for input_cluster in emb_conf:
                    emb_conf[input_cluster]['dim'] = layer_arch['conf'][input_cluster]['dim']
                    emb_conf[input_cluster]['fix_weight'] = layer_arch['conf'][input_cluster].get('fix_weight', False)
                emb_conf['weight_on_gpu'] = layer_arch.get('weight_on_gpu', True)
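                # e.g. a hypothetical 'word' cluster ends up as
                # {'vocab_size': ..., 'init_weights': ..., 'dim': 300, 'fix_weight': False}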

                all_layer_configs[EMBED_LAYER_ID] = get_conf(EMBED_LAYER_ID, layer_arch['layer'],
                    None, all_layer_configs, inputs, self.use_gpu, conf_dict={'conf': emb_conf},
                    shared_conf=None, succeed_embedding_flag=False, output_layer_flag=output_layer_flag,
                    target_num=target_num, fixed_lengths=fixed_lengths_corrected, target_dict=problem.output_dict)
                self.add_layer(EMBED_LAYER_ID, get_layer(layer_arch['layer'], all_layer_configs[EMBED_LAYER_ID]))
            else:
                if layer_arch['layer'] in self.layers and 'conf' not in layer_arch:
                    # reuse a formerly defined layer (share the same parameters)
                    logging.debug("Layer id: %s; Sharing configuration with layer %s" % (layer_arch['layer_id'], layer_arch['layer']))
                    conf_dict = None
                    shared_conf = all_layer_configs[layer_arch['layer']]
                else:
                    conf_dict = layer_arch['conf']
                    shared_conf = None

                # if the layer is EncoderDecoder, infer the decoder vocabulary size
                if layer_arch['layer'] == 'EncoderDecoder':
                    layer_arch['conf']['decoder_conf']['decoder_vocab_size'] = target_num
                all_layer_configs[layer_arch['layer_id']] = get_conf(layer_arch['layer_id'], layer_arch['layer'],
                    layer_arch['inputs'], all_layer_configs, inputs, self.use_gpu, conf_dict=conf_dict,
                    shared_conf=shared_conf, succeed_embedding_flag=succeed_embedding_flag,
                    output_layer_flag=output_layer_flag, target_num=target_num,
                    fixed_lengths=fixed_lengths_corrected, target_dict=problem.output_dict)

                if layer_arch['layer'] in self.layers and 'conf' not in layer_arch:
                    self.add_layer(layer_arch['layer_id'], self.layers[layer_arch['layer']])
                else:
                    self.add_layer(layer_arch['layer_id'], get_layer(layer_arch['layer'], all_layer_configs[layer_arch['layer_id']]))

                self.layer_inputs[layer_arch['layer_id']] = layer_arch['inputs']

                # register dependencies, except embeddings
                cur_layer_depend = set()
                for layer_depend_id in layer_arch['inputs']:
                    if layer_depend_id not in inputs:
                        cur_layer_depend.add(layer_depend_id)
                self.add_dependency(layer_arch['layer_id'], cur_layer_depend)
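                # e.g. a hypothetical layer with inputs ['string1', 'sent_encoder']
                # depends only on 'sent_encoder'; raw inputs come from the embedding layer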

        logging.debug("Layer dependencies: %s" % repr(self.layer_dependencies))

        if not self.output_layer_id:
            raise ConfigurationError("Please define an output layer")

        self.layer_topological_sequence = self.get_topological_sequence()
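
A minimal sketch of the idea behind get_topological_sequence(), whose actual implementation lives elsewhere in Model.py: a Kahn-style topological sort over the layer_dependencies dict built above. The function name, error handling, and example layer ids below are illustrative assumptions, not the repository's exact code.

    from collections import deque

    def topological_sequence(dependencies):
        """dependencies maps layer_id -> set of layer_ids it depends on."""
        remaining = {layer_id: set(deps) for layer_id, deps in dependencies.items()}
        # layers with no dependencies (e.g. the embedding layer) are ready first
        ready = deque(layer_id for layer_id, deps in remaining.items() if not deps)
        order = []
        while ready:
            layer_id = ready.popleft()
            order.append(layer_id)
            # release every layer whose last remaining dependency was just emitted
            for other_id, deps in remaining.items():
                if layer_id in deps:
                    deps.remove(layer_id)
                    if not deps:
                        ready.append(other_id)
        if len(order) != len(remaining):
            raise ValueError("Cyclic or unresolved layer dependencies")
        return order

    # With hypothetical ids, {'embedding': set(), 'encoder': {'embedding'},
    # 'output': {'encoder'}} yields ['embedding', 'encoder', 'output'].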