CnnLstmModel.__init__()

in iep/models/baselines.py
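
This constructor assembles the three pieces of the CNN+LSTM baseline: an LstmEncoder over question tokens, a small convolutional stem over precomputed image features, and an MLP classifier over the concatenation of the two resulting feature vectors.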


  def __init__(self, vocab,
               rnn_wordvec_dim=300, rnn_dim=256, rnn_num_layers=2, rnn_dropout=0,
               cnn_feat_dim=(1024,14,14),
               cnn_res_block_dim=128, cnn_num_res_blocks=0,
               cnn_proj_dim=512, cnn_pooling='maxpool2',
               fc_dims=(1024,), fc_use_batchnorm=False, fc_dropout=0):
    super(CnnLstmModel, self).__init__()

    # Question encoder: embeds question tokens and runs them through an
    # LSTM, producing a single rnn_dim-dimensional question vector.
    rnn_kwargs = {
      'token_to_idx': vocab['question_token_to_idx'],
      'wordvec_dim': rnn_wordvec_dim,
      'rnn_dim': rnn_dim,
      'rnn_num_layers': rnn_num_layers,
      'rnn_dropout': rnn_dropout,
    }
    self.rnn = LstmEncoder(**rnn_kwargs)

    # Image branch: a small CNN stem over precomputed image features
    # (1024 x 14 x 14 by default). build_cnn also returns the output
    # shape (C, H, W), which is needed to size the classifier below.
    cnn_kwargs = {
      'feat_dim': cnn_feat_dim,
      'res_block_dim': cnn_res_block_dim,
      'num_res_blocks': cnn_num_res_blocks,
      'proj_dim': cnn_proj_dim,
      'pooling': cnn_pooling,
    }
    self.cnn, (C, H, W) = build_cnn(**cnn_kwargs)

    # Classifier: an MLP over the flattened CNN output (C * H * W values)
    # concatenated with the rnn_dim question encoding, producing one score
    # per answer token in the vocabulary.
    classifier_kwargs = {
      'input_dim': C * H * W + rnn_dim,
      'hidden_dims': fc_dims,
      'output_dim': len(vocab['answer_token_to_idx']),
      'use_batchnorm': fc_use_batchnorm,
      'dropout': fc_dropout,
    }
    self.classifier = build_mlp(**classifier_kwargs)
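
For orientation, here is a minimal usage sketch. The vocab contents, batch shapes, and the manual forward pass below are illustrative assumptions inferred from this constructor alone; the model's actual forward method is defined elsewhere in CnnLstmModel, and real vocabularies are built from the CLEVR question files.

```python
import torch

# Toy vocab (assumption: only the two keys used by the constructor matter here).
vocab = {
  'question_token_to_idx': {'<NULL>': 0, 'what': 1, 'color': 2},
  'answer_token_to_idx': {'yes': 0, 'no': 1, 'red': 2},
}
model = CnnLstmModel(vocab)

questions = torch.zeros(8, 20, dtype=torch.long)  # (batch, seq_len) token ids
feats = torch.randn(8, 1024, 14, 14)              # precomputed image features

# Manual pass mirroring the constructor's wiring (assumption: the real
# forward method performs the same encode / flatten / concat / classify).
q_feats = model.rnn(questions)                    # (8, rnn_dim)
img_feats = model.cnn(feats)                      # (8, C, H, W)
cat_feats = torch.cat([q_feats, img_feats.view(8, -1)], dim=1)
scores = model.classifier(cat_feats)              # (8, num_answers)
```

With the defaults, if 'maxpool2' denotes a 2x2 max pool (halving the 14x14 grid to 7x7) and cnn_proj_dim projects the 1024 input channels down to 512, the classifier would see 512 * 7 * 7 + 256 = 25,344 input features; the exact figure depends on what build_cnn does with these options.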