def HyperEvaluate()

in codes/rnn_training/Non_transformers_probe.py
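The function parses its hyperparameters with argparse (tolerating the extra flags Ray injects into sys.argv), builds an NLINet from the repo's helpers, and scores a dev file. It assumes file-level imports roughly like the following sketch; getEmbeddingWeights, NLINet, and getScores are repo-local helpers whose import paths are not shown in this excerpt:

import argparse
import os
import sys

import torch

# Repo-local helpers used below -- their modules are not shown in this
# excerpt, so treat these as placeholders:
#   getEmbeddingWeights(vocab, dataset) -> embedding matrix for NLINet
#   NLINet(config, weights=...)         -> encoder + classifier
#   getScores(net, dev=..., dataset=..., samples_file=...) -> evaluation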


def HyperEvaluate(config):
    print(config)
    parser = argparse.ArgumentParser(description='NLI training')
    # Dummy flags so argparse tolerates the arguments Ray injects into
    # sys.argv when it launches a worker process.
    parser.add_argument('--node-ip-address=')  # e.g. 192.168.2.19
    parser.add_argument('--node-manager-port=')
    parser.add_argument('--object-store-name=')
    parser.add_argument('--raylet-name=')  # e.g. /tmp/ray/session_2020-07-15_12-00-45_292745_38156/sockets/raylet
    parser.add_argument('--redis-address=')  # e.g. 192.168.2.19:6379
    parser.add_argument('--config-list=', action='store_true')
    parser.add_argument('--temp-dir=')  # e.g. /tmp/ray
    parser.add_argument('--redis-password=')  # e.g. 5241590000000000

    # ///////// NLI args //////////
    # paths
    parser.add_argument("--nlipath", type=str, default=config['dataset'], help="NLI data (SNLI or MultiNLI)")
    parser.add_argument("--outputdir", type=str, default='savedir_van/', help="Output directory")
    parser.add_argument("--outputmodelname", type=str, default='model.pickle')
    parser.add_argument("--word_emb_path", type=str, default="dataset/GloVe/glove.840B.300d.txt", help="word embedding file path")

    # training
    parser.add_argument("--n_epochs", type=int, default=20)
    parser.add_argument("--batch_size", type=int, default=128)
    parser.add_argument("--dpout_model", type=float, default=0.2, help="encoder dropout")
    parser.add_argument("--dpout_fc", type=float, default=0.2, help="classifier dropout")
    parser.add_argument("--nonlinear_fc", type=float, default=0, help="use nonlinearity in fc")
    parser.add_argument("--optimizer", type=str, default="adam,lr=0.001", help="adam or sgd,lr=0.1")
    parser.add_argument("--lrshrink", type=float, default=5, help="shrink factor for sgd")
    parser.add_argument("--decay", type=float, default=0.99, help="lr decay")
    parser.add_argument("--minlr", type=float, default=1e-10, help="minimum lr")
    parser.add_argument("--max_norm", type=float, default=5., help="max norm (grad clipping)")

    # model
    parser.add_argument("--encoder_type", type=str, default=config['encoder_type'], help="see list of encoders")
    parser.add_argument("--enc_lstm_dim", type=int, default=200, help="encoder nhid dimension")#2048
    parser.add_argument("--n_enc_layers", type=int, default=1, help="encoder num layers")
    parser.add_argument("--fc_dim", type=int, default=200, help="nhid of fc layers")
    parser.add_argument("--n_classes", type=int, default=config['num_classes'], help="entailment/neutral/contradiction")
    parser.add_argument("--pool_type", type=str, default='max', help="max or mean")

    # gpu
    parser.add_argument("--gpu_id", type=int, default=3, help="GPU ID")
    parser.add_argument("--seed", type=int, default=config['seed'], help="seed")

    # data
    parser.add_argument("--word_emb_dim", type=int, default=300, help="word embedding dimension")
    parser.add_argument("--word_emb_type", type=str, default='normal', help="word embedding type, either glove or normal")

    # comet
    # parser.add_argument("--comet_apikey", type=str, default='', help="comet api key")
    # parser.add_argument("--comet_workspace", type=str, default='', help="comet workspace")
    # parser.add_argument("--comet_project", type=str, default='', help="comet project name")
    # parser.add_argument("--comet_disabled", action='store_true', help="if true, disable comet")

    params, _ = parser.parse_known_args()

    exp_folder = os.path.join(params.outputdir, params.nlipath, params.encoder_type, 'exp_seed_{}'.format(params.seed))
    os.makedirs(exp_folder, exist_ok=True)

    # model checkpoint directory
    save_folder_name = os.path.join(exp_folder, 'model')
    os.makedirs(save_folder_name, exist_ok=True)

    # per-example test outputs
    test_sample_folder = os.path.join(exp_folder, 'samples_test')
    os.makedirs(test_sample_folder, exist_ok=True)

    params.outputmodelname = os.path.join(save_folder_name, '{}_model.pkl'.format(params.encoder_type))
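    # Resulting layout (illustrative, with the defaults above and e.g. seed=42):
    #   savedir_van/snli/LSTMEncoder/exp_seed_42/
    #       model/LSTMEncoder_model.pkl
    #       samples_test/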
    # print parameters passed, and all parameters
    print('\ntogrep : {0}\n'.format(sys.argv[1:]))
    print(params)
    pr = vars(params)

    # ex = OfflineExperiment(
    #                 workspace=pr['comet_workspace'],
    #                 project_name=pr['comet_project'],
    #                 disabled=pr['comet_disabled'],
    #                 offline_directory= os.path.join(save_folder_name,'comet_runs'))
    #
    # ex.log_parameters(pr)
    # ex.set_name(pr['encoder_type'])

    """
    SEED
    """
    # np.random.seed(params.seed)
    # torch.manual_seed(params.seed)
    device = 'cpu'
    if torch.cuda.is_available():
        device = 'cuda'
    # torch.cuda.manual_seed(params.seed)
    weights = [2, 2, 2, 0.3, 7, 2, 6]  # note: unused below; NLINet is built with word_vec instead
    vocab_path = 'utils/glove_' + params.nlipath + '_vocab.txt'
    with open(vocab_path) as f:
        vocab = [line.rstrip('\n') for line in f]
    word_vec = getEmbeddingWeights(vocab, params.nlipath)
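    # getEmbeddingWeights presumably maps the one-token-per-line vocab to a
    # GloVe embedding matrix; it is handed to NLINet below as `weights`.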
    n_words_map = {'snli': 25360, 'mnli': 67814}
    config_nli_model = {
        'n_words':      n_words_map[params.nlipath],
        'word_emb_dim': params.word_emb_dim,
        'enc_lstm_dim': params.enc_lstm_dim,
        'n_enc_layers': params.n_enc_layers,
        'dpout_model':  params.dpout_model,
        'dpout_fc':     params.dpout_fc,
        'fc_dim':       params.fc_dim,
        'bsize':        params.batch_size,
        'n_classes':    params.n_classes,
        'pool_type':    params.pool_type,
        'nonlinear_fc': params.nonlinear_fc,
        'encoder_type': params.encoder_type,
        'use_cuda':     device == 'cuda',  # was hard-coded True; follow the detected device
    }

    # model
    encoder_types = ['InferSent', 'BLSTMprojEncoder', 'BGRUlastEncoder',
                     'InnerAttentionMILAEncoder', 'InnerAttentionYANGEncoder',
                     'InnerAttentionNAACLEncoder', 'ConvNetEncoder', 'LSTMEncoder']
    assert params.encoder_type in encoder_types, \
        "encoder_type must be in " + str(encoder_types)
    nli_net = NLINet(config_nli_model, weights=word_vec)

    dev_file = os.path.join('mnli_m_dev_exp', 'gen_mnli_rand_test.csv')
    samples_save_path = os.path.join('mnli_m_dev_exp', params.nlipath + '_m_dev_' + params.encoder_type + '_rand.jsonl')
    getScores(nli_net, dev=dev_file, dataset=params.nlipath, samples_file=samples_save_path)

    return 0
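
A hypothetical call, using only the config keys the function actually reads ('dataset', 'encoder_type', 'num_classes', 'seed'); the values are illustrative:

if __name__ == '__main__':
    HyperEvaluate({
        'dataset': 'snli',              # 'snli' or 'mnli' (keys of n_words_map)
        'encoder_type': 'LSTMEncoder',  # must be one of encoder_types above
        'num_classes': 3,               # entailment / neutral / contradiction
        'seed': 42,
    })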