codes/rnn_training/train_nli_ray.py [145:229]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    train_acc = round(100 * correct / total_samples, 2)
    print("results : epoch {0} ; mean accuracy train : {1}".format(epoch, train_acc))
    # ex.log_metric('train_accuracy', train_acc, step=epoch)
    return train_acc


def evaluate(
    nli_net,
    valid_iter,
    optimizer,
    epoch,
    train_config,
    params,
    eval_type="valid",
    test_folder=None,
    inv_label=None,
    itos_vocab=None,
    final_eval=False,
):
    """Run ``nli_net`` in eval mode over ``valid_iter`` and count correct predictions.

    When ``eval_type == "test"``, each example (both detokenized sentences, the
    gold label, and the predicted label) is additionally appended to
    ``<test_folder>/samples.txt`` under a file lock, so concurrent workers do
    not interleave their writes.

    Args:
        nli_net: NLI model; called as ``nli_net((s1, s1_len), (s2, s2_len))``
            and expected to return ``(logits, (s1_out, s2_out))``.
        valid_iter: iterable of batches exposing ``.Sentence1`` and
            ``.Sentence2`` (each a ``(token_tensor, lengths)`` pair) and
            ``.Label``.
        optimizer: unused in the portion of the function visible in this chunk.
        epoch: current epoch number; used only for logging here.
        train_config: unused in the portion of the function visible in this chunk.
        params: unused in the portion of the function visible in this chunk.
        eval_type: ``"valid"`` or ``"test"``; controls logging and sample dumping.
        test_folder: directory that receives ``samples.txt`` (test mode only).
        inv_label: mapping from label index to label string (test mode only).
        itos_vocab: mapping from token index to token string (test mode only).
        final_eval: unused in the portion of the function visible in this chunk.

    NOTE(review): this chunk is truncated at the slice boundary — the accuracy
    computation and return value are not visible here.
    """
    nli_net.eval()
    correct = 0.0
    # These start out as lists but are rebound to per-example strings inside
    # the test-mode loop below; the list initialisations are effectively dead.
    test_prediction = []
    s1 = []
    s2 = []
    target = []

    if eval_type == "valid":
        print("\nVALIDATION : Epoch {0}".format(epoch))
    total_samples = 0
    for i, batch in enumerate(valid_iter):
        # prepare batch
        s1_batch, s1_len = batch.Sentence1
        s2_batch, s2_len = batch.Sentence2
        s1_batch, s2_batch = (
            Variable(s1_batch.to(device)),
            Variable(s2_batch.to(device)),
        )
        tgt_batch = batch.Label.to(device)
        # Batch size is read from dim 1, i.e. the token tensors appear to be
        # laid out time-major: (seq_len, batch) — TODO confirm against the
        # iterator's batch_first setting.
        total_samples += s1_batch.size(1)

        # model forward
        output, (s1_out, s2_out) = nli_net((s1_batch, s1_len), (s2_batch, s2_len))

        # Argmax over the class dimension -> predicted label indices.
        pred = output.data.max(1)[1]
        correct += pred.long().eq(tgt_batch.data.long()).cpu().sum().item()

        if eval_type == "test":
            for b_index in range(len(batch)):
                test_prediction = inv_label[pred[b_index].item()]
                # Detokenize column b_index up to its recorded length;
                # "Ġ" is presumably a BPE word-boundary marker mapped back to
                # a space — TODO confirm which tokenizer produced the vocab.
                s1 = "".join(
                    [
                        itos_vocab[idx.item()]
                        for idx in batch.Sentence1[0][
                            : batch.Sentence1[1][b_index], b_index
                        ]
                    ]
                ).replace("Ġ", " ")
                s2 = "".join(
                    [
                        itos_vocab[idx.item()]
                        for idx in batch.Sentence2[0][
                            : batch.Sentence2[1][b_index], b_index
                        ]
                    ]
                ).replace("Ġ", " ")
                target = inv_label[batch.Label[b_index]]
                res_file = os.path.join(test_folder, "samples.txt")
                # Lock file lives next to samples.txt and guards the append
                # below against concurrent writers.
                lock = FileLock(os.path.join(test_folder, "samples.txt.new.lock"))
                with lock:
                    with open(res_file, "a") as f:
                        f.write(
                            "S1: "
                            + s1
                            + "\n"
                            + "S2: "
                            + s2
                            + "\n"
                            + "Target: "
                            + target
                            + "\n"
                            + "Predicted: "
                            + test_prediction
                            + "\n\n"
                        )
                    # NOTE(review): explicit release inside ``with lock:`` looks
                    # redundant — the context manager releases on exit. Verify
                    # against the FileLock implementation in use.
                    lock.release()
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -



codes/rnn_training/train_nli_w2v.py [144:228]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    train_acc = round(100 * correct / total_samples, 2)
    print("results : epoch {0} ; mean accuracy train : {1}".format(epoch, train_acc))
    # ex.log_metric('train_accuracy', train_acc, step=epoch)
    return train_acc


def evaluate(
    nli_net,
    valid_iter,
    optimizer,
    epoch,
    train_config,
    params,
    eval_type="valid",
    test_folder=None,
    inv_label=None,
    itos_vocab=None,
    final_eval=False,
):
    """Run ``nli_net`` in eval mode over ``valid_iter`` and count correct predictions.

    When ``eval_type == "test"``, each example (both detokenized sentences, the
    gold label, and the predicted label) is additionally appended to
    ``<test_folder>/samples.txt`` under a file lock, so concurrent workers do
    not interleave their writes.

    Args:
        nli_net: NLI model; called as ``nli_net((s1, s1_len), (s2, s2_len))``
            and expected to return ``(logits, (s1_out, s2_out))``.
        valid_iter: iterable of batches exposing ``.Sentence1`` and
            ``.Sentence2`` (each a ``(token_tensor, lengths)`` pair) and
            ``.Label``.
        optimizer: unused in the portion of the function visible in this chunk.
        epoch: current epoch number; used only for logging here.
        train_config: unused in the portion of the function visible in this chunk.
        params: unused in the portion of the function visible in this chunk.
        eval_type: ``"valid"`` or ``"test"``; controls logging and sample dumping.
        test_folder: directory that receives ``samples.txt`` (test mode only).
        inv_label: mapping from label index to label string (test mode only).
        itos_vocab: mapping from token index to token string (test mode only).
        final_eval: unused in the portion of the function visible in this chunk.

    NOTE(review): this chunk is truncated at the slice boundary — the accuracy
    computation and return value are not visible here. This function is a
    near-verbatim duplicate of ``evaluate`` in train_nli_ray.py; consider
    extracting a shared helper.
    """
    nli_net.eval()
    correct = 0.0
    # These start out as lists but are rebound to per-example strings inside
    # the test-mode loop below; the list initialisations are effectively dead.
    test_prediction = []
    s1 = []
    s2 = []
    target = []

    if eval_type == "valid":
        print("\nVALIDATION : Epoch {0}".format(epoch))
    total_samples = 0
    for i, batch in enumerate(valid_iter):
        # prepare batch
        s1_batch, s1_len = batch.Sentence1
        s2_batch, s2_len = batch.Sentence2
        s1_batch, s2_batch = (
            Variable(s1_batch.to(device)),
            Variable(s2_batch.to(device)),
        )
        tgt_batch = batch.Label.to(device)
        # Batch size is read from dim 1, i.e. the token tensors appear to be
        # laid out time-major: (seq_len, batch) — TODO confirm against the
        # iterator's batch_first setting.
        total_samples += s1_batch.size(1)

        # model forward
        output, (s1_out, s2_out) = nli_net((s1_batch, s1_len), (s2_batch, s2_len))

        # Argmax over the class dimension -> predicted label indices.
        pred = output.data.max(1)[1]
        correct += pred.long().eq(tgt_batch.data.long()).cpu().sum().item()

        if eval_type == "test":
            for b_index in range(len(batch)):
                test_prediction = inv_label[pred[b_index].item()]
                # Detokenize column b_index up to its recorded length;
                # "Ġ" is presumably a BPE word-boundary marker mapped back to
                # a space — TODO confirm which tokenizer produced the vocab.
                s1 = "".join(
                    [
                        itos_vocab[idx.item()]
                        for idx in batch.Sentence1[0][
                            : batch.Sentence1[1][b_index], b_index
                        ]
                    ]
                ).replace("Ġ", " ")
                s2 = "".join(
                    [
                        itos_vocab[idx.item()]
                        for idx in batch.Sentence2[0][
                            : batch.Sentence2[1][b_index], b_index
                        ]
                    ]
                ).replace("Ġ", " ")
                target = inv_label[batch.Label[b_index]]
                res_file = os.path.join(test_folder, "samples.txt")
                # Lock file lives next to samples.txt and guards the append
                # below against concurrent writers.
                lock = FileLock(os.path.join(test_folder, "samples.txt.new.lock"))
                with lock:
                    with open(res_file, "a") as f:
                        f.write(
                            "S1: "
                            + s1
                            + "\n"
                            + "S2: "
                            + s2
                            + "\n"
                            + "Target: "
                            + target
                            + "\n"
                            + "Predicted: "
                            + test_prediction
                            + "\n\n"
                        )
                    # NOTE(review): explicit release inside ``with lock:`` looks
                    # redundant — the context manager releases on exit. Verify
                    # against the FileLock implementation in use.
                    lock.release()
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -



