crop_yield_prediction/train_cross_location.py [35:129]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    n_batches = len(train_dataloader)
    sum_loss_dic = {}
    for loss_type in ['loss', 'loss_supervised', 'loss_unsupervised',
                      'l_n', 'l_d', 'l_nd', 'sn_loss', 'tn_loss', 'norm_loss']:
        sum_loss_dic[loss_type] = 0

    for batch_X, batch_y in train_dataloader:
        batch_X, batch_y = prep_data(batch_X, batch_y, cuda)

        # forward
        optimizer.zero_grad()

        emb_triplets, pred = model(batch_X, unsup_weight)

        loss_func = torch.nn.MSELoss()
        loss_supervised = loss_func(pred, batch_y)

        if unsup_weight != 0:
            loss_unsupervised, l_n, l_d, l_nd, sn_loss, tn_loss, norm_loss = triplet_loss(emb_triplets,
                                                                               tilenet_margin, tilenet_l2, tilenet_ltn)

            loss = (1 - unsup_weight) * loss_supervised + unsup_weight * loss_unsupervised
        else:
            loss = loss_supervised

        loss.backward()
        optimizer.step()

        # note keeping
        sum_loss_dic['loss'] += loss.item()
        sum_loss_dic['loss_supervised'] += loss_supervised.item()
        if unsup_weight != 0:
            sum_loss_dic['loss_unsupervised'] += loss_unsupervised.item()
            sum_loss_dic['l_n'] += l_n.item()
            sum_loss_dic['l_d'] += l_d.item()
            sum_loss_dic['l_nd'] += l_nd.item()
            sum_loss_dic['sn_loss'] += sn_loss.item()
            sum_loss_dic['tn_loss'] += tn_loss.item()
            if tilenet_l2 != 0:
                sum_loss_dic['norm_loss'] += norm_loss.item()

    avg_loss_dic = {}
    for loss_type in sum_loss_dic.keys():
        avg_loss_dic[loss_type] = sum_loss_dic[loss_type] / n_batches

    return avg_loss_dic


def cal_performance(prediction, y):
    ''' Compute RMSE, R^2 and Pearson correlation between predictions and
    ground truth, each rounded to 3 decimal places. '''
    mse = mean_squared_error(y, prediction)
    rmse = np.around(sqrt(mse), 3)
    r2 = np.around(r2_score(y, prediction), 3)
    # pearsonr returns (correlation, p-value); keep only the correlation.
    corr = np.around(pearsonr(y, prediction)[0], 3)

    return rmse, r2, corr


def triplet_loss(emb_triplets, margin, l2, ltn):
    """Compute the TileNet-style triplet loss for a batch of embedding
    quadruplets.

    ``emb_triplets`` is indexed [sample, timestep, role, feature]; the four
    roles sliced below appear to be anchor / temporal neighbor / spatial
    neighbor / distant tile (inferred from the variable names -- confirm
    against the model that produces them).

    Returns (loss, l_n, l_d, l_nd, sn_loss, tn_loss, norm_loss), all
    batch-mean scalars (norm_loss is 0 when ``l2`` == 0).
    """
    dim = emb_triplets.shape[-1]
    anchor, temporal, spatial, distant = (emb_triplets[:, :, r, :] for r in range(4))

    # Per-sample Euclidean distance at each timestep, averaged over timesteps.
    def mean_dist(u, v):
        return torch.mean(torch.sqrt(((u - v) ** 2).sum(dim=2)), dim=1)

    l_n = mean_dist(anchor, spatial)
    l_d = -mean_dist(anchor, distant)
    sn_loss = F.relu(l_n + l_d + margin)
    tn_loss = mean_dist(anchor, temporal)

    # Reduce every per-sample term to a scalar over the mini-batch.
    l_n, l_d, sn_loss, tn_loss = (torch.mean(t) for t in (l_n, l_d, sn_loss, tn_loss))
    l_nd = torch.mean(l_n + l_d)

    loss = (1 - ltn) * sn_loss + ltn * tn_loss

    norm_loss = 0
    if l2 != 0:
        # Optional L2 penalty on the embedding norms, normalized by sqrt(dim).
        norms = [torch.sqrt((z ** 2).sum(dim=2))
                 for z in (anchor, spatial, distant, temporal)]
        norm_loss = torch.mean(norms[0] + norms[1] + norms[2] + norms[3]) / (dim ** 0.5)
        loss = loss + l2 * norm_loss

    return loss, l_n, l_d, l_nd, sn_loss, tn_loss, norm_loss


def eval_epoch(model, validation_dataloader, tilenet_margin, tilenet_l2, tilenet_ltn, unsup_weight, cuda):
    ''' Epoch operation in evaluation phase '''

    model.eval()
    if cuda:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -



crop_yield_prediction/train_semi_transformer.py [39:133]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    n_batches = len(train_dataloader)
    sum_loss_dic = {}
    for loss_type in ['loss', 'loss_supervised', 'loss_unsupervised',
                      'l_n', 'l_d', 'l_nd', 'sn_loss', 'tn_loss', 'norm_loss']:
        sum_loss_dic[loss_type] = 0

    for batch_X, batch_y in train_dataloader:
        batch_X, batch_y = prep_data(batch_X, batch_y, cuda)

        # forward
        optimizer.zero_grad()

        emb_triplets, pred = model(batch_X, unsup_weight)

        loss_func = torch.nn.MSELoss()
        loss_supervised = loss_func(pred, batch_y)

        if unsup_weight != 0:
            loss_unsupervised, l_n, l_d, l_nd, sn_loss, tn_loss, norm_loss = triplet_loss(emb_triplets,
                                                                               tilenet_margin, tilenet_l2, tilenet_ltn)

            loss = (1 - unsup_weight) * loss_supervised + unsup_weight * loss_unsupervised
        else:
            loss = loss_supervised

        loss.backward()
        optimizer.step()

        # note keeping
        sum_loss_dic['loss'] += loss.item()
        sum_loss_dic['loss_supervised'] += loss_supervised.item()
        if unsup_weight != 0:
            sum_loss_dic['loss_unsupervised'] += loss_unsupervised.item()
            sum_loss_dic['l_n'] += l_n.item()
            sum_loss_dic['l_d'] += l_d.item()
            sum_loss_dic['l_nd'] += l_nd.item()
            sum_loss_dic['sn_loss'] += sn_loss.item()
            sum_loss_dic['tn_loss'] += tn_loss.item()
            if tilenet_l2 != 0:
                sum_loss_dic['norm_loss'] += norm_loss.item()

    avg_loss_dic = {}
    for loss_type in sum_loss_dic.keys():
        avg_loss_dic[loss_type] = sum_loss_dic[loss_type] / n_batches

    return avg_loss_dic


def cal_performance(prediction, y):
    ''' Compute RMSE, R^2 and Pearson correlation between predictions and
    ground truth, each rounded to 3 decimal places. '''
    mse = mean_squared_error(y, prediction)
    rmse = np.around(sqrt(mse), 3)
    r2 = np.around(r2_score(y, prediction), 3)
    # pearsonr returns (correlation, p-value); keep only the correlation.
    corr = np.around(pearsonr(y, prediction)[0], 3)

    return rmse, r2, corr


def triplet_loss(emb_triplets, margin, l2, ltn):
    """Compute the TileNet-style triplet loss for a batch of embedding
    quadruplets.

    ``emb_triplets`` is indexed [sample, timestep, role, feature]; the four
    roles sliced below appear to be anchor / temporal neighbor / spatial
    neighbor / distant tile (inferred from the variable names -- confirm
    against the model that produces them).

    Returns (loss, l_n, l_d, l_nd, sn_loss, tn_loss, norm_loss), all
    batch-mean scalars (norm_loss is 0 when ``l2`` == 0).
    """
    dim = emb_triplets.shape[-1]
    anchor, temporal, spatial, distant = (emb_triplets[:, :, r, :] for r in range(4))

    # Per-sample Euclidean distance at each timestep, averaged over timesteps.
    def mean_dist(u, v):
        return torch.mean(torch.sqrt(((u - v) ** 2).sum(dim=2)), dim=1)

    l_n = mean_dist(anchor, spatial)
    l_d = -mean_dist(anchor, distant)
    sn_loss = F.relu(l_n + l_d + margin)
    tn_loss = mean_dist(anchor, temporal)

    # Reduce every per-sample term to a scalar over the mini-batch.
    l_n, l_d, sn_loss, tn_loss = (torch.mean(t) for t in (l_n, l_d, sn_loss, tn_loss))
    l_nd = torch.mean(l_n + l_d)

    loss = (1 - ltn) * sn_loss + ltn * tn_loss

    norm_loss = 0
    if l2 != 0:
        # Optional L2 penalty on the embedding norms, normalized by sqrt(dim).
        norms = [torch.sqrt((z ** 2).sum(dim=2))
                 for z in (anchor, spatial, distant, temporal)]
        norm_loss = torch.mean(norms[0] + norms[1] + norms[2] + norms[3]) / (dim ** 0.5)
        loss = loss + l2 * norm_loss

    return loss, l_n, l_d, l_nd, sn_loss, tn_loss, norm_loss


def eval_epoch(model, validation_dataloader, tilenet_margin, tilenet_l2, tilenet_ltn, unsup_weight, cuda):
    ''' Epoch operation in evaluation phase '''

    model.eval()
    if cuda:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -



