baseline_model/data_utils/train_sim.py [52:74]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def pk_sim(h, p, k):
    h: torch.Tensor  # (p * k, dim)

    # All-pairs cosine similarity: pair every row of h with every other row,
    # then reshape back into a (p*k, p*k) similarity matrix.
    X = h.repeat_interleave(p * k, dim=0)
    Y = h.repeat(p * k, 1)
    sim = torch.cosine_similarity(X, Y)
    sim = sim.view(p * k, p * k)

    # Class label per row: k consecutive embeddings belong to the same class.
    class_ids = torch.arange(p).repeat_interleave(k)
    # Strict upper triangle so each unordered pair is counted exactly once.
    inds = torch.triu_indices(p * k, p * k, offset=1)
    sim = sim[inds[0], inds[1]]
    # Split pair similarities into same-class (positive) and cross-class (negative).
    positive = (class_ids[inds[0]] == class_ids[inds[1]]).to(device=h.device)
    s_p = sim[positive]
    s_n = sim[~positive]

    return s_p, s_n

def iterations(args, epoch, model, criterion, optimizer, data_iter, num_iters, training, device):
    model.train(training)
    total_loss = 0

    with tqdm.trange(num_iters) as progress:
        for _ in progress:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
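
A minimal usage sketch (not from the repository): pk_sim expects the rows of h to be grouped by class, k consecutive embeddings per class, mirroring class_ids = torch.arange(p).repeat_interleave(k) above. The values of p, k, and dim below are made up for illustration.

import torch

p, k, dim = 4, 3, 128            # hypothetical layout: 4 classes, 3 samples per class
h = torch.randn(p * k, dim)      # stand-in for model embeddings, grouped by class

s_p, s_n = pk_sim(h, p, k)

# Of the (p*k)*(p*k - 1)/2 unordered pairs, p*k*(k-1)/2 are same-class (positive).
assert s_p.numel() == p * k * (k - 1) // 2                        # 12 positive pairs
assert s_n.numel() == (p * k) * (p * k - 1) // 2 - s_p.numel()    # 54 negative pairs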



baseline_model/data_utils/train_vul.py [139:163]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def pk_sim(h, p, k):
    h: torch.Tensor  # (p * k, dim)

    # All-pairs cosine similarity: pair every row of h with every other row,
    # then reshape back into a (p*k, p*k) similarity matrix.
    X = h.repeat_interleave(p * k, dim=0)
    Y = h.repeat(p * k, 1)
    sim = torch.cosine_similarity(X, Y)
    sim = sim.view(p * k, p * k)

    # Class label per row: k consecutive embeddings belong to the same class.
    class_ids = torch.arange(p).repeat_interleave(k)
    # Strict upper triangle so each unordered pair is counted exactly once.
    inds = torch.triu_indices(p * k, p * k, offset=1)
    sim = sim[inds[0], inds[1]]
    # Split pair similarities into same-class (positive) and cross-class (negative).
    positive = (class_ids[inds[0]] == class_ids[inds[1]]).to(device=h.device)
    s_p = sim[positive]
    s_n = sim[~positive]

    return s_p, s_n


def iterations(args, epoch, model, criterion, optimizer, data_iter, num_iters, training, device):
    model.train(training)

    total_loss = 0

    with tqdm.trange(num_iters) as progress:
        for _ in progress:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
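
Both excerpts end before showing how criterion consumes these pair similarities. Purely as an illustration of one common pattern, a pairwise margin ranking loss over s_p and s_n could look like the sketch below; this is an assumed example, not necessarily the loss used in this repository.

import torch

def pairwise_margin_loss(s_p, s_n, margin=0.3):
    # Hypothetical example: penalize every (positive, negative) combination
    # where the negative similarity is not at least `margin` below the positive one.
    diff = s_n.unsqueeze(0) - s_p.unsqueeze(1) + margin   # (num_positives, num_negatives)
    return torch.relu(diff).mean()

loss = pairwise_margin_loss(s_p, s_n)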



