trainers/eval_one_dim_subspaces.py [61:107]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
            # Forward the frozen reference model on the same batch; it is
            # assumed to return (logits, features) like the current model.
            # NOTE(review): `output`, `feats`, `pred`, `correct`, `test_loss`
            # are produced earlier in this function (not shown in this excerpt).
            model_0_output, model_0_feats = model_0(data)
            # Logit-sum ensemble of reference + current model; summing before
            # argmax is equivalent to averaging for the purpose of prediction.
            ensemble_pred = (model_0_output + output).argmax(
                dim=1, keepdim=True
            )
            # NOTE(review): target is reshaped via `pred` (defined above this
            # excerpt) — presumably all prediction tensors share one shape
            # (batch, 1); confirm against the accumulation site.
            ensemble_correct += (
                ensemble_pred.eq(target.view_as(pred)).sum().item()
            )

            # Standalone accuracy count for the reference model.
            m0_pred = model_0_output.argmax(dim=1, keepdim=True)
            m0_correct += m0_pred.eq(target.view_as(pred)).sum().item()

            # Total-variation distance between the two predictive
            # distributions: 0.5 * L1 distance, accumulated over the batch
            # (converted to a per-sample mean after the loop).
            model_t_prob = nn.functional.softmax(output, dim=1)
            model_0_prob = nn.functional.softmax(model_0_output, dim=1)
            tv_dist += 0.5 * (model_0_prob - model_t_prob).abs().sum().item()

            # Squared cosine similarity between the two models' feature
            # vectors, summed per sample along dim=1 (the feature axis).
            feat_cosim += (
                torch.nn.functional.cosine_similarity(
                    feats, model_0_feats, dim=1
                )
                .pow(2)
                .sum()
                .item()
            )

    # Normalize accumulators. NOTE(review): loss divides by len(val_loader)
    # (number of batches) while every other metric divides by
    # len(val_loader.dataset) (number of samples) — correct only if
    # test_loss was accumulated as a per-batch mean; verify upstream.
    test_loss /= len(val_loader)
    test_acc = float(correct) / len(val_loader.dataset)
    m0_acc = float(m0_correct) / len(val_loader.dataset)
    tv_dist /= len(val_loader.dataset)
    feat_cosim /= len(val_loader.dataset)
    ensemble_acc = float(ensemble_correct) / len(val_loader.dataset)

    print(
        f"\nTest set: Average loss: {test_loss:.4f}, Accuracy: ({test_acc:.4f})\n"
    )

    # TensorBoard logging is gated on the save flag.
    if args.save:
        writer.add_scalar(f"test/loss", test_loss, epoch)
        writer.add_scalar(f"test/acc", test_acc, epoch)

    # Diagnostics returned alongside the headline accuracy.
    metrics = {
        "tvdist": tv_dist,
        "ensemble_acc": ensemble_acc,
        "feat_cossim": feat_cosim,
        "m0_acc": m0_acc,
    }

    return test_acc, metrics
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -



trainers/linestats_swa.py [96:142]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
            # NOTE(review): this excerpt is byte-identical to the one from
            # trainers/eval_one_dim_subspaces.py [61:107] — consider extracting
            # a shared evaluation helper to remove the duplication.
            # Forward the frozen reference model on the same batch; it is
            # assumed to return (logits, features) like the current model.
            model_0_output, model_0_feats = model_0(data)
            # Logit-sum ensemble of reference + current model (equivalent to
            # averaging for argmax purposes).
            ensemble_pred = (model_0_output + output).argmax(
                dim=1, keepdim=True
            )
            # NOTE(review): target is reshaped via `pred` (defined above this
            # excerpt) — presumably all prediction tensors share one shape.
            ensemble_correct += (
                ensemble_pred.eq(target.view_as(pred)).sum().item()
            )

            # Standalone accuracy count for the reference model.
            m0_pred = model_0_output.argmax(dim=1, keepdim=True)
            m0_correct += m0_pred.eq(target.view_as(pred)).sum().item()

            # Total-variation distance between the two predictive
            # distributions: 0.5 * L1 distance, accumulated over the batch.
            model_t_prob = nn.functional.softmax(output, dim=1)
            model_0_prob = nn.functional.softmax(model_0_output, dim=1)
            tv_dist += 0.5 * (model_0_prob - model_t_prob).abs().sum().item()

            # Squared cosine similarity between the two models' feature
            # vectors, summed per sample along dim=1 (the feature axis).
            feat_cosim += (
                torch.nn.functional.cosine_similarity(
                    feats, model_0_feats, dim=1
                )
                .pow(2)
                .sum()
                .item()
            )

    # Normalize accumulators. NOTE(review): loss divides by len(val_loader)
    # (batches) while other metrics divide by len(val_loader.dataset)
    # (samples) — correct only if test_loss was accumulated as a per-batch
    # mean; verify upstream.
    test_loss /= len(val_loader)
    test_acc = float(correct) / len(val_loader.dataset)
    m0_acc = float(m0_correct) / len(val_loader.dataset)
    tv_dist /= len(val_loader.dataset)
    feat_cosim /= len(val_loader.dataset)
    ensemble_acc = float(ensemble_correct) / len(val_loader.dataset)

    print(
        f"\nTest set: Average loss: {test_loss:.4f}, Accuracy: ({test_acc:.4f})\n"
    )

    # TensorBoard logging is gated on the save flag.
    if args.save:
        writer.add_scalar(f"test/loss", test_loss, epoch)
        writer.add_scalar(f"test/acc", test_acc, epoch)

    # Diagnostics returned alongside the headline accuracy.
    metrics = {
        "tvdist": tv_dist,
        "ensemble_acc": ensemble_acc,
        "feat_cossim": feat_cosim,
        "m0_acc": m0_acc,
    }

    return test_acc, metrics
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -



