# models/base.py
import torch
import torch.nn.functional as F


def get_ood_scores(model: BaseModel, images, metric, adjustments):
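    """Compute per-sample OOD scores for a batch of images.

    Args:
        model: classifier that returns logits and features.
        images: input image batch.
        metric: name of the scoring rule ('msp', 'energy', 'ada_msp', ...).
        adjustments: per-class logit adjustments; softmax(adjustments) serves
            as the class prior for the prior-aware metrics.

    Returns:
        Tuple of (in_logits, scores); larger scores indicate higher
        uncertainty, i.e. more likely OOD.
    """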
    logits, features = model(images, return_features=True)
    in_logits, ood_logits = model.parse_logits(logits, features, metric, logits.shape[0])[:2]
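    # `metric` selects the scoring rule below; 'ada_*' variants additionally
    # apply a class-prior adjustment to the OOD logit.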
    if metric.startswith('ada_'):
        ood_logits = model.parse_ada_ood_logits(ood_logits, metric, project=False)
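        # Shift the OOD logit by how far the posterior deviates from the class
        # prior, measured as the log of the mean posterior-to-prior ratio.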
        prior = F.softmax(adjustments, dim=1)
        posterior = F.softmax(in_logits, dim=1)
        out_adjust = (posterior / prior).mean(dim=1).log()
        # Per-metric scale for the prior-adjustment term.
        scale_dict = {'ada_msp': 0.1, 'ada_energy': 1.0, 'ada_bin_disc': 1.0, 'ada_maha': 0.01, 'ada_gradnorm': 10}
        ood_logits += out_adjust * scale_dict[metric]
        scores = -ood_logits
    else:
        if metric == 'msp':
            # The larger the MSP, the smaller the uncertainty.
            scores = -F.softmax(logits, dim=1).max(dim=1).values
        elif metric == 'energy':
            # The larger the energy score, the smaller the uncertainty.
            tau = 1.
            scores = -tau * torch.logsumexp(logits / tau, dim=1)
        elif metric == 'bkg_c':
            # The larger the softmax background-class probability, the larger the uncertainty.
            scores = F.softmax(ood_logits, dim=1)[:, -1]
        elif metric == 'bin_disc':
            # The larger the sigmoid probability, the smaller the uncertainty.
            scores = 1. - ood_logits.squeeze(1).sigmoid()
        elif metric == 'mc_disc':
            # The larger the softmax probability, the smaller the uncertainty.
            scores = F.softmax(ood_logits, dim=1)[:, 1]
        elif metric == 'rp_msp':
            # The larger the prior-discounted MSP, the smaller the uncertainty.
            scores = -(F.softmax(logits, dim=1) - 0.01 * F.softmax(adjustments, dim=1)).max(dim=1).values
        elif metric == 'rp_gradnorm':
            # The larger the GradNorm, the smaller the uncertainty.
            prior = F.softmax(adjustments, dim=1)
            scores = [model.calc_gradnorm_per_sample(feat, targets=prior) for feat in features]
            scores = -torch.tensor(scores)
        elif metric == 'gradnorm':
            # The larger the GradNorm, the smaller the uncertainty.
            scores = [model.calc_gradnorm_per_sample(feat) for feat in features]
            scores = -torch.tensor(scores)
        elif metric == 'rw_energy':
            # The larger the reweighted energy score, the smaller the uncertainty.
            tau = 1.
            prior = F.softmax(adjustments, dim=1)
            posterior = F.softmax(logits, dim=1)
            # Reweight by the cosine distance between posterior and class prior.
            rweight = 1. - (posterior * prior).sum(dim=1) / (posterior.norm(2, dim=1) * prior.norm(2, dim=1))
            scores = -tau * torch.logsumexp(logits / tau, dim=1) * rweight
        elif metric == 'maha':
            # Mahalanobis score, already computed when the logits were parsed.
            scores = -ood_logits[:, 0]
        else:
            raise NotImplementedError(f'OOD inference metric: {metric}')
    return in_logits, scores
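

# A minimal usage sketch (not part of the original API): `test_loader` and
# `train_class_counts` are assumed names, and building `adjustments` as log
# class priors is an assumption consistent with the softmax(adjustments)
# prior used above.
def _demo_collect_ood_scores(model, test_loader, train_class_counts, metric='energy'):
    counts = torch.as_tensor(train_class_counts, dtype=torch.float)
    adjustments = (counts / counts.sum()).log().unsqueeze(0)  # shape [1, num_classes]
    model.eval()
    all_scores = []
    with torch.no_grad():
        for images, _ in test_loader:
            _, scores = get_ood_scores(model, images, metric, adjustments)
            all_scores.append(scores)
    return torch.cat(all_scores)  # larger score => more likely OOD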