src/common/loss.py
#!/usr/bin/env python
# encoding: utf-8
'''
*Copyright (c) 2023, Alibaba Group;
*Licensed under the Apache License, Version 2.0 (the "License");
*you may not use this file except in compliance with the License.
*You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
*Unless required by applicable law or agreed to in writing, software
*distributed under the License is distributed on an "AS IS" BASIS,
*WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*See the License for the specific language governing permissions and
*limitations under the License.
@author: Hey
@email: sanyuan.**@**.com
@tel: 137****6540
@datetime: 2022/11/26 21:05
@project: DeepProtFunc
@file: loss
@desc: loss functions for multi-label and multi-class classification
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
class FocalLoss(nn.Module):
    '''
    Focal loss (Lin et al., 2017) for binary / multi-label classification.
    alpha: class-balance weight in [0, 1] applied to the positive targets
    gamma: focusing parameter; larger values down-weight well-classified examples
    normalization: if True, inputs are raw logits and a sigmoid is applied internally; otherwise inputs are probabilities
    reduce: if True, return the global mean; otherwise sum over labels per sample and average over the batch
    '''
def __init__(self, alpha=1, gamma=2, normalization=False, reduce=False):
super(FocalLoss, self).__init__()
self.alpha = alpha
self.gamma = gamma
self.normalization = normalization
self.reduce = reduce
def forward(self, inputs, targets):
        if self.normalization:
            # inputs are raw logits: compute BCE from the logits and derive probabilities with a sigmoid.
            # reduction='none' keeps the per-element losses so the focal modulation below stays element-wise
            # ('mean' or 'sum' would collapse the loss too early).
            bce = F.binary_cross_entropy_with_logits(inputs, targets, reduction='none')
            probs = torch.sigmoid(inputs)
        else:
            # inputs are already probabilities in [0, 1]
            bce = F.binary_cross_entropy(inputs, targets, reduction='none')
            probs = inputs
        # pt: the predicted probability of the true class for every element
        pt = targets * probs + (1 - targets) * (1 - probs)
        modulate = 1 if self.gamma is None else (1 - pt) ** self.gamma
        focal_loss = modulate * bce
if self.alpha is not None:
assert 0 <= self.alpha <= 1
alpha_weights = targets * self.alpha + (1 - targets) * (1 - self.alpha)
focal_loss *= alpha_weights
        if self.reduce:
            # global mean over all elements
            return torch.mean(focal_loss)
        else:
            # sum over labels for each sample, then mean over the batch
            return torch.mean(torch.sum(focal_loss, dim=1))
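
# Illustrative usage (a sketch, not part of the original module; `logits` and `labels` are placeholder tensors):
#   criterion = FocalLoss(alpha=0.25, gamma=2, normalization=True, reduce=True)
#   loss = criterion(logits, labels.float())   # both of shape (batch_size, num_labels), labels multi-hot
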
class MultiLabel_CCE(nn.Module):
    '''
    Multi-label categorical cross entropy (CCE): a softmax-style cross entropy
    generalized to multi-label targets via logsumexp over the positive and
    negative class scores.
    '''
def __init__(self, normalization=False):
super(MultiLabel_CCE, self).__init__()
self.normalization = normalization
    def forward(self, inputs, targets):
        """
        Cross entropy for multi-label classification.
        Note: y_true and y_pred have the same shape; the elements of y_true are either 0 or 1,
        where 1 marks a target class and 0 marks a non-target class.
        """
        if self.normalization:
            y_pred = torch.sigmoid(inputs)
        else:
            y_pred = inputs
        y_true = targets
        # flip the sign of the scores of the positive classes
        y_pred = (1 - 2 * y_true) * y_pred
        # mask positives out of the negative term and negatives out of the positive term
        y_pred_neg = y_pred - y_true * 1e12
        y_pred_pos = y_pred - (1 - y_true) * 1e12
        # append a zero score so each logsumexp includes the "+1" inside the log
        zeros = torch.zeros_like(y_pred[..., :1])
        y_pred_neg = torch.cat((y_pred_neg, zeros), dim=-1)
        y_pred_pos = torch.cat((y_pred_pos, zeros), dim=-1)
        neg_loss = torch.logsumexp(y_pred_neg, dim=-1)
        pos_loss = torch.logsumexp(y_pred_pos, dim=-1)
        loss = torch.mean(neg_loss + pos_loss)
return loss
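
# Illustrative usage (a sketch; with normalization=False the inputs are treated as raw scores):
#   criterion = MultiLabel_CCE(normalization=False)
#   loss = criterion(logits, labels.float())   # both of shape (batch_size, num_labels), labels multi-hot
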
class AsymmetricLoss(nn.Module):
    '''
    Asymmetric loss (ASL) for multi-label classification: uses separate focusing
    parameters for positive (gamma_pos) and negative (gamma_neg) labels and
    optionally shifts the negative probabilities by `clip` (asymmetric clipping).
    '''
def __init__(self, gamma_neg=4, gamma_pos=1, clip=0.05, eps=1e-8, disable_torch_grad_focal_loss=True):
super(AsymmetricLoss, self).__init__()
self.gamma_neg = gamma_neg
self.gamma_pos = gamma_pos
self.clip = clip
self.disable_torch_grad_focal_loss = disable_torch_grad_focal_loss
self.eps = eps
    def forward(self, x, y):
        """
        Parameters
        ----------
        x: input logits
        y: targets (multi-label binarized vector)
        """
# Calculating Probabilities
x_sigmoid = torch.sigmoid(x)
xs_pos = x_sigmoid
xs_neg = 1 - x_sigmoid
# Asymmetric Clipping
if self.clip is not None and self.clip > 0:
xs_neg = (xs_neg + self.clip).clamp(max=1)
# Basic CE calculation
los_pos = y * torch.log(xs_pos.clamp(min=self.eps))
los_neg = (1 - y) * torch.log(xs_neg.clamp(min=self.eps))
loss = los_pos + los_neg
# Asymmetric Focusing
if self.gamma_neg > 0 or self.gamma_pos > 0:
            if self.disable_torch_grad_focal_loss:
                # compute the focusing weights without tracking gradients (treated as constants in the backward pass)
                torch.set_grad_enabled(False)
pt0 = xs_pos * y
pt1 = xs_neg * (1 - y) # pt = p if t > 0 else 1-p
pt = pt0 + pt1
one_sided_gamma = self.gamma_pos * y + self.gamma_neg * (1 - y)
one_sided_w = torch.pow(1 - pt, one_sided_gamma)
if self.disable_torch_grad_focal_loss:
torch.set_grad_enabled(True)
loss *= one_sided_w
return -loss.sum()
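
# Illustrative usage (a sketch; the hyperparameters shown are simply the defaults above):
#   criterion = AsymmetricLoss(gamma_neg=4, gamma_pos=1, clip=0.05)
#   loss = criterion(logits, labels.float())   # logits: raw scores, labels: multi-hot, both (batch_size, num_labels)
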
class AsymmetricLossOptimized(nn.Module):
    '''
    Notice - optimized version of AsymmetricLoss: minimizes memory allocation and
    GPU transfers by caching intermediate tensors on the module and favoring
    in-place operations.
    '''
def __init__(self, gamma_neg=4, gamma_pos=1, clip=0.05, eps=1e-8, disable_torch_grad_focal_loss=False):
super(AsymmetricLossOptimized, self).__init__()
self.gamma_neg = gamma_neg
self.gamma_pos = gamma_pos
self.clip = clip
self.disable_torch_grad_focal_loss = disable_torch_grad_focal_loss
self.eps = eps
        # cache intermediate tensors on the module to avoid re-allocating (and re-uploading to the GPU)
        # every iteration, which also encourages in-place operations
        self.targets = self.anti_targets = self.xs_pos = self.xs_neg = self.asymmetric_w = self.loss = None
    def forward(self, x, y):
        """
        Parameters
        ----------
        x: input logits
        y: targets (multi-label binarized vector)
        """
self.targets = y
self.anti_targets = 1 - y
# Calculating Probabilities
self.xs_pos = torch.sigmoid(x)
self.xs_neg = 1.0 - self.xs_pos
# Asymmetric Clipping
if self.clip is not None and self.clip > 0:
self.xs_neg.add_(self.clip).clamp_(max=1)
# Basic CE calculation
self.loss = self.targets * torch.log(self.xs_pos.clamp(min=self.eps))
self.loss.add_(self.anti_targets * torch.log(self.xs_neg.clamp(min=self.eps)))
# Asymmetric Focusing
if self.gamma_neg > 0 or self.gamma_pos > 0:
if self.disable_torch_grad_focal_loss:
torch.set_grad_enabled(False)
self.xs_pos = self.xs_pos * self.targets
self.xs_neg = self.xs_neg * self.anti_targets
self.asymmetric_w = torch.pow(1 - self.xs_pos - self.xs_neg,
self.gamma_pos * self.targets + self.gamma_neg * self.anti_targets)
if self.disable_torch_grad_focal_loss:
torch.set_grad_enabled(True)
self.loss *= self.asymmetric_w
return -self.loss.sum()
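
# Illustrative usage (a sketch): same call signature as AsymmetricLoss, intended as a drop-in replacement.
# Note that disable_torch_grad_focal_loss defaults to False here but to True in AsymmetricLoss.
#   criterion = AsymmetricLossOptimized(gamma_neg=4, gamma_pos=1, clip=0.05)
#   loss = criterion(logits, labels.float())
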
class ASLSingleLabel(nn.Module):
    '''
    This loss is intended for single-label (multi-class) classification problems,
    combining asymmetric focusing with optional label smoothing (eps).
    '''
def __init__(self, gamma_pos=0, gamma_neg=4, eps: float = 0.1, reduction='mean'):
super(ASLSingleLabel, self).__init__()
self.eps = eps
self.logsoftmax = nn.LogSoftmax(dim=-1)
self.targets_classes = []
self.gamma_pos = gamma_pos
self.gamma_neg = gamma_neg
self.reduction = reduction
    def forward(self, inputs, target):
        '''
        "inputs" dimensions: (batch_size, number_classes)
        "target" dimensions: (batch_size,) holding integer class indices
        '''
        num_classes = inputs.size()[-1]
        log_preds = self.logsoftmax(inputs)
        # one-hot encode the integer class indices
        self.targets_classes = torch.zeros_like(inputs).scatter_(1, target.long().unsqueeze(1), 1)
# ASL weights
targets = self.targets_classes
anti_targets = 1 - targets
xs_pos = torch.exp(log_preds)
xs_neg = 1 - xs_pos
xs_pos = xs_pos * targets
xs_neg = xs_neg * anti_targets
asymmetric_w = torch.pow(1 - xs_pos - xs_neg,
self.gamma_pos * targets + self.gamma_neg * anti_targets)
log_preds = log_preds * asymmetric_w
if self.eps > 0: # label smoothing
self.targets_classes = self.targets_classes.mul(1 - self.eps).add(self.eps / num_classes)
# loss calculation
loss = - self.targets_classes.mul(log_preds)
loss = loss.sum(dim=-1)
if self.reduction == 'mean':
loss = loss.mean()
return loss
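
# Illustrative smoke test (added for demonstration, not part of the original training pipeline):
# runs each loss on small random inputs to check that shapes and reductions line up.
if __name__ == "__main__":
    torch.manual_seed(0)
    batch_size, num_labels = 4, 6
    logits = torch.randn(batch_size, num_labels)                         # raw scores
    multi_hot = torch.randint(0, 2, (batch_size, num_labels)).float()    # multi-label targets
    class_idx = torch.randint(0, num_labels, (batch_size,))              # single-label targets
    print("FocalLoss:", FocalLoss(alpha=0.25, gamma=2, normalization=True, reduce=True)(logits, multi_hot).item())
    print("MultiLabel_CCE:", MultiLabel_CCE(normalization=False)(logits, multi_hot).item())
    print("AsymmetricLoss:", AsymmetricLoss()(logits, multi_hot).item())
    print("AsymmetricLossOptimized:", AsymmetricLossOptimized()(logits, multi_hot).item())
    print("ASLSingleLabel:", ASLSingleLabel()(logits, class_idx).item())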