in distributed_training/util/inference_utils.py [0:0]
def plot_roc_curve_multiclass(y_true_ohe, y_score, num_classes, color_table, skip_legend=5, is_single_fig=False):
    """
    Plot per-class, micro-averaged, and macro-averaged ROC curves for a
    multi-class classifier.

    Parameters
    ----------
    y_true_ohe : array-like of shape (n_samples, num_classes)
        One-hot encoded ground-truth labels.
    y_score : array-like of shape (n_samples, num_classes)
        Predicted scores/probabilities per class.
    num_classes : int
        Number of classes (columns) to plot.
    color_table : iterable
        Colors cycled over for the per-class curves.
    skip_legend : int, default 5
        Only every ``skip_legend``-th class gets a legend entry, to keep
        the legend readable when there are many classes.
    is_single_fig : bool, default False
        If True, call ``plt.show()`` immediately.

    Returns
    -------
    (fig, ax) : tuple
        The created matplotlib figure and axes, so callers can save or
        further customize the plot. (Backward compatible: previous
        callers ignored the return value.)
    """
    # Per-class ROC curves and AUCs.
    fpr = dict()
    tpr = dict()
    roc_auc = dict()
    for i in range(num_classes):
        fpr[i], tpr[i], _ = roc_curve(y_true_ohe[:, i], y_score[:, i])
        roc_auc[i] = auc(fpr[i], tpr[i])

    # Micro-average: pool all (sample, class) decisions into one binary problem.
    fpr["micro"], tpr["micro"], _ = roc_curve(y_true_ohe.ravel(), y_score.ravel())
    roc_auc["micro"] = auc(fpr["micro"], tpr["micro"])

    # Macro-average: interpolate every class curve onto a common FPR grid,
    # then average the TPRs with equal class weight.
    all_fpr = np.unique(np.concatenate([fpr[i] for i in range(num_classes)]))
    mean_tpr = np.zeros_like(all_fpr)
    for i in range(num_classes):
        mean_tpr += np.interp(all_fpr, fpr[i], tpr[i])
    mean_tpr /= num_classes
    fpr["macro"] = all_fpr
    tpr["macro"] = mean_tpr
    roc_auc["macro"] = auc(fpr["macro"], tpr["macro"])

    colors = cycle(color_table)
    fig, ax = plt.subplots(figsize=(8, 8))
    ax.plot(fpr["micro"], tpr["micro"],
            label='micro-average ROC curve (area = {0:0.5f})'.format(roc_auc["micro"]),
            color='deeppink', linewidth=3)
    ax.plot(fpr["macro"], tpr["macro"],
            label='macro-average ROC curve (area = {0:0.5f})'.format(roc_auc["macro"]),
            color='navy', linewidth=3)
    for i, color in zip(range(num_classes), colors):
        # Thin the legend: label only every skip_legend-th class.
        if i % skip_legend == 0:
            label = 'ROC curve of class {0} (area = {1:0.4f})'.format(i, roc_auc[i])
        else:
            label = None
        ax.plot(fpr[i], tpr[i], color=color, label=label, lw=2, alpha=0.3, linestyle=':')

    ax.grid(alpha=.4)
    ax.plot([0, 1], [0, 1], 'k--', lw=2)  # chance-level diagonal
    ax.set_xlim([0.0, 1.0])
    ax.set_ylim([0.0, 1.05])
    ax.set_xlabel('False Positive Rate')
    ax.set_ylabel('True Positive Rate')
    ax.set_title('Receiver operating characteristic to multi-class')
    ax.legend(loc="lower right", prop={'size': 10})
    if is_single_fig:
        plt.show()
    # Return the figure/axes so callers can save or close it explicitly
    # (previously the figure leaked when is_single_fig was False).
    return fig, ax