# src/minmaxML.py
import numpy as np


def compute_model_errors(modelhat, X, y, t, errors, error_type, penalty='none', C=1.0):
    """
    Computes the error of the round-specific model and writes the per-sample
    errors into row t of `errors` in place.
    """
    yhat = modelhat.predict(X).ravel()  # Predictions of the newly trained model
    # Compute the specified error type
    if error_type == 'MSE':
        errors[t, :] = np.power(y - yhat, 2)
    elif error_type == '0/1 Loss':  # Classification 0-1 loss
        errors[t, :] = (y != yhat)
    elif error_type == 'FP':  # False-positive indicator (true negative predicted positive)
        errors[t, :] = (y < yhat)
    elif error_type == 'FN':  # False-negative indicator (true positive predicted negative)
        errors[t, :] = (y > yhat)
    elif error_type == 'Log-Loss':  # Log loss, the convex surrogate used by logistic regression
        errors[t, :] = compute_logloss(y, modelhat.predict_proba(X))
    elif error_type == 'FP-Log-Loss':  # Log loss, zeroed out except on false positives
        errors[t, :] = (y < yhat) * compute_logloss(y, modelhat.predict_proba(X))
    elif error_type == 'FN-Log-Loss':  # Log loss, zeroed out except on false negatives
        errors[t, :] = (y > yhat) * compute_logloss(y, modelhat.predict_proba(X))
    else:
        raise ValueError(f"'{error_type}' is an invalid error type")
    # Add the regularization penalty when regularization is active. sklearn's C is the
    # inverse regularization strength, so C >= 1e15 is treated as effectively
    # unregularized; the 0.5 factor matches the conventional (1/2)||w||^2 form
    # of the l2 penalty.
    if penalty in ['l1', 'l2'] and C < 1e15:
        errors[t, :] += compute_regularization_penalty(modelhat.coef_, penalty, C) * (0.5 if penalty == 'l2' else 1.0)
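

# compute_logloss is called above but defined elsewhere in this file. For
# reference, a minimal sketch of what it presumably computes: the per-sample
# negative log-likelihood from a predict_proba matrix. The name
# compute_logloss_sketch and the clipping constant eps are illustrative
# assumptions, not taken from the original helper.
def compute_logloss_sketch(y, proba, eps=1e-15):
    """Per-sample log loss for binary labels y in {0, 1} and an (n, 2) probability matrix."""
    p = np.clip(proba[:, 1], eps, 1 - eps)  # Probability assigned to the positive class
    return -(y * np.log(p) + (1 - y) * np.log(1 - p))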
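

# compute_regularization_penalty is likewise defined elsewhere. A plausible
# sketch under the assumption that it returns the standard l1 or l2 norm of
# the coefficient vector scaled by 1/C (sklearn's inverse regularization
# strength); the exact scaling in the original helper may differ.
def compute_regularization_penalty_sketch(coef, penalty, C):
    """l1 or (unhalved) l2 penalty on the flattened coefficients, scaled by 1/C."""
    w = np.asarray(coef).ravel()
    if penalty == 'l1':
        return np.sum(np.abs(w)) / C
    if penalty == 'l2':
        return np.dot(w, w) / C  # The caller applies the 0.5 factor for l2
    raise ValueError(f"'{penalty}' is an invalid penalty")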
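

# Usage sketch: fill one row of a (T, n) error matrix with the round-t model.
# The LogisticRegression model and the synthetic data are illustrative; the
# actual training loop lives elsewhere in the repo.
if __name__ == '__main__':
    from sklearn.linear_model import LogisticRegression

    rng = np.random.default_rng(0)
    X = rng.normal(size=(100, 5))
    y = (X[:, 0] + rng.normal(scale=0.5, size=100) > 0).astype(int)

    T, t = 10, 0  # T rounds; fill row t for the round-0 model
    errors = np.zeros((T, len(y)))
    modelhat = LogisticRegression().fit(X, y)
    compute_model_errors(modelhat, X, y, t, errors, '0/1 Loss')
    print(errors[t].mean())  # Misclassification rate of the round-0 model on the training data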