# src/sagemaker_xgboost_container/algorithm_mode/train_utils.py
def get_eval_metrics_and_feval(tuning_objective_metric_param, eval_metric):
    """Split requested metrics into native xgb eval metrics and a custom feval callable.

    XGBoost validates natively supported metrics through the 'eval_metric' parameter,
    and custom (container-defined) metrics through the 'feval' training argument,
    which takes a callable. The callable returned here is configured to compute only
    the custom metrics the user actually requested.

    :param tuning_objective_metric_param: HPO metric name string, or None. Decoded via
        MetricNameComponents and split on "," into a list of metric names.
    :param eval_metric: list of xgb metrics to output, or None.
    :return: tuple of
        (cleaned list of natively supported eval metrics, or None;
         feval callable configured with the container-defined metrics, or None;
         list of tuning objective metric names, or None).
    """
    tuning_objective_metric = None
    configured_eval = None
    cleaned_eval_metrics = None

    if tuning_objective_metric_param is not None:
        tuning_objective_metric_tuple = MetricNameComponents.decode(tuning_objective_metric_param)
        # metric_name may encode several comma-separated metrics; keep them as a list.
        tuning_objective_metric = tuning_objective_metric_tuple.metric_name.split(",")
        # Lazy %-style args: the message is only rendered if INFO logging is enabled.
        logging.info("Setting up HPO optimized metric to be : %s", tuning_objective_metric_tuple.metric_name)

    union_metrics = get_union_metrics(tuning_objective_metric, eval_metric)

    if union_metrics is not None:
        # Partition the combined metric list: custom metrics go through feval,
        # the remainder are passed to xgb natively via 'eval_metric'.
        feval_metrics = get_custom_metrics(union_metrics)
        if feval_metrics:
            configured_eval = configure_feval(feval_metrics)
            # NOTE(review): set difference does not preserve the original metric
            # ordering — presumably acceptable downstream; confirm if order matters.
            cleaned_eval_metrics = list(set(union_metrics) - set(feval_metrics))
        else:
            cleaned_eval_metrics = union_metrics

    return cleaned_eval_metrics, configured_eval, tuning_objective_metric