in assets/training/model_evaluation/src/compute_metrics.py
def __init__(self,
             task: str,
             ground_truth: str,
             predictions: str,
             prediction_probabilities: str,
             output: str,
             config: dict = None,
             is_ground_truth_mltable: str = None,
             is_predictions_mltable: str = None,
             is_prediction_probabilities_mltable: str = None,
             ground_truths_column_name: str = None,
             predictions_column_name: str = None,
             extra_y_test_cols: str = None,
             llm_config: dict = None):
"""__init__.
Args:
task (str): _description_
"""
    # llm_config defaults to None (not a shared mutable {}) and is normalized here,
    # since it is mutated below when defaults are backfilled.
    llm_config = llm_config if llm_config is not None else {}
    self.task = task
    self.ground_truth = ground_truth
    self.predictions = predictions
    self.predictions_probabilities = prediction_probabilities if prediction_probabilities != '' else None
    self.output = output
    self.is_ground_truth_mltable = is_ground_truth_mltable
    self.is_predictions_mltable = is_predictions_mltable
    self.is_predictions_probabilities_mltable = is_prediction_probabilities_mltable
    self.ground_truths_column_name = ground_truths_column_name
    self.extra_y_test_cols = extra_y_test_cols
    self.predictions_column_name = predictions_column_name
    self.config = config if config is not None else {}
    self._is_multilabel = self.config.get("multilabel", False)
    self._has_multiple_output = self._is_multilabel or self.task == TASK.NER
    self._is_multiple_ground_truth = False
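    # QnA and RAG-style chat completion use GPT-based metrics, which require an OpenAI
    # configuration; it is validated and initialized below.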
    if self.task in [TASK.QnA] or (self.task == TASK.CHAT_COMPLETION and
                                   self.config.get(SubTask.SUB_TASK_KEY, "") == SubTask.RAG_EVALUATION):
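        # Backfill any OpenAI settings missing from llm_config with the defaults
        # from DEFAULT_OPENAI_CONFIG.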
        for k, v in constants.OpenAIConstants.DEFAULT_OPENAI_CONFIG.items():
            if k not in llm_config:
                logger.info(f"Required key '{k}' not found in openai_config_params. "
                            f"Setting it to default '{v}'.")
                llm_config[k] = v
        do_openai_init = True
        keys = constants.OpenAIConstants.REQUIRED_KEYS
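        # Missing required keys are handled per task: QnA only skips the GPT-based metrics
        # with a warning, while RAG evaluation treats them as a configuration error.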
        if self.task == TASK.QnA:
            if not any(k in llm_config for k in keys):
                logger.warning(f"None of the required keys '[{', '.join(keys)}]' found in openai_config_params.\n"
                               f"Skipping GPT-based metrics calculation for this run.")
                do_openai_init = False
        elif self.task == TASK.CHAT_COMPLETION and \
                self.config.get(SubTask.SUB_TASK_KEY, "") == SubTask.RAG_EVALUATION:
            if not all(k in llm_config for k in keys):
                message = f"One or more required keys '[{', '.join(keys)}]' missing from openai_config_params."
                exception = get_azureml_exception(
                    DataValidationException, BadEvaluationConfig, None, error=message
                )
                log_traceback(exception, logger)
                raise exception
        self.rag_input_data_keys = {}
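        # Record the llm_config entries that identify the RAG input data, then initialize
        # OpenAI and expose the resulting parameters to the metrics code via the config.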
        if do_openai_init:
            for k in keys:
                self.rag_input_data_keys[k] = llm_config.get(k, None)
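            # Split init-time parameters (DEFAULT_OPENAI_INIT_PARAMS) out of llm_config;
            # whatever remains in llm_config is passed to openai_init as the OpenAI config.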
            openai_init_params = {}
            for k, v in constants.OpenAIConstants.DEFAULT_OPENAI_INIT_PARAMS.items():
                openai_init_params[k] = llm_config.pop(k, v)
            openai_params = openai_init(llm_config, **openai_init_params)
            if openai_params:
                self.config[constants.OpenAIConstants.METRICS_KEY] = openai_params