in genai-on-vertex-ai/gemini/evals_playbook/utils/evals_playbook.py [0:0]
def log_experiment(self,
                   task_id,
                   experiment_id,
                   prompt,
                   model,
                   metric_config,
                   experiment_desc="",
                   is_streaming=False,
                   tags=None,
                   metadata=None):
    """Build an Experiment record from the prompt/model configuration and upsert it."""
    # use None defaults to avoid sharing mutable default arguments across calls
    tags = tags if tags is not None else []
    metadata = metadata if metadata is not None else {}
    # create experiment object
    experiment = self.Experiment(
        experiment_id=experiment_id,
        experiment_desc=experiment_desc,
        task_id=task_id,
        prompt_id=prompt.prompt_id,
        elapsed_time=0
    )
    # add model information
    experiment.model_name = model._model_name.split("/")[-1]
    experiment.model_endpoint = aiplatform.constants.base.API_BASE_PATH
    experiment.is_streaming = is_streaming
    # add generation config, serialized as JSON when provided as a dict
    if model._generation_config and isinstance(model._generation_config, dict):
        experiment.generation_config = json.dumps(model._generation_config)
    # add safety settings, normalizing both dict and list forms to {category: threshold}
    if model._safety_settings:
        if isinstance(model._safety_settings, dict):
            safety_settings_as_dict = {
                category.name: threshold.name
                for category, threshold in model._safety_settings.items()
            }
        elif isinstance(model._safety_settings, list):
            safety_settings_as_dict = {
                s.to_dict().get("category", "HARM_CATEGORY_UNSPECIFIED"): s.to_dict().get("threshold")
                for s in model._safety_settings
            }
        else:
            safety_settings_as_dict = {}
        experiment.safety_settings = json.dumps(safety_settings_as_dict)
    # add metric config
    experiment.metric_config = str(metric_config)
    # additional fields; use a single timestamp so create/update times match
    now = datetime.datetime.now()
    experiment.create_datetime = now
    experiment.update_datetime = now
    experiment.tags = tags
    if isinstance(metadata, dict):
        experiment.metadata = json.dumps(metadata)
    try:
        if isinstance(experiment, self.Experiment):
            # convert the ORM object to a plain dict, dropping SQLAlchemy internals
            experiment = experiment.__dict__
            experiment.pop("_sa_instance_state", None)
        if not isinstance(experiment, dict):
            raise TypeError(f"Invalid experiment object. Expected: `dict`. Actual: {type(experiment)}")
        self._upsert("experiments", experiment)
    except Exception:
        print("Failed to log experiment due to the following error.")
        raise
    return experiment
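
# Usage sketch (illustrative only; not part of the module). Assumes an instance
# of the playbook class exposing `log_experiment`, a prompt object carrying a
# `prompt_id` attribute, and a Vertex AI `GenerativeModel`; the ids, metric
# names, and tags below are hypothetical placeholders.
#
#   from vertexai.generative_models import GenerativeModel
#
#   model = GenerativeModel("gemini-1.5-pro",
#                           generation_config={"temperature": 0.2})
#   experiment = evals.log_experiment(
#       task_id="task_summarization_01",        # hypothetical task id
#       experiment_id="exp_prompt_v2",          # hypothetical experiment id
#       prompt=prompt,                          # object exposing .prompt_id
#       model=model,
#       metric_config=["fluency", "groundedness"],
#       tags=["baseline"],
#       metadata={"owner": "eval-team"},
#   )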