def _evaluate()

in backend/time-series-forecasting/training_methods/automl_training_method.py [0:0]
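Fetches the evaluation attached to a trained Vertex AI AutoML model, writes its metrics to a newly created BigQuery dataset, and returns the fully qualified ID of the resulting automl-evaluation table; raises ValueError when the model has no evaluation.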


    def _evaluate(self, model_name: str) -> str:
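        """Uploads the model's evaluation metrics to a new BigQuery table.

        Args:
            model_name: The resource name of the trained Vertex AI model.

        Returns:
            The fully qualified ID of the BigQuery table containing the metrics.

        Raises:
            ValueError: If no evaluation exists for the model.
        """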

        # Get the model resource
        model = aiplatform.Model(model_name=model_name)

        # Check whether the model has any evaluation attached
        model_evaluations = model.list_model_evaluations()
        if len(model_evaluations) > 0:
            # Parse the metrics out of the first evaluation
            evaluation_metrics = model_evaluations[0].to_dict()["metrics"]

            evaluation_metrics_df = pd.DataFrame(
                evaluation_metrics.items(), columns=["metric", "value"]
            )

            # Construct a BigQuery client object.
            client = bigquery.Client()
            project_id = client.project
            dataset_id = utils.generate_uuid()

            # Create evaluation dataset in default region
            bq_dataset = bigquery.Dataset(f"{project_id}.{dataset_id}")
            bq_dataset = client.create_dataset(bq_dataset, exists_ok=True)

            # Create a BigQuery table in the dataset and upload the evaluation metrics
            table_id = f"{project_id}.{dataset_id}.automl-evaluation"

            job_config = bigquery.LoadJobConfig(
                # The schema is used to assist in data type definitions.
                schema=[
                    bigquery.SchemaField("metric", bigquery.enums.SqlTypeNames.STRING),
                    bigquery.SchemaField("value", bigquery.enums.SqlTypeNames.FLOAT64),
                ],
                # Optionally, set the write disposition. BigQuery appends loaded rows
                # to an existing table by default, but with WRITE_TRUNCATE write
                # disposition it replaces the table with the loaded data.
                write_disposition="WRITE_TRUNCATE",
            )

            job = client.load_table_from_dataframe(
                dataframe=evaluation_metrics_df,
                destination=table_id,
                job_config=job_config,
            )
            # Wait for the job to complete.
            _ = job.result()

            return str(job.destination)
        else:
            raise ValueError(
                f"Model evaluation data does not exist for model {model_name}!"
            )
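
For reference, below is a minimal standalone sketch of the same export pattern, assuming application-default credentials and that the pandas/pyarrow extras for google-cloud-bigquery are installed. The function name export_metrics_to_bigquery and the sample metric values are illustrative and are not part of the repository.

    from google.cloud import bigquery
    import pandas as pd

    def export_metrics_to_bigquery(metrics: dict, dataset_id: str, table_name: str) -> str:
        # Flatten {metric: value} pairs into the same two-column shape used above.
        metrics_df = pd.DataFrame(metrics.items(), columns=["metric", "value"])

        client = bigquery.Client()
        project_id = client.project

        # Create (or reuse) the destination dataset in the client's default location.
        client.create_dataset(bigquery.Dataset(f"{project_id}.{dataset_id}"), exists_ok=True)

        table_id = f"{project_id}.{dataset_id}.{table_name}"
        job_config = bigquery.LoadJobConfig(
            schema=[
                bigquery.SchemaField("metric", bigquery.enums.SqlTypeNames.STRING),
                bigquery.SchemaField("value", bigquery.enums.SqlTypeNames.FLOAT64),
            ],
            # Replace any rows already in the table instead of appending.
            write_disposition="WRITE_TRUNCATE",
        )

        job = client.load_table_from_dataframe(metrics_df, table_id, job_config=job_config)
        job.result()  # Block until the load job finishes.
        return str(job.destination)

    # Illustrative call with made-up metric values:
    # export_metrics_to_bigquery({"MAE": 12.3, "RMSE": 18.7}, "eval_demo", "automl_evaluation")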