backend/time-series-forecasting/utils.py
import pandas as pd
from google.cloud import bigquery


def save_dataframe_to_bigquery(dataframe: pd.DataFrame, table_name: str) -> str:
    """Loads a dataframe into a new BigQuery table.

    Args:
        dataframe (pd.DataFrame): dataframe to be loaded to BigQuery.
        table_name (str): name of the BigQuery table being created.

    Returns:
        str: table id of the destination BigQuery table.
    """
    client = bigquery.Client()
    project_id = client.project
    # generate_uuid() is assumed to be a helper defined elsewhere in utils.py
    # that returns a unique, BigQuery-safe dataset name.
    dataset_id = generate_uuid()
    bq_dataset = bigquery.Dataset(f"{project_id}.{dataset_id}")
    bq_dataset = client.create_dataset(bq_dataset, exists_ok=True)
    job_config = bigquery.LoadJobConfig(
        # Specify a (partial) schema. All columns are always written to the
        # table. The schema is used to assist in data type definitions.
        schema=[
            bigquery.SchemaField("date", bigquery.enums.SqlTypeNames.DATE),
        ],
        # Optionally, set the write disposition. BigQuery appends loaded rows
        # to an existing table by default, but with WRITE_TRUNCATE write
        # disposition it replaces the table with the loaded data.
        write_disposition="WRITE_TRUNCATE",
    )
    # Reference: https://cloud.google.com/bigquery/docs/samples/bigquery-load-table-dataframe
    job = client.load_table_from_dataframe(
        dataframe=dataframe,
        destination=f"{project_id}.{dataset_id}.{table_name}",
        job_config=job_config,
    )
    job.result()  # Wait for the load job to complete.
    return str(job.destination)
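
# A minimal usage sketch (not part of the original utils.py). It assumes
# application-default GCP credentials are configured and that the dataframe
# has a "date" column matching the partial schema above; the column and
# table names below are illustrative.
#
#     df = pd.DataFrame(
#         {"date": pd.to_datetime(["2024-01-01", "2024-01-02"]).date, "value": [1.0, 2.0]}
#     )
#     table_id = save_dataframe_to_bigquery(df, table_name="sales_history")
#     print(f"Loaded dataframe into {table_id}")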