in python/dataproc_templates/jdbc/jdbc_to_bigquery.py [0:0]
def run(self, spark: SparkSession, args: Dict[str, Any]) -> None:
logger: Logger = self.get_logger(spark=spark)
# Arguments
big_query_dataset: str = args[constants.JDBC_BQ_OUTPUT_DATASET]
big_query_table: str = args[constants.JDBC_BQ_OUTPUT_TABLE]
bq_temp_bucket: str = args[constants.JDBC_BQ_LD_TEMP_BUCKET_NAME]
# Check whether the JDBC URL is passed directly or as a Secret Manager secret name
input_jdbc_url: str
if str(args[constants.JDBC_BQ_INPUT_URL]) == "":
    input_jdbc_url = secret_manager.access_secret_version(args[constants.JDBC_BQ_INPUT_URL_SECRET])
else:
    input_jdbc_url = args[constants.JDBC_BQ_INPUT_URL]
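# (Assumption based on the usage here: access_secret_version returns the
# secret payload as a string, i.e. the full JDBC URL stored in Secret Manager.)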
input_jdbc_driver: str = args[constants.JDBC_BQ_INPUT_DRIVER]
input_jdbc_table: str = args[constants.JDBC_BQ_INPUT_TABLE]
input_jdbc_partitioncolumn: str = args[constants.JDBC_BQ_INPUT_PARTITIONCOLUMN]
input_jdbc_lowerbound: str = args[constants.JDBC_BQ_INPUT_LOWERBOUND]
input_jdbc_upperbound: str = args[constants.JDBC_BQ_INPUT_UPPERBOUND]
jdbc_numpartitions: str = args[constants.JDBC_BQ_NUMPARTITIONS]
input_jdbc_fetchsize: int = args[constants.JDBC_BQ_INPUT_FETCHSIZE]
input_jdbc_sessioninitstatement: str = args[constants.JDBC_BQ_SESSIONINITSTATEMENT]
output_mode: str = args[constants.JDBC_BQ_OUTPUT_MODE]
# Exclude the JDBC URL from the logged parameters, since it may embed credentials
ignore_keys = {constants.JDBC_BQ_INPUT_URL}
filtered_args = {key: val for key, val in args.items() if key not in ignore_keys}
logger.info(
"Starting JDBC to BigQuery Spark job with parameters:\n"
f"{pprint.pformat(filtered_args)}"
)
# Read
input_data: DataFrame
partition_parameters = str(input_jdbc_partitioncolumn) + str(input_jdbc_lowerbound) + str(input_jdbc_upperbound)
if partition_parameters != "" and (input_jdbc_partitioncolumn == "" or input_jdbc_lowerbound == "" or input_jdbc_upperbound == ""):
    logger.error("Set all the SQL partitioning parameters together: "
                 f"{constants.JDBC_BQ_INPUT_PARTITIONCOLUMN}, "
                 f"{constants.JDBC_BQ_INPUT_LOWERBOUND} and "
                 f"{constants.JDBC_BQ_INPUT_UPPERBOUND}. "
                 "Refer to README.md for more instructions.")
    exit(1)
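# Note: Spark's JDBC source requires partitionColumn, lowerBound and
# upperBound to be supplied together (alongside numPartitions, which sets
# the degree of read parallelism), hence the all-or-nothing check above.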
properties = {constants.JDBC_URL: input_jdbc_url,
constants.JDBC_DRIVER: input_jdbc_driver,
constants.JDBC_TABLE: input_jdbc_table,
constants.JDBC_NUMPARTITIONS: jdbc_numpartitions,
constants.JDBC_FETCHSIZE: input_jdbc_fetchsize}
if input_jdbc_sessioninitstatement:
properties[constants.JDBC_SESSIONINITSTATEMENT] = input_jdbc_sessioninitstatement
if partition_parameters:
properties.update({constants.JDBC_PARTITIONCOLUMN: input_jdbc_partitioncolumn,
constants.JDBC_LOWERBOUND: input_jdbc_lowerbound,
constants.JDBC_UPPERBOUND: input_jdbc_upperbound})
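# Illustrative sketch only (assuming the constants above resolve to Spark's
# standard JDBC reader option names): with partitioning enabled, the read
# below is roughly equivalent to:
#
#   spark.read.format("jdbc") \
#       .option("url", input_jdbc_url) \
#       .option("driver", input_jdbc_driver) \
#       .option("dbtable", input_jdbc_table) \
#       .option("numPartitions", jdbc_numpartitions) \
#       .option("fetchsize", input_jdbc_fetchsize) \
#       .option("partitionColumn", input_jdbc_partitioncolumn) \
#       .option("lowerBound", input_jdbc_lowerbound) \
#       .option("upperBound", input_jdbc_upperbound) \
#       .load()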
input_data = spark.read \
.format(constants.FORMAT_JDBC) \
.options(**properties) \
.load()
# Write: the BigQuery connector stages the data in the GCS temp bucket before loading it into BigQuery
input_data.write \
.format(constants.FORMAT_BIGQUERY) \
.option(constants.TABLE, f"{big_query_dataset}.{big_query_table}") \
.option(constants.GCS_BQ_TEMP_BUCKET, bq_temp_bucket) \
.mode(output_mode) \
.save()
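# Minimal usage sketch (illustrative only: the class name JDBCToBigQueryTemplate
# is assumed from this file's context, and all values below are placeholders;
# any argument keys not shown must still be present, even if empty, since
# run() reads them unconditionally):
#
#   spark = SparkSession.builder.appName("jdbc-to-bigquery").getOrCreate()
#   args = {
#       constants.JDBC_BQ_INPUT_URL: "jdbc:postgresql://host:5432/db?user=u&password=p",
#       constants.JDBC_BQ_INPUT_DRIVER: "org.postgresql.Driver",
#       constants.JDBC_BQ_INPUT_TABLE: "public.orders",
#       constants.JDBC_BQ_OUTPUT_DATASET: "my_dataset",
#       constants.JDBC_BQ_OUTPUT_TABLE: "orders",
#       constants.JDBC_BQ_LD_TEMP_BUCKET_NAME: "my-temp-bucket",
#       constants.JDBC_BQ_OUTPUT_MODE: "overwrite",
#       # ...partitioning, fetchsize, etc. left empty...
#   }
#   JDBCToBigQueryTemplate().run(spark, args)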