in python/dataproc_templates/jdbc/jdbc_to_gcs.py [0:0]
def run(self, spark: SparkSession, args: Dict[str, Any]) -> None:
logger: Logger = self.get_logger(spark=spark)
# Arguments
# Use the JDBC URL passed directly in the arguments if present;
# otherwise resolve it from the Secret Manager secret.
if str(args[constants.JDBCTOGCS_INPUT_URL]) == "":
input_jdbc_url: str = secret_manager.access_secret_version(args[constants.JDBCTOGCS_INPUT_URL_SECRET])
else:
input_jdbc_url: str = args[constants.JDBCTOGCS_INPUT_URL]
input_jdbc_driver: str = args[constants.JDBCTOGCS_INPUT_DRIVER]
input_jdbc_table: str = args[constants.JDBCTOGCS_INPUT_TABLE]
input_jdbc_sql_query: str = args[constants.JDBCTOGCS_INPUT_SQL_QUERY]
input_jdbc_partitioncolumn: str = args[constants.JDBCTOGCS_INPUT_PARTITIONCOLUMN]
input_jdbc_lowerbound: str = args[constants.JDBCTOGCS_INPUT_LOWERBOUND]
input_jdbc_upperbound: str = args[constants.JDBCTOGCS_INPUT_UPPERBOUND]
jdbc_numpartitions: str = args[constants.JDBCTOGCS_NUMPARTITIONS]
input_jdbc_fetchsize: int = args[constants.JDBCTOGCS_INPUT_FETCHSIZE]
input_jdbc_sessioninitstatement: str = args[constants.JDBCTOGCS_SESSIONINITSTATEMENT]
output_location: str = args[constants.JDBCTOGCS_OUTPUT_LOCATION]
output_format: str = args[constants.JDBCTOGCS_OUTPUT_FORMAT]
output_mode: str = args[constants.JDBCTOGCS_OUTPUT_MODE]
output_partitioncolumn: str = args[constants.JDBCTOGCS_OUTPUT_PARTITIONCOLUMN]
temp_view: str = args[constants.JDBCTOGCS_TEMP_VIEW_NAME]
temp_sql_query: str = args[constants.JDBCTOGCS_TEMP_SQL_QUERY]
# Exclude the JDBC URL from the log output, since it may embed credentials.
ignore_keys = {constants.JDBCTOGCS_INPUT_URL}
filtered_args = {key: val for key, val in args.items() if key not in ignore_keys}
logger.info(
"Starting JDBC to Cloud Storage Spark job with parameters:\n"
f"{pprint.pformat(filtered_args)}"
)
# Read
input_data: DataFrame
read_properties = {constants.JDBC_URL: input_jdbc_url,
constants.JDBC_DRIVER: input_jdbc_driver}
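# The driver class named here (e.g., org.postgresql.Driver) must be on the
# Spark classpath, typically supplied through the job's jar files.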
if input_jdbc_table:
read_properties.update({constants.JDBC_TABLE: input_jdbc_table})
elif input_jdbc_sql_query:
read_properties.update({constants.JDBC_QUERY: input_jdbc_sql_query})
else:
logger.error("Arguments must have either input table or input SQL query")
exit(1)
read_properties.update({constants.JDBC_NUMPARTITIONS: jdbc_numpartitions,
constants.JDBC_FETCHSIZE: input_jdbc_fetchsize})
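# numPartitions also caps the number of concurrent JDBC connections Spark
# opens; fetchsize sets how many rows the driver fetches per round trip.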
if input_jdbc_sessioninitstatement:
read_properties[constants.JDBC_SESSIONINITSTATEMENT] = input_jdbc_sessioninitstatement
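# sessionInitStatement executes a custom SQL statement (or PL/SQL block) on
# each database session after it is opened and before reading begins, e.g.
# to set session-level options.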
partition_parameters = str(input_jdbc_partitioncolumn) + str(input_jdbc_lowerbound) + str(input_jdbc_upperbound)
if partition_parameters != "" and (input_jdbc_partitioncolumn == "" or input_jdbc_lowerbound == "" or input_jdbc_upperbound == ""):
logger.error("Set all the SQL partitioning parameters together: jdbctogcs.input.partitioncolumn, jdbctogcs.input.lowerbound, jdbctogcs.input.upperbound. Refer to README.md for more instructions.")
exit(1)
if partition_parameters:
read_properties.update({constants.JDBC_PARTITIONCOLUMN: input_jdbc_partitioncolumn,
constants.JDBC_LOWERBOUND: input_jdbc_lowerbound,
constants.JDBC_UPPERBOUND: input_jdbc_upperbound})
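# Illustration (hypothetical values): partitionColumn=id, lowerBound=1,
# upperBound=1000000, numPartitions=10 makes Spark issue ten parallel range
# queries over id. The bounds only set the stride of those ranges; rows
# outside them are still read, into the first and last partitions.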
input_data = spark.read \
.format(constants.FORMAT_JDBC) \
.options(**read_properties) \
.load()
if temp_sql_query:
# Register the source data as a global temp view. Global temp views are
# bound to the global_temp database, so temp_sql_query must reference the
# view as global_temp.<temp_view>.
input_data.createGlobalTempView(temp_view)
# Execute the transformation SQL against the view
output_data = spark.sql(temp_sql_query)
else:
output_data = input_data
# Write
if output_partitioncolumn != "":
writer: DataFrameWriter = output_data.write.mode(output_mode).partitionBy(output_partitioncolumn)
else:
writer: DataFrameWriter = output_data.write.mode(output_mode)
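# partitionBy lays the output out Hive-style, with one subdirectory per
# distinct value of the partition column (<output_location>/<col>=<value>/).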
persist_dataframe_to_cloud_storage(writer, args, output_location, output_format, "jdbc.gcs.output.")
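
# For reference, a minimal standalone sketch of the same read/write flow using
# raw Spark APIs, kept commented out so the module stays importable. All values
# below are hypothetical (URL, driver, table, columns, bucket), and the
# PostgreSQL driver jar is assumed to be on the Spark classpath.
#
# from pyspark.sql import SparkSession
#
# spark = SparkSession.builder.appName("jdbc-to-gcs-sketch").getOrCreate()
# df = (spark.read.format("jdbc")
#       .option("url", "jdbc:postgresql://db-host:5432/mydb")
#       .option("driver", "org.postgresql.Driver")
#       .option("dbtable", "public.events")
#       .option("user", "report_user")
#       .option("password", "change-me")
#       .option("partitionColumn", "id")   # must be numeric, date, or timestamp
#       .option("lowerBound", "1")
#       .option("upperBound", "1000000")
#       .option("numPartitions", "10")
#       .option("fetchsize", "1000")
#       .load())
# (df.write.mode("overwrite")
#     .partitionBy("event_date")
#     .parquet("gs://example-bucket/output/events/"))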