# flatten_join_nested_file.py
def write_to_targets(tbl_name, dyn_frame, target_path, num_output_files, target_repository):
    """Write a DynamicFrame to the configured target repository/repositories.

    Relies on module-level globals defined elsewhere in this file:
    ``glueContext``, ``datasink_map``, ``redshift_schema``,
    ``redshift_connection``, ``redshift_db_name``, ``s3_temp_folder``.

    Parameters
    ----------
    tbl_name : str
        Logical table name; also used as the Redshift target table name.
    dyn_frame : awsglue DynamicFrame
        Frame to write; coalesced to ``num_output_files`` partitions first.
    target_path : str
        S3 destination path (used for the 's3'/'all' targets).
    num_output_files : int
        Number of output partitions (and hence output files).
    target_repository : str
        One of 's3', 'redshift', 'all', or 'none' (dry run: count + schema).
    """
    dyn_frame = dyn_frame.coalesce(num_output_files)

    if target_repository in ('s3', 'all'):
        datasink_map[tbl_name] = glueContext.write_dynamic_frame.from_options(
            frame=dyn_frame,
            connection_type="s3",
            connection_options={"path": target_path},
            format="glueparquet",
            # BUG FIX: was the literal string "datasink_map['tbl_name']" for
            # every table, so all S3 sinks shared one transformation context
            # (breaks Glue job bookmarks). Interpolate the real table name.
            transformation_ctx="datasink_map['{}']".format(tbl_name))

    if target_repository in ('redshift', 'all'):
        db_tbl_name = redshift_schema + "." + tbl_name
        # NOTE(review): result intentionally not stored in datasink_map,
        # matching the original behavior — confirm bookmarks aren't needed here.
        glueContext.write_dynamic_frame.from_jdbc_conf(
            frame=dyn_frame,
            catalog_connection=redshift_connection,
            connection_options={"dbtable": db_tbl_name,
                                "database": redshift_db_name},
            redshift_tmp_dir=s3_temp_folder)

    if target_repository == 'none':
        # Dry-run mode: report row count and schema without writing anywhere.
        row_count = dyn_frame.toDF().count()
        print('schema for table: ', tbl_name, ' number of rows: ', row_count)
        # BUG FIX: printSchema() prints to stdout and returns None; the old
        # code then print()ed that None, emitting a spurious "None" line.
        dyn_frame.printSchema()