in src/graph_notebook/seed/load_query.py [0:0]
def get_queries(query_language, name, location):
    if location == 'samples':
        d = os.path.dirname(os.path.realpath(__file__))
        path_to_data_sets = pjoin(d, 'queries', normalize_model_name(query_language),
                                  normalize_language_name(query_language), name)
    else:
        # handle custom files here
        if name.startswith('s3://'):
            bucketname, filename = name.replace("s3://", "").split("/", 1)
            path_to_data_sets = download_and_extract_archive_from_s3(bucketname, filename)
        else:
            path_to_data_sets = name
    queries = []
    if os.path.isdir(path_to_data_sets):  # path_to_data_sets is an existing directory
        for file in os.listdir(path_to_data_sets):
            new_query = file_to_query(file, path_to_data_sets)
            if new_query:
                queries.append(new_query)
        queries.sort(key=lambda i: i['name'])  # ensure we get queries back in lexicographical order.
        if name.startswith('s3://'):
            # if S3 data was downloaded, delete the temp folder.
            rmtree(path_to_data_sets, ignore_errors=True)
    elif os.path.isfile(path_to_data_sets):  # path_to_data_sets is an existing file
        file = os.path.basename(path_to_data_sets)
        folder = os.path.dirname(path_to_data_sets)
        new_query = file_to_query(file, folder)
        if new_query:
            queries.append(new_query)
        if name.startswith('s3://'):
            os.unlink(path_to_data_sets)
    else:
        return None
    return queries
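
A minimal usage sketch, assuming a bundled Gremlin sample set named 'airports' exists under the package's queries directory; the language and dataset names below are illustrative assumptions, not values confirmed by this file.

if __name__ == '__main__':
    # Hypothetical call: load the packaged 'airports' Gremlin sample queries.
    sample_queries = get_queries('gremlin', 'airports', 'samples')
    if sample_queries is None:
        # Returned when the resolved path is neither a directory nor a file.
        print('no queries found at the resolved path')
    else:
        # Queries come back sorted by name (lexicographical order).
        for query in sample_queries:
            print(query['name'])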