in redash/query_runner/big_query.py [0:0]
def _get_query_result(self, jobs, query):
    """Run *query* as a BigQuery job and collect the complete result set.

    Inserts a job through the ``jobs`` API resource, then pages through
    ``getQueryResults`` (via ``startIndex``) until every row has been
    fetched.

    :param jobs: googleapiclient ``jobs()`` resource used to insert the
        job and poll for its results.
    :param query: SQL text to execute.
    :return: dict with ``columns``, ``rows`` and ``metadata`` (bytes
        scanned) in the shape the caller expects.
    """
    project_id = self._get_project_id()
    job_data = self._get_job_data(query)
    insert_response = jobs.insert(projectId=project_id, body=job_data).execute()
    # Remember the job id on the instance so the running query can be
    # referenced (e.g. cancelled) from elsewhere.
    self.current_job_id = insert_response["jobReference"]["jobId"]
    current_row = 0

    # Hoisted out of the pagination loop: the job's location does not
    # change while we page through results (was re-fetched per iteration).
    location = self._get_location()

    query_reply = _get_query_results(
        jobs,
        project_id=project_id,
        location=location,
        job_id=self.current_job_id,
        start_index=current_row,
    )
    logger.debug("bigquery replied: %s", query_reply)

    rows = []
    # Each reply carries at most one page of rows; keep requesting from
    # the next startIndex until all totalRows have been consumed. A reply
    # without a "rows" key means there is nothing (more) to fetch.
    while ("rows" in query_reply) and current_row < int(query_reply["totalRows"]):
        for row in query_reply["rows"]:
            rows.append(transform_row(row, query_reply["schema"]["fields"]))
        current_row += len(query_reply["rows"])

        query_result_request = {
            "projectId": project_id,
            "jobId": query_reply["jobReference"]["jobId"],
            "startIndex": current_row,
        }
        if location:
            query_result_request["location"] = location
        query_reply = jobs.getQueryResults(**query_result_request).execute()

    # REPEATED-mode fields arrive as lists, so expose them as strings;
    # otherwise map the BigQuery type through types_map (default: string).
    columns = [
        {
            "name": f["name"],
            "friendly_name": f["name"],
            "type": "string"
            if f.get("mode") == "REPEATED"
            else types_map.get(f["type"], "string"),
        }
        for f in query_reply["schema"]["fields"]
    ]

    data = {
        "columns": columns,
        "rows": rows,
        "metadata": {"data_scanned": int(query_reply["totalBytesProcessed"])},
    }

    return data