pysparksamples/copy_number_job.py [114:138]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
            }
        }
    ]
}


# With a GET request, the filters parameter needs to be converted
# from a dictionary to JSON-formatted string

# Query parameters for the GDC files endpoint.  With a GET request the
# "filters" parameter must be serialized from a dict to a JSON string.
params = {
    "filters": json.dumps(filters),
    "fields": fields,
    "format": "JSON",
    "size": size,
}

## query the files endpoint and get back JSON response

query_response = requests.get(files_endpt, params=params)
# Fail fast with a clear HTTPError on a non-2xx status instead of a
# confusing KeyError / JSONDecodeError further down.
query_response.raise_for_status()

# requests decodes the payload itself, so Response.json() replaces the
# manual .content.decode("utf-8") + json.loads round trip.
json_response = query_response.json()["data"]["hits"]

## Parallel read of JSON object
# Wrapping the hits in a one-element RDD lets Spark infer the schema
# from the JSON structure.
df = spark.read.json(sc.parallelize([json_response]))
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -



pysparksamples/mutation_job.py [123:146]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
            }
        }
    ]
}

# With a GET request, the filters parameter needs to be converted
# from a dictionary to JSON-formatted string

# Query parameters for the GDC files endpoint.  With a GET request the
# "filters" parameter must be serialized from a dict to a JSON string.
params = {
    "filters": json.dumps(filters),
    "fields": fields,
    "format": "JSON",
    "size": size,
}

## query the files endpoint and get back JSON response

query_response = requests.get(files_endpt, params=params)
# Fail fast with a clear HTTPError on a non-2xx status instead of a
# confusing KeyError / JSONDecodeError further down.
query_response.raise_for_status()

# requests decodes the payload itself, so Response.json() replaces the
# manual .content.decode("utf-8") + json.loads round trip.
json_response = query_response.json()["data"]["hits"]

## Parallel read of JSON object
# Wrapping the hits in a one-element RDD lets Spark infer the schema
# from the JSON structure.
df = spark.read.json(sc.parallelize([json_response]))
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -



