in scripts/sap-data-extraction.py [0:0]
def _extract(skip_key):
    """Fetch one page of OData entities from SAP and store/return the result.

    Builds an OData request for ``odpEntitySetName`` using ``$top=5000`` and
    the given ``$skip`` offset, strips the ``__metadata`` envelope from each
    row, and then either uploads the rows to S3 (when ``dataS3Bucket`` is
    configured) or embeds them in the module-level ``response``.

    Side effects:
        Sets the module-level ``response`` via ``_setResponse`` and may write
        an object to S3.

    Args:
        skip_key: Offset (int or str) for the OData ``$skip`` query option.
    """
    global response
    skip_key = str(skip_key)
    url = _get_base_url() + "/" + odpEntitySetName + "?$format=json&$top=5000&$skip=" + skip_key
    print(url)
    headers = {
        # odata.track-changes enables delta-token handling on the SAP side.
        "prefer": "odata.maxpagesize=" + dataChunkSize + ",odata.track-changes"
    }
    sapresponse = _make_http_call_to_sap(url, headers)
    sapresponsebody = json.loads(sapresponse.text)
    # Keep a pristine copy for the response payload before mutating below.
    _response = copy.deepcopy(sapresponsebody)
    d = sapresponsebody.pop('d', None)
    # Guard against a malformed/unexpected payload: treat a missing 'd' or
    # 'results' key as "no data" instead of raising TypeError on None.
    results = (d or {}).pop('results', None) or []
    for result in results:
        # Drop the per-row OData metadata envelope; keep only business fields.
        result.pop('__metadata', None)
    if not results:
        response = _setResponse(True, "No data available to extract from SAP", _response, 0)
    elif dataS3Bucket != "":
        s3 = boto3.resource('s3')
        # Random 6-hex prefix avoids key collisions across repeated extracts.
        fileName = ''.join([dataS3Folder, '/', str(uuid.uuid4().hex[:6]), odpServiceName, "_", odpEntitySetName, ".json"])
        s3_object = s3.Object(dataS3Bucket, fileName)
        if _athenacompatiblejson:
            # Athena expects newline-delimited JSON rather than a JSON array.
            s3_object.put(Body=_athenaJson(results))
        else:
            s3_object.put(Body=json.dumps(results, indent=4))
        response = _setResponse(True, "Data successfully extracted and stored in S3 Bucket with key " + fileName, None, len(results))
    else:
        response = _setResponse(True, "Data successfully extracted from SAP", _response, len(results))