def process_files()

in src/batch_processor.py [0:0]
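Scans inputBucket for objects under the fileName prefix, parses each .csv object it finds via read_file, and hands any non-empty result to batch_process; total processing time is logged at the end.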


import urllib3
import boto3
from datetime import datetime


def process_files(inputBucket, fileName, region, dbTableName):
    try:
        # verify=False disables TLS certificate validation; disabling the
        # urllib3 warnings hides the InsecureRequestWarning this triggers.
        # Avoid this combination outside trusted test environments.
        urllib3.disable_warnings()
        s3 = boto3.resource('s3', verify=False)
        
        prefix = fileName
        print("region - " + region)
        bucket = s3.Bucket(name=inputBucket)
        filesNotFound = True  # flips to False once a CSV object is found
        startTime = datetime.now()

        # Scan every object under the prefix; only .csv files are processed.
        for files in bucket.objects.filter(Prefix=prefix):
            logMessage(fileName, 'files.key-' + files.key, LOGTYPE_DEBUG)
            isCSVFile = files.key.endswith(".csv")
            
            if isCSVFile:
                filesNotFound = False
                input_products = read_file(fileName, inputBucket, files.key, s3, dbTableName)
                
                if len(input_products) > 0:
                    batch_process(input_products, fileName, region, dbTableName)
                else:
                    logMessage(fileName, "No products could be found in bucket {}/{}".format(inputBucket, prefix), LOGTYPE_INFO)

        if filesNotFound:
            logMessage(fileName, "No file in {0}/{1}".format(inputBucket, prefix), LOGTYPE_INFO)
        
        endTime = datetime.now()
        diffTime = endTime - startTime
        logMessage(fileName, "Total processing time - " + str(diffTime.seconds), LOGTYPE_INFO) 

    except Exception as ex:
        logMessage(fileName, "Error processing files:" + str(ex), LOGTYPE_ERROR)