# framework/resources/Functional/odbcTest.py [0:0]
def _normalizeField (field, drillType):
    """Convert one result field to the string form used by the Drill
    expected-results files.

    Simba ODBC returns some values in a slightly different text form than
    the Apache Drill driver; this normalizes booleans, times, timestamps,
    and JSON-like varchars so comparisons against expected results pass.

    Parameters:
      field     - the field value (may already be the string "null")
      drillType - the Drill type name recorded for this column

    Returns the normalized string representation of the field.
    """
    fieldString = str(field)
    if drillType == BOOLEAN:
        # Python spells booleans "True"/"False"; Drill uses lowercase.
        if fieldString == "True":
            return "true"
        if fieldString == "False":
            return "false"
    if drillType == TIME and fieldString != "null":
        # Drill only has milliseconds: strip the trailing microsecond
        # digits, then re-append the raw microsecond value so the text
        # matches what Drill prints.
        if field.microsecond > 0:
            fieldString = fieldString.rstrip('0123456789')
            fieldString = "%s%s" % (fieldString, field.microsecond)
        return fieldString
    if drillType == TIMESTAMP and fieldString != "null":
        try:
            parsed = datetime.datetime.strptime(fieldString, "%Y-%m-%d %H:%M:%S.%f")
            if parsed.microsecond > 0:
                # Strip ODBC's zero-padding on the fractional seconds.
                fieldString = fieldString.rstrip('0')
        except ValueError:
            # No fractional part at all; validate the format, then append
            # the ".0" that Drill always prints.
            datetime.datetime.strptime(fieldString, "%Y-%m-%d %H:%M:%S")
            fieldString = fieldString + ".0"
        return fieldString
    if drillType == VARCHAR and len(fieldString) > 0 and fieldString[0] in ('{', '['):
        # For varchars that look like JSON objects/arrays, remove spaces
        # and newlines unless they are inside a double-quoted string, so
        # formatting differences do not break the comparison.
        pieces = []
        inQuotes = False
        start = 0
        lastIndex = len(fieldString) - 1
        for current, character in enumerate(fieldString):
            if character == "\"":
                inQuotes = not inQuotes
            if current == lastIndex:
                # Last character: flush the remainder (kept even if it is
                # an unquoted space, matching the original behavior).
                pieces.append(fieldString[start:])
            elif (character == ' ' or character == '\n') and not inQuotes:
                pieces.append(fieldString[start:current])
                start = current + 1
        return "".join(pieces)
    return fieldString

def SaveData (rows, queryDir, queryFileName, outputFileExt, resultFileCreated):
    """Save a query's results and column metadata to disk.

    Writes three files derived from the global ODBC `cursor`'s description
    and the fetched `rows`:
      <queryFileName>.type          - one Drill type name per column
      <queryFileName>.label         - one column name per column
      <queryFileName><outputFileExt> - tab-delimited, normalized row data

    Parameters:
      rows              - fetched result rows (mutated: None -> "null")
      queryDir          - directory of the query file; default output dir
      queryFileName     - base name for all generated files
      outputFileExt     - extension for the row-data output file
      resultFileCreated - if true (and no explicit output file was given),
                          write the data file into the global `resultDir`

    Exits the process with a non-zero status and a diagnostic if a column's
    ODBC type cannot be mapped to a Drill type.

    Relies on module globals: cursor, pyodbc, outputFileGiven, outputFileArg,
    resultDir, NONETYPE, and the Drill type-name constants (BIGINT, BOOLEAN,
    DATE, DECIMAL, FLOAT, INTEGER, TIME, TIMESTAMP, VARBINARY, VARCHAR).
    """
    # Dummy classes for "special" data types the driver returns as native
    # Python objects instead of the classic types-module constants.
    newdecimal = type(decimal.Decimal(0))
    newdate = type(datetime.date(1, 2, 3))
    newtimestamp = type(datetime.datetime(1, 2, 3))
    newtime = type(datetime.time(1, 2, 3))
    # ODBC column type -> Drill type name. Loop-invariant, so build it once
    # (the original rebuilt this dict for every column).
    typeMap = {
        LongType: BIGINT,
        BooleanType: BOOLEAN,
        newdate: DATE,
        newdecimal: DECIMAL,
        FloatType: FLOAT,
        IntType: INTEGER,
        newtime: TIME,
        newtimestamp: TIMESTAMP,
        BufferType: VARBINARY,
        StringType: VARCHAR
    }
    # Determine the column names and data types returned by this query and
    # record them in the ".type" and ".label" files.
    dataType = []
    typePath = "%s/%s%s" % (queryDir, queryFileName, ".type")
    labelPath = "%s/%s%s" % (queryDir, queryFileName, ".label")
    typeFile = open(typePath, 'w')
    columnFile = open(labelPath, 'w')
    try:
        for column in cursor.description:
            getType = typeMap.get(column[1], NONETYPE)
            if getType == NONETYPE:
                if column[1] == pyodbc.DATETIME:
                    getType = TIMESTAMP
                else:
                    # Fail loudly with a non-zero exit; the original bare
                    # sys.exit() exited "successfully" with no diagnostic.
                    sys.exit("SaveData: unsupported column type %s for column %s"
                             % (column[1], column[0]))
            dataType.append(getType)
            typeFile.write("%s\n" % getType)
            columnFile.write("%s\n" % column[0])
    finally:
        typeFile.close()
        columnFile.close()
    # Open the output file. By default it is created in the same directory
    # as the query file; an explicit output file (outputFileGiven) or a
    # separate results directory (resultFileCreated) overrides that.
    if outputFileGiven:
        outputFile = outputFileArg
    elif resultFileCreated:
        outputFile = "%s/%s%s" % (resultDir, queryFileName, outputFileExt)
    else:
        outputFile = "%s/%s%s" % (queryDir, queryFileName, outputFileExt)
    outputFileH = open(outputFile, 'w')
    try:
        for row in rows:
            # Represent SQL NULLs the way the expected-results files do.
            for index, value in enumerate(row):
                if value is None:
                    row[index] = "null"
            # Normalize each field and join with tabs. Using join (rather
            # than the original "skip the tab when the accumulator is
            # falsy" logic) preserves the separator even when a field is
            # the empty string.
            fields = [_normalizeField(field, dataType[index])
                      for index, field in enumerate(row)]
            outputFileH.write("%s\n" % "\t".join(fields))
    finally:
        outputFileH.close()