testsuite/driver/testglobals.py
def __init__(self):
# skip this test?
self.skip = False
# skip these ways
self.omit_ways = []
# skip all ways except these (None == do all ways)
self.only_ways = None
# add these ways to the default set
self.extra_ways = []
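# A sketch of how the three interact in an all.T entry, using the usual
# testlib setup helpers (the test name is hypothetical):
#   test('T12345', [omit_ways(['ghci']), extra_ways(['threaded1'])],
#        compile_and_run, [''])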
# the result we normally expect for this test
self.expect = 'pass'
# override the expected result for certain ways
self.expect_fail_for = []
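# For example, expect_broken(ticket) sets expect = 'fail' for every way,
# while expect_fail_for restricts the expected failure to certain ways
# (a sketch; the test name is hypothetical):
#   test('T54321', [expect_fail_for(['ghci'])], compile_and_run, [''])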
# the stdin file that this test will use ('' means <name>.stdin)
self.stdin = ''
# Override the expected stdout/stderr (and similar) files: a map from
# file suffix (e.g. 'stdout') to the file to use instead of the
# inferred <name>.<suffix>.
self.use_specs = {}
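# Example, mirroring the use_specs helper in testlib: reuse prof002's
# expected stdout instead of the inferred prof001.stdout (a sketch):
#   test('prof001', [use_specs({'stdout': 'prof002.stdout'})],
#        compile_and_run, ['-fprof-auto'])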
# don't compare output
self.ignore_stdout = False
self.ignore_stderr = False
# Backpack test
self.compile_backpack = False
# We sometimes want to modify the compiler_always_flags, so
# they are copied from config.compiler_always_flags when we
# make a new instance of TestOptions.
self.compiler_always_flags = []
# extra compiler opts for this test
self.extra_hc_opts = ''
# extra run opts for this test
self.extra_run_opts = ''
# expected exit code
self.exit_code = 0
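# Example: pass RTS flags at run time and accept a non-zero exit code
# (a sketch using the extra_run_opts and exit_code helpers):
#   test('T11111', [extra_run_opts('+RTS -N2 -RTS'), exit_code(1)],
#        compile_and_run, [''])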
# extra files to clean afterward
self.clean_files = []
# extra files to copy to the testdir
self.extra_files = []
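# Example: copy a helper module into the test directory first (a sketch;
# the file name is hypothetical):
#   test('T22222', [extra_files(['Helper.hs'])], compile_and_run, [''])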
# Map from a metric name to a pair (baseline function, allowed percentage
# deviation). The baseline function maps a way and a commit to the
# baseline value, or None if no baseline is available, e.g.
# { 'bytes allocated': (
#     lambda way, commit:
#       ...
#       None if way == way1 else
#       9300000000 if way == way2 else
#       ...
#   , 10) }
# This means no baseline is available for way1. For way2, allow a 10%
# deviation from 9300000000.
self.stats_range_fields = {}
# Is the test testing performance?
self.is_stats_test = False
# Does this test measure the compiler's performance, as opposed to the
# performance of the generated code?
self.is_compiler_stats_test = False
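# These two are normally set together with stats_range_fields, via the
# collect_stats helper (run-time metrics of the generated code) or
# collect_compiler_stats (metrics of the compiler itself), e.g. a sketch
# allowing a 10% deviation from the recorded baseline:
#   test('T33333', [collect_compiler_stats('bytes allocated', 10)],
#        compile, [''])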
# Should we run this test alone, i.e. not in parallel with any other
# test?
self.alone = False
# Does this test use a literate (.lhs) file?
self.literate = False
# Does this test use a .c, .m or .mm file?
self.c_src = False
self.objc_src = False
self.objcpp_src = False
# Does this test use a .cmm file?
self.cmm_src = False
# Should we put .hi/.o files in a subdirectory?
self.outputdir = None
# Command to run before the test
self.pre_cmd = None
# Command wrapper: a function to apply to the command before running it
self.cmd_wrapper = None
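# Example: run a make target first, then wrap the final command; the
# wrapper receives the command string and returns the modified one
# (a sketch; the test name and wrapper are hypothetical):
#   def with_timeout(cmd):
#       return 'timeout 10 ' + cmd
#   test('T44444', [pre_cmd('$MAKE -s --no-print-directory T44444_setup'),
#                   cmd_wrapper(with_timeout)], compile_and_run, [''])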
# Prefix to prepend to the compile command
self.compile_cmd_prefix = ''
# Extra output normalisation
self.extra_normaliser = lambda x: x
# Custom output checker; if None, compare against the expected stdout
# file. The checker takes two arguments: the filename of the actual
# stdout output, and a normaliser function built from the other test
# options.
self.check_stdout = None
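# Example checker: accept the output as long as it mentions a marker,
# assuming a truthy return value means the check passed and a
# check_stdout setter analogous to the other helpers (a sketch; names
# are hypothetical):
#   def my_check(actual_file, normaliser):
#       with open(actual_file) as f:
#           return 'marker' in normaliser(f.read())
#   test('T55555', [check_stdout(my_check)], compile_and_run, [''])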
# Check .hp file when profiling libraries are available?
self.check_hp = True
# Extra normalisation for compiler error messages
self.extra_errmsg_normaliser = lambda x: x
# Keep profiling callstacks.
self.keep_prof_callstacks = False
# The directory the test is in
self.testdir = '.'
# Should we redirect stdout and stderr to a single file?
self.combined_output = False
# How should the timeout be adjusted on this test?
self.compile_timeout_multiplier = 1.0
self.run_timeout_multiplier = 1.0
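# Example: give a slow test triple the usual run-time budget (a sketch
# using the run_timeout_multiplier helper):
#   test('T66666', [run_timeout_multiplier(3.0)], compile_and_run, [''])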
# Should the test directory be cleaned up after the test runs?
self.cleanup = True
# Should we run tests in a local subdirectory (<testname>-run) or
# in a temporary directory in /tmp? See Note [Running tests in /tmp].
self.local = True