def test_common_work()

in testsuite/driver/testlib.py

Runs one test in all of its selected ways: it picks the candidate ways from
the test function, filters them against skips and command-line restrictions,
collects the files the test depends on, runs each remaining way, cleans up,
and finally checks that the test did not disturb the package database cache.


def test_common_work(watcher, name, opts, func, args):
    try:
        t.total_tests += 1
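        # Make `opts` available to the helpers used below via getTestOpts()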
        setLocalTestOpts(opts)

        package_conf_cache_file_start_timestamp = get_package_cache_timestamp()
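        # (compared against a second snapshot at the end of this function:
        # a test must not modify the package database cache behind the
        # driver's back)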

        # All the ways we might run this test
        if func == compile or func == multimod_compile:
            all_ways = config.compile_ways
        elif func == compile_and_run or func == multimod_compile_and_run:
            all_ways = config.run_ways
        elif func == ghci_script:
            if 'ghci' in config.run_ways:
                all_ways = ['ghci']
            else:
                all_ways = []
        else:
            all_ways = ['normal']

        # A test itself can request extra ways by setting opts.extra_ways
        all_ways = all_ways + [way for way in opts.extra_ways if way not in all_ways]

        t.total_test_cases += len(all_ways)

        def ok_way(way):
            return (not getTestOpts().skip
                    and (getTestOpts().only_ways is None or way in getTestOpts().only_ways)
                    and (config.cmdline_ways == [] or way in config.cmdline_ways)
                    and not (config.skip_perf_tests and isStatsTest())
                    and not (config.only_perf_tests and not isStatsTest())
                    and way not in getTestOpts().omit_ways)

        # The ways we are actually going to run (the rest are skipped)
        do_ways = list(filter(ok_way, all_ways))

        # Only run all ways in slow mode.
        # See Note [validate and testsuite speed] in toplevel Makefile.
        if config.accept:
            # Only ever run one way
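            # (presumably because accepting new expected output from more
            # than one way at a time would be ambiguous)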
            do_ways = do_ways[:1]
        elif config.speed > 0:
            # In non-slow modes run only one way, but always honour any
            # way that was explicitly requested via extra_ways.
            explicit_ways = list(filter(lambda way: way in opts.extra_ways, do_ways))
            other_ways = list(filter(lambda way: way not in opts.extra_ways, do_ways))
            do_ways = other_ways[:1] + explicit_ways
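            # Worked example (hypothetical values): with
            #   do_ways == ['normal', 'optasm', 'threaded1']
            # and opts.extra_ways == ['threaded1'], this keeps
            #   ['normal', 'threaded1'].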

        # Find all files in the source directory that this test
        # depends on. Do this only once for all ways.
        # Generously add all filenames that start with the name of
        # the test to this set, as a convenience to test authors.
        # They will have to use the `extra_files` setup function to
        # specify all other files that their test depends on (but
        # this seems to be necessary for only about 10% of all
        # tests).
        files = set(f for f in os.listdir(opts.srcdir)
                       if f.startswith(name) and f != name and
                          not f.endswith(testdir_suffix) and
                          os.path.splitext(f)[1] not in do_not_copy)
        for filename in (opts.extra_files + extra_src_files.get(name, [])):
            if filename.startswith('/'):
                framework_fail(name, 'whole-test',
                    'no absolute paths in extra_files please: ' + filename)

            elif '*' in filename:
                # Don't use wildcards in extra_files too much, as
                # globbing is slow.
                files.update(os.path.relpath(f, opts.srcdir)
                             for f in glob.iglob(in_srcdir(filename)))

            elif filename:
                files.add(filename)

            else:
                framework_fail(name, 'whole-test', 'extra_file is empty string')

        # Run the required tests...
        for way in do_ways:
            if stopping():
                break
            try:
                do_test(name, way, func, args, files)
            except KeyboardInterrupt:
                stopNow()
            except Exception as e:
                framework_fail(name, way, str(e))
                traceback.print_exc()

        t.n_tests_skipped += len(set(all_ways) - set(do_ways))

        if config.cleanup and do_ways:
            try:
                cleanup()
            except Exception as e:
                framework_fail(name, 'runTest', 'Unhandled exception during cleanup: ' + str(e))

        package_conf_cache_file_end_timestamp = get_package_cache_timestamp()

        if package_conf_cache_file_start_timestamp != package_conf_cache_file_end_timestamp:
            framework_fail(name, 'whole-test',
                'Package cache timestamps do not match: ' +
                str(package_conf_cache_file_start_timestamp) + ' ' +
                str(package_conf_cache_file_end_timestamp))

    except Exception as e:
        framework_fail(name, 'runTest', 'Unhandled exception: ' + str(e))
    finally:
        watcher.notify()
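
The `watcher.notify()` in the `finally` block is the per-test completion
signal for the parallel driver. As a minimal sketch of that pattern,
assuming a countdown-latch style watcher (the real class in testlib.py may
differ in detail):

import threading

class Watcher:
    """Countdown latch: wait() returns once notify() has been called
    `count` times. A sketch only; testlib.py's own watcher may differ."""
    def __init__(self, count):
        self.pool = count
        self.evt = threading.Event()
        self.lock = threading.Lock()
        if count <= 0:
            self.evt.set()          # nothing to wait for

    def wait(self):
        self.evt.wait()

    def notify(self):
        with self.lock:
            self.pool -= 1
            if self.pool <= 0:
                self.evt.set()      # last test finished; release wait()

A driver loop could then hand each test to a worker and block until all of
them have reported back (thread-per-test here only for brevity, and `tests`
is a hypothetical list; the real driver pools its workers):

watcher = Watcher(len(tests))
for name, opts, func, args in tests:
    threading.Thread(target=test_common_work,
                     args=(watcher, name, opts, func, args)).start()
watcher.wait()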