def do_test()

in testsuite/driver/testlib.py


def do_test(name, way, func, args, files):
    opts = getTestOpts()

    full_name = name + '(' + way + ')'

    if_verbose(2, "=====> {0} {1} of {2} {3}".format(
        full_name, t.total_tests, len(allTestNames),
        [len(t.unexpected_passes),
         len(t.unexpected_failures),
         len(t.framework_failures)]))
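    # The progress line printed above looks like, e.g.:
    #   =====> T12345(normal) 2 of 3000 [0, 1, 0]
    # i.e. tests run so far, total tests, and running counts of
    # [unexpected passes, unexpected failures, framework failures].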

    # Clean up prior to the test, so that we can't spuriously conclude
    # that it passed on the basis of old run outputs.
    cleanup()
    os.makedirs(opts.testdir)

    # Link all source files for this test into a new directory in
    # /tmp, and run the test in that directory. This makes it
    # possible to run tests in parallel, without modification, that
    # would otherwise (accidentally) write to the same output file.
    # It also makes it easier to keep the testsuite clean.

    for extra_file in files:
        src = in_srcdir(extra_file)
        dst = in_testdir(os.path.basename(extra_file.rstrip('/\\')))
        if os.path.isfile(src):
            link_or_copy_file(src, dst)
        elif os.path.isdir(src):
            if os.path.exists(dst):
                shutil.rmtree(dst)
            os.mkdir(dst)
            lndir(src, dst)
        else:
            if not config.haddock and os.path.splitext(extra_file)[1] == '.t':
                # When using a ghc built without haddock support, .t
                # files are rightfully missing. Don't call framework_fail;
                # the test will be skipped later.
                pass
            else:
                framework_fail(name, way,
                    'extra_file does not exist: ' + extra_file)
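    # Illustration (hypothetical file names): with files = ['Foo.hs', 'data/'],
    # the loop above yields
    #   <srcdir>/Foo.hs -> <testdir>/Foo.hs  (hard link or copy)
    #   <srcdir>/data/  -> <testdir>/data/   (a fresh directory of links via lndir)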

    if func.__name__ == 'run_command' or func.__name__ == 'makefile_test' or opts.pre_cmd:
        # When running 'MAKE', make sure 'TOP' still points to the
        # root of the testsuite.
        src_makefile = in_srcdir('Makefile')
        dst_makefile = in_testdir('Makefile')
        if os.path.exists(src_makefile):
            with io.open(src_makefile, 'r', encoding='utf8') as src:
                makefile = re.sub('TOP=.*', 'TOP=' + config.top, src.read(), 1)
                with io.open(dst_makefile, 'w', encoding='utf8') as dst:
                    dst.write(makefile)
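    # Illustration: a Makefile line such as
    #   TOP=../..
    # is rewritten, in the copy placed in the test directory, to
    #   TOP=<value of config.top>
    # Only the first match is replaced (the count argument to re.sub above).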

    if opts.pre_cmd:
        exit_code = runCmd('cd "{0}" && {1}'.format(opts.testdir, override_options(opts.pre_cmd)),
                           stderr = subprocess.STDOUT,
                           print_output = config.verbose >= 3)

        # If the user used expect_broken, then don't record pre_cmd failures.
        if exit_code != 0 and opts.expect not in ['fail']:
            framework_fail(name, way, 'pre_cmd failed: {0}'.format(exit_code))
            if_verbose(1, '** pre_cmd was "{0}".'.format(override_options(opts.pre_cmd)))
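    # A pre_cmd is declared in a test's all.T, for instance
    # pre_cmd('$MAKE -s setup'); it runs in opts.testdir before func is
    # invoked, and a non-zero exit is recorded as a framework failure
    # unless the test is expected to fail (e.g. via expect_broken).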

    # func is the test function for this way (e.g. compile, compile_and_run);
    # it is expected to return a dict with at least a 'passFail' key.
    result = func(name, way, *args)

    if opts.expect not in ['pass', 'fail', 'missing-lib']:
        framework_fail(name, way, 'bad expected ' + opts.expect)

    try:
        passFail = result['passFail']
    except (KeyError, TypeError):
        passFail = 'No passFail found'

    # Strip any leading './' or '.\' from the test directory for reporting.
    directory = re.sub('^\\.[/\\\\]', '', opts.testdir)

    if passFail == 'pass':
        if _expect_pass(way):
            t.expected_passes.append(TestResult(directory, name, "", way))
            t.n_expected_passes += 1
        else:
            if_verbose(1, '*** unexpected pass for %s' % full_name)
            t.unexpected_passes.append(TestResult(directory, name, 'unexpected', way))
    elif passFail == 'fail':
        if _expect_pass(way):
            reason = result['reason']
            tag = result.get('tag')
            if tag == 'stat':
                if_verbose(1, '*** unexpected stat test failure for %s' % full_name)
                t.unexpected_stat_failures.append(TestResult(directory, name, reason, way))
            else:
                if_verbose(1, '*** unexpected failure for %s' % full_name)
                result = TestResult(directory, name, reason, way, stderr=result.get('stderr'))
                t.unexpected_failures.append(result)
        else:
            if opts.expect == 'missing-lib':
                t.missing_libs.append(TestResult(directory, name, 'missing-lib', way))
            else:
                t.n_expected_failures += 1
    else:
        framework_fail(name, way, 'bad result ' + passFail)
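
A minimal sketch of the result-dictionary contract that the classification
above relies on. The helper names here are hypothetical, not the real
constructors from testlib.py; they show only the fields do_test reads.

def sketch_passed():
    # Hypothetical sketch: the 'pass' branch inspects only 'passFail'.
    return {'passFail': 'pass'}

def sketch_failed(reason, tag=None, stderr=None):
    # Hypothetical sketch: the 'fail' branch also reads 'reason', and
    # optionally 'tag' ('stat' selects the stat-failure bucket) and 'stderr'.
    return {'passFail': 'fail', 'reason': reason, 'tag': tag, 'stderr': stderr}

Any other 'passFail' value, or a result that is not a dict at all, falls
through to the final framework_fail branch above.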