def run()

in pylib/mercurial-support/run-tests.py [0:0]


    def run(self, test):
        """Run *test*, print a human-readable summary to ``self.stream``,
        and return the populated result object.

        All output — per-test skip/failure/error lines, optional
        xunit/JSON reports, and the final "# Ran ..." summary — is
        emitted while holding ``iolock`` so concurrent writers cannot
        interleave with it.
        """
        self._result.onStart(test)
        test(self._result)

        num_failed = len(self._result.failures)
        num_skipped = len(self._result.skipped)
        num_ignored = len(self._result.ignored)

        with iolock:
            self.stream.writeln('')

            # Per-test skip lines, suppressed when --noskips was given.
            if not self._runner.options.noskips:
                skips = sorted(self._result.skipped, key=lambda s: s[0].name)
                for skipped_test, reason in skips:
                    text = 'Skipped %s: %s\n' % (skipped_test.name, reason)
                    self.stream.write(highlightmsg(text, self._result.color))

            fails = sorted(self._result.failures, key=lambda f: f[0].name)
            for failed_test, reason in fails:
                text = 'Failed %s: %s\n' % (failed_test.name, reason)
                self.stream.write(highlightmsg(text, self._result.color))

            errs = sorted(self._result.errors, key=lambda e: e[0].name)
            for errored_test, reason in errs:
                self.stream.writeln(
                    'Errored %s: %s' % (errored_test.name, reason)
                )

            # Optional machine-readable reports.
            if self._runner.options.xunit:
                with open(self._runner.options.xunit, "wb") as xuf:
                    self._writexunit(self._result, xuf)

            if self._runner.options.json:
                jsonpath = os.path.join(
                    self._runner._outputdir, b'report.json'
                )
                with open(jsonpath, 'w') as fp:
                    self._writejson(self._result, fp)

            self._runner._checkhglib('Tested')

            savetimes(self._runner._outputdir, self._result)

            # With --known-good-rev, bisect each failure to find the
            # first bad revision.
            if num_failed and self._runner.options.known_good_rev:
                self._bisecttests(t for t, m in self._result.failures)

            self.stream.writeln(
                '# Ran %d tests, %d skipped, %d failed.'
                % (
                    self._result.testsRun,
                    num_skipped + num_ignored,
                    num_failed,
                )
            )
            if num_failed:
                self.stream.writeln(
                    'python hash seed: %s' % os.environ['PYTHONHASHSEED']
                )
            if self._runner.options.time:
                self.printtimes(self._result.times)

            if self._runner.options.exceptions:
                exceptions = aggregateexceptions(
                    os.path.join(self._runner._outputdir, b'exceptions')
                )

                self.stream.writeln('Exceptions Report:')
                self.stream.writeln(
                    '%d total from %d frames'
                    % (exceptions['total'], len(exceptions['exceptioncounts']))
                )
                combined = exceptions['combined']
                # Most frequent exception sites first.
                for key in sorted(combined, key=combined.get, reverse=True):
                    frame, line, exc = key
                    totalcount, testcount, leastcount, leasttest = combined[
                        key
                    ]

                    self.stream.writeln(
                        '%d (%d tests)\t%s: %s (%s - %d total)'
                        % (
                            totalcount,
                            testcount,
                            frame,
                            exc,
                            leasttest,
                            leastcount,
                        )
                    )

            self.stream.flush()

        return self._result