Function: checkBenchmark — defined in regression_report_v2.py


def checkBenchmark(args, exe, benchmark, execNames):
    """Fetch recent history for one (executable, benchmark) pair and run
    regression detection on it.

    Args:
        args: parsed CLI namespace; must provide codespeedUrl,
            numBaselineSamples, numDisplaySamples, minRegressionRatio and
            minInstabilityMultiplier.
        exe: Codespeed executable id whose history is checked.
        benchmark: Codespeed benchmark id.
        execNames: mapping from executable id to a human-readable name
            (used when reporting).

    Returns:
        None. Any reporting happens inside detectRegression; the function
        also returns early (silently) when there is too little history.
    """
    # Fetch a few samples beyond the baseline window so the detector has
    # candidate points to compare against the baseline.
    results, lessIsBetter = loadHistoryData(
        args.codespeedUrl, exe, benchmark, args.numBaselineSamples + 3)
    results = list(reversed(results))  # oldest-first order

    # Not enough history to say anything meaningful — bail out before
    # building the URL or unpacking the samples (previously this check ran
    # after that work was already done).
    if len(results) < MIN_SAMPLE_SIZE_LIMIT:
        return

    scores = [score for (date, score, deviation, commit, branch) in results]
    stds = [deviation for (date, score, deviation, commit, branch) in results]

    # Deep link to the Codespeed timeline chart for this benchmark.
    urlToBenchmark = args.codespeedUrl + 'timeline/#/?' + urllib.urlencode({
        'ben': benchmark,
        'exe': exe,
        'env': ENVIRONMENT,
        'revs': args.numDisplaySamples,
        'equid': 'off',
        'quarts': 'on',
        'extr': 'on'})

    # detectRegression assumes "higher is better"; negate the scores when
    # lower values are better so a regression always shows up as a drop.
    direction = 1
    if lessIsBetter:
        scores = [-score for score in scores]
        direction = -1
    detectRegression(urlToBenchmark, stds, scores, args.numBaselineSamples,
                     args.minRegressionRatio, args.minInstabilityMultiplier,
                     direction, execNames[exe])