func (tg *testGroup) test()

in cmd/promtool/unittest.go [154:385]


func (tg *testGroup) test(evalInterval time.Duration, groupOrderMap map[string]int, queryOpts promql.LazyLoaderOpts, ruleFiles ...string) []error {
	// Set up the testing suite.
	suite, err := promql.NewLazyLoader(nil, tg.seriesLoadingString(), queryOpts)
	if err != nil {
		return []error{err}
	}
	defer suite.Close()
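	// Subqueries without an explicit step use the test's evaluation interval
	// as their default resolution.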
	suite.SubqueryInterval = evalInterval

	// Load the rule files.
	opts := &rules.ManagerOptions{
		QueryFunc:  rules.EngineQueryFunc(suite.QueryEngine(), suite.Storage()),
		Appendable: suite.Storage(),
		Context:    context.Background(),
		NotifyFunc: func(ctx context.Context, expr string, alerts ...*rules.Alert) {},
		Logger:     log.NewNopLogger(),
	}
	m := rules.NewManager(opts)
	groupsMap, ers := m.LoadGroups(time.Duration(tg.Interval), tg.ExternalLabels, tg.ExternalURL, nil, ruleFiles...)
	if ers != nil {
		return ers
	}
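	// Evaluate the groups in the caller-provided order (groupOrderMap).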
	groups := orderedGroups(groupsMap, groupOrderMap)

	// Bounds for evaluating the rules.
	mint := time.Unix(0, 0).UTC()
	maxt := mint.Add(tg.maxEvalTime())

	// Pre-process some data for testing alerts.
	// All this preparation is so that we can check alerts as we evaluate the rules.
	// This avoids storing the alert state of every evaluation in memory, as the number of evals might be high.

	// All the `eval_time` for which we have unit tests for alerts.
	alertEvalTimesMap := map[model.Duration]struct{}{}
	// Map of all the eval_time+alertname combinations present in the unit tests.
	alertsInTest := make(map[model.Duration]map[string]struct{})
	// Map of all the unit tests for a given eval_time.
	alertTests := make(map[model.Duration][]alertTestCase)
	for _, alert := range tg.AlertRuleTests {
		if alert.Alertname == "" {
			var testGroupLog string
			if tg.TestGroupName != "" {
				testGroupLog = fmt.Sprintf(" (in TestGroup %s)", tg.TestGroupName)
			}
			return []error{fmt.Errorf("an item under alert_rule_test misses required attribute alertname at eval_time %v%s", alert.EvalTime, testGroupLog)}
		}
		alertEvalTimesMap[alert.EvalTime] = struct{}{}

		if _, ok := alertsInTest[alert.EvalTime]; !ok {
			alertsInTest[alert.EvalTime] = make(map[string]struct{})
		}
		alertsInTest[alert.EvalTime][alert.Alertname] = struct{}{}

		alertTests[alert.EvalTime] = append(alertTests[alert.EvalTime], alert)
	}
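	// Sort the collected eval_times so that alerts can be checked in
	// chronological order as ts advances through the evaluation loop.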
	alertEvalTimes := make([]model.Duration, 0, len(alertEvalTimesMap))
	for k := range alertEvalTimesMap {
		alertEvalTimes = append(alertEvalTimes, k)
	}
	sort.Slice(alertEvalTimes, func(i, j int) bool {
		return alertEvalTimes[i] < alertEvalTimes[j]
	})

	// Current index in alertEvalTimes that we are looking at.
	curr := 0

	for _, g := range groups {
		for _, r := range g.Rules() {
			if alertRule, ok := r.(*rules.AlertingRule); ok {
				// Mark alerting rules as restored, to ensure the ALERTS timeseries is
				// created when they run.
				alertRule.SetRestored(true)
			}
		}
	}

	var errs []error
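	// Step through time from mint to maxt in evalInterval increments, evaluating
	// every rule group at each step and checking alert expectations along the way.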
	for ts := mint; ts.Before(maxt) || ts.Equal(maxt); ts = ts.Add(evalInterval) {
		// Collect the alerts asked for in the unit tests.
		var evalErrs []error
		suite.WithSamplesTill(ts, func(err error) {
			if err != nil {
				errs = append(errs, err)
				return
			}
			for _, g := range groups {
				g.Eval(suite.Context(), ts)
				for _, r := range g.Rules() {
					if r.LastError() != nil {
						evalErrs = append(evalErrs, fmt.Errorf("    rule: %s, time: %s, err: %v",
							r.Name(), ts.Sub(time.Unix(0, 0).UTC()), r.LastError()))
					}
				}
			}
		})
		errs = append(errs, evalErrs...)
		// Only stop testing at this point if evaluation errors occurred above;
		// test failures already collected in errs do not abort the run.
		if len(evalErrs) > 0 {
			return errs
		}

		for {
			if !(curr < len(alertEvalTimes) && ts.Sub(mint) <= time.Duration(alertEvalTimes[curr]) &&
				time.Duration(alertEvalTimes[curr]) < ts.Add(evalInterval).Sub(mint)) {
				break
			}

			// We need to check alerts for this time.
			// If 'ts <= `eval_time=alertEvalTimes[curr]` < ts+evalInterval'
			// then we compare alerts with the Eval at `ts`.
			t := alertEvalTimes[curr]

			presentAlerts := alertsInTest[t]
			got := make(map[string]labelsAndAnnotations)

			// The same alert name can be present in multiple groups.
			// Hence we collect them all to check against the expected alerts.
			for _, g := range groups {
				grules := g.Rules()
				for _, r := range grules {
					ar, ok := r.(*rules.AlertingRule)
					if !ok {
						continue
					}
					if _, ok := presentAlerts[ar.Name()]; !ok {
						continue
					}

					var alerts labelsAndAnnotations
					for _, a := range ar.ActiveAlerts() {
						if a.State == rules.StateFiring {
							alerts = append(alerts, labelAndAnnotation{
								Labels:      a.Labels.Copy(),
								Annotations: a.Annotations.Copy(),
							})
						}
					}

					got[ar.Name()] = append(got[ar.Name()], alerts...)
				}
			}

			for _, testcase := range alertTests[t] {
				// Checking alerts.
				gotAlerts := got[testcase.Alertname]

				var expAlerts labelsAndAnnotations
				for _, a := range testcase.ExpAlerts {
					// The user gives only the labels from the alerting rule, which don't
					// include this label (it is added by Prometheus during Eval).
					if a.ExpLabels == nil {
						a.ExpLabels = make(map[string]string)
					}
					a.ExpLabels[labels.AlertName] = testcase.Alertname

					expAlerts = append(expAlerts, labelAndAnnotation{
						Labels:      labels.FromMap(a.ExpLabels),
						Annotations: labels.FromMap(a.ExpAnnotations),
					})
				}

				sort.Sort(gotAlerts)
				sort.Sort(expAlerts)

				if !reflect.DeepEqual(expAlerts, gotAlerts) {
					var testName string
					if tg.TestGroupName != "" {
						testName = fmt.Sprintf("    name: %s,\n", tg.TestGroupName)
					}
					expString := indentLines(expAlerts.String(), "            ")
					gotString := indentLines(gotAlerts.String(), "            ")
					errs = append(errs, fmt.Errorf("%s    alertname: %s, time: %s, \n        exp:%v, \n        got:%v",
						testName, testcase.Alertname, testcase.EvalTime.String(), expString, gotString))
				}
			}

			curr++
		}
	}

	// Checking promql expressions.
Outer:
	for _, testCase := range tg.PromqlExprTests {
		got, err := query(suite.Context(), testCase.Expr, mint.Add(time.Duration(testCase.EvalTime)),
			suite.QueryEngine(), suite.Queryable())
		if err != nil {
			errs = append(errs, fmt.Errorf("    expr: %q, time: %s, err: %s", testCase.Expr,
				testCase.EvalTime.String(), err.Error()))
			continue
		}

		var gotSamples []parsedSample
		for _, s := range got {
			gotSamples = append(gotSamples, parsedSample{
				Labels: s.Metric.Copy(),
				Value:  s.F,
			})
		}

		var expSamples []parsedSample
		for _, s := range testCase.ExpSamples {
			lb, err := parser.ParseMetric(s.Labels)
			if err != nil {
				err = fmt.Errorf("labels %q: %w", s.Labels, err)
				errs = append(errs, fmt.Errorf("    expr: %q, time: %s, err: %w", testCase.Expr,
					testCase.EvalTime.String(), err))
				continue Outer
			}
			expSamples = append(expSamples, parsedSample{
				Labels: lb,
				Value:  s.Value,
			})
		}

		sort.Slice(expSamples, func(i, j int) bool {
			return labels.Compare(expSamples[i].Labels, expSamples[j].Labels) <= 0
		})
		sort.Slice(gotSamples, func(i, j int) bool {
			return labels.Compare(gotSamples[i].Labels, gotSamples[j].Labels) <= 0
		})
		if !reflect.DeepEqual(expSamples, gotSamples) {
			errs = append(errs, fmt.Errorf("    expr: %q, time: %s,\n        exp: %v\n        got: %v", testCase.Expr,
				testCase.EvalTime.String(), parsedSamplesString(expSamples), parsedSamplesString(gotSamples)))
		}
	}

	if len(errs) > 0 {
		return errs
	}
	return nil
}
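
For context, here is a minimal sketch of how this method might be driven by the unit-test runner. The helper runTestGroups, the unitTestFile type, and its RuleFiles, EvaluationInterval, GroupEvalOrder and Tests fields are assumptions for illustration, not necessarily the exact promtool wiring:

// runTestGroups is a hypothetical helper; unitTestFile and its fields are
// assumed here purely to show how test() is typically called.
func runTestGroups(inp unitTestFile, queryOpts promql.LazyLoaderOpts) []error {
	// Build the group order map from the test file's group_eval_order list,
	// so orderedGroups can reproduce the requested evaluation order.
	groupOrderMap := make(map[string]int, len(inp.GroupEvalOrder))
	for i, gn := range inp.GroupEvalOrder {
		groupOrderMap[gn] = i
	}

	// Run every test group against the shared rule files, collecting all errors.
	var allErrs []error
	for _, tg := range inp.Tests {
		if errs := tg.test(time.Duration(inp.EvaluationInterval), groupOrderMap, queryOpts, inp.RuleFiles...); errs != nil {
			allErrs = append(allErrs, errs...)
		}
	}
	return allErrs
}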