in common/pkg/monitoring/monitoring.go [188:306]
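// queryMetrics runs the SQL query for a metric set, maps the returned columns
// onto the configured metrics, and converts each row into Prometheus const
// metrics. Failures increment errCount and are logged rather than returned.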
func queryMetrics(log logr.Logger, db *sql.DB, ms MetricSet, errCount *uint64) []prometheus.Metric {
	rows, err := db.Query(ms.Query)
	if err != nil {
		atomic.AddUint64(errCount, 1)
		log.Error(err, "Failed to query metric set", "query", ms.Query)
		return nil
	}
	defer rows.Close()
	columns, err := rows.Columns()
	if err != nil {
		atomic.AddUint64(errCount, 1)
		log.Error(err, "Failed to read query columns", "query", ms.Query)
		return nil
	}
	// Force columns to match config (lower case).
	for i := 0; i < len(columns); i++ {
		columns[i] = strings.ToLower(columns[i])
	}
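	// labels is reused across rows; Label-usage columns refresh its values on
	// each scan, and it is attached to every metric as const labels.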
	labels := map[string]string{}
	var metrics []prometheus.Metric
	values := make([]interface{}, len(columns))
	// Set up pointers to interfaces for splatting into rows.Scan.
	valuePtrs := make([]interface{}, len(columns))
	for i := range values {
		valuePtrs[i] = &values[i]
	}
	// Find the columns for our metrics based on Name.
	// For histograms provide the bucket, sum, count column indexes in that
	// order.
	forMetric := make([]int, len(ms.Metrics))
	forHistMetric := make([][]int, len(ms.Metrics))
	columnToIdx := map[string]int{}
	for i, c := range columns {
		columnToIdx[c] = i
	}
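	// columnToIdx resolves a configured column name to its position in the
	// result row so values can be pulled out by index below.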
	// Build mapping arrays.
	for i, m := range ms.Metrics {
		if m.Usage == Histogram {
			// [b1,b2,...,bn,sum,count]
			forHistMetric[i] = make([]int, len(m.Buckets)+2)
			j := 0
			for _, k := range sortedKeys(m.Buckets) {
				if idx, found := columnToIdx[fmt.Sprintf("%s_%s", m.column, k)]; found {
					forHistMetric[i][j] = idx
				}
				j++
			}
			if idx, found := columnToIdx[m.column+"_sum"]; found {
				forHistMetric[i][j] = idx
			}
			if idx, found := columnToIdx[m.column+"_count"]; found {
				forHistMetric[i][j+1] = idx
			}
		} else if idx, found := columnToIdx[m.column]; found {
			forMetric[i] = idx
		}
	}
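	// Walk the result set; each row can emit one sample per configured metric.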
	rowCount := 0
	for rows.Next() {
		rowCount++
		if err := rows.Scan(valuePtrs...); err != nil {
			atomic.AddUint64(errCount, 1)
			log.Error(err, "Failed to scan query row", "query", ms.Query)
			continue
		}
		// Build labels from the query first.
		for i := range ms.Metrics {
			if ms.Metrics[i].Usage == Label {
				labels[ms.Metrics[i].Name] = parseString(log, values[forMetric[i]])
			}
		}
		// Build metrics
		for i := range ms.Metrics {
			m := ms.Metrics[i]
			mDesc := prometheus.NewDesc(fmt.Sprintf("%s_%s_%s", ms.Namespace, ms.Name, m.Name), m.Desc, nil, labels)
			var metric prometheus.Metric
			var err error
			switch m.Usage {
			case Counter:
				metric, err = prometheus.NewConstMetric(mDesc, prometheus.CounterValue, parseFloat64(log, values[forMetric[i]]))
			case Gauge:
				metric, err = prometheus.NewConstMetric(mDesc, prometheus.GaugeValue, parseFloat64(log, values[forMetric[i]]))
			case Histogram:
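				// Reassemble the cumulative bucket counts in the same
				// sorted-key order used when the columns were mapped above.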
				bucketVals := map[float64]uint64{}
				j := 0
				for _, k := range sortedKeys(m.Buckets) {
					bucketVals[m.Buckets[k]] = parseUint64(log, values[forHistMetric[i][j]])
					j++
				}
				sum := parseFloat64(log, values[forHistMetric[i][j]])
				count := parseUint64(log, values[forHistMetric[i][j+1]])
				metric, err = prometheus.NewConstHistogram(mDesc, count, sum, bucketVals)
			case Label:
				continue
			}
			if err != nil || metric == nil {
				atomic.AddUint64(errCount, 1)
				log.Error(err, "Failed to create prometheus metric", "desc", mDesc, "metric", metric)
				continue
			}
			metrics = append(metrics, metric)
		}
	}
	if rowCount == 0 {
		// This is likely due to bad row-level security or a poor query.
		// If you don't see the metrics you expected, this may be why, so log it.
		log.Info("Query returned no rows.", "query", ms.Query)
	}
	return metrics
}
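// Illustrative sketch only, not part of monitoring.go: one way a Prometheus
// collector's Collect method could drive queryMetrics. The exporter type, its
// fields, and the metricSets slice are assumed names for this example, and
// Describe is omitted for brevity.
type exporter struct {
	log        logr.Logger
	db         *sql.DB
	metricSets []MetricSet
	errCount   uint64
}

func (e *exporter) Collect(ch chan<- prometheus.Metric) {
	for _, ms := range e.metricSets {
		// Forward every metric produced for this metric set to Prometheus.
		for _, m := range queryMetrics(e.log, e.db, ms, &e.errCount) {
			ch <- m
		}
	}
}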