in receiver/sqlserverreceiver/scraper.go [195:459]
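// recordDatabasePerfCounterMetrics records one data point per recognized
// performance counter row returned by the client query. Unrecognized counter
// names are ignored; rows whose values fail to parse are reported as errors
// without aborting the scrape.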
func (s *sqlServerScraperHelper) recordDatabasePerfCounterMetrics(ctx context.Context) error {
const counterKey = "counter"
const valueKey = "value"
// Constants are the counter names returned in the query's counter column.
const activeTempTables = "Active Temp Tables"
const backupRestoreThroughputPerSec = "Backup/Restore Throughput/sec"
const batchRequestRate = "Batch Requests/sec"
const bufferCacheHitRatio = "Buffer cache hit ratio"
const bytesReceivedFromReplicaPerSec = "Bytes Received from Replica/sec"
const bytesSentForReplicaPerSec = "Bytes Sent to Replica/sec"
const diskReadIOThrottled = "Disk Read IO Throttled/sec"
const diskWriteIOThrottled = "Disk Write IO Throttled/sec"
const executionErrors = "Execution Errors"
const freeListStalls = "Free list stalls/sec"
const freeSpaceInTempdb = "Free Space in tempdb (KB)"
const fullScansPerSec = "Full Scans/sec"
const indexSearchesPerSec = "Index Searches/sec"
const lockTimeoutsPerSec = "Lock Timeouts/sec"
const lockWaits = "Lock Waits/sec"
const loginsPerSec = "Logins/sec"
const logoutPerSec = "Logouts/sec"
const numberOfDeadlocksPerSec = "Number of Deadlocks/sec"
const mirrorWritesTransactionPerSec = "Mirrored Write Transactions/sec"
const memoryGrantsPending = "Memory Grants Pending"
const pageLookupsPerSec = "Page lookups/sec"
const processesBlocked = "Processes blocked"
const sqlCompilationRate = "SQL Compilations/sec"
const sqlReCompilationsRate = "SQL Re-Compilations/sec"
const transactionDelay = "Transaction Delay"
const userConnCount = "User Connections"
const usedMemory = "Used memory (KB)"
const versionStoreSize = "Version Store Size (KB)"
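// Fetch the counter rows. A null-value warning from the SQL query is not
// fatal: log it and record whatever rows were returned.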
rows, err := s.client.QueryRows(ctx)
if err != nil {
if !errors.Is(err, sqlquery.ErrNullValueWarning) {
return fmt.Errorf("sqlServerScraperHelper: %w", err)
}
s.logger.Warn("problems encountered getting metric rows", zap.Error(err))
}
var errs []error
now := pcommon.NewTimestampFromTime(time.Now())
for i, row := range rows {
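// Each row carries its own computer and instance name, so build a separate
// resource per row and emit its metrics scoped to that resource.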
rb := s.mb.NewResourceBuilder()
rb.SetSqlserverComputerName(row[computerNameKey])
rb.SetSqlserverInstanceName(row[instanceNameKey])
rb.SetServerAddress(s.config.Server)
rb.SetServerPort(int64(s.config.Port))
switch row[counterKey] {
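// Counter values arrive as strings. Integer-valued counters are parsed with
// strconv.ParseInt and rate/ratio counters with strconv.ParseFloat; a parse
// failure is collected in errs and the row is skipped.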
case activeTempTables:
val, err := strconv.ParseInt(row[valueKey], 10, 64)
if err != nil {
err = fmt.Errorf("failed to parse valueKey for row %d: %w in %s", i, err, activeTempTables)
errs = append(errs, err)
} else {
s.mb.RecordSqlserverTableCountDataPoint(now, val, metadata.AttributeTableStateActive, metadata.AttributeTableStatusTemporary)
}
case backupRestoreThroughputPerSec:
val, err := strconv.ParseFloat(row[valueKey], 64)
if err != nil {
err = fmt.Errorf("failed to parse valueKey for row %d: %w in %s", i, err, backupRestoreThroughputPerSec)
errs = append(errs, err)
} else {
s.mb.RecordSqlserverDatabaseBackupOrRestoreRateDataPoint(now, val)
}
case batchRequestRate:
val, err := strconv.ParseFloat(row[valueKey], 64)
if err != nil {
err = fmt.Errorf("failed to parse valueKey for row %d: %w in %s", i, err, batchRequestRate)
errs = append(errs, err)
} else {
s.mb.RecordSqlserverBatchRequestRateDataPoint(now, val)
}
case bufferCacheHitRatio:
val, err := strconv.ParseFloat(row[valueKey], 64)
if err != nil {
err = fmt.Errorf("failed to parse valueKey for row %d: %w in %s", i, err, bufferCacheHitRatio)
errs = append(errs, err)
} else {
s.mb.RecordSqlserverPageBufferCacheHitRatioDataPoint(now, val)
}
case bytesReceivedFromReplicaPerSec:
val, err := strconv.ParseFloat(row[valueKey], 64)
if err != nil {
err = fmt.Errorf("failed to parse valueKey for row %d: %w in %s", i, err, bytesReceivedFromReplicaPerSec)
errs = append(errs, err)
} else {
s.mb.RecordSqlserverReplicaDataRateDataPoint(now, val, metadata.AttributeReplicaDirectionReceive)
}
case bytesSentForReplicaPerSec:
val, err := strconv.ParseFloat(row[valueKey], 64)
if err != nil {
err = fmt.Errorf("failed to parse valueKey for row %d: %w in %s", i, err, bytesSentForReplicaPerSec)
errs = append(errs, err)
} else {
s.mb.RecordSqlserverReplicaDataRateDataPoint(now, val, metadata.AttributeReplicaDirectionTransmit)
}
case diskReadIOThrottled:
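// These generated recorders take the raw string value and return any parse
// error themselves; a nil error appended here is dropped by errors.Join below.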
errs = append(errs, s.mb.RecordSqlserverResourcePoolDiskThrottledReadRateDataPoint(now, row[valueKey]))
case diskWriteIOThrottled:
errs = append(errs, s.mb.RecordSqlserverResourcePoolDiskThrottledWriteRateDataPoint(now, row[valueKey]))
case executionErrors:
val, err := strconv.ParseInt(row[valueKey], 10, 64)
if err != nil {
err = fmt.Errorf("failed to parse valueKey for row %d: %w in %s", i, err, executionErrors)
errs = append(errs, err)
} else {
s.mb.RecordSqlserverDatabaseExecutionErrorsDataPoint(now, val)
}
case freeListStalls:
val, err := strconv.ParseInt(row[valueKey], 10, 64)
if err != nil {
err = fmt.Errorf("failed to parse valueKey for row %d: %w in %s", i, err, freeListStalls)
errs = append(errs, err)
} else {
s.mb.RecordSqlserverPageBufferCacheFreeListStallsRateDataPoint(now, val)
}
case fullScansPerSec:
val, err := strconv.ParseFloat(row[valueKey], 64)
if err != nil {
err = fmt.Errorf("failed to parse valueKey for row %d: %w in %s", i, err, fullScansPerSec)
errs = append(errs, err)
} else {
s.mb.RecordSqlserverDatabaseFullScanRateDataPoint(now, val)
}
case freeSpaceInTempdb:
val, err := strconv.ParseInt(row[valueKey], 10, 64)
if err != nil {
err = fmt.Errorf("failed to parse valueKey for row %d: %w in %s", i, err, freeSpaceInTempdb)
errs = append(errs, err)
} else {
s.mb.RecordSqlserverDatabaseTempdbSpaceDataPoint(now, val, metadata.AttributeTempdbStateFree)
}
case indexSearchesPerSec:
val, err := strconv.ParseFloat(row[valueKey], 64)
if err != nil {
err = fmt.Errorf("failed to parse valueKey for row %d: %w in %s", i, err, indexSearchesPerSec)
errs = append(errs, err)
} else {
s.mb.RecordSqlserverIndexSearchRateDataPoint(now, val)
}
case lockTimeoutsPerSec:
val, err := strconv.ParseFloat(row[valueKey], 64)
if err != nil {
err = fmt.Errorf("failed to parse valueKey for row %d: %w in %s", i, err, lockTimeoutsPerSec)
errs = append(errs, err)
} else {
s.mb.RecordSqlserverLockTimeoutRateDataPoint(now, val)
}
case lockWaits:
val, err := strconv.ParseFloat(row[valueKey], 64)
if err != nil {
err = fmt.Errorf("failed to parse valueKey for row %d: %w in %s", i, err, lockWaits)
errs = append(errs, err)
} else {
s.mb.RecordSqlserverLockWaitRateDataPoint(now, val)
}
case loginsPerSec:
val, err := strconv.ParseFloat(row[valueKey], 64)
if err != nil {
err = fmt.Errorf("failed to parse valueKey for row %d: %w in %s", i, err, loginsPerSec)
errs = append(errs, err)
} else {
s.mb.RecordSqlserverLoginRateDataPoint(now, val)
}
case logoutPerSec:
val, err := strconv.ParseFloat(row[valueKey], 64)
if err != nil {
err = fmt.Errorf("failed to parse valueKey for row %d: %w in %s", i, err, logoutPerSec)
errs = append(errs, err)
} else {
s.mb.RecordSqlserverLogoutRateDataPoint(now, val)
}
case memoryGrantsPending:
val, err := strconv.ParseInt(row[valueKey], 10, 64)
if err != nil {
err = fmt.Errorf("failed to parse valueKey for row %d: %w in %s", i, err, memoryGrantsPending)
errs = append(errs, err)
} else {
s.mb.RecordSqlserverMemoryGrantsPendingCountDataPoint(now, val)
}
case mirrorWritesTransactionPerSec:
val, err := strconv.ParseFloat(row[valueKey], 64)
if err != nil {
err = fmt.Errorf("failed to parse valueKey for row %d: %w in %s", i, err, mirrorWritesTransactionPerSec)
errs = append(errs, err)
} else {
s.mb.RecordSqlserverTransactionMirrorWriteRateDataPoint(now, val)
}
case numberOfDeadlocksPerSec:
val, err := strconv.ParseFloat(row[valueKey], 64)
if err != nil {
err = fmt.Errorf("failed to parse valueKey for row %d: %w in %s", i, err, numberOfDeadlocksPerSec)
errs = append(errs, err)
} else {
s.mb.RecordSqlserverDeadlockRateDataPoint(now, val)
}
case pageLookupsPerSec:
val, err := strconv.ParseFloat(row[valueKey], 64)
if err != nil {
err = fmt.Errorf("failed to parse valueKey for row %d: %w in %s", i, err, pageLookupsPerSec)
errs = append(errs, err)
} else {
s.mb.RecordSqlserverPageLookupRateDataPoint(now, val)
}
case processesBlocked:
errs = append(errs, s.mb.RecordSqlserverProcessesBlockedDataPoint(now, row[valueKey]))
case sqlCompilationRate:
val, err := strconv.ParseFloat(row[valueKey], 64)
if err != nil {
err = fmt.Errorf("failed to parse valueKey for row %d: %w in %s", i, err, sqlCompilationRate)
errs = append(errs, err)
} else {
s.mb.RecordSqlserverBatchSQLCompilationRateDataPoint(now, val)
}
case sqlReCompilationsRate:
val, err := strconv.ParseFloat(row[valueKey], 64)
if err != nil {
err = fmt.Errorf("failed to parse valueKey for row %d: %w in %s", i, err, sqlReCompilationsRate)
errs = append(errs, err)
} else {
s.mb.RecordSqlserverBatchSQLRecompilationRateDataPoint(now, val)
}
case transactionDelay:
val, err := strconv.ParseFloat(row[valueKey], 64)
if err != nil {
err = fmt.Errorf("failed to parse valueKey for row %d: %w in %s", i, err, transactionDelay)
errs = append(errs, err)
} else {
s.mb.RecordSqlserverTransactionDelayDataPoint(now, val)
}
case userConnCount:
val, err := strconv.ParseInt(row[valueKey], 10, 64)
if err != nil {
err = fmt.Errorf("failed to parse valueKey for row %d: %w in %s", i, err, userConnCount)
errs = append(errs, err)
} else {
s.mb.RecordSqlserverUserConnectionCountDataPoint(now, val)
}
case usedMemory:
val, err := strconv.ParseFloat(row[valueKey], 64)
if err != nil {
err = fmt.Errorf("failed to parse valueKey for row %d: %w in %s", i, err, usedMemory)
errs = append(errs, err)
} else {
s.mb.RecordSqlserverMemoryUsageDataPoint(now, val)
}
case versionStoreSize:
val, err := strconv.ParseFloat(row[valueKey], 64)
if err != nil {
err = fmt.Errorf("failed to parse valueKey for row %d: %w in %s", i, err, versionStoreSize)
errs = append(errs, err)
} else {
s.mb.RecordSqlserverDatabaseTempdbVersionStoreSizeDataPoint(now, val)
}
}
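// Emit the metrics recorded for this row under the resource built above.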
s.mb.EmitForResource(metadata.WithResource(rb.Emit()))
}
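// errors.Join returns nil when errs is empty or contains only nil values.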
return errors.Join(errs...)
}