// receiver/mysqlreceiver/internal/metadata/generated_metrics.go (3,578 lines of code) (raw):
// Code generated by mdatagen. DO NOT EDIT.
package metadata
import (
"fmt"
"strconv"
"time"
"go.opentelemetry.io/collector/component"
"go.opentelemetry.io/collector/filter"
"go.opentelemetry.io/collector/pdata/pcommon"
"go.opentelemetry.io/collector/pdata/pmetric"
"go.opentelemetry.io/collector/receiver"
)
// AttributeBufferPoolData specifies the value buffer_pool_data attribute.
type AttributeBufferPoolData int

// Valid AttributeBufferPoolData values; the zero value means "unspecified".
const (
	AttributeBufferPoolDataDirty AttributeBufferPoolData = iota + 1
	AttributeBufferPoolDataClean
)

// String returns the string representation of the AttributeBufferPoolData,
// or "" when av is out of range.
func (av AttributeBufferPoolData) String() string {
	names := [...]string{"", "dirty", "clean"}
	if av <= 0 || int(av) >= len(names) {
		return ""
	}
	return names[av]
}

// MapAttributeBufferPoolData is a helper map of string to AttributeBufferPoolData attribute value.
var MapAttributeBufferPoolData = map[string]AttributeBufferPoolData{
	"dirty": AttributeBufferPoolDataDirty,
	"clean": AttributeBufferPoolDataClean,
}
// AttributeBufferPoolOperations specifies the value buffer_pool_operations attribute.
type AttributeBufferPoolOperations int

// Valid AttributeBufferPoolOperations values; the zero value means "unspecified".
const (
	AttributeBufferPoolOperationsReadAheadRnd AttributeBufferPoolOperations = iota + 1
	AttributeBufferPoolOperationsReadAhead
	AttributeBufferPoolOperationsReadAheadEvicted
	AttributeBufferPoolOperationsReadRequests
	AttributeBufferPoolOperationsReads
	AttributeBufferPoolOperationsWaitFree
	AttributeBufferPoolOperationsWriteRequests
)

// String returns the string representation of the AttributeBufferPoolOperations,
// or "" when av is out of range.
func (av AttributeBufferPoolOperations) String() string {
	names := [...]string{"", "read_ahead_rnd", "read_ahead", "read_ahead_evicted", "read_requests", "reads", "wait_free", "write_requests"}
	if av <= 0 || int(av) >= len(names) {
		return ""
	}
	return names[av]
}

// MapAttributeBufferPoolOperations is a helper map of string to AttributeBufferPoolOperations attribute value.
var MapAttributeBufferPoolOperations = map[string]AttributeBufferPoolOperations{
	"read_ahead_rnd":     AttributeBufferPoolOperationsReadAheadRnd,
	"read_ahead":         AttributeBufferPoolOperationsReadAhead,
	"read_ahead_evicted": AttributeBufferPoolOperationsReadAheadEvicted,
	"read_requests":      AttributeBufferPoolOperationsReadRequests,
	"reads":              AttributeBufferPoolOperationsReads,
	"wait_free":          AttributeBufferPoolOperationsWaitFree,
	"write_requests":     AttributeBufferPoolOperationsWriteRequests,
}
// AttributeBufferPoolPages specifies the value buffer_pool_pages attribute.
type AttributeBufferPoolPages int

// Valid AttributeBufferPoolPages values; the zero value means "unspecified".
const (
	AttributeBufferPoolPagesData AttributeBufferPoolPages = iota + 1
	AttributeBufferPoolPagesFree
	AttributeBufferPoolPagesMisc
)

// String returns the string representation of the AttributeBufferPoolPages,
// or "" when av is out of range.
func (av AttributeBufferPoolPages) String() string {
	names := [...]string{"", "data", "free", "misc"}
	if av <= 0 || int(av) >= len(names) {
		return ""
	}
	return names[av]
}

// MapAttributeBufferPoolPages is a helper map of string to AttributeBufferPoolPages attribute value.
var MapAttributeBufferPoolPages = map[string]AttributeBufferPoolPages{
	"data": AttributeBufferPoolPagesData,
	"free": AttributeBufferPoolPagesFree,
	"misc": AttributeBufferPoolPagesMisc,
}
// AttributeCacheStatus specifies the value cache_status attribute.
type AttributeCacheStatus int

// Valid AttributeCacheStatus values; the zero value means "unspecified".
const (
	AttributeCacheStatusHit AttributeCacheStatus = iota + 1
	AttributeCacheStatusMiss
	AttributeCacheStatusOverflow
)

// String returns the string representation of the AttributeCacheStatus,
// or "" when av is out of range.
func (av AttributeCacheStatus) String() string {
	names := [...]string{"", "hit", "miss", "overflow"}
	if av <= 0 || int(av) >= len(names) {
		return ""
	}
	return names[av]
}

// MapAttributeCacheStatus is a helper map of string to AttributeCacheStatus attribute value.
var MapAttributeCacheStatus = map[string]AttributeCacheStatus{
	"hit":      AttributeCacheStatusHit,
	"miss":     AttributeCacheStatusMiss,
	"overflow": AttributeCacheStatusOverflow,
}
// AttributeCommand specifies the value command attribute.
type AttributeCommand int

// Valid AttributeCommand values; the zero value means "unspecified".
const (
	AttributeCommandDelete AttributeCommand = iota + 1
	AttributeCommandDeleteMulti
	AttributeCommandInsert
	AttributeCommandSelect
	AttributeCommandUpdate
	AttributeCommandUpdateMulti
)

// String returns the string representation of the AttributeCommand,
// or "" when av is out of range.
func (av AttributeCommand) String() string {
	names := [...]string{"", "delete", "delete_multi", "insert", "select", "update", "update_multi"}
	if av <= 0 || int(av) >= len(names) {
		return ""
	}
	return names[av]
}

// MapAttributeCommand is a helper map of string to AttributeCommand attribute value.
var MapAttributeCommand = map[string]AttributeCommand{
	"delete":       AttributeCommandDelete,
	"delete_multi": AttributeCommandDeleteMulti,
	"insert":       AttributeCommandInsert,
	"select":       AttributeCommandSelect,
	"update":       AttributeCommandUpdate,
	"update_multi": AttributeCommandUpdateMulti,
}
// AttributeConnectionError specifies the value connection_error attribute.
type AttributeConnectionError int

// Valid AttributeConnectionError values; the zero value means "unspecified".
const (
	AttributeConnectionErrorAccept AttributeConnectionError = iota + 1
	AttributeConnectionErrorInternal
	AttributeConnectionErrorMaxConnections
	AttributeConnectionErrorPeerAddress
	AttributeConnectionErrorSelect
	AttributeConnectionErrorTcpwrap
	AttributeConnectionErrorAborted
	AttributeConnectionErrorAbortedClients
	AttributeConnectionErrorLocked
)

// String returns the string representation of the AttributeConnectionError,
// or "" when av is out of range.
func (av AttributeConnectionError) String() string {
	names := [...]string{"", "accept", "internal", "max_connections", "peer_address", "select", "tcpwrap", "aborted", "aborted_clients", "locked"}
	if av <= 0 || int(av) >= len(names) {
		return ""
	}
	return names[av]
}

// MapAttributeConnectionError is a helper map of string to AttributeConnectionError attribute value.
var MapAttributeConnectionError = map[string]AttributeConnectionError{
	"accept":          AttributeConnectionErrorAccept,
	"internal":        AttributeConnectionErrorInternal,
	"max_connections": AttributeConnectionErrorMaxConnections,
	"peer_address":    AttributeConnectionErrorPeerAddress,
	"select":          AttributeConnectionErrorSelect,
	"tcpwrap":         AttributeConnectionErrorTcpwrap,
	"aborted":         AttributeConnectionErrorAborted,
	"aborted_clients": AttributeConnectionErrorAbortedClients,
	"locked":          AttributeConnectionErrorLocked,
}
// AttributeConnectionStatus specifies the value connection_status attribute.
type AttributeConnectionStatus int

// Valid AttributeConnectionStatus values; the zero value means "unspecified".
const (
	AttributeConnectionStatusAccepted AttributeConnectionStatus = iota + 1
	AttributeConnectionStatusClosed
	AttributeConnectionStatusRejected
)

// String returns the string representation of the AttributeConnectionStatus,
// or "" when av is out of range.
func (av AttributeConnectionStatus) String() string {
	names := [...]string{"", "accepted", "closed", "rejected"}
	if av <= 0 || int(av) >= len(names) {
		return ""
	}
	return names[av]
}

// MapAttributeConnectionStatus is a helper map of string to AttributeConnectionStatus attribute value.
var MapAttributeConnectionStatus = map[string]AttributeConnectionStatus{
	"accepted": AttributeConnectionStatusAccepted,
	"closed":   AttributeConnectionStatusClosed,
	"rejected": AttributeConnectionStatusRejected,
}
// AttributeDirection specifies the value direction attribute.
type AttributeDirection int

// Valid AttributeDirection values; the zero value means "unspecified".
const (
	AttributeDirectionReceived AttributeDirection = iota + 1
	AttributeDirectionSent
)

// String returns the string representation of the AttributeDirection,
// or "" when av is out of range.
func (av AttributeDirection) String() string {
	names := [...]string{"", "received", "sent"}
	if av <= 0 || int(av) >= len(names) {
		return ""
	}
	return names[av]
}

// MapAttributeDirection is a helper map of string to AttributeDirection attribute value.
var MapAttributeDirection = map[string]AttributeDirection{
	"received": AttributeDirectionReceived,
	"sent":     AttributeDirectionSent,
}
// AttributeDoubleWrites specifies the value double_writes attribute.
type AttributeDoubleWrites int

// Valid AttributeDoubleWrites values; the zero value means "unspecified".
const (
	AttributeDoubleWritesPagesWritten AttributeDoubleWrites = iota + 1
	AttributeDoubleWritesWrites
)

// String returns the string representation of the AttributeDoubleWrites,
// or "" when av is out of range.
func (av AttributeDoubleWrites) String() string {
	names := [...]string{"", "pages_written", "writes"}
	if av <= 0 || int(av) >= len(names) {
		return ""
	}
	return names[av]
}

// MapAttributeDoubleWrites is a helper map of string to AttributeDoubleWrites attribute value.
var MapAttributeDoubleWrites = map[string]AttributeDoubleWrites{
	"pages_written": AttributeDoubleWritesPagesWritten,
	"writes":        AttributeDoubleWritesWrites,
}
// AttributeEventState specifies the value event_state attribute.
type AttributeEventState int

// Valid AttributeEventState values; the zero value means "unspecified".
const (
	AttributeEventStateErrors AttributeEventState = iota + 1
	AttributeEventStateWarnings
	AttributeEventStateRowsAffected
	AttributeEventStateRowsSent
	AttributeEventStateRowsExamined
	AttributeEventStateCreatedTmpDiskTables
	AttributeEventStateCreatedTmpTables
	AttributeEventStateSortMergePasses
	AttributeEventStateSortRows
	AttributeEventStateNoIndexUsed
)

// String returns the string representation of the AttributeEventState,
// or "" when av is out of range.
func (av AttributeEventState) String() string {
	names := [...]string{"", "errors", "warnings", "rows_affected", "rows_sent", "rows_examined", "created_tmp_disk_tables", "created_tmp_tables", "sort_merge_passes", "sort_rows", "no_index_used"}
	if av <= 0 || int(av) >= len(names) {
		return ""
	}
	return names[av]
}

// MapAttributeEventState is a helper map of string to AttributeEventState attribute value.
var MapAttributeEventState = map[string]AttributeEventState{
	"errors":                  AttributeEventStateErrors,
	"warnings":                AttributeEventStateWarnings,
	"rows_affected":           AttributeEventStateRowsAffected,
	"rows_sent":               AttributeEventStateRowsSent,
	"rows_examined":           AttributeEventStateRowsExamined,
	"created_tmp_disk_tables": AttributeEventStateCreatedTmpDiskTables,
	"created_tmp_tables":      AttributeEventStateCreatedTmpTables,
	"sort_merge_passes":       AttributeEventStateSortMergePasses,
	"sort_rows":               AttributeEventStateSortRows,
	"no_index_used":           AttributeEventStateNoIndexUsed,
}
// AttributeHandler specifies the value handler attribute.
type AttributeHandler int

// Valid AttributeHandler values; the zero value means "unspecified".
const (
	AttributeHandlerCommit AttributeHandler = iota + 1
	AttributeHandlerDelete
	AttributeHandlerDiscover
	AttributeHandlerExternalLock
	AttributeHandlerMrrInit
	AttributeHandlerPrepare
	AttributeHandlerReadFirst
	AttributeHandlerReadKey
	AttributeHandlerReadLast
	AttributeHandlerReadNext
	AttributeHandlerReadPrev
	AttributeHandlerReadRnd
	AttributeHandlerReadRndNext
	AttributeHandlerRollback
	AttributeHandlerSavepoint
	AttributeHandlerSavepointRollback
	AttributeHandlerUpdate
	AttributeHandlerWrite
)

// String returns the string representation of the AttributeHandler,
// or "" when av is out of range.
func (av AttributeHandler) String() string {
	names := [...]string{"", "commit", "delete", "discover", "external_lock", "mrr_init", "prepare", "read_first", "read_key", "read_last", "read_next", "read_prev", "read_rnd", "read_rnd_next", "rollback", "savepoint", "savepoint_rollback", "update", "write"}
	if av <= 0 || int(av) >= len(names) {
		return ""
	}
	return names[av]
}

// MapAttributeHandler is a helper map of string to AttributeHandler attribute value.
var MapAttributeHandler = map[string]AttributeHandler{
	"commit":             AttributeHandlerCommit,
	"delete":             AttributeHandlerDelete,
	"discover":           AttributeHandlerDiscover,
	"external_lock":      AttributeHandlerExternalLock,
	"mrr_init":           AttributeHandlerMrrInit,
	"prepare":            AttributeHandlerPrepare,
	"read_first":         AttributeHandlerReadFirst,
	"read_key":           AttributeHandlerReadKey,
	"read_last":          AttributeHandlerReadLast,
	"read_next":          AttributeHandlerReadNext,
	"read_prev":          AttributeHandlerReadPrev,
	"read_rnd":           AttributeHandlerReadRnd,
	"read_rnd_next":      AttributeHandlerReadRndNext,
	"rollback":           AttributeHandlerRollback,
	"savepoint":          AttributeHandlerSavepoint,
	"savepoint_rollback": AttributeHandlerSavepointRollback,
	"update":             AttributeHandlerUpdate,
	"write":              AttributeHandlerWrite,
}
// AttributeIoWaitsOperations specifies the value io_waits_operations attribute.
type AttributeIoWaitsOperations int

// Valid AttributeIoWaitsOperations values; the zero value means "unspecified".
const (
	AttributeIoWaitsOperationsDelete AttributeIoWaitsOperations = iota + 1
	AttributeIoWaitsOperationsFetch
	AttributeIoWaitsOperationsInsert
	AttributeIoWaitsOperationsUpdate
)

// String returns the string representation of the AttributeIoWaitsOperations,
// or "" when av is out of range.
func (av AttributeIoWaitsOperations) String() string {
	names := [...]string{"", "delete", "fetch", "insert", "update"}
	if av <= 0 || int(av) >= len(names) {
		return ""
	}
	return names[av]
}

// MapAttributeIoWaitsOperations is a helper map of string to AttributeIoWaitsOperations attribute value.
var MapAttributeIoWaitsOperations = map[string]AttributeIoWaitsOperations{
	"delete": AttributeIoWaitsOperationsDelete,
	"fetch":  AttributeIoWaitsOperationsFetch,
	"insert": AttributeIoWaitsOperationsInsert,
	"update": AttributeIoWaitsOperationsUpdate,
}
// AttributeJoinKind specifies the value join_kind attribute.
type AttributeJoinKind int

// Valid AttributeJoinKind values; the zero value means "unspecified".
const (
	AttributeJoinKindFull AttributeJoinKind = iota + 1
	AttributeJoinKindFullRange
	AttributeJoinKindRange
	AttributeJoinKindRangeCheck
	AttributeJoinKindScan
)

// String returns the string representation of the AttributeJoinKind,
// or "" when av is out of range.
func (av AttributeJoinKind) String() string {
	names := [...]string{"", "full", "full_range", "range", "range_check", "scan"}
	if av <= 0 || int(av) >= len(names) {
		return ""
	}
	return names[av]
}

// MapAttributeJoinKind is a helper map of string to AttributeJoinKind attribute value.
var MapAttributeJoinKind = map[string]AttributeJoinKind{
	"full":        AttributeJoinKindFull,
	"full_range":  AttributeJoinKindFullRange,
	"range":       AttributeJoinKindRange,
	"range_check": AttributeJoinKindRangeCheck,
	"scan":        AttributeJoinKindScan,
}
// AttributeLocks specifies the value locks attribute.
type AttributeLocks int

// Valid AttributeLocks values; the zero value means "unspecified".
const (
	AttributeLocksImmediate AttributeLocks = iota + 1
	AttributeLocksWaited
)

// String returns the string representation of the AttributeLocks,
// or "" when av is out of range.
func (av AttributeLocks) String() string {
	names := [...]string{"", "immediate", "waited"}
	if av <= 0 || int(av) >= len(names) {
		return ""
	}
	return names[av]
}

// MapAttributeLocks is a helper map of string to AttributeLocks attribute value.
var MapAttributeLocks = map[string]AttributeLocks{
	"immediate": AttributeLocksImmediate,
	"waited":    AttributeLocksWaited,
}
// AttributeLogOperations specifies the value log_operations attribute.
type AttributeLogOperations int

// Valid AttributeLogOperations values; the zero value means "unspecified".
const (
	AttributeLogOperationsWaits AttributeLogOperations = iota + 1
	AttributeLogOperationsWriteRequests
	AttributeLogOperationsWrites
)

// String returns the string representation of the AttributeLogOperations,
// or "" when av is out of range.
func (av AttributeLogOperations) String() string {
	names := [...]string{"", "waits", "write_requests", "writes"}
	if av <= 0 || int(av) >= len(names) {
		return ""
	}
	return names[av]
}

// MapAttributeLogOperations is a helper map of string to AttributeLogOperations attribute value.
var MapAttributeLogOperations = map[string]AttributeLogOperations{
	"waits":          AttributeLogOperationsWaits,
	"write_requests": AttributeLogOperationsWriteRequests,
	"writes":         AttributeLogOperationsWrites,
}
// AttributeMysqlxThreads specifies the value mysqlx_threads attribute.
type AttributeMysqlxThreads int

// Valid AttributeMysqlxThreads values; the zero value means "unspecified".
const (
	AttributeMysqlxThreadsAvailable AttributeMysqlxThreads = iota + 1
	AttributeMysqlxThreadsActive
)

// String returns the string representation of the AttributeMysqlxThreads,
// or "" when av is out of range.
func (av AttributeMysqlxThreads) String() string {
	names := [...]string{"", "available", "active"}
	if av <= 0 || int(av) >= len(names) {
		return ""
	}
	return names[av]
}

// MapAttributeMysqlxThreads is a helper map of string to AttributeMysqlxThreads attribute value.
var MapAttributeMysqlxThreads = map[string]AttributeMysqlxThreads{
	"available": AttributeMysqlxThreadsAvailable,
	"active":    AttributeMysqlxThreadsActive,
}
// AttributeOpenedResources specifies the value opened_resources attribute.
type AttributeOpenedResources int

// Valid AttributeOpenedResources values; the zero value means "unspecified".
const (
	AttributeOpenedResourcesFile AttributeOpenedResources = iota + 1
	AttributeOpenedResourcesTableDefinition
	AttributeOpenedResourcesTable
)

// String returns the string representation of the AttributeOpenedResources,
// or "" when av is out of range.
func (av AttributeOpenedResources) String() string {
	names := [...]string{"", "file", "table_definition", "table"}
	if av <= 0 || int(av) >= len(names) {
		return ""
	}
	return names[av]
}

// MapAttributeOpenedResources is a helper map of string to AttributeOpenedResources attribute value.
var MapAttributeOpenedResources = map[string]AttributeOpenedResources{
	"file":             AttributeOpenedResourcesFile,
	"table_definition": AttributeOpenedResourcesTableDefinition,
	"table":            AttributeOpenedResourcesTable,
}
// AttributeOperations specifies the value operations attribute.
type AttributeOperations int

// Valid AttributeOperations values; the zero value means "unspecified".
const (
	AttributeOperationsFsyncs AttributeOperations = iota + 1
	AttributeOperationsReads
	AttributeOperationsWrites
)

// String returns the string representation of the AttributeOperations,
// or "" when av is out of range.
func (av AttributeOperations) String() string {
	names := [...]string{"", "fsyncs", "reads", "writes"}
	if av <= 0 || int(av) >= len(names) {
		return ""
	}
	return names[av]
}

// MapAttributeOperations is a helper map of string to AttributeOperations attribute value.
var MapAttributeOperations = map[string]AttributeOperations{
	"fsyncs": AttributeOperationsFsyncs,
	"reads":  AttributeOperationsReads,
	"writes": AttributeOperationsWrites,
}
// AttributePageOperations specifies the value page_operations attribute.
type AttributePageOperations int

// Valid AttributePageOperations values; the zero value means "unspecified".
const (
	AttributePageOperationsCreated AttributePageOperations = iota + 1
	AttributePageOperationsRead
	AttributePageOperationsWritten
)

// String returns the string representation of the AttributePageOperations,
// or "" when av is out of range.
func (av AttributePageOperations) String() string {
	names := [...]string{"", "created", "read", "written"}
	if av <= 0 || int(av) >= len(names) {
		return ""
	}
	return names[av]
}

// MapAttributePageOperations is a helper map of string to AttributePageOperations attribute value.
var MapAttributePageOperations = map[string]AttributePageOperations{
	"created": AttributePageOperationsCreated,
	"read":    AttributePageOperationsRead,
	"written": AttributePageOperationsWritten,
}
// AttributePreparedStatementsCommand specifies the value prepared_statements_command attribute.
type AttributePreparedStatementsCommand int

// Valid AttributePreparedStatementsCommand values; the zero value means "unspecified".
const (
	AttributePreparedStatementsCommandExecute AttributePreparedStatementsCommand = iota + 1
	AttributePreparedStatementsCommandClose
	AttributePreparedStatementsCommandFetch
	AttributePreparedStatementsCommandPrepare
	AttributePreparedStatementsCommandReset
	AttributePreparedStatementsCommandSendLongData
)

// String returns the string representation of the AttributePreparedStatementsCommand,
// or "" when av is out of range.
func (av AttributePreparedStatementsCommand) String() string {
	names := [...]string{"", "execute", "close", "fetch", "prepare", "reset", "send_long_data"}
	if av <= 0 || int(av) >= len(names) {
		return ""
	}
	return names[av]
}

// MapAttributePreparedStatementsCommand is a helper map of string to AttributePreparedStatementsCommand attribute value.
var MapAttributePreparedStatementsCommand = map[string]AttributePreparedStatementsCommand{
	"execute":        AttributePreparedStatementsCommandExecute,
	"close":          AttributePreparedStatementsCommandClose,
	"fetch":          AttributePreparedStatementsCommandFetch,
	"prepare":        AttributePreparedStatementsCommandPrepare,
	"reset":          AttributePreparedStatementsCommandReset,
	"send_long_data": AttributePreparedStatementsCommandSendLongData,
}
// AttributeReadLockType specifies the value read_lock_type attribute.
type AttributeReadLockType int

// Valid AttributeReadLockType values; the zero value means "unspecified".
const (
	AttributeReadLockTypeNormal AttributeReadLockType = iota + 1
	AttributeReadLockTypeWithSharedLocks
	AttributeReadLockTypeHighPriority
	AttributeReadLockTypeNoInsert
	AttributeReadLockTypeExternal
)

// String returns the string representation of the AttributeReadLockType,
// or "" when av is out of range.
func (av AttributeReadLockType) String() string {
	names := [...]string{"", "normal", "with_shared_locks", "high_priority", "no_insert", "external"}
	if av <= 0 || int(av) >= len(names) {
		return ""
	}
	return names[av]
}

// MapAttributeReadLockType is a helper map of string to AttributeReadLockType attribute value.
var MapAttributeReadLockType = map[string]AttributeReadLockType{
	"normal":            AttributeReadLockTypeNormal,
	"with_shared_locks": AttributeReadLockTypeWithSharedLocks,
	"high_priority":     AttributeReadLockTypeHighPriority,
	"no_insert":         AttributeReadLockTypeNoInsert,
	"external":          AttributeReadLockTypeExternal,
}
// AttributeRowLocks specifies the value row_locks attribute.
type AttributeRowLocks int

// Valid AttributeRowLocks values; the zero value means "unspecified".
const (
	AttributeRowLocksWaits AttributeRowLocks = iota + 1
	AttributeRowLocksTime
)

// String returns the string representation of the AttributeRowLocks,
// or "" when av is out of range.
func (av AttributeRowLocks) String() string {
	names := [...]string{"", "waits", "time"}
	if av <= 0 || int(av) >= len(names) {
		return ""
	}
	return names[av]
}

// MapAttributeRowLocks is a helper map of string to AttributeRowLocks attribute value.
var MapAttributeRowLocks = map[string]AttributeRowLocks{
	"waits": AttributeRowLocksWaits,
	"time":  AttributeRowLocksTime,
}
// AttributeRowOperations specifies the value row_operations attribute.
type AttributeRowOperations int

// Valid AttributeRowOperations values; the zero value means "unspecified".
const (
	AttributeRowOperationsDeleted AttributeRowOperations = iota + 1
	AttributeRowOperationsInserted
	AttributeRowOperationsRead
	AttributeRowOperationsUpdated
)

// String returns the string representation of the AttributeRowOperations,
// or "" when av is out of range.
func (av AttributeRowOperations) String() string {
	names := [...]string{"", "deleted", "inserted", "read", "updated"}
	if av <= 0 || int(av) >= len(names) {
		return ""
	}
	return names[av]
}

// MapAttributeRowOperations is a helper map of string to AttributeRowOperations attribute value.
var MapAttributeRowOperations = map[string]AttributeRowOperations{
	"deleted":  AttributeRowOperationsDeleted,
	"inserted": AttributeRowOperationsInserted,
	"read":     AttributeRowOperationsRead,
	"updated":  AttributeRowOperationsUpdated,
}
// AttributeSorts specifies the value sorts attribute.
type AttributeSorts int

// Valid AttributeSorts values; the zero value means "unspecified".
const (
	AttributeSortsMergePasses AttributeSorts = iota + 1
	AttributeSortsRange
	AttributeSortsRows
	AttributeSortsScan
)

// String returns the string representation of the AttributeSorts,
// or "" when av is out of range.
func (av AttributeSorts) String() string {
	names := [...]string{"", "merge_passes", "range", "rows", "scan"}
	if av <= 0 || int(av) >= len(names) {
		return ""
	}
	return names[av]
}

// MapAttributeSorts is a helper map of string to AttributeSorts attribute value.
var MapAttributeSorts = map[string]AttributeSorts{
	"merge_passes": AttributeSortsMergePasses,
	"range":        AttributeSortsRange,
	"rows":         AttributeSortsRows,
	"scan":         AttributeSortsScan,
}
// AttributeTableSizeType specifies the value table_size_type attribute.
type AttributeTableSizeType int

// Valid AttributeTableSizeType values; the zero value means "unspecified".
const (
	AttributeTableSizeTypeData AttributeTableSizeType = iota + 1
	AttributeTableSizeTypeIndex
)

// String returns the string representation of the AttributeTableSizeType,
// or "" when av is out of range.
func (av AttributeTableSizeType) String() string {
	names := [...]string{"", "data", "index"}
	if av <= 0 || int(av) >= len(names) {
		return ""
	}
	return names[av]
}

// MapAttributeTableSizeType is a helper map of string to AttributeTableSizeType attribute value.
var MapAttributeTableSizeType = map[string]AttributeTableSizeType{
	"data":  AttributeTableSizeTypeData,
	"index": AttributeTableSizeTypeIndex,
}
// AttributeThreads specifies the value threads attribute.
type AttributeThreads int

// Valid AttributeThreads values; the zero value means "unspecified".
const (
	AttributeThreadsCached AttributeThreads = iota + 1
	AttributeThreadsConnected
	AttributeThreadsCreated
	AttributeThreadsRunning
)

// String returns the string representation of the AttributeThreads,
// or "" when av is out of range.
func (av AttributeThreads) String() string {
	names := [...]string{"", "cached", "connected", "created", "running"}
	if av <= 0 || int(av) >= len(names) {
		return ""
	}
	return names[av]
}

// MapAttributeThreads is a helper map of string to AttributeThreads attribute value.
var MapAttributeThreads = map[string]AttributeThreads{
	"cached":    AttributeThreadsCached,
	"connected": AttributeThreadsConnected,
	"created":   AttributeThreadsCreated,
	"running":   AttributeThreadsRunning,
}
// AttributeTmpResource specifies the value tmp_resource attribute.
type AttributeTmpResource int

// Valid AttributeTmpResource values; the zero value means "unspecified".
const (
	AttributeTmpResourceDiskTables AttributeTmpResource = iota + 1
	AttributeTmpResourceFiles
	AttributeTmpResourceTables
)

// String returns the string representation of the AttributeTmpResource,
// or "" when av is out of range.
func (av AttributeTmpResource) String() string {
	names := [...]string{"", "disk_tables", "files", "tables"}
	if av <= 0 || int(av) >= len(names) {
		return ""
	}
	return names[av]
}

// MapAttributeTmpResource is a helper map of string to AttributeTmpResource attribute value.
var MapAttributeTmpResource = map[string]AttributeTmpResource{
	"disk_tables": AttributeTmpResourceDiskTables,
	"files":       AttributeTmpResourceFiles,
	"tables":      AttributeTmpResourceTables,
}
// AttributeWriteLockType specifies the value write_lock_type attribute.
type AttributeWriteLockType int

// Valid AttributeWriteLockType values; the zero value means "unspecified".
const (
	AttributeWriteLockTypeAllowWrite AttributeWriteLockType = iota + 1
	AttributeWriteLockTypeConcurrentInsert
	AttributeWriteLockTypeLowPriority
	AttributeWriteLockTypeNormal
	AttributeWriteLockTypeExternal
)

// String returns the string representation of the AttributeWriteLockType,
// or "" when av is out of range.
func (av AttributeWriteLockType) String() string {
	names := [...]string{"", "allow_write", "concurrent_insert", "low_priority", "normal", "external"}
	if av <= 0 || int(av) >= len(names) {
		return ""
	}
	return names[av]
}

// MapAttributeWriteLockType is a helper map of string to AttributeWriteLockType attribute value.
var MapAttributeWriteLockType = map[string]AttributeWriteLockType{
	"allow_write":       AttributeWriteLockTypeAllowWrite,
	"concurrent_insert": AttributeWriteLockTypeConcurrentInsert,
	"low_priority":      AttributeWriteLockTypeLowPriority,
	"normal":            AttributeWriteLockTypeNormal,
	"external":          AttributeWriteLockTypeExternal,
}
var MetricsInfo = metricsInfo{
MysqlBufferPoolDataPages: metricInfo{
Name: "mysql.buffer_pool.data_pages",
},
MysqlBufferPoolLimit: metricInfo{
Name: "mysql.buffer_pool.limit",
},
MysqlBufferPoolOperations: metricInfo{
Name: "mysql.buffer_pool.operations",
},
MysqlBufferPoolPageFlushes: metricInfo{
Name: "mysql.buffer_pool.page_flushes",
},
MysqlBufferPoolPages: metricInfo{
Name: "mysql.buffer_pool.pages",
},
MysqlBufferPoolUsage: metricInfo{
Name: "mysql.buffer_pool.usage",
},
MysqlClientNetworkIo: metricInfo{
Name: "mysql.client.network.io",
},
MysqlCommands: metricInfo{
Name: "mysql.commands",
},
MysqlConnectionCount: metricInfo{
Name: "mysql.connection.count",
},
MysqlConnectionErrors: metricInfo{
Name: "mysql.connection.errors",
},
MysqlDoubleWrites: metricInfo{
Name: "mysql.double_writes",
},
MysqlHandlers: metricInfo{
Name: "mysql.handlers",
},
MysqlIndexIoWaitCount: metricInfo{
Name: "mysql.index.io.wait.count",
},
MysqlIndexIoWaitTime: metricInfo{
Name: "mysql.index.io.wait.time",
},
MysqlJoins: metricInfo{
Name: "mysql.joins",
},
MysqlLocks: metricInfo{
Name: "mysql.locks",
},
MysqlLogOperations: metricInfo{
Name: "mysql.log_operations",
},
MysqlMysqlxConnections: metricInfo{
Name: "mysql.mysqlx_connections",
},
MysqlMysqlxWorkerThreads: metricInfo{
Name: "mysql.mysqlx_worker_threads",
},
MysqlOpenedResources: metricInfo{
Name: "mysql.opened_resources",
},
MysqlOperations: metricInfo{
Name: "mysql.operations",
},
MysqlPageOperations: metricInfo{
Name: "mysql.page_operations",
},
MysqlPreparedStatements: metricInfo{
Name: "mysql.prepared_statements",
},
MysqlQueryClientCount: metricInfo{
Name: "mysql.query.client.count",
},
MysqlQueryCount: metricInfo{
Name: "mysql.query.count",
},
MysqlQuerySlowCount: metricInfo{
Name: "mysql.query.slow.count",
},
MysqlReplicaSQLDelay: metricInfo{
Name: "mysql.replica.sql_delay",
},
MysqlReplicaTimeBehindSource: metricInfo{
Name: "mysql.replica.time_behind_source",
},
MysqlRowLocks: metricInfo{
Name: "mysql.row_locks",
},
MysqlRowOperations: metricInfo{
Name: "mysql.row_operations",
},
MysqlSorts: metricInfo{
Name: "mysql.sorts",
},
MysqlStatementEventCount: metricInfo{
Name: "mysql.statement_event.count",
},
MysqlStatementEventWaitTime: metricInfo{
Name: "mysql.statement_event.wait.time",
},
MysqlTableAverageRowLength: metricInfo{
Name: "mysql.table.average_row_length",
},
MysqlTableIoWaitCount: metricInfo{
Name: "mysql.table.io.wait.count",
},
MysqlTableIoWaitTime: metricInfo{
Name: "mysql.table.io.wait.time",
},
MysqlTableLockWaitReadCount: metricInfo{
Name: "mysql.table.lock_wait.read.count",
},
MysqlTableLockWaitReadTime: metricInfo{
Name: "mysql.table.lock_wait.read.time",
},
MysqlTableLockWaitWriteCount: metricInfo{
Name: "mysql.table.lock_wait.write.count",
},
MysqlTableLockWaitWriteTime: metricInfo{
Name: "mysql.table.lock_wait.write.time",
},
MysqlTableRows: metricInfo{
Name: "mysql.table.rows",
},
MysqlTableSize: metricInfo{
Name: "mysql.table.size",
},
MysqlTableOpenCache: metricInfo{
Name: "mysql.table_open_cache",
},
MysqlThreads: metricInfo{
Name: "mysql.threads",
},
MysqlTmpResources: metricInfo{
Name: "mysql.tmp_resources",
},
MysqlUptime: metricInfo{
Name: "mysql.uptime",
},
}
// metricsInfo holds one metricInfo entry (currently just the metric name)
// for every metric this receiver can produce; it mirrors the metric builders below.
type metricsInfo struct {
MysqlBufferPoolDataPages metricInfo
MysqlBufferPoolLimit metricInfo
MysqlBufferPoolOperations metricInfo
MysqlBufferPoolPageFlushes metricInfo
MysqlBufferPoolPages metricInfo
MysqlBufferPoolUsage metricInfo
MysqlClientNetworkIo metricInfo
MysqlCommands metricInfo
MysqlConnectionCount metricInfo
MysqlConnectionErrors metricInfo
MysqlDoubleWrites metricInfo
MysqlHandlers metricInfo
MysqlIndexIoWaitCount metricInfo
MysqlIndexIoWaitTime metricInfo
MysqlJoins metricInfo
MysqlLocks metricInfo
MysqlLogOperations metricInfo
MysqlMysqlxConnections metricInfo
MysqlMysqlxWorkerThreads metricInfo
MysqlOpenedResources metricInfo
MysqlOperations metricInfo
MysqlPageOperations metricInfo
MysqlPreparedStatements metricInfo
MysqlQueryClientCount metricInfo
MysqlQueryCount metricInfo
MysqlQuerySlowCount metricInfo
MysqlReplicaSQLDelay metricInfo
MysqlReplicaTimeBehindSource metricInfo
MysqlRowLocks metricInfo
MysqlRowOperations metricInfo
MysqlSorts metricInfo
MysqlStatementEventCount metricInfo
MysqlStatementEventWaitTime metricInfo
MysqlTableAverageRowLength metricInfo
MysqlTableIoWaitCount metricInfo
MysqlTableIoWaitTime metricInfo
MysqlTableLockWaitReadCount metricInfo
MysqlTableLockWaitReadTime metricInfo
MysqlTableLockWaitWriteCount metricInfo
MysqlTableLockWaitWriteTime metricInfo
MysqlTableRows metricInfo
MysqlTableSize metricInfo
MysqlTableOpenCache metricInfo
MysqlThreads metricInfo
MysqlTmpResources metricInfo
MysqlUptime metricInfo
}
// metricInfo describes a single metric; at present it carries only the
// metric's emitted name (e.g. "mysql.uptime").
type metricInfo struct {
Name string
}
// metricMysqlBufferPoolDataPages accumulates data points for the
// "mysql.buffer_pool.data_pages" metric between calls to emit.
type metricMysqlBufferPoolDataPages struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills mysql.buffer_pool.data_pages metric with initial data.
func (m *metricMysqlBufferPoolDataPages) init() {
m.data.SetName("mysql.buffer_pool.data_pages")
m.data.SetDescription("The number of data pages in the InnoDB buffer pool.")
m.data.SetUnit("1")
m.data.SetEmptySum()
// Non-monotonic cumulative sum: the page count can go up or down.
m.data.Sum().SetIsMonotonic(false)
m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
}
// recordDataPoint appends one data point with the given timestamps, value, and
// "status" attribute; it is a no-op when the metric is disabled in config.
func (m *metricMysqlBufferPoolDataPages) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, bufferPoolDataAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Sum().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
dp.Attributes().PutStr("status", bufferPoolDataAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricMysqlBufferPoolDataPages) updateCapacity() {
if m.data.Sum().DataPoints().Len() > m.capacity {
m.capacity = m.data.Sum().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMysqlBufferPoolDataPages) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
// newMetricMysqlBufferPoolDataPages creates a recorder; the metric buffer is
// only allocated and initialized when the metric is enabled.
func newMetricMysqlBufferPoolDataPages(cfg MetricConfig) metricMysqlBufferPoolDataPages {
m := metricMysqlBufferPoolDataPages{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
// metricMysqlBufferPoolLimit accumulates data points for the
// "mysql.buffer_pool.limit" metric between calls to emit.
type metricMysqlBufferPoolLimit struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills mysql.buffer_pool.limit metric with initial data.
func (m *metricMysqlBufferPoolLimit) init() {
m.data.SetName("mysql.buffer_pool.limit")
m.data.SetDescription("The configured size of the InnoDB buffer pool.")
m.data.SetUnit("By")
m.data.SetEmptySum()
m.data.Sum().SetIsMonotonic(false)
m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
// NOTE(review): no EnsureCapacity call here — mdatagen appears to emit it
// only for metrics with attributes; presumably intentional, confirm in generator.
}
// recordDataPoint appends one attribute-less data point;
// it is a no-op when the metric is disabled in config.
func (m *metricMysqlBufferPoolLimit) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
if !m.config.Enabled {
return
}
dp := m.data.Sum().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricMysqlBufferPoolLimit) updateCapacity() {
if m.data.Sum().DataPoints().Len() > m.capacity {
m.capacity = m.data.Sum().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMysqlBufferPoolLimit) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
// newMetricMysqlBufferPoolLimit creates a recorder; the metric buffer is
// only allocated and initialized when the metric is enabled.
func newMetricMysqlBufferPoolLimit(cfg MetricConfig) metricMysqlBufferPoolLimit {
m := metricMysqlBufferPoolLimit{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
// metricMysqlBufferPoolOperations accumulates data points for the
// "mysql.buffer_pool.operations" metric between calls to emit.
type metricMysqlBufferPoolOperations struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills mysql.buffer_pool.operations metric with initial data.
func (m *metricMysqlBufferPoolOperations) init() {
m.data.SetName("mysql.buffer_pool.operations")
m.data.SetDescription("The number of operations on the InnoDB buffer pool.")
m.data.SetUnit("1")
m.data.SetEmptySum()
// Monotonic cumulative sum: an ever-increasing operation counter.
m.data.Sum().SetIsMonotonic(true)
m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
}
// recordDataPoint appends one data point with the given timestamps, value, and
// "operation" attribute; it is a no-op when the metric is disabled in config.
func (m *metricMysqlBufferPoolOperations) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, bufferPoolOperationsAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Sum().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
dp.Attributes().PutStr("operation", bufferPoolOperationsAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricMysqlBufferPoolOperations) updateCapacity() {
if m.data.Sum().DataPoints().Len() > m.capacity {
m.capacity = m.data.Sum().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMysqlBufferPoolOperations) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
// newMetricMysqlBufferPoolOperations creates a recorder; the metric buffer is
// only allocated and initialized when the metric is enabled.
func newMetricMysqlBufferPoolOperations(cfg MetricConfig) metricMysqlBufferPoolOperations {
m := metricMysqlBufferPoolOperations{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
// metricMysqlBufferPoolPageFlushes accumulates data points for the
// "mysql.buffer_pool.page_flushes" metric between calls to emit.
type metricMysqlBufferPoolPageFlushes struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills mysql.buffer_pool.page_flushes metric with initial data.
func (m *metricMysqlBufferPoolPageFlushes) init() {
m.data.SetName("mysql.buffer_pool.page_flushes")
m.data.SetDescription("The number of requests to flush pages from the InnoDB buffer pool.")
m.data.SetUnit("1")
m.data.SetEmptySum()
m.data.Sum().SetIsMonotonic(true)
m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
// NOTE(review): no EnsureCapacity call — consistent with the other
// attribute-less metrics in this file; presumably generator behavior.
}
// recordDataPoint appends one attribute-less data point;
// it is a no-op when the metric is disabled in config.
func (m *metricMysqlBufferPoolPageFlushes) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
if !m.config.Enabled {
return
}
dp := m.data.Sum().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricMysqlBufferPoolPageFlushes) updateCapacity() {
if m.data.Sum().DataPoints().Len() > m.capacity {
m.capacity = m.data.Sum().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMysqlBufferPoolPageFlushes) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
// newMetricMysqlBufferPoolPageFlushes creates a recorder; the metric buffer is
// only allocated and initialized when the metric is enabled.
func newMetricMysqlBufferPoolPageFlushes(cfg MetricConfig) metricMysqlBufferPoolPageFlushes {
m := metricMysqlBufferPoolPageFlushes{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
// metricMysqlBufferPoolPages accumulates data points for the
// "mysql.buffer_pool.pages" metric between calls to emit.
type metricMysqlBufferPoolPages struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills mysql.buffer_pool.pages metric with initial data.
func (m *metricMysqlBufferPoolPages) init() {
m.data.SetName("mysql.buffer_pool.pages")
m.data.SetDescription("The number of pages in the InnoDB buffer pool.")
m.data.SetUnit("1")
m.data.SetEmptySum()
m.data.Sum().SetIsMonotonic(false)
m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
}
// recordDataPoint appends one data point with the given timestamps, value, and
// "kind" attribute; it is a no-op when the metric is disabled in config.
func (m *metricMysqlBufferPoolPages) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, bufferPoolPagesAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Sum().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
dp.Attributes().PutStr("kind", bufferPoolPagesAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricMysqlBufferPoolPages) updateCapacity() {
if m.data.Sum().DataPoints().Len() > m.capacity {
m.capacity = m.data.Sum().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMysqlBufferPoolPages) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
// newMetricMysqlBufferPoolPages creates a recorder; the metric buffer is
// only allocated and initialized when the metric is enabled.
func newMetricMysqlBufferPoolPages(cfg MetricConfig) metricMysqlBufferPoolPages {
m := metricMysqlBufferPoolPages{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
// metricMysqlBufferPoolUsage accumulates data points for the
// "mysql.buffer_pool.usage" metric between calls to emit.
type metricMysqlBufferPoolUsage struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills mysql.buffer_pool.usage metric with initial data.
func (m *metricMysqlBufferPoolUsage) init() {
m.data.SetName("mysql.buffer_pool.usage")
m.data.SetDescription("The number of bytes in the InnoDB buffer pool.")
m.data.SetUnit("By")
m.data.SetEmptySum()
m.data.Sum().SetIsMonotonic(false)
m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
}
// recordDataPoint appends one data point with the given timestamps, value, and
// "status" attribute (shared buffer_pool_data attribute — same as data_pages);
// it is a no-op when the metric is disabled in config.
func (m *metricMysqlBufferPoolUsage) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, bufferPoolDataAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Sum().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
dp.Attributes().PutStr("status", bufferPoolDataAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricMysqlBufferPoolUsage) updateCapacity() {
if m.data.Sum().DataPoints().Len() > m.capacity {
m.capacity = m.data.Sum().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMysqlBufferPoolUsage) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
// newMetricMysqlBufferPoolUsage creates a recorder; the metric buffer is
// only allocated and initialized when the metric is enabled.
func newMetricMysqlBufferPoolUsage(cfg MetricConfig) metricMysqlBufferPoolUsage {
m := metricMysqlBufferPoolUsage{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
// metricMysqlClientNetworkIo accumulates data points for the
// "mysql.client.network.io" metric between calls to emit.
type metricMysqlClientNetworkIo struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills mysql.client.network.io metric with initial data.
func (m *metricMysqlClientNetworkIo) init() {
m.data.SetName("mysql.client.network.io")
m.data.SetDescription("The number of transmitted bytes between server and clients.")
m.data.SetUnit("By")
m.data.SetEmptySum()
m.data.Sum().SetIsMonotonic(true)
m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
}
// recordDataPoint appends one data point; the direction value is stored under
// the attribute key "kind" (generated name override — confirm against metadata.yaml).
// No-op when the metric is disabled in config.
func (m *metricMysqlClientNetworkIo) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, directionAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Sum().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
dp.Attributes().PutStr("kind", directionAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricMysqlClientNetworkIo) updateCapacity() {
if m.data.Sum().DataPoints().Len() > m.capacity {
m.capacity = m.data.Sum().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMysqlClientNetworkIo) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
// newMetricMysqlClientNetworkIo creates a recorder; the metric buffer is
// only allocated and initialized when the metric is enabled.
func newMetricMysqlClientNetworkIo(cfg MetricConfig) metricMysqlClientNetworkIo {
m := metricMysqlClientNetworkIo{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
// metricMysqlCommands accumulates data points for the
// "mysql.commands" metric between calls to emit.
type metricMysqlCommands struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills mysql.commands metric with initial data.
func (m *metricMysqlCommands) init() {
m.data.SetName("mysql.commands")
m.data.SetDescription("The number of times each type of command has been executed.")
m.data.SetUnit("1")
m.data.SetEmptySum()
m.data.Sum().SetIsMonotonic(true)
m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
}
// recordDataPoint appends one data point with the given timestamps, value, and
// "command" attribute; it is a no-op when the metric is disabled in config.
func (m *metricMysqlCommands) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, commandAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Sum().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
dp.Attributes().PutStr("command", commandAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricMysqlCommands) updateCapacity() {
if m.data.Sum().DataPoints().Len() > m.capacity {
m.capacity = m.data.Sum().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMysqlCommands) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
// newMetricMysqlCommands creates a recorder; the metric buffer is
// only allocated and initialized when the metric is enabled.
func newMetricMysqlCommands(cfg MetricConfig) metricMysqlCommands {
m := metricMysqlCommands{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
// metricMysqlConnectionCount accumulates data points for the
// "mysql.connection.count" metric between calls to emit.
type metricMysqlConnectionCount struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills mysql.connection.count metric with initial data.
func (m *metricMysqlConnectionCount) init() {
m.data.SetName("mysql.connection.count")
m.data.SetDescription("The number of connection attempts (successful or not) to the MySQL server.")
m.data.SetUnit("1")
m.data.SetEmptySum()
m.data.Sum().SetIsMonotonic(true)
m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
// NOTE(review): no EnsureCapacity call — consistent with the other
// attribute-less metrics in this file; presumably generator behavior.
}
// recordDataPoint appends one attribute-less data point;
// it is a no-op when the metric is disabled in config.
func (m *metricMysqlConnectionCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
if !m.config.Enabled {
return
}
dp := m.data.Sum().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricMysqlConnectionCount) updateCapacity() {
if m.data.Sum().DataPoints().Len() > m.capacity {
m.capacity = m.data.Sum().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMysqlConnectionCount) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
// newMetricMysqlConnectionCount creates a recorder; the metric buffer is
// only allocated and initialized when the metric is enabled.
func newMetricMysqlConnectionCount(cfg MetricConfig) metricMysqlConnectionCount {
m := metricMysqlConnectionCount{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
// metricMysqlConnectionErrors accumulates data points for the
// "mysql.connection.errors" metric between calls to emit.
type metricMysqlConnectionErrors struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills mysql.connection.errors metric with initial data.
func (m *metricMysqlConnectionErrors) init() {
m.data.SetName("mysql.connection.errors")
m.data.SetDescription("Errors that occur during the client connection process.")
m.data.SetUnit("1")
m.data.SetEmptySum()
m.data.Sum().SetIsMonotonic(true)
m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
}
// recordDataPoint appends one data point with the given timestamps, value, and
// "error" attribute; it is a no-op when the metric is disabled in config.
func (m *metricMysqlConnectionErrors) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, connectionErrorAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Sum().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
dp.Attributes().PutStr("error", connectionErrorAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricMysqlConnectionErrors) updateCapacity() {
if m.data.Sum().DataPoints().Len() > m.capacity {
m.capacity = m.data.Sum().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMysqlConnectionErrors) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
// newMetricMysqlConnectionErrors creates a recorder; the metric buffer is
// only allocated and initialized when the metric is enabled.
func newMetricMysqlConnectionErrors(cfg MetricConfig) metricMysqlConnectionErrors {
m := metricMysqlConnectionErrors{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
// metricMysqlDoubleWrites accumulates data points for the
// "mysql.double_writes" metric between calls to emit.
type metricMysqlDoubleWrites struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills mysql.double_writes metric with initial data.
func (m *metricMysqlDoubleWrites) init() {
m.data.SetName("mysql.double_writes")
m.data.SetDescription("The number of writes to the InnoDB doublewrite buffer.")
m.data.SetUnit("1")
m.data.SetEmptySum()
m.data.Sum().SetIsMonotonic(true)
m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
}
// recordDataPoint appends one data point with the given timestamps, value, and
// "kind" attribute; it is a no-op when the metric is disabled in config.
func (m *metricMysqlDoubleWrites) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, doubleWritesAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Sum().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
dp.Attributes().PutStr("kind", doubleWritesAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricMysqlDoubleWrites) updateCapacity() {
if m.data.Sum().DataPoints().Len() > m.capacity {
m.capacity = m.data.Sum().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMysqlDoubleWrites) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
// newMetricMysqlDoubleWrites creates a recorder; the metric buffer is
// only allocated and initialized when the metric is enabled.
func newMetricMysqlDoubleWrites(cfg MetricConfig) metricMysqlDoubleWrites {
m := metricMysqlDoubleWrites{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
// metricMysqlHandlers accumulates data points for the
// "mysql.handlers" metric between calls to emit.
type metricMysqlHandlers struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills mysql.handlers metric with initial data.
func (m *metricMysqlHandlers) init() {
m.data.SetName("mysql.handlers")
m.data.SetDescription("The number of requests to various MySQL handlers.")
m.data.SetUnit("1")
m.data.SetEmptySum()
m.data.Sum().SetIsMonotonic(true)
m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
}
// recordDataPoint appends one data point with the given timestamps, value, and
// "kind" attribute (the handler name); no-op when the metric is disabled in config.
func (m *metricMysqlHandlers) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, handlerAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Sum().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
dp.Attributes().PutStr("kind", handlerAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricMysqlHandlers) updateCapacity() {
if m.data.Sum().DataPoints().Len() > m.capacity {
m.capacity = m.data.Sum().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMysqlHandlers) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
// newMetricMysqlHandlers creates a recorder; the metric buffer is
// only allocated and initialized when the metric is enabled.
func newMetricMysqlHandlers(cfg MetricConfig) metricMysqlHandlers {
m := metricMysqlHandlers{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
// metricMysqlIndexIoWaitCount accumulates data points for the
// "mysql.index.io.wait.count" metric between calls to emit.
type metricMysqlIndexIoWaitCount struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills mysql.index.io.wait.count metric with initial data.
func (m *metricMysqlIndexIoWaitCount) init() {
m.data.SetName("mysql.index.io.wait.count")
m.data.SetDescription("The total count of I/O wait events for an index.")
m.data.SetUnit("1")
m.data.SetEmptySum()
m.data.Sum().SetIsMonotonic(true)
m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
}
// recordDataPoint appends one data point carrying the "operation", "table",
// "schema", and "index" attributes; no-op when the metric is disabled in config.
func (m *metricMysqlIndexIoWaitCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, ioWaitsOperationsAttributeValue string, tableNameAttributeValue string, schemaAttributeValue string, indexNameAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Sum().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
dp.Attributes().PutStr("operation", ioWaitsOperationsAttributeValue)
dp.Attributes().PutStr("table", tableNameAttributeValue)
dp.Attributes().PutStr("schema", schemaAttributeValue)
dp.Attributes().PutStr("index", indexNameAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricMysqlIndexIoWaitCount) updateCapacity() {
if m.data.Sum().DataPoints().Len() > m.capacity {
m.capacity = m.data.Sum().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMysqlIndexIoWaitCount) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
// newMetricMysqlIndexIoWaitCount creates a recorder; the metric buffer is
// only allocated and initialized when the metric is enabled.
func newMetricMysqlIndexIoWaitCount(cfg MetricConfig) metricMysqlIndexIoWaitCount {
m := metricMysqlIndexIoWaitCount{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
// metricMysqlIndexIoWaitTime accumulates data points for the
// "mysql.index.io.wait.time" metric (nanoseconds) between calls to emit.
type metricMysqlIndexIoWaitTime struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills mysql.index.io.wait.time metric with initial data.
func (m *metricMysqlIndexIoWaitTime) init() {
m.data.SetName("mysql.index.io.wait.time")
m.data.SetDescription("The total time of I/O wait events for an index.")
m.data.SetUnit("ns")
m.data.SetEmptySum()
m.data.Sum().SetIsMonotonic(true)
m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
}
// recordDataPoint appends one data point carrying the "operation", "table",
// "schema", and "index" attributes; no-op when the metric is disabled in config.
func (m *metricMysqlIndexIoWaitTime) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, ioWaitsOperationsAttributeValue string, tableNameAttributeValue string, schemaAttributeValue string, indexNameAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Sum().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
dp.Attributes().PutStr("operation", ioWaitsOperationsAttributeValue)
dp.Attributes().PutStr("table", tableNameAttributeValue)
dp.Attributes().PutStr("schema", schemaAttributeValue)
dp.Attributes().PutStr("index", indexNameAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricMysqlIndexIoWaitTime) updateCapacity() {
if m.data.Sum().DataPoints().Len() > m.capacity {
m.capacity = m.data.Sum().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMysqlIndexIoWaitTime) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
// newMetricMysqlIndexIoWaitTime creates a recorder; the metric buffer is
// only allocated and initialized when the metric is enabled.
func newMetricMysqlIndexIoWaitTime(cfg MetricConfig) metricMysqlIndexIoWaitTime {
m := metricMysqlIndexIoWaitTime{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
// metricMysqlJoins accumulates data points for the
// "mysql.joins" metric between calls to emit.
type metricMysqlJoins struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills mysql.joins metric with initial data.
func (m *metricMysqlJoins) init() {
m.data.SetName("mysql.joins")
m.data.SetDescription("The number of joins that perform table scans.")
m.data.SetUnit("1")
m.data.SetEmptySum()
m.data.Sum().SetIsMonotonic(true)
m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
}
// recordDataPoint appends one data point with the given timestamps, value, and
// "kind" attribute; it is a no-op when the metric is disabled in config.
func (m *metricMysqlJoins) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, joinKindAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Sum().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
dp.Attributes().PutStr("kind", joinKindAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricMysqlJoins) updateCapacity() {
if m.data.Sum().DataPoints().Len() > m.capacity {
m.capacity = m.data.Sum().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMysqlJoins) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
// newMetricMysqlJoins creates a recorder; the metric buffer is
// only allocated and initialized when the metric is enabled.
func newMetricMysqlJoins(cfg MetricConfig) metricMysqlJoins {
m := metricMysqlJoins{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
// metricMysqlLocks accumulates data points for the
// "mysql.locks" metric between calls to emit.
type metricMysqlLocks struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills mysql.locks metric with initial data.
func (m *metricMysqlLocks) init() {
m.data.SetName("mysql.locks")
m.data.SetDescription("The number of MySQL locks.")
m.data.SetUnit("1")
m.data.SetEmptySum()
m.data.Sum().SetIsMonotonic(true)
m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
}
// recordDataPoint appends one data point with the given timestamps, value, and
// "kind" attribute; it is a no-op when the metric is disabled in config.
func (m *metricMysqlLocks) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, locksAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Sum().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
dp.Attributes().PutStr("kind", locksAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricMysqlLocks) updateCapacity() {
if m.data.Sum().DataPoints().Len() > m.capacity {
m.capacity = m.data.Sum().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMysqlLocks) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
// newMetricMysqlLocks creates a recorder; the metric buffer is
// only allocated and initialized when the metric is enabled.
func newMetricMysqlLocks(cfg MetricConfig) metricMysqlLocks {
m := metricMysqlLocks{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
// metricMysqlLogOperations accumulates data points for the
// "mysql.log_operations" metric between calls to emit.
type metricMysqlLogOperations struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills mysql.log_operations metric with initial data.
func (m *metricMysqlLogOperations) init() {
m.data.SetName("mysql.log_operations")
m.data.SetDescription("The number of InnoDB log operations.")
m.data.SetUnit("1")
m.data.SetEmptySum()
m.data.Sum().SetIsMonotonic(true)
m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
}
// recordDataPoint appends one data point with the given timestamps, value, and
// "operation" attribute; it is a no-op when the metric is disabled in config.
func (m *metricMysqlLogOperations) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, logOperationsAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Sum().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
dp.Attributes().PutStr("operation", logOperationsAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricMysqlLogOperations) updateCapacity() {
if m.data.Sum().DataPoints().Len() > m.capacity {
m.capacity = m.data.Sum().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMysqlLogOperations) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
// newMetricMysqlLogOperations creates a recorder; the metric buffer is
// only allocated and initialized when the metric is enabled.
func newMetricMysqlLogOperations(cfg MetricConfig) metricMysqlLogOperations {
m := metricMysqlLogOperations{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
// metricMysqlMysqlxConnections accumulates data points for the
// "mysql.mysqlx_connections" metric between calls to emit.
type metricMysqlMysqlxConnections struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills mysql.mysqlx_connections metric with initial data.
func (m *metricMysqlMysqlxConnections) init() {
m.data.SetName("mysql.mysqlx_connections")
m.data.SetDescription("The number of mysqlx connections.")
m.data.SetUnit("1")
m.data.SetEmptySum()
m.data.Sum().SetIsMonotonic(true)
m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
}
// recordDataPoint appends one data point with the given timestamps, value, and
// "status" attribute; it is a no-op when the metric is disabled in config.
func (m *metricMysqlMysqlxConnections) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, connectionStatusAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Sum().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
dp.Attributes().PutStr("status", connectionStatusAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricMysqlMysqlxConnections) updateCapacity() {
if m.data.Sum().DataPoints().Len() > m.capacity {
m.capacity = m.data.Sum().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMysqlMysqlxConnections) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
// newMetricMysqlMysqlxConnections creates a recorder; the metric buffer is
// only allocated and initialized when the metric is enabled.
func newMetricMysqlMysqlxConnections(cfg MetricConfig) metricMysqlMysqlxConnections {
m := metricMysqlMysqlxConnections{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
// metricMysqlMysqlxWorkerThreads accumulates data points for the mysql.mysqlx_worker_threads metric.
type metricMysqlMysqlxWorkerThreads struct {
	data     pmetric.Metric // accumulated metric payload.
	config   MetricConfig   // user-provided configuration for this metric.
	capacity int            // high-water mark of data point count, reused to pre-size slices.
}

// init resets the buffered metric to the mysql.mysqlx_worker_threads metadata.
func (m *metricMysqlMysqlxWorkerThreads) init() {
	sum := m.data.SetEmptySum()
	m.data.SetName("mysql.mysqlx_worker_threads")
	m.data.SetDescription("The number of worker threads available.")
	m.data.SetUnit("1")
	sum.SetIsMonotonic(false)
	sum.SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
	sum.DataPoints().EnsureCapacity(m.capacity)
}

// recordDataPoint appends one data point carrying the given timestamps, value, and attribute.
func (m *metricMysqlMysqlxWorkerThreads) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, mysqlxThreadsAttributeValue string) {
	if !m.config.Enabled {
		return
	}
	pt := m.data.Sum().DataPoints().AppendEmpty()
	pt.SetStartTimestamp(start)
	pt.SetTimestamp(ts)
	pt.SetIntValue(val)
	pt.Attributes().PutStr("kind", mysqlxThreadsAttributeValue)
}

// updateCapacity remembers the largest data point count observed so far.
func (m *metricMysqlMysqlxWorkerThreads) updateCapacity() {
	if n := m.data.Sum().DataPoints().Len(); n > m.capacity {
		m.capacity = n
	}
}

// emit moves any accumulated data into metrics and re-initializes the buffer
// so another batch of data points can be recorded.
func (m *metricMysqlMysqlxWorkerThreads) emit(metrics pmetric.MetricSlice) {
	if !m.config.Enabled || m.data.Sum().DataPoints().Len() == 0 {
		return
	}
	m.updateCapacity()
	m.data.MoveTo(metrics.AppendEmpty())
	m.init()
}

// newMetricMysqlMysqlxWorkerThreads builds a recorder for mysql.mysqlx_worker_threads;
// a disabled metric keeps its zero-value buffer and records nothing.
func newMetricMysqlMysqlxWorkerThreads(cfg MetricConfig) metricMysqlMysqlxWorkerThreads {
	m := metricMysqlMysqlxWorkerThreads{config: cfg}
	if !cfg.Enabled {
		return m
	}
	m.data = pmetric.NewMetric()
	m.init()
	return m
}
// metricMysqlOpenedResources accumulates data points for the mysql.opened_resources metric.
type metricMysqlOpenedResources struct {
	data     pmetric.Metric // accumulated metric payload.
	config   MetricConfig   // user-provided configuration for this metric.
	capacity int            // high-water mark of data point count, reused to pre-size slices.
}

// init resets the buffered metric to the mysql.opened_resources metadata.
func (m *metricMysqlOpenedResources) init() {
	sum := m.data.SetEmptySum()
	m.data.SetName("mysql.opened_resources")
	m.data.SetDescription("The number of opened resources.")
	m.data.SetUnit("1")
	sum.SetIsMonotonic(true)
	sum.SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
	sum.DataPoints().EnsureCapacity(m.capacity)
}

// recordDataPoint appends one data point carrying the given timestamps, value, and attribute.
func (m *metricMysqlOpenedResources) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, openedResourcesAttributeValue string) {
	if !m.config.Enabled {
		return
	}
	pt := m.data.Sum().DataPoints().AppendEmpty()
	pt.SetStartTimestamp(start)
	pt.SetTimestamp(ts)
	pt.SetIntValue(val)
	pt.Attributes().PutStr("kind", openedResourcesAttributeValue)
}

// updateCapacity remembers the largest data point count observed so far.
func (m *metricMysqlOpenedResources) updateCapacity() {
	if n := m.data.Sum().DataPoints().Len(); n > m.capacity {
		m.capacity = n
	}
}

// emit moves any accumulated data into metrics and re-initializes the buffer
// so another batch of data points can be recorded.
func (m *metricMysqlOpenedResources) emit(metrics pmetric.MetricSlice) {
	if !m.config.Enabled || m.data.Sum().DataPoints().Len() == 0 {
		return
	}
	m.updateCapacity()
	m.data.MoveTo(metrics.AppendEmpty())
	m.init()
}

// newMetricMysqlOpenedResources builds a recorder for mysql.opened_resources;
// a disabled metric keeps its zero-value buffer and records nothing.
func newMetricMysqlOpenedResources(cfg MetricConfig) metricMysqlOpenedResources {
	m := metricMysqlOpenedResources{config: cfg}
	if !cfg.Enabled {
		return m
	}
	m.data = pmetric.NewMetric()
	m.init()
	return m
}
// metricMysqlOperations accumulates data points for the mysql.operations metric.
type metricMysqlOperations struct {
	data     pmetric.Metric // accumulated metric payload.
	config   MetricConfig   // user-provided configuration for this metric.
	capacity int            // high-water mark of data point count, reused to pre-size slices.
}

// init resets the buffered metric to the mysql.operations metadata.
func (m *metricMysqlOperations) init() {
	sum := m.data.SetEmptySum()
	m.data.SetName("mysql.operations")
	m.data.SetDescription("The number of InnoDB operations.")
	m.data.SetUnit("1")
	sum.SetIsMonotonic(true)
	sum.SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
	sum.DataPoints().EnsureCapacity(m.capacity)
}

// recordDataPoint appends one data point carrying the given timestamps, value, and attribute.
func (m *metricMysqlOperations) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, operationsAttributeValue string) {
	if !m.config.Enabled {
		return
	}
	pt := m.data.Sum().DataPoints().AppendEmpty()
	pt.SetStartTimestamp(start)
	pt.SetTimestamp(ts)
	pt.SetIntValue(val)
	pt.Attributes().PutStr("operation", operationsAttributeValue)
}

// updateCapacity remembers the largest data point count observed so far.
func (m *metricMysqlOperations) updateCapacity() {
	if n := m.data.Sum().DataPoints().Len(); n > m.capacity {
		m.capacity = n
	}
}

// emit moves any accumulated data into metrics and re-initializes the buffer
// so another batch of data points can be recorded.
func (m *metricMysqlOperations) emit(metrics pmetric.MetricSlice) {
	if !m.config.Enabled || m.data.Sum().DataPoints().Len() == 0 {
		return
	}
	m.updateCapacity()
	m.data.MoveTo(metrics.AppendEmpty())
	m.init()
}

// newMetricMysqlOperations builds a recorder for mysql.operations; a disabled
// metric keeps its zero-value buffer and records nothing.
func newMetricMysqlOperations(cfg MetricConfig) metricMysqlOperations {
	m := metricMysqlOperations{config: cfg}
	if !cfg.Enabled {
		return m
	}
	m.data = pmetric.NewMetric()
	m.init()
	return m
}
// metricMysqlPageOperations accumulates data points for the mysql.page_operations metric.
type metricMysqlPageOperations struct {
	data     pmetric.Metric // accumulated metric payload.
	config   MetricConfig   // user-provided configuration for this metric.
	capacity int            // high-water mark of data point count, reused to pre-size slices.
}

// init resets the buffered metric to the mysql.page_operations metadata.
func (m *metricMysqlPageOperations) init() {
	sum := m.data.SetEmptySum()
	m.data.SetName("mysql.page_operations")
	m.data.SetDescription("The number of InnoDB page operations.")
	m.data.SetUnit("1")
	sum.SetIsMonotonic(true)
	sum.SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
	sum.DataPoints().EnsureCapacity(m.capacity)
}

// recordDataPoint appends one data point carrying the given timestamps, value, and attribute.
func (m *metricMysqlPageOperations) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, pageOperationsAttributeValue string) {
	if !m.config.Enabled {
		return
	}
	pt := m.data.Sum().DataPoints().AppendEmpty()
	pt.SetStartTimestamp(start)
	pt.SetTimestamp(ts)
	pt.SetIntValue(val)
	pt.Attributes().PutStr("operation", pageOperationsAttributeValue)
}

// updateCapacity remembers the largest data point count observed so far.
func (m *metricMysqlPageOperations) updateCapacity() {
	if n := m.data.Sum().DataPoints().Len(); n > m.capacity {
		m.capacity = n
	}
}

// emit moves any accumulated data into metrics and re-initializes the buffer
// so another batch of data points can be recorded.
func (m *metricMysqlPageOperations) emit(metrics pmetric.MetricSlice) {
	if !m.config.Enabled || m.data.Sum().DataPoints().Len() == 0 {
		return
	}
	m.updateCapacity()
	m.data.MoveTo(metrics.AppendEmpty())
	m.init()
}

// newMetricMysqlPageOperations builds a recorder for mysql.page_operations; a
// disabled metric keeps its zero-value buffer and records nothing.
func newMetricMysqlPageOperations(cfg MetricConfig) metricMysqlPageOperations {
	m := metricMysqlPageOperations{config: cfg}
	if !cfg.Enabled {
		return m
	}
	m.data = pmetric.NewMetric()
	m.init()
	return m
}
// metricMysqlPreparedStatements accumulates data points for the mysql.prepared_statements metric.
type metricMysqlPreparedStatements struct {
	data     pmetric.Metric // accumulated metric payload.
	config   MetricConfig   // user-provided configuration for this metric.
	capacity int            // high-water mark of data point count, reused to pre-size slices.
}

// init resets the buffered metric to the mysql.prepared_statements metadata.
func (m *metricMysqlPreparedStatements) init() {
	sum := m.data.SetEmptySum()
	m.data.SetName("mysql.prepared_statements")
	m.data.SetDescription("The number of times each type of prepared statement command has been issued.")
	m.data.SetUnit("1")
	sum.SetIsMonotonic(true)
	sum.SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
	sum.DataPoints().EnsureCapacity(m.capacity)
}

// recordDataPoint appends one data point carrying the given timestamps, value, and attribute.
func (m *metricMysqlPreparedStatements) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, preparedStatementsCommandAttributeValue string) {
	if !m.config.Enabled {
		return
	}
	pt := m.data.Sum().DataPoints().AppendEmpty()
	pt.SetStartTimestamp(start)
	pt.SetTimestamp(ts)
	pt.SetIntValue(val)
	pt.Attributes().PutStr("command", preparedStatementsCommandAttributeValue)
}

// updateCapacity remembers the largest data point count observed so far.
func (m *metricMysqlPreparedStatements) updateCapacity() {
	if n := m.data.Sum().DataPoints().Len(); n > m.capacity {
		m.capacity = n
	}
}

// emit moves any accumulated data into metrics and re-initializes the buffer
// so another batch of data points can be recorded.
func (m *metricMysqlPreparedStatements) emit(metrics pmetric.MetricSlice) {
	if !m.config.Enabled || m.data.Sum().DataPoints().Len() == 0 {
		return
	}
	m.updateCapacity()
	m.data.MoveTo(metrics.AppendEmpty())
	m.init()
}

// newMetricMysqlPreparedStatements builds a recorder for mysql.prepared_statements;
// a disabled metric keeps its zero-value buffer and records nothing.
func newMetricMysqlPreparedStatements(cfg MetricConfig) metricMysqlPreparedStatements {
	m := metricMysqlPreparedStatements{config: cfg}
	if !cfg.Enabled {
		return m
	}
	m.data = pmetric.NewMetric()
	m.init()
	return m
}
// metricMysqlQueryClientCount accumulates data points for the mysql.query.client.count metric.
type metricMysqlQueryClientCount struct {
	data     pmetric.Metric // accumulated metric payload.
	config   MetricConfig   // user-provided configuration for this metric.
	capacity int            // high-water mark of data point count, reused to pre-size slices.
}

// init resets the buffered metric to the mysql.query.client.count metadata.
func (m *metricMysqlQueryClientCount) init() {
	sum := m.data.SetEmptySum()
	m.data.SetName("mysql.query.client.count")
	m.data.SetDescription("The number of statements executed by the server. This includes only statements sent to the server by clients.")
	m.data.SetUnit("1")
	sum.SetIsMonotonic(true)
	sum.SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
}

// recordDataPoint appends one data point carrying the given timestamps and value.
func (m *metricMysqlQueryClientCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
	if !m.config.Enabled {
		return
	}
	pt := m.data.Sum().DataPoints().AppendEmpty()
	pt.SetStartTimestamp(start)
	pt.SetTimestamp(ts)
	pt.SetIntValue(val)
}

// updateCapacity remembers the largest data point count observed so far.
func (m *metricMysqlQueryClientCount) updateCapacity() {
	if n := m.data.Sum().DataPoints().Len(); n > m.capacity {
		m.capacity = n
	}
}

// emit moves any accumulated data into metrics and re-initializes the buffer
// so another batch of data points can be recorded.
func (m *metricMysqlQueryClientCount) emit(metrics pmetric.MetricSlice) {
	if !m.config.Enabled || m.data.Sum().DataPoints().Len() == 0 {
		return
	}
	m.updateCapacity()
	m.data.MoveTo(metrics.AppendEmpty())
	m.init()
}

// newMetricMysqlQueryClientCount builds a recorder for mysql.query.client.count;
// a disabled metric keeps its zero-value buffer and records nothing.
func newMetricMysqlQueryClientCount(cfg MetricConfig) metricMysqlQueryClientCount {
	m := metricMysqlQueryClientCount{config: cfg}
	if !cfg.Enabled {
		return m
	}
	m.data = pmetric.NewMetric()
	m.init()
	return m
}
// metricMysqlQueryCount accumulates data points for the mysql.query.count metric.
type metricMysqlQueryCount struct {
	data     pmetric.Metric // accumulated metric payload.
	config   MetricConfig   // user-provided configuration for this metric.
	capacity int            // high-water mark of data point count, reused to pre-size slices.
}

// init resets the buffered metric to the mysql.query.count metadata.
func (m *metricMysqlQueryCount) init() {
	sum := m.data.SetEmptySum()
	m.data.SetName("mysql.query.count")
	m.data.SetDescription("The number of statements executed by the server.")
	m.data.SetUnit("1")
	sum.SetIsMonotonic(true)
	sum.SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
}

// recordDataPoint appends one data point carrying the given timestamps and value.
func (m *metricMysqlQueryCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
	if !m.config.Enabled {
		return
	}
	pt := m.data.Sum().DataPoints().AppendEmpty()
	pt.SetStartTimestamp(start)
	pt.SetTimestamp(ts)
	pt.SetIntValue(val)
}

// updateCapacity remembers the largest data point count observed so far.
func (m *metricMysqlQueryCount) updateCapacity() {
	if n := m.data.Sum().DataPoints().Len(); n > m.capacity {
		m.capacity = n
	}
}

// emit moves any accumulated data into metrics and re-initializes the buffer
// so another batch of data points can be recorded.
func (m *metricMysqlQueryCount) emit(metrics pmetric.MetricSlice) {
	if !m.config.Enabled || m.data.Sum().DataPoints().Len() == 0 {
		return
	}
	m.updateCapacity()
	m.data.MoveTo(metrics.AppendEmpty())
	m.init()
}

// newMetricMysqlQueryCount builds a recorder for mysql.query.count; a disabled
// metric keeps its zero-value buffer and records nothing.
func newMetricMysqlQueryCount(cfg MetricConfig) metricMysqlQueryCount {
	m := metricMysqlQueryCount{config: cfg}
	if !cfg.Enabled {
		return m
	}
	m.data = pmetric.NewMetric()
	m.init()
	return m
}
// metricMysqlQuerySlowCount accumulates data points for the mysql.query.slow.count metric.
type metricMysqlQuerySlowCount struct {
	data     pmetric.Metric // accumulated metric payload.
	config   MetricConfig   // user-provided configuration for this metric.
	capacity int            // high-water mark of data point count, reused to pre-size slices.
}

// init resets the buffered metric to the mysql.query.slow.count metadata.
func (m *metricMysqlQuerySlowCount) init() {
	sum := m.data.SetEmptySum()
	m.data.SetName("mysql.query.slow.count")
	m.data.SetDescription("The number of slow queries.")
	m.data.SetUnit("1")
	sum.SetIsMonotonic(true)
	sum.SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
}

// recordDataPoint appends one data point carrying the given timestamps and value.
func (m *metricMysqlQuerySlowCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
	if !m.config.Enabled {
		return
	}
	pt := m.data.Sum().DataPoints().AppendEmpty()
	pt.SetStartTimestamp(start)
	pt.SetTimestamp(ts)
	pt.SetIntValue(val)
}

// updateCapacity remembers the largest data point count observed so far.
func (m *metricMysqlQuerySlowCount) updateCapacity() {
	if n := m.data.Sum().DataPoints().Len(); n > m.capacity {
		m.capacity = n
	}
}

// emit moves any accumulated data into metrics and re-initializes the buffer
// so another batch of data points can be recorded.
func (m *metricMysqlQuerySlowCount) emit(metrics pmetric.MetricSlice) {
	if !m.config.Enabled || m.data.Sum().DataPoints().Len() == 0 {
		return
	}
	m.updateCapacity()
	m.data.MoveTo(metrics.AppendEmpty())
	m.init()
}

// newMetricMysqlQuerySlowCount builds a recorder for mysql.query.slow.count; a
// disabled metric keeps its zero-value buffer and records nothing.
func newMetricMysqlQuerySlowCount(cfg MetricConfig) metricMysqlQuerySlowCount {
	m := metricMysqlQuerySlowCount{config: cfg}
	if !cfg.Enabled {
		return m
	}
	m.data = pmetric.NewMetric()
	m.init()
	return m
}
// metricMysqlReplicaSQLDelay accumulates data points for the mysql.replica.sql_delay metric.
type metricMysqlReplicaSQLDelay struct {
	data     pmetric.Metric // accumulated metric payload.
	config   MetricConfig   // user-provided configuration for this metric.
	capacity int            // high-water mark of data point count, reused to pre-size slices.
}

// init resets the buffered metric to the mysql.replica.sql_delay metadata.
func (m *metricMysqlReplicaSQLDelay) init() {
	sum := m.data.SetEmptySum()
	m.data.SetName("mysql.replica.sql_delay")
	m.data.SetDescription("The number of seconds that the replica must lag the source.")
	m.data.SetUnit("s")
	sum.SetIsMonotonic(false)
	sum.SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
}

// recordDataPoint appends one data point carrying the given timestamps and value.
func (m *metricMysqlReplicaSQLDelay) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
	if !m.config.Enabled {
		return
	}
	pt := m.data.Sum().DataPoints().AppendEmpty()
	pt.SetStartTimestamp(start)
	pt.SetTimestamp(ts)
	pt.SetIntValue(val)
}

// updateCapacity remembers the largest data point count observed so far.
func (m *metricMysqlReplicaSQLDelay) updateCapacity() {
	if n := m.data.Sum().DataPoints().Len(); n > m.capacity {
		m.capacity = n
	}
}

// emit moves any accumulated data into metrics and re-initializes the buffer
// so another batch of data points can be recorded.
func (m *metricMysqlReplicaSQLDelay) emit(metrics pmetric.MetricSlice) {
	if !m.config.Enabled || m.data.Sum().DataPoints().Len() == 0 {
		return
	}
	m.updateCapacity()
	m.data.MoveTo(metrics.AppendEmpty())
	m.init()
}

// newMetricMysqlReplicaSQLDelay builds a recorder for mysql.replica.sql_delay;
// a disabled metric keeps its zero-value buffer and records nothing.
func newMetricMysqlReplicaSQLDelay(cfg MetricConfig) metricMysqlReplicaSQLDelay {
	m := metricMysqlReplicaSQLDelay{config: cfg}
	if !cfg.Enabled {
		return m
	}
	m.data = pmetric.NewMetric()
	m.init()
	return m
}
// metricMysqlReplicaTimeBehindSource accumulates data points for the mysql.replica.time_behind_source metric.
type metricMysqlReplicaTimeBehindSource struct {
	data     pmetric.Metric // accumulated metric payload.
	config   MetricConfig   // user-provided configuration for this metric.
	capacity int            // high-water mark of data point count, reused to pre-size slices.
}

// init resets the buffered metric to the mysql.replica.time_behind_source metadata.
func (m *metricMysqlReplicaTimeBehindSource) init() {
	sum := m.data.SetEmptySum()
	m.data.SetName("mysql.replica.time_behind_source")
	m.data.SetDescription("This field is an indication of how “late” the replica is.")
	m.data.SetUnit("s")
	sum.SetIsMonotonic(false)
	sum.SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
}

// recordDataPoint appends one data point carrying the given timestamps and value.
func (m *metricMysqlReplicaTimeBehindSource) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
	if !m.config.Enabled {
		return
	}
	pt := m.data.Sum().DataPoints().AppendEmpty()
	pt.SetStartTimestamp(start)
	pt.SetTimestamp(ts)
	pt.SetIntValue(val)
}

// updateCapacity remembers the largest data point count observed so far.
func (m *metricMysqlReplicaTimeBehindSource) updateCapacity() {
	if n := m.data.Sum().DataPoints().Len(); n > m.capacity {
		m.capacity = n
	}
}

// emit moves any accumulated data into metrics and re-initializes the buffer
// so another batch of data points can be recorded.
func (m *metricMysqlReplicaTimeBehindSource) emit(metrics pmetric.MetricSlice) {
	if !m.config.Enabled || m.data.Sum().DataPoints().Len() == 0 {
		return
	}
	m.updateCapacity()
	m.data.MoveTo(metrics.AppendEmpty())
	m.init()
}

// newMetricMysqlReplicaTimeBehindSource builds a recorder for mysql.replica.time_behind_source;
// a disabled metric keeps its zero-value buffer and records nothing.
func newMetricMysqlReplicaTimeBehindSource(cfg MetricConfig) metricMysqlReplicaTimeBehindSource {
	m := metricMysqlReplicaTimeBehindSource{config: cfg}
	if !cfg.Enabled {
		return m
	}
	m.data = pmetric.NewMetric()
	m.init()
	return m
}
// metricMysqlRowLocks accumulates data points for the mysql.row_locks metric.
type metricMysqlRowLocks struct {
	data     pmetric.Metric // accumulated metric payload.
	config   MetricConfig   // user-provided configuration for this metric.
	capacity int            // high-water mark of data point count, reused to pre-size slices.
}

// init resets the buffered metric to the mysql.row_locks metadata.
func (m *metricMysqlRowLocks) init() {
	sum := m.data.SetEmptySum()
	m.data.SetName("mysql.row_locks")
	m.data.SetDescription("The number of InnoDB row locks.")
	m.data.SetUnit("1")
	sum.SetIsMonotonic(true)
	sum.SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
	sum.DataPoints().EnsureCapacity(m.capacity)
}

// recordDataPoint appends one data point carrying the given timestamps, value, and attribute.
func (m *metricMysqlRowLocks) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, rowLocksAttributeValue string) {
	if !m.config.Enabled {
		return
	}
	pt := m.data.Sum().DataPoints().AppendEmpty()
	pt.SetStartTimestamp(start)
	pt.SetTimestamp(ts)
	pt.SetIntValue(val)
	pt.Attributes().PutStr("kind", rowLocksAttributeValue)
}

// updateCapacity remembers the largest data point count observed so far.
func (m *metricMysqlRowLocks) updateCapacity() {
	if n := m.data.Sum().DataPoints().Len(); n > m.capacity {
		m.capacity = n
	}
}

// emit moves any accumulated data into metrics and re-initializes the buffer
// so another batch of data points can be recorded.
func (m *metricMysqlRowLocks) emit(metrics pmetric.MetricSlice) {
	if !m.config.Enabled || m.data.Sum().DataPoints().Len() == 0 {
		return
	}
	m.updateCapacity()
	m.data.MoveTo(metrics.AppendEmpty())
	m.init()
}

// newMetricMysqlRowLocks builds a recorder for mysql.row_locks; a disabled
// metric keeps its zero-value buffer and records nothing.
func newMetricMysqlRowLocks(cfg MetricConfig) metricMysqlRowLocks {
	m := metricMysqlRowLocks{config: cfg}
	if !cfg.Enabled {
		return m
	}
	m.data = pmetric.NewMetric()
	m.init()
	return m
}
// metricMysqlRowOperations accumulates data points for the mysql.row_operations metric.
type metricMysqlRowOperations struct {
	data     pmetric.Metric // accumulated metric payload.
	config   MetricConfig   // user-provided configuration for this metric.
	capacity int            // high-water mark of data point count, reused to pre-size slices.
}

// init resets the buffered metric to the mysql.row_operations metadata.
func (m *metricMysqlRowOperations) init() {
	sum := m.data.SetEmptySum()
	m.data.SetName("mysql.row_operations")
	m.data.SetDescription("The number of InnoDB row operations.")
	m.data.SetUnit("1")
	sum.SetIsMonotonic(true)
	sum.SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
	sum.DataPoints().EnsureCapacity(m.capacity)
}

// recordDataPoint appends one data point carrying the given timestamps, value, and attribute.
func (m *metricMysqlRowOperations) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, rowOperationsAttributeValue string) {
	if !m.config.Enabled {
		return
	}
	pt := m.data.Sum().DataPoints().AppendEmpty()
	pt.SetStartTimestamp(start)
	pt.SetTimestamp(ts)
	pt.SetIntValue(val)
	pt.Attributes().PutStr("operation", rowOperationsAttributeValue)
}

// updateCapacity remembers the largest data point count observed so far.
func (m *metricMysqlRowOperations) updateCapacity() {
	if n := m.data.Sum().DataPoints().Len(); n > m.capacity {
		m.capacity = n
	}
}

// emit moves any accumulated data into metrics and re-initializes the buffer
// so another batch of data points can be recorded.
func (m *metricMysqlRowOperations) emit(metrics pmetric.MetricSlice) {
	if !m.config.Enabled || m.data.Sum().DataPoints().Len() == 0 {
		return
	}
	m.updateCapacity()
	m.data.MoveTo(metrics.AppendEmpty())
	m.init()
}

// newMetricMysqlRowOperations builds a recorder for mysql.row_operations; a
// disabled metric keeps its zero-value buffer and records nothing.
func newMetricMysqlRowOperations(cfg MetricConfig) metricMysqlRowOperations {
	m := metricMysqlRowOperations{config: cfg}
	if !cfg.Enabled {
		return m
	}
	m.data = pmetric.NewMetric()
	m.init()
	return m
}
// metricMysqlSorts accumulates data points for the mysql.sorts metric.
type metricMysqlSorts struct {
	data     pmetric.Metric // accumulated metric payload.
	config   MetricConfig   // user-provided configuration for this metric.
	capacity int            // high-water mark of data point count, reused to pre-size slices.
}

// init resets the buffered metric to the mysql.sorts metadata.
func (m *metricMysqlSorts) init() {
	sum := m.data.SetEmptySum()
	m.data.SetName("mysql.sorts")
	m.data.SetDescription("The number of MySQL sorts.")
	m.data.SetUnit("1")
	sum.SetIsMonotonic(true)
	sum.SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
	sum.DataPoints().EnsureCapacity(m.capacity)
}

// recordDataPoint appends one data point carrying the given timestamps, value, and attribute.
func (m *metricMysqlSorts) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, sortsAttributeValue string) {
	if !m.config.Enabled {
		return
	}
	pt := m.data.Sum().DataPoints().AppendEmpty()
	pt.SetStartTimestamp(start)
	pt.SetTimestamp(ts)
	pt.SetIntValue(val)
	pt.Attributes().PutStr("kind", sortsAttributeValue)
}

// updateCapacity remembers the largest data point count observed so far.
func (m *metricMysqlSorts) updateCapacity() {
	if n := m.data.Sum().DataPoints().Len(); n > m.capacity {
		m.capacity = n
	}
}

// emit moves any accumulated data into metrics and re-initializes the buffer
// so another batch of data points can be recorded.
func (m *metricMysqlSorts) emit(metrics pmetric.MetricSlice) {
	if !m.config.Enabled || m.data.Sum().DataPoints().Len() == 0 {
		return
	}
	m.updateCapacity()
	m.data.MoveTo(metrics.AppendEmpty())
	m.init()
}

// newMetricMysqlSorts builds a recorder for mysql.sorts; a disabled metric
// keeps its zero-value buffer and records nothing.
func newMetricMysqlSorts(cfg MetricConfig) metricMysqlSorts {
	m := metricMysqlSorts{config: cfg}
	if !cfg.Enabled {
		return m
	}
	m.data = pmetric.NewMetric()
	m.init()
	return m
}
// metricMysqlStatementEventCount accumulates data points for the mysql.statement_event.count metric.
type metricMysqlStatementEventCount struct {
	data     pmetric.Metric // accumulated metric payload.
	config   MetricConfig   // user-provided configuration for this metric.
	capacity int            // high-water mark of data point count, reused to pre-size slices.
}

// init resets the buffered metric to the mysql.statement_event.count metadata.
func (m *metricMysqlStatementEventCount) init() {
	sum := m.data.SetEmptySum()
	m.data.SetName("mysql.statement_event.count")
	m.data.SetDescription("Summary of current and recent statement events.")
	m.data.SetUnit("1")
	sum.SetIsMonotonic(false)
	sum.SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
	sum.DataPoints().EnsureCapacity(m.capacity)
}

// recordDataPoint appends one data point carrying the given timestamps, value, and attributes.
func (m *metricMysqlStatementEventCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, schemaAttributeValue string, digestAttributeValue string, digestTextAttributeValue string, eventStateAttributeValue string) {
	if !m.config.Enabled {
		return
	}
	pt := m.data.Sum().DataPoints().AppendEmpty()
	pt.SetStartTimestamp(start)
	pt.SetTimestamp(ts)
	pt.SetIntValue(val)
	attrs := pt.Attributes()
	attrs.PutStr("schema", schemaAttributeValue)
	attrs.PutStr("digest", digestAttributeValue)
	attrs.PutStr("digest_text", digestTextAttributeValue)
	attrs.PutStr("kind", eventStateAttributeValue)
}

// updateCapacity remembers the largest data point count observed so far.
func (m *metricMysqlStatementEventCount) updateCapacity() {
	if n := m.data.Sum().DataPoints().Len(); n > m.capacity {
		m.capacity = n
	}
}

// emit moves any accumulated data into metrics and re-initializes the buffer
// so another batch of data points can be recorded.
func (m *metricMysqlStatementEventCount) emit(metrics pmetric.MetricSlice) {
	if !m.config.Enabled || m.data.Sum().DataPoints().Len() == 0 {
		return
	}
	m.updateCapacity()
	m.data.MoveTo(metrics.AppendEmpty())
	m.init()
}

// newMetricMysqlStatementEventCount builds a recorder for mysql.statement_event.count;
// a disabled metric keeps its zero-value buffer and records nothing.
func newMetricMysqlStatementEventCount(cfg MetricConfig) metricMysqlStatementEventCount {
	m := metricMysqlStatementEventCount{config: cfg}
	if !cfg.Enabled {
		return m
	}
	m.data = pmetric.NewMetric()
	m.init()
	return m
}
// metricMysqlStatementEventWaitTime accumulates data points for the mysql.statement_event.wait.time metric.
type metricMysqlStatementEventWaitTime struct {
	data     pmetric.Metric // accumulated metric payload.
	config   MetricConfig   // user-provided configuration for this metric.
	capacity int            // high-water mark of data point count, reused to pre-size slices.
}

// init resets the buffered metric to the mysql.statement_event.wait.time metadata.
func (m *metricMysqlStatementEventWaitTime) init() {
	sum := m.data.SetEmptySum()
	m.data.SetName("mysql.statement_event.wait.time")
	m.data.SetDescription("The total wait time of the summarized timed events.")
	m.data.SetUnit("ns")
	sum.SetIsMonotonic(false)
	sum.SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
	sum.DataPoints().EnsureCapacity(m.capacity)
}

// recordDataPoint appends one data point carrying the given timestamps, value, and attributes.
func (m *metricMysqlStatementEventWaitTime) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, schemaAttributeValue string, digestAttributeValue string, digestTextAttributeValue string) {
	if !m.config.Enabled {
		return
	}
	pt := m.data.Sum().DataPoints().AppendEmpty()
	pt.SetStartTimestamp(start)
	pt.SetTimestamp(ts)
	pt.SetIntValue(val)
	attrs := pt.Attributes()
	attrs.PutStr("schema", schemaAttributeValue)
	attrs.PutStr("digest", digestAttributeValue)
	attrs.PutStr("digest_text", digestTextAttributeValue)
}

// updateCapacity remembers the largest data point count observed so far.
func (m *metricMysqlStatementEventWaitTime) updateCapacity() {
	if n := m.data.Sum().DataPoints().Len(); n > m.capacity {
		m.capacity = n
	}
}

// emit moves any accumulated data into metrics and re-initializes the buffer
// so another batch of data points can be recorded.
func (m *metricMysqlStatementEventWaitTime) emit(metrics pmetric.MetricSlice) {
	if !m.config.Enabled || m.data.Sum().DataPoints().Len() == 0 {
		return
	}
	m.updateCapacity()
	m.data.MoveTo(metrics.AppendEmpty())
	m.init()
}

// newMetricMysqlStatementEventWaitTime builds a recorder for mysql.statement_event.wait.time;
// a disabled metric keeps its zero-value buffer and records nothing.
func newMetricMysqlStatementEventWaitTime(cfg MetricConfig) metricMysqlStatementEventWaitTime {
	m := metricMysqlStatementEventWaitTime{config: cfg}
	if !cfg.Enabled {
		return m
	}
	m.data = pmetric.NewMetric()
	m.init()
	return m
}
// metricMysqlTableAverageRowLength accumulates data points for the mysql.table.average_row_length metric.
type metricMysqlTableAverageRowLength struct {
	data     pmetric.Metric // accumulated metric payload.
	config   MetricConfig   // user-provided configuration for this metric.
	capacity int            // high-water mark of data point count, reused to pre-size slices.
}

// init resets the buffered metric to the mysql.table.average_row_length metadata.
func (m *metricMysqlTableAverageRowLength) init() {
	sum := m.data.SetEmptySum()
	m.data.SetName("mysql.table.average_row_length")
	m.data.SetDescription("The average row length in bytes for a given table.")
	m.data.SetUnit("By")
	sum.SetIsMonotonic(false)
	sum.SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
	sum.DataPoints().EnsureCapacity(m.capacity)
}

// recordDataPoint appends one data point carrying the given timestamps, value, and attributes.
func (m *metricMysqlTableAverageRowLength) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, tableNameAttributeValue string, schemaAttributeValue string) {
	if !m.config.Enabled {
		return
	}
	pt := m.data.Sum().DataPoints().AppendEmpty()
	pt.SetStartTimestamp(start)
	pt.SetTimestamp(ts)
	pt.SetIntValue(val)
	attrs := pt.Attributes()
	attrs.PutStr("table", tableNameAttributeValue)
	attrs.PutStr("schema", schemaAttributeValue)
}

// updateCapacity remembers the largest data point count observed so far.
func (m *metricMysqlTableAverageRowLength) updateCapacity() {
	if n := m.data.Sum().DataPoints().Len(); n > m.capacity {
		m.capacity = n
	}
}

// emit moves any accumulated data into metrics and re-initializes the buffer
// so another batch of data points can be recorded.
func (m *metricMysqlTableAverageRowLength) emit(metrics pmetric.MetricSlice) {
	if !m.config.Enabled || m.data.Sum().DataPoints().Len() == 0 {
		return
	}
	m.updateCapacity()
	m.data.MoveTo(metrics.AppendEmpty())
	m.init()
}

// newMetricMysqlTableAverageRowLength builds a recorder for mysql.table.average_row_length;
// a disabled metric keeps its zero-value buffer and records nothing.
func newMetricMysqlTableAverageRowLength(cfg MetricConfig) metricMysqlTableAverageRowLength {
	m := metricMysqlTableAverageRowLength{config: cfg}
	if !cfg.Enabled {
		return m
	}
	m.data = pmetric.NewMetric()
	m.init()
	return m
}
// metricMysqlTableIoWaitCount accumulates data points for the mysql.table.io.wait.count metric.
type metricMysqlTableIoWaitCount struct {
	data     pmetric.Metric // accumulated metric payload.
	config   MetricConfig   // user-provided configuration for this metric.
	capacity int            // high-water mark of data point count, reused to pre-size slices.
}

// init resets the buffered metric to the mysql.table.io.wait.count metadata.
func (m *metricMysqlTableIoWaitCount) init() {
	sum := m.data.SetEmptySum()
	m.data.SetName("mysql.table.io.wait.count")
	m.data.SetDescription("The total count of I/O wait events for a table.")
	m.data.SetUnit("1")
	sum.SetIsMonotonic(true)
	sum.SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
	sum.DataPoints().EnsureCapacity(m.capacity)
}

// recordDataPoint appends one data point carrying the given timestamps, value, and attributes.
func (m *metricMysqlTableIoWaitCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, ioWaitsOperationsAttributeValue string, tableNameAttributeValue string, schemaAttributeValue string) {
	if !m.config.Enabled {
		return
	}
	pt := m.data.Sum().DataPoints().AppendEmpty()
	pt.SetStartTimestamp(start)
	pt.SetTimestamp(ts)
	pt.SetIntValue(val)
	attrs := pt.Attributes()
	attrs.PutStr("operation", ioWaitsOperationsAttributeValue)
	attrs.PutStr("table", tableNameAttributeValue)
	attrs.PutStr("schema", schemaAttributeValue)
}

// updateCapacity remembers the largest data point count observed so far.
func (m *metricMysqlTableIoWaitCount) updateCapacity() {
	if n := m.data.Sum().DataPoints().Len(); n > m.capacity {
		m.capacity = n
	}
}

// emit moves any accumulated data into metrics and re-initializes the buffer
// so another batch of data points can be recorded.
func (m *metricMysqlTableIoWaitCount) emit(metrics pmetric.MetricSlice) {
	if !m.config.Enabled || m.data.Sum().DataPoints().Len() == 0 {
		return
	}
	m.updateCapacity()
	m.data.MoveTo(metrics.AppendEmpty())
	m.init()
}

// newMetricMysqlTableIoWaitCount builds a recorder for mysql.table.io.wait.count;
// a disabled metric keeps its zero-value buffer and records nothing.
func newMetricMysqlTableIoWaitCount(cfg MetricConfig) metricMysqlTableIoWaitCount {
	m := metricMysqlTableIoWaitCount{config: cfg}
	if !cfg.Enabled {
		return m
	}
	m.data = pmetric.NewMetric()
	m.init()
	return m
}
// metricMysqlTableIoWaitTime accumulates data points for the
// mysql.table.io.wait.time metric until they are emitted.
type metricMysqlTableIoWaitTime struct {
	data     pmetric.Metric // data buffer for generated metric.
	config   MetricConfig   // metric config provided by user.
	capacity int            // max observed number of data points added to the metric.
}

// init fills mysql.table.io.wait.time metric with initial data.
func (m *metricMysqlTableIoWaitTime) init() {
	m.data.SetName("mysql.table.io.wait.time")
	m.data.SetDescription("The total time of I/O wait events for a table.")
	m.data.SetUnit("ns")
	m.data.SetEmptySum()
	m.data.Sum().SetIsMonotonic(true)
	m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
	// Pre-allocate room for the largest batch of data points observed so far.
	m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
}

// recordDataPoint appends one data point with the given timestamps, value, and
// attribute values. It is a no-op when the metric is disabled in user config.
func (m *metricMysqlTableIoWaitTime) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, ioWaitsOperationsAttributeValue string, tableNameAttributeValue string, schemaAttributeValue string) {
	if !m.config.Enabled {
		return
	}
	dp := m.data.Sum().DataPoints().AppendEmpty()
	dp.SetStartTimestamp(start)
	dp.SetTimestamp(ts)
	dp.SetIntValue(val)
	dp.Attributes().PutStr("operation", ioWaitsOperationsAttributeValue)
	dp.Attributes().PutStr("table", tableNameAttributeValue)
	dp.Attributes().PutStr("schema", schemaAttributeValue)
}

// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricMysqlTableIoWaitTime) updateCapacity() {
	if m.data.Sum().DataPoints().Len() > m.capacity {
		m.capacity = m.data.Sum().DataPoints().Len()
	}
}

// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMysqlTableIoWaitTime) emit(metrics pmetric.MetricSlice) {
	if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
		m.updateCapacity()
		// MoveTo resets m.data, so re-initialize it for the next collection cycle.
		m.data.MoveTo(metrics.AppendEmpty())
		m.init()
	}
}

// newMetricMysqlTableIoWaitTime returns a builder for this metric; the data
// buffer is allocated only when the metric is enabled in user config.
func newMetricMysqlTableIoWaitTime(cfg MetricConfig) metricMysqlTableIoWaitTime {
	m := metricMysqlTableIoWaitTime{config: cfg}
	if cfg.Enabled {
		m.data = pmetric.NewMetric()
		m.init()
	}
	return m
}
// metricMysqlTableLockWaitReadCount accumulates data points for the
// mysql.table.lock_wait.read.count metric until they are emitted.
type metricMysqlTableLockWaitReadCount struct {
	data     pmetric.Metric // data buffer for generated metric.
	config   MetricConfig   // metric config provided by user.
	capacity int            // max observed number of data points added to the metric.
}

// init fills mysql.table.lock_wait.read.count metric with initial data.
func (m *metricMysqlTableLockWaitReadCount) init() {
	m.data.SetName("mysql.table.lock_wait.read.count")
	m.data.SetDescription("The total table lock wait read events.")
	m.data.SetUnit("1")
	m.data.SetEmptySum()
	m.data.Sum().SetIsMonotonic(false)
	m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
	// Pre-allocate room for the largest batch of data points observed so far.
	m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
}

// recordDataPoint appends one data point with the given timestamps, value, and
// attribute values ("kind" carries the read lock type). It is a no-op when the
// metric is disabled in user config.
func (m *metricMysqlTableLockWaitReadCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, schemaAttributeValue string, tableNameAttributeValue string, readLockTypeAttributeValue string) {
	if !m.config.Enabled {
		return
	}
	dp := m.data.Sum().DataPoints().AppendEmpty()
	dp.SetStartTimestamp(start)
	dp.SetTimestamp(ts)
	dp.SetIntValue(val)
	dp.Attributes().PutStr("schema", schemaAttributeValue)
	dp.Attributes().PutStr("table", tableNameAttributeValue)
	dp.Attributes().PutStr("kind", readLockTypeAttributeValue)
}

// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricMysqlTableLockWaitReadCount) updateCapacity() {
	if m.data.Sum().DataPoints().Len() > m.capacity {
		m.capacity = m.data.Sum().DataPoints().Len()
	}
}

// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMysqlTableLockWaitReadCount) emit(metrics pmetric.MetricSlice) {
	if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
		m.updateCapacity()
		// MoveTo resets m.data, so re-initialize it for the next collection cycle.
		m.data.MoveTo(metrics.AppendEmpty())
		m.init()
	}
}

// newMetricMysqlTableLockWaitReadCount returns a builder for this metric; the
// data buffer is allocated only when the metric is enabled in user config.
func newMetricMysqlTableLockWaitReadCount(cfg MetricConfig) metricMysqlTableLockWaitReadCount {
	m := metricMysqlTableLockWaitReadCount{config: cfg}
	if cfg.Enabled {
		m.data = pmetric.NewMetric()
		m.init()
	}
	return m
}
// metricMysqlTableLockWaitReadTime accumulates data points for the
// mysql.table.lock_wait.read.time metric until they are emitted.
type metricMysqlTableLockWaitReadTime struct {
	data     pmetric.Metric // data buffer for generated metric.
	config   MetricConfig   // metric config provided by user.
	capacity int            // max observed number of data points added to the metric.
}

// init fills mysql.table.lock_wait.read.time metric with initial data.
func (m *metricMysqlTableLockWaitReadTime) init() {
	m.data.SetName("mysql.table.lock_wait.read.time")
	m.data.SetDescription("The total table lock wait read events times.")
	m.data.SetUnit("ns")
	m.data.SetEmptySum()
	m.data.Sum().SetIsMonotonic(false)
	m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
	// Pre-allocate room for the largest batch of data points observed so far.
	m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
}

// recordDataPoint appends one data point with the given timestamps, value, and
// attribute values ("kind" carries the read lock type). It is a no-op when the
// metric is disabled in user config.
func (m *metricMysqlTableLockWaitReadTime) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, schemaAttributeValue string, tableNameAttributeValue string, readLockTypeAttributeValue string) {
	if !m.config.Enabled {
		return
	}
	dp := m.data.Sum().DataPoints().AppendEmpty()
	dp.SetStartTimestamp(start)
	dp.SetTimestamp(ts)
	dp.SetIntValue(val)
	dp.Attributes().PutStr("schema", schemaAttributeValue)
	dp.Attributes().PutStr("table", tableNameAttributeValue)
	dp.Attributes().PutStr("kind", readLockTypeAttributeValue)
}

// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricMysqlTableLockWaitReadTime) updateCapacity() {
	if m.data.Sum().DataPoints().Len() > m.capacity {
		m.capacity = m.data.Sum().DataPoints().Len()
	}
}

// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMysqlTableLockWaitReadTime) emit(metrics pmetric.MetricSlice) {
	if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
		m.updateCapacity()
		// MoveTo resets m.data, so re-initialize it for the next collection cycle.
		m.data.MoveTo(metrics.AppendEmpty())
		m.init()
	}
}

// newMetricMysqlTableLockWaitReadTime returns a builder for this metric; the
// data buffer is allocated only when the metric is enabled in user config.
func newMetricMysqlTableLockWaitReadTime(cfg MetricConfig) metricMysqlTableLockWaitReadTime {
	m := metricMysqlTableLockWaitReadTime{config: cfg}
	if cfg.Enabled {
		m.data = pmetric.NewMetric()
		m.init()
	}
	return m
}
// metricMysqlTableLockWaitWriteCount accumulates data points for the
// mysql.table.lock_wait.write.count metric until they are emitted.
type metricMysqlTableLockWaitWriteCount struct {
	data     pmetric.Metric // data buffer for generated metric.
	config   MetricConfig   // metric config provided by user.
	capacity int            // max observed number of data points added to the metric.
}

// init fills mysql.table.lock_wait.write.count metric with initial data.
func (m *metricMysqlTableLockWaitWriteCount) init() {
	m.data.SetName("mysql.table.lock_wait.write.count")
	m.data.SetDescription("The total table lock wait write events.")
	m.data.SetUnit("1")
	m.data.SetEmptySum()
	m.data.Sum().SetIsMonotonic(false)
	m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
	// Pre-allocate room for the largest batch of data points observed so far.
	m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
}

// recordDataPoint appends one data point with the given timestamps, value, and
// attribute values ("kind" carries the write lock type). It is a no-op when the
// metric is disabled in user config.
func (m *metricMysqlTableLockWaitWriteCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, schemaAttributeValue string, tableNameAttributeValue string, writeLockTypeAttributeValue string) {
	if !m.config.Enabled {
		return
	}
	dp := m.data.Sum().DataPoints().AppendEmpty()
	dp.SetStartTimestamp(start)
	dp.SetTimestamp(ts)
	dp.SetIntValue(val)
	dp.Attributes().PutStr("schema", schemaAttributeValue)
	dp.Attributes().PutStr("table", tableNameAttributeValue)
	dp.Attributes().PutStr("kind", writeLockTypeAttributeValue)
}

// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricMysqlTableLockWaitWriteCount) updateCapacity() {
	if m.data.Sum().DataPoints().Len() > m.capacity {
		m.capacity = m.data.Sum().DataPoints().Len()
	}
}

// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMysqlTableLockWaitWriteCount) emit(metrics pmetric.MetricSlice) {
	if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
		m.updateCapacity()
		// MoveTo resets m.data, so re-initialize it for the next collection cycle.
		m.data.MoveTo(metrics.AppendEmpty())
		m.init()
	}
}

// newMetricMysqlTableLockWaitWriteCount returns a builder for this metric; the
// data buffer is allocated only when the metric is enabled in user config.
func newMetricMysqlTableLockWaitWriteCount(cfg MetricConfig) metricMysqlTableLockWaitWriteCount {
	m := metricMysqlTableLockWaitWriteCount{config: cfg}
	if cfg.Enabled {
		m.data = pmetric.NewMetric()
		m.init()
	}
	return m
}
// metricMysqlTableLockWaitWriteTime accumulates data points for the
// mysql.table.lock_wait.write.time metric until they are emitted.
type metricMysqlTableLockWaitWriteTime struct {
	data     pmetric.Metric // data buffer for generated metric.
	config   MetricConfig   // metric config provided by user.
	capacity int            // max observed number of data points added to the metric.
}

// init fills mysql.table.lock_wait.write.time metric with initial data.
func (m *metricMysqlTableLockWaitWriteTime) init() {
	m.data.SetName("mysql.table.lock_wait.write.time")
	m.data.SetDescription("The total table lock wait write events times.")
	m.data.SetUnit("ns")
	m.data.SetEmptySum()
	m.data.Sum().SetIsMonotonic(false)
	m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
	// Pre-allocate room for the largest batch of data points observed so far.
	m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
}

// recordDataPoint appends one data point with the given timestamps, value, and
// attribute values ("kind" carries the write lock type). It is a no-op when the
// metric is disabled in user config.
func (m *metricMysqlTableLockWaitWriteTime) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, schemaAttributeValue string, tableNameAttributeValue string, writeLockTypeAttributeValue string) {
	if !m.config.Enabled {
		return
	}
	dp := m.data.Sum().DataPoints().AppendEmpty()
	dp.SetStartTimestamp(start)
	dp.SetTimestamp(ts)
	dp.SetIntValue(val)
	dp.Attributes().PutStr("schema", schemaAttributeValue)
	dp.Attributes().PutStr("table", tableNameAttributeValue)
	dp.Attributes().PutStr("kind", writeLockTypeAttributeValue)
}

// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricMysqlTableLockWaitWriteTime) updateCapacity() {
	if m.data.Sum().DataPoints().Len() > m.capacity {
		m.capacity = m.data.Sum().DataPoints().Len()
	}
}

// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMysqlTableLockWaitWriteTime) emit(metrics pmetric.MetricSlice) {
	if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
		m.updateCapacity()
		// MoveTo resets m.data, so re-initialize it for the next collection cycle.
		m.data.MoveTo(metrics.AppendEmpty())
		m.init()
	}
}

// newMetricMysqlTableLockWaitWriteTime returns a builder for this metric; the
// data buffer is allocated only when the metric is enabled in user config.
func newMetricMysqlTableLockWaitWriteTime(cfg MetricConfig) metricMysqlTableLockWaitWriteTime {
	m := metricMysqlTableLockWaitWriteTime{config: cfg}
	if cfg.Enabled {
		m.data = pmetric.NewMetric()
		m.init()
	}
	return m
}
// metricMysqlTableRows accumulates data points for the mysql.table.rows metric
// until they are emitted.
type metricMysqlTableRows struct {
	data     pmetric.Metric // data buffer for generated metric.
	config   MetricConfig   // metric config provided by user.
	capacity int            // max observed number of data points added to the metric.
}

// init fills mysql.table.rows metric with initial data.
func (m *metricMysqlTableRows) init() {
	m.data.SetName("mysql.table.rows")
	m.data.SetDescription("The number of rows for a given table.")
	m.data.SetUnit("1")
	m.data.SetEmptySum()
	m.data.Sum().SetIsMonotonic(false)
	m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
	// Pre-allocate room for the largest batch of data points observed so far.
	m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
}

// recordDataPoint appends one data point with the given timestamps, value, and
// attribute values. It is a no-op when the metric is disabled in user config.
func (m *metricMysqlTableRows) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, tableNameAttributeValue string, schemaAttributeValue string) {
	if !m.config.Enabled {
		return
	}
	dp := m.data.Sum().DataPoints().AppendEmpty()
	dp.SetStartTimestamp(start)
	dp.SetTimestamp(ts)
	dp.SetIntValue(val)
	dp.Attributes().PutStr("table", tableNameAttributeValue)
	dp.Attributes().PutStr("schema", schemaAttributeValue)
}

// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricMysqlTableRows) updateCapacity() {
	if m.data.Sum().DataPoints().Len() > m.capacity {
		m.capacity = m.data.Sum().DataPoints().Len()
	}
}

// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMysqlTableRows) emit(metrics pmetric.MetricSlice) {
	if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
		m.updateCapacity()
		// MoveTo resets m.data, so re-initialize it for the next collection cycle.
		m.data.MoveTo(metrics.AppendEmpty())
		m.init()
	}
}

// newMetricMysqlTableRows returns a builder for this metric; the data buffer
// is allocated only when the metric is enabled in user config.
func newMetricMysqlTableRows(cfg MetricConfig) metricMysqlTableRows {
	m := metricMysqlTableRows{config: cfg}
	if cfg.Enabled {
		m.data = pmetric.NewMetric()
		m.init()
	}
	return m
}
// metricMysqlTableSize accumulates data points for the mysql.table.size metric
// until they are emitted.
type metricMysqlTableSize struct {
	data     pmetric.Metric // data buffer for generated metric.
	config   MetricConfig   // metric config provided by user.
	capacity int            // max observed number of data points added to the metric.
}

// init fills mysql.table.size metric with initial data.
func (m *metricMysqlTableSize) init() {
	m.data.SetName("mysql.table.size")
	m.data.SetDescription("The table size in bytes for a given table.")
	m.data.SetUnit("By")
	m.data.SetEmptySum()
	m.data.Sum().SetIsMonotonic(false)
	m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
	// Pre-allocate room for the largest batch of data points observed so far.
	m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
}

// recordDataPoint appends one data point with the given timestamps, value, and
// attribute values ("kind" carries the table size type). It is a no-op when
// the metric is disabled in user config.
func (m *metricMysqlTableSize) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, tableNameAttributeValue string, schemaAttributeValue string, tableSizeTypeAttributeValue string) {
	if !m.config.Enabled {
		return
	}
	dp := m.data.Sum().DataPoints().AppendEmpty()
	dp.SetStartTimestamp(start)
	dp.SetTimestamp(ts)
	dp.SetIntValue(val)
	dp.Attributes().PutStr("table", tableNameAttributeValue)
	dp.Attributes().PutStr("schema", schemaAttributeValue)
	dp.Attributes().PutStr("kind", tableSizeTypeAttributeValue)
}

// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricMysqlTableSize) updateCapacity() {
	if m.data.Sum().DataPoints().Len() > m.capacity {
		m.capacity = m.data.Sum().DataPoints().Len()
	}
}

// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMysqlTableSize) emit(metrics pmetric.MetricSlice) {
	if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
		m.updateCapacity()
		// MoveTo resets m.data, so re-initialize it for the next collection cycle.
		m.data.MoveTo(metrics.AppendEmpty())
		m.init()
	}
}

// newMetricMysqlTableSize returns a builder for this metric; the data buffer
// is allocated only when the metric is enabled in user config.
func newMetricMysqlTableSize(cfg MetricConfig) metricMysqlTableSize {
	m := metricMysqlTableSize{config: cfg}
	if cfg.Enabled {
		m.data = pmetric.NewMetric()
		m.init()
	}
	return m
}
// metricMysqlTableOpenCache accumulates data points for the
// mysql.table_open_cache metric until they are emitted.
type metricMysqlTableOpenCache struct {
	data     pmetric.Metric // data buffer for generated metric.
	config   MetricConfig   // metric config provided by user.
	capacity int            // max observed number of data points added to the metric.
}

// init fills mysql.table_open_cache metric with initial data.
func (m *metricMysqlTableOpenCache) init() {
	m.data.SetName("mysql.table_open_cache")
	m.data.SetDescription("The number of hits, misses or overflows for open tables cache lookups.")
	m.data.SetUnit("1")
	m.data.SetEmptySum()
	m.data.Sum().SetIsMonotonic(true)
	m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
	// Pre-allocate room for the largest batch of data points observed so far.
	m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
}

// recordDataPoint appends one data point with the given timestamps, value, and
// cache status attribute. It is a no-op when the metric is disabled in user config.
func (m *metricMysqlTableOpenCache) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, cacheStatusAttributeValue string) {
	if !m.config.Enabled {
		return
	}
	dp := m.data.Sum().DataPoints().AppendEmpty()
	dp.SetStartTimestamp(start)
	dp.SetTimestamp(ts)
	dp.SetIntValue(val)
	dp.Attributes().PutStr("status", cacheStatusAttributeValue)
}

// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricMysqlTableOpenCache) updateCapacity() {
	if m.data.Sum().DataPoints().Len() > m.capacity {
		m.capacity = m.data.Sum().DataPoints().Len()
	}
}

// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMysqlTableOpenCache) emit(metrics pmetric.MetricSlice) {
	if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
		m.updateCapacity()
		// MoveTo resets m.data, so re-initialize it for the next collection cycle.
		m.data.MoveTo(metrics.AppendEmpty())
		m.init()
	}
}

// newMetricMysqlTableOpenCache returns a builder for this metric; the data
// buffer is allocated only when the metric is enabled in user config.
func newMetricMysqlTableOpenCache(cfg MetricConfig) metricMysqlTableOpenCache {
	m := metricMysqlTableOpenCache{config: cfg}
	if cfg.Enabled {
		m.data = pmetric.NewMetric()
		m.init()
	}
	return m
}
// metricMysqlThreads accumulates data points for the mysql.threads metric
// until they are emitted.
type metricMysqlThreads struct {
	data     pmetric.Metric // data buffer for generated metric.
	config   MetricConfig   // metric config provided by user.
	capacity int            // max observed number of data points added to the metric.
}

// init fills mysql.threads metric with initial data.
func (m *metricMysqlThreads) init() {
	m.data.SetName("mysql.threads")
	m.data.SetDescription("The state of MySQL threads.")
	m.data.SetUnit("1")
	m.data.SetEmptySum()
	m.data.Sum().SetIsMonotonic(false)
	m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
	// Pre-allocate room for the largest batch of data points observed so far.
	m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
}

// recordDataPoint appends one data point with the given timestamps, value, and
// thread-kind attribute. It is a no-op when the metric is disabled in user config.
func (m *metricMysqlThreads) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, threadsAttributeValue string) {
	if !m.config.Enabled {
		return
	}
	dp := m.data.Sum().DataPoints().AppendEmpty()
	dp.SetStartTimestamp(start)
	dp.SetTimestamp(ts)
	dp.SetIntValue(val)
	dp.Attributes().PutStr("kind", threadsAttributeValue)
}

// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricMysqlThreads) updateCapacity() {
	if m.data.Sum().DataPoints().Len() > m.capacity {
		m.capacity = m.data.Sum().DataPoints().Len()
	}
}

// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMysqlThreads) emit(metrics pmetric.MetricSlice) {
	if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
		m.updateCapacity()
		// MoveTo resets m.data, so re-initialize it for the next collection cycle.
		m.data.MoveTo(metrics.AppendEmpty())
		m.init()
	}
}

// newMetricMysqlThreads returns a builder for this metric; the data buffer is
// allocated only when the metric is enabled in user config.
func newMetricMysqlThreads(cfg MetricConfig) metricMysqlThreads {
	m := metricMysqlThreads{config: cfg}
	if cfg.Enabled {
		m.data = pmetric.NewMetric()
		m.init()
	}
	return m
}
// metricMysqlTmpResources accumulates data points for the mysql.tmp_resources
// metric until they are emitted.
type metricMysqlTmpResources struct {
	data     pmetric.Metric // data buffer for generated metric.
	config   MetricConfig   // metric config provided by user.
	capacity int            // max observed number of data points added to the metric.
}

// init fills mysql.tmp_resources metric with initial data.
func (m *metricMysqlTmpResources) init() {
	m.data.SetName("mysql.tmp_resources")
	m.data.SetDescription("The number of created temporary resources.")
	m.data.SetUnit("1")
	m.data.SetEmptySum()
	m.data.Sum().SetIsMonotonic(true)
	m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
	// Pre-allocate room for the largest batch of data points observed so far.
	m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
}

// recordDataPoint appends one data point with the given timestamps, value, and
// temporary resource attribute. It is a no-op when the metric is disabled in
// user config.
func (m *metricMysqlTmpResources) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, tmpResourceAttributeValue string) {
	if !m.config.Enabled {
		return
	}
	dp := m.data.Sum().DataPoints().AppendEmpty()
	dp.SetStartTimestamp(start)
	dp.SetTimestamp(ts)
	dp.SetIntValue(val)
	dp.Attributes().PutStr("resource", tmpResourceAttributeValue)
}

// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricMysqlTmpResources) updateCapacity() {
	if m.data.Sum().DataPoints().Len() > m.capacity {
		m.capacity = m.data.Sum().DataPoints().Len()
	}
}

// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMysqlTmpResources) emit(metrics pmetric.MetricSlice) {
	if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
		m.updateCapacity()
		// MoveTo resets m.data, so re-initialize it for the next collection cycle.
		m.data.MoveTo(metrics.AppendEmpty())
		m.init()
	}
}

// newMetricMysqlTmpResources returns a builder for this metric; the data
// buffer is allocated only when the metric is enabled in user config.
func newMetricMysqlTmpResources(cfg MetricConfig) metricMysqlTmpResources {
	m := metricMysqlTmpResources{config: cfg}
	if cfg.Enabled {
		m.data = pmetric.NewMetric()
		m.init()
	}
	return m
}
// metricMysqlUptime accumulates data points for the mysql.uptime metric until
// they are emitted.
type metricMysqlUptime struct {
	data     pmetric.Metric // data buffer for generated metric.
	config   MetricConfig   // metric config provided by user.
	capacity int            // max observed number of data points added to the metric.
}

// init fills mysql.uptime metric with initial data.
func (m *metricMysqlUptime) init() {
	m.data.SetName("mysql.uptime")
	m.data.SetDescription("The number of seconds that the server has been up.")
	m.data.SetUnit("s")
	m.data.SetEmptySum()
	m.data.Sum().SetIsMonotonic(true)
	m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
	// Pre-allocate room for the largest batch observed so far, matching every
	// sibling metric in this file. Previously this line was missing, leaving
	// the capacity tracked by updateCapacity unused. If this divergence is
	// intentional in mdatagen, fix it in the generator template instead.
	m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
}

// recordDataPoint appends one data point with the given timestamps and value.
// It is a no-op when the metric is disabled in user config.
func (m *metricMysqlUptime) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
	if !m.config.Enabled {
		return
	}
	dp := m.data.Sum().DataPoints().AppendEmpty()
	dp.SetStartTimestamp(start)
	dp.SetTimestamp(ts)
	dp.SetIntValue(val)
}

// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricMysqlUptime) updateCapacity() {
	if m.data.Sum().DataPoints().Len() > m.capacity {
		m.capacity = m.data.Sum().DataPoints().Len()
	}
}

// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMysqlUptime) emit(metrics pmetric.MetricSlice) {
	if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
		m.updateCapacity()
		// MoveTo resets m.data, so re-initialize it for the next collection cycle.
		m.data.MoveTo(metrics.AppendEmpty())
		m.init()
	}
}

// newMetricMysqlUptime returns a builder for this metric; the data buffer is
// allocated only when the metric is enabled in user config.
func newMetricMysqlUptime(cfg MetricConfig) metricMysqlUptime {
	m := metricMysqlUptime{config: cfg}
	if cfg.Enabled {
		m.data = pmetric.NewMetric()
		m.init()
	}
	return m
}
// MetricsBuilder provides an interface for scrapers to report metrics while taking care of all the transformations
// required to produce metric representation defined in metadata and user config.
type MetricsBuilder struct {
	config          MetricsBuilderConfig // config of the metrics builder.
	startTime       pcommon.Timestamp    // start time that will be applied to all recorded data points.
	metricsCapacity int                  // maximum observed number of metrics per resource.
	metricsBuffer   pmetric.Metrics      // accumulates metrics data before emitting.
	buildInfo       component.BuildInfo  // contains version information.
	// Per-resource-attribute filters: metrics for a resource are kept only if
	// every attribute passes its include filter (when set) and no attribute
	// matches its exclude filter (when set). Keyed by attribute name.
	resourceAttributeIncludeFilter map[string]filter.Filter
	resourceAttributeExcludeFilter map[string]filter.Filter
	// One builder per declared metric; each buffers its own data points.
	metricMysqlBufferPoolDataPages     metricMysqlBufferPoolDataPages
	metricMysqlBufferPoolLimit         metricMysqlBufferPoolLimit
	metricMysqlBufferPoolOperations    metricMysqlBufferPoolOperations
	metricMysqlBufferPoolPageFlushes   metricMysqlBufferPoolPageFlushes
	metricMysqlBufferPoolPages         metricMysqlBufferPoolPages
	metricMysqlBufferPoolUsage         metricMysqlBufferPoolUsage
	metricMysqlClientNetworkIo         metricMysqlClientNetworkIo
	metricMysqlCommands                metricMysqlCommands
	metricMysqlConnectionCount         metricMysqlConnectionCount
	metricMysqlConnectionErrors        metricMysqlConnectionErrors
	metricMysqlDoubleWrites            metricMysqlDoubleWrites
	metricMysqlHandlers                metricMysqlHandlers
	metricMysqlIndexIoWaitCount        metricMysqlIndexIoWaitCount
	metricMysqlIndexIoWaitTime         metricMysqlIndexIoWaitTime
	metricMysqlJoins                   metricMysqlJoins
	metricMysqlLocks                   metricMysqlLocks
	metricMysqlLogOperations           metricMysqlLogOperations
	metricMysqlMysqlxConnections       metricMysqlMysqlxConnections
	metricMysqlMysqlxWorkerThreads     metricMysqlMysqlxWorkerThreads
	metricMysqlOpenedResources         metricMysqlOpenedResources
	metricMysqlOperations              metricMysqlOperations
	metricMysqlPageOperations          metricMysqlPageOperations
	metricMysqlPreparedStatements      metricMysqlPreparedStatements
	metricMysqlQueryClientCount        metricMysqlQueryClientCount
	metricMysqlQueryCount              metricMysqlQueryCount
	metricMysqlQuerySlowCount          metricMysqlQuerySlowCount
	metricMysqlReplicaSQLDelay         metricMysqlReplicaSQLDelay
	metricMysqlReplicaTimeBehindSource metricMysqlReplicaTimeBehindSource
	metricMysqlRowLocks                metricMysqlRowLocks
	metricMysqlRowOperations           metricMysqlRowOperations
	metricMysqlSorts                   metricMysqlSorts
	metricMysqlStatementEventCount     metricMysqlStatementEventCount
	metricMysqlStatementEventWaitTime  metricMysqlStatementEventWaitTime
	metricMysqlTableAverageRowLength   metricMysqlTableAverageRowLength
	metricMysqlTableIoWaitCount        metricMysqlTableIoWaitCount
	metricMysqlTableIoWaitTime         metricMysqlTableIoWaitTime
	metricMysqlTableLockWaitReadCount  metricMysqlTableLockWaitReadCount
	metricMysqlTableLockWaitReadTime   metricMysqlTableLockWaitReadTime
	metricMysqlTableLockWaitWriteCount metricMysqlTableLockWaitWriteCount
	metricMysqlTableLockWaitWriteTime  metricMysqlTableLockWaitWriteTime
	metricMysqlTableRows               metricMysqlTableRows
	metricMysqlTableSize               metricMysqlTableSize
	metricMysqlTableOpenCache          metricMysqlTableOpenCache
	metricMysqlThreads                 metricMysqlThreads
	metricMysqlTmpResources            metricMysqlTmpResources
	metricMysqlUptime                  metricMysqlUptime
}
// MetricBuilderOption applies changes to default metrics builder.
type MetricBuilderOption interface {
	apply(*MetricsBuilder)
}

// metricBuilderOptionFunc adapts a plain function to the MetricBuilderOption
// interface (functional-option pattern).
type metricBuilderOptionFunc func(mb *MetricsBuilder)

// apply invokes the wrapped function on the builder.
func (mbof metricBuilderOptionFunc) apply(mb *MetricsBuilder) {
	mbof(mb)
}

// WithStartTime sets startTime on the metrics builder.
func WithStartTime(startTime pcommon.Timestamp) MetricBuilderOption {
	return metricBuilderOptionFunc(func(mb *MetricsBuilder) {
		mb.startTime = startTime
	})
}
// NewMetricsBuilder creates a MetricsBuilder with the provided config and
// build info. The start time for all data points defaults to "now" and can be
// overridden via WithStartTime. One sub-builder is created per declared
// metric; disabled metrics allocate no data buffer.
func NewMetricsBuilder(mbc MetricsBuilderConfig, settings receiver.Settings, options ...MetricBuilderOption) *MetricsBuilder {
	mb := &MetricsBuilder{
		config:                             mbc,
		startTime:                          pcommon.NewTimestampFromTime(time.Now()),
		metricsBuffer:                      pmetric.NewMetrics(),
		buildInfo:                          settings.BuildInfo,
		metricMysqlBufferPoolDataPages:     newMetricMysqlBufferPoolDataPages(mbc.Metrics.MysqlBufferPoolDataPages),
		metricMysqlBufferPoolLimit:         newMetricMysqlBufferPoolLimit(mbc.Metrics.MysqlBufferPoolLimit),
		metricMysqlBufferPoolOperations:    newMetricMysqlBufferPoolOperations(mbc.Metrics.MysqlBufferPoolOperations),
		metricMysqlBufferPoolPageFlushes:   newMetricMysqlBufferPoolPageFlushes(mbc.Metrics.MysqlBufferPoolPageFlushes),
		metricMysqlBufferPoolPages:         newMetricMysqlBufferPoolPages(mbc.Metrics.MysqlBufferPoolPages),
		metricMysqlBufferPoolUsage:         newMetricMysqlBufferPoolUsage(mbc.Metrics.MysqlBufferPoolUsage),
		metricMysqlClientNetworkIo:         newMetricMysqlClientNetworkIo(mbc.Metrics.MysqlClientNetworkIo),
		metricMysqlCommands:                newMetricMysqlCommands(mbc.Metrics.MysqlCommands),
		metricMysqlConnectionCount:         newMetricMysqlConnectionCount(mbc.Metrics.MysqlConnectionCount),
		metricMysqlConnectionErrors:        newMetricMysqlConnectionErrors(mbc.Metrics.MysqlConnectionErrors),
		metricMysqlDoubleWrites:            newMetricMysqlDoubleWrites(mbc.Metrics.MysqlDoubleWrites),
		metricMysqlHandlers:                newMetricMysqlHandlers(mbc.Metrics.MysqlHandlers),
		metricMysqlIndexIoWaitCount:        newMetricMysqlIndexIoWaitCount(mbc.Metrics.MysqlIndexIoWaitCount),
		metricMysqlIndexIoWaitTime:         newMetricMysqlIndexIoWaitTime(mbc.Metrics.MysqlIndexIoWaitTime),
		metricMysqlJoins:                   newMetricMysqlJoins(mbc.Metrics.MysqlJoins),
		metricMysqlLocks:                   newMetricMysqlLocks(mbc.Metrics.MysqlLocks),
		metricMysqlLogOperations:           newMetricMysqlLogOperations(mbc.Metrics.MysqlLogOperations),
		metricMysqlMysqlxConnections:       newMetricMysqlMysqlxConnections(mbc.Metrics.MysqlMysqlxConnections),
		metricMysqlMysqlxWorkerThreads:     newMetricMysqlMysqlxWorkerThreads(mbc.Metrics.MysqlMysqlxWorkerThreads),
		metricMysqlOpenedResources:         newMetricMysqlOpenedResources(mbc.Metrics.MysqlOpenedResources),
		metricMysqlOperations:              newMetricMysqlOperations(mbc.Metrics.MysqlOperations),
		metricMysqlPageOperations:          newMetricMysqlPageOperations(mbc.Metrics.MysqlPageOperations),
		metricMysqlPreparedStatements:      newMetricMysqlPreparedStatements(mbc.Metrics.MysqlPreparedStatements),
		metricMysqlQueryClientCount:        newMetricMysqlQueryClientCount(mbc.Metrics.MysqlQueryClientCount),
		metricMysqlQueryCount:              newMetricMysqlQueryCount(mbc.Metrics.MysqlQueryCount),
		metricMysqlQuerySlowCount:          newMetricMysqlQuerySlowCount(mbc.Metrics.MysqlQuerySlowCount),
		metricMysqlReplicaSQLDelay:         newMetricMysqlReplicaSQLDelay(mbc.Metrics.MysqlReplicaSQLDelay),
		metricMysqlReplicaTimeBehindSource: newMetricMysqlReplicaTimeBehindSource(mbc.Metrics.MysqlReplicaTimeBehindSource),
		metricMysqlRowLocks:                newMetricMysqlRowLocks(mbc.Metrics.MysqlRowLocks),
		metricMysqlRowOperations:           newMetricMysqlRowOperations(mbc.Metrics.MysqlRowOperations),
		metricMysqlSorts:                   newMetricMysqlSorts(mbc.Metrics.MysqlSorts),
		metricMysqlStatementEventCount:     newMetricMysqlStatementEventCount(mbc.Metrics.MysqlStatementEventCount),
		metricMysqlStatementEventWaitTime:  newMetricMysqlStatementEventWaitTime(mbc.Metrics.MysqlStatementEventWaitTime),
		metricMysqlTableAverageRowLength:   newMetricMysqlTableAverageRowLength(mbc.Metrics.MysqlTableAverageRowLength),
		metricMysqlTableIoWaitCount:        newMetricMysqlTableIoWaitCount(mbc.Metrics.MysqlTableIoWaitCount),
		metricMysqlTableIoWaitTime:         newMetricMysqlTableIoWaitTime(mbc.Metrics.MysqlTableIoWaitTime),
		metricMysqlTableLockWaitReadCount:  newMetricMysqlTableLockWaitReadCount(mbc.Metrics.MysqlTableLockWaitReadCount),
		metricMysqlTableLockWaitReadTime:   newMetricMysqlTableLockWaitReadTime(mbc.Metrics.MysqlTableLockWaitReadTime),
		metricMysqlTableLockWaitWriteCount: newMetricMysqlTableLockWaitWriteCount(mbc.Metrics.MysqlTableLockWaitWriteCount),
		metricMysqlTableLockWaitWriteTime:  newMetricMysqlTableLockWaitWriteTime(mbc.Metrics.MysqlTableLockWaitWriteTime),
		metricMysqlTableRows:               newMetricMysqlTableRows(mbc.Metrics.MysqlTableRows),
		metricMysqlTableSize:               newMetricMysqlTableSize(mbc.Metrics.MysqlTableSize),
		metricMysqlTableOpenCache:          newMetricMysqlTableOpenCache(mbc.Metrics.MysqlTableOpenCache),
		metricMysqlThreads:                 newMetricMysqlThreads(mbc.Metrics.MysqlThreads),
		metricMysqlTmpResources:            newMetricMysqlTmpResources(mbc.Metrics.MysqlTmpResources),
		metricMysqlUptime:                  newMetricMysqlUptime(mbc.Metrics.MysqlUptime),
		resourceAttributeIncludeFilter:     make(map[string]filter.Filter),
		resourceAttributeExcludeFilter:     make(map[string]filter.Filter),
	}
	// Compile optional include/exclude filters for the mysql.instance.endpoint
	// resource attribute; nil config means "no filtering" for that direction.
	if mbc.ResourceAttributes.MysqlInstanceEndpoint.MetricsInclude != nil {
		mb.resourceAttributeIncludeFilter["mysql.instance.endpoint"] = filter.CreateFilter(mbc.ResourceAttributes.MysqlInstanceEndpoint.MetricsInclude)
	}
	if mbc.ResourceAttributes.MysqlInstanceEndpoint.MetricsExclude != nil {
		mb.resourceAttributeExcludeFilter["mysql.instance.endpoint"] = filter.CreateFilter(mbc.ResourceAttributes.MysqlInstanceEndpoint.MetricsExclude)
	}
	// Apply caller-supplied options last so they can override defaults.
	for _, op := range options {
		op.apply(mb)
	}
	return mb
}
// NewResourceBuilder returns a new resource builder that should be used to build a resource associated with for the emitted metrics.
// The builder is configured with this MetricsBuilder's resource attribute settings.
func (mb *MetricsBuilder) NewResourceBuilder() *ResourceBuilder {
	return NewResourceBuilder(mb.config.ResourceAttributes)
}
// updateCapacity updates max length of metrics and resource attributes that will be used for the slice capacity.
// It assumes rm has at least one ScopeMetrics entry, which holds for resource
// metrics built by EmitForResource (it always appends exactly one scope).
func (mb *MetricsBuilder) updateCapacity(rm pmetric.ResourceMetrics) {
	if mb.metricsCapacity < rm.ScopeMetrics().At(0).Metrics().Len() {
		mb.metricsCapacity = rm.ScopeMetrics().At(0).Metrics().Len()
	}
}
// ResourceMetricsOption applies changes to provided resource metrics.
type ResourceMetricsOption interface {
	apply(pmetric.ResourceMetrics)
}

// resourceMetricsOptionFunc adapts a plain function to the
// ResourceMetricsOption interface (functional-option pattern).
type resourceMetricsOptionFunc func(pmetric.ResourceMetrics)

// apply invokes the wrapped function on the resource metrics.
func (rmof resourceMetricsOptionFunc) apply(rm pmetric.ResourceMetrics) {
	rmof(rm)
}

// WithResource sets the provided resource on the emitted ResourceMetrics.
// It's recommended to use ResourceBuilder to create the resource.
func WithResource(res pcommon.Resource) ResourceMetricsOption {
	return resourceMetricsOptionFunc(func(rm pmetric.ResourceMetrics) {
		res.CopyTo(rm.Resource())
	})
}
// WithStartTimeOverride overrides start time for all the resource metrics data points.
// This option should be only used if different start time has to be set on metrics coming from different resources.
// It assumes the resource metrics contain at least one scope (as built by
// EmitForResource) and only rewrites Gauge and Sum data points.
func WithStartTimeOverride(start pcommon.Timestamp) ResourceMetricsOption {
	return resourceMetricsOptionFunc(func(rm pmetric.ResourceMetrics) {
		var dps pmetric.NumberDataPointSlice
		metrics := rm.ScopeMetrics().At(0).Metrics()
		for i := 0; i < metrics.Len(); i++ {
			switch metrics.At(i).Type() {
			case pmetric.MetricTypeGauge:
				dps = metrics.At(i).Gauge().DataPoints()
			case pmetric.MetricTypeSum:
				dps = metrics.At(i).Sum().DataPoints()
			default:
				// Skip histogram/summary/empty metrics. Without this, the loop
				// would fall through with dps still holding the previous
				// metric's data points (re-stamping them a second time), or a
				// zero-value slice on the first iteration.
				continue
			}
			for j := 0; j < dps.Len(); j++ {
				dps.At(j).SetStartTimestamp(start)
			}
		}
	})
}
// EmitForResource saves all the generated metrics under a new resource and updates the internal state to be ready for
// recording another set of data points as part of another resource. This function can be helpful when one scraper
// needs to emit metrics from several resources. Otherwise calling this function is not required,
// just `Emit` function can be called instead.
// Resource attributes should be provided as ResourceMetricsOption arguments.
func (mb *MetricsBuilder) EmitForResource(options ...ResourceMetricsOption) {
	rm := pmetric.NewResourceMetrics()
	ils := rm.ScopeMetrics().AppendEmpty()
	ils.Scope().SetName(ScopeName)
	ils.Scope().SetVersion(mb.buildInfo.Version)
	ils.Metrics().EnsureCapacity(mb.metricsCapacity)
	// Flush every per-metric accumulator into the new scope; emit is a no-op
	// for metrics that are disabled or have no recorded data points.
	mb.metricMysqlBufferPoolDataPages.emit(ils.Metrics())
	mb.metricMysqlBufferPoolLimit.emit(ils.Metrics())
	mb.metricMysqlBufferPoolOperations.emit(ils.Metrics())
	mb.metricMysqlBufferPoolPageFlushes.emit(ils.Metrics())
	mb.metricMysqlBufferPoolPages.emit(ils.Metrics())
	mb.metricMysqlBufferPoolUsage.emit(ils.Metrics())
	mb.metricMysqlClientNetworkIo.emit(ils.Metrics())
	mb.metricMysqlCommands.emit(ils.Metrics())
	mb.metricMysqlConnectionCount.emit(ils.Metrics())
	mb.metricMysqlConnectionErrors.emit(ils.Metrics())
	mb.metricMysqlDoubleWrites.emit(ils.Metrics())
	mb.metricMysqlHandlers.emit(ils.Metrics())
	mb.metricMysqlIndexIoWaitCount.emit(ils.Metrics())
	mb.metricMysqlIndexIoWaitTime.emit(ils.Metrics())
	mb.metricMysqlJoins.emit(ils.Metrics())
	mb.metricMysqlLocks.emit(ils.Metrics())
	mb.metricMysqlLogOperations.emit(ils.Metrics())
	mb.metricMysqlMysqlxConnections.emit(ils.Metrics())
	mb.metricMysqlMysqlxWorkerThreads.emit(ils.Metrics())
	mb.metricMysqlOpenedResources.emit(ils.Metrics())
	mb.metricMysqlOperations.emit(ils.Metrics())
	mb.metricMysqlPageOperations.emit(ils.Metrics())
	mb.metricMysqlPreparedStatements.emit(ils.Metrics())
	mb.metricMysqlQueryClientCount.emit(ils.Metrics())
	mb.metricMysqlQueryCount.emit(ils.Metrics())
	mb.metricMysqlQuerySlowCount.emit(ils.Metrics())
	mb.metricMysqlReplicaSQLDelay.emit(ils.Metrics())
	mb.metricMysqlReplicaTimeBehindSource.emit(ils.Metrics())
	mb.metricMysqlRowLocks.emit(ils.Metrics())
	mb.metricMysqlRowOperations.emit(ils.Metrics())
	mb.metricMysqlSorts.emit(ils.Metrics())
	mb.metricMysqlStatementEventCount.emit(ils.Metrics())
	mb.metricMysqlStatementEventWaitTime.emit(ils.Metrics())
	mb.metricMysqlTableAverageRowLength.emit(ils.Metrics())
	mb.metricMysqlTableIoWaitCount.emit(ils.Metrics())
	mb.metricMysqlTableIoWaitTime.emit(ils.Metrics())
	mb.metricMysqlTableLockWaitReadCount.emit(ils.Metrics())
	mb.metricMysqlTableLockWaitReadTime.emit(ils.Metrics())
	mb.metricMysqlTableLockWaitWriteCount.emit(ils.Metrics())
	mb.metricMysqlTableLockWaitWriteTime.emit(ils.Metrics())
	mb.metricMysqlTableRows.emit(ils.Metrics())
	mb.metricMysqlTableSize.emit(ils.Metrics())
	mb.metricMysqlTableOpenCache.emit(ils.Metrics())
	mb.metricMysqlThreads.emit(ils.Metrics())
	mb.metricMysqlTmpResources.emit(ils.Metrics())
	mb.metricMysqlUptime.emit(ils.Metrics())
	// Apply caller options (e.g. WithResource) after metrics are emitted so
	// options see the fully populated ResourceMetrics.
	for _, op := range options {
		op.apply(rm)
	}
	// Drop the whole resource if a configured include filter does not match
	// one of its resource attributes.
	for attr, filter := range mb.resourceAttributeIncludeFilter {
		if val, ok := rm.Resource().Attributes().Get(attr); ok && !filter.Matches(val.AsString()) {
			return
		}
	}
	// Drop the whole resource if a configured exclude filter matches one of
	// its resource attributes.
	for attr, filter := range mb.resourceAttributeExcludeFilter {
		if val, ok := rm.Resource().Attributes().Get(attr); ok && filter.Matches(val.AsString()) {
			return
		}
	}
	// Only keep non-empty resources in the buffer.
	if ils.Metrics().Len() > 0 {
		mb.updateCapacity(rm)
		rm.MoveTo(mb.metricsBuffer.ResourceMetrics().AppendEmpty())
	}
}
// Emit returns all the metrics accumulated by the metrics builder and updates the internal state to be ready for
// recording another set of metrics. This function will be responsible for applying all the transformations required to
// produce metric representation defined in metadata and user config, e.g. delta or cumulative.
func (mb *MetricsBuilder) Emit(options ...ResourceMetricsOption) pmetric.Metrics {
	mb.EmitForResource(options...)
	// Hand the accumulated buffer to the caller and start a fresh one.
	out := mb.metricsBuffer
	mb.metricsBuffer = pmetric.NewMetrics()
	return out
}
// RecordMysqlBufferPoolDataPagesDataPoint adds a data point to mysql.buffer_pool.data_pages metric.
// The data point carries the builder's start time, the observation timestamp ts,
// and the buffer_pool_data attribute (dirty/clean).
func (mb *MetricsBuilder) RecordMysqlBufferPoolDataPagesDataPoint(ts pcommon.Timestamp, val int64, bufferPoolDataAttributeValue AttributeBufferPoolData) {
	mb.metricMysqlBufferPoolDataPages.recordDataPoint(mb.startTime, ts, val, bufferPoolDataAttributeValue.String())
}
// RecordMysqlBufferPoolLimitDataPoint adds a data point to mysql.buffer_pool.limit metric.
// inputVal is the raw server status string; a non-integer value is reported as an error.
func (mb *MetricsBuilder) RecordMysqlBufferPoolLimitDataPoint(ts pcommon.Timestamp, inputVal string) error {
	parsed, parseErr := strconv.ParseInt(inputVal, 10, 64)
	if parseErr != nil {
		return fmt.Errorf("failed to parse int64 for MysqlBufferPoolLimit, value was %s: %w", inputVal, parseErr)
	}
	mb.metricMysqlBufferPoolLimit.recordDataPoint(mb.startTime, ts, parsed)
	return nil
}
// RecordMysqlBufferPoolOperationsDataPoint adds a data point to mysql.buffer_pool.operations metric.
// inputVal is the raw server status string; a non-integer value is reported as an error.
func (mb *MetricsBuilder) RecordMysqlBufferPoolOperationsDataPoint(ts pcommon.Timestamp, inputVal string, bufferPoolOperationsAttributeValue AttributeBufferPoolOperations) error {
	parsed, parseErr := strconv.ParseInt(inputVal, 10, 64)
	if parseErr != nil {
		return fmt.Errorf("failed to parse int64 for MysqlBufferPoolOperations, value was %s: %w", inputVal, parseErr)
	}
	mb.metricMysqlBufferPoolOperations.recordDataPoint(mb.startTime, ts, parsed, bufferPoolOperationsAttributeValue.String())
	return nil
}
// RecordMysqlBufferPoolPageFlushesDataPoint adds a data point to mysql.buffer_pool.page_flushes metric.
// inputVal is the raw server status string; a non-integer value is reported as an error.
func (mb *MetricsBuilder) RecordMysqlBufferPoolPageFlushesDataPoint(ts pcommon.Timestamp, inputVal string) error {
	parsed, parseErr := strconv.ParseInt(inputVal, 10, 64)
	if parseErr != nil {
		return fmt.Errorf("failed to parse int64 for MysqlBufferPoolPageFlushes, value was %s: %w", inputVal, parseErr)
	}
	mb.metricMysqlBufferPoolPageFlushes.recordDataPoint(mb.startTime, ts, parsed)
	return nil
}
// RecordMysqlBufferPoolPagesDataPoint adds a data point to mysql.buffer_pool.pages metric.
// inputVal is the raw server status string; a non-integer value is reported as an error.
func (mb *MetricsBuilder) RecordMysqlBufferPoolPagesDataPoint(ts pcommon.Timestamp, inputVal string, bufferPoolPagesAttributeValue AttributeBufferPoolPages) error {
	parsed, parseErr := strconv.ParseInt(inputVal, 10, 64)
	if parseErr != nil {
		return fmt.Errorf("failed to parse int64 for MysqlBufferPoolPages, value was %s: %w", inputVal, parseErr)
	}
	mb.metricMysqlBufferPoolPages.recordDataPoint(mb.startTime, ts, parsed, bufferPoolPagesAttributeValue.String())
	return nil
}
// RecordMysqlBufferPoolUsageDataPoint adds a data point to mysql.buffer_pool.usage metric.
// The data point carries the builder's start time, the observation timestamp ts,
// and the buffer_pool_data attribute (dirty/clean).
func (mb *MetricsBuilder) RecordMysqlBufferPoolUsageDataPoint(ts pcommon.Timestamp, val int64, bufferPoolDataAttributeValue AttributeBufferPoolData) {
	mb.metricMysqlBufferPoolUsage.recordDataPoint(mb.startTime, ts, val, bufferPoolDataAttributeValue.String())
}
// RecordMysqlClientNetworkIoDataPoint adds a data point to mysql.client.network.io metric.
// inputVal is the raw server status string; a non-integer value is reported as an error.
func (mb *MetricsBuilder) RecordMysqlClientNetworkIoDataPoint(ts pcommon.Timestamp, inputVal string, directionAttributeValue AttributeDirection) error {
	parsed, parseErr := strconv.ParseInt(inputVal, 10, 64)
	if parseErr != nil {
		return fmt.Errorf("failed to parse int64 for MysqlClientNetworkIo, value was %s: %w", inputVal, parseErr)
	}
	mb.metricMysqlClientNetworkIo.recordDataPoint(mb.startTime, ts, parsed, directionAttributeValue.String())
	return nil
}
// RecordMysqlCommandsDataPoint adds a data point to mysql.commands metric.
// inputVal is the raw server status string; a non-integer value is reported as an error.
func (mb *MetricsBuilder) RecordMysqlCommandsDataPoint(ts pcommon.Timestamp, inputVal string, commandAttributeValue AttributeCommand) error {
	parsed, parseErr := strconv.ParseInt(inputVal, 10, 64)
	if parseErr != nil {
		return fmt.Errorf("failed to parse int64 for MysqlCommands, value was %s: %w", inputVal, parseErr)
	}
	mb.metricMysqlCommands.recordDataPoint(mb.startTime, ts, parsed, commandAttributeValue.String())
	return nil
}
// RecordMysqlConnectionCountDataPoint adds a data point to mysql.connection.count metric.
// inputVal is the raw server status string; a non-integer value is reported as an error.
func (mb *MetricsBuilder) RecordMysqlConnectionCountDataPoint(ts pcommon.Timestamp, inputVal string) error {
	parsed, parseErr := strconv.ParseInt(inputVal, 10, 64)
	if parseErr != nil {
		return fmt.Errorf("failed to parse int64 for MysqlConnectionCount, value was %s: %w", inputVal, parseErr)
	}
	mb.metricMysqlConnectionCount.recordDataPoint(mb.startTime, ts, parsed)
	return nil
}
// RecordMysqlConnectionErrorsDataPoint adds a data point to mysql.connection.errors metric.
// inputVal is the raw server status string; a non-integer value is reported as an error.
func (mb *MetricsBuilder) RecordMysqlConnectionErrorsDataPoint(ts pcommon.Timestamp, inputVal string, connectionErrorAttributeValue AttributeConnectionError) error {
	parsed, parseErr := strconv.ParseInt(inputVal, 10, 64)
	if parseErr != nil {
		return fmt.Errorf("failed to parse int64 for MysqlConnectionErrors, value was %s: %w", inputVal, parseErr)
	}
	mb.metricMysqlConnectionErrors.recordDataPoint(mb.startTime, ts, parsed, connectionErrorAttributeValue.String())
	return nil
}
// RecordMysqlDoubleWritesDataPoint adds a data point to mysql.double_writes metric.
// inputVal is the raw server status string; a non-integer value is reported as an error.
func (mb *MetricsBuilder) RecordMysqlDoubleWritesDataPoint(ts pcommon.Timestamp, inputVal string, doubleWritesAttributeValue AttributeDoubleWrites) error {
	parsed, parseErr := strconv.ParseInt(inputVal, 10, 64)
	if parseErr != nil {
		return fmt.Errorf("failed to parse int64 for MysqlDoubleWrites, value was %s: %w", inputVal, parseErr)
	}
	mb.metricMysqlDoubleWrites.recordDataPoint(mb.startTime, ts, parsed, doubleWritesAttributeValue.String())
	return nil
}
// RecordMysqlHandlersDataPoint adds a data point to mysql.handlers metric.
// inputVal is the raw server status string; a non-integer value is reported as an error.
func (mb *MetricsBuilder) RecordMysqlHandlersDataPoint(ts pcommon.Timestamp, inputVal string, handlerAttributeValue AttributeHandler) error {
	parsed, parseErr := strconv.ParseInt(inputVal, 10, 64)
	if parseErr != nil {
		return fmt.Errorf("failed to parse int64 for MysqlHandlers, value was %s: %w", inputVal, parseErr)
	}
	mb.metricMysqlHandlers.recordDataPoint(mb.startTime, ts, parsed, handlerAttributeValue.String())
	return nil
}
// RecordMysqlIndexIoWaitCountDataPoint adds a data point to mysql.index.io.wait.count metric.
// The data point is attributed with the I/O operation kind, table, schema, and index name.
func (mb *MetricsBuilder) RecordMysqlIndexIoWaitCountDataPoint(ts pcommon.Timestamp, val int64, ioWaitsOperationsAttributeValue AttributeIoWaitsOperations, tableNameAttributeValue string, schemaAttributeValue string, indexNameAttributeValue string) {
	mb.metricMysqlIndexIoWaitCount.recordDataPoint(mb.startTime, ts, val, ioWaitsOperationsAttributeValue.String(), tableNameAttributeValue, schemaAttributeValue, indexNameAttributeValue)
}
// RecordMysqlIndexIoWaitTimeDataPoint adds a data point to mysql.index.io.wait.time metric.
// The data point is attributed with the I/O operation kind, table, schema, and index name.
func (mb *MetricsBuilder) RecordMysqlIndexIoWaitTimeDataPoint(ts pcommon.Timestamp, val int64, ioWaitsOperationsAttributeValue AttributeIoWaitsOperations, tableNameAttributeValue string, schemaAttributeValue string, indexNameAttributeValue string) {
	mb.metricMysqlIndexIoWaitTime.recordDataPoint(mb.startTime, ts, val, ioWaitsOperationsAttributeValue.String(), tableNameAttributeValue, schemaAttributeValue, indexNameAttributeValue)
}
// RecordMysqlJoinsDataPoint adds a data point to mysql.joins metric.
// inputVal is the raw server status string; a non-integer value is reported as an error.
func (mb *MetricsBuilder) RecordMysqlJoinsDataPoint(ts pcommon.Timestamp, inputVal string, joinKindAttributeValue AttributeJoinKind) error {
	parsed, parseErr := strconv.ParseInt(inputVal, 10, 64)
	if parseErr != nil {
		return fmt.Errorf("failed to parse int64 for MysqlJoins, value was %s: %w", inputVal, parseErr)
	}
	mb.metricMysqlJoins.recordDataPoint(mb.startTime, ts, parsed, joinKindAttributeValue.String())
	return nil
}
// RecordMysqlLocksDataPoint adds a data point to mysql.locks metric.
// inputVal is the raw server status string; a non-integer value is reported as an error.
func (mb *MetricsBuilder) RecordMysqlLocksDataPoint(ts pcommon.Timestamp, inputVal string, locksAttributeValue AttributeLocks) error {
	parsed, parseErr := strconv.ParseInt(inputVal, 10, 64)
	if parseErr != nil {
		return fmt.Errorf("failed to parse int64 for MysqlLocks, value was %s: %w", inputVal, parseErr)
	}
	mb.metricMysqlLocks.recordDataPoint(mb.startTime, ts, parsed, locksAttributeValue.String())
	return nil
}
// RecordMysqlLogOperationsDataPoint adds a data point to mysql.log_operations metric.
// inputVal is the raw server status string; a non-integer value is reported as an error.
func (mb *MetricsBuilder) RecordMysqlLogOperationsDataPoint(ts pcommon.Timestamp, inputVal string, logOperationsAttributeValue AttributeLogOperations) error {
	parsed, parseErr := strconv.ParseInt(inputVal, 10, 64)
	if parseErr != nil {
		return fmt.Errorf("failed to parse int64 for MysqlLogOperations, value was %s: %w", inputVal, parseErr)
	}
	mb.metricMysqlLogOperations.recordDataPoint(mb.startTime, ts, parsed, logOperationsAttributeValue.String())
	return nil
}
// RecordMysqlMysqlxConnectionsDataPoint adds a data point to mysql.mysqlx_connections metric.
// inputVal is the raw server status string; a non-integer value is reported as an error.
func (mb *MetricsBuilder) RecordMysqlMysqlxConnectionsDataPoint(ts pcommon.Timestamp, inputVal string, connectionStatusAttributeValue AttributeConnectionStatus) error {
	parsed, parseErr := strconv.ParseInt(inputVal, 10, 64)
	if parseErr != nil {
		return fmt.Errorf("failed to parse int64 for MysqlMysqlxConnections, value was %s: %w", inputVal, parseErr)
	}
	mb.metricMysqlMysqlxConnections.recordDataPoint(mb.startTime, ts, parsed, connectionStatusAttributeValue.String())
	return nil
}
// RecordMysqlMysqlxWorkerThreadsDataPoint adds a data point to mysql.mysqlx_worker_threads metric.
// inputVal is the raw server status string; a non-integer value is reported as an error.
func (mb *MetricsBuilder) RecordMysqlMysqlxWorkerThreadsDataPoint(ts pcommon.Timestamp, inputVal string, mysqlxThreadsAttributeValue AttributeMysqlxThreads) error {
	parsed, parseErr := strconv.ParseInt(inputVal, 10, 64)
	if parseErr != nil {
		return fmt.Errorf("failed to parse int64 for MysqlMysqlxWorkerThreads, value was %s: %w", inputVal, parseErr)
	}
	mb.metricMysqlMysqlxWorkerThreads.recordDataPoint(mb.startTime, ts, parsed, mysqlxThreadsAttributeValue.String())
	return nil
}
// RecordMysqlOpenedResourcesDataPoint adds a data point to mysql.opened_resources metric.
// inputVal is the raw server status string; a non-integer value is reported as an error.
func (mb *MetricsBuilder) RecordMysqlOpenedResourcesDataPoint(ts pcommon.Timestamp, inputVal string, openedResourcesAttributeValue AttributeOpenedResources) error {
	parsed, parseErr := strconv.ParseInt(inputVal, 10, 64)
	if parseErr != nil {
		return fmt.Errorf("failed to parse int64 for MysqlOpenedResources, value was %s: %w", inputVal, parseErr)
	}
	mb.metricMysqlOpenedResources.recordDataPoint(mb.startTime, ts, parsed, openedResourcesAttributeValue.String())
	return nil
}
// RecordMysqlOperationsDataPoint adds a data point to mysql.operations metric.
// inputVal is the raw server status string; a non-integer value is reported as an error.
func (mb *MetricsBuilder) RecordMysqlOperationsDataPoint(ts pcommon.Timestamp, inputVal string, operationsAttributeValue AttributeOperations) error {
	parsed, parseErr := strconv.ParseInt(inputVal, 10, 64)
	if parseErr != nil {
		return fmt.Errorf("failed to parse int64 for MysqlOperations, value was %s: %w", inputVal, parseErr)
	}
	mb.metricMysqlOperations.recordDataPoint(mb.startTime, ts, parsed, operationsAttributeValue.String())
	return nil
}
// RecordMysqlPageOperationsDataPoint adds a data point to mysql.page_operations metric.
// inputVal is the raw server status string; a non-integer value is reported as an error.
func (mb *MetricsBuilder) RecordMysqlPageOperationsDataPoint(ts pcommon.Timestamp, inputVal string, pageOperationsAttributeValue AttributePageOperations) error {
	parsed, parseErr := strconv.ParseInt(inputVal, 10, 64)
	if parseErr != nil {
		return fmt.Errorf("failed to parse int64 for MysqlPageOperations, value was %s: %w", inputVal, parseErr)
	}
	mb.metricMysqlPageOperations.recordDataPoint(mb.startTime, ts, parsed, pageOperationsAttributeValue.String())
	return nil
}
// RecordMysqlPreparedStatementsDataPoint adds a data point to mysql.prepared_statements metric.
// inputVal is the raw server status string; a non-integer value is reported as an error.
func (mb *MetricsBuilder) RecordMysqlPreparedStatementsDataPoint(ts pcommon.Timestamp, inputVal string, preparedStatementsCommandAttributeValue AttributePreparedStatementsCommand) error {
	parsed, parseErr := strconv.ParseInt(inputVal, 10, 64)
	if parseErr != nil {
		return fmt.Errorf("failed to parse int64 for MysqlPreparedStatements, value was %s: %w", inputVal, parseErr)
	}
	mb.metricMysqlPreparedStatements.recordDataPoint(mb.startTime, ts, parsed, preparedStatementsCommandAttributeValue.String())
	return nil
}
// RecordMysqlQueryClientCountDataPoint adds a data point to mysql.query.client.count metric.
// inputVal is the raw server status string; a non-integer value is reported as an error.
func (mb *MetricsBuilder) RecordMysqlQueryClientCountDataPoint(ts pcommon.Timestamp, inputVal string) error {
	parsed, parseErr := strconv.ParseInt(inputVal, 10, 64)
	if parseErr != nil {
		return fmt.Errorf("failed to parse int64 for MysqlQueryClientCount, value was %s: %w", inputVal, parseErr)
	}
	mb.metricMysqlQueryClientCount.recordDataPoint(mb.startTime, ts, parsed)
	return nil
}
// RecordMysqlQueryCountDataPoint adds a data point to mysql.query.count metric.
// inputVal is the raw server status string; a non-integer value is reported as an error.
func (mb *MetricsBuilder) RecordMysqlQueryCountDataPoint(ts pcommon.Timestamp, inputVal string) error {
	parsed, parseErr := strconv.ParseInt(inputVal, 10, 64)
	if parseErr != nil {
		return fmt.Errorf("failed to parse int64 for MysqlQueryCount, value was %s: %w", inputVal, parseErr)
	}
	mb.metricMysqlQueryCount.recordDataPoint(mb.startTime, ts, parsed)
	return nil
}
// RecordMysqlQuerySlowCountDataPoint adds a data point to mysql.query.slow.count metric.
// inputVal is the raw server status string; a non-integer value is reported as an error.
func (mb *MetricsBuilder) RecordMysqlQuerySlowCountDataPoint(ts pcommon.Timestamp, inputVal string) error {
	parsed, parseErr := strconv.ParseInt(inputVal, 10, 64)
	if parseErr != nil {
		return fmt.Errorf("failed to parse int64 for MysqlQuerySlowCount, value was %s: %w", inputVal, parseErr)
	}
	mb.metricMysqlQuerySlowCount.recordDataPoint(mb.startTime, ts, parsed)
	return nil
}
// RecordMysqlReplicaSQLDelayDataPoint adds a data point to mysql.replica.sql_delay metric.
// The data point carries the builder's start time and the observation timestamp ts.
func (mb *MetricsBuilder) RecordMysqlReplicaSQLDelayDataPoint(ts pcommon.Timestamp, val int64) {
	mb.metricMysqlReplicaSQLDelay.recordDataPoint(mb.startTime, ts, val)
}
// RecordMysqlReplicaTimeBehindSourceDataPoint adds a data point to mysql.replica.time_behind_source metric.
// The data point carries the builder's start time and the observation timestamp ts.
func (mb *MetricsBuilder) RecordMysqlReplicaTimeBehindSourceDataPoint(ts pcommon.Timestamp, val int64) {
	mb.metricMysqlReplicaTimeBehindSource.recordDataPoint(mb.startTime, ts, val)
}
// RecordMysqlRowLocksDataPoint adds a data point to mysql.row_locks metric.
// inputVal is the raw server status string; a non-integer value is reported as an error.
func (mb *MetricsBuilder) RecordMysqlRowLocksDataPoint(ts pcommon.Timestamp, inputVal string, rowLocksAttributeValue AttributeRowLocks) error {
	parsed, parseErr := strconv.ParseInt(inputVal, 10, 64)
	if parseErr != nil {
		return fmt.Errorf("failed to parse int64 for MysqlRowLocks, value was %s: %w", inputVal, parseErr)
	}
	mb.metricMysqlRowLocks.recordDataPoint(mb.startTime, ts, parsed, rowLocksAttributeValue.String())
	return nil
}
// RecordMysqlRowOperationsDataPoint adds a data point to mysql.row_operations metric.
// inputVal is the raw server status string; a non-integer value is reported as an error.
func (mb *MetricsBuilder) RecordMysqlRowOperationsDataPoint(ts pcommon.Timestamp, inputVal string, rowOperationsAttributeValue AttributeRowOperations) error {
	parsed, parseErr := strconv.ParseInt(inputVal, 10, 64)
	if parseErr != nil {
		return fmt.Errorf("failed to parse int64 for MysqlRowOperations, value was %s: %w", inputVal, parseErr)
	}
	mb.metricMysqlRowOperations.recordDataPoint(mb.startTime, ts, parsed, rowOperationsAttributeValue.String())
	return nil
}
// RecordMysqlSortsDataPoint adds a data point to mysql.sorts metric.
// inputVal is the raw server status string; a non-integer value is reported as an error.
func (mb *MetricsBuilder) RecordMysqlSortsDataPoint(ts pcommon.Timestamp, inputVal string, sortsAttributeValue AttributeSorts) error {
	parsed, parseErr := strconv.ParseInt(inputVal, 10, 64)
	if parseErr != nil {
		return fmt.Errorf("failed to parse int64 for MysqlSorts, value was %s: %w", inputVal, parseErr)
	}
	mb.metricMysqlSorts.recordDataPoint(mb.startTime, ts, parsed, sortsAttributeValue.String())
	return nil
}
// RecordMysqlStatementEventCountDataPoint adds a data point to mysql.statement_event.count metric.
// The data point is attributed with the schema, statement digest, digest text, and event state.
func (mb *MetricsBuilder) RecordMysqlStatementEventCountDataPoint(ts pcommon.Timestamp, val int64, schemaAttributeValue string, digestAttributeValue string, digestTextAttributeValue string, eventStateAttributeValue AttributeEventState) {
	mb.metricMysqlStatementEventCount.recordDataPoint(mb.startTime, ts, val, schemaAttributeValue, digestAttributeValue, digestTextAttributeValue, eventStateAttributeValue.String())
}
// RecordMysqlStatementEventWaitTimeDataPoint adds a data point to mysql.statement_event.wait.time metric.
// The data point is attributed with the schema, statement digest, and digest text.
func (mb *MetricsBuilder) RecordMysqlStatementEventWaitTimeDataPoint(ts pcommon.Timestamp, val int64, schemaAttributeValue string, digestAttributeValue string, digestTextAttributeValue string) {
	mb.metricMysqlStatementEventWaitTime.recordDataPoint(mb.startTime, ts, val, schemaAttributeValue, digestAttributeValue, digestTextAttributeValue)
}
// RecordMysqlTableAverageRowLengthDataPoint adds a data point to mysql.table.average_row_length metric.
// The data point is attributed with the table and schema names.
func (mb *MetricsBuilder) RecordMysqlTableAverageRowLengthDataPoint(ts pcommon.Timestamp, val int64, tableNameAttributeValue string, schemaAttributeValue string) {
	mb.metricMysqlTableAverageRowLength.recordDataPoint(mb.startTime, ts, val, tableNameAttributeValue, schemaAttributeValue)
}
// RecordMysqlTableIoWaitCountDataPoint adds a data point to mysql.table.io.wait.count metric.
// The data point is attributed with the I/O operation kind, table, and schema names.
func (mb *MetricsBuilder) RecordMysqlTableIoWaitCountDataPoint(ts pcommon.Timestamp, val int64, ioWaitsOperationsAttributeValue AttributeIoWaitsOperations, tableNameAttributeValue string, schemaAttributeValue string) {
	mb.metricMysqlTableIoWaitCount.recordDataPoint(mb.startTime, ts, val, ioWaitsOperationsAttributeValue.String(), tableNameAttributeValue, schemaAttributeValue)
}
// RecordMysqlTableIoWaitTimeDataPoint adds a data point to mysql.table.io.wait.time metric.
// The data point is attributed with the I/O operation kind, table, and schema names.
func (mb *MetricsBuilder) RecordMysqlTableIoWaitTimeDataPoint(ts pcommon.Timestamp, val int64, ioWaitsOperationsAttributeValue AttributeIoWaitsOperations, tableNameAttributeValue string, schemaAttributeValue string) {
	mb.metricMysqlTableIoWaitTime.recordDataPoint(mb.startTime, ts, val, ioWaitsOperationsAttributeValue.String(), tableNameAttributeValue, schemaAttributeValue)
}
// RecordMysqlTableLockWaitReadCountDataPoint adds a data point to mysql.table.lock_wait.read.count metric.
// The data point is attributed with the schema, table name, and read-lock type.
func (mb *MetricsBuilder) RecordMysqlTableLockWaitReadCountDataPoint(ts pcommon.Timestamp, val int64, schemaAttributeValue string, tableNameAttributeValue string, readLockTypeAttributeValue AttributeReadLockType) {
	mb.metricMysqlTableLockWaitReadCount.recordDataPoint(mb.startTime, ts, val, schemaAttributeValue, tableNameAttributeValue, readLockTypeAttributeValue.String())
}
// RecordMysqlTableLockWaitReadTimeDataPoint adds a data point to mysql.table.lock_wait.read.time metric.
// The data point is attributed with the schema, table name, and read-lock type.
func (mb *MetricsBuilder) RecordMysqlTableLockWaitReadTimeDataPoint(ts pcommon.Timestamp, val int64, schemaAttributeValue string, tableNameAttributeValue string, readLockTypeAttributeValue AttributeReadLockType) {
	mb.metricMysqlTableLockWaitReadTime.recordDataPoint(mb.startTime, ts, val, schemaAttributeValue, tableNameAttributeValue, readLockTypeAttributeValue.String())
}
// RecordMysqlTableLockWaitWriteCountDataPoint adds a data point to mysql.table.lock_wait.write.count metric.
// The data point is attributed with the schema, table name, and write-lock type.
func (mb *MetricsBuilder) RecordMysqlTableLockWaitWriteCountDataPoint(ts pcommon.Timestamp, val int64, schemaAttributeValue string, tableNameAttributeValue string, writeLockTypeAttributeValue AttributeWriteLockType) {
	mb.metricMysqlTableLockWaitWriteCount.recordDataPoint(mb.startTime, ts, val, schemaAttributeValue, tableNameAttributeValue, writeLockTypeAttributeValue.String())
}
// RecordMysqlTableLockWaitWriteTimeDataPoint adds a data point to mysql.table.lock_wait.write.time metric.
// The data point is attributed with the schema, table name, and write-lock type.
func (mb *MetricsBuilder) RecordMysqlTableLockWaitWriteTimeDataPoint(ts pcommon.Timestamp, val int64, schemaAttributeValue string, tableNameAttributeValue string, writeLockTypeAttributeValue AttributeWriteLockType) {
	mb.metricMysqlTableLockWaitWriteTime.recordDataPoint(mb.startTime, ts, val, schemaAttributeValue, tableNameAttributeValue, writeLockTypeAttributeValue.String())
}
// RecordMysqlTableRowsDataPoint adds a data point to mysql.table.rows metric.
// The data point is attributed with the table and schema names.
func (mb *MetricsBuilder) RecordMysqlTableRowsDataPoint(ts pcommon.Timestamp, val int64, tableNameAttributeValue string, schemaAttributeValue string) {
	mb.metricMysqlTableRows.recordDataPoint(mb.startTime, ts, val, tableNameAttributeValue, schemaAttributeValue)
}
// RecordMysqlTableSizeDataPoint adds a data point to mysql.table.size metric.
// The data point is attributed with the table name, schema, and size type.
func (mb *MetricsBuilder) RecordMysqlTableSizeDataPoint(ts pcommon.Timestamp, val int64, tableNameAttributeValue string, schemaAttributeValue string, tableSizeTypeAttributeValue AttributeTableSizeType) {
	mb.metricMysqlTableSize.recordDataPoint(mb.startTime, ts, val, tableNameAttributeValue, schemaAttributeValue, tableSizeTypeAttributeValue.String())
}
// RecordMysqlTableOpenCacheDataPoint adds a data point to mysql.table_open_cache metric.
// inputVal is the raw server status string; a non-integer value is reported as an error.
func (mb *MetricsBuilder) RecordMysqlTableOpenCacheDataPoint(ts pcommon.Timestamp, inputVal string, cacheStatusAttributeValue AttributeCacheStatus) error {
	parsed, parseErr := strconv.ParseInt(inputVal, 10, 64)
	if parseErr != nil {
		return fmt.Errorf("failed to parse int64 for MysqlTableOpenCache, value was %s: %w", inputVal, parseErr)
	}
	mb.metricMysqlTableOpenCache.recordDataPoint(mb.startTime, ts, parsed, cacheStatusAttributeValue.String())
	return nil
}
// RecordMysqlThreadsDataPoint adds a data point to mysql.threads metric.
// inputVal is the raw server status string; a non-integer value is reported as an error.
func (mb *MetricsBuilder) RecordMysqlThreadsDataPoint(ts pcommon.Timestamp, inputVal string, threadsAttributeValue AttributeThreads) error {
	parsed, parseErr := strconv.ParseInt(inputVal, 10, 64)
	if parseErr != nil {
		return fmt.Errorf("failed to parse int64 for MysqlThreads, value was %s: %w", inputVal, parseErr)
	}
	mb.metricMysqlThreads.recordDataPoint(mb.startTime, ts, parsed, threadsAttributeValue.String())
	return nil
}
// RecordMysqlTmpResourcesDataPoint adds a data point to mysql.tmp_resources metric.
// inputVal is the raw server status string; a non-integer value is reported as an error.
func (mb *MetricsBuilder) RecordMysqlTmpResourcesDataPoint(ts pcommon.Timestamp, inputVal string, tmpResourceAttributeValue AttributeTmpResource) error {
	parsed, parseErr := strconv.ParseInt(inputVal, 10, 64)
	if parseErr != nil {
		return fmt.Errorf("failed to parse int64 for MysqlTmpResources, value was %s: %w", inputVal, parseErr)
	}
	mb.metricMysqlTmpResources.recordDataPoint(mb.startTime, ts, parsed, tmpResourceAttributeValue.String())
	return nil
}
// RecordMysqlUptimeDataPoint adds a data point to mysql.uptime metric.
// inputVal is the raw server status string; a non-integer value is reported as an error.
func (mb *MetricsBuilder) RecordMysqlUptimeDataPoint(ts pcommon.Timestamp, inputVal string) error {
	parsed, parseErr := strconv.ParseInt(inputVal, 10, 64)
	if parseErr != nil {
		return fmt.Errorf("failed to parse int64 for MysqlUptime, value was %s: %w", inputVal, parseErr)
	}
	mb.metricMysqlUptime.recordDataPoint(mb.startTime, ts, parsed)
	return nil
}
// Reset resets metrics builder to its initial state. It should be used when external metrics source is restarted,
// and metrics builder should update its startTime and reset its internal state accordingly.
func (mb *MetricsBuilder) Reset(options ...MetricBuilderOption) {
	mb.startTime = pcommon.NewTimestampFromTime(time.Now())
	for _, opt := range options {
		opt.apply(mb)
	}
}