receiver/postgresqlreceiver/internal/metadata/generated_metrics.go:

// Code generated by mdatagen. DO NOT EDIT.

package metadata

import (
	"time"

	"go.opentelemetry.io/collector/component"
	"go.opentelemetry.io/collector/filter"
	"go.opentelemetry.io/collector/pdata/pcommon"
	"go.opentelemetry.io/collector/pdata/pmetric"
	"go.opentelemetry.io/collector/receiver"
)

// AttributeBgBufferSource specifies the value bg_buffer_source attribute.
type AttributeBgBufferSource int

const (
	_ AttributeBgBufferSource = iota
	AttributeBgBufferSourceBackend
	AttributeBgBufferSourceBackendFsync
	AttributeBgBufferSourceCheckpoints
	AttributeBgBufferSourceBgwriter
)

// String returns the string representation of the AttributeBgBufferSource.
func (av AttributeBgBufferSource) String() string {
	switch av {
	case AttributeBgBufferSourceBackend:
		return "backend"
	case AttributeBgBufferSourceBackendFsync:
		return "backend_fsync"
	case AttributeBgBufferSourceCheckpoints:
		return "checkpoints"
	case AttributeBgBufferSourceBgwriter:
		return "bgwriter"
	}
	return ""
}

// MapAttributeBgBufferSource is a helper map of string to AttributeBgBufferSource attribute value.
var MapAttributeBgBufferSource = map[string]AttributeBgBufferSource{
	"backend": AttributeBgBufferSourceBackend,
	"backend_fsync": AttributeBgBufferSourceBackendFsync,
	"checkpoints": AttributeBgBufferSourceCheckpoints,
	"bgwriter": AttributeBgBufferSourceBgwriter,
}

// AttributeBgCheckpointType specifies the value bg_checkpoint_type attribute.
type AttributeBgCheckpointType int

const (
	_ AttributeBgCheckpointType = iota
	AttributeBgCheckpointTypeRequested
	AttributeBgCheckpointTypeScheduled
)

// String returns the string representation of the AttributeBgCheckpointType.
func (av AttributeBgCheckpointType) String() string {
	switch av {
	case AttributeBgCheckpointTypeRequested:
		return "requested"
	case AttributeBgCheckpointTypeScheduled:
		return "scheduled"
	}
	return ""
}

// MapAttributeBgCheckpointType is a helper map of string to AttributeBgCheckpointType attribute value.
var MapAttributeBgCheckpointType = map[string]AttributeBgCheckpointType{
	"requested": AttributeBgCheckpointTypeRequested,
	"scheduled": AttributeBgCheckpointTypeScheduled,
}

// AttributeBgDurationType specifies the value bg_duration_type attribute.
type AttributeBgDurationType int

const (
	_ AttributeBgDurationType = iota
	AttributeBgDurationTypeSync
	AttributeBgDurationTypeWrite
)

// String returns the string representation of the AttributeBgDurationType.
func (av AttributeBgDurationType) String() string {
	switch av {
	case AttributeBgDurationTypeSync:
		return "sync"
	case AttributeBgDurationTypeWrite:
		return "write"
	}
	return ""
}

// MapAttributeBgDurationType is a helper map of string to AttributeBgDurationType attribute value.
var MapAttributeBgDurationType = map[string]AttributeBgDurationType{
	"sync": AttributeBgDurationTypeSync,
	"write": AttributeBgDurationTypeWrite,
}

// AttributeOperation specifies the value operation attribute.
type AttributeOperation int

const (
	_ AttributeOperation = iota
	AttributeOperationIns
	AttributeOperationUpd
	AttributeOperationDel
	AttributeOperationHotUpd
)

// String returns the string representation of the AttributeOperation.
func (av AttributeOperation) String() string {
	switch av {
	case AttributeOperationIns:
		return "ins"
	case AttributeOperationUpd:
		return "upd"
	case AttributeOperationDel:
		return "del"
	case AttributeOperationHotUpd:
		return "hot_upd"
	}
	return ""
}
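// NOTE (illustrative sketch, not part of the generated file): the MapAttribute*
// variables in this file are the intended way to convert a raw attribute string,
// e.g. one read from configuration or a query result, back into its typed value:
//
//	if src, ok := MapAttributeBgBufferSource["backend"]; ok {
//		_ = src.String() // "backend"
//	}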
// MapAttributeOperation is a helper map of string to AttributeOperation attribute value.
var MapAttributeOperation = map[string]AttributeOperation{
	"ins": AttributeOperationIns,
	"upd": AttributeOperationUpd,
	"del": AttributeOperationDel,
	"hot_upd": AttributeOperationHotUpd,
}

// AttributeSource specifies the value source attribute.
type AttributeSource int

const (
	_ AttributeSource = iota
	AttributeSourceHeapRead
	AttributeSourceHeapHit
	AttributeSourceIdxRead
	AttributeSourceIdxHit
	AttributeSourceToastRead
	AttributeSourceToastHit
	AttributeSourceTidxRead
	AttributeSourceTidxHit
)

// String returns the string representation of the AttributeSource.
func (av AttributeSource) String() string {
	switch av {
	case AttributeSourceHeapRead:
		return "heap_read"
	case AttributeSourceHeapHit:
		return "heap_hit"
	case AttributeSourceIdxRead:
		return "idx_read"
	case AttributeSourceIdxHit:
		return "idx_hit"
	case AttributeSourceToastRead:
		return "toast_read"
	case AttributeSourceToastHit:
		return "toast_hit"
	case AttributeSourceTidxRead:
		return "tidx_read"
	case AttributeSourceTidxHit:
		return "tidx_hit"
	}
	return ""
}

// MapAttributeSource is a helper map of string to AttributeSource attribute value.
var MapAttributeSource = map[string]AttributeSource{
	"heap_read": AttributeSourceHeapRead,
	"heap_hit": AttributeSourceHeapHit,
	"idx_read": AttributeSourceIdxRead,
	"idx_hit": AttributeSourceIdxHit,
	"toast_read": AttributeSourceToastRead,
	"toast_hit": AttributeSourceToastHit,
	"tidx_read": AttributeSourceTidxRead,
	"tidx_hit": AttributeSourceTidxHit,
}

// AttributeState specifies the value state attribute.
type AttributeState int

const (
	_ AttributeState = iota
	AttributeStateDead
	AttributeStateLive
)

// String returns the string representation of the AttributeState.
func (av AttributeState) String() string {
	switch av {
	case AttributeStateDead:
		return "dead"
	case AttributeStateLive:
		return "live"
	}
	return ""
}

// MapAttributeState is a helper map of string to AttributeState attribute value.
var MapAttributeState = map[string]AttributeState{
	"dead": AttributeStateDead,
	"live": AttributeStateLive,
}

// AttributeWalOperationLag specifies the value wal_operation_lag attribute.
type AttributeWalOperationLag int

const (
	_ AttributeWalOperationLag = iota
	AttributeWalOperationLagFlush
	AttributeWalOperationLagReplay
	AttributeWalOperationLagWrite
)

// String returns the string representation of the AttributeWalOperationLag.
func (av AttributeWalOperationLag) String() string {
	switch av {
	case AttributeWalOperationLagFlush:
		return "flush"
	case AttributeWalOperationLagReplay:
		return "replay"
	case AttributeWalOperationLagWrite:
		return "write"
	}
	return ""
}

// MapAttributeWalOperationLag is a helper map of string to AttributeWalOperationLag attribute value.
var MapAttributeWalOperationLag = map[string]AttributeWalOperationLag{
	"flush": AttributeWalOperationLagFlush,
	"replay": AttributeWalOperationLagReplay,
	"write": AttributeWalOperationLagWrite,
}

var MetricsInfo = metricsInfo{
	PostgresqlBackends: metricInfo{
		Name: "postgresql.backends",
	},
	PostgresqlBgwriterBuffersAllocated: metricInfo{
		Name: "postgresql.bgwriter.buffers.allocated",
	},
	PostgresqlBgwriterBuffersWrites: metricInfo{
		Name: "postgresql.bgwriter.buffers.writes",
	},
	PostgresqlBgwriterCheckpointCount: metricInfo{
		Name: "postgresql.bgwriter.checkpoint.count",
	},
	PostgresqlBgwriterDuration: metricInfo{
		Name: "postgresql.bgwriter.duration",
	},
	PostgresqlBgwriterMaxwritten: metricInfo{
		Name: "postgresql.bgwriter.maxwritten",
	},
	PostgresqlBlksHit: metricInfo{
		Name: "postgresql.blks_hit",
	},
	PostgresqlBlksRead: metricInfo{
		Name: "postgresql.blks_read",
	},
	PostgresqlBlocksRead: metricInfo{
		Name: "postgresql.blocks_read",
	},
	PostgresqlCommits: metricInfo{
		Name: "postgresql.commits",
	},
	PostgresqlConnectionMax: metricInfo{
		Name: "postgresql.connection.max",
	},
	PostgresqlDatabaseCount: metricInfo{
		Name: "postgresql.database.count",
	},
	PostgresqlDatabaseLocks: metricInfo{
		Name: "postgresql.database.locks",
	},
	PostgresqlDbSize: metricInfo{
		Name: "postgresql.db_size",
	},
	PostgresqlDeadlocks: metricInfo{
		Name: "postgresql.deadlocks",
	},
	PostgresqlIndexScans: metricInfo{
		Name: "postgresql.index.scans",
	},
	PostgresqlIndexSize: metricInfo{
		Name: "postgresql.index.size",
	},
	PostgresqlOperations: metricInfo{
		Name: "postgresql.operations",
	},
	PostgresqlReplicationDataDelay: metricInfo{
		Name: "postgresql.replication.data_delay",
	},
	PostgresqlRollbacks: metricInfo{
		Name: "postgresql.rollbacks",
	},
	PostgresqlRows: metricInfo{
		Name: "postgresql.rows",
	},
	PostgresqlSequentialScans: metricInfo{
		Name: "postgresql.sequential_scans",
	},
	PostgresqlTableCount: metricInfo{
		Name: "postgresql.table.count",
	},
	PostgresqlTableSize: metricInfo{
		Name: "postgresql.table.size",
	},
	PostgresqlTableVacuumCount: metricInfo{
		Name: "postgresql.table.vacuum.count",
	},
	PostgresqlTempFiles: metricInfo{
		Name: "postgresql.temp_files",
	},
	PostgresqlTupDeleted: metricInfo{
		Name: "postgresql.tup_deleted",
	},
	PostgresqlTupFetched: metricInfo{
		Name: "postgresql.tup_fetched",
	},
	PostgresqlTupInserted: metricInfo{
		Name: "postgresql.tup_inserted",
	},
	PostgresqlTupReturned: metricInfo{
		Name: "postgresql.tup_returned",
	},
	PostgresqlTupUpdated: metricInfo{
		Name: "postgresql.tup_updated",
	},
	PostgresqlWalAge: metricInfo{
		Name: "postgresql.wal.age",
	},
	PostgresqlWalDelay: metricInfo{
		Name: "postgresql.wal.delay",
	},
	PostgresqlWalLag: metricInfo{
		Name: "postgresql.wal.lag",
	},
}

type metricsInfo struct {
	PostgresqlBackends metricInfo
	PostgresqlBgwriterBuffersAllocated metricInfo
	PostgresqlBgwriterBuffersWrites metricInfo
	PostgresqlBgwriterCheckpointCount metricInfo
	PostgresqlBgwriterDuration metricInfo
	PostgresqlBgwriterMaxwritten metricInfo
	PostgresqlBlksHit metricInfo
	PostgresqlBlksRead metricInfo
	PostgresqlBlocksRead metricInfo
	PostgresqlCommits metricInfo
	PostgresqlConnectionMax metricInfo
	PostgresqlDatabaseCount metricInfo
	PostgresqlDatabaseLocks metricInfo
	PostgresqlDbSize metricInfo
	PostgresqlDeadlocks metricInfo
	PostgresqlIndexScans metricInfo
	PostgresqlIndexSize metricInfo
	PostgresqlOperations metricInfo
	PostgresqlReplicationDataDelay metricInfo
	PostgresqlRollbacks metricInfo
	PostgresqlRows metricInfo
	PostgresqlSequentialScans metricInfo
	PostgresqlTableCount metricInfo
	PostgresqlTableSize metricInfo
	PostgresqlTableVacuumCount metricInfo
	PostgresqlTempFiles metricInfo
	PostgresqlTupDeleted metricInfo
	PostgresqlTupFetched metricInfo
	PostgresqlTupInserted metricInfo
	PostgresqlTupReturned metricInfo
	PostgresqlTupUpdated metricInfo
	PostgresqlWalAge metricInfo
	PostgresqlWalDelay metricInfo
	PostgresqlWalLag metricInfo
}

type metricInfo struct {
	Name string
}

type metricPostgresqlBackends struct {
	data pmetric.Metric // data buffer for generated metric.
	config MetricConfig // metric config provided by user.
	capacity int // max observed number of data points added to the metric.
}

// init fills postgresql.backends metric with initial data.
func (m *metricPostgresqlBackends) init() {
	m.data.SetName("postgresql.backends")
	m.data.SetDescription("The number of backends.")
	m.data.SetUnit("1")
	m.data.SetEmptySum()
	m.data.Sum().SetIsMonotonic(false)
	m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
}

func (m *metricPostgresqlBackends) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
	if !m.config.Enabled {
		return
	}
	dp := m.data.Sum().DataPoints().AppendEmpty()
	dp.SetStartTimestamp(start)
	dp.SetTimestamp(ts)
	dp.SetIntValue(val)
}

// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricPostgresqlBackends) updateCapacity() {
	if m.data.Sum().DataPoints().Len() > m.capacity {
		m.capacity = m.data.Sum().DataPoints().Len()
	}
}

// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricPostgresqlBackends) emit(metrics pmetric.MetricSlice) {
	if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
		m.updateCapacity()
		m.data.MoveTo(metrics.AppendEmpty())
		m.init()
	}
}

func newMetricPostgresqlBackends(cfg MetricConfig) metricPostgresqlBackends {
	m := metricPostgresqlBackends{config: cfg}
	if cfg.Enabled {
		m.data = pmetric.NewMetric()
		m.init()
	}
	return m
}

type metricPostgresqlBgwriterBuffersAllocated struct {
	data pmetric.Metric // data buffer for generated metric.
	config MetricConfig // metric config provided by user.
	capacity int // max observed number of data points added to the metric.
}

// init fills postgresql.bgwriter.buffers.allocated metric with initial data.
func (m *metricPostgresqlBgwriterBuffersAllocated) init() {
	m.data.SetName("postgresql.bgwriter.buffers.allocated")
	m.data.SetDescription("Number of buffers allocated.")
	m.data.SetUnit("{buffers}")
	m.data.SetEmptySum()
	m.data.Sum().SetIsMonotonic(true)
	m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
}

func (m *metricPostgresqlBgwriterBuffersAllocated) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
	if !m.config.Enabled {
		return
	}
	dp := m.data.Sum().DataPoints().AppendEmpty()
	dp.SetStartTimestamp(start)
	dp.SetTimestamp(ts)
	dp.SetIntValue(val)
}

// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricPostgresqlBgwriterBuffersAllocated) updateCapacity() {
	if m.data.Sum().DataPoints().Len() > m.capacity {
		m.capacity = m.data.Sum().DataPoints().Len()
	}
}
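// NOTE (illustrative sketch, not part of the generated file): every metric type
// in this file follows the same lifecycle. A scraper records one or more data
// points per scrape, then emit moves the accumulated points into an output
// slice and resets the buffer, roughly:
//
//	m := newMetricPostgresqlBackends(MetricConfig{Enabled: true})
//	now := pcommon.NewTimestampFromTime(time.Now())
//	m.recordDataPoint(now, now, 42)
//	out := pmetric.NewMetrics().ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty().Metrics()
//	m.emit(out)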
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricPostgresqlBgwriterBuffersAllocated) emit(metrics pmetric.MetricSlice) {
	if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
		m.updateCapacity()
		m.data.MoveTo(metrics.AppendEmpty())
		m.init()
	}
}

func newMetricPostgresqlBgwriterBuffersAllocated(cfg MetricConfig) metricPostgresqlBgwriterBuffersAllocated {
	m := metricPostgresqlBgwriterBuffersAllocated{config: cfg}
	if cfg.Enabled {
		m.data = pmetric.NewMetric()
		m.init()
	}
	return m
}

type metricPostgresqlBgwriterBuffersWrites struct {
	data pmetric.Metric // data buffer for generated metric.
	config MetricConfig // metric config provided by user.
	capacity int // max observed number of data points added to the metric.
}

// init fills postgresql.bgwriter.buffers.writes metric with initial data.
func (m *metricPostgresqlBgwriterBuffersWrites) init() {
	m.data.SetName("postgresql.bgwriter.buffers.writes")
	m.data.SetDescription("Number of buffers written.")
	m.data.SetUnit("{buffers}")
	m.data.SetEmptySum()
	m.data.Sum().SetIsMonotonic(true)
	m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
	m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
}

func (m *metricPostgresqlBgwriterBuffersWrites) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, bgBufferSourceAttributeValue string) {
	if !m.config.Enabled {
		return
	}
	dp := m.data.Sum().DataPoints().AppendEmpty()
	dp.SetStartTimestamp(start)
	dp.SetTimestamp(ts)
	dp.SetIntValue(val)
	dp.Attributes().PutStr("source", bgBufferSourceAttributeValue)
}

// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricPostgresqlBgwriterBuffersWrites) updateCapacity() {
	if m.data.Sum().DataPoints().Len() > m.capacity {
		m.capacity = m.data.Sum().DataPoints().Len()
	}
}

// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricPostgresqlBgwriterBuffersWrites) emit(metrics pmetric.MetricSlice) {
	if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
		m.updateCapacity()
		m.data.MoveTo(metrics.AppendEmpty())
		m.init()
	}
}

func newMetricPostgresqlBgwriterBuffersWrites(cfg MetricConfig) metricPostgresqlBgwriterBuffersWrites {
	m := metricPostgresqlBgwriterBuffersWrites{config: cfg}
	if cfg.Enabled {
		m.data = pmetric.NewMetric()
		m.init()
	}
	return m
}

type metricPostgresqlBgwriterCheckpointCount struct {
	data pmetric.Metric // data buffer for generated metric.
	config MetricConfig // metric config provided by user.
	capacity int // max observed number of data points added to the metric.
}

// init fills postgresql.bgwriter.checkpoint.count metric with initial data.
func (m *metricPostgresqlBgwriterCheckpointCount) init() {
	m.data.SetName("postgresql.bgwriter.checkpoint.count")
	m.data.SetDescription("The number of checkpoints performed.")
	m.data.SetUnit("{checkpoints}")
	m.data.SetEmptySum()
	m.data.Sum().SetIsMonotonic(true)
	m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
	m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
}

func (m *metricPostgresqlBgwriterCheckpointCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, bgCheckpointTypeAttributeValue string) {
	if !m.config.Enabled {
		return
	}
	dp := m.data.Sum().DataPoints().AppendEmpty()
	dp.SetStartTimestamp(start)
	dp.SetTimestamp(ts)
	dp.SetIntValue(val)
	dp.Attributes().PutStr("type", bgCheckpointTypeAttributeValue)
}

// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricPostgresqlBgwriterCheckpointCount) updateCapacity() {
	if m.data.Sum().DataPoints().Len() > m.capacity {
		m.capacity = m.data.Sum().DataPoints().Len()
	}
}

// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricPostgresqlBgwriterCheckpointCount) emit(metrics pmetric.MetricSlice) {
	if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
		m.updateCapacity()
		m.data.MoveTo(metrics.AppendEmpty())
		m.init()
	}
}

func newMetricPostgresqlBgwriterCheckpointCount(cfg MetricConfig) metricPostgresqlBgwriterCheckpointCount {
	m := metricPostgresqlBgwriterCheckpointCount{config: cfg}
	if cfg.Enabled {
		m.data = pmetric.NewMetric()
		m.init()
	}
	return m
}

type metricPostgresqlBgwriterDuration struct {
	data pmetric.Metric // data buffer for generated metric.
	config MetricConfig // metric config provided by user.
	capacity int // max observed number of data points added to the metric.
}

// init fills postgresql.bgwriter.duration metric with initial data.
func (m *metricPostgresqlBgwriterDuration) init() {
	m.data.SetName("postgresql.bgwriter.duration")
	m.data.SetDescription("Total time spent writing and syncing files to disk by checkpoints.")
	m.data.SetUnit("ms")
	m.data.SetEmptySum()
	m.data.Sum().SetIsMonotonic(true)
	m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
	m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
}

func (m *metricPostgresqlBgwriterDuration) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, bgDurationTypeAttributeValue string) {
	if !m.config.Enabled {
		return
	}
	dp := m.data.Sum().DataPoints().AppendEmpty()
	dp.SetStartTimestamp(start)
	dp.SetTimestamp(ts)
	dp.SetDoubleValue(val)
	dp.Attributes().PutStr("type", bgDurationTypeAttributeValue)
}

// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricPostgresqlBgwriterDuration) updateCapacity() {
	if m.data.Sum().DataPoints().Len() > m.capacity {
		m.capacity = m.data.Sum().DataPoints().Len()
	}
}

// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricPostgresqlBgwriterDuration) emit(metrics pmetric.MetricSlice) {
	if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
		m.updateCapacity()
		m.data.MoveTo(metrics.AppendEmpty())
		m.init()
	}
}

func newMetricPostgresqlBgwriterDuration(cfg MetricConfig) metricPostgresqlBgwriterDuration {
	m := metricPostgresqlBgwriterDuration{config: cfg}
	if cfg.Enabled {
		m.data = pmetric.NewMetric()
		m.init()
	}
	return m
}

type metricPostgresqlBgwriterMaxwritten struct {
	data pmetric.Metric // data buffer for generated metric.
	config MetricConfig // metric config provided by user.
	capacity int // max observed number of data points added to the metric.
}

// init fills postgresql.bgwriter.maxwritten metric with initial data.
func (m *metricPostgresqlBgwriterMaxwritten) init() {
	m.data.SetName("postgresql.bgwriter.maxwritten")
	m.data.SetDescription("Number of times the background writer stopped a cleaning scan because it had written too many buffers.")
	m.data.SetUnit("1")
	m.data.SetEmptySum()
	m.data.Sum().SetIsMonotonic(true)
	m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
}

func (m *metricPostgresqlBgwriterMaxwritten) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
	if !m.config.Enabled {
		return
	}
	dp := m.data.Sum().DataPoints().AppendEmpty()
	dp.SetStartTimestamp(start)
	dp.SetTimestamp(ts)
	dp.SetIntValue(val)
}

// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricPostgresqlBgwriterMaxwritten) updateCapacity() {
	if m.data.Sum().DataPoints().Len() > m.capacity {
		m.capacity = m.data.Sum().DataPoints().Len()
	}
}

// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricPostgresqlBgwriterMaxwritten) emit(metrics pmetric.MetricSlice) {
	if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
		m.updateCapacity()
		m.data.MoveTo(metrics.AppendEmpty())
		m.init()
	}
}

func newMetricPostgresqlBgwriterMaxwritten(cfg MetricConfig) metricPostgresqlBgwriterMaxwritten {
	m := metricPostgresqlBgwriterMaxwritten{config: cfg}
	if cfg.Enabled {
		m.data = pmetric.NewMetric()
		m.init()
	}
	return m
}

type metricPostgresqlBlksHit struct {
	data pmetric.Metric // data buffer for generated metric.
	config MetricConfig // metric config provided by user.
	capacity int // max observed number of data points added to the metric.
}

// init fills postgresql.blks_hit metric with initial data.
func (m *metricPostgresqlBlksHit) init() {
	m.data.SetName("postgresql.blks_hit")
	m.data.SetDescription("Number of times disk blocks were found already in the buffer cache.")
	m.data.SetUnit("{blks_hit}")
	m.data.SetEmptySum()
	m.data.Sum().SetIsMonotonic(true)
	m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
}

func (m *metricPostgresqlBlksHit) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
	if !m.config.Enabled {
		return
	}
	dp := m.data.Sum().DataPoints().AppendEmpty()
	dp.SetStartTimestamp(start)
	dp.SetTimestamp(ts)
	dp.SetIntValue(val)
}

// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricPostgresqlBlksHit) updateCapacity() {
	if m.data.Sum().DataPoints().Len() > m.capacity {
		m.capacity = m.data.Sum().DataPoints().Len()
	}
}

// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricPostgresqlBlksHit) emit(metrics pmetric.MetricSlice) {
	if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
		m.updateCapacity()
		m.data.MoveTo(metrics.AppendEmpty())
		m.init()
	}
}

func newMetricPostgresqlBlksHit(cfg MetricConfig) metricPostgresqlBlksHit {
	m := metricPostgresqlBlksHit{config: cfg}
	if cfg.Enabled {
		m.data = pmetric.NewMetric()
		m.init()
	}
	return m
}

type metricPostgresqlBlksRead struct {
	data pmetric.Metric // data buffer for generated metric.
	config MetricConfig // metric config provided by user.
	capacity int // max observed number of data points added to the metric.
}

// init fills postgresql.blks_read metric with initial data.
func (m *metricPostgresqlBlksRead) init() {
	m.data.SetName("postgresql.blks_read")
	m.data.SetDescription("Number of disk blocks read in this database.")
	m.data.SetUnit("{blks_read}")
	m.data.SetEmptySum()
	m.data.Sum().SetIsMonotonic(true)
	m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
}

func (m *metricPostgresqlBlksRead) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
	if !m.config.Enabled {
		return
	}
	dp := m.data.Sum().DataPoints().AppendEmpty()
	dp.SetStartTimestamp(start)
	dp.SetTimestamp(ts)
	dp.SetIntValue(val)
}

// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricPostgresqlBlksRead) updateCapacity() {
	if m.data.Sum().DataPoints().Len() > m.capacity {
		m.capacity = m.data.Sum().DataPoints().Len()
	}
}

// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricPostgresqlBlksRead) emit(metrics pmetric.MetricSlice) {
	if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
		m.updateCapacity()
		m.data.MoveTo(metrics.AppendEmpty())
		m.init()
	}
}

func newMetricPostgresqlBlksRead(cfg MetricConfig) metricPostgresqlBlksRead {
	m := metricPostgresqlBlksRead{config: cfg}
	if cfg.Enabled {
		m.data = pmetric.NewMetric()
		m.init()
	}
	return m
}

type metricPostgresqlBlocksRead struct {
	data pmetric.Metric // data buffer for generated metric.
	config MetricConfig // metric config provided by user.
	capacity int // max observed number of data points added to the metric.
}

// init fills postgresql.blocks_read metric with initial data.
func (m *metricPostgresqlBlocksRead) init() {
	m.data.SetName("postgresql.blocks_read")
	m.data.SetDescription("The number of blocks read.")
	m.data.SetUnit("1")
	m.data.SetEmptySum()
	m.data.Sum().SetIsMonotonic(true)
	m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
	m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
}

func (m *metricPostgresqlBlocksRead) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, sourceAttributeValue string) {
	if !m.config.Enabled {
		return
	}
	dp := m.data.Sum().DataPoints().AppendEmpty()
	dp.SetStartTimestamp(start)
	dp.SetTimestamp(ts)
	dp.SetIntValue(val)
	dp.Attributes().PutStr("source", sourceAttributeValue)
}

// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricPostgresqlBlocksRead) updateCapacity() {
	if m.data.Sum().DataPoints().Len() > m.capacity {
		m.capacity = m.data.Sum().DataPoints().Len()
	}
}

// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricPostgresqlBlocksRead) emit(metrics pmetric.MetricSlice) {
	if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
		m.updateCapacity()
		m.data.MoveTo(metrics.AppendEmpty())
		m.init()
	}
}

func newMetricPostgresqlBlocksRead(cfg MetricConfig) metricPostgresqlBlocksRead {
	m := metricPostgresqlBlocksRead{config: cfg}
	if cfg.Enabled {
		m.data = pmetric.NewMetric()
		m.init()
	}
	return m
}

type metricPostgresqlCommits struct {
	data pmetric.Metric // data buffer for generated metric.
	config MetricConfig // metric config provided by user.
	capacity int // max observed number of data points added to the metric.
}

// init fills postgresql.commits metric with initial data.
func (m *metricPostgresqlCommits) init() {
	m.data.SetName("postgresql.commits")
	m.data.SetDescription("The number of commits.")
	m.data.SetUnit("1")
	m.data.SetEmptySum()
	m.data.Sum().SetIsMonotonic(true)
	m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
}

func (m *metricPostgresqlCommits) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
	if !m.config.Enabled {
		return
	}
	dp := m.data.Sum().DataPoints().AppendEmpty()
	dp.SetStartTimestamp(start)
	dp.SetTimestamp(ts)
	dp.SetIntValue(val)
}

// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricPostgresqlCommits) updateCapacity() {
	if m.data.Sum().DataPoints().Len() > m.capacity {
		m.capacity = m.data.Sum().DataPoints().Len()
	}
}

// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricPostgresqlCommits) emit(metrics pmetric.MetricSlice) {
	if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
		m.updateCapacity()
		m.data.MoveTo(metrics.AppendEmpty())
		m.init()
	}
}

func newMetricPostgresqlCommits(cfg MetricConfig) metricPostgresqlCommits {
	m := metricPostgresqlCommits{config: cfg}
	if cfg.Enabled {
		m.data = pmetric.NewMetric()
		m.init()
	}
	return m
}

type metricPostgresqlConnectionMax struct {
	data pmetric.Metric // data buffer for generated metric.
	config MetricConfig // metric config provided by user.
	capacity int // max observed number of data points added to the metric.
}

// init fills postgresql.connection.max metric with initial data.
func (m *metricPostgresqlConnectionMax) init() {
	m.data.SetName("postgresql.connection.max")
	m.data.SetDescription("Configured maximum number of client connections allowed")
	m.data.SetUnit("{connections}")
	m.data.SetEmptyGauge()
}

func (m *metricPostgresqlConnectionMax) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
	if !m.config.Enabled {
		return
	}
	dp := m.data.Gauge().DataPoints().AppendEmpty()
	dp.SetStartTimestamp(start)
	dp.SetTimestamp(ts)
	dp.SetIntValue(val)
}

// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricPostgresqlConnectionMax) updateCapacity() {
	if m.data.Gauge().DataPoints().Len() > m.capacity {
		m.capacity = m.data.Gauge().DataPoints().Len()
	}
}

// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricPostgresqlConnectionMax) emit(metrics pmetric.MetricSlice) {
	if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
		m.updateCapacity()
		m.data.MoveTo(metrics.AppendEmpty())
		m.init()
	}
}

func newMetricPostgresqlConnectionMax(cfg MetricConfig) metricPostgresqlConnectionMax {
	m := metricPostgresqlConnectionMax{config: cfg}
	if cfg.Enabled {
		m.data = pmetric.NewMetric()
		m.init()
	}
	return m
}

type metricPostgresqlDatabaseCount struct {
	data pmetric.Metric // data buffer for generated metric.
	config MetricConfig // metric config provided by user.
	capacity int // max observed number of data points added to the metric.
}

// init fills postgresql.database.count metric with initial data.
func (m *metricPostgresqlDatabaseCount) init() {
	m.data.SetName("postgresql.database.count")
	m.data.SetDescription("Number of user databases.")
	m.data.SetUnit("{databases}")
	m.data.SetEmptySum()
	m.data.Sum().SetIsMonotonic(false)
	m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
}

func (m *metricPostgresqlDatabaseCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
	if !m.config.Enabled {
		return
	}
	dp := m.data.Sum().DataPoints().AppendEmpty()
	dp.SetStartTimestamp(start)
	dp.SetTimestamp(ts)
	dp.SetIntValue(val)
}

// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricPostgresqlDatabaseCount) updateCapacity() {
	if m.data.Sum().DataPoints().Len() > m.capacity {
		m.capacity = m.data.Sum().DataPoints().Len()
	}
}

// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricPostgresqlDatabaseCount) emit(metrics pmetric.MetricSlice) {
	if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
		m.updateCapacity()
		m.data.MoveTo(metrics.AppendEmpty())
		m.init()
	}
}

func newMetricPostgresqlDatabaseCount(cfg MetricConfig) metricPostgresqlDatabaseCount {
	m := metricPostgresqlDatabaseCount{config: cfg}
	if cfg.Enabled {
		m.data = pmetric.NewMetric()
		m.init()
	}
	return m
}

type metricPostgresqlDatabaseLocks struct {
	data pmetric.Metric // data buffer for generated metric.
	config MetricConfig // metric config provided by user.
	capacity int // max observed number of data points added to the metric.
}

// init fills postgresql.database.locks metric with initial data.
func (m *metricPostgresqlDatabaseLocks) init() {
	m.data.SetName("postgresql.database.locks")
	m.data.SetDescription("The number of database locks.")
	m.data.SetUnit("{lock}")
	m.data.SetEmptyGauge()
	m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}

func (m *metricPostgresqlDatabaseLocks) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, relationAttributeValue string, modeAttributeValue string, lockTypeAttributeValue string) {
	if !m.config.Enabled {
		return
	}
	dp := m.data.Gauge().DataPoints().AppendEmpty()
	dp.SetStartTimestamp(start)
	dp.SetTimestamp(ts)
	dp.SetIntValue(val)
	dp.Attributes().PutStr("relation", relationAttributeValue)
	dp.Attributes().PutStr("mode", modeAttributeValue)
	dp.Attributes().PutStr("lock_type", lockTypeAttributeValue)
}

// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricPostgresqlDatabaseLocks) updateCapacity() {
	if m.data.Gauge().DataPoints().Len() > m.capacity {
		m.capacity = m.data.Gauge().DataPoints().Len()
	}
}

// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricPostgresqlDatabaseLocks) emit(metrics pmetric.MetricSlice) {
	if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
		m.updateCapacity()
		m.data.MoveTo(metrics.AppendEmpty())
		m.init()
	}
}

func newMetricPostgresqlDatabaseLocks(cfg MetricConfig) metricPostgresqlDatabaseLocks {
	m := metricPostgresqlDatabaseLocks{config: cfg}
	if cfg.Enabled {
		m.data = pmetric.NewMetric()
		m.init()
	}
	return m
}

type metricPostgresqlDbSize struct {
	data pmetric.Metric // data buffer for generated metric.
	config MetricConfig // metric config provided by user.
	capacity int // max observed number of data points added to the metric.
}
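// NOTE (illustrative, not part of the generated file): metrics with attributes,
// such as postgresql.database.locks above, take each attribute value as an extra
// string parameter and attach it to the recorded data point; the values below
// are hypothetical example inputs:
//
//	locks := newMetricPostgresqlDatabaseLocks(MetricConfig{Enabled: true})
//	locks.recordDataPoint(now, now, 3, "pg_locks", "AccessShareLock", "relation")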
// init fills postgresql.db_size metric with initial data.
func (m *metricPostgresqlDbSize) init() {
	m.data.SetName("postgresql.db_size")
	m.data.SetDescription("The database disk usage.")
	m.data.SetUnit("By")
	m.data.SetEmptySum()
	m.data.Sum().SetIsMonotonic(false)
	m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
}

func (m *metricPostgresqlDbSize) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
	if !m.config.Enabled {
		return
	}
	dp := m.data.Sum().DataPoints().AppendEmpty()
	dp.SetStartTimestamp(start)
	dp.SetTimestamp(ts)
	dp.SetIntValue(val)
}

// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricPostgresqlDbSize) updateCapacity() {
	if m.data.Sum().DataPoints().Len() > m.capacity {
		m.capacity = m.data.Sum().DataPoints().Len()
	}
}

// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricPostgresqlDbSize) emit(metrics pmetric.MetricSlice) {
	if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
		m.updateCapacity()
		m.data.MoveTo(metrics.AppendEmpty())
		m.init()
	}
}

func newMetricPostgresqlDbSize(cfg MetricConfig) metricPostgresqlDbSize {
	m := metricPostgresqlDbSize{config: cfg}
	if cfg.Enabled {
		m.data = pmetric.NewMetric()
		m.init()
	}
	return m
}

type metricPostgresqlDeadlocks struct {
	data pmetric.Metric // data buffer for generated metric.
	config MetricConfig // metric config provided by user.
	capacity int // max observed number of data points added to the metric.
}

// init fills postgresql.deadlocks metric with initial data.
func (m *metricPostgresqlDeadlocks) init() {
	m.data.SetName("postgresql.deadlocks")
	m.data.SetDescription("The number of deadlocks.")
	m.data.SetUnit("{deadlock}")
	m.data.SetEmptySum()
	m.data.Sum().SetIsMonotonic(true)
	m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
}

func (m *metricPostgresqlDeadlocks) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
	if !m.config.Enabled {
		return
	}
	dp := m.data.Sum().DataPoints().AppendEmpty()
	dp.SetStartTimestamp(start)
	dp.SetTimestamp(ts)
	dp.SetIntValue(val)
}

// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricPostgresqlDeadlocks) updateCapacity() {
	if m.data.Sum().DataPoints().Len() > m.capacity {
		m.capacity = m.data.Sum().DataPoints().Len()
	}
}

// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricPostgresqlDeadlocks) emit(metrics pmetric.MetricSlice) {
	if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
		m.updateCapacity()
		m.data.MoveTo(metrics.AppendEmpty())
		m.init()
	}
}

func newMetricPostgresqlDeadlocks(cfg MetricConfig) metricPostgresqlDeadlocks {
	m := metricPostgresqlDeadlocks{config: cfg}
	if cfg.Enabled {
		m.data = pmetric.NewMetric()
		m.init()
	}
	return m
}

type metricPostgresqlIndexScans struct {
	data pmetric.Metric // data buffer for generated metric.
	config MetricConfig // metric config provided by user.
	capacity int // max observed number of data points added to the metric.
}

// init fills postgresql.index.scans metric with initial data.
func (m *metricPostgresqlIndexScans) init() {
	m.data.SetName("postgresql.index.scans")
	m.data.SetDescription("The number of index scans on a table.")
	m.data.SetUnit("{scans}")
	m.data.SetEmptySum()
	m.data.Sum().SetIsMonotonic(true)
	m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
}

func (m *metricPostgresqlIndexScans) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
	if !m.config.Enabled {
		return
	}
	dp := m.data.Sum().DataPoints().AppendEmpty()
	dp.SetStartTimestamp(start)
	dp.SetTimestamp(ts)
	dp.SetIntValue(val)
}

// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricPostgresqlIndexScans) updateCapacity() {
	if m.data.Sum().DataPoints().Len() > m.capacity {
		m.capacity = m.data.Sum().DataPoints().Len()
	}
}

// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricPostgresqlIndexScans) emit(metrics pmetric.MetricSlice) {
	if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
		m.updateCapacity()
		m.data.MoveTo(metrics.AppendEmpty())
		m.init()
	}
}

func newMetricPostgresqlIndexScans(cfg MetricConfig) metricPostgresqlIndexScans {
	m := metricPostgresqlIndexScans{config: cfg}
	if cfg.Enabled {
		m.data = pmetric.NewMetric()
		m.init()
	}
	return m
}

type metricPostgresqlIndexSize struct {
	data pmetric.Metric // data buffer for generated metric.
	config MetricConfig // metric config provided by user.
	capacity int // max observed number of data points added to the metric.
}

// init fills postgresql.index.size metric with initial data.
func (m *metricPostgresqlIndexSize) init() {
	m.data.SetName("postgresql.index.size")
	m.data.SetDescription("The size of the index on disk.")
	m.data.SetUnit("By")
	m.data.SetEmptyGauge()
}

func (m *metricPostgresqlIndexSize) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
	if !m.config.Enabled {
		return
	}
	dp := m.data.Gauge().DataPoints().AppendEmpty()
	dp.SetStartTimestamp(start)
	dp.SetTimestamp(ts)
	dp.SetIntValue(val)
}

// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricPostgresqlIndexSize) updateCapacity() {
	if m.data.Gauge().DataPoints().Len() > m.capacity {
		m.capacity = m.data.Gauge().DataPoints().Len()
	}
}

// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricPostgresqlIndexSize) emit(metrics pmetric.MetricSlice) {
	if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
		m.updateCapacity()
		m.data.MoveTo(metrics.AppendEmpty())
		m.init()
	}
}

func newMetricPostgresqlIndexSize(cfg MetricConfig) metricPostgresqlIndexSize {
	m := metricPostgresqlIndexSize{config: cfg}
	if cfg.Enabled {
		m.data = pmetric.NewMetric()
		m.init()
	}
	return m
}

type metricPostgresqlOperations struct {
	data pmetric.Metric // data buffer for generated metric.
	config MetricConfig // metric config provided by user.
	capacity int // max observed number of data points added to the metric.
}

// init fills postgresql.operations metric with initial data.
func (m *metricPostgresqlOperations) init() {
	m.data.SetName("postgresql.operations")
	m.data.SetDescription("The number of db row operations.")
	m.data.SetUnit("1")
	m.data.SetEmptySum()
	m.data.Sum().SetIsMonotonic(true)
	m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
	m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
}

func (m *metricPostgresqlOperations) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, operationAttributeValue string) {
	if !m.config.Enabled {
		return
	}
	dp := m.data.Sum().DataPoints().AppendEmpty()
	dp.SetStartTimestamp(start)
	dp.SetTimestamp(ts)
	dp.SetIntValue(val)
	dp.Attributes().PutStr("operation", operationAttributeValue)
}

// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricPostgresqlOperations) updateCapacity() {
	if m.data.Sum().DataPoints().Len() > m.capacity {
		m.capacity = m.data.Sum().DataPoints().Len()
	}
}

// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricPostgresqlOperations) emit(metrics pmetric.MetricSlice) {
	if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
		m.updateCapacity()
		m.data.MoveTo(metrics.AppendEmpty())
		m.init()
	}
}

func newMetricPostgresqlOperations(cfg MetricConfig) metricPostgresqlOperations {
	m := metricPostgresqlOperations{config: cfg}
	if cfg.Enabled {
		m.data = pmetric.NewMetric()
		m.init()
	}
	return m
}

type metricPostgresqlReplicationDataDelay struct {
	data pmetric.Metric // data buffer for generated metric.
	config MetricConfig // metric config provided by user.
	capacity int // max observed number of data points added to the metric.
}

// init fills postgresql.replication.data_delay metric with initial data.
func (m *metricPostgresqlReplicationDataDelay) init() {
	m.data.SetName("postgresql.replication.data_delay")
	m.data.SetDescription("The amount of data delayed in replication.")
	m.data.SetUnit("By")
	m.data.SetEmptyGauge()
	m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}

func (m *metricPostgresqlReplicationDataDelay) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, replicationClientAttributeValue string) {
	if !m.config.Enabled {
		return
	}
	dp := m.data.Gauge().DataPoints().AppendEmpty()
	dp.SetStartTimestamp(start)
	dp.SetTimestamp(ts)
	dp.SetIntValue(val)
	dp.Attributes().PutStr("replication_client", replicationClientAttributeValue)
}

// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricPostgresqlReplicationDataDelay) updateCapacity() {
	if m.data.Gauge().DataPoints().Len() > m.capacity {
		m.capacity = m.data.Gauge().DataPoints().Len()
	}
}

// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricPostgresqlReplicationDataDelay) emit(metrics pmetric.MetricSlice) {
	if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
		m.updateCapacity()
		m.data.MoveTo(metrics.AppendEmpty())
		m.init()
	}
}

func newMetricPostgresqlReplicationDataDelay(cfg MetricConfig) metricPostgresqlReplicationDataDelay {
	m := metricPostgresqlReplicationDataDelay{config: cfg}
	if cfg.Enabled {
		m.data = pmetric.NewMetric()
		m.init()
	}
	return m
}

type metricPostgresqlRollbacks struct {
	data pmetric.Metric // data buffer for generated metric.
	config MetricConfig // metric config provided by user.
	capacity int // max observed number of data points added to the metric.
}

// init fills postgresql.rollbacks metric with initial data.
func (m *metricPostgresqlRollbacks) init() {
	m.data.SetName("postgresql.rollbacks")
	m.data.SetDescription("The number of rollbacks.")
	m.data.SetUnit("1")
	m.data.SetEmptySum()
	m.data.Sum().SetIsMonotonic(true)
	m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
}

func (m *metricPostgresqlRollbacks) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
	if !m.config.Enabled {
		return
	}
	dp := m.data.Sum().DataPoints().AppendEmpty()
	dp.SetStartTimestamp(start)
	dp.SetTimestamp(ts)
	dp.SetIntValue(val)
}

// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricPostgresqlRollbacks) updateCapacity() {
	if m.data.Sum().DataPoints().Len() > m.capacity {
		m.capacity = m.data.Sum().DataPoints().Len()
	}
}

// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricPostgresqlRollbacks) emit(metrics pmetric.MetricSlice) {
	if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
		m.updateCapacity()
		m.data.MoveTo(metrics.AppendEmpty())
		m.init()
	}
}

func newMetricPostgresqlRollbacks(cfg MetricConfig) metricPostgresqlRollbacks {
	m := metricPostgresqlRollbacks{config: cfg}
	if cfg.Enabled {
		m.data = pmetric.NewMetric()
		m.init()
	}
	return m
}

type metricPostgresqlRows struct {
	data pmetric.Metric // data buffer for generated metric.
	config MetricConfig // metric config provided by user.
	capacity int // max observed number of data points added to the metric.
}

// init fills postgresql.rows metric with initial data.
func (m *metricPostgresqlRows) init() {
	m.data.SetName("postgresql.rows")
	m.data.SetDescription("The number of rows in the database.")
	m.data.SetUnit("1")
	m.data.SetEmptySum()
	m.data.Sum().SetIsMonotonic(false)
	m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
	m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
}

func (m *metricPostgresqlRows) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, stateAttributeValue string) {
	if !m.config.Enabled {
		return
	}
	dp := m.data.Sum().DataPoints().AppendEmpty()
	dp.SetStartTimestamp(start)
	dp.SetTimestamp(ts)
	dp.SetIntValue(val)
	dp.Attributes().PutStr("state", stateAttributeValue)
}

// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricPostgresqlRows) updateCapacity() {
	if m.data.Sum().DataPoints().Len() > m.capacity {
		m.capacity = m.data.Sum().DataPoints().Len()
	}
}

// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricPostgresqlRows) emit(metrics pmetric.MetricSlice) {
	if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
		m.updateCapacity()
		m.data.MoveTo(metrics.AppendEmpty())
		m.init()
	}
}

func newMetricPostgresqlRows(cfg MetricConfig) metricPostgresqlRows {
	m := metricPostgresqlRows{config: cfg}
	if cfg.Enabled {
		m.data = pmetric.NewMetric()
		m.init()
	}
	return m
}

type metricPostgresqlSequentialScans struct {
	data pmetric.Metric // data buffer for generated metric.
	config MetricConfig // metric config provided by user.
	capacity int // max observed number of data points added to the metric.
}
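// NOTE (illustrative, not part of the generated file): sums initialized with
// SetIsMonotonic(false), such as postgresql.rows above, model values that can
// decrease as well as increase (an up-down counter), whereas monotonic sums
// like postgresql.commits only ever grow.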
// init fills postgresql.sequential_scans metric with initial data.
func (m *metricPostgresqlSequentialScans) init() {
	m.data.SetName("postgresql.sequential_scans")
	m.data.SetDescription("The number of sequential scans.")
	m.data.SetUnit("{sequential_scan}")
	m.data.SetEmptySum()
	m.data.Sum().SetIsMonotonic(true)
	m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
}

func (m *metricPostgresqlSequentialScans) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
	if !m.config.Enabled {
		return
	}
	dp := m.data.Sum().DataPoints().AppendEmpty()
	dp.SetStartTimestamp(start)
	dp.SetTimestamp(ts)
	dp.SetIntValue(val)
}

// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricPostgresqlSequentialScans) updateCapacity() {
	if m.data.Sum().DataPoints().Len() > m.capacity {
		m.capacity = m.data.Sum().DataPoints().Len()
	}
}

// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricPostgresqlSequentialScans) emit(metrics pmetric.MetricSlice) {
	if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
		m.updateCapacity()
		m.data.MoveTo(metrics.AppendEmpty())
		m.init()
	}
}

func newMetricPostgresqlSequentialScans(cfg MetricConfig) metricPostgresqlSequentialScans {
	m := metricPostgresqlSequentialScans{config: cfg}
	if cfg.Enabled {
		m.data = pmetric.NewMetric()
		m.init()
	}
	return m
}

type metricPostgresqlTableCount struct {
	data pmetric.Metric // data buffer for generated metric.
	config MetricConfig // metric config provided by user.
	capacity int // max observed number of data points added to the metric.
}

// init fills postgresql.table.count metric with initial data.
func (m *metricPostgresqlTableCount) init() {
	m.data.SetName("postgresql.table.count")
	m.data.SetDescription("Number of user tables in a database.")
	m.data.SetUnit("{table}")
	m.data.SetEmptySum()
	m.data.Sum().SetIsMonotonic(false)
	m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
}

func (m *metricPostgresqlTableCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
	if !m.config.Enabled {
		return
	}
	dp := m.data.Sum().DataPoints().AppendEmpty()
	dp.SetStartTimestamp(start)
	dp.SetTimestamp(ts)
	dp.SetIntValue(val)
}

// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricPostgresqlTableCount) updateCapacity() {
	if m.data.Sum().DataPoints().Len() > m.capacity {
		m.capacity = m.data.Sum().DataPoints().Len()
	}
}

// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricPostgresqlTableCount) emit(metrics pmetric.MetricSlice) {
	if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
		m.updateCapacity()
		m.data.MoveTo(metrics.AppendEmpty())
		m.init()
	}
}

func newMetricPostgresqlTableCount(cfg MetricConfig) metricPostgresqlTableCount {
	m := metricPostgresqlTableCount{config: cfg}
	if cfg.Enabled {
		m.data = pmetric.NewMetric()
		m.init()
	}
	return m
}

type metricPostgresqlTableSize struct {
	data pmetric.Metric // data buffer for generated metric.
	config MetricConfig // metric config provided by user.
	capacity int // max observed number of data points added to the metric.
}

// init fills postgresql.table.size metric with initial data.
func (m *metricPostgresqlTableSize) init() {
	m.data.SetName("postgresql.table.size")
	m.data.SetDescription("Disk space used by a table.")
	m.data.SetUnit("By")
	m.data.SetEmptySum()
	m.data.Sum().SetIsMonotonic(false)
	m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
}

func (m *metricPostgresqlTableSize) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
	if !m.config.Enabled {
		return
	}
	dp := m.data.Sum().DataPoints().AppendEmpty()
	dp.SetStartTimestamp(start)
	dp.SetTimestamp(ts)
	dp.SetIntValue(val)
}

// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricPostgresqlTableSize) updateCapacity() {
	if m.data.Sum().DataPoints().Len() > m.capacity {
		m.capacity = m.data.Sum().DataPoints().Len()
	}
}

// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricPostgresqlTableSize) emit(metrics pmetric.MetricSlice) {
	if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
		m.updateCapacity()
		m.data.MoveTo(metrics.AppendEmpty())
		m.init()
	}
}

func newMetricPostgresqlTableSize(cfg MetricConfig) metricPostgresqlTableSize {
	m := metricPostgresqlTableSize{config: cfg}
	if cfg.Enabled {
		m.data = pmetric.NewMetric()
		m.init()
	}
	return m
}

type metricPostgresqlTableVacuumCount struct {
	data pmetric.Metric // data buffer for generated metric.
	config MetricConfig // metric config provided by user.
	capacity int // max observed number of data points added to the metric.
}

// init fills postgresql.table.vacuum.count metric with initial data.
func (m *metricPostgresqlTableVacuumCount) init() {
	m.data.SetName("postgresql.table.vacuum.count")
	m.data.SetDescription("Number of times a table has manually been vacuumed.")
	m.data.SetUnit("{vacuums}")
	m.data.SetEmptySum()
	m.data.Sum().SetIsMonotonic(true)
	m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
}

func (m *metricPostgresqlTableVacuumCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
	if !m.config.Enabled {
		return
	}
	dp := m.data.Sum().DataPoints().AppendEmpty()
	dp.SetStartTimestamp(start)
	dp.SetTimestamp(ts)
	dp.SetIntValue(val)
}

// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricPostgresqlTableVacuumCount) updateCapacity() {
	if m.data.Sum().DataPoints().Len() > m.capacity {
		m.capacity = m.data.Sum().DataPoints().Len()
	}
}

// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricPostgresqlTableVacuumCount) emit(metrics pmetric.MetricSlice) {
	if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
		m.updateCapacity()
		m.data.MoveTo(metrics.AppendEmpty())
		m.init()
	}
}

func newMetricPostgresqlTableVacuumCount(cfg MetricConfig) metricPostgresqlTableVacuumCount {
	m := metricPostgresqlTableVacuumCount{config: cfg}
	if cfg.Enabled {
		m.data = pmetric.NewMetric()
		m.init()
	}
	return m
}

type metricPostgresqlTempFiles struct {
	data pmetric.Metric // data buffer for generated metric.
	config MetricConfig // metric config provided by user.
	capacity int // max observed number of data points added to the metric.
}

// init fills postgresql.temp_files metric with initial data.
func (m *metricPostgresqlTempFiles) init() {
	m.data.SetName("postgresql.temp_files")
	m.data.SetDescription("The number of temp files.")
	m.data.SetUnit("{temp_file}")
	m.data.SetEmptySum()
	m.data.Sum().SetIsMonotonic(true)
	m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
}

func (m *metricPostgresqlTempFiles) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
	if !m.config.Enabled {
		return
	}
	dp := m.data.Sum().DataPoints().AppendEmpty()
	dp.SetStartTimestamp(start)
	dp.SetTimestamp(ts)
	dp.SetIntValue(val)
}

// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricPostgresqlTempFiles) updateCapacity() {
	if m.data.Sum().DataPoints().Len() > m.capacity {
		m.capacity = m.data.Sum().DataPoints().Len()
	}
}

// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricPostgresqlTempFiles) emit(metrics pmetric.MetricSlice) {
	if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
		m.updateCapacity()
		m.data.MoveTo(metrics.AppendEmpty())
		m.init()
	}
}

func newMetricPostgresqlTempFiles(cfg MetricConfig) metricPostgresqlTempFiles {
	m := metricPostgresqlTempFiles{config: cfg}
	if cfg.Enabled {
		m.data = pmetric.NewMetric()
		m.init()
	}
	return m
}

type metricPostgresqlTupDeleted struct {
	data pmetric.Metric // data buffer for generated metric.
	config MetricConfig // metric config provided by user.
	capacity int // max observed number of data points added to the metric.
}

// init fills postgresql.tup_deleted metric with initial data.
func (m *metricPostgresqlTupDeleted) init() {
	m.data.SetName("postgresql.tup_deleted")
	m.data.SetDescription("Number of rows deleted by queries in the database.")
	m.data.SetUnit("{tup_deleted}")
	m.data.SetEmptySum()
	m.data.Sum().SetIsMonotonic(true)
	m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
}

func (m *metricPostgresqlTupDeleted) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
	if !m.config.Enabled {
		return
	}
	dp := m.data.Sum().DataPoints().AppendEmpty()
	dp.SetStartTimestamp(start)
	dp.SetTimestamp(ts)
	dp.SetIntValue(val)
}

// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricPostgresqlTupDeleted) updateCapacity() {
	if m.data.Sum().DataPoints().Len() > m.capacity {
		m.capacity = m.data.Sum().DataPoints().Len()
	}
}

// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricPostgresqlTupDeleted) emit(metrics pmetric.MetricSlice) {
	if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
		m.updateCapacity()
		m.data.MoveTo(metrics.AppendEmpty())
		m.init()
	}
}

func newMetricPostgresqlTupDeleted(cfg MetricConfig) metricPostgresqlTupDeleted {
	m := metricPostgresqlTupDeleted{config: cfg}
	if cfg.Enabled {
		m.data = pmetric.NewMetric()
		m.init()
	}
	return m
}

type metricPostgresqlTupFetched struct {
	data pmetric.Metric // data buffer for generated metric.
	config MetricConfig // metric config provided by user.
	capacity int // max observed number of data points added to the metric.
}

// init fills postgresql.tup_fetched metric with initial data.
func (m *metricPostgresqlTupFetched) init() {
	m.data.SetName("postgresql.tup_fetched")
	m.data.SetDescription("Number of rows fetched by queries in the database.")
	m.data.SetUnit("{tup_fetched}")
	m.data.SetEmptySum()
	m.data.Sum().SetIsMonotonic(true)
	m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
}

func (m *metricPostgresqlTupFetched) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
	if !m.config.Enabled {
		return
	}
	dp := m.data.Sum().DataPoints().AppendEmpty()
	dp.SetStartTimestamp(start)
	dp.SetTimestamp(ts)
	dp.SetIntValue(val)
}

// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricPostgresqlTupFetched) updateCapacity() {
	if m.data.Sum().DataPoints().Len() > m.capacity {
		m.capacity = m.data.Sum().DataPoints().Len()
	}
}

// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricPostgresqlTupFetched) emit(metrics pmetric.MetricSlice) {
	if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
		m.updateCapacity()
		m.data.MoveTo(metrics.AppendEmpty())
		m.init()
	}
}

func newMetricPostgresqlTupFetched(cfg MetricConfig) metricPostgresqlTupFetched {
	m := metricPostgresqlTupFetched{config: cfg}
	if cfg.Enabled {
		m.data = pmetric.NewMetric()
		m.init()
	}
	return m
}

type metricPostgresqlTupInserted struct {
	data pmetric.Metric // data buffer for generated metric.
	config MetricConfig // metric config provided by user.
	capacity int // max observed number of data points added to the metric.
}

// init fills postgresql.tup_inserted metric with initial data.
func (m *metricPostgresqlTupInserted) init() {
	m.data.SetName("postgresql.tup_inserted")
	m.data.SetDescription("Number of rows inserted by queries in the database.")
	m.data.SetUnit("{tup_inserted}")
	m.data.SetEmptySum()
	m.data.Sum().SetIsMonotonic(true)
	m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
}

func (m *metricPostgresqlTupInserted) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
	if !m.config.Enabled {
		return
	}
	dp := m.data.Sum().DataPoints().AppendEmpty()
	dp.SetStartTimestamp(start)
	dp.SetTimestamp(ts)
	dp.SetIntValue(val)
}

// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricPostgresqlTupInserted) updateCapacity() {
	if m.data.Sum().DataPoints().Len() > m.capacity {
		m.capacity = m.data.Sum().DataPoints().Len()
	}
}

// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricPostgresqlTupInserted) emit(metrics pmetric.MetricSlice) {
	if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
		m.updateCapacity()
		m.data.MoveTo(metrics.AppendEmpty())
		m.init()
	}
}

func newMetricPostgresqlTupInserted(cfg MetricConfig) metricPostgresqlTupInserted {
	m := metricPostgresqlTupInserted{config: cfg}
	if cfg.Enabled {
		m.data = pmetric.NewMetric()
		m.init()
	}
	return m
}

type metricPostgresqlTupReturned struct {
	data pmetric.Metric // data buffer for generated metric.
	config MetricConfig // metric config provided by user.
	capacity int // max observed number of data points added to the metric.
}

// init fills postgresql.tup_returned metric with initial data.
func (m *metricPostgresqlTupReturned) init() {
	m.data.SetName("postgresql.tup_returned")
	m.data.SetDescription("Number of rows returned by queries in the database.")
	m.data.SetUnit("{tup_returned}")
	m.data.SetEmptySum()
	m.data.Sum().SetIsMonotonic(true)
	m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
}

func (m *metricPostgresqlTupReturned) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
	if !m.config.Enabled {
		return
	}
	dp := m.data.Sum().DataPoints().AppendEmpty()
	dp.SetStartTimestamp(start)
	dp.SetTimestamp(ts)
	dp.SetIntValue(val)
}

// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricPostgresqlTupReturned) updateCapacity() {
	if m.data.Sum().DataPoints().Len() > m.capacity {
		m.capacity = m.data.Sum().DataPoints().Len()
	}
}

// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricPostgresqlTupReturned) emit(metrics pmetric.MetricSlice) {
	if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
		m.updateCapacity()
		m.data.MoveTo(metrics.AppendEmpty())
		m.init()
	}
}

func newMetricPostgresqlTupReturned(cfg MetricConfig) metricPostgresqlTupReturned {
	m := metricPostgresqlTupReturned{config: cfg}
	if cfg.Enabled {
		m.data = pmetric.NewMetric()
		m.init()
	}
	return m
}

type metricPostgresqlTupUpdated struct {
	data     pmetric.Metric // data buffer for generated metric.
	config   MetricConfig   // metric config provided by user.
	capacity int            // max observed number of data points added to the metric.
}

// init fills postgresql.tup_updated metric with initial data.
func (m *metricPostgresqlTupUpdated) init() {
	m.data.SetName("postgresql.tup_updated")
	m.data.SetDescription("Number of rows updated by queries in the database.")
	m.data.SetUnit("{tup_updated}")
	m.data.SetEmptySum()
	m.data.Sum().SetIsMonotonic(true)
	m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
}

func (m *metricPostgresqlTupUpdated) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
	if !m.config.Enabled {
		return
	}
	dp := m.data.Sum().DataPoints().AppendEmpty()
	dp.SetStartTimestamp(start)
	dp.SetTimestamp(ts)
	dp.SetIntValue(val)
}

// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricPostgresqlTupUpdated) updateCapacity() {
	if m.data.Sum().DataPoints().Len() > m.capacity {
		m.capacity = m.data.Sum().DataPoints().Len()
	}
}

// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricPostgresqlTupUpdated) emit(metrics pmetric.MetricSlice) {
	if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
		m.updateCapacity()
		m.data.MoveTo(metrics.AppendEmpty())
		m.init()
	}
}

func newMetricPostgresqlTupUpdated(cfg MetricConfig) metricPostgresqlTupUpdated {
	m := metricPostgresqlTupUpdated{config: cfg}
	if cfg.Enabled {
		m.data = pmetric.NewMetric()
		m.init()
	}
	return m
}

type metricPostgresqlWalAge struct {
	data     pmetric.Metric // data buffer for generated metric.
	config   MetricConfig   // metric config provided by user.
	capacity int            // max observed number of data points added to the metric.
}

// init fills postgresql.wal.age metric with initial data.
func (m *metricPostgresqlWalAge) init() {
	m.data.SetName("postgresql.wal.age")
	m.data.SetDescription("Age of the oldest WAL file.")
	m.data.SetUnit("s")
	m.data.SetEmptyGauge()
}

func (m *metricPostgresqlWalAge) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
	if !m.config.Enabled {
		return
	}
	dp := m.data.Gauge().DataPoints().AppendEmpty()
	dp.SetStartTimestamp(start)
	dp.SetTimestamp(ts)
	dp.SetIntValue(val)
}

// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricPostgresqlWalAge) updateCapacity() {
	if m.data.Gauge().DataPoints().Len() > m.capacity {
		m.capacity = m.data.Gauge().DataPoints().Len()
	}
}

// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricPostgresqlWalAge) emit(metrics pmetric.MetricSlice) {
	if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
		m.updateCapacity()
		m.data.MoveTo(metrics.AppendEmpty())
		m.init()
	}
}

func newMetricPostgresqlWalAge(cfg MetricConfig) metricPostgresqlWalAge {
	m := metricPostgresqlWalAge{config: cfg}
	if cfg.Enabled {
		m.data = pmetric.NewMetric()
		m.init()
	}
	return m
}

type metricPostgresqlWalDelay struct {
	data     pmetric.Metric // data buffer for generated metric.
	config   MetricConfig   // metric config provided by user.
	capacity int            // max observed number of data points added to the metric.
}

// init fills postgresql.wal.delay metric with initial data.
func (m *metricPostgresqlWalDelay) init() {
	m.data.SetName("postgresql.wal.delay")
	m.data.SetDescription("Time between flushing recent WAL locally and receiving notification that the standby server has completed an operation with it.")
	m.data.SetUnit("s")
	m.data.SetEmptyGauge()
	m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}

func (m *metricPostgresqlWalDelay) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, walOperationLagAttributeValue string, replicationClientAttributeValue string) {
	if !m.config.Enabled {
		return
	}
	dp := m.data.Gauge().DataPoints().AppendEmpty()
	dp.SetStartTimestamp(start)
	dp.SetTimestamp(ts)
	dp.SetDoubleValue(val)
	dp.Attributes().PutStr("operation", walOperationLagAttributeValue)
	dp.Attributes().PutStr("replication_client", replicationClientAttributeValue)
}

// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricPostgresqlWalDelay) updateCapacity() {
	if m.data.Gauge().DataPoints().Len() > m.capacity {
		m.capacity = m.data.Gauge().DataPoints().Len()
	}
}

// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricPostgresqlWalDelay) emit(metrics pmetric.MetricSlice) {
	if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
		m.updateCapacity()
		m.data.MoveTo(metrics.AppendEmpty())
		m.init()
	}
}

func newMetricPostgresqlWalDelay(cfg MetricConfig) metricPostgresqlWalDelay {
	m := metricPostgresqlWalDelay{config: cfg}
	if cfg.Enabled {
		m.data = pmetric.NewMetric()
		m.init()
	}
	return m
}

type metricPostgresqlWalLag struct {
	data     pmetric.Metric // data buffer for generated metric.
	config   MetricConfig   // metric config provided by user.
	capacity int            // max observed number of data points added to the metric.
}

// init fills postgresql.wal.lag metric with initial data.
func (m *metricPostgresqlWalLag) init() {
	m.data.SetName("postgresql.wal.lag")
	m.data.SetDescription("Time between flushing recent WAL locally and receiving notification that the standby server has completed an operation with it.")
	m.data.SetUnit("s")
	m.data.SetEmptyGauge()
	m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}

func (m *metricPostgresqlWalLag) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, walOperationLagAttributeValue string, replicationClientAttributeValue string) {
	if !m.config.Enabled {
		return
	}
	dp := m.data.Gauge().DataPoints().AppendEmpty()
	dp.SetStartTimestamp(start)
	dp.SetTimestamp(ts)
	dp.SetIntValue(val)
	dp.Attributes().PutStr("operation", walOperationLagAttributeValue)
	dp.Attributes().PutStr("replication_client", replicationClientAttributeValue)
}

// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricPostgresqlWalLag) updateCapacity() {
	if m.data.Gauge().DataPoints().Len() > m.capacity {
		m.capacity = m.data.Gauge().DataPoints().Len()
	}
}

// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricPostgresqlWalLag) emit(metrics pmetric.MetricSlice) {
	if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
		m.updateCapacity()
		m.data.MoveTo(metrics.AppendEmpty())
		m.init()
	}
}

func newMetricPostgresqlWalLag(cfg MetricConfig) metricPostgresqlWalLag {
	m := metricPostgresqlWalLag{config: cfg}
	if cfg.Enabled {
		m.data = pmetric.NewMetric()
		m.init()
	}
	return m
}

// MetricsBuilder provides an interface for scrapers to report metrics while taking care of all the transformations
// required to produce the metric representation defined in the metadata and user config.
type MetricsBuilder struct {
	config          MetricsBuilderConfig // config of the metrics builder.
	startTime       pcommon.Timestamp    // start time that will be applied to all recorded data points.
	metricsCapacity int                  // maximum observed number of metrics per resource.
	metricsBuffer   pmetric.Metrics      // accumulates metrics data before emitting.
	buildInfo       component.BuildInfo  // contains version information.
	resourceAttributeIncludeFilter map[string]filter.Filter
	resourceAttributeExcludeFilter map[string]filter.Filter

	metricPostgresqlBackends                 metricPostgresqlBackends
	metricPostgresqlBgwriterBuffersAllocated metricPostgresqlBgwriterBuffersAllocated
	metricPostgresqlBgwriterBuffersWrites    metricPostgresqlBgwriterBuffersWrites
	metricPostgresqlBgwriterCheckpointCount  metricPostgresqlBgwriterCheckpointCount
	metricPostgresqlBgwriterDuration         metricPostgresqlBgwriterDuration
	metricPostgresqlBgwriterMaxwritten       metricPostgresqlBgwriterMaxwritten
	metricPostgresqlBlksHit                  metricPostgresqlBlksHit
	metricPostgresqlBlksRead                 metricPostgresqlBlksRead
	metricPostgresqlBlocksRead               metricPostgresqlBlocksRead
	metricPostgresqlCommits                  metricPostgresqlCommits
	metricPostgresqlConnectionMax            metricPostgresqlConnectionMax
	metricPostgresqlDatabaseCount            metricPostgresqlDatabaseCount
	metricPostgresqlDatabaseLocks            metricPostgresqlDatabaseLocks
	metricPostgresqlDbSize                   metricPostgresqlDbSize
	metricPostgresqlDeadlocks                metricPostgresqlDeadlocks
	metricPostgresqlIndexScans               metricPostgresqlIndexScans
	metricPostgresqlIndexSize                metricPostgresqlIndexSize
	metricPostgresqlOperations               metricPostgresqlOperations
	metricPostgresqlReplicationDataDelay     metricPostgresqlReplicationDataDelay
	metricPostgresqlRollbacks                metricPostgresqlRollbacks
	metricPostgresqlRows                     metricPostgresqlRows
	metricPostgresqlSequentialScans          metricPostgresqlSequentialScans
	metricPostgresqlTableCount               metricPostgresqlTableCount
	metricPostgresqlTableSize                metricPostgresqlTableSize
	metricPostgresqlTableVacuumCount         metricPostgresqlTableVacuumCount
	metricPostgresqlTempFiles                metricPostgresqlTempFiles
	metricPostgresqlTupDeleted               metricPostgresqlTupDeleted
	metricPostgresqlTupFetched               metricPostgresqlTupFetched
	metricPostgresqlTupInserted              metricPostgresqlTupInserted
	metricPostgresqlTupReturned              metricPostgresqlTupReturned
	metricPostgresqlTupUpdated               metricPostgresqlTupUpdated
	metricPostgresqlWalAge                   metricPostgresqlWalAge
	metricPostgresqlWalDelay                 metricPostgresqlWalDelay
	metricPostgresqlWalLag                   metricPostgresqlWalLag
}

// MetricBuilderOption applies changes to default metrics builder.
type MetricBuilderOption interface {
	apply(*MetricsBuilder)
}

type metricBuilderOptionFunc func(mb *MetricsBuilder)

func (mbof metricBuilderOptionFunc) apply(mb *MetricsBuilder) {
	mbof(mb)
}
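// MetricBuilderOption values are applied in order by NewMetricsBuilder. As an
// illustrative sketch only (not additional generated API), a custom option can
// be built from metricBuilderOptionFunc the same way WithStartTime is below;
// DefaultMetricsBuilderConfig comes from this package's generated config, and
// settings is assumed to be the receiver.Settings handed to the factory:
//
//	opt := metricBuilderOptionFunc(func(mb *MetricsBuilder) {
//		mb.startTime = pcommon.NewTimestampFromTime(time.Unix(0, 0)) // hypothetical fixed epoch start
//	})
//	mb := NewMetricsBuilder(DefaultMetricsBuilderConfig(), settings, opt)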
// WithStartTime sets startTime on the metrics builder.
func WithStartTime(startTime pcommon.Timestamp) MetricBuilderOption {
	return metricBuilderOptionFunc(func(mb *MetricsBuilder) {
		mb.startTime = startTime
	})
}

func NewMetricsBuilder(mbc MetricsBuilderConfig, settings receiver.Settings, options ...MetricBuilderOption) *MetricsBuilder {
	mb := &MetricsBuilder{
		config:        mbc,
		startTime:     pcommon.NewTimestampFromTime(time.Now()),
		metricsBuffer: pmetric.NewMetrics(),
		buildInfo:     settings.BuildInfo,

		metricPostgresqlBackends:                 newMetricPostgresqlBackends(mbc.Metrics.PostgresqlBackends),
		metricPostgresqlBgwriterBuffersAllocated: newMetricPostgresqlBgwriterBuffersAllocated(mbc.Metrics.PostgresqlBgwriterBuffersAllocated),
		metricPostgresqlBgwriterBuffersWrites:    newMetricPostgresqlBgwriterBuffersWrites(mbc.Metrics.PostgresqlBgwriterBuffersWrites),
		metricPostgresqlBgwriterCheckpointCount:  newMetricPostgresqlBgwriterCheckpointCount(mbc.Metrics.PostgresqlBgwriterCheckpointCount),
		metricPostgresqlBgwriterDuration:         newMetricPostgresqlBgwriterDuration(mbc.Metrics.PostgresqlBgwriterDuration),
		metricPostgresqlBgwriterMaxwritten:       newMetricPostgresqlBgwriterMaxwritten(mbc.Metrics.PostgresqlBgwriterMaxwritten),
		metricPostgresqlBlksHit:                  newMetricPostgresqlBlksHit(mbc.Metrics.PostgresqlBlksHit),
		metricPostgresqlBlksRead:                 newMetricPostgresqlBlksRead(mbc.Metrics.PostgresqlBlksRead),
		metricPostgresqlBlocksRead:               newMetricPostgresqlBlocksRead(mbc.Metrics.PostgresqlBlocksRead),
		metricPostgresqlCommits:                  newMetricPostgresqlCommits(mbc.Metrics.PostgresqlCommits),
		metricPostgresqlConnectionMax:            newMetricPostgresqlConnectionMax(mbc.Metrics.PostgresqlConnectionMax),
		metricPostgresqlDatabaseCount:            newMetricPostgresqlDatabaseCount(mbc.Metrics.PostgresqlDatabaseCount),
		metricPostgresqlDatabaseLocks:            newMetricPostgresqlDatabaseLocks(mbc.Metrics.PostgresqlDatabaseLocks),
		metricPostgresqlDbSize:                   newMetricPostgresqlDbSize(mbc.Metrics.PostgresqlDbSize),
		metricPostgresqlDeadlocks:                newMetricPostgresqlDeadlocks(mbc.Metrics.PostgresqlDeadlocks),
		metricPostgresqlIndexScans:               newMetricPostgresqlIndexScans(mbc.Metrics.PostgresqlIndexScans),
		metricPostgresqlIndexSize:                newMetricPostgresqlIndexSize(mbc.Metrics.PostgresqlIndexSize),
		metricPostgresqlOperations:               newMetricPostgresqlOperations(mbc.Metrics.PostgresqlOperations),
		metricPostgresqlReplicationDataDelay:     newMetricPostgresqlReplicationDataDelay(mbc.Metrics.PostgresqlReplicationDataDelay),
		metricPostgresqlRollbacks:                newMetricPostgresqlRollbacks(mbc.Metrics.PostgresqlRollbacks),
		metricPostgresqlRows:                     newMetricPostgresqlRows(mbc.Metrics.PostgresqlRows),
		metricPostgresqlSequentialScans:          newMetricPostgresqlSequentialScans(mbc.Metrics.PostgresqlSequentialScans),
		metricPostgresqlTableCount:               newMetricPostgresqlTableCount(mbc.Metrics.PostgresqlTableCount),
		metricPostgresqlTableSize:                newMetricPostgresqlTableSize(mbc.Metrics.PostgresqlTableSize),
		metricPostgresqlTableVacuumCount:         newMetricPostgresqlTableVacuumCount(mbc.Metrics.PostgresqlTableVacuumCount),
		metricPostgresqlTempFiles:                newMetricPostgresqlTempFiles(mbc.Metrics.PostgresqlTempFiles),
		metricPostgresqlTupDeleted:               newMetricPostgresqlTupDeleted(mbc.Metrics.PostgresqlTupDeleted),
		metricPostgresqlTupFetched:               newMetricPostgresqlTupFetched(mbc.Metrics.PostgresqlTupFetched),
		metricPostgresqlTupInserted:              newMetricPostgresqlTupInserted(mbc.Metrics.PostgresqlTupInserted),
		metricPostgresqlTupReturned:              newMetricPostgresqlTupReturned(mbc.Metrics.PostgresqlTupReturned),
		metricPostgresqlTupUpdated:               newMetricPostgresqlTupUpdated(mbc.Metrics.PostgresqlTupUpdated),
		metricPostgresqlWalAge:   newMetricPostgresqlWalAge(mbc.Metrics.PostgresqlWalAge),
		metricPostgresqlWalDelay: newMetricPostgresqlWalDelay(mbc.Metrics.PostgresqlWalDelay),
		metricPostgresqlWalLag:   newMetricPostgresqlWalLag(mbc.Metrics.PostgresqlWalLag),

		resourceAttributeIncludeFilter: make(map[string]filter.Filter),
		resourceAttributeExcludeFilter: make(map[string]filter.Filter),
	}
	if mbc.ResourceAttributes.PostgresqlDatabaseName.MetricsInclude != nil {
		mb.resourceAttributeIncludeFilter["postgresql.database.name"] = filter.CreateFilter(mbc.ResourceAttributes.PostgresqlDatabaseName.MetricsInclude)
	}
	if mbc.ResourceAttributes.PostgresqlDatabaseName.MetricsExclude != nil {
		mb.resourceAttributeExcludeFilter["postgresql.database.name"] = filter.CreateFilter(mbc.ResourceAttributes.PostgresqlDatabaseName.MetricsExclude)
	}
	if mbc.ResourceAttributes.PostgresqlIndexName.MetricsInclude != nil {
		mb.resourceAttributeIncludeFilter["postgresql.index.name"] = filter.CreateFilter(mbc.ResourceAttributes.PostgresqlIndexName.MetricsInclude)
	}
	if mbc.ResourceAttributes.PostgresqlIndexName.MetricsExclude != nil {
		mb.resourceAttributeExcludeFilter["postgresql.index.name"] = filter.CreateFilter(mbc.ResourceAttributes.PostgresqlIndexName.MetricsExclude)
	}
	if mbc.ResourceAttributes.PostgresqlSchemaName.MetricsInclude != nil {
		mb.resourceAttributeIncludeFilter["postgresql.schema.name"] = filter.CreateFilter(mbc.ResourceAttributes.PostgresqlSchemaName.MetricsInclude)
	}
	if mbc.ResourceAttributes.PostgresqlSchemaName.MetricsExclude != nil {
		mb.resourceAttributeExcludeFilter["postgresql.schema.name"] = filter.CreateFilter(mbc.ResourceAttributes.PostgresqlSchemaName.MetricsExclude)
	}
	if mbc.ResourceAttributes.PostgresqlTableName.MetricsInclude != nil {
		mb.resourceAttributeIncludeFilter["postgresql.table.name"] = filter.CreateFilter(mbc.ResourceAttributes.PostgresqlTableName.MetricsInclude)
	}
	if mbc.ResourceAttributes.PostgresqlTableName.MetricsExclude != nil {
		mb.resourceAttributeExcludeFilter["postgresql.table.name"] = filter.CreateFilter(mbc.ResourceAttributes.PostgresqlTableName.MetricsExclude)
	}
	for _, op := range options {
		op.apply(mb)
	}
	return mb
}

// NewResourceBuilder returns a new resource builder that should be used to build a resource associated with the emitted metrics.
func (mb *MetricsBuilder) NewResourceBuilder() *ResourceBuilder {
	return NewResourceBuilder(mb.config.ResourceAttributes)
}

// updateCapacity updates max length of metrics and resource attributes that will be used for the slice capacity.
func (mb *MetricsBuilder) updateCapacity(rm pmetric.ResourceMetrics) {
	if mb.metricsCapacity < rm.ScopeMetrics().At(0).Metrics().Len() {
		mb.metricsCapacity = rm.ScopeMetrics().At(0).Metrics().Len()
	}
}

// ResourceMetricsOption applies changes to provided resource metrics.
type ResourceMetricsOption interface {
	apply(pmetric.ResourceMetrics)
}

type resourceMetricsOptionFunc func(pmetric.ResourceMetrics)

func (rmof resourceMetricsOptionFunc) apply(rm pmetric.ResourceMetrics) {
	rmof(rm)
}

// WithResource sets the provided resource on the emitted ResourceMetrics.
// It's recommended to use ResourceBuilder to create the resource.
func WithResource(res pcommon.Resource) ResourceMetricsOption {
	return resourceMetricsOptionFunc(func(rm pmetric.ResourceMetrics) {
		res.CopyTo(rm.Resource())
	})
}
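// As the WithResource comment above suggests, the ResourceBuilder is the
// intended way to construct the resource. A minimal sketch, assuming the
// setter name generated alongside this file in generated_resource.go for the
// postgresql.database.name resource attribute:
//
//	rb := mb.NewResourceBuilder()
//	rb.SetPostgresqlDatabaseName("postgres")
//	mb.EmitForResource(WithResource(rb.Emit()))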
// WithStartTimeOverride overrides start time for all the resource metrics data points.
// This option should only be used if a different start time has to be set on metrics coming from different resources.
func WithStartTimeOverride(start pcommon.Timestamp) ResourceMetricsOption {
	return resourceMetricsOptionFunc(func(rm pmetric.ResourceMetrics) {
		var dps pmetric.NumberDataPointSlice
		metrics := rm.ScopeMetrics().At(0).Metrics()
		for i := 0; i < metrics.Len(); i++ {
			switch metrics.At(i).Type() {
			case pmetric.MetricTypeGauge:
				dps = metrics.At(i).Gauge().DataPoints()
			case pmetric.MetricTypeSum:
				dps = metrics.At(i).Sum().DataPoints()
			}
			for j := 0; j < dps.Len(); j++ {
				dps.At(j).SetStartTimestamp(start)
			}
		}
	})
}

// EmitForResource saves all the generated metrics under a new resource and updates the internal state to be ready for
// recording another set of data points as part of another resource. This function can be helpful when one scraper
// needs to emit metrics from several resources. Otherwise, calling this function is not required; the `Emit`
// function can be called instead.
// Resource attributes should be provided as ResourceMetricsOption arguments.
func (mb *MetricsBuilder) EmitForResource(options ...ResourceMetricsOption) {
	rm := pmetric.NewResourceMetrics()
	ils := rm.ScopeMetrics().AppendEmpty()
	ils.Scope().SetName(ScopeName)
	ils.Scope().SetVersion(mb.buildInfo.Version)
	ils.Metrics().EnsureCapacity(mb.metricsCapacity)
	mb.metricPostgresqlBackends.emit(ils.Metrics())
	mb.metricPostgresqlBgwriterBuffersAllocated.emit(ils.Metrics())
	mb.metricPostgresqlBgwriterBuffersWrites.emit(ils.Metrics())
	mb.metricPostgresqlBgwriterCheckpointCount.emit(ils.Metrics())
	mb.metricPostgresqlBgwriterDuration.emit(ils.Metrics())
	mb.metricPostgresqlBgwriterMaxwritten.emit(ils.Metrics())
	mb.metricPostgresqlBlksHit.emit(ils.Metrics())
	mb.metricPostgresqlBlksRead.emit(ils.Metrics())
	mb.metricPostgresqlBlocksRead.emit(ils.Metrics())
	mb.metricPostgresqlCommits.emit(ils.Metrics())
	mb.metricPostgresqlConnectionMax.emit(ils.Metrics())
	mb.metricPostgresqlDatabaseCount.emit(ils.Metrics())
	mb.metricPostgresqlDatabaseLocks.emit(ils.Metrics())
	mb.metricPostgresqlDbSize.emit(ils.Metrics())
	mb.metricPostgresqlDeadlocks.emit(ils.Metrics())
	mb.metricPostgresqlIndexScans.emit(ils.Metrics())
	mb.metricPostgresqlIndexSize.emit(ils.Metrics())
	mb.metricPostgresqlOperations.emit(ils.Metrics())
	mb.metricPostgresqlReplicationDataDelay.emit(ils.Metrics())
	mb.metricPostgresqlRollbacks.emit(ils.Metrics())
	mb.metricPostgresqlRows.emit(ils.Metrics())
	mb.metricPostgresqlSequentialScans.emit(ils.Metrics())
	mb.metricPostgresqlTableCount.emit(ils.Metrics())
	mb.metricPostgresqlTableSize.emit(ils.Metrics())
	mb.metricPostgresqlTableVacuumCount.emit(ils.Metrics())
	mb.metricPostgresqlTempFiles.emit(ils.Metrics())
	mb.metricPostgresqlTupDeleted.emit(ils.Metrics())
	mb.metricPostgresqlTupFetched.emit(ils.Metrics())
	mb.metricPostgresqlTupInserted.emit(ils.Metrics())
	mb.metricPostgresqlTupReturned.emit(ils.Metrics())
	mb.metricPostgresqlTupUpdated.emit(ils.Metrics())
	mb.metricPostgresqlWalAge.emit(ils.Metrics())
	mb.metricPostgresqlWalDelay.emit(ils.Metrics())
	mb.metricPostgresqlWalLag.emit(ils.Metrics())

	for _, op := range options {
		op.apply(rm)
	}
	for attr, filter := range mb.resourceAttributeIncludeFilter {
		if val, ok := rm.Resource().Attributes().Get(attr); ok && !filter.Matches(val.AsString()) {
			return
		}
	}
	for attr, filter := range mb.resourceAttributeExcludeFilter {
		if val, ok := rm.Resource().Attributes().Get(attr); ok && filter.Matches(val.AsString()) {
			return
		}
	}

	if ils.Metrics().Len() > 0 {
		mb.updateCapacity(rm)
		rm.MoveTo(mb.metricsBuffer.ResourceMetrics().AppendEmpty())
	}
}
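// EmitForResource is what a scraper calls once per monitored resource, for
// example once per database. A hedged sketch of that loop (the database list
// and the sizeFor helper are hypothetical placeholders, not part of this
// generated file; now is assumed to be a pcommon.Timestamp):
//
//	for _, db := range []string{"postgres", "app"} {
//		mb.RecordPostgresqlDbSizeDataPoint(now, sizeFor(db))
//		rb := mb.NewResourceBuilder()
//		rb.SetPostgresqlDatabaseName(db)
//		mb.EmitForResource(WithResource(rb.Emit()))
//	}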
// Emit returns all the metrics accumulated by the metrics builder and updates the internal state to be ready for
// recording another set of metrics. This function is responsible for applying all the transformations required to
// produce the metric representation defined in the metadata and user config, e.g. delta or cumulative.
func (mb *MetricsBuilder) Emit(options ...ResourceMetricsOption) pmetric.Metrics {
	mb.EmitForResource(options...)
	metrics := mb.metricsBuffer
	mb.metricsBuffer = pmetric.NewMetrics()
	return metrics
}

// RecordPostgresqlBackendsDataPoint adds a data point to postgresql.backends metric.
func (mb *MetricsBuilder) RecordPostgresqlBackendsDataPoint(ts pcommon.Timestamp, val int64) {
	mb.metricPostgresqlBackends.recordDataPoint(mb.startTime, ts, val)
}

// RecordPostgresqlBgwriterBuffersAllocatedDataPoint adds a data point to postgresql.bgwriter.buffers.allocated metric.
func (mb *MetricsBuilder) RecordPostgresqlBgwriterBuffersAllocatedDataPoint(ts pcommon.Timestamp, val int64) {
	mb.metricPostgresqlBgwriterBuffersAllocated.recordDataPoint(mb.startTime, ts, val)
}

// RecordPostgresqlBgwriterBuffersWritesDataPoint adds a data point to postgresql.bgwriter.buffers.writes metric.
func (mb *MetricsBuilder) RecordPostgresqlBgwriterBuffersWritesDataPoint(ts pcommon.Timestamp, val int64, bgBufferSourceAttributeValue AttributeBgBufferSource) {
	mb.metricPostgresqlBgwriterBuffersWrites.recordDataPoint(mb.startTime, ts, val, bgBufferSourceAttributeValue.String())
}

// RecordPostgresqlBgwriterCheckpointCountDataPoint adds a data point to postgresql.bgwriter.checkpoint.count metric.
func (mb *MetricsBuilder) RecordPostgresqlBgwriterCheckpointCountDataPoint(ts pcommon.Timestamp, val int64, bgCheckpointTypeAttributeValue AttributeBgCheckpointType) {
	mb.metricPostgresqlBgwriterCheckpointCount.recordDataPoint(mb.startTime, ts, val, bgCheckpointTypeAttributeValue.String())
}

// RecordPostgresqlBgwriterDurationDataPoint adds a data point to postgresql.bgwriter.duration metric.
func (mb *MetricsBuilder) RecordPostgresqlBgwriterDurationDataPoint(ts pcommon.Timestamp, val float64, bgDurationTypeAttributeValue AttributeBgDurationType) {
	mb.metricPostgresqlBgwriterDuration.recordDataPoint(mb.startTime, ts, val, bgDurationTypeAttributeValue.String())
}

// RecordPostgresqlBgwriterMaxwrittenDataPoint adds a data point to postgresql.bgwriter.maxwritten metric.
func (mb *MetricsBuilder) RecordPostgresqlBgwriterMaxwrittenDataPoint(ts pcommon.Timestamp, val int64) {
	mb.metricPostgresqlBgwriterMaxwritten.recordDataPoint(mb.startTime, ts, val)
}

// RecordPostgresqlBlksHitDataPoint adds a data point to postgresql.blks_hit metric.
func (mb *MetricsBuilder) RecordPostgresqlBlksHitDataPoint(ts pcommon.Timestamp, val int64) {
	mb.metricPostgresqlBlksHit.recordDataPoint(mb.startTime, ts, val)
}

// RecordPostgresqlBlksReadDataPoint adds a data point to postgresql.blks_read metric.
func (mb *MetricsBuilder) RecordPostgresqlBlksReadDataPoint(ts pcommon.Timestamp, val int64) {
	mb.metricPostgresqlBlksRead.recordDataPoint(mb.startTime, ts, val)
}

// RecordPostgresqlBlocksReadDataPoint adds a data point to postgresql.blocks_read metric.
func (mb *MetricsBuilder) RecordPostgresqlBlocksReadDataPoint(ts pcommon.Timestamp, val int64, sourceAttributeValue AttributeSource) {
	mb.metricPostgresqlBlocksRead.recordDataPoint(mb.startTime, ts, val, sourceAttributeValue.String())
}
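// Record* methods with enum-typed attributes take the generated Attribute*
// constants rather than raw strings; the builder stringifies them onto the
// data point. A minimal sketch, assuming now is a pcommon.Timestamp and the
// counter values come from the scraper's queries:
//
//	mb.RecordPostgresqlBgwriterBuffersWritesDataPoint(now, 42, AttributeBgBufferSourceBackend)
//	mb.RecordPostgresqlBlocksReadDataPoint(now, 7, AttributeSourceHeapRead)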
func (mb *MetricsBuilder) RecordPostgresqlCommitsDataPoint(ts pcommon.Timestamp, val int64) {
	mb.metricPostgresqlCommits.recordDataPoint(mb.startTime, ts, val)
}

// RecordPostgresqlConnectionMaxDataPoint adds a data point to postgresql.connection.max metric.
func (mb *MetricsBuilder) RecordPostgresqlConnectionMaxDataPoint(ts pcommon.Timestamp, val int64) {
	mb.metricPostgresqlConnectionMax.recordDataPoint(mb.startTime, ts, val)
}

// RecordPostgresqlDatabaseCountDataPoint adds a data point to postgresql.database.count metric.
func (mb *MetricsBuilder) RecordPostgresqlDatabaseCountDataPoint(ts pcommon.Timestamp, val int64) {
	mb.metricPostgresqlDatabaseCount.recordDataPoint(mb.startTime, ts, val)
}

// RecordPostgresqlDatabaseLocksDataPoint adds a data point to postgresql.database.locks metric.
func (mb *MetricsBuilder) RecordPostgresqlDatabaseLocksDataPoint(ts pcommon.Timestamp, val int64, relationAttributeValue string, modeAttributeValue string, lockTypeAttributeValue string) {
	mb.metricPostgresqlDatabaseLocks.recordDataPoint(mb.startTime, ts, val, relationAttributeValue, modeAttributeValue, lockTypeAttributeValue)
}

// RecordPostgresqlDbSizeDataPoint adds a data point to postgresql.db_size metric.
func (mb *MetricsBuilder) RecordPostgresqlDbSizeDataPoint(ts pcommon.Timestamp, val int64) {
	mb.metricPostgresqlDbSize.recordDataPoint(mb.startTime, ts, val)
}

// RecordPostgresqlDeadlocksDataPoint adds a data point to postgresql.deadlocks metric.
func (mb *MetricsBuilder) RecordPostgresqlDeadlocksDataPoint(ts pcommon.Timestamp, val int64) {
	mb.metricPostgresqlDeadlocks.recordDataPoint(mb.startTime, ts, val)
}

// RecordPostgresqlIndexScansDataPoint adds a data point to postgresql.index.scans metric.
func (mb *MetricsBuilder) RecordPostgresqlIndexScansDataPoint(ts pcommon.Timestamp, val int64) {
	mb.metricPostgresqlIndexScans.recordDataPoint(mb.startTime, ts, val)
}

// RecordPostgresqlIndexSizeDataPoint adds a data point to postgresql.index.size metric.
func (mb *MetricsBuilder) RecordPostgresqlIndexSizeDataPoint(ts pcommon.Timestamp, val int64) {
	mb.metricPostgresqlIndexSize.recordDataPoint(mb.startTime, ts, val)
}

// RecordPostgresqlOperationsDataPoint adds a data point to postgresql.operations metric.
func (mb *MetricsBuilder) RecordPostgresqlOperationsDataPoint(ts pcommon.Timestamp, val int64, operationAttributeValue AttributeOperation) {
	mb.metricPostgresqlOperations.recordDataPoint(mb.startTime, ts, val, operationAttributeValue.String())
}

// RecordPostgresqlReplicationDataDelayDataPoint adds a data point to postgresql.replication.data_delay metric.
func (mb *MetricsBuilder) RecordPostgresqlReplicationDataDelayDataPoint(ts pcommon.Timestamp, val int64, replicationClientAttributeValue string) {
	mb.metricPostgresqlReplicationDataDelay.recordDataPoint(mb.startTime, ts, val, replicationClientAttributeValue)
}

// RecordPostgresqlRollbacksDataPoint adds a data point to postgresql.rollbacks metric.
func (mb *MetricsBuilder) RecordPostgresqlRollbacksDataPoint(ts pcommon.Timestamp, val int64) {
	mb.metricPostgresqlRollbacks.recordDataPoint(mb.startTime, ts, val)
}

// RecordPostgresqlRowsDataPoint adds a data point to postgresql.rows metric.
func (mb *MetricsBuilder) RecordPostgresqlRowsDataPoint(ts pcommon.Timestamp, val int64, stateAttributeValue AttributeState) {
	mb.metricPostgresqlRows.recordDataPoint(mb.startTime, ts, val, stateAttributeValue.String())
}

// RecordPostgresqlSequentialScansDataPoint adds a data point to postgresql.sequential_scans metric.
func (mb *MetricsBuilder) RecordPostgresqlSequentialScansDataPoint(ts pcommon.Timestamp, val int64) {
	mb.metricPostgresqlSequentialScans.recordDataPoint(mb.startTime, ts, val)
}

// RecordPostgresqlTableCountDataPoint adds a data point to postgresql.table.count metric.
func (mb *MetricsBuilder) RecordPostgresqlTableCountDataPoint(ts pcommon.Timestamp, val int64) {
	mb.metricPostgresqlTableCount.recordDataPoint(mb.startTime, ts, val)
}

// RecordPostgresqlTableSizeDataPoint adds a data point to postgresql.table.size metric.
func (mb *MetricsBuilder) RecordPostgresqlTableSizeDataPoint(ts pcommon.Timestamp, val int64) {
	mb.metricPostgresqlTableSize.recordDataPoint(mb.startTime, ts, val)
}

// RecordPostgresqlTableVacuumCountDataPoint adds a data point to postgresql.table.vacuum.count metric.
func (mb *MetricsBuilder) RecordPostgresqlTableVacuumCountDataPoint(ts pcommon.Timestamp, val int64) {
	mb.metricPostgresqlTableVacuumCount.recordDataPoint(mb.startTime, ts, val)
}

// RecordPostgresqlTempFilesDataPoint adds a data point to postgresql.temp_files metric.
func (mb *MetricsBuilder) RecordPostgresqlTempFilesDataPoint(ts pcommon.Timestamp, val int64) {
	mb.metricPostgresqlTempFiles.recordDataPoint(mb.startTime, ts, val)
}

// RecordPostgresqlTupDeletedDataPoint adds a data point to postgresql.tup_deleted metric.
func (mb *MetricsBuilder) RecordPostgresqlTupDeletedDataPoint(ts pcommon.Timestamp, val int64) {
	mb.metricPostgresqlTupDeleted.recordDataPoint(mb.startTime, ts, val)
}

// RecordPostgresqlTupFetchedDataPoint adds a data point to postgresql.tup_fetched metric.
func (mb *MetricsBuilder) RecordPostgresqlTupFetchedDataPoint(ts pcommon.Timestamp, val int64) {
	mb.metricPostgresqlTupFetched.recordDataPoint(mb.startTime, ts, val)
}

// RecordPostgresqlTupInsertedDataPoint adds a data point to postgresql.tup_inserted metric.
func (mb *MetricsBuilder) RecordPostgresqlTupInsertedDataPoint(ts pcommon.Timestamp, val int64) {
	mb.metricPostgresqlTupInserted.recordDataPoint(mb.startTime, ts, val)
}

// RecordPostgresqlTupReturnedDataPoint adds a data point to postgresql.tup_returned metric.
func (mb *MetricsBuilder) RecordPostgresqlTupReturnedDataPoint(ts pcommon.Timestamp, val int64) {
	mb.metricPostgresqlTupReturned.recordDataPoint(mb.startTime, ts, val)
}

// RecordPostgresqlTupUpdatedDataPoint adds a data point to postgresql.tup_updated metric.
func (mb *MetricsBuilder) RecordPostgresqlTupUpdatedDataPoint(ts pcommon.Timestamp, val int64) {
	mb.metricPostgresqlTupUpdated.recordDataPoint(mb.startTime, ts, val)
}

// RecordPostgresqlWalAgeDataPoint adds a data point to postgresql.wal.age metric.
func (mb *MetricsBuilder) RecordPostgresqlWalAgeDataPoint(ts pcommon.Timestamp, val int64) {
	mb.metricPostgresqlWalAge.recordDataPoint(mb.startTime, ts, val)
}

// RecordPostgresqlWalDelayDataPoint adds a data point to postgresql.wal.delay metric.
func (mb *MetricsBuilder) RecordPostgresqlWalDelayDataPoint(ts pcommon.Timestamp, val float64, walOperationLagAttributeValue AttributeWalOperationLag, replicationClientAttributeValue string) {
	mb.metricPostgresqlWalDelay.recordDataPoint(mb.startTime, ts, val, walOperationLagAttributeValue.String(), replicationClientAttributeValue)
}

// RecordPostgresqlWalLagDataPoint adds a data point to postgresql.wal.lag metric.
func (mb *MetricsBuilder) RecordPostgresqlWalLagDataPoint(ts pcommon.Timestamp, val int64, walOperationLagAttributeValue AttributeWalOperationLag, replicationClientAttributeValue string) {
	mb.metricPostgresqlWalLag.recordDataPoint(mb.startTime, ts, val, walOperationLagAttributeValue.String(), replicationClientAttributeValue)
}

// Reset resets the metrics builder to its initial state. It should be used when an external metrics source is
// restarted, and the metrics builder should update its startTime and reset its internal state accordingly.
func (mb *MetricsBuilder) Reset(options ...MetricBuilderOption) {
	mb.startTime = pcommon.NewTimestampFromTime(time.Now())
	for _, op := range options {
		op.apply(mb)
	}
}
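// Reset is intended for the case where the scraped PostgreSQL instance itself
// restarts and its cumulative counters start over, so the builder's start
// timestamp must move forward. A minimal sketch (the restart detection is up
// to the scraper and the serverRestarted flag is hypothetical):
//
//	if serverRestarted {
//		mb.Reset(WithStartTime(pcommon.NewTimestampFromTime(time.Now())))
//	}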