components/otelopscol/receiver/mongodbreceiver/internal/metadata/generated_metrics.go (1,696 lines of code) (raw):

// Code generated by mdatagen. DO NOT EDIT. package metadata import ( "time" "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/filter" "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/pmetric" "go.opentelemetry.io/collector/receiver" ) // AttributeConnectionType specifies the value connection_type attribute. type AttributeConnectionType int const ( _ AttributeConnectionType = iota AttributeConnectionTypeActive AttributeConnectionTypeAvailable AttributeConnectionTypeCurrent ) // String returns the string representation of the AttributeConnectionType. func (av AttributeConnectionType) String() string { switch av { case AttributeConnectionTypeActive: return "active" case AttributeConnectionTypeAvailable: return "available" case AttributeConnectionTypeCurrent: return "current" } return "" } // MapAttributeConnectionType is a helper map of string to AttributeConnectionType attribute value. var MapAttributeConnectionType = map[string]AttributeConnectionType{ "active": AttributeConnectionTypeActive, "available": AttributeConnectionTypeAvailable, "current": AttributeConnectionTypeCurrent, } // AttributeLockMode specifies the value lock_mode attribute. type AttributeLockMode int const ( _ AttributeLockMode = iota AttributeLockModeShared AttributeLockModeExclusive AttributeLockModeIntentShared AttributeLockModeIntentExclusive ) // String returns the string representation of the AttributeLockMode. func (av AttributeLockMode) String() string { switch av { case AttributeLockModeShared: return "shared" case AttributeLockModeExclusive: return "exclusive" case AttributeLockModeIntentShared: return "intent_shared" case AttributeLockModeIntentExclusive: return "intent_exclusive" } return "" } // MapAttributeLockMode is a helper map of string to AttributeLockMode attribute value. 
var MapAttributeLockMode = map[string]AttributeLockMode{
	"shared":           AttributeLockModeShared,
	"exclusive":        AttributeLockModeExclusive,
	"intent_shared":    AttributeLockModeIntentShared,
	"intent_exclusive": AttributeLockModeIntentExclusive,
}

// AttributeLockType specifies the value lock_type attribute.
type AttributeLockType int

const (
	_ AttributeLockType = iota
	AttributeLockTypeParallelBatchWriteMode
	AttributeLockTypeReplicationStateTransition
	AttributeLockTypeGlobal
	AttributeLockTypeDatabase
	AttributeLockTypeCollection
	AttributeLockTypeMutex
	AttributeLockTypeMetadata
	AttributeLockTypeOplog
)

// String returns the string representation of the AttributeLockType.
func (av AttributeLockType) String() string {
	switch av {
	case AttributeLockTypeParallelBatchWriteMode:
		return "parallel_batch_write_mode"
	case AttributeLockTypeReplicationStateTransition:
		return "replication_state_transition"
	case AttributeLockTypeGlobal:
		return "global"
	case AttributeLockTypeDatabase:
		return "database"
	case AttributeLockTypeCollection:
		return "collection"
	case AttributeLockTypeMutex:
		return "mutex"
	case AttributeLockTypeMetadata:
		return "metadata"
	case AttributeLockTypeOplog:
		return "oplog"
	}
	return ""
}

// MapAttributeLockType is a helper map of string to AttributeLockType attribute value.
var MapAttributeLockType = map[string]AttributeLockType{
	"parallel_batch_write_mode":    AttributeLockTypeParallelBatchWriteMode,
	"replication_state_transition": AttributeLockTypeReplicationStateTransition,
	"global":                       AttributeLockTypeGlobal,
	"database":                     AttributeLockTypeDatabase,
	"collection":                   AttributeLockTypeCollection,
	"mutex":                        AttributeLockTypeMutex,
	"metadata":                     AttributeLockTypeMetadata,
	"oplog":                        AttributeLockTypeOplog,
}

// AttributeMemoryType specifies the value memory_type attribute.
type AttributeMemoryType int

const (
	_ AttributeMemoryType = iota
	AttributeMemoryTypeResident
	AttributeMemoryTypeVirtual
)

// String returns the string representation of the AttributeMemoryType.
func (av AttributeMemoryType) String() string { switch av { case AttributeMemoryTypeResident: return "resident" case AttributeMemoryTypeVirtual: return "virtual" } return "" } // MapAttributeMemoryType is a helper map of string to AttributeMemoryType attribute value. var MapAttributeMemoryType = map[string]AttributeMemoryType{ "resident": AttributeMemoryTypeResident, "virtual": AttributeMemoryTypeVirtual, } // AttributeOperation specifies the value operation attribute. type AttributeOperation int const ( _ AttributeOperation = iota AttributeOperationInsert AttributeOperationQuery AttributeOperationUpdate AttributeOperationDelete AttributeOperationGetmore AttributeOperationCommand ) // String returns the string representation of the AttributeOperation. func (av AttributeOperation) String() string { switch av { case AttributeOperationInsert: return "insert" case AttributeOperationQuery: return "query" case AttributeOperationUpdate: return "update" case AttributeOperationDelete: return "delete" case AttributeOperationGetmore: return "getmore" case AttributeOperationCommand: return "command" } return "" } // MapAttributeOperation is a helper map of string to AttributeOperation attribute value. var MapAttributeOperation = map[string]AttributeOperation{ "insert": AttributeOperationInsert, "query": AttributeOperationQuery, "update": AttributeOperationUpdate, "delete": AttributeOperationDelete, "getmore": AttributeOperationGetmore, "command": AttributeOperationCommand, } // AttributeType specifies the value type attribute. type AttributeType int const ( _ AttributeType = iota AttributeTypeHit AttributeTypeMiss ) // String returns the string representation of the AttributeType. func (av AttributeType) String() string { switch av { case AttributeTypeHit: return "hit" case AttributeTypeMiss: return "miss" } return "" } // MapAttributeType is a helper map of string to AttributeType attribute value. 
var MapAttributeType = map[string]AttributeType{ "hit": AttributeTypeHit, "miss": AttributeTypeMiss, } var MetricsInfo = metricsInfo{ MongodbCacheOperations: metricInfo{ Name: "mongodb.cache.operations", }, MongodbCollectionCount: metricInfo{ Name: "mongodb.collection.count", }, MongodbConnectionCount: metricInfo{ Name: "mongodb.connection.count", }, MongodbCursorCount: metricInfo{ Name: "mongodb.cursor.count", }, MongodbCursorTimeoutCount: metricInfo{ Name: "mongodb.cursor.timeout.count", }, MongodbDataSize: metricInfo{ Name: "mongodb.data.size", }, MongodbDatabaseCount: metricInfo{ Name: "mongodb.database.count", }, MongodbDocumentOperationCount: metricInfo{ Name: "mongodb.document.operation.count", }, MongodbExtentCount: metricInfo{ Name: "mongodb.extent.count", }, MongodbGlobalLockTime: metricInfo{ Name: "mongodb.global_lock.time", }, MongodbIndexAccessCount: metricInfo{ Name: "mongodb.index.access.count", }, MongodbIndexCount: metricInfo{ Name: "mongodb.index.count", }, MongodbIndexSize: metricInfo{ Name: "mongodb.index.size", }, MongodbLockAcquireCount: metricInfo{ Name: "mongodb.lock.acquire.count", }, MongodbLockAcquireTime: metricInfo{ Name: "mongodb.lock.acquire.time", }, MongodbLockAcquireWaitCount: metricInfo{ Name: "mongodb.lock.acquire.wait_count", }, MongodbLockDeadlockCount: metricInfo{ Name: "mongodb.lock.deadlock.count", }, MongodbMemoryUsage: metricInfo{ Name: "mongodb.memory.usage", }, MongodbNetworkIoReceive: metricInfo{ Name: "mongodb.network.io.receive", }, MongodbNetworkIoTransmit: metricInfo{ Name: "mongodb.network.io.transmit", }, MongodbNetworkRequestCount: metricInfo{ Name: "mongodb.network.request.count", }, MongodbObjectCount: metricInfo{ Name: "mongodb.object.count", }, MongodbOperationCount: metricInfo{ Name: "mongodb.operation.count", }, MongodbOperationTime: metricInfo{ Name: "mongodb.operation.time", }, MongodbSessionCount: metricInfo{ Name: "mongodb.session.count", }, MongodbStorageSize: metricInfo{ Name: "mongodb.storage.size", 
}, } type metricsInfo struct { MongodbCacheOperations metricInfo MongodbCollectionCount metricInfo MongodbConnectionCount metricInfo MongodbCursorCount metricInfo MongodbCursorTimeoutCount metricInfo MongodbDataSize metricInfo MongodbDatabaseCount metricInfo MongodbDocumentOperationCount metricInfo MongodbExtentCount metricInfo MongodbGlobalLockTime metricInfo MongodbIndexAccessCount metricInfo MongodbIndexCount metricInfo MongodbIndexSize metricInfo MongodbLockAcquireCount metricInfo MongodbLockAcquireTime metricInfo MongodbLockAcquireWaitCount metricInfo MongodbLockDeadlockCount metricInfo MongodbMemoryUsage metricInfo MongodbNetworkIoReceive metricInfo MongodbNetworkIoTransmit metricInfo MongodbNetworkRequestCount metricInfo MongodbObjectCount metricInfo MongodbOperationCount metricInfo MongodbOperationTime metricInfo MongodbSessionCount metricInfo MongodbStorageSize metricInfo } type metricInfo struct { Name string } type metricMongodbCacheOperations struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. capacity int // max observed number of data points added to the metric. } // init fills mongodb.cache.operations metric with initial data. 
func (m *metricMongodbCacheOperations) init() { m.data.SetName("mongodb.cache.operations") m.data.SetDescription("The number of cache operations of the instance.") m.data.SetUnit("{operations}") m.data.SetEmptySum() m.data.Sum().SetIsMonotonic(true) m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) m.data.Sum().DataPoints().EnsureCapacity(m.capacity) } func (m *metricMongodbCacheOperations) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, typeAttributeValue string) { if !m.config.Enabled { return } dp := m.data.Sum().DataPoints().AppendEmpty() dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntValue(val) dp.Attributes().PutStr("type", typeAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. func (m *metricMongodbCacheOperations) updateCapacity() { if m.data.Sum().DataPoints().Len() > m.capacity { m.capacity = m.data.Sum().DataPoints().Len() } } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. func (m *metricMongodbCacheOperations) emit(metrics pmetric.MetricSlice) { if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) m.init() } } func newMetricMongodbCacheOperations(cfg MetricConfig) metricMongodbCacheOperations { m := metricMongodbCacheOperations{config: cfg} if cfg.Enabled { m.data = pmetric.NewMetric() m.init() } return m } type metricMongodbCollectionCount struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. capacity int // max observed number of data points added to the metric. } // init fills mongodb.collection.count metric with initial data. 
func (m *metricMongodbCollectionCount) init() { m.data.SetName("mongodb.collection.count") m.data.SetDescription("The number of collections.") m.data.SetUnit("{collections}") m.data.SetEmptySum() m.data.Sum().SetIsMonotonic(false) m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) m.data.Sum().DataPoints().EnsureCapacity(m.capacity) } func (m *metricMongodbCollectionCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, databaseAttributeValue string) { if !m.config.Enabled { return } dp := m.data.Sum().DataPoints().AppendEmpty() dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntValue(val) dp.Attributes().PutStr("database", databaseAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. func (m *metricMongodbCollectionCount) updateCapacity() { if m.data.Sum().DataPoints().Len() > m.capacity { m.capacity = m.data.Sum().DataPoints().Len() } } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. func (m *metricMongodbCollectionCount) emit(metrics pmetric.MetricSlice) { if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) m.init() } } func newMetricMongodbCollectionCount(cfg MetricConfig) metricMongodbCollectionCount { m := metricMongodbCollectionCount{config: cfg} if cfg.Enabled { m.data = pmetric.NewMetric() m.init() } return m } type metricMongodbConnectionCount struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. capacity int // max observed number of data points added to the metric. } // init fills mongodb.connection.count metric with initial data. 
func (m *metricMongodbConnectionCount) init() { m.data.SetName("mongodb.connection.count") m.data.SetDescription("The number of connections.") m.data.SetUnit("{connections}") m.data.SetEmptySum() m.data.Sum().SetIsMonotonic(false) m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) m.data.Sum().DataPoints().EnsureCapacity(m.capacity) } func (m *metricMongodbConnectionCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, databaseAttributeValue string, connectionTypeAttributeValue string) { if !m.config.Enabled { return } dp := m.data.Sum().DataPoints().AppendEmpty() dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntValue(val) dp.Attributes().PutStr("database", databaseAttributeValue) dp.Attributes().PutStr("type", connectionTypeAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. func (m *metricMongodbConnectionCount) updateCapacity() { if m.data.Sum().DataPoints().Len() > m.capacity { m.capacity = m.data.Sum().DataPoints().Len() } } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. func (m *metricMongodbConnectionCount) emit(metrics pmetric.MetricSlice) { if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) m.init() } } func newMetricMongodbConnectionCount(cfg MetricConfig) metricMongodbConnectionCount { m := metricMongodbConnectionCount{config: cfg} if cfg.Enabled { m.data = pmetric.NewMetric() m.init() } return m } type metricMongodbCursorCount struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. capacity int // max observed number of data points added to the metric. } // init fills mongodb.cursor.count metric with initial data. 
func (m *metricMongodbCursorCount) init() { m.data.SetName("mongodb.cursor.count") m.data.SetDescription("The number of open cursors maintained for clients.") m.data.SetUnit("{cursors}") m.data.SetEmptySum() m.data.Sum().SetIsMonotonic(false) m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) } func (m *metricMongodbCursorCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { if !m.config.Enabled { return } dp := m.data.Sum().DataPoints().AppendEmpty() dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntValue(val) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. func (m *metricMongodbCursorCount) updateCapacity() { if m.data.Sum().DataPoints().Len() > m.capacity { m.capacity = m.data.Sum().DataPoints().Len() } } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. func (m *metricMongodbCursorCount) emit(metrics pmetric.MetricSlice) { if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) m.init() } } func newMetricMongodbCursorCount(cfg MetricConfig) metricMongodbCursorCount { m := metricMongodbCursorCount{config: cfg} if cfg.Enabled { m.data = pmetric.NewMetric() m.init() } return m } type metricMongodbCursorTimeoutCount struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. capacity int // max observed number of data points added to the metric. } // init fills mongodb.cursor.timeout.count metric with initial data. 
func (m *metricMongodbCursorTimeoutCount) init() { m.data.SetName("mongodb.cursor.timeout.count") m.data.SetDescription("The number of cursors that have timed out.") m.data.SetUnit("{cursors}") m.data.SetEmptySum() m.data.Sum().SetIsMonotonic(false) m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) } func (m *metricMongodbCursorTimeoutCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { if !m.config.Enabled { return } dp := m.data.Sum().DataPoints().AppendEmpty() dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntValue(val) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. func (m *metricMongodbCursorTimeoutCount) updateCapacity() { if m.data.Sum().DataPoints().Len() > m.capacity { m.capacity = m.data.Sum().DataPoints().Len() } } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. func (m *metricMongodbCursorTimeoutCount) emit(metrics pmetric.MetricSlice) { if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) m.init() } } func newMetricMongodbCursorTimeoutCount(cfg MetricConfig) metricMongodbCursorTimeoutCount { m := metricMongodbCursorTimeoutCount{config: cfg} if cfg.Enabled { m.data = pmetric.NewMetric() m.init() } return m } type metricMongodbDataSize struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. capacity int // max observed number of data points added to the metric. } // init fills mongodb.data.size metric with initial data. func (m *metricMongodbDataSize) init() { m.data.SetName("mongodb.data.size") m.data.SetDescription("The size of the collection. 
Data compression does not affect this value.") m.data.SetUnit("By") m.data.SetEmptySum() m.data.Sum().SetIsMonotonic(false) m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) m.data.Sum().DataPoints().EnsureCapacity(m.capacity) } func (m *metricMongodbDataSize) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, databaseAttributeValue string) { if !m.config.Enabled { return } dp := m.data.Sum().DataPoints().AppendEmpty() dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntValue(val) dp.Attributes().PutStr("database", databaseAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. func (m *metricMongodbDataSize) updateCapacity() { if m.data.Sum().DataPoints().Len() > m.capacity { m.capacity = m.data.Sum().DataPoints().Len() } } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. func (m *metricMongodbDataSize) emit(metrics pmetric.MetricSlice) { if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) m.init() } } func newMetricMongodbDataSize(cfg MetricConfig) metricMongodbDataSize { m := metricMongodbDataSize{config: cfg} if cfg.Enabled { m.data = pmetric.NewMetric() m.init() } return m } type metricMongodbDatabaseCount struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. capacity int // max observed number of data points added to the metric. } // init fills mongodb.database.count metric with initial data. 
func (m *metricMongodbDatabaseCount) init() { m.data.SetName("mongodb.database.count") m.data.SetDescription("The number of existing databases.") m.data.SetUnit("{databases}") m.data.SetEmptySum() m.data.Sum().SetIsMonotonic(false) m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) } func (m *metricMongodbDatabaseCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { if !m.config.Enabled { return } dp := m.data.Sum().DataPoints().AppendEmpty() dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntValue(val) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. func (m *metricMongodbDatabaseCount) updateCapacity() { if m.data.Sum().DataPoints().Len() > m.capacity { m.capacity = m.data.Sum().DataPoints().Len() } } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. func (m *metricMongodbDatabaseCount) emit(metrics pmetric.MetricSlice) { if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) m.init() } } func newMetricMongodbDatabaseCount(cfg MetricConfig) metricMongodbDatabaseCount { m := metricMongodbDatabaseCount{config: cfg} if cfg.Enabled { m.data = pmetric.NewMetric() m.init() } return m } type metricMongodbDocumentOperationCount struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. capacity int // max observed number of data points added to the metric. } // init fills mongodb.document.operation.count metric with initial data. 
func (m *metricMongodbDocumentOperationCount) init() { m.data.SetName("mongodb.document.operation.count") m.data.SetDescription("The number of document operations executed.") m.data.SetUnit("{documents}") m.data.SetEmptySum() m.data.Sum().SetIsMonotonic(false) m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) m.data.Sum().DataPoints().EnsureCapacity(m.capacity) } func (m *metricMongodbDocumentOperationCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, databaseAttributeValue string, operationAttributeValue string) { if !m.config.Enabled { return } dp := m.data.Sum().DataPoints().AppendEmpty() dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntValue(val) dp.Attributes().PutStr("database", databaseAttributeValue) dp.Attributes().PutStr("operation", operationAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. func (m *metricMongodbDocumentOperationCount) updateCapacity() { if m.data.Sum().DataPoints().Len() > m.capacity { m.capacity = m.data.Sum().DataPoints().Len() } } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. func (m *metricMongodbDocumentOperationCount) emit(metrics pmetric.MetricSlice) { if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) m.init() } } func newMetricMongodbDocumentOperationCount(cfg MetricConfig) metricMongodbDocumentOperationCount { m := metricMongodbDocumentOperationCount{config: cfg} if cfg.Enabled { m.data = pmetric.NewMetric() m.init() } return m } type metricMongodbExtentCount struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. capacity int // max observed number of data points added to the metric. } // init fills mongodb.extent.count metric with initial data. 
func (m *metricMongodbExtentCount) init() { m.data.SetName("mongodb.extent.count") m.data.SetDescription("The number of extents.") m.data.SetUnit("{extents}") m.data.SetEmptySum() m.data.Sum().SetIsMonotonic(false) m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) m.data.Sum().DataPoints().EnsureCapacity(m.capacity) } func (m *metricMongodbExtentCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, databaseAttributeValue string) { if !m.config.Enabled { return } dp := m.data.Sum().DataPoints().AppendEmpty() dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntValue(val) dp.Attributes().PutStr("database", databaseAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. func (m *metricMongodbExtentCount) updateCapacity() { if m.data.Sum().DataPoints().Len() > m.capacity { m.capacity = m.data.Sum().DataPoints().Len() } } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. func (m *metricMongodbExtentCount) emit(metrics pmetric.MetricSlice) { if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) m.init() } } func newMetricMongodbExtentCount(cfg MetricConfig) metricMongodbExtentCount { m := metricMongodbExtentCount{config: cfg} if cfg.Enabled { m.data = pmetric.NewMetric() m.init() } return m } type metricMongodbGlobalLockTime struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. capacity int // max observed number of data points added to the metric. } // init fills mongodb.global_lock.time metric with initial data. 
func (m *metricMongodbGlobalLockTime) init() { m.data.SetName("mongodb.global_lock.time") m.data.SetDescription("The time the global lock has been held.") m.data.SetUnit("ms") m.data.SetEmptySum() m.data.Sum().SetIsMonotonic(true) m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) } func (m *metricMongodbGlobalLockTime) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { if !m.config.Enabled { return } dp := m.data.Sum().DataPoints().AppendEmpty() dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntValue(val) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. func (m *metricMongodbGlobalLockTime) updateCapacity() { if m.data.Sum().DataPoints().Len() > m.capacity { m.capacity = m.data.Sum().DataPoints().Len() } } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. func (m *metricMongodbGlobalLockTime) emit(metrics pmetric.MetricSlice) { if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) m.init() } } func newMetricMongodbGlobalLockTime(cfg MetricConfig) metricMongodbGlobalLockTime { m := metricMongodbGlobalLockTime{config: cfg} if cfg.Enabled { m.data = pmetric.NewMetric() m.init() } return m } type metricMongodbIndexAccessCount struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. capacity int // max observed number of data points added to the metric. } // init fills mongodb.index.access.count metric with initial data. 
func (m *metricMongodbIndexAccessCount) init() { m.data.SetName("mongodb.index.access.count") m.data.SetDescription("The number of times an index has been accessed.") m.data.SetUnit("{accesses}") m.data.SetEmptySum() m.data.Sum().SetIsMonotonic(false) m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) m.data.Sum().DataPoints().EnsureCapacity(m.capacity) } func (m *metricMongodbIndexAccessCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, databaseAttributeValue string, collectionAttributeValue string) { if !m.config.Enabled { return } dp := m.data.Sum().DataPoints().AppendEmpty() dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntValue(val) dp.Attributes().PutStr("database", databaseAttributeValue) dp.Attributes().PutStr("collection", collectionAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. func (m *metricMongodbIndexAccessCount) updateCapacity() { if m.data.Sum().DataPoints().Len() > m.capacity { m.capacity = m.data.Sum().DataPoints().Len() } } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. func (m *metricMongodbIndexAccessCount) emit(metrics pmetric.MetricSlice) { if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) m.init() } } func newMetricMongodbIndexAccessCount(cfg MetricConfig) metricMongodbIndexAccessCount { m := metricMongodbIndexAccessCount{config: cfg} if cfg.Enabled { m.data = pmetric.NewMetric() m.init() } return m } type metricMongodbIndexCount struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. capacity int // max observed number of data points added to the metric. } // init fills mongodb.index.count metric with initial data. 
func (m *metricMongodbIndexCount) init() { m.data.SetName("mongodb.index.count") m.data.SetDescription("The number of indexes.") m.data.SetUnit("{indexes}") m.data.SetEmptySum() m.data.Sum().SetIsMonotonic(false) m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) m.data.Sum().DataPoints().EnsureCapacity(m.capacity) } func (m *metricMongodbIndexCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, databaseAttributeValue string) { if !m.config.Enabled { return } dp := m.data.Sum().DataPoints().AppendEmpty() dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntValue(val) dp.Attributes().PutStr("database", databaseAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. func (m *metricMongodbIndexCount) updateCapacity() { if m.data.Sum().DataPoints().Len() > m.capacity { m.capacity = m.data.Sum().DataPoints().Len() } } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. func (m *metricMongodbIndexCount) emit(metrics pmetric.MetricSlice) { if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) m.init() } } func newMetricMongodbIndexCount(cfg MetricConfig) metricMongodbIndexCount { m := metricMongodbIndexCount{config: cfg} if cfg.Enabled { m.data = pmetric.NewMetric() m.init() } return m } type metricMongodbIndexSize struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. capacity int // max observed number of data points added to the metric. } // init fills mongodb.index.size metric with initial data. 
func (m *metricMongodbIndexSize) init() { m.data.SetName("mongodb.index.size") m.data.SetDescription("Sum of the space allocated to all indexes in the database, including free index space.") m.data.SetUnit("By") m.data.SetEmptySum() m.data.Sum().SetIsMonotonic(false) m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) m.data.Sum().DataPoints().EnsureCapacity(m.capacity) } func (m *metricMongodbIndexSize) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, databaseAttributeValue string) { if !m.config.Enabled { return } dp := m.data.Sum().DataPoints().AppendEmpty() dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntValue(val) dp.Attributes().PutStr("database", databaseAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. func (m *metricMongodbIndexSize) updateCapacity() { if m.data.Sum().DataPoints().Len() > m.capacity { m.capacity = m.data.Sum().DataPoints().Len() } } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. func (m *metricMongodbIndexSize) emit(metrics pmetric.MetricSlice) { if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) m.init() } } func newMetricMongodbIndexSize(cfg MetricConfig) metricMongodbIndexSize { m := metricMongodbIndexSize{config: cfg} if cfg.Enabled { m.data = pmetric.NewMetric() m.init() } return m } type metricMongodbLockAcquireCount struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. capacity int // max observed number of data points added to the metric. } // init fills mongodb.lock.acquire.count metric with initial data. 
func (m *metricMongodbLockAcquireCount) init() {
	m.data.SetName("mongodb.lock.acquire.count")
	m.data.SetDescription("Number of times the lock was acquired in the specified mode.")
	m.data.SetUnit("{count}")
	m.data.SetEmptySum()
	m.data.Sum().SetIsMonotonic(true)
	m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
	m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
}

// recordDataPoint appends one data point with the given timestamps, value, and
// the "database", "lock_type", and "lock_mode" attributes. No-op when disabled.
func (m *metricMongodbLockAcquireCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, databaseAttributeValue string, lockTypeAttributeValue string, lockModeAttributeValue string) {
	if !m.config.Enabled {
		return
	}
	dp := m.data.Sum().DataPoints().AppendEmpty()
	dp.SetStartTimestamp(start)
	dp.SetTimestamp(ts)
	dp.SetIntValue(val)
	dp.Attributes().PutStr("database", databaseAttributeValue)
	dp.Attributes().PutStr("lock_type", lockTypeAttributeValue)
	dp.Attributes().PutStr("lock_mode", lockModeAttributeValue)
}

// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricMongodbLockAcquireCount) updateCapacity() {
	if m.data.Sum().DataPoints().Len() > m.capacity {
		m.capacity = m.data.Sum().DataPoints().Len()
	}
}

// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMongodbLockAcquireCount) emit(metrics pmetric.MetricSlice) {
	if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
		m.updateCapacity()
		m.data.MoveTo(metrics.AppendEmpty())
		m.init()
	}
}

// newMetricMongodbLockAcquireCount builds the metric wrapper; the underlying
// pmetric.Metric is allocated only when the metric is enabled.
func newMetricMongodbLockAcquireCount(cfg MetricConfig) metricMongodbLockAcquireCount {
	m := metricMongodbLockAcquireCount{config: cfg}
	if cfg.Enabled {
		m.data = pmetric.NewMetric()
		m.init()
	}
	return m
}

type metricMongodbLockAcquireTime struct {
	data     pmetric.Metric // data buffer for generated metric.
	config   MetricConfig   // metric config provided by user.
	capacity int            // max observed number of data points added to the metric.
}

// init fills mongodb.lock.acquire.time metric with initial data.
func (m *metricMongodbLockAcquireTime) init() {
	m.data.SetName("mongodb.lock.acquire.time")
	m.data.SetDescription("Cumulative wait time for the lock acquisitions.")
	// NOTE(review): "microseconds" is not a UCUM unit ("us"); this file is
	// generated, so fix the unit in metadata.yaml and regenerate if UCUM
	// compliance is desired.
	m.data.SetUnit("microseconds")
	m.data.SetEmptySum()
	m.data.Sum().SetIsMonotonic(true)
	m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
	m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
}

// recordDataPoint appends one data point with the given timestamps, value, and
// the "database", "lock_type", and "lock_mode" attributes. No-op when disabled.
func (m *metricMongodbLockAcquireTime) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, databaseAttributeValue string, lockTypeAttributeValue string, lockModeAttributeValue string) {
	if !m.config.Enabled {
		return
	}
	dp := m.data.Sum().DataPoints().AppendEmpty()
	dp.SetStartTimestamp(start)
	dp.SetTimestamp(ts)
	dp.SetIntValue(val)
	dp.Attributes().PutStr("database", databaseAttributeValue)
	dp.Attributes().PutStr("lock_type", lockTypeAttributeValue)
	dp.Attributes().PutStr("lock_mode", lockModeAttributeValue)
}

// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricMongodbLockAcquireTime) updateCapacity() {
	if m.data.Sum().DataPoints().Len() > m.capacity {
		m.capacity = m.data.Sum().DataPoints().Len()
	}
}

// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMongodbLockAcquireTime) emit(metrics pmetric.MetricSlice) {
	if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
		m.updateCapacity()
		m.data.MoveTo(metrics.AppendEmpty())
		m.init()
	}
}

// newMetricMongodbLockAcquireTime builds the metric wrapper; the underlying
// pmetric.Metric is allocated only when the metric is enabled.
func newMetricMongodbLockAcquireTime(cfg MetricConfig) metricMongodbLockAcquireTime {
	m := metricMongodbLockAcquireTime{config: cfg}
	if cfg.Enabled {
		m.data = pmetric.NewMetric()
		m.init()
	}
	return m
}

type metricMongodbLockAcquireWaitCount struct {
	data     pmetric.Metric // data buffer for generated metric.
	config   MetricConfig   // metric config provided by user.
	capacity int            // max observed number of data points added to the metric.
}

// init fills mongodb.lock.acquire.wait_count metric with initial data.
func (m *metricMongodbLockAcquireWaitCount) init() {
	m.data.SetName("mongodb.lock.acquire.wait_count")
	m.data.SetDescription("Number of times the lock acquisitions encountered waits because the locks were held in a conflicting mode.")
	m.data.SetUnit("{count}")
	m.data.SetEmptySum()
	m.data.Sum().SetIsMonotonic(true)
	m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
	m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
}

// recordDataPoint appends one data point with the given timestamps, value, and
// the "database", "lock_type", and "lock_mode" attributes. No-op when disabled.
func (m *metricMongodbLockAcquireWaitCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, databaseAttributeValue string, lockTypeAttributeValue string, lockModeAttributeValue string) {
	if !m.config.Enabled {
		return
	}
	dp := m.data.Sum().DataPoints().AppendEmpty()
	dp.SetStartTimestamp(start)
	dp.SetTimestamp(ts)
	dp.SetIntValue(val)
	dp.Attributes().PutStr("database", databaseAttributeValue)
	dp.Attributes().PutStr("lock_type", lockTypeAttributeValue)
	dp.Attributes().PutStr("lock_mode", lockModeAttributeValue)
}

// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricMongodbLockAcquireWaitCount) updateCapacity() {
	if m.data.Sum().DataPoints().Len() > m.capacity {
		m.capacity = m.data.Sum().DataPoints().Len()
	}
}

// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMongodbLockAcquireWaitCount) emit(metrics pmetric.MetricSlice) {
	if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
		m.updateCapacity()
		m.data.MoveTo(metrics.AppendEmpty())
		m.init()
	}
}

// newMetricMongodbLockAcquireWaitCount builds the metric wrapper; the
// underlying pmetric.Metric is allocated only when the metric is enabled.
func newMetricMongodbLockAcquireWaitCount(cfg MetricConfig) metricMongodbLockAcquireWaitCount {
	m := metricMongodbLockAcquireWaitCount{config: cfg}
	if cfg.Enabled {
		m.data = pmetric.NewMetric()
		m.init()
	}
	return m
}

type metricMongodbLockDeadlockCount struct {
	data     pmetric.Metric // data buffer for generated metric.
	config   MetricConfig   // metric config provided by user.
	capacity int            // max observed number of data points added to the metric.
}

// init fills mongodb.lock.deadlock.count metric with initial data.
func (m *metricMongodbLockDeadlockCount) init() {
	m.data.SetName("mongodb.lock.deadlock.count")
	m.data.SetDescription("Number of times the lock acquisitions encountered deadlocks.")
	m.data.SetUnit("{count}")
	m.data.SetEmptySum()
	m.data.Sum().SetIsMonotonic(true)
	m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
	m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
}

// recordDataPoint appends one data point with the given timestamps, value, and
// the "database", "lock_type", and "lock_mode" attributes. No-op when disabled.
func (m *metricMongodbLockDeadlockCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, databaseAttributeValue string, lockTypeAttributeValue string, lockModeAttributeValue string) {
	if !m.config.Enabled {
		return
	}
	dp := m.data.Sum().DataPoints().AppendEmpty()
	dp.SetStartTimestamp(start)
	dp.SetTimestamp(ts)
	dp.SetIntValue(val)
	dp.Attributes().PutStr("database", databaseAttributeValue)
	dp.Attributes().PutStr("lock_type", lockTypeAttributeValue)
	dp.Attributes().PutStr("lock_mode", lockModeAttributeValue)
}

// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricMongodbLockDeadlockCount) updateCapacity() {
	if m.data.Sum().DataPoints().Len() > m.capacity {
		m.capacity = m.data.Sum().DataPoints().Len()
	}
}

// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMongodbLockDeadlockCount) emit(metrics pmetric.MetricSlice) {
	if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
		m.updateCapacity()
		m.data.MoveTo(metrics.AppendEmpty())
		m.init()
	}
}

// newMetricMongodbLockDeadlockCount builds the metric wrapper; the underlying
// pmetric.Metric is allocated only when the metric is enabled.
func newMetricMongodbLockDeadlockCount(cfg MetricConfig) metricMongodbLockDeadlockCount {
	m := metricMongodbLockDeadlockCount{config: cfg}
	if cfg.Enabled {
		m.data = pmetric.NewMetric()
		m.init()
	}
	return m
}

type metricMongodbMemoryUsage struct {
	data     pmetric.Metric // data buffer for generated metric.
	config   MetricConfig   // metric config provided by user.
	capacity int            // max observed number of data points added to the metric.
}

// init fills mongodb.memory.usage metric with initial data.
func (m *metricMongodbMemoryUsage) init() {
	m.data.SetName("mongodb.memory.usage")
	m.data.SetDescription("The amount of memory used.")
	m.data.SetUnit("By")
	m.data.SetEmptySum()
	m.data.Sum().SetIsMonotonic(false)
	m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
	m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
}

// recordDataPoint appends one data point with the given timestamps, value, and
// the "database" and "type" (memory type) attributes. No-op when disabled.
func (m *metricMongodbMemoryUsage) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, databaseAttributeValue string, memoryTypeAttributeValue string) {
	if !m.config.Enabled {
		return
	}
	dp := m.data.Sum().DataPoints().AppendEmpty()
	dp.SetStartTimestamp(start)
	dp.SetTimestamp(ts)
	dp.SetIntValue(val)
	dp.Attributes().PutStr("database", databaseAttributeValue)
	dp.Attributes().PutStr("type", memoryTypeAttributeValue)
}

// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricMongodbMemoryUsage) updateCapacity() {
	if m.data.Sum().DataPoints().Len() > m.capacity {
		m.capacity = m.data.Sum().DataPoints().Len()
	}
}

// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMongodbMemoryUsage) emit(metrics pmetric.MetricSlice) {
	if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
		m.updateCapacity()
		m.data.MoveTo(metrics.AppendEmpty())
		m.init()
	}
}

// newMetricMongodbMemoryUsage builds the metric wrapper; the underlying
// pmetric.Metric is allocated only when the metric is enabled.
func newMetricMongodbMemoryUsage(cfg MetricConfig) metricMongodbMemoryUsage {
	m := metricMongodbMemoryUsage{config: cfg}
	if cfg.Enabled {
		m.data = pmetric.NewMetric()
		m.init()
	}
	return m
}

type metricMongodbNetworkIoReceive struct {
	data     pmetric.Metric // data buffer for generated metric.
	config   MetricConfig   // metric config provided by user.
	capacity int            // max observed number of data points added to the metric.
}

// init fills mongodb.network.io.receive metric with initial data.
// (No attributes on this metric, hence no EnsureCapacity call here.)
func (m *metricMongodbNetworkIoReceive) init() {
	m.data.SetName("mongodb.network.io.receive")
	m.data.SetDescription("The number of bytes received.")
	m.data.SetUnit("By")
	m.data.SetEmptySum()
	m.data.Sum().SetIsMonotonic(false)
	m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
}

// recordDataPoint appends one attribute-less data point with the given
// timestamps and value. No-op when the metric is disabled.
func (m *metricMongodbNetworkIoReceive) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
	if !m.config.Enabled {
		return
	}
	dp := m.data.Sum().DataPoints().AppendEmpty()
	dp.SetStartTimestamp(start)
	dp.SetTimestamp(ts)
	dp.SetIntValue(val)
}

// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricMongodbNetworkIoReceive) updateCapacity() {
	if m.data.Sum().DataPoints().Len() > m.capacity {
		m.capacity = m.data.Sum().DataPoints().Len()
	}
}

// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMongodbNetworkIoReceive) emit(metrics pmetric.MetricSlice) {
	if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
		m.updateCapacity()
		m.data.MoveTo(metrics.AppendEmpty())
		m.init()
	}
}

// newMetricMongodbNetworkIoReceive builds the metric wrapper; the underlying
// pmetric.Metric is allocated only when the metric is enabled.
func newMetricMongodbNetworkIoReceive(cfg MetricConfig) metricMongodbNetworkIoReceive {
	m := metricMongodbNetworkIoReceive{config: cfg}
	if cfg.Enabled {
		m.data = pmetric.NewMetric()
		m.init()
	}
	return m
}

type metricMongodbNetworkIoTransmit struct {
	data     pmetric.Metric // data buffer for generated metric.
	config   MetricConfig   // metric config provided by user.
	capacity int            // max observed number of data points added to the metric.
}

// init fills mongodb.network.io.transmit metric with initial data.
func (m *metricMongodbNetworkIoTransmit) init() {
	m.data.SetName("mongodb.network.io.transmit")
	// NOTE(review): "The number of by transmitted." looks like a typo for
	// "The number of bytes transmitted."; this file is generated, so fix the
	// description in metadata.yaml and regenerate rather than editing here.
	m.data.SetDescription("The number of by transmitted.")
	m.data.SetUnit("By")
	m.data.SetEmptySum()
	m.data.Sum().SetIsMonotonic(false)
	m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
}

// recordDataPoint appends one attribute-less data point with the given
// timestamps and value. No-op when the metric is disabled.
func (m *metricMongodbNetworkIoTransmit) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
	if !m.config.Enabled {
		return
	}
	dp := m.data.Sum().DataPoints().AppendEmpty()
	dp.SetStartTimestamp(start)
	dp.SetTimestamp(ts)
	dp.SetIntValue(val)
}

// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricMongodbNetworkIoTransmit) updateCapacity() {
	if m.data.Sum().DataPoints().Len() > m.capacity {
		m.capacity = m.data.Sum().DataPoints().Len()
	}
}

// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMongodbNetworkIoTransmit) emit(metrics pmetric.MetricSlice) {
	if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
		m.updateCapacity()
		m.data.MoveTo(metrics.AppendEmpty())
		m.init()
	}
}

// newMetricMongodbNetworkIoTransmit builds the metric wrapper; the underlying
// pmetric.Metric is allocated only when the metric is enabled.
func newMetricMongodbNetworkIoTransmit(cfg MetricConfig) metricMongodbNetworkIoTransmit {
	m := metricMongodbNetworkIoTransmit{config: cfg}
	if cfg.Enabled {
		m.data = pmetric.NewMetric()
		m.init()
	}
	return m
}

type metricMongodbNetworkRequestCount struct {
	data     pmetric.Metric // data buffer for generated metric.
	config   MetricConfig   // metric config provided by user.
	capacity int            // max observed number of data points added to the metric.
}

// init fills mongodb.network.request.count metric with initial data.
func (m *metricMongodbNetworkRequestCount) init() {
	m.data.SetName("mongodb.network.request.count")
	m.data.SetDescription("The number of requests received by the server.")
	m.data.SetUnit("{requests}")
	m.data.SetEmptySum()
	m.data.Sum().SetIsMonotonic(false)
	m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
}

// recordDataPoint appends one attribute-less data point with the given
// timestamps and value. No-op when the metric is disabled.
func (m *metricMongodbNetworkRequestCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
	if !m.config.Enabled {
		return
	}
	dp := m.data.Sum().DataPoints().AppendEmpty()
	dp.SetStartTimestamp(start)
	dp.SetTimestamp(ts)
	dp.SetIntValue(val)
}

// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricMongodbNetworkRequestCount) updateCapacity() {
	if m.data.Sum().DataPoints().Len() > m.capacity {
		m.capacity = m.data.Sum().DataPoints().Len()
	}
}

// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMongodbNetworkRequestCount) emit(metrics pmetric.MetricSlice) {
	if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
		m.updateCapacity()
		m.data.MoveTo(metrics.AppendEmpty())
		m.init()
	}
}

// newMetricMongodbNetworkRequestCount builds the metric wrapper; the
// underlying pmetric.Metric is allocated only when the metric is enabled.
func newMetricMongodbNetworkRequestCount(cfg MetricConfig) metricMongodbNetworkRequestCount {
	m := metricMongodbNetworkRequestCount{config: cfg}
	if cfg.Enabled {
		m.data = pmetric.NewMetric()
		m.init()
	}
	return m
}

type metricMongodbObjectCount struct {
	data     pmetric.Metric // data buffer for generated metric.
	config   MetricConfig   // metric config provided by user.
	capacity int            // max observed number of data points added to the metric.
}

// init fills mongodb.object.count metric with initial data.
func (m *metricMongodbObjectCount) init() {
	m.data.SetName("mongodb.object.count")
	m.data.SetDescription("The number of objects.")
	m.data.SetUnit("{objects}")
	m.data.SetEmptySum()
	m.data.Sum().SetIsMonotonic(false)
	m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
	m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
}

// recordDataPoint appends one data point with the given timestamps, value,
// and "database" attribute. It is a no-op when the metric is disabled.
func (m *metricMongodbObjectCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, databaseAttributeValue string) {
	if !m.config.Enabled {
		return
	}
	dp := m.data.Sum().DataPoints().AppendEmpty()
	dp.SetStartTimestamp(start)
	dp.SetTimestamp(ts)
	dp.SetIntValue(val)
	dp.Attributes().PutStr("database", databaseAttributeValue)
}

// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricMongodbObjectCount) updateCapacity() {
	if m.data.Sum().DataPoints().Len() > m.capacity {
		m.capacity = m.data.Sum().DataPoints().Len()
	}
}

// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMongodbObjectCount) emit(metrics pmetric.MetricSlice) {
	if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
		m.updateCapacity()
		m.data.MoveTo(metrics.AppendEmpty())
		m.init()
	}
}

// newMetricMongodbObjectCount builds the metric wrapper; the underlying
// pmetric.Metric is allocated only when the metric is enabled.
func newMetricMongodbObjectCount(cfg MetricConfig) metricMongodbObjectCount {
	m := metricMongodbObjectCount{config: cfg}
	if cfg.Enabled {
		m.data = pmetric.NewMetric()
		m.init()
	}
	return m
}

type metricMongodbOperationCount struct {
	data     pmetric.Metric // data buffer for generated metric.
	config   MetricConfig   // metric config provided by user.
	capacity int            // max observed number of data points added to the metric.
}

// init fills mongodb.operation.count metric with initial data.
func (m *metricMongodbOperationCount) init() {
	m.data.SetName("mongodb.operation.count")
	m.data.SetDescription("The number of operations executed.")
	m.data.SetUnit("{operations}")
	m.data.SetEmptySum()
	m.data.Sum().SetIsMonotonic(true)
	m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
	m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
}

// recordDataPoint appends one data point with the given timestamps, value,
// and "operation" attribute. It is a no-op when the metric is disabled.
func (m *metricMongodbOperationCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, operationAttributeValue string) {
	if !m.config.Enabled {
		return
	}
	dp := m.data.Sum().DataPoints().AppendEmpty()
	dp.SetStartTimestamp(start)
	dp.SetTimestamp(ts)
	dp.SetIntValue(val)
	dp.Attributes().PutStr("operation", operationAttributeValue)
}

// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricMongodbOperationCount) updateCapacity() {
	if m.data.Sum().DataPoints().Len() > m.capacity {
		m.capacity = m.data.Sum().DataPoints().Len()
	}
}

// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMongodbOperationCount) emit(metrics pmetric.MetricSlice) {
	if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
		m.updateCapacity()
		m.data.MoveTo(metrics.AppendEmpty())
		m.init()
	}
}

// newMetricMongodbOperationCount builds the metric wrapper; the underlying
// pmetric.Metric is allocated only when the metric is enabled.
func newMetricMongodbOperationCount(cfg MetricConfig) metricMongodbOperationCount {
	m := metricMongodbOperationCount{config: cfg}
	if cfg.Enabled {
		m.data = pmetric.NewMetric()
		m.init()
	}
	return m
}

type metricMongodbOperationTime struct {
	data     pmetric.Metric // data buffer for generated metric.
	config   MetricConfig   // metric config provided by user.
	capacity int            // max observed number of data points added to the metric.
}

// init fills mongodb.operation.time metric with initial data.
func (m *metricMongodbOperationTime) init() {
	m.data.SetName("mongodb.operation.time")
	m.data.SetDescription("The total time spent performing operations.")
	m.data.SetUnit("ms")
	m.data.SetEmptySum()
	m.data.Sum().SetIsMonotonic(true)
	m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
	m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
}

// recordDataPoint appends one data point with the given timestamps, value,
// and "operation" attribute. It is a no-op when the metric is disabled.
func (m *metricMongodbOperationTime) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, operationAttributeValue string) {
	if !m.config.Enabled {
		return
	}
	dp := m.data.Sum().DataPoints().AppendEmpty()
	dp.SetStartTimestamp(start)
	dp.SetTimestamp(ts)
	dp.SetIntValue(val)
	dp.Attributes().PutStr("operation", operationAttributeValue)
}

// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricMongodbOperationTime) updateCapacity() {
	if m.data.Sum().DataPoints().Len() > m.capacity {
		m.capacity = m.data.Sum().DataPoints().Len()
	}
}

// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMongodbOperationTime) emit(metrics pmetric.MetricSlice) {
	if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
		m.updateCapacity()
		m.data.MoveTo(metrics.AppendEmpty())
		m.init()
	}
}

// newMetricMongodbOperationTime builds the metric wrapper; the underlying
// pmetric.Metric is allocated only when the metric is enabled.
func newMetricMongodbOperationTime(cfg MetricConfig) metricMongodbOperationTime {
	m := metricMongodbOperationTime{config: cfg}
	if cfg.Enabled {
		m.data = pmetric.NewMetric()
		m.init()
	}
	return m
}

type metricMongodbSessionCount struct {
	data     pmetric.Metric // data buffer for generated metric.
	config   MetricConfig   // metric config provided by user.
	capacity int            // max observed number of data points added to the metric.
}

// init fills mongodb.session.count metric with initial data.
func (m *metricMongodbSessionCount) init() {
	m.data.SetName("mongodb.session.count")
	m.data.SetDescription("The total number of active sessions.")
	m.data.SetUnit("{sessions}")
	m.data.SetEmptySum()
	m.data.Sum().SetIsMonotonic(false)
	m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
}

// recordDataPoint appends one attribute-less data point with the given
// timestamps and value. No-op when the metric is disabled.
func (m *metricMongodbSessionCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
	if !m.config.Enabled {
		return
	}
	dp := m.data.Sum().DataPoints().AppendEmpty()
	dp.SetStartTimestamp(start)
	dp.SetTimestamp(ts)
	dp.SetIntValue(val)
}

// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricMongodbSessionCount) updateCapacity() {
	if m.data.Sum().DataPoints().Len() > m.capacity {
		m.capacity = m.data.Sum().DataPoints().Len()
	}
}

// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMongodbSessionCount) emit(metrics pmetric.MetricSlice) {
	if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
		m.updateCapacity()
		m.data.MoveTo(metrics.AppendEmpty())
		m.init()
	}
}

// newMetricMongodbSessionCount builds the metric wrapper; the underlying
// pmetric.Metric is allocated only when the metric is enabled.
func newMetricMongodbSessionCount(cfg MetricConfig) metricMongodbSessionCount {
	m := metricMongodbSessionCount{config: cfg}
	if cfg.Enabled {
		m.data = pmetric.NewMetric()
		m.init()
	}
	return m
}

type metricMongodbStorageSize struct {
	data     pmetric.Metric // data buffer for generated metric.
	config   MetricConfig   // metric config provided by user.
	capacity int            // max observed number of data points added to the metric.
}

// init fills mongodb.storage.size metric with initial data.
func (m *metricMongodbStorageSize) init() {
	m.data.SetName("mongodb.storage.size")
	m.data.SetDescription("The total amount of storage allocated to this collection.")
	m.data.SetUnit("By")
	m.data.SetEmptySum()
	// NOTE(review): monotonic=true is surprising for an allocated-size metric,
	// which can shrink (e.g. after compaction); if this is wrong, change it in
	// metadata.yaml and regenerate — do not hand-edit this generated file.
	m.data.Sum().SetIsMonotonic(true)
	m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
	m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
}

// recordDataPoint appends one data point with the given timestamps, value,
// and "database" attribute. It is a no-op when the metric is disabled.
func (m *metricMongodbStorageSize) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, databaseAttributeValue string) {
	if !m.config.Enabled {
		return
	}
	dp := m.data.Sum().DataPoints().AppendEmpty()
	dp.SetStartTimestamp(start)
	dp.SetTimestamp(ts)
	dp.SetIntValue(val)
	dp.Attributes().PutStr("database", databaseAttributeValue)
}

// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricMongodbStorageSize) updateCapacity() {
	if m.data.Sum().DataPoints().Len() > m.capacity {
		m.capacity = m.data.Sum().DataPoints().Len()
	}
}

// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMongodbStorageSize) emit(metrics pmetric.MetricSlice) {
	if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
		m.updateCapacity()
		m.data.MoveTo(metrics.AppendEmpty())
		m.init()
	}
}

// newMetricMongodbStorageSize builds the metric wrapper; the underlying
// pmetric.Metric is allocated only when the metric is enabled.
func newMetricMongodbStorageSize(cfg MetricConfig) metricMongodbStorageSize {
	m := metricMongodbStorageSize{config: cfg}
	if cfg.Enabled {
		m.data = pmetric.NewMetric()
		m.init()
	}
	return m
}

// MetricsBuilder provides an interface for scrapers to report metrics while taking care of all the transformations
// required to produce metric representation defined in metadata and user config.
type MetricsBuilder struct {
	config                              MetricsBuilderConfig // config of the metrics builder.
	startTime                           pcommon.Timestamp    // start time that will be applied to all recorded data points.
	metricsCapacity                     int                  // maximum observed number of metrics per resource.
	metricsBuffer                       pmetric.Metrics      // accumulates metrics data before emitting.
	buildInfo                           component.BuildInfo  // contains version information.
	resourceAttributeIncludeFilter      map[string]filter.Filter
	resourceAttributeExcludeFilter      map[string]filter.Filter
	metricMongodbCacheOperations        metricMongodbCacheOperations
	metricMongodbCollectionCount        metricMongodbCollectionCount
	metricMongodbConnectionCount        metricMongodbConnectionCount
	metricMongodbCursorCount            metricMongodbCursorCount
	metricMongodbCursorTimeoutCount     metricMongodbCursorTimeoutCount
	metricMongodbDataSize               metricMongodbDataSize
	metricMongodbDatabaseCount          metricMongodbDatabaseCount
	metricMongodbDocumentOperationCount metricMongodbDocumentOperationCount
	metricMongodbExtentCount            metricMongodbExtentCount
	metricMongodbGlobalLockTime         metricMongodbGlobalLockTime
	metricMongodbIndexAccessCount       metricMongodbIndexAccessCount
	metricMongodbIndexCount             metricMongodbIndexCount
	metricMongodbIndexSize              metricMongodbIndexSize
	metricMongodbLockAcquireCount       metricMongodbLockAcquireCount
	metricMongodbLockAcquireTime        metricMongodbLockAcquireTime
	metricMongodbLockAcquireWaitCount   metricMongodbLockAcquireWaitCount
	metricMongodbLockDeadlockCount      metricMongodbLockDeadlockCount
	metricMongodbMemoryUsage            metricMongodbMemoryUsage
	metricMongodbNetworkIoReceive       metricMongodbNetworkIoReceive
	metricMongodbNetworkIoTransmit      metricMongodbNetworkIoTransmit
	metricMongodbNetworkRequestCount    metricMongodbNetworkRequestCount
	metricMongodbObjectCount            metricMongodbObjectCount
	metricMongodbOperationCount         metricMongodbOperationCount
	metricMongodbOperationTime          metricMongodbOperationTime
	metricMongodbSessionCount           metricMongodbSessionCount
	metricMongodbStorageSize            metricMongodbStorageSize
}

// MetricBuilderOption applies changes to default metrics builder.
type MetricBuilderOption interface {
	apply(*MetricsBuilder)
}

// metricBuilderOptionFunc adapts a plain function to the MetricBuilderOption interface.
type metricBuilderOptionFunc func(mb *MetricsBuilder)

func (mbof metricBuilderOptionFunc) apply(mb *MetricsBuilder) {
	mbof(mb)
}

// WithStartTime sets startTime on the metrics builder.
func WithStartTime(startTime pcommon.Timestamp) MetricBuilderOption {
	return metricBuilderOptionFunc(func(mb *MetricsBuilder) {
		mb.startTime = startTime
	})
}

// NewMetricsBuilder creates a MetricsBuilder with all per-metric wrappers
// initialized from the user config, installs the resource-attribute
// include/exclude filters, and applies any options.
func NewMetricsBuilder(mbc MetricsBuilderConfig, settings receiver.Settings, options ...MetricBuilderOption) *MetricsBuilder {
	mb := &MetricsBuilder{
		config:                              mbc,
		startTime:                           pcommon.NewTimestampFromTime(time.Now()),
		metricsBuffer:                       pmetric.NewMetrics(),
		buildInfo:                           settings.BuildInfo,
		metricMongodbCacheOperations:        newMetricMongodbCacheOperations(mbc.Metrics.MongodbCacheOperations),
		metricMongodbCollectionCount:        newMetricMongodbCollectionCount(mbc.Metrics.MongodbCollectionCount),
		metricMongodbConnectionCount:        newMetricMongodbConnectionCount(mbc.Metrics.MongodbConnectionCount),
		metricMongodbCursorCount:            newMetricMongodbCursorCount(mbc.Metrics.MongodbCursorCount),
		metricMongodbCursorTimeoutCount:     newMetricMongodbCursorTimeoutCount(mbc.Metrics.MongodbCursorTimeoutCount),
		metricMongodbDataSize:               newMetricMongodbDataSize(mbc.Metrics.MongodbDataSize),
		metricMongodbDatabaseCount:          newMetricMongodbDatabaseCount(mbc.Metrics.MongodbDatabaseCount),
		metricMongodbDocumentOperationCount: newMetricMongodbDocumentOperationCount(mbc.Metrics.MongodbDocumentOperationCount),
		metricMongodbExtentCount:            newMetricMongodbExtentCount(mbc.Metrics.MongodbExtentCount),
		metricMongodbGlobalLockTime:         newMetricMongodbGlobalLockTime(mbc.Metrics.MongodbGlobalLockTime),
		metricMongodbIndexAccessCount:       newMetricMongodbIndexAccessCount(mbc.Metrics.MongodbIndexAccessCount),
		metricMongodbIndexCount:             newMetricMongodbIndexCount(mbc.Metrics.MongodbIndexCount),
		metricMongodbIndexSize:              newMetricMongodbIndexSize(mbc.Metrics.MongodbIndexSize),
		metricMongodbLockAcquireCount:       newMetricMongodbLockAcquireCount(mbc.Metrics.MongodbLockAcquireCount),
		metricMongodbLockAcquireTime:        newMetricMongodbLockAcquireTime(mbc.Metrics.MongodbLockAcquireTime),
		metricMongodbLockAcquireWaitCount:   newMetricMongodbLockAcquireWaitCount(mbc.Metrics.MongodbLockAcquireWaitCount),
		metricMongodbLockDeadlockCount:      newMetricMongodbLockDeadlockCount(mbc.Metrics.MongodbLockDeadlockCount),
		metricMongodbMemoryUsage:            newMetricMongodbMemoryUsage(mbc.Metrics.MongodbMemoryUsage),
		metricMongodbNetworkIoReceive:       newMetricMongodbNetworkIoReceive(mbc.Metrics.MongodbNetworkIoReceive),
		metricMongodbNetworkIoTransmit:      newMetricMongodbNetworkIoTransmit(mbc.Metrics.MongodbNetworkIoTransmit),
		metricMongodbNetworkRequestCount:    newMetricMongodbNetworkRequestCount(mbc.Metrics.MongodbNetworkRequestCount),
		metricMongodbObjectCount:            newMetricMongodbObjectCount(mbc.Metrics.MongodbObjectCount),
		metricMongodbOperationCount:         newMetricMongodbOperationCount(mbc.Metrics.MongodbOperationCount),
		metricMongodbOperationTime:          newMetricMongodbOperationTime(mbc.Metrics.MongodbOperationTime),
		metricMongodbSessionCount:           newMetricMongodbSessionCount(mbc.Metrics.MongodbSessionCount),
		metricMongodbStorageSize:            newMetricMongodbStorageSize(mbc.Metrics.MongodbStorageSize),
		resourceAttributeIncludeFilter:      make(map[string]filter.Filter),
		resourceAttributeExcludeFilter:      make(map[string]filter.Filter),
	}
	if mbc.ResourceAttributes.Database.MetricsInclude != nil {
		mb.resourceAttributeIncludeFilter["database"] = filter.CreateFilter(mbc.ResourceAttributes.Database.MetricsInclude)
	}
	if mbc.ResourceAttributes.Database.MetricsExclude != nil {
		mb.resourceAttributeExcludeFilter["database"] = filter.CreateFilter(mbc.ResourceAttributes.Database.MetricsExclude)
	}
	for _, op := range options {
		op.apply(mb)
	}
	return mb
}

// NewResourceBuilder returns a new resource builder that should be used to build a resource associated with for the emitted metrics.
func (mb *MetricsBuilder) NewResourceBuilder() *ResourceBuilder {
	return NewResourceBuilder(mb.config.ResourceAttributes)
}

// updateCapacity updates max length of metrics and resource attributes that will be used for the slice capacity.
func (mb *MetricsBuilder) updateCapacity(rm pmetric.ResourceMetrics) {
	if mb.metricsCapacity < rm.ScopeMetrics().At(0).Metrics().Len() {
		mb.metricsCapacity = rm.ScopeMetrics().At(0).Metrics().Len()
	}
}

// ResourceMetricsOption applies changes to provided resource metrics.
type ResourceMetricsOption interface {
	apply(pmetric.ResourceMetrics)
}

// resourceMetricsOptionFunc adapts a plain function to the ResourceMetricsOption interface.
type resourceMetricsOptionFunc func(pmetric.ResourceMetrics)

func (rmof resourceMetricsOptionFunc) apply(rm pmetric.ResourceMetrics) {
	rmof(rm)
}

// WithResource sets the provided resource on the emitted ResourceMetrics.
// It's recommended to use ResourceBuilder to create the resource.
func WithResource(res pcommon.Resource) ResourceMetricsOption {
	return resourceMetricsOptionFunc(func(rm pmetric.ResourceMetrics) {
		res.CopyTo(rm.Resource())
	})
}

// WithStartTimeOverride overrides start time for all the resource metrics data points.
// This option should be only used if different start time has to be set on metrics coming from different resources.
func WithStartTimeOverride(start pcommon.Timestamp) ResourceMetricsOption {
	return resourceMetricsOptionFunc(func(rm pmetric.ResourceMetrics) {
		var dps pmetric.NumberDataPointSlice
		metrics := rm.ScopeMetrics().At(0).Metrics()
		for i := 0; i < metrics.Len(); i++ {
			// Only Gauge and Sum metrics carry NumberDataPointSlice; for any
			// other metric type dps keeps the slice from the previous
			// iteration, so the inner loop re-applies the same timestamp to
			// those points (an idempotent no-op).
			switch metrics.At(i).Type() {
			case pmetric.MetricTypeGauge:
				dps = metrics.At(i).Gauge().DataPoints()
			case pmetric.MetricTypeSum:
				dps = metrics.At(i).Sum().DataPoints()
			}
			for j := 0; j < dps.Len(); j++ {
				dps.At(j).SetStartTimestamp(start)
			}
		}
	})
}

// EmitForResource saves all the generated metrics under a new resource and updates the internal state to be ready for
// recording another set of data points as part of another resource. This function can be helpful when one scraper
// needs to emit metrics from several resources. Otherwise calling this function is not required,
// just `Emit` function can be called instead.
// Resource attributes should be provided as ResourceMetricsOption arguments.
func (mb *MetricsBuilder) EmitForResource(options ...ResourceMetricsOption) {
	// Build a fresh ResourceMetrics with a single scope carrying this
	// receiver's scope name and build version.
	rm := pmetric.NewResourceMetrics()
	ils := rm.ScopeMetrics().AppendEmpty()
	ils.Scope().SetName(ScopeName)
	ils.Scope().SetVersion(mb.buildInfo.Version)
	ils.Metrics().EnsureCapacity(mb.metricsCapacity)
	// Flush every per-metric builder into the scope; builders for disabled
	// metrics emit nothing.
	mb.metricMongodbCacheOperations.emit(ils.Metrics())
	mb.metricMongodbCollectionCount.emit(ils.Metrics())
	mb.metricMongodbConnectionCount.emit(ils.Metrics())
	mb.metricMongodbCursorCount.emit(ils.Metrics())
	mb.metricMongodbCursorTimeoutCount.emit(ils.Metrics())
	mb.metricMongodbDataSize.emit(ils.Metrics())
	mb.metricMongodbDatabaseCount.emit(ils.Metrics())
	mb.metricMongodbDocumentOperationCount.emit(ils.Metrics())
	mb.metricMongodbExtentCount.emit(ils.Metrics())
	mb.metricMongodbGlobalLockTime.emit(ils.Metrics())
	mb.metricMongodbIndexAccessCount.emit(ils.Metrics())
	mb.metricMongodbIndexCount.emit(ils.Metrics())
	mb.metricMongodbIndexSize.emit(ils.Metrics())
	mb.metricMongodbLockAcquireCount.emit(ils.Metrics())
	mb.metricMongodbLockAcquireTime.emit(ils.Metrics())
	mb.metricMongodbLockAcquireWaitCount.emit(ils.Metrics())
	mb.metricMongodbLockDeadlockCount.emit(ils.Metrics())
	mb.metricMongodbMemoryUsage.emit(ils.Metrics())
	mb.metricMongodbNetworkIoReceive.emit(ils.Metrics())
	mb.metricMongodbNetworkIoTransmit.emit(ils.Metrics())
	mb.metricMongodbNetworkRequestCount.emit(ils.Metrics())
	mb.metricMongodbObjectCount.emit(ils.Metrics())
	mb.metricMongodbOperationCount.emit(ils.Metrics())
	mb.metricMongodbOperationTime.emit(ils.Metrics())
	mb.metricMongodbSessionCount.emit(ils.Metrics())
	mb.metricMongodbStorageSize.emit(ils.Metrics())
	// Options (e.g. WithResource) run after emission so they can act on the
	// fully-populated ResourceMetrics.
	for _, op := range options {
		op.apply(rm)
	}
	// Resource-attribute filtering: returning early drops the whole resource
	// (the builders above were already drained, so their state is reset).
	for attr, filter := range mb.resourceAttributeIncludeFilter {
		if val, ok := rm.Resource().Attributes().Get(attr); ok && !filter.Matches(val.AsString()) {
			return
		}
	}
	for attr, filter := range mb.resourceAttributeExcludeFilter {
		if val, ok := rm.Resource().Attributes().Get(attr); ok && filter.Matches(val.AsString()) {
			return
		}
	}
	// Only buffer non-empty resources.
	if ils.Metrics().Len() > 0 {
		mb.updateCapacity(rm)
		rm.MoveTo(mb.metricsBuffer.ResourceMetrics().AppendEmpty())
	}
}

// Emit returns all the metrics accumulated by the metrics builder and updates the internal state to be ready for
// recording another set of metrics. This function will be responsible for applying all the transformations required to
// produce metric representation defined in metadata and user config, e.g. delta or cumulative.
func (mb *MetricsBuilder) Emit(options ...ResourceMetricsOption) pmetric.Metrics {
	mb.EmitForResource(options...)
	// Hand the accumulated buffer to the caller and start a fresh one.
	metrics := mb.metricsBuffer
	mb.metricsBuffer = pmetric.NewMetrics()
	return metrics
}

// RecordMongodbCacheOperationsDataPoint adds a data point to mongodb.cache.operations metric.
// Each Record*DataPoint helper below stamps the point with mb.startTime as its
// start timestamp and converts enum attributes to their string form.
func (mb *MetricsBuilder) RecordMongodbCacheOperationsDataPoint(ts pcommon.Timestamp, val int64, typeAttributeValue AttributeType) {
	mb.metricMongodbCacheOperations.recordDataPoint(mb.startTime, ts, val, typeAttributeValue.String())
}

// RecordMongodbCollectionCountDataPoint adds a data point to mongodb.collection.count metric.
func (mb *MetricsBuilder) RecordMongodbCollectionCountDataPoint(ts pcommon.Timestamp, val int64, databaseAttributeValue string) {
	mb.metricMongodbCollectionCount.recordDataPoint(mb.startTime, ts, val, databaseAttributeValue)
}

// RecordMongodbConnectionCountDataPoint adds a data point to mongodb.connection.count metric.
func (mb *MetricsBuilder) RecordMongodbConnectionCountDataPoint(ts pcommon.Timestamp, val int64, databaseAttributeValue string, connectionTypeAttributeValue AttributeConnectionType) {
	mb.metricMongodbConnectionCount.recordDataPoint(mb.startTime, ts, val, databaseAttributeValue, connectionTypeAttributeValue.String())
}

// RecordMongodbCursorCountDataPoint adds a data point to mongodb.cursor.count metric.
func (mb *MetricsBuilder) RecordMongodbCursorCountDataPoint(ts pcommon.Timestamp, val int64) {
	mb.metricMongodbCursorCount.recordDataPoint(mb.startTime, ts, val)
}

// RecordMongodbCursorTimeoutCountDataPoint adds a data point to mongodb.cursor.timeout.count metric.
func (mb *MetricsBuilder) RecordMongodbCursorTimeoutCountDataPoint(ts pcommon.Timestamp, val int64) {
	mb.metricMongodbCursorTimeoutCount.recordDataPoint(mb.startTime, ts, val)
}

// RecordMongodbDataSizeDataPoint adds a data point to mongodb.data.size metric.
func (mb *MetricsBuilder) RecordMongodbDataSizeDataPoint(ts pcommon.Timestamp, val int64, databaseAttributeValue string) {
	mb.metricMongodbDataSize.recordDataPoint(mb.startTime, ts, val, databaseAttributeValue)
}

// RecordMongodbDatabaseCountDataPoint adds a data point to mongodb.database.count metric.
// The data point is recorded with mb.startTime as its start timestamp.
func (mb *MetricsBuilder) RecordMongodbDatabaseCountDataPoint(ts pcommon.Timestamp, val int64) {
	mb.metricMongodbDatabaseCount.recordDataPoint(mb.startTime, ts, val)
}

// RecordMongodbDocumentOperationCountDataPoint adds a data point to mongodb.document.operation.count metric.
func (mb *MetricsBuilder) RecordMongodbDocumentOperationCountDataPoint(ts pcommon.Timestamp, val int64, databaseAttributeValue string, operationAttributeValue AttributeOperation) {
	mb.metricMongodbDocumentOperationCount.recordDataPoint(mb.startTime, ts, val, databaseAttributeValue, operationAttributeValue.String())
}

// RecordMongodbExtentCountDataPoint adds a data point to mongodb.extent.count metric.
func (mb *MetricsBuilder) RecordMongodbExtentCountDataPoint(ts pcommon.Timestamp, val int64, databaseAttributeValue string) {
	mb.metricMongodbExtentCount.recordDataPoint(mb.startTime, ts, val, databaseAttributeValue)
}

// RecordMongodbGlobalLockTimeDataPoint adds a data point to mongodb.global_lock.time metric.
func (mb *MetricsBuilder) RecordMongodbGlobalLockTimeDataPoint(ts pcommon.Timestamp, val int64) {
	mb.metricMongodbGlobalLockTime.recordDataPoint(mb.startTime, ts, val)
}

// RecordMongodbIndexAccessCountDataPoint adds a data point to mongodb.index.access.count metric.
func (mb *MetricsBuilder) RecordMongodbIndexAccessCountDataPoint(ts pcommon.Timestamp, val int64, databaseAttributeValue string, collectionAttributeValue string) {
	mb.metricMongodbIndexAccessCount.recordDataPoint(mb.startTime, ts, val, databaseAttributeValue, collectionAttributeValue)
}

// RecordMongodbIndexCountDataPoint adds a data point to mongodb.index.count metric.
func (mb *MetricsBuilder) RecordMongodbIndexCountDataPoint(ts pcommon.Timestamp, val int64, databaseAttributeValue string) {
	mb.metricMongodbIndexCount.recordDataPoint(mb.startTime, ts, val, databaseAttributeValue)
}

// RecordMongodbIndexSizeDataPoint adds a data point to mongodb.index.size metric.
// The data point is recorded with mb.startTime as its start timestamp.
func (mb *MetricsBuilder) RecordMongodbIndexSizeDataPoint(ts pcommon.Timestamp, val int64, databaseAttributeValue string) {
	mb.metricMongodbIndexSize.recordDataPoint(mb.startTime, ts, val, databaseAttributeValue)
}

// RecordMongodbLockAcquireCountDataPoint adds a data point to mongodb.lock.acquire.count metric.
// Lock type and mode enums are stringified before being attached as attributes.
func (mb *MetricsBuilder) RecordMongodbLockAcquireCountDataPoint(ts pcommon.Timestamp, val int64, databaseAttributeValue string, lockTypeAttributeValue AttributeLockType, lockModeAttributeValue AttributeLockMode) {
	mb.metricMongodbLockAcquireCount.recordDataPoint(mb.startTime, ts, val, databaseAttributeValue, lockTypeAttributeValue.String(), lockModeAttributeValue.String())
}

// RecordMongodbLockAcquireTimeDataPoint adds a data point to mongodb.lock.acquire.time metric.
func (mb *MetricsBuilder) RecordMongodbLockAcquireTimeDataPoint(ts pcommon.Timestamp, val int64, databaseAttributeValue string, lockTypeAttributeValue AttributeLockType, lockModeAttributeValue AttributeLockMode) {
	mb.metricMongodbLockAcquireTime.recordDataPoint(mb.startTime, ts, val, databaseAttributeValue, lockTypeAttributeValue.String(), lockModeAttributeValue.String())
}

// RecordMongodbLockAcquireWaitCountDataPoint adds a data point to mongodb.lock.acquire.wait_count metric.
func (mb *MetricsBuilder) RecordMongodbLockAcquireWaitCountDataPoint(ts pcommon.Timestamp, val int64, databaseAttributeValue string, lockTypeAttributeValue AttributeLockType, lockModeAttributeValue AttributeLockMode) {
	mb.metricMongodbLockAcquireWaitCount.recordDataPoint(mb.startTime, ts, val, databaseAttributeValue, lockTypeAttributeValue.String(), lockModeAttributeValue.String())
}

// RecordMongodbLockDeadlockCountDataPoint adds a data point to mongodb.lock.deadlock.count metric.
// The data point is recorded with mb.startTime as its start timestamp.
func (mb *MetricsBuilder) RecordMongodbLockDeadlockCountDataPoint(ts pcommon.Timestamp, val int64, databaseAttributeValue string, lockTypeAttributeValue AttributeLockType, lockModeAttributeValue AttributeLockMode) {
	mb.metricMongodbLockDeadlockCount.recordDataPoint(mb.startTime, ts, val, databaseAttributeValue, lockTypeAttributeValue.String(), lockModeAttributeValue.String())
}

// RecordMongodbMemoryUsageDataPoint adds a data point to mongodb.memory.usage metric.
func (mb *MetricsBuilder) RecordMongodbMemoryUsageDataPoint(ts pcommon.Timestamp, val int64, databaseAttributeValue string, memoryTypeAttributeValue AttributeMemoryType) {
	mb.metricMongodbMemoryUsage.recordDataPoint(mb.startTime, ts, val, databaseAttributeValue, memoryTypeAttributeValue.String())
}

// RecordMongodbNetworkIoReceiveDataPoint adds a data point to mongodb.network.io.receive metric.
func (mb *MetricsBuilder) RecordMongodbNetworkIoReceiveDataPoint(ts pcommon.Timestamp, val int64) {
	mb.metricMongodbNetworkIoReceive.recordDataPoint(mb.startTime, ts, val)
}

// RecordMongodbNetworkIoTransmitDataPoint adds a data point to mongodb.network.io.transmit metric.
func (mb *MetricsBuilder) RecordMongodbNetworkIoTransmitDataPoint(ts pcommon.Timestamp, val int64) {
	mb.metricMongodbNetworkIoTransmit.recordDataPoint(mb.startTime, ts, val)
}

// RecordMongodbNetworkRequestCountDataPoint adds a data point to mongodb.network.request.count metric.
func (mb *MetricsBuilder) RecordMongodbNetworkRequestCountDataPoint(ts pcommon.Timestamp, val int64) {
	mb.metricMongodbNetworkRequestCount.recordDataPoint(mb.startTime, ts, val)
}

// RecordMongodbObjectCountDataPoint adds a data point to mongodb.object.count metric.
func (mb *MetricsBuilder) RecordMongodbObjectCountDataPoint(ts pcommon.Timestamp, val int64, databaseAttributeValue string) {
	mb.metricMongodbObjectCount.recordDataPoint(mb.startTime, ts, val, databaseAttributeValue)
}

// RecordMongodbOperationCountDataPoint adds a data point to mongodb.operation.count metric.
// The data point is recorded with mb.startTime as its start timestamp.
func (mb *MetricsBuilder) RecordMongodbOperationCountDataPoint(ts pcommon.Timestamp, val int64, operationAttributeValue AttributeOperation) {
	mb.metricMongodbOperationCount.recordDataPoint(mb.startTime, ts, val, operationAttributeValue.String())
}

// RecordMongodbOperationTimeDataPoint adds a data point to mongodb.operation.time metric.
func (mb *MetricsBuilder) RecordMongodbOperationTimeDataPoint(ts pcommon.Timestamp, val int64, operationAttributeValue AttributeOperation) {
	mb.metricMongodbOperationTime.recordDataPoint(mb.startTime, ts, val, operationAttributeValue.String())
}

// RecordMongodbSessionCountDataPoint adds a data point to mongodb.session.count metric.
func (mb *MetricsBuilder) RecordMongodbSessionCountDataPoint(ts pcommon.Timestamp, val int64) {
	mb.metricMongodbSessionCount.recordDataPoint(mb.startTime, ts, val)
}

// RecordMongodbStorageSizeDataPoint adds a data point to mongodb.storage.size metric.
func (mb *MetricsBuilder) RecordMongodbStorageSizeDataPoint(ts pcommon.Timestamp, val int64, databaseAttributeValue string) {
	mb.metricMongodbStorageSize.recordDataPoint(mb.startTime, ts, val, databaseAttributeValue)
}

// Reset resets metrics builder to its initial state. It should be used when external metrics source is restarted,
// and metrics builder should update its startTime and reset its internal state accordingly.
// startTime is re-captured from the wall clock; options may override it (e.g. WithStartTime).
func (mb *MetricsBuilder) Reset(options ...MetricBuilderOption) {
	mb.startTime = pcommon.NewTimestampFromTime(time.Now())
	for _, op := range options {
		op.apply(mb)
	}
}