receiver/aerospikereceiver/internal/metadata/generated_metrics.go

// Code generated by mdatagen. DO NOT EDIT.

package metadata

import (
	"fmt"
	"strconv"
	"time"

	"go.opentelemetry.io/collector/component"
	"go.opentelemetry.io/collector/filter"
	"go.opentelemetry.io/collector/pdata/pcommon"
	"go.opentelemetry.io/collector/pdata/pmetric"
	"go.opentelemetry.io/collector/receiver"
)

// AttributeConnectionOp specifies the value connection_op attribute.
type AttributeConnectionOp int

const (
	_ AttributeConnectionOp = iota
	AttributeConnectionOpClose
	AttributeConnectionOpOpen
)

// String returns the string representation of the AttributeConnectionOp.
func (av AttributeConnectionOp) String() string {
	switch av {
	case AttributeConnectionOpClose:
		return "close"
	case AttributeConnectionOpOpen:
		return "open"
	}
	return ""
}

// MapAttributeConnectionOp is a helper map of string to AttributeConnectionOp attribute value.
var MapAttributeConnectionOp = map[string]AttributeConnectionOp{
	"close": AttributeConnectionOpClose,
	"open":  AttributeConnectionOpOpen,
}

// AttributeConnectionType specifies the value connection_type attribute.
type AttributeConnectionType int

const (
	_ AttributeConnectionType = iota
	AttributeConnectionTypeClient
	AttributeConnectionTypeFabric
	AttributeConnectionTypeHeartbeat
)

// String returns the string representation of the AttributeConnectionType.
func (av AttributeConnectionType) String() string {
	switch av {
	case AttributeConnectionTypeClient:
		return "client"
	case AttributeConnectionTypeFabric:
		return "fabric"
	case AttributeConnectionTypeHeartbeat:
		return "heartbeat"
	}
	return ""
}

// MapAttributeConnectionType is a helper map of string to AttributeConnectionType attribute value.
var MapAttributeConnectionType = map[string]AttributeConnectionType{
	"client":    AttributeConnectionTypeClient,
	"fabric":    AttributeConnectionTypeFabric,
	"heartbeat": AttributeConnectionTypeHeartbeat,
}

// AttributeIndexType specifies the value index_type attribute.
type AttributeIndexType int

const (
	_ AttributeIndexType = iota
	AttributeIndexTypePrimary
	AttributeIndexTypeSecondary
)

// String returns the string representation of the AttributeIndexType.
func (av AttributeIndexType) String() string {
	switch av {
	case AttributeIndexTypePrimary:
		return "primary"
	case AttributeIndexTypeSecondary:
		return "secondary"
	}
	return ""
}

// MapAttributeIndexType is a helper map of string to AttributeIndexType attribute value.
var MapAttributeIndexType = map[string]AttributeIndexType{
	"primary":   AttributeIndexTypePrimary,
	"secondary": AttributeIndexTypeSecondary,
}

// AttributeNamespaceComponent specifies the value namespace_component attribute.
type AttributeNamespaceComponent int

const (
	_ AttributeNamespaceComponent = iota
	AttributeNamespaceComponentData
	AttributeNamespaceComponentIndex
	AttributeNamespaceComponentSetIndex
	AttributeNamespaceComponentSecondaryIndex
)

// String returns the string representation of the AttributeNamespaceComponent.
func (av AttributeNamespaceComponent) String() string {
	switch av {
	case AttributeNamespaceComponentData:
		return "data"
	case AttributeNamespaceComponentIndex:
		return "index"
	case AttributeNamespaceComponentSetIndex:
		return "set_index"
	case AttributeNamespaceComponentSecondaryIndex:
		return "secondary_index"
	}
	return ""
}

// MapAttributeNamespaceComponent is a helper map of string to AttributeNamespaceComponent attribute value.
var MapAttributeNamespaceComponent = map[string]AttributeNamespaceComponent{
	"data":            AttributeNamespaceComponentData,
	"index":           AttributeNamespaceComponentIndex,
	"set_index":       AttributeNamespaceComponentSetIndex,
	"secondary_index": AttributeNamespaceComponentSecondaryIndex,
}

// AttributeQueryResult specifies the value query_result attribute.
type AttributeQueryResult int

const (
	_ AttributeQueryResult = iota
	AttributeQueryResultAbort
	AttributeQueryResultComplete
	AttributeQueryResultError
	AttributeQueryResultTimeout
)

// String returns the string representation of the AttributeQueryResult.
func (av AttributeQueryResult) String() string {
	switch av {
	case AttributeQueryResultAbort:
		return "abort"
	case AttributeQueryResultComplete:
		return "complete"
	case AttributeQueryResultError:
		return "error"
	case AttributeQueryResultTimeout:
		return "timeout"
	}
	return ""
}

// MapAttributeQueryResult is a helper map of string to AttributeQueryResult attribute value.
var MapAttributeQueryResult = map[string]AttributeQueryResult{
	"abort":    AttributeQueryResultAbort,
	"complete": AttributeQueryResultComplete,
	"error":    AttributeQueryResultError,
	"timeout":  AttributeQueryResultTimeout,
}

// AttributeQueryType specifies the value query_type attribute.
type AttributeQueryType int

const (
	_ AttributeQueryType = iota
	AttributeQueryTypeAggregation
	AttributeQueryTypeBasic
	AttributeQueryTypeShort
	AttributeQueryTypeLongBasic
	AttributeQueryTypeShortBasic
	AttributeQueryTypeOpsBackground
	AttributeQueryTypeUdfBackground
)

// String returns the string representation of the AttributeQueryType.
func (av AttributeQueryType) String() string {
	switch av {
	case AttributeQueryTypeAggregation:
		return "aggregation"
	case AttributeQueryTypeBasic:
		return "basic"
	case AttributeQueryTypeShort:
		return "short"
	case AttributeQueryTypeLongBasic:
		return "long_basic"
	case AttributeQueryTypeShortBasic:
		return "short_basic"
	case AttributeQueryTypeOpsBackground:
		return "ops_background"
	case AttributeQueryTypeUdfBackground:
		return "udf_background"
	}
	return ""
}

// MapAttributeQueryType is a helper map of string to AttributeQueryType attribute value.
var MapAttributeQueryType = map[string]AttributeQueryType{
	"aggregation":    AttributeQueryTypeAggregation,
	"basic":          AttributeQueryTypeBasic,
	"short":          AttributeQueryTypeShort,
	"long_basic":     AttributeQueryTypeLongBasic,
	"short_basic":    AttributeQueryTypeShortBasic,
	"ops_background": AttributeQueryTypeOpsBackground,
	"udf_background": AttributeQueryTypeUdfBackground,
}

// AttributeScanResult specifies the value scan_result attribute.
type AttributeScanResult int

const (
	_ AttributeScanResult = iota
	AttributeScanResultAbort
	AttributeScanResultComplete
	AttributeScanResultError
)

// String returns the string representation of the AttributeScanResult.
func (av AttributeScanResult) String() string {
	switch av {
	case AttributeScanResultAbort:
		return "abort"
	case AttributeScanResultComplete:
		return "complete"
	case AttributeScanResultError:
		return "error"
	}
	return ""
}

// MapAttributeScanResult is a helper map of string to AttributeScanResult attribute value.
var MapAttributeScanResult = map[string]AttributeScanResult{
	"abort":    AttributeScanResultAbort,
	"complete": AttributeScanResultComplete,
	"error":    AttributeScanResultError,
}

// AttributeScanType specifies the value scan_type attribute.
type AttributeScanType int

const (
	_ AttributeScanType = iota
	AttributeScanTypeAggregation
	AttributeScanTypeBasic
	AttributeScanTypeOpsBackground
	AttributeScanTypeUdfBackground
)

// String returns the string representation of the AttributeScanType.
func (av AttributeScanType) String() string {
	switch av {
	case AttributeScanTypeAggregation:
		return "aggregation"
	case AttributeScanTypeBasic:
		return "basic"
	case AttributeScanTypeOpsBackground:
		return "ops_background"
	case AttributeScanTypeUdfBackground:
		return "udf_background"
	}
	return ""
}

// MapAttributeScanType is a helper map of string to AttributeScanType attribute value.
var MapAttributeScanType = map[string]AttributeScanType{
	"aggregation":    AttributeScanTypeAggregation,
	"basic":          AttributeScanTypeBasic,
	"ops_background": AttributeScanTypeOpsBackground,
	"udf_background": AttributeScanTypeUdfBackground,
}

// AttributeTransactionResult specifies the value transaction_result attribute.
type AttributeTransactionResult int

const (
	_ AttributeTransactionResult = iota
	AttributeTransactionResultError
	AttributeTransactionResultFilteredOut
	AttributeTransactionResultNotFound
	AttributeTransactionResultSuccess
	AttributeTransactionResultTimeout
)

// String returns the string representation of the AttributeTransactionResult.
func (av AttributeTransactionResult) String() string {
	switch av {
	case AttributeTransactionResultError:
		return "error"
	case AttributeTransactionResultFilteredOut:
		return "filtered_out"
	case AttributeTransactionResultNotFound:
		return "not_found"
	case AttributeTransactionResultSuccess:
		return "success"
	case AttributeTransactionResultTimeout:
		return "timeout"
	}
	return ""
}

// MapAttributeTransactionResult is a helper map of string to AttributeTransactionResult attribute value.
var MapAttributeTransactionResult = map[string]AttributeTransactionResult{
	"error":        AttributeTransactionResultError,
	"filtered_out": AttributeTransactionResultFilteredOut,
	"not_found":    AttributeTransactionResultNotFound,
	"success":      AttributeTransactionResultSuccess,
	"timeout":      AttributeTransactionResultTimeout,
}

// AttributeTransactionType specifies the value transaction_type attribute.
type AttributeTransactionType int

const (
	_ AttributeTransactionType = iota
	AttributeTransactionTypeDelete
	AttributeTransactionTypeRead
	AttributeTransactionTypeUdf
	AttributeTransactionTypeWrite
)

// String returns the string representation of the AttributeTransactionType.
func (av AttributeTransactionType) String() string {
	switch av {
	case AttributeTransactionTypeDelete:
		return "delete"
	case AttributeTransactionTypeRead:
		return "read"
	case AttributeTransactionTypeUdf:
		return "udf"
	case AttributeTransactionTypeWrite:
		return "write"
	}
	return ""
}

// MapAttributeTransactionType is a helper map of string to AttributeTransactionType attribute value.
var MapAttributeTransactionType = map[string]AttributeTransactionType{
	"delete": AttributeTransactionTypeDelete,
	"read":   AttributeTransactionTypeRead,
	"udf":    AttributeTransactionTypeUdf,
	"write":  AttributeTransactionTypeWrite,
}

var MetricsInfo = metricsInfo{
	AerospikeNamespaceDiskAvailable: metricInfo{
		Name: "aerospike.namespace.disk.available",
	},
	AerospikeNamespaceGeojsonRegionQueryCells: metricInfo{
		Name: "aerospike.namespace.geojson.region_query_cells",
	},
	AerospikeNamespaceGeojsonRegionQueryFalsePositive: metricInfo{
		Name: "aerospike.namespace.geojson.region_query_false_positive",
	},
	AerospikeNamespaceGeojsonRegionQueryPoints: metricInfo{
		Name: "aerospike.namespace.geojson.region_query_points",
	},
	AerospikeNamespaceGeojsonRegionQueryRequests: metricInfo{
		Name: "aerospike.namespace.geojson.region_query_requests",
	},
	AerospikeNamespaceMemoryFree: metricInfo{
		Name: "aerospike.namespace.memory.free",
	},
	AerospikeNamespaceMemoryUsage: metricInfo{
		Name: "aerospike.namespace.memory.usage",
	},
	AerospikeNamespaceQueryCount: metricInfo{
		Name: "aerospike.namespace.query.count",
	},
	AerospikeNamespaceScanCount: metricInfo{
		Name: "aerospike.namespace.scan.count",
	},
	AerospikeNamespaceTransactionCount: metricInfo{
		Name: "aerospike.namespace.transaction.count",
	},
	AerospikeNodeConnectionCount: metricInfo{
		Name: "aerospike.node.connection.count",
	},
	AerospikeNodeConnectionOpen: metricInfo{
		Name: "aerospike.node.connection.open",
	},
	AerospikeNodeMemoryFree: metricInfo{
		Name: "aerospike.node.memory.free",
	},
	AerospikeNodeQueryTracked: metricInfo{
		Name: "aerospike.node.query.tracked",
	},
}

type metricsInfo struct {
	AerospikeNamespaceDiskAvailable                   metricInfo
	AerospikeNamespaceGeojsonRegionQueryCells         metricInfo
	AerospikeNamespaceGeojsonRegionQueryFalsePositive metricInfo
	AerospikeNamespaceGeojsonRegionQueryPoints        metricInfo
	AerospikeNamespaceGeojsonRegionQueryRequests      metricInfo
	AerospikeNamespaceMemoryFree                      metricInfo
	AerospikeNamespaceMemoryUsage                     metricInfo
	AerospikeNamespaceQueryCount                      metricInfo
	AerospikeNamespaceScanCount                       metricInfo
	AerospikeNamespaceTransactionCount                metricInfo
	AerospikeNodeConnectionCount                      metricInfo
	AerospikeNodeConnectionOpen                       metricInfo
	AerospikeNodeMemoryFree                           metricInfo
	AerospikeNodeQueryTracked                         metricInfo
}

type metricInfo struct {
	Name string
}

type metricAerospikeNamespaceDiskAvailable struct {
	data     pmetric.Metric // data buffer for generated metric.
	config   MetricConfig   // metric config provided by user.
	capacity int            // max observed number of data points added to the metric.
}

// init fills aerospike.namespace.disk.available metric with initial data.
func (m *metricAerospikeNamespaceDiskAvailable) init() {
	m.data.SetName("aerospike.namespace.disk.available")
	m.data.SetDescription("Minimum percentage of contiguous disk space free to the namespace across all devices")
	m.data.SetUnit("%")
	m.data.SetEmptyGauge()
}

func (m *metricAerospikeNamespaceDiskAvailable) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
	if !m.config.Enabled {
		return
	}
	dp := m.data.Gauge().DataPoints().AppendEmpty()
	dp.SetStartTimestamp(start)
	dp.SetTimestamp(ts)
	dp.SetIntValue(val)
}

// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricAerospikeNamespaceDiskAvailable) updateCapacity() {
	if m.data.Gauge().DataPoints().Len() > m.capacity {
		m.capacity = m.data.Gauge().DataPoints().Len()
	}
}

// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricAerospikeNamespaceDiskAvailable) emit(metrics pmetric.MetricSlice) {
	if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
		m.updateCapacity()
		m.data.MoveTo(metrics.AppendEmpty())
		m.init()
	}
}

func newMetricAerospikeNamespaceDiskAvailable(cfg MetricConfig) metricAerospikeNamespaceDiskAvailable {
	m := metricAerospikeNamespaceDiskAvailable{config: cfg}
	if cfg.Enabled {
		m.data = pmetric.NewMetric()
		m.init()
	}
	return m
}

type metricAerospikeNamespaceGeojsonRegionQueryCells struct {
	data     pmetric.Metric // data buffer for generated metric.
	config   MetricConfig   // metric config provided by user.
	capacity int            // max observed number of data points added to the metric.
}

// init fills aerospike.namespace.geojson.region_query_cells metric with initial data.
func (m *metricAerospikeNamespaceGeojsonRegionQueryCells) init() {
	m.data.SetName("aerospike.namespace.geojson.region_query_cells")
	m.data.SetDescription("Number of cell coverings for query region queried")
	m.data.SetUnit("{cells}")
	m.data.SetEmptySum()
	m.data.Sum().SetIsMonotonic(true)
	m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
}

func (m *metricAerospikeNamespaceGeojsonRegionQueryCells) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
	if !m.config.Enabled {
		return
	}
	dp := m.data.Sum().DataPoints().AppendEmpty()
	dp.SetStartTimestamp(start)
	dp.SetTimestamp(ts)
	dp.SetIntValue(val)
}

// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricAerospikeNamespaceGeojsonRegionQueryCells) updateCapacity() {
	if m.data.Sum().DataPoints().Len() > m.capacity {
		m.capacity = m.data.Sum().DataPoints().Len()
	}
}

// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricAerospikeNamespaceGeojsonRegionQueryCells) emit(metrics pmetric.MetricSlice) {
	if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
		m.updateCapacity()
		m.data.MoveTo(metrics.AppendEmpty())
		m.init()
	}
}

func newMetricAerospikeNamespaceGeojsonRegionQueryCells(cfg MetricConfig) metricAerospikeNamespaceGeojsonRegionQueryCells {
	m := metricAerospikeNamespaceGeojsonRegionQueryCells{config: cfg}
	if cfg.Enabled {
		m.data = pmetric.NewMetric()
		m.init()
	}
	return m
}

type metricAerospikeNamespaceGeojsonRegionQueryFalsePositive struct {
	data     pmetric.Metric // data buffer for generated metric.
	config   MetricConfig   // metric config provided by user.
	capacity int            // max observed number of data points added to the metric.
}

// init fills aerospike.namespace.geojson.region_query_false_positive metric with initial data.
func (m *metricAerospikeNamespaceGeojsonRegionQueryFalsePositive) init() {
	m.data.SetName("aerospike.namespace.geojson.region_query_false_positive")
	m.data.SetDescription("Number of points outside the region.")
	m.data.SetUnit("{points}")
	m.data.SetEmptySum()
	m.data.Sum().SetIsMonotonic(true)
	m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
}

func (m *metricAerospikeNamespaceGeojsonRegionQueryFalsePositive) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
	if !m.config.Enabled {
		return
	}
	dp := m.data.Sum().DataPoints().AppendEmpty()
	dp.SetStartTimestamp(start)
	dp.SetTimestamp(ts)
	dp.SetIntValue(val)
}

// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricAerospikeNamespaceGeojsonRegionQueryFalsePositive) updateCapacity() {
	if m.data.Sum().DataPoints().Len() > m.capacity {
		m.capacity = m.data.Sum().DataPoints().Len()
	}
}

// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricAerospikeNamespaceGeojsonRegionQueryFalsePositive) emit(metrics pmetric.MetricSlice) {
	if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
		m.updateCapacity()
		m.data.MoveTo(metrics.AppendEmpty())
		m.init()
	}
}

func newMetricAerospikeNamespaceGeojsonRegionQueryFalsePositive(cfg MetricConfig) metricAerospikeNamespaceGeojsonRegionQueryFalsePositive {
	m := metricAerospikeNamespaceGeojsonRegionQueryFalsePositive{config: cfg}
	if cfg.Enabled {
		m.data = pmetric.NewMetric()
		m.init()
	}
	return m
}

type metricAerospikeNamespaceGeojsonRegionQueryPoints struct {
	data     pmetric.Metric // data buffer for generated metric.
	config   MetricConfig   // metric config provided by user.
	capacity int            // max observed number of data points added to the metric.
}

// init fills aerospike.namespace.geojson.region_query_points metric with initial data.
func (m *metricAerospikeNamespaceGeojsonRegionQueryPoints) init() {
	m.data.SetName("aerospike.namespace.geojson.region_query_points")
	m.data.SetDescription("Number of points within the region.")
	m.data.SetUnit("{points}")
	m.data.SetEmptySum()
	m.data.Sum().SetIsMonotonic(true)
	m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
}

func (m *metricAerospikeNamespaceGeojsonRegionQueryPoints) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
	if !m.config.Enabled {
		return
	}
	dp := m.data.Sum().DataPoints().AppendEmpty()
	dp.SetStartTimestamp(start)
	dp.SetTimestamp(ts)
	dp.SetIntValue(val)
}

// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricAerospikeNamespaceGeojsonRegionQueryPoints) updateCapacity() {
	if m.data.Sum().DataPoints().Len() > m.capacity {
		m.capacity = m.data.Sum().DataPoints().Len()
	}
}

// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricAerospikeNamespaceGeojsonRegionQueryPoints) emit(metrics pmetric.MetricSlice) {
	if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
		m.updateCapacity()
		m.data.MoveTo(metrics.AppendEmpty())
		m.init()
	}
}

func newMetricAerospikeNamespaceGeojsonRegionQueryPoints(cfg MetricConfig) metricAerospikeNamespaceGeojsonRegionQueryPoints {
	m := metricAerospikeNamespaceGeojsonRegionQueryPoints{config: cfg}
	if cfg.Enabled {
		m.data = pmetric.NewMetric()
		m.init()
	}
	return m
}

type metricAerospikeNamespaceGeojsonRegionQueryRequests struct {
	data     pmetric.Metric // data buffer for generated metric.
	config   MetricConfig   // metric config provided by user.
	capacity int            // max observed number of data points added to the metric.
}

// init fills aerospike.namespace.geojson.region_query_requests metric with initial data.
func (m *metricAerospikeNamespaceGeojsonRegionQueryRequests) init() {
	m.data.SetName("aerospike.namespace.geojson.region_query_requests")
	m.data.SetDescription("Number of geojson queries on the system since the uptime of the node.")
	m.data.SetUnit("{queries}")
	m.data.SetEmptySum()
	m.data.Sum().SetIsMonotonic(true)
	m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
}

func (m *metricAerospikeNamespaceGeojsonRegionQueryRequests) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
	if !m.config.Enabled {
		return
	}
	dp := m.data.Sum().DataPoints().AppendEmpty()
	dp.SetStartTimestamp(start)
	dp.SetTimestamp(ts)
	dp.SetIntValue(val)
}

// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricAerospikeNamespaceGeojsonRegionQueryRequests) updateCapacity() {
	if m.data.Sum().DataPoints().Len() > m.capacity {
		m.capacity = m.data.Sum().DataPoints().Len()
	}
}

// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricAerospikeNamespaceGeojsonRegionQueryRequests) emit(metrics pmetric.MetricSlice) {
	if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
		m.updateCapacity()
		m.data.MoveTo(metrics.AppendEmpty())
		m.init()
	}
}

func newMetricAerospikeNamespaceGeojsonRegionQueryRequests(cfg MetricConfig) metricAerospikeNamespaceGeojsonRegionQueryRequests {
	m := metricAerospikeNamespaceGeojsonRegionQueryRequests{config: cfg}
	if cfg.Enabled {
		m.data = pmetric.NewMetric()
		m.init()
	}
	return m
}

type metricAerospikeNamespaceMemoryFree struct {
	data     pmetric.Metric // data buffer for generated metric.
	config   MetricConfig   // metric config provided by user.
	capacity int            // max observed number of data points added to the metric.
}

// init fills aerospike.namespace.memory.free metric with initial data.
func (m *metricAerospikeNamespaceMemoryFree) init() {
	m.data.SetName("aerospike.namespace.memory.free")
	m.data.SetDescription("Percentage of the namespace's memory which is still free")
	m.data.SetUnit("%")
	m.data.SetEmptyGauge()
}

func (m *metricAerospikeNamespaceMemoryFree) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
	if !m.config.Enabled {
		return
	}
	dp := m.data.Gauge().DataPoints().AppendEmpty()
	dp.SetStartTimestamp(start)
	dp.SetTimestamp(ts)
	dp.SetIntValue(val)
}

// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricAerospikeNamespaceMemoryFree) updateCapacity() {
	if m.data.Gauge().DataPoints().Len() > m.capacity {
		m.capacity = m.data.Gauge().DataPoints().Len()
	}
}

// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricAerospikeNamespaceMemoryFree) emit(metrics pmetric.MetricSlice) {
	if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
		m.updateCapacity()
		m.data.MoveTo(metrics.AppendEmpty())
		m.init()
	}
}

func newMetricAerospikeNamespaceMemoryFree(cfg MetricConfig) metricAerospikeNamespaceMemoryFree {
	m := metricAerospikeNamespaceMemoryFree{config: cfg}
	if cfg.Enabled {
		m.data = pmetric.NewMetric()
		m.init()
	}
	return m
}

type metricAerospikeNamespaceMemoryUsage struct {
	data     pmetric.Metric // data buffer for generated metric.
	config   MetricConfig   // metric config provided by user.
	capacity int            // max observed number of data points added to the metric.
}

// init fills aerospike.namespace.memory.usage metric with initial data.
func (m *metricAerospikeNamespaceMemoryUsage) init() {
	m.data.SetName("aerospike.namespace.memory.usage")
	m.data.SetDescription("Memory currently used by each component of the namespace")
	m.data.SetUnit("By")
	m.data.SetEmptySum()
	m.data.Sum().SetIsMonotonic(false)
	m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
	m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
}

func (m *metricAerospikeNamespaceMemoryUsage) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, namespaceComponentAttributeValue string) {
	if !m.config.Enabled {
		return
	}
	dp := m.data.Sum().DataPoints().AppendEmpty()
	dp.SetStartTimestamp(start)
	dp.SetTimestamp(ts)
	dp.SetIntValue(val)
	dp.Attributes().PutStr("component", namespaceComponentAttributeValue)
}

// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricAerospikeNamespaceMemoryUsage) updateCapacity() {
	if m.data.Sum().DataPoints().Len() > m.capacity {
		m.capacity = m.data.Sum().DataPoints().Len()
	}
}

// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricAerospikeNamespaceMemoryUsage) emit(metrics pmetric.MetricSlice) {
	if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
		m.updateCapacity()
		m.data.MoveTo(metrics.AppendEmpty())
		m.init()
	}
}

func newMetricAerospikeNamespaceMemoryUsage(cfg MetricConfig) metricAerospikeNamespaceMemoryUsage {
	m := metricAerospikeNamespaceMemoryUsage{config: cfg}
	if cfg.Enabled {
		m.data = pmetric.NewMetric()
		m.init()
	}
	return m
}

type metricAerospikeNamespaceQueryCount struct {
	data     pmetric.Metric // data buffer for generated metric.
	config   MetricConfig   // metric config provided by user.
	capacity int            // max observed number of data points added to the metric.
}

// init fills aerospike.namespace.query.count metric with initial data.
func (m *metricAerospikeNamespaceQueryCount) init() {
	m.data.SetName("aerospike.namespace.query.count")
	m.data.SetDescription("Number of query operations performed on the namespace")
	m.data.SetUnit("{queries}")
	m.data.SetEmptySum()
	m.data.Sum().SetIsMonotonic(true)
	m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
	m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
}

func (m *metricAerospikeNamespaceQueryCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, queryTypeAttributeValue string, indexTypeAttributeValue string, queryResultAttributeValue string) {
	if !m.config.Enabled {
		return
	}
	dp := m.data.Sum().DataPoints().AppendEmpty()
	dp.SetStartTimestamp(start)
	dp.SetTimestamp(ts)
	dp.SetIntValue(val)
	dp.Attributes().PutStr("type", queryTypeAttributeValue)
	dp.Attributes().PutStr("index", indexTypeAttributeValue)
	dp.Attributes().PutStr("result", queryResultAttributeValue)
}

// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricAerospikeNamespaceQueryCount) updateCapacity() {
	if m.data.Sum().DataPoints().Len() > m.capacity {
		m.capacity = m.data.Sum().DataPoints().Len()
	}
}

// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricAerospikeNamespaceQueryCount) emit(metrics pmetric.MetricSlice) {
	if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
		m.updateCapacity()
		m.data.MoveTo(metrics.AppendEmpty())
		m.init()
	}
}

func newMetricAerospikeNamespaceQueryCount(cfg MetricConfig) metricAerospikeNamespaceQueryCount {
	m := metricAerospikeNamespaceQueryCount{config: cfg}
	if cfg.Enabled {
		m.data = pmetric.NewMetric()
		m.init()
	}
	return m
}

type metricAerospikeNamespaceScanCount struct {
	data     pmetric.Metric // data buffer for generated metric.
	config   MetricConfig   // metric config provided by user.
	capacity int            // max observed number of data points added to the metric.
}

// init fills aerospike.namespace.scan.count metric with initial data.
func (m *metricAerospikeNamespaceScanCount) init() {
	m.data.SetName("aerospike.namespace.scan.count")
	m.data.SetDescription("Number of scan operations performed on the namespace")
	m.data.SetUnit("{scans}")
	m.data.SetEmptySum()
	m.data.Sum().SetIsMonotonic(true)
	m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
	m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
}

func (m *metricAerospikeNamespaceScanCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, scanTypeAttributeValue string, scanResultAttributeValue string) {
	if !m.config.Enabled {
		return
	}
	dp := m.data.Sum().DataPoints().AppendEmpty()
	dp.SetStartTimestamp(start)
	dp.SetTimestamp(ts)
	dp.SetIntValue(val)
	dp.Attributes().PutStr("type", scanTypeAttributeValue)
	dp.Attributes().PutStr("result", scanResultAttributeValue)
}

// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricAerospikeNamespaceScanCount) updateCapacity() {
	if m.data.Sum().DataPoints().Len() > m.capacity {
		m.capacity = m.data.Sum().DataPoints().Len()
	}
}

// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricAerospikeNamespaceScanCount) emit(metrics pmetric.MetricSlice) {
	if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
		m.updateCapacity()
		m.data.MoveTo(metrics.AppendEmpty())
		m.init()
	}
}

func newMetricAerospikeNamespaceScanCount(cfg MetricConfig) metricAerospikeNamespaceScanCount {
	m := metricAerospikeNamespaceScanCount{config: cfg}
	if cfg.Enabled {
		m.data = pmetric.NewMetric()
		m.init()
	}
	return m
}

type metricAerospikeNamespaceTransactionCount struct {
	data     pmetric.Metric // data buffer for generated metric.
	config   MetricConfig   // metric config provided by user.
	capacity int            // max observed number of data points added to the metric.
}

// init fills aerospike.namespace.transaction.count metric with initial data.
func (m *metricAerospikeNamespaceTransactionCount) init() {
	m.data.SetName("aerospike.namespace.transaction.count")
	m.data.SetDescription("Number of transactions performed on the namespace")
	m.data.SetUnit("{transactions}")
	m.data.SetEmptySum()
	m.data.Sum().SetIsMonotonic(true)
	m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
	m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
}

func (m *metricAerospikeNamespaceTransactionCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, transactionTypeAttributeValue string, transactionResultAttributeValue string) {
	if !m.config.Enabled {
		return
	}
	dp := m.data.Sum().DataPoints().AppendEmpty()
	dp.SetStartTimestamp(start)
	dp.SetTimestamp(ts)
	dp.SetIntValue(val)
	dp.Attributes().PutStr("type", transactionTypeAttributeValue)
	dp.Attributes().PutStr("result", transactionResultAttributeValue)
}

// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricAerospikeNamespaceTransactionCount) updateCapacity() {
	if m.data.Sum().DataPoints().Len() > m.capacity {
		m.capacity = m.data.Sum().DataPoints().Len()
	}
}

// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricAerospikeNamespaceTransactionCount) emit(metrics pmetric.MetricSlice) {
	if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
		m.updateCapacity()
		m.data.MoveTo(metrics.AppendEmpty())
		m.init()
	}
}

func newMetricAerospikeNamespaceTransactionCount(cfg MetricConfig) metricAerospikeNamespaceTransactionCount {
	m := metricAerospikeNamespaceTransactionCount{config: cfg}
	if cfg.Enabled {
		m.data = pmetric.NewMetric()
		m.init()
	}
	return m
}

type metricAerospikeNodeConnectionCount struct {
	data     pmetric.Metric // data buffer for generated metric.
	config   MetricConfig   // metric config provided by user.
	capacity int            // max observed number of data points added to the metric.
}

// init fills aerospike.node.connection.count metric with initial data.
func (m *metricAerospikeNodeConnectionCount) init() {
	m.data.SetName("aerospike.node.connection.count")
	m.data.SetDescription("Number of connections opened and closed to the node")
	m.data.SetUnit("{connections}")
	m.data.SetEmptySum()
	m.data.Sum().SetIsMonotonic(true)
	m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
	m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
}

func (m *metricAerospikeNodeConnectionCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, connectionTypeAttributeValue string, connectionOpAttributeValue string) {
	if !m.config.Enabled {
		return
	}
	dp := m.data.Sum().DataPoints().AppendEmpty()
	dp.SetStartTimestamp(start)
	dp.SetTimestamp(ts)
	dp.SetIntValue(val)
	dp.Attributes().PutStr("type", connectionTypeAttributeValue)
	dp.Attributes().PutStr("operation", connectionOpAttributeValue)
}

// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricAerospikeNodeConnectionCount) updateCapacity() {
	if m.data.Sum().DataPoints().Len() > m.capacity {
		m.capacity = m.data.Sum().DataPoints().Len()
	}
}

// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricAerospikeNodeConnectionCount) emit(metrics pmetric.MetricSlice) {
	if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
		m.updateCapacity()
		m.data.MoveTo(metrics.AppendEmpty())
		m.init()
	}
}

func newMetricAerospikeNodeConnectionCount(cfg MetricConfig) metricAerospikeNodeConnectionCount {
	m := metricAerospikeNodeConnectionCount{config: cfg}
	if cfg.Enabled {
		m.data = pmetric.NewMetric()
		m.init()
	}
	return m
}

type metricAerospikeNodeConnectionOpen struct {
	data     pmetric.Metric // data buffer for generated metric.
	config   MetricConfig   // metric config provided by user.
	capacity int            // max observed number of data points added to the metric.
}

// init fills aerospike.node.connection.open metric with initial data.
func (m *metricAerospikeNodeConnectionOpen) init() {
	m.data.SetName("aerospike.node.connection.open")
	m.data.SetDescription("Current number of open connections to the node")
	m.data.SetUnit("{connections}")
	m.data.SetEmptySum()
	m.data.Sum().SetIsMonotonic(false)
	m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
	m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
}

func (m *metricAerospikeNodeConnectionOpen) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, connectionTypeAttributeValue string) {
	if !m.config.Enabled {
		return
	}
	dp := m.data.Sum().DataPoints().AppendEmpty()
	dp.SetStartTimestamp(start)
	dp.SetTimestamp(ts)
	dp.SetIntValue(val)
	dp.Attributes().PutStr("type", connectionTypeAttributeValue)
}

// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricAerospikeNodeConnectionOpen) updateCapacity() {
	if m.data.Sum().DataPoints().Len() > m.capacity {
		m.capacity = m.data.Sum().DataPoints().Len()
	}
}

// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricAerospikeNodeConnectionOpen) emit(metrics pmetric.MetricSlice) {
	if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
		m.updateCapacity()
		m.data.MoveTo(metrics.AppendEmpty())
		m.init()
	}
}

func newMetricAerospikeNodeConnectionOpen(cfg MetricConfig) metricAerospikeNodeConnectionOpen {
	m := metricAerospikeNodeConnectionOpen{config: cfg}
	if cfg.Enabled {
		m.data = pmetric.NewMetric()
		m.init()
	}
	return m
}

type metricAerospikeNodeMemoryFree struct {
	data     pmetric.Metric // data buffer for generated metric.
	config   MetricConfig   // metric config provided by user.
	capacity int            // max observed number of data points added to the metric.
}

// init fills aerospike.node.memory.free metric with initial data.
func (m *metricAerospikeNodeMemoryFree) init() {
	m.data.SetName("aerospike.node.memory.free")
	m.data.SetDescription("Percentage of the node's memory which is still free")
	m.data.SetUnit("%")
	m.data.SetEmptyGauge()
}

func (m *metricAerospikeNodeMemoryFree) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
	if !m.config.Enabled {
		return
	}
	dp := m.data.Gauge().DataPoints().AppendEmpty()
	dp.SetStartTimestamp(start)
	dp.SetTimestamp(ts)
	dp.SetIntValue(val)
}

// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricAerospikeNodeMemoryFree) updateCapacity() {
	if m.data.Gauge().DataPoints().Len() > m.capacity {
		m.capacity = m.data.Gauge().DataPoints().Len()
	}
}

// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricAerospikeNodeMemoryFree) emit(metrics pmetric.MetricSlice) {
	if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
		m.updateCapacity()
		m.data.MoveTo(metrics.AppendEmpty())
		m.init()
	}
}

func newMetricAerospikeNodeMemoryFree(cfg MetricConfig) metricAerospikeNodeMemoryFree {
	m := metricAerospikeNodeMemoryFree{config: cfg}
	if cfg.Enabled {
		m.data = pmetric.NewMetric()
		m.init()
	}
	return m
}

type metricAerospikeNodeQueryTracked struct {
	data     pmetric.Metric // data buffer for generated metric.
	config   MetricConfig   // metric config provided by user.
	capacity int            // max observed number of data points added to the metric.
}

// init fills aerospike.node.query.tracked metric with initial data.
func (m *metricAerospikeNodeQueryTracked) init() {
	m.data.SetName("aerospike.node.query.tracked")
	m.data.SetDescription("Number of queries tracked by the system.")
	m.data.SetUnit("{queries}")
	m.data.SetEmptySum()
	m.data.Sum().SetIsMonotonic(true)
	m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
}

func (m *metricAerospikeNodeQueryTracked) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
	if !m.config.Enabled {
		return
	}
	dp := m.data.Sum().DataPoints().AppendEmpty()
	dp.SetStartTimestamp(start)
	dp.SetTimestamp(ts)
	dp.SetIntValue(val)
}

// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricAerospikeNodeQueryTracked) updateCapacity() {
	if m.data.Sum().DataPoints().Len() > m.capacity {
		m.capacity = m.data.Sum().DataPoints().Len()
	}
}

// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricAerospikeNodeQueryTracked) emit(metrics pmetric.MetricSlice) {
	if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
		m.updateCapacity()
		m.data.MoveTo(metrics.AppendEmpty())
		m.init()
	}
}

func newMetricAerospikeNodeQueryTracked(cfg MetricConfig) metricAerospikeNodeQueryTracked {
	m := metricAerospikeNodeQueryTracked{config: cfg}
	if cfg.Enabled {
		m.data = pmetric.NewMetric()
		m.init()
	}
	return m
}

// MetricsBuilder provides an interface for scrapers to report metrics while taking care of all the transformations
// required to produce metric representation defined in metadata and user config.
type MetricsBuilder struct {
	config          MetricsBuilderConfig // config of the metrics builder.
	startTime       pcommon.Timestamp    // start time that will be applied to all recorded data points.
	metricsCapacity int                  // maximum observed number of metrics per resource.
	metricsBuffer   pmetric.Metrics      // accumulates metrics data before emitting.
	buildInfo       component.BuildInfo  // contains version information.
	resourceAttributeIncludeFilter                          map[string]filter.Filter
	resourceAttributeExcludeFilter                          map[string]filter.Filter
	metricAerospikeNamespaceDiskAvailable                   metricAerospikeNamespaceDiskAvailable
	metricAerospikeNamespaceGeojsonRegionQueryCells         metricAerospikeNamespaceGeojsonRegionQueryCells
	metricAerospikeNamespaceGeojsonRegionQueryFalsePositive metricAerospikeNamespaceGeojsonRegionQueryFalsePositive
	metricAerospikeNamespaceGeojsonRegionQueryPoints        metricAerospikeNamespaceGeojsonRegionQueryPoints
	metricAerospikeNamespaceGeojsonRegionQueryRequests      metricAerospikeNamespaceGeojsonRegionQueryRequests
	metricAerospikeNamespaceMemoryFree                      metricAerospikeNamespaceMemoryFree
	metricAerospikeNamespaceMemoryUsage                     metricAerospikeNamespaceMemoryUsage
	metricAerospikeNamespaceQueryCount                      metricAerospikeNamespaceQueryCount
	metricAerospikeNamespaceScanCount                       metricAerospikeNamespaceScanCount
	metricAerospikeNamespaceTransactionCount                metricAerospikeNamespaceTransactionCount
	metricAerospikeNodeConnectionCount                      metricAerospikeNodeConnectionCount
	metricAerospikeNodeConnectionOpen                       metricAerospikeNodeConnectionOpen
	metricAerospikeNodeMemoryFree                           metricAerospikeNodeMemoryFree
	metricAerospikeNodeQueryTracked                         metricAerospikeNodeQueryTracked
}

// MetricBuilderOption applies changes to default metrics builder.
type MetricBuilderOption interface {
	apply(*MetricsBuilder)
}

type metricBuilderOptionFunc func(mb *MetricsBuilder)

func (mbof metricBuilderOptionFunc) apply(mb *MetricsBuilder) {
	mbof(mb)
}

// WithStartTime sets startTime on the metrics builder.
func WithStartTime(startTime pcommon.Timestamp) MetricBuilderOption {
	return metricBuilderOptionFunc(func(mb *MetricsBuilder) {
		mb.startTime = startTime
	})
}

func NewMetricsBuilder(mbc MetricsBuilderConfig, settings receiver.Settings, options ...MetricBuilderOption) *MetricsBuilder {
	mb := &MetricsBuilder{
		config:        mbc,
		startTime:     pcommon.NewTimestampFromTime(time.Now()),
		metricsBuffer: pmetric.NewMetrics(),
		buildInfo:     settings.BuildInfo,
		metricAerospikeNamespaceDiskAvailable:                   newMetricAerospikeNamespaceDiskAvailable(mbc.Metrics.AerospikeNamespaceDiskAvailable),
		metricAerospikeNamespaceGeojsonRegionQueryCells:         newMetricAerospikeNamespaceGeojsonRegionQueryCells(mbc.Metrics.AerospikeNamespaceGeojsonRegionQueryCells),
		metricAerospikeNamespaceGeojsonRegionQueryFalsePositive: newMetricAerospikeNamespaceGeojsonRegionQueryFalsePositive(mbc.Metrics.AerospikeNamespaceGeojsonRegionQueryFalsePositive),
		metricAerospikeNamespaceGeojsonRegionQueryPoints:        newMetricAerospikeNamespaceGeojsonRegionQueryPoints(mbc.Metrics.AerospikeNamespaceGeojsonRegionQueryPoints),
		metricAerospikeNamespaceGeojsonRegionQueryRequests:      newMetricAerospikeNamespaceGeojsonRegionQueryRequests(mbc.Metrics.AerospikeNamespaceGeojsonRegionQueryRequests),
		metricAerospikeNamespaceMemoryFree:                      newMetricAerospikeNamespaceMemoryFree(mbc.Metrics.AerospikeNamespaceMemoryFree),
		metricAerospikeNamespaceMemoryUsage:                     newMetricAerospikeNamespaceMemoryUsage(mbc.Metrics.AerospikeNamespaceMemoryUsage),
		metricAerospikeNamespaceQueryCount:                      newMetricAerospikeNamespaceQueryCount(mbc.Metrics.AerospikeNamespaceQueryCount),
		metricAerospikeNamespaceScanCount:                       newMetricAerospikeNamespaceScanCount(mbc.Metrics.AerospikeNamespaceScanCount),
		metricAerospikeNamespaceTransactionCount:                newMetricAerospikeNamespaceTransactionCount(mbc.Metrics.AerospikeNamespaceTransactionCount),
		metricAerospikeNodeConnectionCount:                      newMetricAerospikeNodeConnectionCount(mbc.Metrics.AerospikeNodeConnectionCount),
		metricAerospikeNodeConnectionOpen:                       newMetricAerospikeNodeConnectionOpen(mbc.Metrics.AerospikeNodeConnectionOpen),
		metricAerospikeNodeMemoryFree:                           newMetricAerospikeNodeMemoryFree(mbc.Metrics.AerospikeNodeMemoryFree),
		metricAerospikeNodeQueryTracked:                         newMetricAerospikeNodeQueryTracked(mbc.Metrics.AerospikeNodeQueryTracked),
		resourceAttributeIncludeFilter:                          make(map[string]filter.Filter),
		resourceAttributeExcludeFilter:                          make(map[string]filter.Filter),
	}
	if mbc.ResourceAttributes.AerospikeNamespace.MetricsInclude != nil {
		mb.resourceAttributeIncludeFilter["aerospike.namespace"] = filter.CreateFilter(mbc.ResourceAttributes.AerospikeNamespace.MetricsInclude)
	}
	if mbc.ResourceAttributes.AerospikeNamespace.MetricsExclude != nil {
		mb.resourceAttributeExcludeFilter["aerospike.namespace"] = filter.CreateFilter(mbc.ResourceAttributes.AerospikeNamespace.MetricsExclude)
	}
	if mbc.ResourceAttributes.AerospikeNodeName.MetricsInclude != nil {
		mb.resourceAttributeIncludeFilter["aerospike.node.name"] = filter.CreateFilter(mbc.ResourceAttributes.AerospikeNodeName.MetricsInclude)
	}
	if mbc.ResourceAttributes.AerospikeNodeName.MetricsExclude != nil {
		mb.resourceAttributeExcludeFilter["aerospike.node.name"] = filter.CreateFilter(mbc.ResourceAttributes.AerospikeNodeName.MetricsExclude)
	}

	for _, op := range options {
		op.apply(mb)
	}
	return mb
}

// NewResourceBuilder returns a new resource builder that should be used to build a resource associated with for the emitted metrics.
func (mb *MetricsBuilder) NewResourceBuilder() *ResourceBuilder {
	return NewResourceBuilder(mb.config.ResourceAttributes)
}

// updateCapacity updates max length of metrics and resource attributes that will be used for the slice capacity.
func (mb *MetricsBuilder) updateCapacity(rm pmetric.ResourceMetrics) {
	if mb.metricsCapacity < rm.ScopeMetrics().At(0).Metrics().Len() {
		mb.metricsCapacity = rm.ScopeMetrics().At(0).Metrics().Len()
	}
}

// ResourceMetricsOption applies changes to provided resource metrics.
type ResourceMetricsOption interface {
	apply(pmetric.ResourceMetrics)
}

type resourceMetricsOptionFunc func(pmetric.ResourceMetrics)

func (rmof resourceMetricsOptionFunc) apply(rm pmetric.ResourceMetrics) {
	rmof(rm)
}

// WithResource sets the provided resource on the emitted ResourceMetrics.
// It's recommended to use ResourceBuilder to create the resource.
func WithResource(res pcommon.Resource) ResourceMetricsOption {
	return resourceMetricsOptionFunc(func(rm pmetric.ResourceMetrics) {
		res.CopyTo(rm.Resource())
	})
}

// WithStartTimeOverride overrides start time for all the resource metrics data points.
// This option should be only used if different start time has to be set on metrics coming from different resources.
func WithStartTimeOverride(start pcommon.Timestamp) ResourceMetricsOption {
	return resourceMetricsOptionFunc(func(rm pmetric.ResourceMetrics) {
		var dps pmetric.NumberDataPointSlice
		metrics := rm.ScopeMetrics().At(0).Metrics()
		for i := 0; i < metrics.Len(); i++ {
			switch metrics.At(i).Type() {
			case pmetric.MetricTypeGauge:
				dps = metrics.At(i).Gauge().DataPoints()
			case pmetric.MetricTypeSum:
				dps = metrics.At(i).Sum().DataPoints()
			}
			for j := 0; j < dps.Len(); j++ {
				dps.At(j).SetStartTimestamp(start)
			}
		}
	})
}

// EmitForResource saves all the generated metrics under a new resource and updates the internal state to be ready for
// recording another set of data points as part of another resource. This function can be helpful when one scraper
// needs to emit metrics from several resources.
// Otherwise calling this function is not required,
// just `Emit` function can be called instead.
// Resource attributes should be provided as ResourceMetricsOption arguments.
func (mb *MetricsBuilder) EmitForResource(options ...ResourceMetricsOption) {
	rm := pmetric.NewResourceMetrics()
	ils := rm.ScopeMetrics().AppendEmpty()
	ils.Scope().SetName(ScopeName)
	ils.Scope().SetVersion(mb.buildInfo.Version)
	ils.Metrics().EnsureCapacity(mb.metricsCapacity)
	mb.metricAerospikeNamespaceDiskAvailable.emit(ils.Metrics())
	mb.metricAerospikeNamespaceGeojsonRegionQueryCells.emit(ils.Metrics())
	mb.metricAerospikeNamespaceGeojsonRegionQueryFalsePositive.emit(ils.Metrics())
	mb.metricAerospikeNamespaceGeojsonRegionQueryPoints.emit(ils.Metrics())
	mb.metricAerospikeNamespaceGeojsonRegionQueryRequests.emit(ils.Metrics())
	mb.metricAerospikeNamespaceMemoryFree.emit(ils.Metrics())
	mb.metricAerospikeNamespaceMemoryUsage.emit(ils.Metrics())
	mb.metricAerospikeNamespaceQueryCount.emit(ils.Metrics())
	mb.metricAerospikeNamespaceScanCount.emit(ils.Metrics())
	mb.metricAerospikeNamespaceTransactionCount.emit(ils.Metrics())
	mb.metricAerospikeNodeConnectionCount.emit(ils.Metrics())
	mb.metricAerospikeNodeConnectionOpen.emit(ils.Metrics())
	mb.metricAerospikeNodeMemoryFree.emit(ils.Metrics())
	mb.metricAerospikeNodeQueryTracked.emit(ils.Metrics())

	for _, op := range options {
		op.apply(rm)
	}
	for attr, filter := range mb.resourceAttributeIncludeFilter {
		if val, ok := rm.Resource().Attributes().Get(attr); ok && !filter.Matches(val.AsString()) {
			return
		}
	}
	for attr, filter := range mb.resourceAttributeExcludeFilter {
		if val, ok := rm.Resource().Attributes().Get(attr); ok && filter.Matches(val.AsString()) {
			return
		}
	}

	if ils.Metrics().Len() > 0 {
		mb.updateCapacity(rm)
		rm.MoveTo(mb.metricsBuffer.ResourceMetrics().AppendEmpty())
	}
}

// Emit returns all the metrics accumulated by the metrics builder and updates the internal state to be ready for
// recording another set of metrics. This function will be responsible for applying all the transformations required to
// produce metric representation defined in metadata and user config, e.g. delta or cumulative.
func (mb *MetricsBuilder) Emit(options ...ResourceMetricsOption) pmetric.Metrics {
	mb.EmitForResource(options...)
	metrics := mb.metricsBuffer
	mb.metricsBuffer = pmetric.NewMetrics()
	return metrics
}

// RecordAerospikeNamespaceDiskAvailableDataPoint adds a data point to aerospike.namespace.disk.available metric.
func (mb *MetricsBuilder) RecordAerospikeNamespaceDiskAvailableDataPoint(ts pcommon.Timestamp, inputVal string) error {
	val, err := strconv.ParseInt(inputVal, 10, 64)
	if err != nil {
		return fmt.Errorf("failed to parse int64 for AerospikeNamespaceDiskAvailable, value was %s: %w", inputVal, err)
	}
	mb.metricAerospikeNamespaceDiskAvailable.recordDataPoint(mb.startTime, ts, val)
	return nil
}

// RecordAerospikeNamespaceGeojsonRegionQueryCellsDataPoint adds a data point to aerospike.namespace.geojson.region_query_cells metric.
func (mb *MetricsBuilder) RecordAerospikeNamespaceGeojsonRegionQueryCellsDataPoint(ts pcommon.Timestamp, inputVal string) error {
	val, err := strconv.ParseInt(inputVal, 10, 64)
	if err != nil {
		return fmt.Errorf("failed to parse int64 for AerospikeNamespaceGeojsonRegionQueryCells, value was %s: %w", inputVal, err)
	}
	mb.metricAerospikeNamespaceGeojsonRegionQueryCells.recordDataPoint(mb.startTime, ts, val)
	return nil
}

// RecordAerospikeNamespaceGeojsonRegionQueryFalsePositiveDataPoint adds a data point to aerospike.namespace.geojson.region_query_false_positive metric.
func (mb *MetricsBuilder) RecordAerospikeNamespaceGeojsonRegionQueryFalsePositiveDataPoint(ts pcommon.Timestamp, inputVal string) error {
	val, err := strconv.ParseInt(inputVal, 10, 64)
	if err != nil {
		return fmt.Errorf("failed to parse int64 for AerospikeNamespaceGeojsonRegionQueryFalsePositive, value was %s: %w", inputVal, err)
	}
	mb.metricAerospikeNamespaceGeojsonRegionQueryFalsePositive.recordDataPoint(mb.startTime, ts, val)
	return nil
}

// RecordAerospikeNamespaceGeojsonRegionQueryPointsDataPoint adds a data point to aerospike.namespace.geojson.region_query_points metric.
func (mb *MetricsBuilder) RecordAerospikeNamespaceGeojsonRegionQueryPointsDataPoint(ts pcommon.Timestamp, inputVal string) error {
	val, err := strconv.ParseInt(inputVal, 10, 64)
	if err != nil {
		return fmt.Errorf("failed to parse int64 for AerospikeNamespaceGeojsonRegionQueryPoints, value was %s: %w", inputVal, err)
	}
	mb.metricAerospikeNamespaceGeojsonRegionQueryPoints.recordDataPoint(mb.startTime, ts, val)
	return nil
}

// RecordAerospikeNamespaceGeojsonRegionQueryRequestsDataPoint adds a data point to aerospike.namespace.geojson.region_query_requests metric.
func (mb *MetricsBuilder) RecordAerospikeNamespaceGeojsonRegionQueryRequestsDataPoint(ts pcommon.Timestamp, inputVal string) error {
	val, err := strconv.ParseInt(inputVal, 10, 64)
	if err != nil {
		return fmt.Errorf("failed to parse int64 for AerospikeNamespaceGeojsonRegionQueryRequests, value was %s: %w", inputVal, err)
	}
	mb.metricAerospikeNamespaceGeojsonRegionQueryRequests.recordDataPoint(mb.startTime, ts, val)
	return nil
}

// RecordAerospikeNamespaceMemoryFreeDataPoint adds a data point to aerospike.namespace.memory.free metric.
func (mb *MetricsBuilder) RecordAerospikeNamespaceMemoryFreeDataPoint(ts pcommon.Timestamp, inputVal string) error {
	val, err := strconv.ParseInt(inputVal, 10, 64)
	if err != nil {
		return fmt.Errorf("failed to parse int64 for AerospikeNamespaceMemoryFree, value was %s: %w", inputVal, err)
	}
	mb.metricAerospikeNamespaceMemoryFree.recordDataPoint(mb.startTime, ts, val)
	return nil
}

// RecordAerospikeNamespaceMemoryUsageDataPoint adds a data point to aerospike.namespace.memory.usage metric.
func (mb *MetricsBuilder) RecordAerospikeNamespaceMemoryUsageDataPoint(ts pcommon.Timestamp, inputVal string, namespaceComponentAttributeValue AttributeNamespaceComponent) error {
	val, err := strconv.ParseInt(inputVal, 10, 64)
	if err != nil {
		return fmt.Errorf("failed to parse int64 for AerospikeNamespaceMemoryUsage, value was %s: %w", inputVal, err)
	}
	mb.metricAerospikeNamespaceMemoryUsage.recordDataPoint(mb.startTime, ts, val, namespaceComponentAttributeValue.String())
	return nil
}

// RecordAerospikeNamespaceQueryCountDataPoint adds a data point to aerospike.namespace.query.count metric.
func (mb *MetricsBuilder) RecordAerospikeNamespaceQueryCountDataPoint(ts pcommon.Timestamp, inputVal string, queryTypeAttributeValue AttributeQueryType, indexTypeAttributeValue AttributeIndexType, queryResultAttributeValue AttributeQueryResult) error {
	val, err := strconv.ParseInt(inputVal, 10, 64)
	if err != nil {
		return fmt.Errorf("failed to parse int64 for AerospikeNamespaceQueryCount, value was %s: %w", inputVal, err)
	}
	mb.metricAerospikeNamespaceQueryCount.recordDataPoint(mb.startTime, ts, val, queryTypeAttributeValue.String(), indexTypeAttributeValue.String(), queryResultAttributeValue.String())
	return nil
}

// RecordAerospikeNamespaceScanCountDataPoint adds a data point to aerospike.namespace.scan.count metric.
func (mb *MetricsBuilder) RecordAerospikeNamespaceScanCountDataPoint(ts pcommon.Timestamp, inputVal string, scanTypeAttributeValue AttributeScanType, scanResultAttributeValue AttributeScanResult) error {
	val, err := strconv.ParseInt(inputVal, 10, 64)
	if err != nil {
		return fmt.Errorf("failed to parse int64 for AerospikeNamespaceScanCount, value was %s: %w", inputVal, err)
	}
	mb.metricAerospikeNamespaceScanCount.recordDataPoint(mb.startTime, ts, val, scanTypeAttributeValue.String(), scanResultAttributeValue.String())
	return nil
}

// RecordAerospikeNamespaceTransactionCountDataPoint adds a data point to aerospike.namespace.transaction.count metric.
func (mb *MetricsBuilder) RecordAerospikeNamespaceTransactionCountDataPoint(ts pcommon.Timestamp, inputVal string, transactionTypeAttributeValue AttributeTransactionType, transactionResultAttributeValue AttributeTransactionResult) error {
	val, err := strconv.ParseInt(inputVal, 10, 64)
	if err != nil {
		return fmt.Errorf("failed to parse int64 for AerospikeNamespaceTransactionCount, value was %s: %w", inputVal, err)
	}
	mb.metricAerospikeNamespaceTransactionCount.recordDataPoint(mb.startTime, ts, val, transactionTypeAttributeValue.String(), transactionResultAttributeValue.String())
	return nil
}

// RecordAerospikeNodeConnectionCountDataPoint adds a data point to aerospike.node.connection.count metric.
func (mb *MetricsBuilder) RecordAerospikeNodeConnectionCountDataPoint(ts pcommon.Timestamp, inputVal string, connectionTypeAttributeValue AttributeConnectionType, connectionOpAttributeValue AttributeConnectionOp) error {
	val, err := strconv.ParseInt(inputVal, 10, 64)
	if err != nil {
		return fmt.Errorf("failed to parse int64 for AerospikeNodeConnectionCount, value was %s: %w", inputVal, err)
	}
	mb.metricAerospikeNodeConnectionCount.recordDataPoint(mb.startTime, ts, val, connectionTypeAttributeValue.String(), connectionOpAttributeValue.String())
	return nil
}

// RecordAerospikeNodeConnectionOpenDataPoint adds a data point to aerospike.node.connection.open metric.
func (mb *MetricsBuilder) RecordAerospikeNodeConnectionOpenDataPoint(ts pcommon.Timestamp, inputVal string, connectionTypeAttributeValue AttributeConnectionType) error {
	val, err := strconv.ParseInt(inputVal, 10, 64)
	if err != nil {
		return fmt.Errorf("failed to parse int64 for AerospikeNodeConnectionOpen, value was %s: %w", inputVal, err)
	}
	mb.metricAerospikeNodeConnectionOpen.recordDataPoint(mb.startTime, ts, val, connectionTypeAttributeValue.String())
	return nil
}

// RecordAerospikeNodeMemoryFreeDataPoint adds a data point to aerospike.node.memory.free metric.
func (mb *MetricsBuilder) RecordAerospikeNodeMemoryFreeDataPoint(ts pcommon.Timestamp, inputVal string) error {
	val, err := strconv.ParseInt(inputVal, 10, 64)
	if err != nil {
		return fmt.Errorf("failed to parse int64 for AerospikeNodeMemoryFree, value was %s: %w", inputVal, err)
	}
	mb.metricAerospikeNodeMemoryFree.recordDataPoint(mb.startTime, ts, val)
	return nil
}

// RecordAerospikeNodeQueryTrackedDataPoint adds a data point to aerospike.node.query.tracked metric.
func (mb *MetricsBuilder) RecordAerospikeNodeQueryTrackedDataPoint(ts pcommon.Timestamp, inputVal string) error {
	val, err := strconv.ParseInt(inputVal, 10, 64)
	if err != nil {
		return fmt.Errorf("failed to parse int64 for AerospikeNodeQueryTracked, value was %s: %w", inputVal, err)
	}
	mb.metricAerospikeNodeQueryTracked.recordDataPoint(mb.startTime, ts, val)
	return nil
}

// Reset resets metrics builder to its initial state. It should be used when external metrics source is restarted,
// and metrics builder should update its startTime and reset its internal state accordingly.
func (mb *MetricsBuilder) Reset(options ...MetricBuilderOption) {
	mb.startTime = pcommon.NewTimestampFromTime(time.Now())
	for _, op := range options {
		op.apply(mb)
	}
}
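
// Illustrative usage sketch (not emitted by mdatagen; kept as a comment so the
// generated file stays untouched code-wise). A scraper typically builds the
// MetricsBuilder once, records parsed stat strings each scrape, and emits the
// accumulated batch per resource. DefaultMetricsBuilderConfig and the
// ResourceBuilder setters are assumed to come from the sibling generated files
// of this package; the receivertest helper and the literal values are
// placeholders, not part of this file.
//
//	mb := NewMetricsBuilder(DefaultMetricsBuilderConfig(), receivertest.NewNopSettings())
//	now := pcommon.NewTimestampFromTime(time.Now())
//	_ = mb.RecordAerospikeNodeMemoryFreeDataPoint(now, "42")
//	_ = mb.RecordAerospikeNodeConnectionOpenDataPoint(now, "17", AttributeConnectionTypeClient)
//	rb := mb.NewResourceBuilder()
//	rb.SetAerospikeNodeName("node-1")
//	metrics := mb.Emit(WithResource(rb.Emit()))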