receiver/elasticsearchreceiver/internal/metadata/generated_metrics.go (5,404 lines of code) (raw):

// Code generated by mdatagen. DO NOT EDIT. package metadata import ( "time" "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/filter" "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/pmetric" "go.opentelemetry.io/collector/receiver" ) // AttributeCacheName specifies the value cache_name attribute. type AttributeCacheName int const ( _ AttributeCacheName = iota AttributeCacheNameFielddata AttributeCacheNameQuery ) // String returns the string representation of the AttributeCacheName. func (av AttributeCacheName) String() string { switch av { case AttributeCacheNameFielddata: return "fielddata" case AttributeCacheNameQuery: return "query" } return "" } // MapAttributeCacheName is a helper map of string to AttributeCacheName attribute value. var MapAttributeCacheName = map[string]AttributeCacheName{ "fielddata": AttributeCacheNameFielddata, "query": AttributeCacheNameQuery, } // AttributeClusterPublishedDifferenceState specifies the value cluster_published_difference_state attribute. type AttributeClusterPublishedDifferenceState int const ( _ AttributeClusterPublishedDifferenceState = iota AttributeClusterPublishedDifferenceStateIncompatible AttributeClusterPublishedDifferenceStateCompatible ) // String returns the string representation of the AttributeClusterPublishedDifferenceState. func (av AttributeClusterPublishedDifferenceState) String() string { switch av { case AttributeClusterPublishedDifferenceStateIncompatible: return "incompatible" case AttributeClusterPublishedDifferenceStateCompatible: return "compatible" } return "" } // MapAttributeClusterPublishedDifferenceState is a helper map of string to AttributeClusterPublishedDifferenceState attribute value. var MapAttributeClusterPublishedDifferenceState = map[string]AttributeClusterPublishedDifferenceState{ "incompatible": AttributeClusterPublishedDifferenceStateIncompatible, "compatible": AttributeClusterPublishedDifferenceStateCompatible, } // AttributeClusterStateQueueState specifies the value cluster_state_queue_state attribute. type AttributeClusterStateQueueState int const ( _ AttributeClusterStateQueueState = iota AttributeClusterStateQueueStatePending AttributeClusterStateQueueStateCommitted ) // String returns the string representation of the AttributeClusterStateQueueState. func (av AttributeClusterStateQueueState) String() string { switch av { case AttributeClusterStateQueueStatePending: return "pending" case AttributeClusterStateQueueStateCommitted: return "committed" } return "" } // MapAttributeClusterStateQueueState is a helper map of string to AttributeClusterStateQueueState attribute value. var MapAttributeClusterStateQueueState = map[string]AttributeClusterStateQueueState{ "pending": AttributeClusterStateQueueStatePending, "committed": AttributeClusterStateQueueStateCommitted, } // AttributeClusterStateUpdateType specifies the value cluster_state_update_type attribute. type AttributeClusterStateUpdateType int const ( _ AttributeClusterStateUpdateType = iota AttributeClusterStateUpdateTypeComputation AttributeClusterStateUpdateTypeContextConstruction AttributeClusterStateUpdateTypeCommit AttributeClusterStateUpdateTypeCompletion AttributeClusterStateUpdateTypeMasterApply AttributeClusterStateUpdateTypeNotification ) // String returns the string representation of the AttributeClusterStateUpdateType. func (av AttributeClusterStateUpdateType) String() string { switch av { case AttributeClusterStateUpdateTypeComputation: return "computation" case AttributeClusterStateUpdateTypeContextConstruction: return "context_construction" case AttributeClusterStateUpdateTypeCommit: return "commit" case AttributeClusterStateUpdateTypeCompletion: return "completion" case AttributeClusterStateUpdateTypeMasterApply: return "master_apply" case AttributeClusterStateUpdateTypeNotification: return "notification" } return "" } // MapAttributeClusterStateUpdateType is a helper map of string to AttributeClusterStateUpdateType attribute value. var MapAttributeClusterStateUpdateType = map[string]AttributeClusterStateUpdateType{ "computation": AttributeClusterStateUpdateTypeComputation, "context_construction": AttributeClusterStateUpdateTypeContextConstruction, "commit": AttributeClusterStateUpdateTypeCommit, "completion": AttributeClusterStateUpdateTypeCompletion, "master_apply": AttributeClusterStateUpdateTypeMasterApply, "notification": AttributeClusterStateUpdateTypeNotification, } // AttributeDirection specifies the value direction attribute. type AttributeDirection int const ( _ AttributeDirection = iota AttributeDirectionReceived AttributeDirectionSent ) // String returns the string representation of the AttributeDirection. func (av AttributeDirection) String() string { switch av { case AttributeDirectionReceived: return "received" case AttributeDirectionSent: return "sent" } return "" } // MapAttributeDirection is a helper map of string to AttributeDirection attribute value. var MapAttributeDirection = map[string]AttributeDirection{ "received": AttributeDirectionReceived, "sent": AttributeDirectionSent, } // AttributeDocumentState specifies the value document_state attribute. type AttributeDocumentState int const ( _ AttributeDocumentState = iota AttributeDocumentStateActive AttributeDocumentStateDeleted ) // String returns the string representation of the AttributeDocumentState. func (av AttributeDocumentState) String() string { switch av { case AttributeDocumentStateActive: return "active" case AttributeDocumentStateDeleted: return "deleted" } return "" } // MapAttributeDocumentState is a helper map of string to AttributeDocumentState attribute value. var MapAttributeDocumentState = map[string]AttributeDocumentState{ "active": AttributeDocumentStateActive, "deleted": AttributeDocumentStateDeleted, } // AttributeGetResult specifies the value get_result attribute. type AttributeGetResult int const ( _ AttributeGetResult = iota AttributeGetResultHit AttributeGetResultMiss ) // String returns the string representation of the AttributeGetResult. func (av AttributeGetResult) String() string { switch av { case AttributeGetResultHit: return "hit" case AttributeGetResultMiss: return "miss" } return "" } // MapAttributeGetResult is a helper map of string to AttributeGetResult attribute value. var MapAttributeGetResult = map[string]AttributeGetResult{ "hit": AttributeGetResultHit, "miss": AttributeGetResultMiss, } // AttributeHealthStatus specifies the value health_status attribute. type AttributeHealthStatus int const ( _ AttributeHealthStatus = iota AttributeHealthStatusGreen AttributeHealthStatusYellow AttributeHealthStatusRed ) // String returns the string representation of the AttributeHealthStatus. func (av AttributeHealthStatus) String() string { switch av { case AttributeHealthStatusGreen: return "green" case AttributeHealthStatusYellow: return "yellow" case AttributeHealthStatusRed: return "red" } return "" } // MapAttributeHealthStatus is a helper map of string to AttributeHealthStatus attribute value. var MapAttributeHealthStatus = map[string]AttributeHealthStatus{ "green": AttributeHealthStatusGreen, "yellow": AttributeHealthStatusYellow, "red": AttributeHealthStatusRed, } // AttributeIndexAggregationType specifies the value index_aggregation_type attribute. type AttributeIndexAggregationType int const ( _ AttributeIndexAggregationType = iota AttributeIndexAggregationTypePrimaryShards AttributeIndexAggregationTypeTotal ) // String returns the string representation of the AttributeIndexAggregationType. func (av AttributeIndexAggregationType) String() string { switch av { case AttributeIndexAggregationTypePrimaryShards: return "primary_shards" case AttributeIndexAggregationTypeTotal: return "total" } return "" } // MapAttributeIndexAggregationType is a helper map of string to AttributeIndexAggregationType attribute value. var MapAttributeIndexAggregationType = map[string]AttributeIndexAggregationType{ "primary_shards": AttributeIndexAggregationTypePrimaryShards, "total": AttributeIndexAggregationTypeTotal, } // AttributeIndexingPressureStage specifies the value indexing_pressure_stage attribute. type AttributeIndexingPressureStage int const ( _ AttributeIndexingPressureStage = iota AttributeIndexingPressureStageCoordinating AttributeIndexingPressureStagePrimary AttributeIndexingPressureStageReplica ) // String returns the string representation of the AttributeIndexingPressureStage. func (av AttributeIndexingPressureStage) String() string { switch av { case AttributeIndexingPressureStageCoordinating: return "coordinating" case AttributeIndexingPressureStagePrimary: return "primary" case AttributeIndexingPressureStageReplica: return "replica" } return "" } // MapAttributeIndexingPressureStage is a helper map of string to AttributeIndexingPressureStage attribute value. var MapAttributeIndexingPressureStage = map[string]AttributeIndexingPressureStage{ "coordinating": AttributeIndexingPressureStageCoordinating, "primary": AttributeIndexingPressureStagePrimary, "replica": AttributeIndexingPressureStageReplica, } // AttributeMemoryState specifies the value memory_state attribute. type AttributeMemoryState int const ( _ AttributeMemoryState = iota AttributeMemoryStateFree AttributeMemoryStateUsed ) // String returns the string representation of the AttributeMemoryState. func (av AttributeMemoryState) String() string { switch av { case AttributeMemoryStateFree: return "free" case AttributeMemoryStateUsed: return "used" } return "" } // MapAttributeMemoryState is a helper map of string to AttributeMemoryState attribute value. var MapAttributeMemoryState = map[string]AttributeMemoryState{ "free": AttributeMemoryStateFree, "used": AttributeMemoryStateUsed, } // AttributeOperation specifies the value operation attribute. type AttributeOperation int const ( _ AttributeOperation = iota AttributeOperationIndex AttributeOperationDelete AttributeOperationGet AttributeOperationQuery AttributeOperationFetch AttributeOperationScroll AttributeOperationSuggest AttributeOperationMerge AttributeOperationRefresh AttributeOperationFlush AttributeOperationWarmer ) // String returns the string representation of the AttributeOperation. func (av AttributeOperation) String() string { switch av { case AttributeOperationIndex: return "index" case AttributeOperationDelete: return "delete" case AttributeOperationGet: return "get" case AttributeOperationQuery: return "query" case AttributeOperationFetch: return "fetch" case AttributeOperationScroll: return "scroll" case AttributeOperationSuggest: return "suggest" case AttributeOperationMerge: return "merge" case AttributeOperationRefresh: return "refresh" case AttributeOperationFlush: return "flush" case AttributeOperationWarmer: return "warmer" } return "" } // MapAttributeOperation is a helper map of string to AttributeOperation attribute value. var MapAttributeOperation = map[string]AttributeOperation{ "index": AttributeOperationIndex, "delete": AttributeOperationDelete, "get": AttributeOperationGet, "query": AttributeOperationQuery, "fetch": AttributeOperationFetch, "scroll": AttributeOperationScroll, "suggest": AttributeOperationSuggest, "merge": AttributeOperationMerge, "refresh": AttributeOperationRefresh, "flush": AttributeOperationFlush, "warmer": AttributeOperationWarmer, } // AttributeQueryCacheCountType specifies the value query_cache_count_type attribute. type AttributeQueryCacheCountType int const ( _ AttributeQueryCacheCountType = iota AttributeQueryCacheCountTypeHit AttributeQueryCacheCountTypeMiss ) // String returns the string representation of the AttributeQueryCacheCountType. func (av AttributeQueryCacheCountType) String() string { switch av { case AttributeQueryCacheCountTypeHit: return "hit" case AttributeQueryCacheCountTypeMiss: return "miss" } return "" } // MapAttributeQueryCacheCountType is a helper map of string to AttributeQueryCacheCountType attribute value. var MapAttributeQueryCacheCountType = map[string]AttributeQueryCacheCountType{ "hit": AttributeQueryCacheCountTypeHit, "miss": AttributeQueryCacheCountTypeMiss, } // AttributeSegmentsMemoryObjectType specifies the value segments_memory_object_type attribute. type AttributeSegmentsMemoryObjectType int const ( _ AttributeSegmentsMemoryObjectType = iota AttributeSegmentsMemoryObjectTypeTerm AttributeSegmentsMemoryObjectTypeDocValue AttributeSegmentsMemoryObjectTypeIndexWriter AttributeSegmentsMemoryObjectTypeFixedBitSet ) // String returns the string representation of the AttributeSegmentsMemoryObjectType. func (av AttributeSegmentsMemoryObjectType) String() string { switch av { case AttributeSegmentsMemoryObjectTypeTerm: return "term" case AttributeSegmentsMemoryObjectTypeDocValue: return "doc_value" case AttributeSegmentsMemoryObjectTypeIndexWriter: return "index_writer" case AttributeSegmentsMemoryObjectTypeFixedBitSet: return "fixed_bit_set" } return "" } // MapAttributeSegmentsMemoryObjectType is a helper map of string to AttributeSegmentsMemoryObjectType attribute value. var MapAttributeSegmentsMemoryObjectType = map[string]AttributeSegmentsMemoryObjectType{ "term": AttributeSegmentsMemoryObjectTypeTerm, "doc_value": AttributeSegmentsMemoryObjectTypeDocValue, "index_writer": AttributeSegmentsMemoryObjectTypeIndexWriter, "fixed_bit_set": AttributeSegmentsMemoryObjectTypeFixedBitSet, } // AttributeShardState specifies the value shard_state attribute. type AttributeShardState int const ( _ AttributeShardState = iota AttributeShardStateActive AttributeShardStateActivePrimary AttributeShardStateRelocating AttributeShardStateInitializing AttributeShardStateUnassigned AttributeShardStateUnassignedDelayed ) // String returns the string representation of the AttributeShardState. func (av AttributeShardState) String() string { switch av { case AttributeShardStateActive: return "active" case AttributeShardStateActivePrimary: return "active_primary" case AttributeShardStateRelocating: return "relocating" case AttributeShardStateInitializing: return "initializing" case AttributeShardStateUnassigned: return "unassigned" case AttributeShardStateUnassignedDelayed: return "unassigned_delayed" } return "" } // MapAttributeShardState is a helper map of string to AttributeShardState attribute value. var MapAttributeShardState = map[string]AttributeShardState{ "active": AttributeShardStateActive, "active_primary": AttributeShardStateActivePrimary, "relocating": AttributeShardStateRelocating, "initializing": AttributeShardStateInitializing, "unassigned": AttributeShardStateUnassigned, "unassigned_delayed": AttributeShardStateUnassignedDelayed, } // AttributeTaskState specifies the value task_state attribute. type AttributeTaskState int const ( _ AttributeTaskState = iota AttributeTaskStateRejected AttributeTaskStateCompleted ) // String returns the string representation of the AttributeTaskState. func (av AttributeTaskState) String() string { switch av { case AttributeTaskStateRejected: return "rejected" case AttributeTaskStateCompleted: return "completed" } return "" } // MapAttributeTaskState is a helper map of string to AttributeTaskState attribute value. var MapAttributeTaskState = map[string]AttributeTaskState{ "rejected": AttributeTaskStateRejected, "completed": AttributeTaskStateCompleted, } // AttributeThreadState specifies the value thread_state attribute. type AttributeThreadState int const ( _ AttributeThreadState = iota AttributeThreadStateActive AttributeThreadStateIdle ) // String returns the string representation of the AttributeThreadState. func (av AttributeThreadState) String() string { switch av { case AttributeThreadStateActive: return "active" case AttributeThreadStateIdle: return "idle" } return "" } // MapAttributeThreadState is a helper map of string to AttributeThreadState attribute value. var MapAttributeThreadState = map[string]AttributeThreadState{ "active": AttributeThreadStateActive, "idle": AttributeThreadStateIdle, } var MetricsInfo = metricsInfo{ ElasticsearchBreakerMemoryEstimated: metricInfo{ Name: "elasticsearch.breaker.memory.estimated", }, ElasticsearchBreakerMemoryLimit: metricInfo{ Name: "elasticsearch.breaker.memory.limit", }, ElasticsearchBreakerTripped: metricInfo{ Name: "elasticsearch.breaker.tripped", }, ElasticsearchClusterDataNodes: metricInfo{ Name: "elasticsearch.cluster.data_nodes", }, ElasticsearchClusterHealth: metricInfo{ Name: "elasticsearch.cluster.health", }, ElasticsearchClusterInFlightFetch: metricInfo{ Name: "elasticsearch.cluster.in_flight_fetch", }, ElasticsearchClusterIndicesCacheEvictions: metricInfo{ Name: "elasticsearch.cluster.indices.cache.evictions", }, ElasticsearchClusterNodes: metricInfo{ Name: "elasticsearch.cluster.nodes", }, ElasticsearchClusterPendingTasks: metricInfo{ Name: "elasticsearch.cluster.pending_tasks", }, ElasticsearchClusterPublishedStatesDifferences: metricInfo{ Name: "elasticsearch.cluster.published_states.differences", }, ElasticsearchClusterPublishedStatesFull: metricInfo{ Name: "elasticsearch.cluster.published_states.full", }, ElasticsearchClusterShards: metricInfo{ Name: "elasticsearch.cluster.shards", }, ElasticsearchClusterStateQueue: metricInfo{ Name: "elasticsearch.cluster.state_queue", }, ElasticsearchClusterStateUpdateCount: metricInfo{ Name: "elasticsearch.cluster.state_update.count", }, ElasticsearchClusterStateUpdateTime: metricInfo{ Name: "elasticsearch.cluster.state_update.time", }, ElasticsearchIndexCacheEvictions: metricInfo{ Name: "elasticsearch.index.cache.evictions", }, ElasticsearchIndexCacheMemoryUsage: metricInfo{ Name: "elasticsearch.index.cache.memory.usage", }, ElasticsearchIndexCacheSize: metricInfo{ Name: "elasticsearch.index.cache.size", }, ElasticsearchIndexDocuments: metricInfo{ Name: "elasticsearch.index.documents", }, ElasticsearchIndexOperationsCompleted: metricInfo{ Name: "elasticsearch.index.operations.completed", }, ElasticsearchIndexOperationsMergeCurrent: metricInfo{ Name: "elasticsearch.index.operations.merge.current", }, ElasticsearchIndexOperationsMergeDocsCount: metricInfo{ Name: "elasticsearch.index.operations.merge.docs_count", }, ElasticsearchIndexOperationsMergeSize: metricInfo{ Name: "elasticsearch.index.operations.merge.size", }, ElasticsearchIndexOperationsTime: metricInfo{ Name: "elasticsearch.index.operations.time", }, ElasticsearchIndexSegmentsCount: metricInfo{ Name: "elasticsearch.index.segments.count", }, ElasticsearchIndexSegmentsMemory: metricInfo{ Name: "elasticsearch.index.segments.memory", }, ElasticsearchIndexSegmentsSize: metricInfo{ Name: "elasticsearch.index.segments.size", }, ElasticsearchIndexShardsSize: metricInfo{ Name: "elasticsearch.index.shards.size", }, ElasticsearchIndexTranslogOperations: metricInfo{ Name: "elasticsearch.index.translog.operations", }, ElasticsearchIndexTranslogSize: metricInfo{ Name: "elasticsearch.index.translog.size", }, ElasticsearchIndexingPressureMemoryLimit: metricInfo{ Name: "elasticsearch.indexing_pressure.memory.limit", }, ElasticsearchIndexingPressureMemoryTotalPrimaryRejections: metricInfo{ Name: "elasticsearch.indexing_pressure.memory.total.primary_rejections", }, ElasticsearchIndexingPressureMemoryTotalReplicaRejections: metricInfo{ Name: "elasticsearch.indexing_pressure.memory.total.replica_rejections", }, ElasticsearchMemoryIndexingPressure: metricInfo{ Name: "elasticsearch.memory.indexing_pressure", }, ElasticsearchNodeCacheCount: metricInfo{ Name: "elasticsearch.node.cache.count", }, ElasticsearchNodeCacheEvictions: metricInfo{ Name: "elasticsearch.node.cache.evictions", }, ElasticsearchNodeCacheMemoryUsage: metricInfo{ Name: "elasticsearch.node.cache.memory.usage", }, ElasticsearchNodeCacheSize: metricInfo{ Name: "elasticsearch.node.cache.size", }, ElasticsearchNodeClusterConnections: metricInfo{ Name: "elasticsearch.node.cluster.connections", }, ElasticsearchNodeClusterIo: metricInfo{ Name: "elasticsearch.node.cluster.io", }, ElasticsearchNodeDiskIoRead: metricInfo{ Name: "elasticsearch.node.disk.io.read", }, ElasticsearchNodeDiskIoWrite: metricInfo{ Name: "elasticsearch.node.disk.io.write", }, ElasticsearchNodeDocuments: metricInfo{ Name: "elasticsearch.node.documents", }, ElasticsearchNodeFsDiskAvailable: metricInfo{ Name: "elasticsearch.node.fs.disk.available", }, ElasticsearchNodeFsDiskFree: metricInfo{ Name: "elasticsearch.node.fs.disk.free", }, ElasticsearchNodeFsDiskTotal: metricInfo{ Name: "elasticsearch.node.fs.disk.total", }, ElasticsearchNodeHTTPConnections: metricInfo{ Name: "elasticsearch.node.http.connections", }, ElasticsearchNodeIngestDocuments: metricInfo{ Name: "elasticsearch.node.ingest.documents", }, ElasticsearchNodeIngestDocumentsCurrent: metricInfo{ Name: "elasticsearch.node.ingest.documents.current", }, ElasticsearchNodeIngestOperationsFailed: metricInfo{ Name: "elasticsearch.node.ingest.operations.failed", }, ElasticsearchNodeOpenFiles: metricInfo{ Name: "elasticsearch.node.open_files", }, ElasticsearchNodeOperationsCompleted: metricInfo{ Name: "elasticsearch.node.operations.completed", }, ElasticsearchNodeOperationsCurrent: metricInfo{ Name: "elasticsearch.node.operations.current", }, ElasticsearchNodeOperationsGetCompleted: metricInfo{ Name: "elasticsearch.node.operations.get.completed", }, ElasticsearchNodeOperationsGetTime: metricInfo{ Name: "elasticsearch.node.operations.get.time", }, ElasticsearchNodeOperationsTime: metricInfo{ Name: "elasticsearch.node.operations.time", }, ElasticsearchNodePipelineIngestDocumentsCurrent: metricInfo{ Name: "elasticsearch.node.pipeline.ingest.documents.current", }, ElasticsearchNodePipelineIngestDocumentsPreprocessed: metricInfo{ Name: "elasticsearch.node.pipeline.ingest.documents.preprocessed", }, ElasticsearchNodePipelineIngestOperationsFailed: metricInfo{ Name: "elasticsearch.node.pipeline.ingest.operations.failed", }, ElasticsearchNodeScriptCacheEvictions: metricInfo{ Name: "elasticsearch.node.script.cache_evictions", }, ElasticsearchNodeScriptCompilationLimitTriggered: metricInfo{ Name: "elasticsearch.node.script.compilation_limit_triggered", }, ElasticsearchNodeScriptCompilations: metricInfo{ Name: "elasticsearch.node.script.compilations", }, ElasticsearchNodeSegmentsMemory: metricInfo{ Name: "elasticsearch.node.segments.memory", }, ElasticsearchNodeShardsDataSetSize: metricInfo{ Name: "elasticsearch.node.shards.data_set.size", }, ElasticsearchNodeShardsReservedSize: metricInfo{ Name: "elasticsearch.node.shards.reserved.size", }, ElasticsearchNodeShardsSize: metricInfo{ Name: "elasticsearch.node.shards.size", }, ElasticsearchNodeThreadPoolTasksFinished: metricInfo{ Name: "elasticsearch.node.thread_pool.tasks.finished", }, ElasticsearchNodeThreadPoolTasksQueued: metricInfo{ Name: "elasticsearch.node.thread_pool.tasks.queued", }, ElasticsearchNodeThreadPoolThreads: metricInfo{ Name: "elasticsearch.node.thread_pool.threads", }, ElasticsearchNodeTranslogOperations: metricInfo{ Name: "elasticsearch.node.translog.operations", }, ElasticsearchNodeTranslogSize: metricInfo{ Name: "elasticsearch.node.translog.size", }, ElasticsearchNodeTranslogUncommittedSize: metricInfo{ Name: "elasticsearch.node.translog.uncommitted.size", }, ElasticsearchOsCPULoadAvg15m: metricInfo{ Name: "elasticsearch.os.cpu.load_avg.15m", }, ElasticsearchOsCPULoadAvg1m: metricInfo{ Name: "elasticsearch.os.cpu.load_avg.1m", }, ElasticsearchOsCPULoadAvg5m: metricInfo{ Name: "elasticsearch.os.cpu.load_avg.5m", }, ElasticsearchOsCPUUsage: metricInfo{ Name: "elasticsearch.os.cpu.usage", }, ElasticsearchOsMemory: metricInfo{ Name: "elasticsearch.os.memory", }, ElasticsearchProcessCPUTime: metricInfo{ Name: "elasticsearch.process.cpu.time", }, ElasticsearchProcessCPUUsage: metricInfo{ Name: "elasticsearch.process.cpu.usage", }, ElasticsearchProcessMemoryVirtual: metricInfo{ Name: "elasticsearch.process.memory.virtual", }, JvmClassesLoaded: metricInfo{ Name: "jvm.classes.loaded", }, JvmGcCollectionsCount: metricInfo{ Name: "jvm.gc.collections.count", }, JvmGcCollectionsElapsed: metricInfo{ Name: "jvm.gc.collections.elapsed", }, JvmMemoryHeapCommitted: metricInfo{ Name: "jvm.memory.heap.committed", }, JvmMemoryHeapMax: metricInfo{ Name: "jvm.memory.heap.max", }, JvmMemoryHeapUsed: metricInfo{ Name: "jvm.memory.heap.used", }, JvmMemoryHeapUtilization: metricInfo{ Name: "jvm.memory.heap.utilization", }, JvmMemoryNonheapCommitted: metricInfo{ Name: "jvm.memory.nonheap.committed", }, JvmMemoryNonheapUsed: metricInfo{ Name: "jvm.memory.nonheap.used", }, JvmMemoryPoolMax: metricInfo{ Name: "jvm.memory.pool.max", }, JvmMemoryPoolUsed: metricInfo{ Name: "jvm.memory.pool.used", }, JvmThreadsCount: metricInfo{ Name: "jvm.threads.count", }, } type metricsInfo struct { ElasticsearchBreakerMemoryEstimated metricInfo ElasticsearchBreakerMemoryLimit metricInfo ElasticsearchBreakerTripped metricInfo ElasticsearchClusterDataNodes metricInfo ElasticsearchClusterHealth metricInfo ElasticsearchClusterInFlightFetch metricInfo ElasticsearchClusterIndicesCacheEvictions metricInfo ElasticsearchClusterNodes metricInfo ElasticsearchClusterPendingTasks metricInfo ElasticsearchClusterPublishedStatesDifferences metricInfo ElasticsearchClusterPublishedStatesFull metricInfo ElasticsearchClusterShards metricInfo ElasticsearchClusterStateQueue metricInfo ElasticsearchClusterStateUpdateCount metricInfo ElasticsearchClusterStateUpdateTime metricInfo ElasticsearchIndexCacheEvictions metricInfo ElasticsearchIndexCacheMemoryUsage metricInfo ElasticsearchIndexCacheSize metricInfo ElasticsearchIndexDocuments metricInfo ElasticsearchIndexOperationsCompleted metricInfo ElasticsearchIndexOperationsMergeCurrent metricInfo ElasticsearchIndexOperationsMergeDocsCount metricInfo ElasticsearchIndexOperationsMergeSize metricInfo ElasticsearchIndexOperationsTime metricInfo ElasticsearchIndexSegmentsCount metricInfo ElasticsearchIndexSegmentsMemory metricInfo ElasticsearchIndexSegmentsSize metricInfo ElasticsearchIndexShardsSize metricInfo ElasticsearchIndexTranslogOperations metricInfo ElasticsearchIndexTranslogSize metricInfo ElasticsearchIndexingPressureMemoryLimit metricInfo ElasticsearchIndexingPressureMemoryTotalPrimaryRejections metricInfo ElasticsearchIndexingPressureMemoryTotalReplicaRejections metricInfo ElasticsearchMemoryIndexingPressure metricInfo ElasticsearchNodeCacheCount metricInfo ElasticsearchNodeCacheEvictions metricInfo ElasticsearchNodeCacheMemoryUsage metricInfo ElasticsearchNodeCacheSize metricInfo ElasticsearchNodeClusterConnections metricInfo ElasticsearchNodeClusterIo metricInfo ElasticsearchNodeDiskIoRead metricInfo ElasticsearchNodeDiskIoWrite metricInfo ElasticsearchNodeDocuments metricInfo ElasticsearchNodeFsDiskAvailable metricInfo ElasticsearchNodeFsDiskFree metricInfo ElasticsearchNodeFsDiskTotal metricInfo ElasticsearchNodeHTTPConnections metricInfo ElasticsearchNodeIngestDocuments metricInfo ElasticsearchNodeIngestDocumentsCurrent metricInfo ElasticsearchNodeIngestOperationsFailed metricInfo ElasticsearchNodeOpenFiles metricInfo ElasticsearchNodeOperationsCompleted metricInfo ElasticsearchNodeOperationsCurrent metricInfo ElasticsearchNodeOperationsGetCompleted metricInfo ElasticsearchNodeOperationsGetTime metricInfo ElasticsearchNodeOperationsTime metricInfo ElasticsearchNodePipelineIngestDocumentsCurrent metricInfo ElasticsearchNodePipelineIngestDocumentsPreprocessed metricInfo ElasticsearchNodePipelineIngestOperationsFailed metricInfo ElasticsearchNodeScriptCacheEvictions metricInfo ElasticsearchNodeScriptCompilationLimitTriggered metricInfo ElasticsearchNodeScriptCompilations metricInfo ElasticsearchNodeSegmentsMemory metricInfo ElasticsearchNodeShardsDataSetSize metricInfo ElasticsearchNodeShardsReservedSize metricInfo ElasticsearchNodeShardsSize metricInfo ElasticsearchNodeThreadPoolTasksFinished metricInfo ElasticsearchNodeThreadPoolTasksQueued metricInfo ElasticsearchNodeThreadPoolThreads metricInfo ElasticsearchNodeTranslogOperations metricInfo ElasticsearchNodeTranslogSize metricInfo ElasticsearchNodeTranslogUncommittedSize metricInfo ElasticsearchOsCPULoadAvg15m metricInfo ElasticsearchOsCPULoadAvg1m metricInfo ElasticsearchOsCPULoadAvg5m metricInfo ElasticsearchOsCPUUsage metricInfo ElasticsearchOsMemory metricInfo ElasticsearchProcessCPUTime metricInfo ElasticsearchProcessCPUUsage metricInfo ElasticsearchProcessMemoryVirtual metricInfo JvmClassesLoaded metricInfo JvmGcCollectionsCount metricInfo JvmGcCollectionsElapsed metricInfo JvmMemoryHeapCommitted metricInfo JvmMemoryHeapMax metricInfo JvmMemoryHeapUsed metricInfo JvmMemoryHeapUtilization metricInfo JvmMemoryNonheapCommitted metricInfo JvmMemoryNonheapUsed metricInfo JvmMemoryPoolMax metricInfo JvmMemoryPoolUsed metricInfo JvmThreadsCount metricInfo } type metricInfo struct { Name string } type metricElasticsearchBreakerMemoryEstimated struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. capacity int // max observed number of data points added to the metric. } // init fills elasticsearch.breaker.memory.estimated metric with initial data. func (m *metricElasticsearchBreakerMemoryEstimated) init() { m.data.SetName("elasticsearch.breaker.memory.estimated") m.data.SetDescription("Estimated memory used for the operation.") m.data.SetUnit("By") m.data.SetEmptyGauge() m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) } func (m *metricElasticsearchBreakerMemoryEstimated) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, circuitBreakerNameAttributeValue string) { if !m.config.Enabled { return } dp := m.data.Gauge().DataPoints().AppendEmpty() dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntValue(val) dp.Attributes().PutStr("name", circuitBreakerNameAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. func (m *metricElasticsearchBreakerMemoryEstimated) updateCapacity() { if m.data.Gauge().DataPoints().Len() > m.capacity { m.capacity = m.data.Gauge().DataPoints().Len() } } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. func (m *metricElasticsearchBreakerMemoryEstimated) emit(metrics pmetric.MetricSlice) { if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) m.init() } } func newMetricElasticsearchBreakerMemoryEstimated(cfg MetricConfig) metricElasticsearchBreakerMemoryEstimated { m := metricElasticsearchBreakerMemoryEstimated{config: cfg} if cfg.Enabled { m.data = pmetric.NewMetric() m.init() } return m } type metricElasticsearchBreakerMemoryLimit struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. capacity int // max observed number of data points added to the metric. } // init fills elasticsearch.breaker.memory.limit metric with initial data. func (m *metricElasticsearchBreakerMemoryLimit) init() { m.data.SetName("elasticsearch.breaker.memory.limit") m.data.SetDescription("Memory limit for the circuit breaker.") m.data.SetUnit("By") m.data.SetEmptySum() m.data.Sum().SetIsMonotonic(false) m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) m.data.Sum().DataPoints().EnsureCapacity(m.capacity) } func (m *metricElasticsearchBreakerMemoryLimit) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, circuitBreakerNameAttributeValue string) { if !m.config.Enabled { return } dp := m.data.Sum().DataPoints().AppendEmpty() dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntValue(val) dp.Attributes().PutStr("name", circuitBreakerNameAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. func (m *metricElasticsearchBreakerMemoryLimit) updateCapacity() { if m.data.Sum().DataPoints().Len() > m.capacity { m.capacity = m.data.Sum().DataPoints().Len() } } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. func (m *metricElasticsearchBreakerMemoryLimit) emit(metrics pmetric.MetricSlice) { if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) m.init() } } func newMetricElasticsearchBreakerMemoryLimit(cfg MetricConfig) metricElasticsearchBreakerMemoryLimit { m := metricElasticsearchBreakerMemoryLimit{config: cfg} if cfg.Enabled { m.data = pmetric.NewMetric() m.init() } return m } type metricElasticsearchBreakerTripped struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. capacity int // max observed number of data points added to the metric. } // init fills elasticsearch.breaker.tripped metric with initial data. func (m *metricElasticsearchBreakerTripped) init() { m.data.SetName("elasticsearch.breaker.tripped") m.data.SetDescription("Total number of times the circuit breaker has been triggered and prevented an out of memory error.") m.data.SetUnit("1") m.data.SetEmptySum() m.data.Sum().SetIsMonotonic(true) m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) m.data.Sum().DataPoints().EnsureCapacity(m.capacity) } func (m *metricElasticsearchBreakerTripped) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, circuitBreakerNameAttributeValue string) { if !m.config.Enabled { return } dp := m.data.Sum().DataPoints().AppendEmpty() dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntValue(val) dp.Attributes().PutStr("name", circuitBreakerNameAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. func (m *metricElasticsearchBreakerTripped) updateCapacity() { if m.data.Sum().DataPoints().Len() > m.capacity { m.capacity = m.data.Sum().DataPoints().Len() } } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. func (m *metricElasticsearchBreakerTripped) emit(metrics pmetric.MetricSlice) { if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) m.init() } } func newMetricElasticsearchBreakerTripped(cfg MetricConfig) metricElasticsearchBreakerTripped { m := metricElasticsearchBreakerTripped{config: cfg} if cfg.Enabled { m.data = pmetric.NewMetric() m.init() } return m } type metricElasticsearchClusterDataNodes struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. capacity int // max observed number of data points added to the metric. } // init fills elasticsearch.cluster.data_nodes metric with initial data. func (m *metricElasticsearchClusterDataNodes) init() { m.data.SetName("elasticsearch.cluster.data_nodes") m.data.SetDescription("The number of data nodes in the cluster.") m.data.SetUnit("{nodes}") m.data.SetEmptySum() m.data.Sum().SetIsMonotonic(false) m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) } func (m *metricElasticsearchClusterDataNodes) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { if !m.config.Enabled { return } dp := m.data.Sum().DataPoints().AppendEmpty() dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntValue(val) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. func (m *metricElasticsearchClusterDataNodes) updateCapacity() { if m.data.Sum().DataPoints().Len() > m.capacity { m.capacity = m.data.Sum().DataPoints().Len() } } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. func (m *metricElasticsearchClusterDataNodes) emit(metrics pmetric.MetricSlice) { if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) m.init() } } func newMetricElasticsearchClusterDataNodes(cfg MetricConfig) metricElasticsearchClusterDataNodes { m := metricElasticsearchClusterDataNodes{config: cfg} if cfg.Enabled { m.data = pmetric.NewMetric() m.init() } return m } type metricElasticsearchClusterHealth struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. capacity int // max observed number of data points added to the metric. } // init fills elasticsearch.cluster.health metric with initial data. func (m *metricElasticsearchClusterHealth) init() { m.data.SetName("elasticsearch.cluster.health") m.data.SetDescription("The health status of the cluster.") m.data.SetUnit("{status}") m.data.SetEmptySum() m.data.Sum().SetIsMonotonic(false) m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) m.data.Sum().DataPoints().EnsureCapacity(m.capacity) } func (m *metricElasticsearchClusterHealth) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, healthStatusAttributeValue string) { if !m.config.Enabled { return } dp := m.data.Sum().DataPoints().AppendEmpty() dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntValue(val) dp.Attributes().PutStr("status", healthStatusAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. func (m *metricElasticsearchClusterHealth) updateCapacity() { if m.data.Sum().DataPoints().Len() > m.capacity { m.capacity = m.data.Sum().DataPoints().Len() } } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. func (m *metricElasticsearchClusterHealth) emit(metrics pmetric.MetricSlice) { if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) m.init() } } func newMetricElasticsearchClusterHealth(cfg MetricConfig) metricElasticsearchClusterHealth { m := metricElasticsearchClusterHealth{config: cfg} if cfg.Enabled { m.data = pmetric.NewMetric() m.init() } return m } type metricElasticsearchClusterInFlightFetch struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. capacity int // max observed number of data points added to the metric. } // init fills elasticsearch.cluster.in_flight_fetch metric with initial data. func (m *metricElasticsearchClusterInFlightFetch) init() { m.data.SetName("elasticsearch.cluster.in_flight_fetch") m.data.SetDescription("The number of unfinished fetches.") m.data.SetUnit("{fetches}") m.data.SetEmptySum() m.data.Sum().SetIsMonotonic(false) m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) } func (m *metricElasticsearchClusterInFlightFetch) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { if !m.config.Enabled { return } dp := m.data.Sum().DataPoints().AppendEmpty() dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntValue(val) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. func (m *metricElasticsearchClusterInFlightFetch) updateCapacity() { if m.data.Sum().DataPoints().Len() > m.capacity { m.capacity = m.data.Sum().DataPoints().Len() } } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. func (m *metricElasticsearchClusterInFlightFetch) emit(metrics pmetric.MetricSlice) { if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) m.init() } } func newMetricElasticsearchClusterInFlightFetch(cfg MetricConfig) metricElasticsearchClusterInFlightFetch { m := metricElasticsearchClusterInFlightFetch{config: cfg} if cfg.Enabled { m.data = pmetric.NewMetric() m.init() } return m } type metricElasticsearchClusterIndicesCacheEvictions struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. capacity int // max observed number of data points added to the metric. } // init fills elasticsearch.cluster.indices.cache.evictions metric with initial data. func (m *metricElasticsearchClusterIndicesCacheEvictions) init() { m.data.SetName("elasticsearch.cluster.indices.cache.evictions") m.data.SetDescription("The number of evictions from the cache for indices in cluster.") m.data.SetUnit("{evictions}") m.data.SetEmptySum() m.data.Sum().SetIsMonotonic(true) m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) m.data.Sum().DataPoints().EnsureCapacity(m.capacity) } func (m *metricElasticsearchClusterIndicesCacheEvictions) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, cacheNameAttributeValue string) { if !m.config.Enabled { return } dp := m.data.Sum().DataPoints().AppendEmpty() dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntValue(val) dp.Attributes().PutStr("cache_name", cacheNameAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. func (m *metricElasticsearchClusterIndicesCacheEvictions) updateCapacity() { if m.data.Sum().DataPoints().Len() > m.capacity { m.capacity = m.data.Sum().DataPoints().Len() } } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. func (m *metricElasticsearchClusterIndicesCacheEvictions) emit(metrics pmetric.MetricSlice) { if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) m.init() } } func newMetricElasticsearchClusterIndicesCacheEvictions(cfg MetricConfig) metricElasticsearchClusterIndicesCacheEvictions { m := metricElasticsearchClusterIndicesCacheEvictions{config: cfg} if cfg.Enabled { m.data = pmetric.NewMetric() m.init() } return m } type metricElasticsearchClusterNodes struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. capacity int // max observed number of data points added to the metric. } // init fills elasticsearch.cluster.nodes metric with initial data. func (m *metricElasticsearchClusterNodes) init() { m.data.SetName("elasticsearch.cluster.nodes") m.data.SetDescription("The total number of nodes in the cluster.") m.data.SetUnit("{nodes}") m.data.SetEmptySum() m.data.Sum().SetIsMonotonic(false) m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) } func (m *metricElasticsearchClusterNodes) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { if !m.config.Enabled { return } dp := m.data.Sum().DataPoints().AppendEmpty() dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntValue(val) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. func (m *metricElasticsearchClusterNodes) updateCapacity() { if m.data.Sum().DataPoints().Len() > m.capacity { m.capacity = m.data.Sum().DataPoints().Len() } } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. func (m *metricElasticsearchClusterNodes) emit(metrics pmetric.MetricSlice) { if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) m.init() } } func newMetricElasticsearchClusterNodes(cfg MetricConfig) metricElasticsearchClusterNodes { m := metricElasticsearchClusterNodes{config: cfg} if cfg.Enabled { m.data = pmetric.NewMetric() m.init() } return m } type metricElasticsearchClusterPendingTasks struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. capacity int // max observed number of data points added to the metric. } // init fills elasticsearch.cluster.pending_tasks metric with initial data. func (m *metricElasticsearchClusterPendingTasks) init() { m.data.SetName("elasticsearch.cluster.pending_tasks") m.data.SetDescription("The number of cluster-level changes that have not yet been executed.") m.data.SetUnit("{tasks}") m.data.SetEmptySum() m.data.Sum().SetIsMonotonic(false) m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) } func (m *metricElasticsearchClusterPendingTasks) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { if !m.config.Enabled { return } dp := m.data.Sum().DataPoints().AppendEmpty() dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntValue(val) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. func (m *metricElasticsearchClusterPendingTasks) updateCapacity() { if m.data.Sum().DataPoints().Len() > m.capacity { m.capacity = m.data.Sum().DataPoints().Len() } } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. func (m *metricElasticsearchClusterPendingTasks) emit(metrics pmetric.MetricSlice) { if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) m.init() } } func newMetricElasticsearchClusterPendingTasks(cfg MetricConfig) metricElasticsearchClusterPendingTasks { m := metricElasticsearchClusterPendingTasks{config: cfg} if cfg.Enabled { m.data = pmetric.NewMetric() m.init() } return m } type metricElasticsearchClusterPublishedStatesDifferences struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. capacity int // max observed number of data points added to the metric. } // init fills elasticsearch.cluster.published_states.differences metric with initial data. func (m *metricElasticsearchClusterPublishedStatesDifferences) init() { m.data.SetName("elasticsearch.cluster.published_states.differences") m.data.SetDescription("Number of differences between published cluster states.") m.data.SetUnit("1") m.data.SetEmptySum() m.data.Sum().SetIsMonotonic(false) m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) m.data.Sum().DataPoints().EnsureCapacity(m.capacity) } func (m *metricElasticsearchClusterPublishedStatesDifferences) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, clusterPublishedDifferenceStateAttributeValue string) { if !m.config.Enabled { return } dp := m.data.Sum().DataPoints().AppendEmpty() dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntValue(val) dp.Attributes().PutStr("state", clusterPublishedDifferenceStateAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. func (m *metricElasticsearchClusterPublishedStatesDifferences) updateCapacity() { if m.data.Sum().DataPoints().Len() > m.capacity { m.capacity = m.data.Sum().DataPoints().Len() } } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. func (m *metricElasticsearchClusterPublishedStatesDifferences) emit(metrics pmetric.MetricSlice) { if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) m.init() } } func newMetricElasticsearchClusterPublishedStatesDifferences(cfg MetricConfig) metricElasticsearchClusterPublishedStatesDifferences { m := metricElasticsearchClusterPublishedStatesDifferences{config: cfg} if cfg.Enabled { m.data = pmetric.NewMetric() m.init() } return m } type metricElasticsearchClusterPublishedStatesFull struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. capacity int // max observed number of data points added to the metric. } // init fills elasticsearch.cluster.published_states.full metric with initial data. func (m *metricElasticsearchClusterPublishedStatesFull) init() { m.data.SetName("elasticsearch.cluster.published_states.full") m.data.SetDescription("Number of published cluster states.") m.data.SetUnit("1") m.data.SetEmptySum() m.data.Sum().SetIsMonotonic(false) m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) } func (m *metricElasticsearchClusterPublishedStatesFull) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { if !m.config.Enabled { return } dp := m.data.Sum().DataPoints().AppendEmpty() dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntValue(val) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. func (m *metricElasticsearchClusterPublishedStatesFull) updateCapacity() { if m.data.Sum().DataPoints().Len() > m.capacity { m.capacity = m.data.Sum().DataPoints().Len() } } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. func (m *metricElasticsearchClusterPublishedStatesFull) emit(metrics pmetric.MetricSlice) { if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) m.init() } } func newMetricElasticsearchClusterPublishedStatesFull(cfg MetricConfig) metricElasticsearchClusterPublishedStatesFull { m := metricElasticsearchClusterPublishedStatesFull{config: cfg} if cfg.Enabled { m.data = pmetric.NewMetric() m.init() } return m } type metricElasticsearchClusterShards struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. capacity int // max observed number of data points added to the metric. } // init fills elasticsearch.cluster.shards metric with initial data. func (m *metricElasticsearchClusterShards) init() { m.data.SetName("elasticsearch.cluster.shards") m.data.SetDescription("The number of shards in the cluster.") m.data.SetUnit("{shards}") m.data.SetEmptySum() m.data.Sum().SetIsMonotonic(false) m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) m.data.Sum().DataPoints().EnsureCapacity(m.capacity) } func (m *metricElasticsearchClusterShards) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, shardStateAttributeValue string) { if !m.config.Enabled { return } dp := m.data.Sum().DataPoints().AppendEmpty() dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntValue(val) dp.Attributes().PutStr("state", shardStateAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. func (m *metricElasticsearchClusterShards) updateCapacity() { if m.data.Sum().DataPoints().Len() > m.capacity { m.capacity = m.data.Sum().DataPoints().Len() } } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. func (m *metricElasticsearchClusterShards) emit(metrics pmetric.MetricSlice) { if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) m.init() } } func newMetricElasticsearchClusterShards(cfg MetricConfig) metricElasticsearchClusterShards { m := metricElasticsearchClusterShards{config: cfg} if cfg.Enabled { m.data = pmetric.NewMetric() m.init() } return m } type metricElasticsearchClusterStateQueue struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. capacity int // max observed number of data points added to the metric. } // init fills elasticsearch.cluster.state_queue metric with initial data. func (m *metricElasticsearchClusterStateQueue) init() { m.data.SetName("elasticsearch.cluster.state_queue") m.data.SetDescription("Number of cluster states in queue.") m.data.SetUnit("1") m.data.SetEmptySum() m.data.Sum().SetIsMonotonic(false) m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) m.data.Sum().DataPoints().EnsureCapacity(m.capacity) } func (m *metricElasticsearchClusterStateQueue) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, clusterStateQueueStateAttributeValue string) { if !m.config.Enabled { return } dp := m.data.Sum().DataPoints().AppendEmpty() dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntValue(val) dp.Attributes().PutStr("state", clusterStateQueueStateAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. func (m *metricElasticsearchClusterStateQueue) updateCapacity() { if m.data.Sum().DataPoints().Len() > m.capacity { m.capacity = m.data.Sum().DataPoints().Len() } } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. func (m *metricElasticsearchClusterStateQueue) emit(metrics pmetric.MetricSlice) { if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) m.init() } } func newMetricElasticsearchClusterStateQueue(cfg MetricConfig) metricElasticsearchClusterStateQueue { m := metricElasticsearchClusterStateQueue{config: cfg} if cfg.Enabled { m.data = pmetric.NewMetric() m.init() } return m } type metricElasticsearchClusterStateUpdateCount struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. capacity int // max observed number of data points added to the metric. } // init fills elasticsearch.cluster.state_update.count metric with initial data. func (m *metricElasticsearchClusterStateUpdateCount) init() { m.data.SetName("elasticsearch.cluster.state_update.count") m.data.SetDescription("The number of cluster state update attempts that changed the cluster state since the node started.") m.data.SetUnit("1") m.data.SetEmptySum() m.data.Sum().SetIsMonotonic(true) m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) m.data.Sum().DataPoints().EnsureCapacity(m.capacity) } func (m *metricElasticsearchClusterStateUpdateCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, clusterStateUpdateStateAttributeValue string) { if !m.config.Enabled { return } dp := m.data.Sum().DataPoints().AppendEmpty() dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntValue(val) dp.Attributes().PutStr("state", clusterStateUpdateStateAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. func (m *metricElasticsearchClusterStateUpdateCount) updateCapacity() { if m.data.Sum().DataPoints().Len() > m.capacity { m.capacity = m.data.Sum().DataPoints().Len() } } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. func (m *metricElasticsearchClusterStateUpdateCount) emit(metrics pmetric.MetricSlice) { if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) m.init() } } func newMetricElasticsearchClusterStateUpdateCount(cfg MetricConfig) metricElasticsearchClusterStateUpdateCount { m := metricElasticsearchClusterStateUpdateCount{config: cfg} if cfg.Enabled { m.data = pmetric.NewMetric() m.init() } return m } type metricElasticsearchClusterStateUpdateTime struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. capacity int // max observed number of data points added to the metric. } // init fills elasticsearch.cluster.state_update.time metric with initial data. func (m *metricElasticsearchClusterStateUpdateTime) init() { m.data.SetName("elasticsearch.cluster.state_update.time") m.data.SetDescription("The cumulative amount of time updating the cluster state since the node started.") m.data.SetUnit("ms") m.data.SetEmptySum() m.data.Sum().SetIsMonotonic(true) m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) m.data.Sum().DataPoints().EnsureCapacity(m.capacity) } func (m *metricElasticsearchClusterStateUpdateTime) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, clusterStateUpdateStateAttributeValue string, clusterStateUpdateTypeAttributeValue string) { if !m.config.Enabled { return } dp := m.data.Sum().DataPoints().AppendEmpty() dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntValue(val) dp.Attributes().PutStr("state", clusterStateUpdateStateAttributeValue) dp.Attributes().PutStr("type", clusterStateUpdateTypeAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. func (m *metricElasticsearchClusterStateUpdateTime) updateCapacity() { if m.data.Sum().DataPoints().Len() > m.capacity { m.capacity = m.data.Sum().DataPoints().Len() } } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. func (m *metricElasticsearchClusterStateUpdateTime) emit(metrics pmetric.MetricSlice) { if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) m.init() } } func newMetricElasticsearchClusterStateUpdateTime(cfg MetricConfig) metricElasticsearchClusterStateUpdateTime { m := metricElasticsearchClusterStateUpdateTime{config: cfg} if cfg.Enabled { m.data = pmetric.NewMetric() m.init() } return m } type metricElasticsearchIndexCacheEvictions struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. capacity int // max observed number of data points added to the metric. } // init fills elasticsearch.index.cache.evictions metric with initial data. func (m *metricElasticsearchIndexCacheEvictions) init() { m.data.SetName("elasticsearch.index.cache.evictions") m.data.SetDescription("The number of evictions from the cache for an index.") m.data.SetUnit("{evictions}") m.data.SetEmptySum() m.data.Sum().SetIsMonotonic(true) m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) m.data.Sum().DataPoints().EnsureCapacity(m.capacity) } func (m *metricElasticsearchIndexCacheEvictions) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, cacheNameAttributeValue string, indexAggregationTypeAttributeValue string) { if !m.config.Enabled { return } dp := m.data.Sum().DataPoints().AppendEmpty() dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntValue(val) dp.Attributes().PutStr("cache_name", cacheNameAttributeValue) dp.Attributes().PutStr("aggregation", indexAggregationTypeAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. func (m *metricElasticsearchIndexCacheEvictions) updateCapacity() { if m.data.Sum().DataPoints().Len() > m.capacity { m.capacity = m.data.Sum().DataPoints().Len() } } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. func (m *metricElasticsearchIndexCacheEvictions) emit(metrics pmetric.MetricSlice) { if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) m.init() } } func newMetricElasticsearchIndexCacheEvictions(cfg MetricConfig) metricElasticsearchIndexCacheEvictions { m := metricElasticsearchIndexCacheEvictions{config: cfg} if cfg.Enabled { m.data = pmetric.NewMetric() m.init() } return m } type metricElasticsearchIndexCacheMemoryUsage struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. capacity int // max observed number of data points added to the metric. } // init fills elasticsearch.index.cache.memory.usage metric with initial data. func (m *metricElasticsearchIndexCacheMemoryUsage) init() { m.data.SetName("elasticsearch.index.cache.memory.usage") m.data.SetDescription("The size in bytes of the cache for an index.") m.data.SetUnit("By") m.data.SetEmptySum() m.data.Sum().SetIsMonotonic(false) m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) m.data.Sum().DataPoints().EnsureCapacity(m.capacity) } func (m *metricElasticsearchIndexCacheMemoryUsage) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, cacheNameAttributeValue string, indexAggregationTypeAttributeValue string) { if !m.config.Enabled { return } dp := m.data.Sum().DataPoints().AppendEmpty() dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntValue(val) dp.Attributes().PutStr("cache_name", cacheNameAttributeValue) dp.Attributes().PutStr("aggregation", indexAggregationTypeAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. func (m *metricElasticsearchIndexCacheMemoryUsage) updateCapacity() { if m.data.Sum().DataPoints().Len() > m.capacity { m.capacity = m.data.Sum().DataPoints().Len() } } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. func (m *metricElasticsearchIndexCacheMemoryUsage) emit(metrics pmetric.MetricSlice) { if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) m.init() } } func newMetricElasticsearchIndexCacheMemoryUsage(cfg MetricConfig) metricElasticsearchIndexCacheMemoryUsage { m := metricElasticsearchIndexCacheMemoryUsage{config: cfg} if cfg.Enabled { m.data = pmetric.NewMetric() m.init() } return m } type metricElasticsearchIndexCacheSize struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. capacity int // max observed number of data points added to the metric. } // init fills elasticsearch.index.cache.size metric with initial data. func (m *metricElasticsearchIndexCacheSize) init() { m.data.SetName("elasticsearch.index.cache.size") m.data.SetDescription("The number of elements of the query cache for an index.") m.data.SetUnit("1") m.data.SetEmptySum() m.data.Sum().SetIsMonotonic(false) m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) m.data.Sum().DataPoints().EnsureCapacity(m.capacity) } func (m *metricElasticsearchIndexCacheSize) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, indexAggregationTypeAttributeValue string) { if !m.config.Enabled { return } dp := m.data.Sum().DataPoints().AppendEmpty() dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntValue(val) dp.Attributes().PutStr("aggregation", indexAggregationTypeAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. func (m *metricElasticsearchIndexCacheSize) updateCapacity() { if m.data.Sum().DataPoints().Len() > m.capacity { m.capacity = m.data.Sum().DataPoints().Len() } } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. func (m *metricElasticsearchIndexCacheSize) emit(metrics pmetric.MetricSlice) { if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) m.init() } } func newMetricElasticsearchIndexCacheSize(cfg MetricConfig) metricElasticsearchIndexCacheSize { m := metricElasticsearchIndexCacheSize{config: cfg} if cfg.Enabled { m.data = pmetric.NewMetric() m.init() } return m } type metricElasticsearchIndexDocuments struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. capacity int // max observed number of data points added to the metric. } // init fills elasticsearch.index.documents metric with initial data. func (m *metricElasticsearchIndexDocuments) init() { m.data.SetName("elasticsearch.index.documents") m.data.SetDescription("The number of documents for an index.") m.data.SetUnit("{documents}") m.data.SetEmptySum() m.data.Sum().SetIsMonotonic(false) m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) m.data.Sum().DataPoints().EnsureCapacity(m.capacity) } func (m *metricElasticsearchIndexDocuments) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, documentStateAttributeValue string, indexAggregationTypeAttributeValue string) { if !m.config.Enabled { return } dp := m.data.Sum().DataPoints().AppendEmpty() dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntValue(val) dp.Attributes().PutStr("state", documentStateAttributeValue) dp.Attributes().PutStr("aggregation", indexAggregationTypeAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. func (m *metricElasticsearchIndexDocuments) updateCapacity() { if m.data.Sum().DataPoints().Len() > m.capacity { m.capacity = m.data.Sum().DataPoints().Len() } } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. func (m *metricElasticsearchIndexDocuments) emit(metrics pmetric.MetricSlice) { if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) m.init() } } func newMetricElasticsearchIndexDocuments(cfg MetricConfig) metricElasticsearchIndexDocuments { m := metricElasticsearchIndexDocuments{config: cfg} if cfg.Enabled { m.data = pmetric.NewMetric() m.init() } return m } type metricElasticsearchIndexOperationsCompleted struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. capacity int // max observed number of data points added to the metric. } // init fills elasticsearch.index.operations.completed metric with initial data. func (m *metricElasticsearchIndexOperationsCompleted) init() { m.data.SetName("elasticsearch.index.operations.completed") m.data.SetDescription("The number of operations completed for an index.") m.data.SetUnit("{operations}") m.data.SetEmptySum() m.data.Sum().SetIsMonotonic(true) m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) m.data.Sum().DataPoints().EnsureCapacity(m.capacity) } func (m *metricElasticsearchIndexOperationsCompleted) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, operationAttributeValue string, indexAggregationTypeAttributeValue string) { if !m.config.Enabled { return } dp := m.data.Sum().DataPoints().AppendEmpty() dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntValue(val) dp.Attributes().PutStr("operation", operationAttributeValue) dp.Attributes().PutStr("aggregation", indexAggregationTypeAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. func (m *metricElasticsearchIndexOperationsCompleted) updateCapacity() { if m.data.Sum().DataPoints().Len() > m.capacity { m.capacity = m.data.Sum().DataPoints().Len() } } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. func (m *metricElasticsearchIndexOperationsCompleted) emit(metrics pmetric.MetricSlice) { if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) m.init() } } func newMetricElasticsearchIndexOperationsCompleted(cfg MetricConfig) metricElasticsearchIndexOperationsCompleted { m := metricElasticsearchIndexOperationsCompleted{config: cfg} if cfg.Enabled { m.data = pmetric.NewMetric() m.init() } return m } type metricElasticsearchIndexOperationsMergeCurrent struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. capacity int // max observed number of data points added to the metric. } // init fills elasticsearch.index.operations.merge.current metric with initial data. func (m *metricElasticsearchIndexOperationsMergeCurrent) init() { m.data.SetName("elasticsearch.index.operations.merge.current") m.data.SetDescription("The number of currently active segment merges") m.data.SetUnit("{merges}") m.data.SetEmptyGauge() m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) } func (m *metricElasticsearchIndexOperationsMergeCurrent) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, indexAggregationTypeAttributeValue string) { if !m.config.Enabled { return } dp := m.data.Gauge().DataPoints().AppendEmpty() dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntValue(val) dp.Attributes().PutStr("aggregation", indexAggregationTypeAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. func (m *metricElasticsearchIndexOperationsMergeCurrent) updateCapacity() { if m.data.Gauge().DataPoints().Len() > m.capacity { m.capacity = m.data.Gauge().DataPoints().Len() } } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. func (m *metricElasticsearchIndexOperationsMergeCurrent) emit(metrics pmetric.MetricSlice) { if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) m.init() } } func newMetricElasticsearchIndexOperationsMergeCurrent(cfg MetricConfig) metricElasticsearchIndexOperationsMergeCurrent { m := metricElasticsearchIndexOperationsMergeCurrent{config: cfg} if cfg.Enabled { m.data = pmetric.NewMetric() m.init() } return m } type metricElasticsearchIndexOperationsMergeDocsCount struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. capacity int // max observed number of data points added to the metric. } // init fills elasticsearch.index.operations.merge.docs_count metric with initial data. func (m *metricElasticsearchIndexOperationsMergeDocsCount) init() { m.data.SetName("elasticsearch.index.operations.merge.docs_count") m.data.SetDescription("The total number of documents in merge operations for an index.") m.data.SetUnit("{documents}") m.data.SetEmptySum() m.data.Sum().SetIsMonotonic(true) m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) m.data.Sum().DataPoints().EnsureCapacity(m.capacity) } func (m *metricElasticsearchIndexOperationsMergeDocsCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, indexAggregationTypeAttributeValue string) { if !m.config.Enabled { return } dp := m.data.Sum().DataPoints().AppendEmpty() dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntValue(val) dp.Attributes().PutStr("aggregation", indexAggregationTypeAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. func (m *metricElasticsearchIndexOperationsMergeDocsCount) updateCapacity() { if m.data.Sum().DataPoints().Len() > m.capacity { m.capacity = m.data.Sum().DataPoints().Len() } } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. func (m *metricElasticsearchIndexOperationsMergeDocsCount) emit(metrics pmetric.MetricSlice) { if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) m.init() } } func newMetricElasticsearchIndexOperationsMergeDocsCount(cfg MetricConfig) metricElasticsearchIndexOperationsMergeDocsCount { m := metricElasticsearchIndexOperationsMergeDocsCount{config: cfg} if cfg.Enabled { m.data = pmetric.NewMetric() m.init() } return m } type metricElasticsearchIndexOperationsMergeSize struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. capacity int // max observed number of data points added to the metric. } // init fills elasticsearch.index.operations.merge.size metric with initial data. func (m *metricElasticsearchIndexOperationsMergeSize) init() { m.data.SetName("elasticsearch.index.operations.merge.size") m.data.SetDescription("The total size of merged segments for an index.") m.data.SetUnit("By") m.data.SetEmptySum() m.data.Sum().SetIsMonotonic(true) m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) m.data.Sum().DataPoints().EnsureCapacity(m.capacity) } func (m *metricElasticsearchIndexOperationsMergeSize) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, indexAggregationTypeAttributeValue string) { if !m.config.Enabled { return } dp := m.data.Sum().DataPoints().AppendEmpty() dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntValue(val) dp.Attributes().PutStr("aggregation", indexAggregationTypeAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. func (m *metricElasticsearchIndexOperationsMergeSize) updateCapacity() { if m.data.Sum().DataPoints().Len() > m.capacity { m.capacity = m.data.Sum().DataPoints().Len() } } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. func (m *metricElasticsearchIndexOperationsMergeSize) emit(metrics pmetric.MetricSlice) { if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) m.init() } } func newMetricElasticsearchIndexOperationsMergeSize(cfg MetricConfig) metricElasticsearchIndexOperationsMergeSize { m := metricElasticsearchIndexOperationsMergeSize{config: cfg} if cfg.Enabled { m.data = pmetric.NewMetric() m.init() } return m } type metricElasticsearchIndexOperationsTime struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. capacity int // max observed number of data points added to the metric. } // init fills elasticsearch.index.operations.time metric with initial data. func (m *metricElasticsearchIndexOperationsTime) init() { m.data.SetName("elasticsearch.index.operations.time") m.data.SetDescription("Time spent on operations for an index.") m.data.SetUnit("ms") m.data.SetEmptySum() m.data.Sum().SetIsMonotonic(true) m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) m.data.Sum().DataPoints().EnsureCapacity(m.capacity) } func (m *metricElasticsearchIndexOperationsTime) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, operationAttributeValue string, indexAggregationTypeAttributeValue string) { if !m.config.Enabled { return } dp := m.data.Sum().DataPoints().AppendEmpty() dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntValue(val) dp.Attributes().PutStr("operation", operationAttributeValue) dp.Attributes().PutStr("aggregation", indexAggregationTypeAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. func (m *metricElasticsearchIndexOperationsTime) updateCapacity() { if m.data.Sum().DataPoints().Len() > m.capacity { m.capacity = m.data.Sum().DataPoints().Len() } } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. func (m *metricElasticsearchIndexOperationsTime) emit(metrics pmetric.MetricSlice) { if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) m.init() } } func newMetricElasticsearchIndexOperationsTime(cfg MetricConfig) metricElasticsearchIndexOperationsTime { m := metricElasticsearchIndexOperationsTime{config: cfg} if cfg.Enabled { m.data = pmetric.NewMetric() m.init() } return m } type metricElasticsearchIndexSegmentsCount struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. capacity int // max observed number of data points added to the metric. } // init fills elasticsearch.index.segments.count metric with initial data. func (m *metricElasticsearchIndexSegmentsCount) init() { m.data.SetName("elasticsearch.index.segments.count") m.data.SetDescription("Number of segments of an index.") m.data.SetUnit("{segments}") m.data.SetEmptySum() m.data.Sum().SetIsMonotonic(false) m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) m.data.Sum().DataPoints().EnsureCapacity(m.capacity) } func (m *metricElasticsearchIndexSegmentsCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, indexAggregationTypeAttributeValue string) { if !m.config.Enabled { return } dp := m.data.Sum().DataPoints().AppendEmpty() dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntValue(val) dp.Attributes().PutStr("aggregation", indexAggregationTypeAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. func (m *metricElasticsearchIndexSegmentsCount) updateCapacity() { if m.data.Sum().DataPoints().Len() > m.capacity { m.capacity = m.data.Sum().DataPoints().Len() } } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. func (m *metricElasticsearchIndexSegmentsCount) emit(metrics pmetric.MetricSlice) { if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) m.init() } } func newMetricElasticsearchIndexSegmentsCount(cfg MetricConfig) metricElasticsearchIndexSegmentsCount { m := metricElasticsearchIndexSegmentsCount{config: cfg} if cfg.Enabled { m.data = pmetric.NewMetric() m.init() } return m } type metricElasticsearchIndexSegmentsMemory struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. capacity int // max observed number of data points added to the metric. } // init fills elasticsearch.index.segments.memory metric with initial data. func (m *metricElasticsearchIndexSegmentsMemory) init() { m.data.SetName("elasticsearch.index.segments.memory") m.data.SetDescription("Size of memory for segment object of an index.") m.data.SetUnit("By") m.data.SetEmptySum() m.data.Sum().SetIsMonotonic(false) m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) m.data.Sum().DataPoints().EnsureCapacity(m.capacity) } func (m *metricElasticsearchIndexSegmentsMemory) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, indexAggregationTypeAttributeValue string, segmentsMemoryObjectTypeAttributeValue string) { if !m.config.Enabled { return } dp := m.data.Sum().DataPoints().AppendEmpty() dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntValue(val) dp.Attributes().PutStr("aggregation", indexAggregationTypeAttributeValue) dp.Attributes().PutStr("object", segmentsMemoryObjectTypeAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. func (m *metricElasticsearchIndexSegmentsMemory) updateCapacity() { if m.data.Sum().DataPoints().Len() > m.capacity { m.capacity = m.data.Sum().DataPoints().Len() } } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. func (m *metricElasticsearchIndexSegmentsMemory) emit(metrics pmetric.MetricSlice) { if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) m.init() } } func newMetricElasticsearchIndexSegmentsMemory(cfg MetricConfig) metricElasticsearchIndexSegmentsMemory { m := metricElasticsearchIndexSegmentsMemory{config: cfg} if cfg.Enabled { m.data = pmetric.NewMetric() m.init() } return m } type metricElasticsearchIndexSegmentsSize struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. capacity int // max observed number of data points added to the metric. } // init fills elasticsearch.index.segments.size metric with initial data. func (m *metricElasticsearchIndexSegmentsSize) init() { m.data.SetName("elasticsearch.index.segments.size") m.data.SetDescription("Size of segments of an index.") m.data.SetUnit("By") m.data.SetEmptySum() m.data.Sum().SetIsMonotonic(false) m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) m.data.Sum().DataPoints().EnsureCapacity(m.capacity) } func (m *metricElasticsearchIndexSegmentsSize) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, indexAggregationTypeAttributeValue string) { if !m.config.Enabled { return } dp := m.data.Sum().DataPoints().AppendEmpty() dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntValue(val) dp.Attributes().PutStr("aggregation", indexAggregationTypeAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. func (m *metricElasticsearchIndexSegmentsSize) updateCapacity() { if m.data.Sum().DataPoints().Len() > m.capacity { m.capacity = m.data.Sum().DataPoints().Len() } } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. func (m *metricElasticsearchIndexSegmentsSize) emit(metrics pmetric.MetricSlice) { if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) m.init() } } func newMetricElasticsearchIndexSegmentsSize(cfg MetricConfig) metricElasticsearchIndexSegmentsSize { m := metricElasticsearchIndexSegmentsSize{config: cfg} if cfg.Enabled { m.data = pmetric.NewMetric() m.init() } return m } type metricElasticsearchIndexShardsSize struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. capacity int // max observed number of data points added to the metric. } // init fills elasticsearch.index.shards.size metric with initial data. func (m *metricElasticsearchIndexShardsSize) init() { m.data.SetName("elasticsearch.index.shards.size") m.data.SetDescription("The size of the shards assigned to this index.") m.data.SetUnit("By") m.data.SetEmptySum() m.data.Sum().SetIsMonotonic(false) m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) m.data.Sum().DataPoints().EnsureCapacity(m.capacity) } func (m *metricElasticsearchIndexShardsSize) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, indexAggregationTypeAttributeValue string) { if !m.config.Enabled { return } dp := m.data.Sum().DataPoints().AppendEmpty() dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntValue(val) dp.Attributes().PutStr("aggregation", indexAggregationTypeAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. func (m *metricElasticsearchIndexShardsSize) updateCapacity() { if m.data.Sum().DataPoints().Len() > m.capacity { m.capacity = m.data.Sum().DataPoints().Len() } } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. func (m *metricElasticsearchIndexShardsSize) emit(metrics pmetric.MetricSlice) { if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) m.init() } } func newMetricElasticsearchIndexShardsSize(cfg MetricConfig) metricElasticsearchIndexShardsSize { m := metricElasticsearchIndexShardsSize{config: cfg} if cfg.Enabled { m.data = pmetric.NewMetric() m.init() } return m } type metricElasticsearchIndexTranslogOperations struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. capacity int // max observed number of data points added to the metric. } // init fills elasticsearch.index.translog.operations metric with initial data. func (m *metricElasticsearchIndexTranslogOperations) init() { m.data.SetName("elasticsearch.index.translog.operations") m.data.SetDescription("Number of transaction log operations for an index.") m.data.SetUnit("{operations}") m.data.SetEmptySum() m.data.Sum().SetIsMonotonic(true) m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) m.data.Sum().DataPoints().EnsureCapacity(m.capacity) } func (m *metricElasticsearchIndexTranslogOperations) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, indexAggregationTypeAttributeValue string) { if !m.config.Enabled { return } dp := m.data.Sum().DataPoints().AppendEmpty() dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntValue(val) dp.Attributes().PutStr("aggregation", indexAggregationTypeAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. func (m *metricElasticsearchIndexTranslogOperations) updateCapacity() { if m.data.Sum().DataPoints().Len() > m.capacity { m.capacity = m.data.Sum().DataPoints().Len() } } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. func (m *metricElasticsearchIndexTranslogOperations) emit(metrics pmetric.MetricSlice) { if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) m.init() } } func newMetricElasticsearchIndexTranslogOperations(cfg MetricConfig) metricElasticsearchIndexTranslogOperations { m := metricElasticsearchIndexTranslogOperations{config: cfg} if cfg.Enabled { m.data = pmetric.NewMetric() m.init() } return m } type metricElasticsearchIndexTranslogSize struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. capacity int // max observed number of data points added to the metric. } // init fills elasticsearch.index.translog.size metric with initial data. func (m *metricElasticsearchIndexTranslogSize) init() { m.data.SetName("elasticsearch.index.translog.size") m.data.SetDescription("Size of the transaction log for an index.") m.data.SetUnit("By") m.data.SetEmptySum() m.data.Sum().SetIsMonotonic(false) m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) m.data.Sum().DataPoints().EnsureCapacity(m.capacity) } func (m *metricElasticsearchIndexTranslogSize) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, indexAggregationTypeAttributeValue string) { if !m.config.Enabled { return } dp := m.data.Sum().DataPoints().AppendEmpty() dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntValue(val) dp.Attributes().PutStr("aggregation", indexAggregationTypeAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. func (m *metricElasticsearchIndexTranslogSize) updateCapacity() { if m.data.Sum().DataPoints().Len() > m.capacity { m.capacity = m.data.Sum().DataPoints().Len() } } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. func (m *metricElasticsearchIndexTranslogSize) emit(metrics pmetric.MetricSlice) { if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) m.init() } } func newMetricElasticsearchIndexTranslogSize(cfg MetricConfig) metricElasticsearchIndexTranslogSize { m := metricElasticsearchIndexTranslogSize{config: cfg} if cfg.Enabled { m.data = pmetric.NewMetric() m.init() } return m } type metricElasticsearchIndexingPressureMemoryLimit struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. capacity int // max observed number of data points added to the metric. } // init fills elasticsearch.indexing_pressure.memory.limit metric with initial data. func (m *metricElasticsearchIndexingPressureMemoryLimit) init() { m.data.SetName("elasticsearch.indexing_pressure.memory.limit") m.data.SetDescription("Configured memory limit, in bytes, for the indexing requests.") m.data.SetUnit("By") m.data.SetEmptyGauge() } func (m *metricElasticsearchIndexingPressureMemoryLimit) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { if !m.config.Enabled { return } dp := m.data.Gauge().DataPoints().AppendEmpty() dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntValue(val) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. func (m *metricElasticsearchIndexingPressureMemoryLimit) updateCapacity() { if m.data.Gauge().DataPoints().Len() > m.capacity { m.capacity = m.data.Gauge().DataPoints().Len() } } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. func (m *metricElasticsearchIndexingPressureMemoryLimit) emit(metrics pmetric.MetricSlice) { if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) m.init() } } func newMetricElasticsearchIndexingPressureMemoryLimit(cfg MetricConfig) metricElasticsearchIndexingPressureMemoryLimit { m := metricElasticsearchIndexingPressureMemoryLimit{config: cfg} if cfg.Enabled { m.data = pmetric.NewMetric() m.init() } return m } type metricElasticsearchIndexingPressureMemoryTotalPrimaryRejections struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. capacity int // max observed number of data points added to the metric. } // init fills elasticsearch.indexing_pressure.memory.total.primary_rejections metric with initial data. func (m *metricElasticsearchIndexingPressureMemoryTotalPrimaryRejections) init() { m.data.SetName("elasticsearch.indexing_pressure.memory.total.primary_rejections") m.data.SetDescription("Cumulative number of indexing requests rejected in the primary stage.") m.data.SetUnit("1") m.data.SetEmptySum() m.data.Sum().SetIsMonotonic(true) m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) } func (m *metricElasticsearchIndexingPressureMemoryTotalPrimaryRejections) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { if !m.config.Enabled { return } dp := m.data.Sum().DataPoints().AppendEmpty() dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntValue(val) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. func (m *metricElasticsearchIndexingPressureMemoryTotalPrimaryRejections) updateCapacity() { if m.data.Sum().DataPoints().Len() > m.capacity { m.capacity = m.data.Sum().DataPoints().Len() } } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. func (m *metricElasticsearchIndexingPressureMemoryTotalPrimaryRejections) emit(metrics pmetric.MetricSlice) { if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) m.init() } } func newMetricElasticsearchIndexingPressureMemoryTotalPrimaryRejections(cfg MetricConfig) metricElasticsearchIndexingPressureMemoryTotalPrimaryRejections { m := metricElasticsearchIndexingPressureMemoryTotalPrimaryRejections{config: cfg} if cfg.Enabled { m.data = pmetric.NewMetric() m.init() } return m } type metricElasticsearchIndexingPressureMemoryTotalReplicaRejections struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. capacity int // max observed number of data points added to the metric. } // init fills elasticsearch.indexing_pressure.memory.total.replica_rejections metric with initial data. func (m *metricElasticsearchIndexingPressureMemoryTotalReplicaRejections) init() { m.data.SetName("elasticsearch.indexing_pressure.memory.total.replica_rejections") m.data.SetDescription("Number of indexing requests rejected in the replica stage.") m.data.SetUnit("1") m.data.SetEmptySum() m.data.Sum().SetIsMonotonic(true) m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) } func (m *metricElasticsearchIndexingPressureMemoryTotalReplicaRejections) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { if !m.config.Enabled { return } dp := m.data.Sum().DataPoints().AppendEmpty() dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntValue(val) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. func (m *metricElasticsearchIndexingPressureMemoryTotalReplicaRejections) updateCapacity() { if m.data.Sum().DataPoints().Len() > m.capacity { m.capacity = m.data.Sum().DataPoints().Len() } } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. func (m *metricElasticsearchIndexingPressureMemoryTotalReplicaRejections) emit(metrics pmetric.MetricSlice) { if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) m.init() } } func newMetricElasticsearchIndexingPressureMemoryTotalReplicaRejections(cfg MetricConfig) metricElasticsearchIndexingPressureMemoryTotalReplicaRejections { m := metricElasticsearchIndexingPressureMemoryTotalReplicaRejections{config: cfg} if cfg.Enabled { m.data = pmetric.NewMetric() m.init() } return m } type metricElasticsearchMemoryIndexingPressure struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. capacity int // max observed number of data points added to the metric. } // init fills elasticsearch.memory.indexing_pressure metric with initial data. func (m *metricElasticsearchMemoryIndexingPressure) init() { m.data.SetName("elasticsearch.memory.indexing_pressure") m.data.SetDescription("Memory consumed, in bytes, by indexing requests in the specified stage.") m.data.SetUnit("By") m.data.SetEmptySum() m.data.Sum().SetIsMonotonic(false) m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) m.data.Sum().DataPoints().EnsureCapacity(m.capacity) } func (m *metricElasticsearchMemoryIndexingPressure) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, indexingPressureStageAttributeValue string) { if !m.config.Enabled { return } dp := m.data.Sum().DataPoints().AppendEmpty() dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntValue(val) dp.Attributes().PutStr("stage", indexingPressureStageAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. func (m *metricElasticsearchMemoryIndexingPressure) updateCapacity() { if m.data.Sum().DataPoints().Len() > m.capacity { m.capacity = m.data.Sum().DataPoints().Len() } } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. func (m *metricElasticsearchMemoryIndexingPressure) emit(metrics pmetric.MetricSlice) { if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) m.init() } } func newMetricElasticsearchMemoryIndexingPressure(cfg MetricConfig) metricElasticsearchMemoryIndexingPressure { m := metricElasticsearchMemoryIndexingPressure{config: cfg} if cfg.Enabled { m.data = pmetric.NewMetric() m.init() } return m } type metricElasticsearchNodeCacheCount struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. capacity int // max observed number of data points added to the metric. } // init fills elasticsearch.node.cache.count metric with initial data. func (m *metricElasticsearchNodeCacheCount) init() { m.data.SetName("elasticsearch.node.cache.count") m.data.SetDescription("Total count of query cache misses across all shards assigned to selected nodes.") m.data.SetUnit("{count}") m.data.SetEmptySum() m.data.Sum().SetIsMonotonic(false) m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) m.data.Sum().DataPoints().EnsureCapacity(m.capacity) } func (m *metricElasticsearchNodeCacheCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, queryCacheCountTypeAttributeValue string) { if !m.config.Enabled { return } dp := m.data.Sum().DataPoints().AppendEmpty() dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntValue(val) dp.Attributes().PutStr("type", queryCacheCountTypeAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. func (m *metricElasticsearchNodeCacheCount) updateCapacity() { if m.data.Sum().DataPoints().Len() > m.capacity { m.capacity = m.data.Sum().DataPoints().Len() } } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. func (m *metricElasticsearchNodeCacheCount) emit(metrics pmetric.MetricSlice) { if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) m.init() } } func newMetricElasticsearchNodeCacheCount(cfg MetricConfig) metricElasticsearchNodeCacheCount { m := metricElasticsearchNodeCacheCount{config: cfg} if cfg.Enabled { m.data = pmetric.NewMetric() m.init() } return m } type metricElasticsearchNodeCacheEvictions struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. capacity int // max observed number of data points added to the metric. } // init fills elasticsearch.node.cache.evictions metric with initial data. func (m *metricElasticsearchNodeCacheEvictions) init() { m.data.SetName("elasticsearch.node.cache.evictions") m.data.SetDescription("The number of evictions from the cache on a node.") m.data.SetUnit("{evictions}") m.data.SetEmptySum() m.data.Sum().SetIsMonotonic(true) m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) m.data.Sum().DataPoints().EnsureCapacity(m.capacity) } func (m *metricElasticsearchNodeCacheEvictions) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, cacheNameAttributeValue string) { if !m.config.Enabled { return } dp := m.data.Sum().DataPoints().AppendEmpty() dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntValue(val) dp.Attributes().PutStr("cache_name", cacheNameAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. func (m *metricElasticsearchNodeCacheEvictions) updateCapacity() { if m.data.Sum().DataPoints().Len() > m.capacity { m.capacity = m.data.Sum().DataPoints().Len() } } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. func (m *metricElasticsearchNodeCacheEvictions) emit(metrics pmetric.MetricSlice) { if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) m.init() } } func newMetricElasticsearchNodeCacheEvictions(cfg MetricConfig) metricElasticsearchNodeCacheEvictions { m := metricElasticsearchNodeCacheEvictions{config: cfg} if cfg.Enabled { m.data = pmetric.NewMetric() m.init() } return m } type metricElasticsearchNodeCacheMemoryUsage struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. capacity int // max observed number of data points added to the metric. } // init fills elasticsearch.node.cache.memory.usage metric with initial data. func (m *metricElasticsearchNodeCacheMemoryUsage) init() { m.data.SetName("elasticsearch.node.cache.memory.usage") m.data.SetDescription("The size in bytes of the cache on a node.") m.data.SetUnit("By") m.data.SetEmptySum() m.data.Sum().SetIsMonotonic(false) m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) m.data.Sum().DataPoints().EnsureCapacity(m.capacity) } func (m *metricElasticsearchNodeCacheMemoryUsage) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, cacheNameAttributeValue string) { if !m.config.Enabled { return } dp := m.data.Sum().DataPoints().AppendEmpty() dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntValue(val) dp.Attributes().PutStr("cache_name", cacheNameAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. func (m *metricElasticsearchNodeCacheMemoryUsage) updateCapacity() { if m.data.Sum().DataPoints().Len() > m.capacity { m.capacity = m.data.Sum().DataPoints().Len() } } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. func (m *metricElasticsearchNodeCacheMemoryUsage) emit(metrics pmetric.MetricSlice) { if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) m.init() } } func newMetricElasticsearchNodeCacheMemoryUsage(cfg MetricConfig) metricElasticsearchNodeCacheMemoryUsage { m := metricElasticsearchNodeCacheMemoryUsage{config: cfg} if cfg.Enabled { m.data = pmetric.NewMetric() m.init() } return m } type metricElasticsearchNodeCacheSize struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. capacity int // max observed number of data points added to the metric. } // init fills elasticsearch.node.cache.size metric with initial data. func (m *metricElasticsearchNodeCacheSize) init() { m.data.SetName("elasticsearch.node.cache.size") m.data.SetDescription("Total amount of memory used for the query cache across all shards assigned to the node.") m.data.SetUnit("By") m.data.SetEmptySum() m.data.Sum().SetIsMonotonic(false) m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) } func (m *metricElasticsearchNodeCacheSize) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { if !m.config.Enabled { return } dp := m.data.Sum().DataPoints().AppendEmpty() dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntValue(val) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. func (m *metricElasticsearchNodeCacheSize) updateCapacity() { if m.data.Sum().DataPoints().Len() > m.capacity { m.capacity = m.data.Sum().DataPoints().Len() } } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. func (m *metricElasticsearchNodeCacheSize) emit(metrics pmetric.MetricSlice) { if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) m.init() } } func newMetricElasticsearchNodeCacheSize(cfg MetricConfig) metricElasticsearchNodeCacheSize { m := metricElasticsearchNodeCacheSize{config: cfg} if cfg.Enabled { m.data = pmetric.NewMetric() m.init() } return m } type metricElasticsearchNodeClusterConnections struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. capacity int // max observed number of data points added to the metric. } // init fills elasticsearch.node.cluster.connections metric with initial data. func (m *metricElasticsearchNodeClusterConnections) init() { m.data.SetName("elasticsearch.node.cluster.connections") m.data.SetDescription("The number of open tcp connections for internal cluster communication.") m.data.SetUnit("{connections}") m.data.SetEmptySum() m.data.Sum().SetIsMonotonic(false) m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) } func (m *metricElasticsearchNodeClusterConnections) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { if !m.config.Enabled { return } dp := m.data.Sum().DataPoints().AppendEmpty() dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntValue(val) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. func (m *metricElasticsearchNodeClusterConnections) updateCapacity() { if m.data.Sum().DataPoints().Len() > m.capacity { m.capacity = m.data.Sum().DataPoints().Len() } } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. func (m *metricElasticsearchNodeClusterConnections) emit(metrics pmetric.MetricSlice) { if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) m.init() } } func newMetricElasticsearchNodeClusterConnections(cfg MetricConfig) metricElasticsearchNodeClusterConnections { m := metricElasticsearchNodeClusterConnections{config: cfg} if cfg.Enabled { m.data = pmetric.NewMetric() m.init() } return m } type metricElasticsearchNodeClusterIo struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. capacity int // max observed number of data points added to the metric. } // init fills elasticsearch.node.cluster.io metric with initial data. func (m *metricElasticsearchNodeClusterIo) init() { m.data.SetName("elasticsearch.node.cluster.io") m.data.SetDescription("The number of bytes sent and received on the network for internal cluster communication.") m.data.SetUnit("By") m.data.SetEmptySum() m.data.Sum().SetIsMonotonic(true) m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) m.data.Sum().DataPoints().EnsureCapacity(m.capacity) } func (m *metricElasticsearchNodeClusterIo) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, directionAttributeValue string) { if !m.config.Enabled { return } dp := m.data.Sum().DataPoints().AppendEmpty() dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntValue(val) dp.Attributes().PutStr("direction", directionAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. func (m *metricElasticsearchNodeClusterIo) updateCapacity() { if m.data.Sum().DataPoints().Len() > m.capacity { m.capacity = m.data.Sum().DataPoints().Len() } } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. func (m *metricElasticsearchNodeClusterIo) emit(metrics pmetric.MetricSlice) { if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) m.init() } } func newMetricElasticsearchNodeClusterIo(cfg MetricConfig) metricElasticsearchNodeClusterIo { m := metricElasticsearchNodeClusterIo{config: cfg} if cfg.Enabled { m.data = pmetric.NewMetric() m.init() } return m } type metricElasticsearchNodeDiskIoRead struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. capacity int // max observed number of data points added to the metric. } // init fills elasticsearch.node.disk.io.read metric with initial data. func (m *metricElasticsearchNodeDiskIoRead) init() { m.data.SetName("elasticsearch.node.disk.io.read") m.data.SetDescription("The total number of kilobytes read across all file stores for this node.") m.data.SetUnit("KiBy") m.data.SetEmptySum() m.data.Sum().SetIsMonotonic(false) m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) } func (m *metricElasticsearchNodeDiskIoRead) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { if !m.config.Enabled { return } dp := m.data.Sum().DataPoints().AppendEmpty() dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntValue(val) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. func (m *metricElasticsearchNodeDiskIoRead) updateCapacity() { if m.data.Sum().DataPoints().Len() > m.capacity { m.capacity = m.data.Sum().DataPoints().Len() } } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. func (m *metricElasticsearchNodeDiskIoRead) emit(metrics pmetric.MetricSlice) { if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) m.init() } } func newMetricElasticsearchNodeDiskIoRead(cfg MetricConfig) metricElasticsearchNodeDiskIoRead { m := metricElasticsearchNodeDiskIoRead{config: cfg} if cfg.Enabled { m.data = pmetric.NewMetric() m.init() } return m } type metricElasticsearchNodeDiskIoWrite struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. capacity int // max observed number of data points added to the metric. } // init fills elasticsearch.node.disk.io.write metric with initial data. func (m *metricElasticsearchNodeDiskIoWrite) init() { m.data.SetName("elasticsearch.node.disk.io.write") m.data.SetDescription("The total number of kilobytes written across all file stores for this node.") m.data.SetUnit("KiBy") m.data.SetEmptySum() m.data.Sum().SetIsMonotonic(false) m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) } func (m *metricElasticsearchNodeDiskIoWrite) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { if !m.config.Enabled { return } dp := m.data.Sum().DataPoints().AppendEmpty() dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntValue(val) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. func (m *metricElasticsearchNodeDiskIoWrite) updateCapacity() { if m.data.Sum().DataPoints().Len() > m.capacity { m.capacity = m.data.Sum().DataPoints().Len() } } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. func (m *metricElasticsearchNodeDiskIoWrite) emit(metrics pmetric.MetricSlice) { if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) m.init() } } func newMetricElasticsearchNodeDiskIoWrite(cfg MetricConfig) metricElasticsearchNodeDiskIoWrite { m := metricElasticsearchNodeDiskIoWrite{config: cfg} if cfg.Enabled { m.data = pmetric.NewMetric() m.init() } return m } type metricElasticsearchNodeDocuments struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. capacity int // max observed number of data points added to the metric. } // init fills elasticsearch.node.documents metric with initial data. func (m *metricElasticsearchNodeDocuments) init() { m.data.SetName("elasticsearch.node.documents") m.data.SetDescription("The number of documents on the node.") m.data.SetUnit("{documents}") m.data.SetEmptySum() m.data.Sum().SetIsMonotonic(false) m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) m.data.Sum().DataPoints().EnsureCapacity(m.capacity) } func (m *metricElasticsearchNodeDocuments) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, documentStateAttributeValue string) { if !m.config.Enabled { return } dp := m.data.Sum().DataPoints().AppendEmpty() dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntValue(val) dp.Attributes().PutStr("state", documentStateAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. func (m *metricElasticsearchNodeDocuments) updateCapacity() { if m.data.Sum().DataPoints().Len() > m.capacity { m.capacity = m.data.Sum().DataPoints().Len() } } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. func (m *metricElasticsearchNodeDocuments) emit(metrics pmetric.MetricSlice) { if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) m.init() } } func newMetricElasticsearchNodeDocuments(cfg MetricConfig) metricElasticsearchNodeDocuments { m := metricElasticsearchNodeDocuments{config: cfg} if cfg.Enabled { m.data = pmetric.NewMetric() m.init() } return m } type metricElasticsearchNodeFsDiskAvailable struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. capacity int // max observed number of data points added to the metric. } // init fills elasticsearch.node.fs.disk.available metric with initial data. func (m *metricElasticsearchNodeFsDiskAvailable) init() { m.data.SetName("elasticsearch.node.fs.disk.available") m.data.SetDescription("The amount of disk space available to the JVM across all file stores for this node. Depending on OS or process level restrictions, this might appear less than free. This is the actual amount of free disk space the Elasticsearch node can utilise.") m.data.SetUnit("By") m.data.SetEmptySum() m.data.Sum().SetIsMonotonic(false) m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) } func (m *metricElasticsearchNodeFsDiskAvailable) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { if !m.config.Enabled { return } dp := m.data.Sum().DataPoints().AppendEmpty() dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntValue(val) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. func (m *metricElasticsearchNodeFsDiskAvailable) updateCapacity() { if m.data.Sum().DataPoints().Len() > m.capacity { m.capacity = m.data.Sum().DataPoints().Len() } } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. func (m *metricElasticsearchNodeFsDiskAvailable) emit(metrics pmetric.MetricSlice) { if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) m.init() } } func newMetricElasticsearchNodeFsDiskAvailable(cfg MetricConfig) metricElasticsearchNodeFsDiskAvailable { m := metricElasticsearchNodeFsDiskAvailable{config: cfg} if cfg.Enabled { m.data = pmetric.NewMetric() m.init() } return m } type metricElasticsearchNodeFsDiskFree struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. capacity int // max observed number of data points added to the metric. } // init fills elasticsearch.node.fs.disk.free metric with initial data. func (m *metricElasticsearchNodeFsDiskFree) init() { m.data.SetName("elasticsearch.node.fs.disk.free") m.data.SetDescription("The amount of unallocated disk space across all file stores for this node.") m.data.SetUnit("By") m.data.SetEmptySum() m.data.Sum().SetIsMonotonic(false) m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) } func (m *metricElasticsearchNodeFsDiskFree) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { if !m.config.Enabled { return } dp := m.data.Sum().DataPoints().AppendEmpty() dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntValue(val) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. func (m *metricElasticsearchNodeFsDiskFree) updateCapacity() { if m.data.Sum().DataPoints().Len() > m.capacity { m.capacity = m.data.Sum().DataPoints().Len() } } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. func (m *metricElasticsearchNodeFsDiskFree) emit(metrics pmetric.MetricSlice) { if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) m.init() } } func newMetricElasticsearchNodeFsDiskFree(cfg MetricConfig) metricElasticsearchNodeFsDiskFree { m := metricElasticsearchNodeFsDiskFree{config: cfg} if cfg.Enabled { m.data = pmetric.NewMetric() m.init() } return m } type metricElasticsearchNodeFsDiskTotal struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. capacity int // max observed number of data points added to the metric. } // init fills elasticsearch.node.fs.disk.total metric with initial data. func (m *metricElasticsearchNodeFsDiskTotal) init() { m.data.SetName("elasticsearch.node.fs.disk.total") m.data.SetDescription("The amount of disk space across all file stores for this node.") m.data.SetUnit("By") m.data.SetEmptySum() m.data.Sum().SetIsMonotonic(false) m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) } func (m *metricElasticsearchNodeFsDiskTotal) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { if !m.config.Enabled { return } dp := m.data.Sum().DataPoints().AppendEmpty() dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntValue(val) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. func (m *metricElasticsearchNodeFsDiskTotal) updateCapacity() { if m.data.Sum().DataPoints().Len() > m.capacity { m.capacity = m.data.Sum().DataPoints().Len() } } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. func (m *metricElasticsearchNodeFsDiskTotal) emit(metrics pmetric.MetricSlice) { if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) m.init() } } func newMetricElasticsearchNodeFsDiskTotal(cfg MetricConfig) metricElasticsearchNodeFsDiskTotal { m := metricElasticsearchNodeFsDiskTotal{config: cfg} if cfg.Enabled { m.data = pmetric.NewMetric() m.init() } return m } type metricElasticsearchNodeHTTPConnections struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. capacity int // max observed number of data points added to the metric. } // init fills elasticsearch.node.http.connections metric with initial data. func (m *metricElasticsearchNodeHTTPConnections) init() { m.data.SetName("elasticsearch.node.http.connections") m.data.SetDescription("The number of HTTP connections to the node.") m.data.SetUnit("{connections}") m.data.SetEmptySum() m.data.Sum().SetIsMonotonic(false) m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) } func (m *metricElasticsearchNodeHTTPConnections) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { if !m.config.Enabled { return } dp := m.data.Sum().DataPoints().AppendEmpty() dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntValue(val) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. func (m *metricElasticsearchNodeHTTPConnections) updateCapacity() { if m.data.Sum().DataPoints().Len() > m.capacity { m.capacity = m.data.Sum().DataPoints().Len() } } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. func (m *metricElasticsearchNodeHTTPConnections) emit(metrics pmetric.MetricSlice) { if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) m.init() } } func newMetricElasticsearchNodeHTTPConnections(cfg MetricConfig) metricElasticsearchNodeHTTPConnections { m := metricElasticsearchNodeHTTPConnections{config: cfg} if cfg.Enabled { m.data = pmetric.NewMetric() m.init() } return m } type metricElasticsearchNodeIngestDocuments struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. capacity int // max observed number of data points added to the metric. } // init fills elasticsearch.node.ingest.documents metric with initial data. func (m *metricElasticsearchNodeIngestDocuments) init() { m.data.SetName("elasticsearch.node.ingest.documents") m.data.SetDescription("Total number of documents ingested during the lifetime of this node.") m.data.SetUnit("{documents}") m.data.SetEmptySum() m.data.Sum().SetIsMonotonic(true) m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) } func (m *metricElasticsearchNodeIngestDocuments) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { if !m.config.Enabled { return } dp := m.data.Sum().DataPoints().AppendEmpty() dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntValue(val) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. func (m *metricElasticsearchNodeIngestDocuments) updateCapacity() { if m.data.Sum().DataPoints().Len() > m.capacity { m.capacity = m.data.Sum().DataPoints().Len() } } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. func (m *metricElasticsearchNodeIngestDocuments) emit(metrics pmetric.MetricSlice) { if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) m.init() } } func newMetricElasticsearchNodeIngestDocuments(cfg MetricConfig) metricElasticsearchNodeIngestDocuments { m := metricElasticsearchNodeIngestDocuments{config: cfg} if cfg.Enabled { m.data = pmetric.NewMetric() m.init() } return m } type metricElasticsearchNodeIngestDocumentsCurrent struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. capacity int // max observed number of data points added to the metric. } // init fills elasticsearch.node.ingest.documents.current metric with initial data. func (m *metricElasticsearchNodeIngestDocumentsCurrent) init() { m.data.SetName("elasticsearch.node.ingest.documents.current") m.data.SetDescription("Total number of documents currently being ingested.") m.data.SetUnit("{documents}") m.data.SetEmptySum() m.data.Sum().SetIsMonotonic(false) m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) } func (m *metricElasticsearchNodeIngestDocumentsCurrent) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { if !m.config.Enabled { return } dp := m.data.Sum().DataPoints().AppendEmpty() dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntValue(val) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. func (m *metricElasticsearchNodeIngestDocumentsCurrent) updateCapacity() { if m.data.Sum().DataPoints().Len() > m.capacity { m.capacity = m.data.Sum().DataPoints().Len() } } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. func (m *metricElasticsearchNodeIngestDocumentsCurrent) emit(metrics pmetric.MetricSlice) { if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) m.init() } } func newMetricElasticsearchNodeIngestDocumentsCurrent(cfg MetricConfig) metricElasticsearchNodeIngestDocumentsCurrent { m := metricElasticsearchNodeIngestDocumentsCurrent{config: cfg} if cfg.Enabled { m.data = pmetric.NewMetric() m.init() } return m } type metricElasticsearchNodeIngestOperationsFailed struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. capacity int // max observed number of data points added to the metric. } // init fills elasticsearch.node.ingest.operations.failed metric with initial data. func (m *metricElasticsearchNodeIngestOperationsFailed) init() { m.data.SetName("elasticsearch.node.ingest.operations.failed") m.data.SetDescription("Total number of failed ingest operations during the lifetime of this node.") m.data.SetUnit("{operation}") m.data.SetEmptySum() m.data.Sum().SetIsMonotonic(true) m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) } func (m *metricElasticsearchNodeIngestOperationsFailed) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { if !m.config.Enabled { return } dp := m.data.Sum().DataPoints().AppendEmpty() dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntValue(val) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. func (m *metricElasticsearchNodeIngestOperationsFailed) updateCapacity() { if m.data.Sum().DataPoints().Len() > m.capacity { m.capacity = m.data.Sum().DataPoints().Len() } } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. func (m *metricElasticsearchNodeIngestOperationsFailed) emit(metrics pmetric.MetricSlice) { if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) m.init() } } func newMetricElasticsearchNodeIngestOperationsFailed(cfg MetricConfig) metricElasticsearchNodeIngestOperationsFailed { m := metricElasticsearchNodeIngestOperationsFailed{config: cfg} if cfg.Enabled { m.data = pmetric.NewMetric() m.init() } return m } type metricElasticsearchNodeOpenFiles struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. capacity int // max observed number of data points added to the metric. } // init fills elasticsearch.node.open_files metric with initial data. func (m *metricElasticsearchNodeOpenFiles) init() { m.data.SetName("elasticsearch.node.open_files") m.data.SetDescription("The number of open file descriptors held by the node.") m.data.SetUnit("{files}") m.data.SetEmptySum() m.data.Sum().SetIsMonotonic(false) m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) } func (m *metricElasticsearchNodeOpenFiles) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { if !m.config.Enabled { return } dp := m.data.Sum().DataPoints().AppendEmpty() dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntValue(val) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. func (m *metricElasticsearchNodeOpenFiles) updateCapacity() { if m.data.Sum().DataPoints().Len() > m.capacity { m.capacity = m.data.Sum().DataPoints().Len() } } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. func (m *metricElasticsearchNodeOpenFiles) emit(metrics pmetric.MetricSlice) { if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) m.init() } } func newMetricElasticsearchNodeOpenFiles(cfg MetricConfig) metricElasticsearchNodeOpenFiles { m := metricElasticsearchNodeOpenFiles{config: cfg} if cfg.Enabled { m.data = pmetric.NewMetric() m.init() } return m } type metricElasticsearchNodeOperationsCompleted struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. capacity int // max observed number of data points added to the metric. } // init fills elasticsearch.node.operations.completed metric with initial data. func (m *metricElasticsearchNodeOperationsCompleted) init() { m.data.SetName("elasticsearch.node.operations.completed") m.data.SetDescription("The number of operations completed by a node.") m.data.SetUnit("{operations}") m.data.SetEmptySum() m.data.Sum().SetIsMonotonic(true) m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) m.data.Sum().DataPoints().EnsureCapacity(m.capacity) } func (m *metricElasticsearchNodeOperationsCompleted) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, operationAttributeValue string) { if !m.config.Enabled { return } dp := m.data.Sum().DataPoints().AppendEmpty() dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntValue(val) dp.Attributes().PutStr("operation", operationAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. func (m *metricElasticsearchNodeOperationsCompleted) updateCapacity() { if m.data.Sum().DataPoints().Len() > m.capacity { m.capacity = m.data.Sum().DataPoints().Len() } } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. func (m *metricElasticsearchNodeOperationsCompleted) emit(metrics pmetric.MetricSlice) { if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) m.init() } } func newMetricElasticsearchNodeOperationsCompleted(cfg MetricConfig) metricElasticsearchNodeOperationsCompleted { m := metricElasticsearchNodeOperationsCompleted{config: cfg} if cfg.Enabled { m.data = pmetric.NewMetric() m.init() } return m } type metricElasticsearchNodeOperationsCurrent struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. capacity int // max observed number of data points added to the metric. } // init fills elasticsearch.node.operations.current metric with initial data. func (m *metricElasticsearchNodeOperationsCurrent) init() { m.data.SetName("elasticsearch.node.operations.current") m.data.SetDescription("Number of query operations currently running.") m.data.SetUnit("{operations}") m.data.SetEmptyGauge() m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) } func (m *metricElasticsearchNodeOperationsCurrent) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, operationAttributeValue string) { if !m.config.Enabled { return } dp := m.data.Gauge().DataPoints().AppendEmpty() dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntValue(val) dp.Attributes().PutStr("operation", operationAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. func (m *metricElasticsearchNodeOperationsCurrent) updateCapacity() { if m.data.Gauge().DataPoints().Len() > m.capacity { m.capacity = m.data.Gauge().DataPoints().Len() } } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. func (m *metricElasticsearchNodeOperationsCurrent) emit(metrics pmetric.MetricSlice) { if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) m.init() } } func newMetricElasticsearchNodeOperationsCurrent(cfg MetricConfig) metricElasticsearchNodeOperationsCurrent { m := metricElasticsearchNodeOperationsCurrent{config: cfg} if cfg.Enabled { m.data = pmetric.NewMetric() m.init() } return m } type metricElasticsearchNodeOperationsGetCompleted struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. capacity int // max observed number of data points added to the metric. } // init fills elasticsearch.node.operations.get.completed metric with initial data. func (m *metricElasticsearchNodeOperationsGetCompleted) init() { m.data.SetName("elasticsearch.node.operations.get.completed") m.data.SetDescription("The number of hits and misses resulting from GET operations.") m.data.SetUnit("{operations}") m.data.SetEmptySum() m.data.Sum().SetIsMonotonic(true) m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) m.data.Sum().DataPoints().EnsureCapacity(m.capacity) } func (m *metricElasticsearchNodeOperationsGetCompleted) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, getResultAttributeValue string) { if !m.config.Enabled { return } dp := m.data.Sum().DataPoints().AppendEmpty() dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntValue(val) dp.Attributes().PutStr("result", getResultAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. func (m *metricElasticsearchNodeOperationsGetCompleted) updateCapacity() { if m.data.Sum().DataPoints().Len() > m.capacity { m.capacity = m.data.Sum().DataPoints().Len() } } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. func (m *metricElasticsearchNodeOperationsGetCompleted) emit(metrics pmetric.MetricSlice) { if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) m.init() } } func newMetricElasticsearchNodeOperationsGetCompleted(cfg MetricConfig) metricElasticsearchNodeOperationsGetCompleted { m := metricElasticsearchNodeOperationsGetCompleted{config: cfg} if cfg.Enabled { m.data = pmetric.NewMetric() m.init() } return m } type metricElasticsearchNodeOperationsGetTime struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. capacity int // max observed number of data points added to the metric. } // init fills elasticsearch.node.operations.get.time metric with initial data. func (m *metricElasticsearchNodeOperationsGetTime) init() { m.data.SetName("elasticsearch.node.operations.get.time") m.data.SetDescription("The time spent on hits and misses resulting from GET operations.") m.data.SetUnit("ms") m.data.SetEmptySum() m.data.Sum().SetIsMonotonic(true) m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) m.data.Sum().DataPoints().EnsureCapacity(m.capacity) } func (m *metricElasticsearchNodeOperationsGetTime) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, getResultAttributeValue string) { if !m.config.Enabled { return } dp := m.data.Sum().DataPoints().AppendEmpty() dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntValue(val) dp.Attributes().PutStr("result", getResultAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. func (m *metricElasticsearchNodeOperationsGetTime) updateCapacity() { if m.data.Sum().DataPoints().Len() > m.capacity { m.capacity = m.data.Sum().DataPoints().Len() } } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. func (m *metricElasticsearchNodeOperationsGetTime) emit(metrics pmetric.MetricSlice) { if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) m.init() } } func newMetricElasticsearchNodeOperationsGetTime(cfg MetricConfig) metricElasticsearchNodeOperationsGetTime { m := metricElasticsearchNodeOperationsGetTime{config: cfg} if cfg.Enabled { m.data = pmetric.NewMetric() m.init() } return m } type metricElasticsearchNodeOperationsTime struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. capacity int // max observed number of data points added to the metric. } // init fills elasticsearch.node.operations.time metric with initial data. func (m *metricElasticsearchNodeOperationsTime) init() { m.data.SetName("elasticsearch.node.operations.time") m.data.SetDescription("Time spent on operations by a node.") m.data.SetUnit("ms") m.data.SetEmptySum() m.data.Sum().SetIsMonotonic(true) m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) m.data.Sum().DataPoints().EnsureCapacity(m.capacity) } func (m *metricElasticsearchNodeOperationsTime) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, operationAttributeValue string) { if !m.config.Enabled { return } dp := m.data.Sum().DataPoints().AppendEmpty() dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntValue(val) dp.Attributes().PutStr("operation", operationAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. func (m *metricElasticsearchNodeOperationsTime) updateCapacity() { if m.data.Sum().DataPoints().Len() > m.capacity { m.capacity = m.data.Sum().DataPoints().Len() } } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. func (m *metricElasticsearchNodeOperationsTime) emit(metrics pmetric.MetricSlice) { if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) m.init() } } func newMetricElasticsearchNodeOperationsTime(cfg MetricConfig) metricElasticsearchNodeOperationsTime { m := metricElasticsearchNodeOperationsTime{config: cfg} if cfg.Enabled { m.data = pmetric.NewMetric() m.init() } return m } type metricElasticsearchNodePipelineIngestDocumentsCurrent struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. capacity int // max observed number of data points added to the metric. } // init fills elasticsearch.node.pipeline.ingest.documents.current metric with initial data. func (m *metricElasticsearchNodePipelineIngestDocumentsCurrent) init() { m.data.SetName("elasticsearch.node.pipeline.ingest.documents.current") m.data.SetDescription("Total number of documents currently being ingested by a pipeline.") m.data.SetUnit("{documents}") m.data.SetEmptySum() m.data.Sum().SetIsMonotonic(false) m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) m.data.Sum().DataPoints().EnsureCapacity(m.capacity) } func (m *metricElasticsearchNodePipelineIngestDocumentsCurrent) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, ingestPipelineNameAttributeValue string) { if !m.config.Enabled { return } dp := m.data.Sum().DataPoints().AppendEmpty() dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntValue(val) dp.Attributes().PutStr("name", ingestPipelineNameAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. func (m *metricElasticsearchNodePipelineIngestDocumentsCurrent) updateCapacity() { if m.data.Sum().DataPoints().Len() > m.capacity { m.capacity = m.data.Sum().DataPoints().Len() } } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. func (m *metricElasticsearchNodePipelineIngestDocumentsCurrent) emit(metrics pmetric.MetricSlice) { if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) m.init() } } func newMetricElasticsearchNodePipelineIngestDocumentsCurrent(cfg MetricConfig) metricElasticsearchNodePipelineIngestDocumentsCurrent { m := metricElasticsearchNodePipelineIngestDocumentsCurrent{config: cfg} if cfg.Enabled { m.data = pmetric.NewMetric() m.init() } return m } type metricElasticsearchNodePipelineIngestDocumentsPreprocessed struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. capacity int // max observed number of data points added to the metric. } // init fills elasticsearch.node.pipeline.ingest.documents.preprocessed metric with initial data. func (m *metricElasticsearchNodePipelineIngestDocumentsPreprocessed) init() { m.data.SetName("elasticsearch.node.pipeline.ingest.documents.preprocessed") m.data.SetDescription("Number of documents preprocessed by the ingest pipeline.") m.data.SetUnit("{documents}") m.data.SetEmptySum() m.data.Sum().SetIsMonotonic(false) m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) m.data.Sum().DataPoints().EnsureCapacity(m.capacity) } func (m *metricElasticsearchNodePipelineIngestDocumentsPreprocessed) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, ingestPipelineNameAttributeValue string) { if !m.config.Enabled { return } dp := m.data.Sum().DataPoints().AppendEmpty() dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntValue(val) dp.Attributes().PutStr("name", ingestPipelineNameAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. func (m *metricElasticsearchNodePipelineIngestDocumentsPreprocessed) updateCapacity() { if m.data.Sum().DataPoints().Len() > m.capacity { m.capacity = m.data.Sum().DataPoints().Len() } } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. func (m *metricElasticsearchNodePipelineIngestDocumentsPreprocessed) emit(metrics pmetric.MetricSlice) { if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) m.init() } } func newMetricElasticsearchNodePipelineIngestDocumentsPreprocessed(cfg MetricConfig) metricElasticsearchNodePipelineIngestDocumentsPreprocessed { m := metricElasticsearchNodePipelineIngestDocumentsPreprocessed{config: cfg} if cfg.Enabled { m.data = pmetric.NewMetric() m.init() } return m } type metricElasticsearchNodePipelineIngestOperationsFailed struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. capacity int // max observed number of data points added to the metric. } // init fills elasticsearch.node.pipeline.ingest.operations.failed metric with initial data. func (m *metricElasticsearchNodePipelineIngestOperationsFailed) init() { m.data.SetName("elasticsearch.node.pipeline.ingest.operations.failed") m.data.SetDescription("Total number of failed operations for the ingest pipeline.") m.data.SetUnit("{operation}") m.data.SetEmptySum() m.data.Sum().SetIsMonotonic(true) m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) m.data.Sum().DataPoints().EnsureCapacity(m.capacity) } func (m *metricElasticsearchNodePipelineIngestOperationsFailed) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, ingestPipelineNameAttributeValue string) { if !m.config.Enabled { return } dp := m.data.Sum().DataPoints().AppendEmpty() dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntValue(val) dp.Attributes().PutStr("name", ingestPipelineNameAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. func (m *metricElasticsearchNodePipelineIngestOperationsFailed) updateCapacity() { if m.data.Sum().DataPoints().Len() > m.capacity { m.capacity = m.data.Sum().DataPoints().Len() } } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. func (m *metricElasticsearchNodePipelineIngestOperationsFailed) emit(metrics pmetric.MetricSlice) { if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) m.init() } } func newMetricElasticsearchNodePipelineIngestOperationsFailed(cfg MetricConfig) metricElasticsearchNodePipelineIngestOperationsFailed { m := metricElasticsearchNodePipelineIngestOperationsFailed{config: cfg} if cfg.Enabled { m.data = pmetric.NewMetric() m.init() } return m } type metricElasticsearchNodeScriptCacheEvictions struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. capacity int // max observed number of data points added to the metric. } // init fills elasticsearch.node.script.cache_evictions metric with initial data. func (m *metricElasticsearchNodeScriptCacheEvictions) init() { m.data.SetName("elasticsearch.node.script.cache_evictions") m.data.SetDescription("Total number of times the script cache has evicted old data.") m.data.SetUnit("1") m.data.SetEmptySum() m.data.Sum().SetIsMonotonic(true) m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) } func (m *metricElasticsearchNodeScriptCacheEvictions) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { if !m.config.Enabled { return } dp := m.data.Sum().DataPoints().AppendEmpty() dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntValue(val) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. func (m *metricElasticsearchNodeScriptCacheEvictions) updateCapacity() { if m.data.Sum().DataPoints().Len() > m.capacity { m.capacity = m.data.Sum().DataPoints().Len() } } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. func (m *metricElasticsearchNodeScriptCacheEvictions) emit(metrics pmetric.MetricSlice) { if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) m.init() } } func newMetricElasticsearchNodeScriptCacheEvictions(cfg MetricConfig) metricElasticsearchNodeScriptCacheEvictions { m := metricElasticsearchNodeScriptCacheEvictions{config: cfg} if cfg.Enabled { m.data = pmetric.NewMetric() m.init() } return m } type metricElasticsearchNodeScriptCompilationLimitTriggered struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. capacity int // max observed number of data points added to the metric. } // init fills elasticsearch.node.script.compilation_limit_triggered metric with initial data. func (m *metricElasticsearchNodeScriptCompilationLimitTriggered) init() { m.data.SetName("elasticsearch.node.script.compilation_limit_triggered") m.data.SetDescription("Total number of times the script compilation circuit breaker has limited inline script compilations.") m.data.SetUnit("1") m.data.SetEmptySum() m.data.Sum().SetIsMonotonic(true) m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) } func (m *metricElasticsearchNodeScriptCompilationLimitTriggered) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { if !m.config.Enabled { return } dp := m.data.Sum().DataPoints().AppendEmpty() dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntValue(val) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. func (m *metricElasticsearchNodeScriptCompilationLimitTriggered) updateCapacity() { if m.data.Sum().DataPoints().Len() > m.capacity { m.capacity = m.data.Sum().DataPoints().Len() } } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. func (m *metricElasticsearchNodeScriptCompilationLimitTriggered) emit(metrics pmetric.MetricSlice) { if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) m.init() } } func newMetricElasticsearchNodeScriptCompilationLimitTriggered(cfg MetricConfig) metricElasticsearchNodeScriptCompilationLimitTriggered { m := metricElasticsearchNodeScriptCompilationLimitTriggered{config: cfg} if cfg.Enabled { m.data = pmetric.NewMetric() m.init() } return m } type metricElasticsearchNodeScriptCompilations struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. capacity int // max observed number of data points added to the metric. } // init fills elasticsearch.node.script.compilations metric with initial data. func (m *metricElasticsearchNodeScriptCompilations) init() { m.data.SetName("elasticsearch.node.script.compilations") m.data.SetDescription("Total number of inline script compilations performed by the node.") m.data.SetUnit("{compilations}") m.data.SetEmptySum() m.data.Sum().SetIsMonotonic(false) m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) } func (m *metricElasticsearchNodeScriptCompilations) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { if !m.config.Enabled { return } dp := m.data.Sum().DataPoints().AppendEmpty() dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntValue(val) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. func (m *metricElasticsearchNodeScriptCompilations) updateCapacity() { if m.data.Sum().DataPoints().Len() > m.capacity { m.capacity = m.data.Sum().DataPoints().Len() } } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. func (m *metricElasticsearchNodeScriptCompilations) emit(metrics pmetric.MetricSlice) { if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) m.init() } } func newMetricElasticsearchNodeScriptCompilations(cfg MetricConfig) metricElasticsearchNodeScriptCompilations { m := metricElasticsearchNodeScriptCompilations{config: cfg} if cfg.Enabled { m.data = pmetric.NewMetric() m.init() } return m } type metricElasticsearchNodeSegmentsMemory struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. capacity int // max observed number of data points added to the metric. } // init fills elasticsearch.node.segments.memory metric with initial data. func (m *metricElasticsearchNodeSegmentsMemory) init() { m.data.SetName("elasticsearch.node.segments.memory") m.data.SetDescription("Size of memory for segment object of a node.") m.data.SetUnit("By") m.data.SetEmptySum() m.data.Sum().SetIsMonotonic(false) m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) m.data.Sum().DataPoints().EnsureCapacity(m.capacity) } func (m *metricElasticsearchNodeSegmentsMemory) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, segmentsMemoryObjectTypeAttributeValue string) { if !m.config.Enabled { return } dp := m.data.Sum().DataPoints().AppendEmpty() dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntValue(val) dp.Attributes().PutStr("object", segmentsMemoryObjectTypeAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. func (m *metricElasticsearchNodeSegmentsMemory) updateCapacity() { if m.data.Sum().DataPoints().Len() > m.capacity { m.capacity = m.data.Sum().DataPoints().Len() } } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. func (m *metricElasticsearchNodeSegmentsMemory) emit(metrics pmetric.MetricSlice) { if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) m.init() } } func newMetricElasticsearchNodeSegmentsMemory(cfg MetricConfig) metricElasticsearchNodeSegmentsMemory { m := metricElasticsearchNodeSegmentsMemory{config: cfg} if cfg.Enabled { m.data = pmetric.NewMetric() m.init() } return m } type metricElasticsearchNodeShardsDataSetSize struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. capacity int // max observed number of data points added to the metric. } // init fills elasticsearch.node.shards.data_set.size metric with initial data. func (m *metricElasticsearchNodeShardsDataSetSize) init() { m.data.SetName("elasticsearch.node.shards.data_set.size") m.data.SetDescription("Total data set size of all shards assigned to the node. This includes the size of shards not stored fully on the node, such as the cache for partially mounted indices.") m.data.SetUnit("By") m.data.SetEmptySum() m.data.Sum().SetIsMonotonic(false) m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) } func (m *metricElasticsearchNodeShardsDataSetSize) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { if !m.config.Enabled { return } dp := m.data.Sum().DataPoints().AppendEmpty() dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntValue(val) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. func (m *metricElasticsearchNodeShardsDataSetSize) updateCapacity() { if m.data.Sum().DataPoints().Len() > m.capacity { m.capacity = m.data.Sum().DataPoints().Len() } } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. func (m *metricElasticsearchNodeShardsDataSetSize) emit(metrics pmetric.MetricSlice) { if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) m.init() } } func newMetricElasticsearchNodeShardsDataSetSize(cfg MetricConfig) metricElasticsearchNodeShardsDataSetSize { m := metricElasticsearchNodeShardsDataSetSize{config: cfg} if cfg.Enabled { m.data = pmetric.NewMetric() m.init() } return m } type metricElasticsearchNodeShardsReservedSize struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. capacity int // max observed number of data points added to the metric. } // init fills elasticsearch.node.shards.reserved.size metric with initial data. func (m *metricElasticsearchNodeShardsReservedSize) init() { m.data.SetName("elasticsearch.node.shards.reserved.size") m.data.SetDescription("A prediction of how much larger the shard stores on this node will eventually grow due to ongoing peer recoveries, restoring snapshots, and similar activities. A value of -1 indicates that this is not available.") m.data.SetUnit("By") m.data.SetEmptySum() m.data.Sum().SetIsMonotonic(false) m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) } func (m *metricElasticsearchNodeShardsReservedSize) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { if !m.config.Enabled { return } dp := m.data.Sum().DataPoints().AppendEmpty() dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntValue(val) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. func (m *metricElasticsearchNodeShardsReservedSize) updateCapacity() { if m.data.Sum().DataPoints().Len() > m.capacity { m.capacity = m.data.Sum().DataPoints().Len() } } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. func (m *metricElasticsearchNodeShardsReservedSize) emit(metrics pmetric.MetricSlice) { if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) m.init() } } func newMetricElasticsearchNodeShardsReservedSize(cfg MetricConfig) metricElasticsearchNodeShardsReservedSize { m := metricElasticsearchNodeShardsReservedSize{config: cfg} if cfg.Enabled { m.data = pmetric.NewMetric() m.init() } return m } type metricElasticsearchNodeShardsSize struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. capacity int // max observed number of data points added to the metric. } // init fills elasticsearch.node.shards.size metric with initial data. func (m *metricElasticsearchNodeShardsSize) init() { m.data.SetName("elasticsearch.node.shards.size") m.data.SetDescription("The size of the shards assigned to this node.") m.data.SetUnit("By") m.data.SetEmptySum() m.data.Sum().SetIsMonotonic(false) m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) } func (m *metricElasticsearchNodeShardsSize) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { if !m.config.Enabled { return } dp := m.data.Sum().DataPoints().AppendEmpty() dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntValue(val) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. func (m *metricElasticsearchNodeShardsSize) updateCapacity() { if m.data.Sum().DataPoints().Len() > m.capacity { m.capacity = m.data.Sum().DataPoints().Len() } } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. func (m *metricElasticsearchNodeShardsSize) emit(metrics pmetric.MetricSlice) { if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) m.init() } } func newMetricElasticsearchNodeShardsSize(cfg MetricConfig) metricElasticsearchNodeShardsSize { m := metricElasticsearchNodeShardsSize{config: cfg} if cfg.Enabled { m.data = pmetric.NewMetric() m.init() } return m } type metricElasticsearchNodeThreadPoolTasksFinished struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. capacity int // max observed number of data points added to the metric. } // init fills elasticsearch.node.thread_pool.tasks.finished metric with initial data. func (m *metricElasticsearchNodeThreadPoolTasksFinished) init() { m.data.SetName("elasticsearch.node.thread_pool.tasks.finished") m.data.SetDescription("The number of tasks finished by the thread pool.") m.data.SetUnit("{tasks}") m.data.SetEmptySum() m.data.Sum().SetIsMonotonic(true) m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) m.data.Sum().DataPoints().EnsureCapacity(m.capacity) } func (m *metricElasticsearchNodeThreadPoolTasksFinished) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, threadPoolNameAttributeValue string, taskStateAttributeValue string) { if !m.config.Enabled { return } dp := m.data.Sum().DataPoints().AppendEmpty() dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntValue(val) dp.Attributes().PutStr("thread_pool_name", threadPoolNameAttributeValue) dp.Attributes().PutStr("state", taskStateAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. func (m *metricElasticsearchNodeThreadPoolTasksFinished) updateCapacity() { if m.data.Sum().DataPoints().Len() > m.capacity { m.capacity = m.data.Sum().DataPoints().Len() } } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. func (m *metricElasticsearchNodeThreadPoolTasksFinished) emit(metrics pmetric.MetricSlice) { if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) m.init() } } func newMetricElasticsearchNodeThreadPoolTasksFinished(cfg MetricConfig) metricElasticsearchNodeThreadPoolTasksFinished { m := metricElasticsearchNodeThreadPoolTasksFinished{config: cfg} if cfg.Enabled { m.data = pmetric.NewMetric() m.init() } return m } type metricElasticsearchNodeThreadPoolTasksQueued struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. capacity int // max observed number of data points added to the metric. } // init fills elasticsearch.node.thread_pool.tasks.queued metric with initial data. func (m *metricElasticsearchNodeThreadPoolTasksQueued) init() { m.data.SetName("elasticsearch.node.thread_pool.tasks.queued") m.data.SetDescription("The number of queued tasks in the thread pool.") m.data.SetUnit("{tasks}") m.data.SetEmptySum() m.data.Sum().SetIsMonotonic(false) m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) m.data.Sum().DataPoints().EnsureCapacity(m.capacity) } func (m *metricElasticsearchNodeThreadPoolTasksQueued) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, threadPoolNameAttributeValue string) { if !m.config.Enabled { return } dp := m.data.Sum().DataPoints().AppendEmpty() dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntValue(val) dp.Attributes().PutStr("thread_pool_name", threadPoolNameAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. func (m *metricElasticsearchNodeThreadPoolTasksQueued) updateCapacity() { if m.data.Sum().DataPoints().Len() > m.capacity { m.capacity = m.data.Sum().DataPoints().Len() } } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. func (m *metricElasticsearchNodeThreadPoolTasksQueued) emit(metrics pmetric.MetricSlice) { if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) m.init() } } func newMetricElasticsearchNodeThreadPoolTasksQueued(cfg MetricConfig) metricElasticsearchNodeThreadPoolTasksQueued { m := metricElasticsearchNodeThreadPoolTasksQueued{config: cfg} if cfg.Enabled { m.data = pmetric.NewMetric() m.init() } return m } type metricElasticsearchNodeThreadPoolThreads struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. capacity int // max observed number of data points added to the metric. } // init fills elasticsearch.node.thread_pool.threads metric with initial data. func (m *metricElasticsearchNodeThreadPoolThreads) init() { m.data.SetName("elasticsearch.node.thread_pool.threads") m.data.SetDescription("The number of threads in the thread pool.") m.data.SetUnit("{threads}") m.data.SetEmptySum() m.data.Sum().SetIsMonotonic(false) m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) m.data.Sum().DataPoints().EnsureCapacity(m.capacity) } func (m *metricElasticsearchNodeThreadPoolThreads) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, threadPoolNameAttributeValue string, threadStateAttributeValue string) { if !m.config.Enabled { return } dp := m.data.Sum().DataPoints().AppendEmpty() dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntValue(val) dp.Attributes().PutStr("thread_pool_name", threadPoolNameAttributeValue) dp.Attributes().PutStr("state", threadStateAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. func (m *metricElasticsearchNodeThreadPoolThreads) updateCapacity() { if m.data.Sum().DataPoints().Len() > m.capacity { m.capacity = m.data.Sum().DataPoints().Len() } } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. func (m *metricElasticsearchNodeThreadPoolThreads) emit(metrics pmetric.MetricSlice) { if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) m.init() } } func newMetricElasticsearchNodeThreadPoolThreads(cfg MetricConfig) metricElasticsearchNodeThreadPoolThreads { m := metricElasticsearchNodeThreadPoolThreads{config: cfg} if cfg.Enabled { m.data = pmetric.NewMetric() m.init() } return m } type metricElasticsearchNodeTranslogOperations struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. capacity int // max observed number of data points added to the metric. } // init fills elasticsearch.node.translog.operations metric with initial data. func (m *metricElasticsearchNodeTranslogOperations) init() { m.data.SetName("elasticsearch.node.translog.operations") m.data.SetDescription("Number of transaction log operations.") m.data.SetUnit("{operations}") m.data.SetEmptySum() m.data.Sum().SetIsMonotonic(true) m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) } func (m *metricElasticsearchNodeTranslogOperations) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { if !m.config.Enabled { return } dp := m.data.Sum().DataPoints().AppendEmpty() dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntValue(val) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. func (m *metricElasticsearchNodeTranslogOperations) updateCapacity() { if m.data.Sum().DataPoints().Len() > m.capacity { m.capacity = m.data.Sum().DataPoints().Len() } } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. func (m *metricElasticsearchNodeTranslogOperations) emit(metrics pmetric.MetricSlice) { if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) m.init() } } func newMetricElasticsearchNodeTranslogOperations(cfg MetricConfig) metricElasticsearchNodeTranslogOperations { m := metricElasticsearchNodeTranslogOperations{config: cfg} if cfg.Enabled { m.data = pmetric.NewMetric() m.init() } return m } type metricElasticsearchNodeTranslogSize struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. capacity int // max observed number of data points added to the metric. } // init fills elasticsearch.node.translog.size metric with initial data. func (m *metricElasticsearchNodeTranslogSize) init() { m.data.SetName("elasticsearch.node.translog.size") m.data.SetDescription("Size of the transaction log.") m.data.SetUnit("By") m.data.SetEmptySum() m.data.Sum().SetIsMonotonic(false) m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) } func (m *metricElasticsearchNodeTranslogSize) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { if !m.config.Enabled { return } dp := m.data.Sum().DataPoints().AppendEmpty() dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntValue(val) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. func (m *metricElasticsearchNodeTranslogSize) updateCapacity() { if m.data.Sum().DataPoints().Len() > m.capacity { m.capacity = m.data.Sum().DataPoints().Len() } } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. func (m *metricElasticsearchNodeTranslogSize) emit(metrics pmetric.MetricSlice) { if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) m.init() } } func newMetricElasticsearchNodeTranslogSize(cfg MetricConfig) metricElasticsearchNodeTranslogSize { m := metricElasticsearchNodeTranslogSize{config: cfg} if cfg.Enabled { m.data = pmetric.NewMetric() m.init() } return m } type metricElasticsearchNodeTranslogUncommittedSize struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. capacity int // max observed number of data points added to the metric. } // init fills elasticsearch.node.translog.uncommitted.size metric with initial data. func (m *metricElasticsearchNodeTranslogUncommittedSize) init() { m.data.SetName("elasticsearch.node.translog.uncommitted.size") m.data.SetDescription("Size of uncommitted transaction log operations.") m.data.SetUnit("By") m.data.SetEmptySum() m.data.Sum().SetIsMonotonic(false) m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) } func (m *metricElasticsearchNodeTranslogUncommittedSize) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { if !m.config.Enabled { return } dp := m.data.Sum().DataPoints().AppendEmpty() dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntValue(val) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. func (m *metricElasticsearchNodeTranslogUncommittedSize) updateCapacity() { if m.data.Sum().DataPoints().Len() > m.capacity { m.capacity = m.data.Sum().DataPoints().Len() } } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. func (m *metricElasticsearchNodeTranslogUncommittedSize) emit(metrics pmetric.MetricSlice) { if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) m.init() } } func newMetricElasticsearchNodeTranslogUncommittedSize(cfg MetricConfig) metricElasticsearchNodeTranslogUncommittedSize { m := metricElasticsearchNodeTranslogUncommittedSize{config: cfg} if cfg.Enabled { m.data = pmetric.NewMetric() m.init() } return m } type metricElasticsearchOsCPULoadAvg15m struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. capacity int // max observed number of data points added to the metric. } // init fills elasticsearch.os.cpu.load_avg.15m metric with initial data. func (m *metricElasticsearchOsCPULoadAvg15m) init() { m.data.SetName("elasticsearch.os.cpu.load_avg.15m") m.data.SetDescription("Fifteen-minute load average on the system (field is not present if fifteen-minute load average is not available).") m.data.SetUnit("1") m.data.SetEmptyGauge() } func (m *metricElasticsearchOsCPULoadAvg15m) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64) { if !m.config.Enabled { return } dp := m.data.Gauge().DataPoints().AppendEmpty() dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetDoubleValue(val) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. func (m *metricElasticsearchOsCPULoadAvg15m) updateCapacity() { if m.data.Gauge().DataPoints().Len() > m.capacity { m.capacity = m.data.Gauge().DataPoints().Len() } } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. func (m *metricElasticsearchOsCPULoadAvg15m) emit(metrics pmetric.MetricSlice) { if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) m.init() } } func newMetricElasticsearchOsCPULoadAvg15m(cfg MetricConfig) metricElasticsearchOsCPULoadAvg15m { m := metricElasticsearchOsCPULoadAvg15m{config: cfg} if cfg.Enabled { m.data = pmetric.NewMetric() m.init() } return m } type metricElasticsearchOsCPULoadAvg1m struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. capacity int // max observed number of data points added to the metric. } // init fills elasticsearch.os.cpu.load_avg.1m metric with initial data. func (m *metricElasticsearchOsCPULoadAvg1m) init() { m.data.SetName("elasticsearch.os.cpu.load_avg.1m") m.data.SetDescription("One-minute load average on the system (field is not present if one-minute load average is not available).") m.data.SetUnit("1") m.data.SetEmptyGauge() } func (m *metricElasticsearchOsCPULoadAvg1m) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64) { if !m.config.Enabled { return } dp := m.data.Gauge().DataPoints().AppendEmpty() dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetDoubleValue(val) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. func (m *metricElasticsearchOsCPULoadAvg1m) updateCapacity() { if m.data.Gauge().DataPoints().Len() > m.capacity { m.capacity = m.data.Gauge().DataPoints().Len() } } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. func (m *metricElasticsearchOsCPULoadAvg1m) emit(metrics pmetric.MetricSlice) { if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) m.init() } } func newMetricElasticsearchOsCPULoadAvg1m(cfg MetricConfig) metricElasticsearchOsCPULoadAvg1m { m := metricElasticsearchOsCPULoadAvg1m{config: cfg} if cfg.Enabled { m.data = pmetric.NewMetric() m.init() } return m } type metricElasticsearchOsCPULoadAvg5m struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. capacity int // max observed number of data points added to the metric. } // init fills elasticsearch.os.cpu.load_avg.5m metric with initial data. func (m *metricElasticsearchOsCPULoadAvg5m) init() { m.data.SetName("elasticsearch.os.cpu.load_avg.5m") m.data.SetDescription("Five-minute load average on the system (field is not present if five-minute load average is not available).") m.data.SetUnit("1") m.data.SetEmptyGauge() } func (m *metricElasticsearchOsCPULoadAvg5m) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64) { if !m.config.Enabled { return } dp := m.data.Gauge().DataPoints().AppendEmpty() dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetDoubleValue(val) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. func (m *metricElasticsearchOsCPULoadAvg5m) updateCapacity() { if m.data.Gauge().DataPoints().Len() > m.capacity { m.capacity = m.data.Gauge().DataPoints().Len() } } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. func (m *metricElasticsearchOsCPULoadAvg5m) emit(metrics pmetric.MetricSlice) { if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) m.init() } } func newMetricElasticsearchOsCPULoadAvg5m(cfg MetricConfig) metricElasticsearchOsCPULoadAvg5m { m := metricElasticsearchOsCPULoadAvg5m{config: cfg} if cfg.Enabled { m.data = pmetric.NewMetric() m.init() } return m } type metricElasticsearchOsCPUUsage struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. capacity int // max observed number of data points added to the metric. } // init fills elasticsearch.os.cpu.usage metric with initial data. func (m *metricElasticsearchOsCPUUsage) init() { m.data.SetName("elasticsearch.os.cpu.usage") m.data.SetDescription("Recent CPU usage for the whole system, or -1 if not supported.") m.data.SetUnit("%") m.data.SetEmptyGauge() } func (m *metricElasticsearchOsCPUUsage) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { if !m.config.Enabled { return } dp := m.data.Gauge().DataPoints().AppendEmpty() dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntValue(val) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. func (m *metricElasticsearchOsCPUUsage) updateCapacity() { if m.data.Gauge().DataPoints().Len() > m.capacity { m.capacity = m.data.Gauge().DataPoints().Len() } } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. func (m *metricElasticsearchOsCPUUsage) emit(metrics pmetric.MetricSlice) { if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) m.init() } } func newMetricElasticsearchOsCPUUsage(cfg MetricConfig) metricElasticsearchOsCPUUsage { m := metricElasticsearchOsCPUUsage{config: cfg} if cfg.Enabled { m.data = pmetric.NewMetric() m.init() } return m } type metricElasticsearchOsMemory struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. capacity int // max observed number of data points added to the metric. } // init fills elasticsearch.os.memory metric with initial data. func (m *metricElasticsearchOsMemory) init() { m.data.SetName("elasticsearch.os.memory") m.data.SetDescription("Amount of physical memory.") m.data.SetUnit("By") m.data.SetEmptyGauge() m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) } func (m *metricElasticsearchOsMemory) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, memoryStateAttributeValue string) { if !m.config.Enabled { return } dp := m.data.Gauge().DataPoints().AppendEmpty() dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntValue(val) dp.Attributes().PutStr("state", memoryStateAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. func (m *metricElasticsearchOsMemory) updateCapacity() { if m.data.Gauge().DataPoints().Len() > m.capacity { m.capacity = m.data.Gauge().DataPoints().Len() } } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. func (m *metricElasticsearchOsMemory) emit(metrics pmetric.MetricSlice) { if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) m.init() } } func newMetricElasticsearchOsMemory(cfg MetricConfig) metricElasticsearchOsMemory { m := metricElasticsearchOsMemory{config: cfg} if cfg.Enabled { m.data = pmetric.NewMetric() m.init() } return m } type metricElasticsearchProcessCPUTime struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. capacity int // max observed number of data points added to the metric. } // init fills elasticsearch.process.cpu.time metric with initial data. func (m *metricElasticsearchProcessCPUTime) init() { m.data.SetName("elasticsearch.process.cpu.time") m.data.SetDescription("CPU time used by the process on which the Java virtual machine is running.") m.data.SetUnit("ms") m.data.SetEmptySum() m.data.Sum().SetIsMonotonic(true) m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) } func (m *metricElasticsearchProcessCPUTime) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { if !m.config.Enabled { return } dp := m.data.Sum().DataPoints().AppendEmpty() dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntValue(val) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. func (m *metricElasticsearchProcessCPUTime) updateCapacity() { if m.data.Sum().DataPoints().Len() > m.capacity { m.capacity = m.data.Sum().DataPoints().Len() } } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. func (m *metricElasticsearchProcessCPUTime) emit(metrics pmetric.MetricSlice) { if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) m.init() } } func newMetricElasticsearchProcessCPUTime(cfg MetricConfig) metricElasticsearchProcessCPUTime { m := metricElasticsearchProcessCPUTime{config: cfg} if cfg.Enabled { m.data = pmetric.NewMetric() m.init() } return m } type metricElasticsearchProcessCPUUsage struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. capacity int // max observed number of data points added to the metric. } // init fills elasticsearch.process.cpu.usage metric with initial data. func (m *metricElasticsearchProcessCPUUsage) init() { m.data.SetName("elasticsearch.process.cpu.usage") m.data.SetDescription("CPU usage in percent.") m.data.SetUnit("1") m.data.SetEmptyGauge() } func (m *metricElasticsearchProcessCPUUsage) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64) { if !m.config.Enabled { return } dp := m.data.Gauge().DataPoints().AppendEmpty() dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetDoubleValue(val) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. func (m *metricElasticsearchProcessCPUUsage) updateCapacity() { if m.data.Gauge().DataPoints().Len() > m.capacity { m.capacity = m.data.Gauge().DataPoints().Len() } } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. func (m *metricElasticsearchProcessCPUUsage) emit(metrics pmetric.MetricSlice) { if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) m.init() } } func newMetricElasticsearchProcessCPUUsage(cfg MetricConfig) metricElasticsearchProcessCPUUsage { m := metricElasticsearchProcessCPUUsage{config: cfg} if cfg.Enabled { m.data = pmetric.NewMetric() m.init() } return m } type metricElasticsearchProcessMemoryVirtual struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. capacity int // max observed number of data points added to the metric. } // init fills elasticsearch.process.memory.virtual metric with initial data. func (m *metricElasticsearchProcessMemoryVirtual) init() { m.data.SetName("elasticsearch.process.memory.virtual") m.data.SetDescription("Size of virtual memory that is guaranteed to be available to the running process.") m.data.SetUnit("By") m.data.SetEmptySum() m.data.Sum().SetIsMonotonic(false) m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) } func (m *metricElasticsearchProcessMemoryVirtual) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { if !m.config.Enabled { return } dp := m.data.Sum().DataPoints().AppendEmpty() dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntValue(val) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. func (m *metricElasticsearchProcessMemoryVirtual) updateCapacity() { if m.data.Sum().DataPoints().Len() > m.capacity { m.capacity = m.data.Sum().DataPoints().Len() } } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. func (m *metricElasticsearchProcessMemoryVirtual) emit(metrics pmetric.MetricSlice) { if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) m.init() } } func newMetricElasticsearchProcessMemoryVirtual(cfg MetricConfig) metricElasticsearchProcessMemoryVirtual { m := metricElasticsearchProcessMemoryVirtual{config: cfg} if cfg.Enabled { m.data = pmetric.NewMetric() m.init() } return m } type metricJvmClassesLoaded struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. capacity int // max observed number of data points added to the metric. } // init fills jvm.classes.loaded metric with initial data. func (m *metricJvmClassesLoaded) init() { m.data.SetName("jvm.classes.loaded") m.data.SetDescription("The number of loaded classes") m.data.SetUnit("1") m.data.SetEmptyGauge() } func (m *metricJvmClassesLoaded) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { if !m.config.Enabled { return } dp := m.data.Gauge().DataPoints().AppendEmpty() dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntValue(val) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. func (m *metricJvmClassesLoaded) updateCapacity() { if m.data.Gauge().DataPoints().Len() > m.capacity { m.capacity = m.data.Gauge().DataPoints().Len() } } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. func (m *metricJvmClassesLoaded) emit(metrics pmetric.MetricSlice) { if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) m.init() } } func newMetricJvmClassesLoaded(cfg MetricConfig) metricJvmClassesLoaded { m := metricJvmClassesLoaded{config: cfg} if cfg.Enabled { m.data = pmetric.NewMetric() m.init() } return m } type metricJvmGcCollectionsCount struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. capacity int // max observed number of data points added to the metric. } // init fills jvm.gc.collections.count metric with initial data. func (m *metricJvmGcCollectionsCount) init() { m.data.SetName("jvm.gc.collections.count") m.data.SetDescription("The total number of garbage collections that have occurred") m.data.SetUnit("1") m.data.SetEmptySum() m.data.Sum().SetIsMonotonic(true) m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) m.data.Sum().DataPoints().EnsureCapacity(m.capacity) } func (m *metricJvmGcCollectionsCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, collectorNameAttributeValue string) { if !m.config.Enabled { return } dp := m.data.Sum().DataPoints().AppendEmpty() dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntValue(val) dp.Attributes().PutStr("name", collectorNameAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. func (m *metricJvmGcCollectionsCount) updateCapacity() { if m.data.Sum().DataPoints().Len() > m.capacity { m.capacity = m.data.Sum().DataPoints().Len() } } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. func (m *metricJvmGcCollectionsCount) emit(metrics pmetric.MetricSlice) { if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) m.init() } } func newMetricJvmGcCollectionsCount(cfg MetricConfig) metricJvmGcCollectionsCount { m := metricJvmGcCollectionsCount{config: cfg} if cfg.Enabled { m.data = pmetric.NewMetric() m.init() } return m } type metricJvmGcCollectionsElapsed struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. capacity int // max observed number of data points added to the metric. } // init fills jvm.gc.collections.elapsed metric with initial data. func (m *metricJvmGcCollectionsElapsed) init() { m.data.SetName("jvm.gc.collections.elapsed") m.data.SetDescription("The approximate accumulated collection elapsed time") m.data.SetUnit("ms") m.data.SetEmptySum() m.data.Sum().SetIsMonotonic(true) m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) m.data.Sum().DataPoints().EnsureCapacity(m.capacity) } func (m *metricJvmGcCollectionsElapsed) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, collectorNameAttributeValue string) { if !m.config.Enabled { return } dp := m.data.Sum().DataPoints().AppendEmpty() dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntValue(val) dp.Attributes().PutStr("name", collectorNameAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. func (m *metricJvmGcCollectionsElapsed) updateCapacity() { if m.data.Sum().DataPoints().Len() > m.capacity { m.capacity = m.data.Sum().DataPoints().Len() } } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. func (m *metricJvmGcCollectionsElapsed) emit(metrics pmetric.MetricSlice) { if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) m.init() } } func newMetricJvmGcCollectionsElapsed(cfg MetricConfig) metricJvmGcCollectionsElapsed { m := metricJvmGcCollectionsElapsed{config: cfg} if cfg.Enabled { m.data = pmetric.NewMetric() m.init() } return m } type metricJvmMemoryHeapCommitted struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. capacity int // max observed number of data points added to the metric. } // init fills jvm.memory.heap.committed metric with initial data. func (m *metricJvmMemoryHeapCommitted) init() { m.data.SetName("jvm.memory.heap.committed") m.data.SetDescription("The amount of memory that is guaranteed to be available for the heap") m.data.SetUnit("By") m.data.SetEmptyGauge() } func (m *metricJvmMemoryHeapCommitted) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { if !m.config.Enabled { return } dp := m.data.Gauge().DataPoints().AppendEmpty() dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntValue(val) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. func (m *metricJvmMemoryHeapCommitted) updateCapacity() { if m.data.Gauge().DataPoints().Len() > m.capacity { m.capacity = m.data.Gauge().DataPoints().Len() } } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. func (m *metricJvmMemoryHeapCommitted) emit(metrics pmetric.MetricSlice) { if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) m.init() } } func newMetricJvmMemoryHeapCommitted(cfg MetricConfig) metricJvmMemoryHeapCommitted { m := metricJvmMemoryHeapCommitted{config: cfg} if cfg.Enabled { m.data = pmetric.NewMetric() m.init() } return m } type metricJvmMemoryHeapMax struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. capacity int // max observed number of data points added to the metric. } // init fills jvm.memory.heap.max metric with initial data. func (m *metricJvmMemoryHeapMax) init() { m.data.SetName("jvm.memory.heap.max") m.data.SetDescription("The maximum amount of memory can be used for the heap") m.data.SetUnit("By") m.data.SetEmptyGauge() } func (m *metricJvmMemoryHeapMax) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { if !m.config.Enabled { return } dp := m.data.Gauge().DataPoints().AppendEmpty() dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntValue(val) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. func (m *metricJvmMemoryHeapMax) updateCapacity() { if m.data.Gauge().DataPoints().Len() > m.capacity { m.capacity = m.data.Gauge().DataPoints().Len() } } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. func (m *metricJvmMemoryHeapMax) emit(metrics pmetric.MetricSlice) { if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) m.init() } } func newMetricJvmMemoryHeapMax(cfg MetricConfig) metricJvmMemoryHeapMax { m := metricJvmMemoryHeapMax{config: cfg} if cfg.Enabled { m.data = pmetric.NewMetric() m.init() } return m } type metricJvmMemoryHeapUsed struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. capacity int // max observed number of data points added to the metric. } // init fills jvm.memory.heap.used metric with initial data. func (m *metricJvmMemoryHeapUsed) init() { m.data.SetName("jvm.memory.heap.used") m.data.SetDescription("The current heap memory usage") m.data.SetUnit("By") m.data.SetEmptyGauge() } func (m *metricJvmMemoryHeapUsed) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { if !m.config.Enabled { return } dp := m.data.Gauge().DataPoints().AppendEmpty() dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntValue(val) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. func (m *metricJvmMemoryHeapUsed) updateCapacity() { if m.data.Gauge().DataPoints().Len() > m.capacity { m.capacity = m.data.Gauge().DataPoints().Len() } } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. func (m *metricJvmMemoryHeapUsed) emit(metrics pmetric.MetricSlice) { if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) m.init() } } func newMetricJvmMemoryHeapUsed(cfg MetricConfig) metricJvmMemoryHeapUsed { m := metricJvmMemoryHeapUsed{config: cfg} if cfg.Enabled { m.data = pmetric.NewMetric() m.init() } return m } type metricJvmMemoryHeapUtilization struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. capacity int // max observed number of data points added to the metric. } // init fills jvm.memory.heap.utilization metric with initial data. func (m *metricJvmMemoryHeapUtilization) init() { m.data.SetName("jvm.memory.heap.utilization") m.data.SetDescription("Fraction of heap memory usage") m.data.SetUnit("1") m.data.SetEmptyGauge() } func (m *metricJvmMemoryHeapUtilization) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64) { if !m.config.Enabled { return } dp := m.data.Gauge().DataPoints().AppendEmpty() dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetDoubleValue(val) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. func (m *metricJvmMemoryHeapUtilization) updateCapacity() { if m.data.Gauge().DataPoints().Len() > m.capacity { m.capacity = m.data.Gauge().DataPoints().Len() } } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. func (m *metricJvmMemoryHeapUtilization) emit(metrics pmetric.MetricSlice) { if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) m.init() } } func newMetricJvmMemoryHeapUtilization(cfg MetricConfig) metricJvmMemoryHeapUtilization { m := metricJvmMemoryHeapUtilization{config: cfg} if cfg.Enabled { m.data = pmetric.NewMetric() m.init() } return m } type metricJvmMemoryNonheapCommitted struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. capacity int // max observed number of data points added to the metric. } // init fills jvm.memory.nonheap.committed metric with initial data. func (m *metricJvmMemoryNonheapCommitted) init() { m.data.SetName("jvm.memory.nonheap.committed") m.data.SetDescription("The amount of memory that is guaranteed to be available for non-heap purposes") m.data.SetUnit("By") m.data.SetEmptyGauge() } func (m *metricJvmMemoryNonheapCommitted) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { if !m.config.Enabled { return } dp := m.data.Gauge().DataPoints().AppendEmpty() dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntValue(val) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. func (m *metricJvmMemoryNonheapCommitted) updateCapacity() { if m.data.Gauge().DataPoints().Len() > m.capacity { m.capacity = m.data.Gauge().DataPoints().Len() } } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. func (m *metricJvmMemoryNonheapCommitted) emit(metrics pmetric.MetricSlice) { if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) m.init() } } func newMetricJvmMemoryNonheapCommitted(cfg MetricConfig) metricJvmMemoryNonheapCommitted { m := metricJvmMemoryNonheapCommitted{config: cfg} if cfg.Enabled { m.data = pmetric.NewMetric() m.init() } return m } type metricJvmMemoryNonheapUsed struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. capacity int // max observed number of data points added to the metric. } // init fills jvm.memory.nonheap.used metric with initial data. func (m *metricJvmMemoryNonheapUsed) init() { m.data.SetName("jvm.memory.nonheap.used") m.data.SetDescription("The current non-heap memory usage") m.data.SetUnit("By") m.data.SetEmptyGauge() } func (m *metricJvmMemoryNonheapUsed) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { if !m.config.Enabled { return } dp := m.data.Gauge().DataPoints().AppendEmpty() dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntValue(val) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. func (m *metricJvmMemoryNonheapUsed) updateCapacity() { if m.data.Gauge().DataPoints().Len() > m.capacity { m.capacity = m.data.Gauge().DataPoints().Len() } } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. func (m *metricJvmMemoryNonheapUsed) emit(metrics pmetric.MetricSlice) { if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) m.init() } } func newMetricJvmMemoryNonheapUsed(cfg MetricConfig) metricJvmMemoryNonheapUsed { m := metricJvmMemoryNonheapUsed{config: cfg} if cfg.Enabled { m.data = pmetric.NewMetric() m.init() } return m } type metricJvmMemoryPoolMax struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. capacity int // max observed number of data points added to the metric. } // init fills jvm.memory.pool.max metric with initial data. func (m *metricJvmMemoryPoolMax) init() { m.data.SetName("jvm.memory.pool.max") m.data.SetDescription("The maximum amount of memory can be used for the memory pool") m.data.SetUnit("By") m.data.SetEmptyGauge() m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) } func (m *metricJvmMemoryPoolMax) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, memoryPoolNameAttributeValue string) { if !m.config.Enabled { return } dp := m.data.Gauge().DataPoints().AppendEmpty() dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntValue(val) dp.Attributes().PutStr("name", memoryPoolNameAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. func (m *metricJvmMemoryPoolMax) updateCapacity() { if m.data.Gauge().DataPoints().Len() > m.capacity { m.capacity = m.data.Gauge().DataPoints().Len() } } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. func (m *metricJvmMemoryPoolMax) emit(metrics pmetric.MetricSlice) { if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) m.init() } } func newMetricJvmMemoryPoolMax(cfg MetricConfig) metricJvmMemoryPoolMax { m := metricJvmMemoryPoolMax{config: cfg} if cfg.Enabled { m.data = pmetric.NewMetric() m.init() } return m } type metricJvmMemoryPoolUsed struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. capacity int // max observed number of data points added to the metric. } // init fills jvm.memory.pool.used metric with initial data. func (m *metricJvmMemoryPoolUsed) init() { m.data.SetName("jvm.memory.pool.used") m.data.SetDescription("The current memory pool memory usage") m.data.SetUnit("By") m.data.SetEmptyGauge() m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) } func (m *metricJvmMemoryPoolUsed) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, memoryPoolNameAttributeValue string) { if !m.config.Enabled { return } dp := m.data.Gauge().DataPoints().AppendEmpty() dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntValue(val) dp.Attributes().PutStr("name", memoryPoolNameAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. func (m *metricJvmMemoryPoolUsed) updateCapacity() { if m.data.Gauge().DataPoints().Len() > m.capacity { m.capacity = m.data.Gauge().DataPoints().Len() } } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. func (m *metricJvmMemoryPoolUsed) emit(metrics pmetric.MetricSlice) { if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) m.init() } } func newMetricJvmMemoryPoolUsed(cfg MetricConfig) metricJvmMemoryPoolUsed { m := metricJvmMemoryPoolUsed{config: cfg} if cfg.Enabled { m.data = pmetric.NewMetric() m.init() } return m } type metricJvmThreadsCount struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. capacity int // max observed number of data points added to the metric. } // init fills jvm.threads.count metric with initial data. func (m *metricJvmThreadsCount) init() { m.data.SetName("jvm.threads.count") m.data.SetDescription("The current number of threads") m.data.SetUnit("1") m.data.SetEmptyGauge() } func (m *metricJvmThreadsCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { if !m.config.Enabled { return } dp := m.data.Gauge().DataPoints().AppendEmpty() dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntValue(val) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. func (m *metricJvmThreadsCount) updateCapacity() { if m.data.Gauge().DataPoints().Len() > m.capacity { m.capacity = m.data.Gauge().DataPoints().Len() } } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. func (m *metricJvmThreadsCount) emit(metrics pmetric.MetricSlice) { if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) m.init() } } func newMetricJvmThreadsCount(cfg MetricConfig) metricJvmThreadsCount { m := metricJvmThreadsCount{config: cfg} if cfg.Enabled { m.data = pmetric.NewMetric() m.init() } return m } // MetricsBuilder provides an interface for scrapers to report metrics while taking care of all the transformations // required to produce metric representation defined in metadata and user config. type MetricsBuilder struct { config MetricsBuilderConfig // config of the metrics builder. startTime pcommon.Timestamp // start time that will be applied to all recorded data points. metricsCapacity int // maximum observed number of metrics per resource. metricsBuffer pmetric.Metrics // accumulates metrics data before emitting. buildInfo component.BuildInfo // contains version information. resourceAttributeIncludeFilter map[string]filter.Filter resourceAttributeExcludeFilter map[string]filter.Filter metricElasticsearchBreakerMemoryEstimated metricElasticsearchBreakerMemoryEstimated metricElasticsearchBreakerMemoryLimit metricElasticsearchBreakerMemoryLimit metricElasticsearchBreakerTripped metricElasticsearchBreakerTripped metricElasticsearchClusterDataNodes metricElasticsearchClusterDataNodes metricElasticsearchClusterHealth metricElasticsearchClusterHealth metricElasticsearchClusterInFlightFetch metricElasticsearchClusterInFlightFetch metricElasticsearchClusterIndicesCacheEvictions metricElasticsearchClusterIndicesCacheEvictions metricElasticsearchClusterNodes metricElasticsearchClusterNodes metricElasticsearchClusterPendingTasks metricElasticsearchClusterPendingTasks metricElasticsearchClusterPublishedStatesDifferences metricElasticsearchClusterPublishedStatesDifferences metricElasticsearchClusterPublishedStatesFull metricElasticsearchClusterPublishedStatesFull metricElasticsearchClusterShards metricElasticsearchClusterShards metricElasticsearchClusterStateQueue metricElasticsearchClusterStateQueue metricElasticsearchClusterStateUpdateCount metricElasticsearchClusterStateUpdateCount metricElasticsearchClusterStateUpdateTime metricElasticsearchClusterStateUpdateTime metricElasticsearchIndexCacheEvictions metricElasticsearchIndexCacheEvictions metricElasticsearchIndexCacheMemoryUsage metricElasticsearchIndexCacheMemoryUsage metricElasticsearchIndexCacheSize metricElasticsearchIndexCacheSize metricElasticsearchIndexDocuments metricElasticsearchIndexDocuments metricElasticsearchIndexOperationsCompleted metricElasticsearchIndexOperationsCompleted metricElasticsearchIndexOperationsMergeCurrent metricElasticsearchIndexOperationsMergeCurrent metricElasticsearchIndexOperationsMergeDocsCount metricElasticsearchIndexOperationsMergeDocsCount metricElasticsearchIndexOperationsMergeSize metricElasticsearchIndexOperationsMergeSize metricElasticsearchIndexOperationsTime metricElasticsearchIndexOperationsTime metricElasticsearchIndexSegmentsCount metricElasticsearchIndexSegmentsCount metricElasticsearchIndexSegmentsMemory metricElasticsearchIndexSegmentsMemory metricElasticsearchIndexSegmentsSize metricElasticsearchIndexSegmentsSize metricElasticsearchIndexShardsSize metricElasticsearchIndexShardsSize metricElasticsearchIndexTranslogOperations metricElasticsearchIndexTranslogOperations metricElasticsearchIndexTranslogSize metricElasticsearchIndexTranslogSize metricElasticsearchIndexingPressureMemoryLimit metricElasticsearchIndexingPressureMemoryLimit metricElasticsearchIndexingPressureMemoryTotalPrimaryRejections metricElasticsearchIndexingPressureMemoryTotalPrimaryRejections metricElasticsearchIndexingPressureMemoryTotalReplicaRejections metricElasticsearchIndexingPressureMemoryTotalReplicaRejections metricElasticsearchMemoryIndexingPressure metricElasticsearchMemoryIndexingPressure metricElasticsearchNodeCacheCount metricElasticsearchNodeCacheCount metricElasticsearchNodeCacheEvictions metricElasticsearchNodeCacheEvictions metricElasticsearchNodeCacheMemoryUsage metricElasticsearchNodeCacheMemoryUsage metricElasticsearchNodeCacheSize metricElasticsearchNodeCacheSize metricElasticsearchNodeClusterConnections metricElasticsearchNodeClusterConnections metricElasticsearchNodeClusterIo metricElasticsearchNodeClusterIo metricElasticsearchNodeDiskIoRead metricElasticsearchNodeDiskIoRead metricElasticsearchNodeDiskIoWrite metricElasticsearchNodeDiskIoWrite metricElasticsearchNodeDocuments metricElasticsearchNodeDocuments metricElasticsearchNodeFsDiskAvailable metricElasticsearchNodeFsDiskAvailable metricElasticsearchNodeFsDiskFree metricElasticsearchNodeFsDiskFree metricElasticsearchNodeFsDiskTotal metricElasticsearchNodeFsDiskTotal metricElasticsearchNodeHTTPConnections metricElasticsearchNodeHTTPConnections metricElasticsearchNodeIngestDocuments metricElasticsearchNodeIngestDocuments metricElasticsearchNodeIngestDocumentsCurrent metricElasticsearchNodeIngestDocumentsCurrent metricElasticsearchNodeIngestOperationsFailed metricElasticsearchNodeIngestOperationsFailed metricElasticsearchNodeOpenFiles metricElasticsearchNodeOpenFiles metricElasticsearchNodeOperationsCompleted metricElasticsearchNodeOperationsCompleted metricElasticsearchNodeOperationsCurrent metricElasticsearchNodeOperationsCurrent metricElasticsearchNodeOperationsGetCompleted metricElasticsearchNodeOperationsGetCompleted metricElasticsearchNodeOperationsGetTime metricElasticsearchNodeOperationsGetTime metricElasticsearchNodeOperationsTime metricElasticsearchNodeOperationsTime metricElasticsearchNodePipelineIngestDocumentsCurrent metricElasticsearchNodePipelineIngestDocumentsCurrent metricElasticsearchNodePipelineIngestDocumentsPreprocessed metricElasticsearchNodePipelineIngestDocumentsPreprocessed metricElasticsearchNodePipelineIngestOperationsFailed metricElasticsearchNodePipelineIngestOperationsFailed metricElasticsearchNodeScriptCacheEvictions metricElasticsearchNodeScriptCacheEvictions metricElasticsearchNodeScriptCompilationLimitTriggered metricElasticsearchNodeScriptCompilationLimitTriggered metricElasticsearchNodeScriptCompilations metricElasticsearchNodeScriptCompilations metricElasticsearchNodeSegmentsMemory metricElasticsearchNodeSegmentsMemory metricElasticsearchNodeShardsDataSetSize metricElasticsearchNodeShardsDataSetSize metricElasticsearchNodeShardsReservedSize metricElasticsearchNodeShardsReservedSize metricElasticsearchNodeShardsSize metricElasticsearchNodeShardsSize metricElasticsearchNodeThreadPoolTasksFinished metricElasticsearchNodeThreadPoolTasksFinished metricElasticsearchNodeThreadPoolTasksQueued metricElasticsearchNodeThreadPoolTasksQueued metricElasticsearchNodeThreadPoolThreads metricElasticsearchNodeThreadPoolThreads metricElasticsearchNodeTranslogOperations metricElasticsearchNodeTranslogOperations metricElasticsearchNodeTranslogSize metricElasticsearchNodeTranslogSize metricElasticsearchNodeTranslogUncommittedSize metricElasticsearchNodeTranslogUncommittedSize metricElasticsearchOsCPULoadAvg15m metricElasticsearchOsCPULoadAvg15m metricElasticsearchOsCPULoadAvg1m metricElasticsearchOsCPULoadAvg1m metricElasticsearchOsCPULoadAvg5m metricElasticsearchOsCPULoadAvg5m metricElasticsearchOsCPUUsage metricElasticsearchOsCPUUsage metricElasticsearchOsMemory metricElasticsearchOsMemory metricElasticsearchProcessCPUTime metricElasticsearchProcessCPUTime metricElasticsearchProcessCPUUsage metricElasticsearchProcessCPUUsage metricElasticsearchProcessMemoryVirtual metricElasticsearchProcessMemoryVirtual metricJvmClassesLoaded metricJvmClassesLoaded metricJvmGcCollectionsCount metricJvmGcCollectionsCount metricJvmGcCollectionsElapsed metricJvmGcCollectionsElapsed metricJvmMemoryHeapCommitted metricJvmMemoryHeapCommitted metricJvmMemoryHeapMax metricJvmMemoryHeapMax metricJvmMemoryHeapUsed metricJvmMemoryHeapUsed metricJvmMemoryHeapUtilization metricJvmMemoryHeapUtilization metricJvmMemoryNonheapCommitted metricJvmMemoryNonheapCommitted metricJvmMemoryNonheapUsed metricJvmMemoryNonheapUsed metricJvmMemoryPoolMax metricJvmMemoryPoolMax metricJvmMemoryPoolUsed metricJvmMemoryPoolUsed metricJvmThreadsCount metricJvmThreadsCount } // MetricBuilderOption applies changes to default metrics builder. type MetricBuilderOption interface { apply(*MetricsBuilder) } type metricBuilderOptionFunc func(mb *MetricsBuilder) func (mbof metricBuilderOptionFunc) apply(mb *MetricsBuilder) { mbof(mb) } // WithStartTime sets startTime on the metrics builder. func WithStartTime(startTime pcommon.Timestamp) MetricBuilderOption { return metricBuilderOptionFunc(func(mb *MetricsBuilder) { mb.startTime = startTime }) } func NewMetricsBuilder(mbc MetricsBuilderConfig, settings receiver.Settings, options ...MetricBuilderOption) *MetricsBuilder { mb := &MetricsBuilder{ config: mbc, startTime: pcommon.NewTimestampFromTime(time.Now()), metricsBuffer: pmetric.NewMetrics(), buildInfo: settings.BuildInfo, metricElasticsearchBreakerMemoryEstimated: newMetricElasticsearchBreakerMemoryEstimated(mbc.Metrics.ElasticsearchBreakerMemoryEstimated), metricElasticsearchBreakerMemoryLimit: newMetricElasticsearchBreakerMemoryLimit(mbc.Metrics.ElasticsearchBreakerMemoryLimit), metricElasticsearchBreakerTripped: newMetricElasticsearchBreakerTripped(mbc.Metrics.ElasticsearchBreakerTripped), metricElasticsearchClusterDataNodes: newMetricElasticsearchClusterDataNodes(mbc.Metrics.ElasticsearchClusterDataNodes), metricElasticsearchClusterHealth: newMetricElasticsearchClusterHealth(mbc.Metrics.ElasticsearchClusterHealth), metricElasticsearchClusterInFlightFetch: newMetricElasticsearchClusterInFlightFetch(mbc.Metrics.ElasticsearchClusterInFlightFetch), metricElasticsearchClusterIndicesCacheEvictions: newMetricElasticsearchClusterIndicesCacheEvictions(mbc.Metrics.ElasticsearchClusterIndicesCacheEvictions), metricElasticsearchClusterNodes: newMetricElasticsearchClusterNodes(mbc.Metrics.ElasticsearchClusterNodes), metricElasticsearchClusterPendingTasks: newMetricElasticsearchClusterPendingTasks(mbc.Metrics.ElasticsearchClusterPendingTasks), metricElasticsearchClusterPublishedStatesDifferences: newMetricElasticsearchClusterPublishedStatesDifferences(mbc.Metrics.ElasticsearchClusterPublishedStatesDifferences), metricElasticsearchClusterPublishedStatesFull: newMetricElasticsearchClusterPublishedStatesFull(mbc.Metrics.ElasticsearchClusterPublishedStatesFull), metricElasticsearchClusterShards: newMetricElasticsearchClusterShards(mbc.Metrics.ElasticsearchClusterShards), metricElasticsearchClusterStateQueue: newMetricElasticsearchClusterStateQueue(mbc.Metrics.ElasticsearchClusterStateQueue), metricElasticsearchClusterStateUpdateCount: newMetricElasticsearchClusterStateUpdateCount(mbc.Metrics.ElasticsearchClusterStateUpdateCount), metricElasticsearchClusterStateUpdateTime: newMetricElasticsearchClusterStateUpdateTime(mbc.Metrics.ElasticsearchClusterStateUpdateTime), metricElasticsearchIndexCacheEvictions: newMetricElasticsearchIndexCacheEvictions(mbc.Metrics.ElasticsearchIndexCacheEvictions), metricElasticsearchIndexCacheMemoryUsage: newMetricElasticsearchIndexCacheMemoryUsage(mbc.Metrics.ElasticsearchIndexCacheMemoryUsage), metricElasticsearchIndexCacheSize: newMetricElasticsearchIndexCacheSize(mbc.Metrics.ElasticsearchIndexCacheSize), metricElasticsearchIndexDocuments: newMetricElasticsearchIndexDocuments(mbc.Metrics.ElasticsearchIndexDocuments), metricElasticsearchIndexOperationsCompleted: newMetricElasticsearchIndexOperationsCompleted(mbc.Metrics.ElasticsearchIndexOperationsCompleted), metricElasticsearchIndexOperationsMergeCurrent: newMetricElasticsearchIndexOperationsMergeCurrent(mbc.Metrics.ElasticsearchIndexOperationsMergeCurrent), metricElasticsearchIndexOperationsMergeDocsCount: newMetricElasticsearchIndexOperationsMergeDocsCount(mbc.Metrics.ElasticsearchIndexOperationsMergeDocsCount), metricElasticsearchIndexOperationsMergeSize: newMetricElasticsearchIndexOperationsMergeSize(mbc.Metrics.ElasticsearchIndexOperationsMergeSize), metricElasticsearchIndexOperationsTime: newMetricElasticsearchIndexOperationsTime(mbc.Metrics.ElasticsearchIndexOperationsTime), metricElasticsearchIndexSegmentsCount: newMetricElasticsearchIndexSegmentsCount(mbc.Metrics.ElasticsearchIndexSegmentsCount), metricElasticsearchIndexSegmentsMemory: newMetricElasticsearchIndexSegmentsMemory(mbc.Metrics.ElasticsearchIndexSegmentsMemory), metricElasticsearchIndexSegmentsSize: newMetricElasticsearchIndexSegmentsSize(mbc.Metrics.ElasticsearchIndexSegmentsSize), metricElasticsearchIndexShardsSize: newMetricElasticsearchIndexShardsSize(mbc.Metrics.ElasticsearchIndexShardsSize), metricElasticsearchIndexTranslogOperations: newMetricElasticsearchIndexTranslogOperations(mbc.Metrics.ElasticsearchIndexTranslogOperations), metricElasticsearchIndexTranslogSize: newMetricElasticsearchIndexTranslogSize(mbc.Metrics.ElasticsearchIndexTranslogSize), metricElasticsearchIndexingPressureMemoryLimit: newMetricElasticsearchIndexingPressureMemoryLimit(mbc.Metrics.ElasticsearchIndexingPressureMemoryLimit), metricElasticsearchIndexingPressureMemoryTotalPrimaryRejections: newMetricElasticsearchIndexingPressureMemoryTotalPrimaryRejections(mbc.Metrics.ElasticsearchIndexingPressureMemoryTotalPrimaryRejections), metricElasticsearchIndexingPressureMemoryTotalReplicaRejections: newMetricElasticsearchIndexingPressureMemoryTotalReplicaRejections(mbc.Metrics.ElasticsearchIndexingPressureMemoryTotalReplicaRejections), metricElasticsearchMemoryIndexingPressure: newMetricElasticsearchMemoryIndexingPressure(mbc.Metrics.ElasticsearchMemoryIndexingPressure), metricElasticsearchNodeCacheCount: newMetricElasticsearchNodeCacheCount(mbc.Metrics.ElasticsearchNodeCacheCount), metricElasticsearchNodeCacheEvictions: newMetricElasticsearchNodeCacheEvictions(mbc.Metrics.ElasticsearchNodeCacheEvictions), metricElasticsearchNodeCacheMemoryUsage: newMetricElasticsearchNodeCacheMemoryUsage(mbc.Metrics.ElasticsearchNodeCacheMemoryUsage), metricElasticsearchNodeCacheSize: newMetricElasticsearchNodeCacheSize(mbc.Metrics.ElasticsearchNodeCacheSize), metricElasticsearchNodeClusterConnections: newMetricElasticsearchNodeClusterConnections(mbc.Metrics.ElasticsearchNodeClusterConnections), metricElasticsearchNodeClusterIo: newMetricElasticsearchNodeClusterIo(mbc.Metrics.ElasticsearchNodeClusterIo), metricElasticsearchNodeDiskIoRead: newMetricElasticsearchNodeDiskIoRead(mbc.Metrics.ElasticsearchNodeDiskIoRead), metricElasticsearchNodeDiskIoWrite: newMetricElasticsearchNodeDiskIoWrite(mbc.Metrics.ElasticsearchNodeDiskIoWrite), metricElasticsearchNodeDocuments: newMetricElasticsearchNodeDocuments(mbc.Metrics.ElasticsearchNodeDocuments), metricElasticsearchNodeFsDiskAvailable: newMetricElasticsearchNodeFsDiskAvailable(mbc.Metrics.ElasticsearchNodeFsDiskAvailable), metricElasticsearchNodeFsDiskFree: newMetricElasticsearchNodeFsDiskFree(mbc.Metrics.ElasticsearchNodeFsDiskFree), metricElasticsearchNodeFsDiskTotal: newMetricElasticsearchNodeFsDiskTotal(mbc.Metrics.ElasticsearchNodeFsDiskTotal), metricElasticsearchNodeHTTPConnections: newMetricElasticsearchNodeHTTPConnections(mbc.Metrics.ElasticsearchNodeHTTPConnections), metricElasticsearchNodeIngestDocuments: newMetricElasticsearchNodeIngestDocuments(mbc.Metrics.ElasticsearchNodeIngestDocuments), metricElasticsearchNodeIngestDocumentsCurrent: newMetricElasticsearchNodeIngestDocumentsCurrent(mbc.Metrics.ElasticsearchNodeIngestDocumentsCurrent), metricElasticsearchNodeIngestOperationsFailed: newMetricElasticsearchNodeIngestOperationsFailed(mbc.Metrics.ElasticsearchNodeIngestOperationsFailed), metricElasticsearchNodeOpenFiles: newMetricElasticsearchNodeOpenFiles(mbc.Metrics.ElasticsearchNodeOpenFiles), metricElasticsearchNodeOperationsCompleted: newMetricElasticsearchNodeOperationsCompleted(mbc.Metrics.ElasticsearchNodeOperationsCompleted), metricElasticsearchNodeOperationsCurrent: newMetricElasticsearchNodeOperationsCurrent(mbc.Metrics.ElasticsearchNodeOperationsCurrent), metricElasticsearchNodeOperationsGetCompleted: newMetricElasticsearchNodeOperationsGetCompleted(mbc.Metrics.ElasticsearchNodeOperationsGetCompleted), metricElasticsearchNodeOperationsGetTime: newMetricElasticsearchNodeOperationsGetTime(mbc.Metrics.ElasticsearchNodeOperationsGetTime), metricElasticsearchNodeOperationsTime: newMetricElasticsearchNodeOperationsTime(mbc.Metrics.ElasticsearchNodeOperationsTime), metricElasticsearchNodePipelineIngestDocumentsCurrent: newMetricElasticsearchNodePipelineIngestDocumentsCurrent(mbc.Metrics.ElasticsearchNodePipelineIngestDocumentsCurrent), metricElasticsearchNodePipelineIngestDocumentsPreprocessed: newMetricElasticsearchNodePipelineIngestDocumentsPreprocessed(mbc.Metrics.ElasticsearchNodePipelineIngestDocumentsPreprocessed), metricElasticsearchNodePipelineIngestOperationsFailed: newMetricElasticsearchNodePipelineIngestOperationsFailed(mbc.Metrics.ElasticsearchNodePipelineIngestOperationsFailed), metricElasticsearchNodeScriptCacheEvictions: newMetricElasticsearchNodeScriptCacheEvictions(mbc.Metrics.ElasticsearchNodeScriptCacheEvictions), metricElasticsearchNodeScriptCompilationLimitTriggered: newMetricElasticsearchNodeScriptCompilationLimitTriggered(mbc.Metrics.ElasticsearchNodeScriptCompilationLimitTriggered), metricElasticsearchNodeScriptCompilations: newMetricElasticsearchNodeScriptCompilations(mbc.Metrics.ElasticsearchNodeScriptCompilations), metricElasticsearchNodeSegmentsMemory: newMetricElasticsearchNodeSegmentsMemory(mbc.Metrics.ElasticsearchNodeSegmentsMemory), metricElasticsearchNodeShardsDataSetSize: newMetricElasticsearchNodeShardsDataSetSize(mbc.Metrics.ElasticsearchNodeShardsDataSetSize), metricElasticsearchNodeShardsReservedSize: newMetricElasticsearchNodeShardsReservedSize(mbc.Metrics.ElasticsearchNodeShardsReservedSize), metricElasticsearchNodeShardsSize: newMetricElasticsearchNodeShardsSize(mbc.Metrics.ElasticsearchNodeShardsSize), metricElasticsearchNodeThreadPoolTasksFinished: newMetricElasticsearchNodeThreadPoolTasksFinished(mbc.Metrics.ElasticsearchNodeThreadPoolTasksFinished), metricElasticsearchNodeThreadPoolTasksQueued: newMetricElasticsearchNodeThreadPoolTasksQueued(mbc.Metrics.ElasticsearchNodeThreadPoolTasksQueued), metricElasticsearchNodeThreadPoolThreads: newMetricElasticsearchNodeThreadPoolThreads(mbc.Metrics.ElasticsearchNodeThreadPoolThreads), metricElasticsearchNodeTranslogOperations: newMetricElasticsearchNodeTranslogOperations(mbc.Metrics.ElasticsearchNodeTranslogOperations), metricElasticsearchNodeTranslogSize: newMetricElasticsearchNodeTranslogSize(mbc.Metrics.ElasticsearchNodeTranslogSize), metricElasticsearchNodeTranslogUncommittedSize: newMetricElasticsearchNodeTranslogUncommittedSize(mbc.Metrics.ElasticsearchNodeTranslogUncommittedSize), metricElasticsearchOsCPULoadAvg15m: newMetricElasticsearchOsCPULoadAvg15m(mbc.Metrics.ElasticsearchOsCPULoadAvg15m), metricElasticsearchOsCPULoadAvg1m: newMetricElasticsearchOsCPULoadAvg1m(mbc.Metrics.ElasticsearchOsCPULoadAvg1m), metricElasticsearchOsCPULoadAvg5m: newMetricElasticsearchOsCPULoadAvg5m(mbc.Metrics.ElasticsearchOsCPULoadAvg5m), metricElasticsearchOsCPUUsage: newMetricElasticsearchOsCPUUsage(mbc.Metrics.ElasticsearchOsCPUUsage), metricElasticsearchOsMemory: newMetricElasticsearchOsMemory(mbc.Metrics.ElasticsearchOsMemory), metricElasticsearchProcessCPUTime: newMetricElasticsearchProcessCPUTime(mbc.Metrics.ElasticsearchProcessCPUTime), metricElasticsearchProcessCPUUsage: newMetricElasticsearchProcessCPUUsage(mbc.Metrics.ElasticsearchProcessCPUUsage), metricElasticsearchProcessMemoryVirtual: newMetricElasticsearchProcessMemoryVirtual(mbc.Metrics.ElasticsearchProcessMemoryVirtual), metricJvmClassesLoaded: newMetricJvmClassesLoaded(mbc.Metrics.JvmClassesLoaded), metricJvmGcCollectionsCount: newMetricJvmGcCollectionsCount(mbc.Metrics.JvmGcCollectionsCount), metricJvmGcCollectionsElapsed: newMetricJvmGcCollectionsElapsed(mbc.Metrics.JvmGcCollectionsElapsed), metricJvmMemoryHeapCommitted: newMetricJvmMemoryHeapCommitted(mbc.Metrics.JvmMemoryHeapCommitted), metricJvmMemoryHeapMax: newMetricJvmMemoryHeapMax(mbc.Metrics.JvmMemoryHeapMax), metricJvmMemoryHeapUsed: newMetricJvmMemoryHeapUsed(mbc.Metrics.JvmMemoryHeapUsed), metricJvmMemoryHeapUtilization: newMetricJvmMemoryHeapUtilization(mbc.Metrics.JvmMemoryHeapUtilization), metricJvmMemoryNonheapCommitted: newMetricJvmMemoryNonheapCommitted(mbc.Metrics.JvmMemoryNonheapCommitted), metricJvmMemoryNonheapUsed: newMetricJvmMemoryNonheapUsed(mbc.Metrics.JvmMemoryNonheapUsed), metricJvmMemoryPoolMax: newMetricJvmMemoryPoolMax(mbc.Metrics.JvmMemoryPoolMax), metricJvmMemoryPoolUsed: newMetricJvmMemoryPoolUsed(mbc.Metrics.JvmMemoryPoolUsed), metricJvmThreadsCount: newMetricJvmThreadsCount(mbc.Metrics.JvmThreadsCount), resourceAttributeIncludeFilter: make(map[string]filter.Filter), resourceAttributeExcludeFilter: make(map[string]filter.Filter), } if mbc.ResourceAttributes.ElasticsearchClusterName.MetricsInclude != nil { mb.resourceAttributeIncludeFilter["elasticsearch.cluster.name"] = filter.CreateFilter(mbc.ResourceAttributes.ElasticsearchClusterName.MetricsInclude) } if mbc.ResourceAttributes.ElasticsearchClusterName.MetricsExclude != nil { mb.resourceAttributeExcludeFilter["elasticsearch.cluster.name"] = filter.CreateFilter(mbc.ResourceAttributes.ElasticsearchClusterName.MetricsExclude) } if mbc.ResourceAttributes.ElasticsearchIndexName.MetricsInclude != nil { mb.resourceAttributeIncludeFilter["elasticsearch.index.name"] = filter.CreateFilter(mbc.ResourceAttributes.ElasticsearchIndexName.MetricsInclude) } if mbc.ResourceAttributes.ElasticsearchIndexName.MetricsExclude != nil { mb.resourceAttributeExcludeFilter["elasticsearch.index.name"] = filter.CreateFilter(mbc.ResourceAttributes.ElasticsearchIndexName.MetricsExclude) } if mbc.ResourceAttributes.ElasticsearchNodeName.MetricsInclude != nil { mb.resourceAttributeIncludeFilter["elasticsearch.node.name"] = filter.CreateFilter(mbc.ResourceAttributes.ElasticsearchNodeName.MetricsInclude) } if mbc.ResourceAttributes.ElasticsearchNodeName.MetricsExclude != nil { mb.resourceAttributeExcludeFilter["elasticsearch.node.name"] = filter.CreateFilter(mbc.ResourceAttributes.ElasticsearchNodeName.MetricsExclude) } if mbc.ResourceAttributes.ElasticsearchNodeVersion.MetricsInclude != nil { mb.resourceAttributeIncludeFilter["elasticsearch.node.version"] = filter.CreateFilter(mbc.ResourceAttributes.ElasticsearchNodeVersion.MetricsInclude) } if mbc.ResourceAttributes.ElasticsearchNodeVersion.MetricsExclude != nil { mb.resourceAttributeExcludeFilter["elasticsearch.node.version"] = filter.CreateFilter(mbc.ResourceAttributes.ElasticsearchNodeVersion.MetricsExclude) } for _, op := range options { op.apply(mb) } return mb } // NewResourceBuilder returns a new resource builder that should be used to build a resource associated with for the emitted metrics. func (mb *MetricsBuilder) NewResourceBuilder() *ResourceBuilder { return NewResourceBuilder(mb.config.ResourceAttributes) } // updateCapacity updates max length of metrics and resource attributes that will be used for the slice capacity. func (mb *MetricsBuilder) updateCapacity(rm pmetric.ResourceMetrics) { if mb.metricsCapacity < rm.ScopeMetrics().At(0).Metrics().Len() { mb.metricsCapacity = rm.ScopeMetrics().At(0).Metrics().Len() } } // ResourceMetricsOption applies changes to provided resource metrics. type ResourceMetricsOption interface { apply(pmetric.ResourceMetrics) } type resourceMetricsOptionFunc func(pmetric.ResourceMetrics) func (rmof resourceMetricsOptionFunc) apply(rm pmetric.ResourceMetrics) { rmof(rm) } // WithResource sets the provided resource on the emitted ResourceMetrics. // It's recommended to use ResourceBuilder to create the resource. func WithResource(res pcommon.Resource) ResourceMetricsOption { return resourceMetricsOptionFunc(func(rm pmetric.ResourceMetrics) { res.CopyTo(rm.Resource()) }) } // WithStartTimeOverride overrides start time for all the resource metrics data points. // This option should be only used if different start time has to be set on metrics coming from different resources. func WithStartTimeOverride(start pcommon.Timestamp) ResourceMetricsOption { return resourceMetricsOptionFunc(func(rm pmetric.ResourceMetrics) { var dps pmetric.NumberDataPointSlice metrics := rm.ScopeMetrics().At(0).Metrics() for i := 0; i < metrics.Len(); i++ { switch metrics.At(i).Type() { case pmetric.MetricTypeGauge: dps = metrics.At(i).Gauge().DataPoints() case pmetric.MetricTypeSum: dps = metrics.At(i).Sum().DataPoints() } for j := 0; j < dps.Len(); j++ { dps.At(j).SetStartTimestamp(start) } } }) } // EmitForResource saves all the generated metrics under a new resource and updates the internal state to be ready for // recording another set of data points as part of another resource. This function can be helpful when one scraper // needs to emit metrics from several resources. Otherwise calling this function is not required, // just `Emit` function can be called instead. // Resource attributes should be provided as ResourceMetricsOption arguments. func (mb *MetricsBuilder) EmitForResource(options ...ResourceMetricsOption) { rm := pmetric.NewResourceMetrics() ils := rm.ScopeMetrics().AppendEmpty() ils.Scope().SetName(ScopeName) ils.Scope().SetVersion(mb.buildInfo.Version) ils.Metrics().EnsureCapacity(mb.metricsCapacity) mb.metricElasticsearchBreakerMemoryEstimated.emit(ils.Metrics()) mb.metricElasticsearchBreakerMemoryLimit.emit(ils.Metrics()) mb.metricElasticsearchBreakerTripped.emit(ils.Metrics()) mb.metricElasticsearchClusterDataNodes.emit(ils.Metrics()) mb.metricElasticsearchClusterHealth.emit(ils.Metrics()) mb.metricElasticsearchClusterInFlightFetch.emit(ils.Metrics()) mb.metricElasticsearchClusterIndicesCacheEvictions.emit(ils.Metrics()) mb.metricElasticsearchClusterNodes.emit(ils.Metrics()) mb.metricElasticsearchClusterPendingTasks.emit(ils.Metrics()) mb.metricElasticsearchClusterPublishedStatesDifferences.emit(ils.Metrics()) mb.metricElasticsearchClusterPublishedStatesFull.emit(ils.Metrics()) mb.metricElasticsearchClusterShards.emit(ils.Metrics()) mb.metricElasticsearchClusterStateQueue.emit(ils.Metrics()) mb.metricElasticsearchClusterStateUpdateCount.emit(ils.Metrics()) mb.metricElasticsearchClusterStateUpdateTime.emit(ils.Metrics()) mb.metricElasticsearchIndexCacheEvictions.emit(ils.Metrics()) mb.metricElasticsearchIndexCacheMemoryUsage.emit(ils.Metrics()) mb.metricElasticsearchIndexCacheSize.emit(ils.Metrics()) mb.metricElasticsearchIndexDocuments.emit(ils.Metrics()) mb.metricElasticsearchIndexOperationsCompleted.emit(ils.Metrics()) mb.metricElasticsearchIndexOperationsMergeCurrent.emit(ils.Metrics()) mb.metricElasticsearchIndexOperationsMergeDocsCount.emit(ils.Metrics()) mb.metricElasticsearchIndexOperationsMergeSize.emit(ils.Metrics()) mb.metricElasticsearchIndexOperationsTime.emit(ils.Metrics()) mb.metricElasticsearchIndexSegmentsCount.emit(ils.Metrics()) mb.metricElasticsearchIndexSegmentsMemory.emit(ils.Metrics()) mb.metricElasticsearchIndexSegmentsSize.emit(ils.Metrics()) mb.metricElasticsearchIndexShardsSize.emit(ils.Metrics()) mb.metricElasticsearchIndexTranslogOperations.emit(ils.Metrics()) mb.metricElasticsearchIndexTranslogSize.emit(ils.Metrics()) mb.metricElasticsearchIndexingPressureMemoryLimit.emit(ils.Metrics()) mb.metricElasticsearchIndexingPressureMemoryTotalPrimaryRejections.emit(ils.Metrics()) mb.metricElasticsearchIndexingPressureMemoryTotalReplicaRejections.emit(ils.Metrics()) mb.metricElasticsearchMemoryIndexingPressure.emit(ils.Metrics()) mb.metricElasticsearchNodeCacheCount.emit(ils.Metrics()) mb.metricElasticsearchNodeCacheEvictions.emit(ils.Metrics()) mb.metricElasticsearchNodeCacheMemoryUsage.emit(ils.Metrics()) mb.metricElasticsearchNodeCacheSize.emit(ils.Metrics()) mb.metricElasticsearchNodeClusterConnections.emit(ils.Metrics()) mb.metricElasticsearchNodeClusterIo.emit(ils.Metrics()) mb.metricElasticsearchNodeDiskIoRead.emit(ils.Metrics()) mb.metricElasticsearchNodeDiskIoWrite.emit(ils.Metrics()) mb.metricElasticsearchNodeDocuments.emit(ils.Metrics()) mb.metricElasticsearchNodeFsDiskAvailable.emit(ils.Metrics()) mb.metricElasticsearchNodeFsDiskFree.emit(ils.Metrics()) mb.metricElasticsearchNodeFsDiskTotal.emit(ils.Metrics()) mb.metricElasticsearchNodeHTTPConnections.emit(ils.Metrics()) mb.metricElasticsearchNodeIngestDocuments.emit(ils.Metrics()) mb.metricElasticsearchNodeIngestDocumentsCurrent.emit(ils.Metrics()) mb.metricElasticsearchNodeIngestOperationsFailed.emit(ils.Metrics()) mb.metricElasticsearchNodeOpenFiles.emit(ils.Metrics()) mb.metricElasticsearchNodeOperationsCompleted.emit(ils.Metrics()) mb.metricElasticsearchNodeOperationsCurrent.emit(ils.Metrics()) mb.metricElasticsearchNodeOperationsGetCompleted.emit(ils.Metrics()) mb.metricElasticsearchNodeOperationsGetTime.emit(ils.Metrics()) mb.metricElasticsearchNodeOperationsTime.emit(ils.Metrics()) mb.metricElasticsearchNodePipelineIngestDocumentsCurrent.emit(ils.Metrics()) mb.metricElasticsearchNodePipelineIngestDocumentsPreprocessed.emit(ils.Metrics()) mb.metricElasticsearchNodePipelineIngestOperationsFailed.emit(ils.Metrics()) mb.metricElasticsearchNodeScriptCacheEvictions.emit(ils.Metrics()) mb.metricElasticsearchNodeScriptCompilationLimitTriggered.emit(ils.Metrics()) mb.metricElasticsearchNodeScriptCompilations.emit(ils.Metrics()) mb.metricElasticsearchNodeSegmentsMemory.emit(ils.Metrics()) mb.metricElasticsearchNodeShardsDataSetSize.emit(ils.Metrics()) mb.metricElasticsearchNodeShardsReservedSize.emit(ils.Metrics()) mb.metricElasticsearchNodeShardsSize.emit(ils.Metrics()) mb.metricElasticsearchNodeThreadPoolTasksFinished.emit(ils.Metrics()) mb.metricElasticsearchNodeThreadPoolTasksQueued.emit(ils.Metrics()) mb.metricElasticsearchNodeThreadPoolThreads.emit(ils.Metrics()) mb.metricElasticsearchNodeTranslogOperations.emit(ils.Metrics()) mb.metricElasticsearchNodeTranslogSize.emit(ils.Metrics()) mb.metricElasticsearchNodeTranslogUncommittedSize.emit(ils.Metrics()) mb.metricElasticsearchOsCPULoadAvg15m.emit(ils.Metrics()) mb.metricElasticsearchOsCPULoadAvg1m.emit(ils.Metrics()) mb.metricElasticsearchOsCPULoadAvg5m.emit(ils.Metrics()) mb.metricElasticsearchOsCPUUsage.emit(ils.Metrics()) mb.metricElasticsearchOsMemory.emit(ils.Metrics()) mb.metricElasticsearchProcessCPUTime.emit(ils.Metrics()) mb.metricElasticsearchProcessCPUUsage.emit(ils.Metrics()) mb.metricElasticsearchProcessMemoryVirtual.emit(ils.Metrics()) mb.metricJvmClassesLoaded.emit(ils.Metrics()) mb.metricJvmGcCollectionsCount.emit(ils.Metrics()) mb.metricJvmGcCollectionsElapsed.emit(ils.Metrics()) mb.metricJvmMemoryHeapCommitted.emit(ils.Metrics()) mb.metricJvmMemoryHeapMax.emit(ils.Metrics()) mb.metricJvmMemoryHeapUsed.emit(ils.Metrics()) mb.metricJvmMemoryHeapUtilization.emit(ils.Metrics()) mb.metricJvmMemoryNonheapCommitted.emit(ils.Metrics()) mb.metricJvmMemoryNonheapUsed.emit(ils.Metrics()) mb.metricJvmMemoryPoolMax.emit(ils.Metrics()) mb.metricJvmMemoryPoolUsed.emit(ils.Metrics()) mb.metricJvmThreadsCount.emit(ils.Metrics()) for _, op := range options { op.apply(rm) } for attr, filter := range mb.resourceAttributeIncludeFilter { if val, ok := rm.Resource().Attributes().Get(attr); ok && !filter.Matches(val.AsString()) { return } } for attr, filter := range mb.resourceAttributeExcludeFilter { if val, ok := rm.Resource().Attributes().Get(attr); ok && filter.Matches(val.AsString()) { return } } if ils.Metrics().Len() > 0 { mb.updateCapacity(rm) rm.MoveTo(mb.metricsBuffer.ResourceMetrics().AppendEmpty()) } } // Emit returns all the metrics accumulated by the metrics builder and updates the internal state to be ready for // recording another set of metrics. This function will be responsible for applying all the transformations required to // produce metric representation defined in metadata and user config, e.g. delta or cumulative. func (mb *MetricsBuilder) Emit(options ...ResourceMetricsOption) pmetric.Metrics { mb.EmitForResource(options...) metrics := mb.metricsBuffer mb.metricsBuffer = pmetric.NewMetrics() return metrics } // RecordElasticsearchBreakerMemoryEstimatedDataPoint adds a data point to elasticsearch.breaker.memory.estimated metric. func (mb *MetricsBuilder) RecordElasticsearchBreakerMemoryEstimatedDataPoint(ts pcommon.Timestamp, val int64, circuitBreakerNameAttributeValue string) { mb.metricElasticsearchBreakerMemoryEstimated.recordDataPoint(mb.startTime, ts, val, circuitBreakerNameAttributeValue) } // RecordElasticsearchBreakerMemoryLimitDataPoint adds a data point to elasticsearch.breaker.memory.limit metric. func (mb *MetricsBuilder) RecordElasticsearchBreakerMemoryLimitDataPoint(ts pcommon.Timestamp, val int64, circuitBreakerNameAttributeValue string) { mb.metricElasticsearchBreakerMemoryLimit.recordDataPoint(mb.startTime, ts, val, circuitBreakerNameAttributeValue) } // RecordElasticsearchBreakerTrippedDataPoint adds a data point to elasticsearch.breaker.tripped metric. func (mb *MetricsBuilder) RecordElasticsearchBreakerTrippedDataPoint(ts pcommon.Timestamp, val int64, circuitBreakerNameAttributeValue string) { mb.metricElasticsearchBreakerTripped.recordDataPoint(mb.startTime, ts, val, circuitBreakerNameAttributeValue) } // RecordElasticsearchClusterDataNodesDataPoint adds a data point to elasticsearch.cluster.data_nodes metric. func (mb *MetricsBuilder) RecordElasticsearchClusterDataNodesDataPoint(ts pcommon.Timestamp, val int64) { mb.metricElasticsearchClusterDataNodes.recordDataPoint(mb.startTime, ts, val) } // RecordElasticsearchClusterHealthDataPoint adds a data point to elasticsearch.cluster.health metric. func (mb *MetricsBuilder) RecordElasticsearchClusterHealthDataPoint(ts pcommon.Timestamp, val int64, healthStatusAttributeValue AttributeHealthStatus) { mb.metricElasticsearchClusterHealth.recordDataPoint(mb.startTime, ts, val, healthStatusAttributeValue.String()) } // RecordElasticsearchClusterInFlightFetchDataPoint adds a data point to elasticsearch.cluster.in_flight_fetch metric. func (mb *MetricsBuilder) RecordElasticsearchClusterInFlightFetchDataPoint(ts pcommon.Timestamp, val int64) { mb.metricElasticsearchClusterInFlightFetch.recordDataPoint(mb.startTime, ts, val) } // RecordElasticsearchClusterIndicesCacheEvictionsDataPoint adds a data point to elasticsearch.cluster.indices.cache.evictions metric. func (mb *MetricsBuilder) RecordElasticsearchClusterIndicesCacheEvictionsDataPoint(ts pcommon.Timestamp, val int64, cacheNameAttributeValue AttributeCacheName) { mb.metricElasticsearchClusterIndicesCacheEvictions.recordDataPoint(mb.startTime, ts, val, cacheNameAttributeValue.String()) } // RecordElasticsearchClusterNodesDataPoint adds a data point to elasticsearch.cluster.nodes metric. func (mb *MetricsBuilder) RecordElasticsearchClusterNodesDataPoint(ts pcommon.Timestamp, val int64) { mb.metricElasticsearchClusterNodes.recordDataPoint(mb.startTime, ts, val) } // RecordElasticsearchClusterPendingTasksDataPoint adds a data point to elasticsearch.cluster.pending_tasks metric. func (mb *MetricsBuilder) RecordElasticsearchClusterPendingTasksDataPoint(ts pcommon.Timestamp, val int64) { mb.metricElasticsearchClusterPendingTasks.recordDataPoint(mb.startTime, ts, val) } // RecordElasticsearchClusterPublishedStatesDifferencesDataPoint adds a data point to elasticsearch.cluster.published_states.differences metric. func (mb *MetricsBuilder) RecordElasticsearchClusterPublishedStatesDifferencesDataPoint(ts pcommon.Timestamp, val int64, clusterPublishedDifferenceStateAttributeValue AttributeClusterPublishedDifferenceState) { mb.metricElasticsearchClusterPublishedStatesDifferences.recordDataPoint(mb.startTime, ts, val, clusterPublishedDifferenceStateAttributeValue.String()) } // RecordElasticsearchClusterPublishedStatesFullDataPoint adds a data point to elasticsearch.cluster.published_states.full metric. func (mb *MetricsBuilder) RecordElasticsearchClusterPublishedStatesFullDataPoint(ts pcommon.Timestamp, val int64) { mb.metricElasticsearchClusterPublishedStatesFull.recordDataPoint(mb.startTime, ts, val) } // RecordElasticsearchClusterShardsDataPoint adds a data point to elasticsearch.cluster.shards metric. func (mb *MetricsBuilder) RecordElasticsearchClusterShardsDataPoint(ts pcommon.Timestamp, val int64, shardStateAttributeValue AttributeShardState) { mb.metricElasticsearchClusterShards.recordDataPoint(mb.startTime, ts, val, shardStateAttributeValue.String()) } // RecordElasticsearchClusterStateQueueDataPoint adds a data point to elasticsearch.cluster.state_queue metric. func (mb *MetricsBuilder) RecordElasticsearchClusterStateQueueDataPoint(ts pcommon.Timestamp, val int64, clusterStateQueueStateAttributeValue AttributeClusterStateQueueState) { mb.metricElasticsearchClusterStateQueue.recordDataPoint(mb.startTime, ts, val, clusterStateQueueStateAttributeValue.String()) } // RecordElasticsearchClusterStateUpdateCountDataPoint adds a data point to elasticsearch.cluster.state_update.count metric. func (mb *MetricsBuilder) RecordElasticsearchClusterStateUpdateCountDataPoint(ts pcommon.Timestamp, val int64, clusterStateUpdateStateAttributeValue string) { mb.metricElasticsearchClusterStateUpdateCount.recordDataPoint(mb.startTime, ts, val, clusterStateUpdateStateAttributeValue) } // RecordElasticsearchClusterStateUpdateTimeDataPoint adds a data point to elasticsearch.cluster.state_update.time metric. func (mb *MetricsBuilder) RecordElasticsearchClusterStateUpdateTimeDataPoint(ts pcommon.Timestamp, val int64, clusterStateUpdateStateAttributeValue string, clusterStateUpdateTypeAttributeValue AttributeClusterStateUpdateType) { mb.metricElasticsearchClusterStateUpdateTime.recordDataPoint(mb.startTime, ts, val, clusterStateUpdateStateAttributeValue, clusterStateUpdateTypeAttributeValue.String()) } // RecordElasticsearchIndexCacheEvictionsDataPoint adds a data point to elasticsearch.index.cache.evictions metric. func (mb *MetricsBuilder) RecordElasticsearchIndexCacheEvictionsDataPoint(ts pcommon.Timestamp, val int64, cacheNameAttributeValue AttributeCacheName, indexAggregationTypeAttributeValue AttributeIndexAggregationType) { mb.metricElasticsearchIndexCacheEvictions.recordDataPoint(mb.startTime, ts, val, cacheNameAttributeValue.String(), indexAggregationTypeAttributeValue.String()) } // RecordElasticsearchIndexCacheMemoryUsageDataPoint adds a data point to elasticsearch.index.cache.memory.usage metric. func (mb *MetricsBuilder) RecordElasticsearchIndexCacheMemoryUsageDataPoint(ts pcommon.Timestamp, val int64, cacheNameAttributeValue AttributeCacheName, indexAggregationTypeAttributeValue AttributeIndexAggregationType) { mb.metricElasticsearchIndexCacheMemoryUsage.recordDataPoint(mb.startTime, ts, val, cacheNameAttributeValue.String(), indexAggregationTypeAttributeValue.String()) } // RecordElasticsearchIndexCacheSizeDataPoint adds a data point to elasticsearch.index.cache.size metric. func (mb *MetricsBuilder) RecordElasticsearchIndexCacheSizeDataPoint(ts pcommon.Timestamp, val int64, indexAggregationTypeAttributeValue AttributeIndexAggregationType) { mb.metricElasticsearchIndexCacheSize.recordDataPoint(mb.startTime, ts, val, indexAggregationTypeAttributeValue.String()) } // RecordElasticsearchIndexDocumentsDataPoint adds a data point to elasticsearch.index.documents metric. func (mb *MetricsBuilder) RecordElasticsearchIndexDocumentsDataPoint(ts pcommon.Timestamp, val int64, documentStateAttributeValue AttributeDocumentState, indexAggregationTypeAttributeValue AttributeIndexAggregationType) { mb.metricElasticsearchIndexDocuments.recordDataPoint(mb.startTime, ts, val, documentStateAttributeValue.String(), indexAggregationTypeAttributeValue.String()) } // RecordElasticsearchIndexOperationsCompletedDataPoint adds a data point to elasticsearch.index.operations.completed metric. func (mb *MetricsBuilder) RecordElasticsearchIndexOperationsCompletedDataPoint(ts pcommon.Timestamp, val int64, operationAttributeValue AttributeOperation, indexAggregationTypeAttributeValue AttributeIndexAggregationType) { mb.metricElasticsearchIndexOperationsCompleted.recordDataPoint(mb.startTime, ts, val, operationAttributeValue.String(), indexAggregationTypeAttributeValue.String()) } // RecordElasticsearchIndexOperationsMergeCurrentDataPoint adds a data point to elasticsearch.index.operations.merge.current metric. func (mb *MetricsBuilder) RecordElasticsearchIndexOperationsMergeCurrentDataPoint(ts pcommon.Timestamp, val int64, indexAggregationTypeAttributeValue AttributeIndexAggregationType) { mb.metricElasticsearchIndexOperationsMergeCurrent.recordDataPoint(mb.startTime, ts, val, indexAggregationTypeAttributeValue.String()) } // RecordElasticsearchIndexOperationsMergeDocsCountDataPoint adds a data point to elasticsearch.index.operations.merge.docs_count metric. func (mb *MetricsBuilder) RecordElasticsearchIndexOperationsMergeDocsCountDataPoint(ts pcommon.Timestamp, val int64, indexAggregationTypeAttributeValue AttributeIndexAggregationType) { mb.metricElasticsearchIndexOperationsMergeDocsCount.recordDataPoint(mb.startTime, ts, val, indexAggregationTypeAttributeValue.String()) } // RecordElasticsearchIndexOperationsMergeSizeDataPoint adds a data point to elasticsearch.index.operations.merge.size metric. func (mb *MetricsBuilder) RecordElasticsearchIndexOperationsMergeSizeDataPoint(ts pcommon.Timestamp, val int64, indexAggregationTypeAttributeValue AttributeIndexAggregationType) { mb.metricElasticsearchIndexOperationsMergeSize.recordDataPoint(mb.startTime, ts, val, indexAggregationTypeAttributeValue.String()) } // RecordElasticsearchIndexOperationsTimeDataPoint adds a data point to elasticsearch.index.operations.time metric. func (mb *MetricsBuilder) RecordElasticsearchIndexOperationsTimeDataPoint(ts pcommon.Timestamp, val int64, operationAttributeValue AttributeOperation, indexAggregationTypeAttributeValue AttributeIndexAggregationType) { mb.metricElasticsearchIndexOperationsTime.recordDataPoint(mb.startTime, ts, val, operationAttributeValue.String(), indexAggregationTypeAttributeValue.String()) } // RecordElasticsearchIndexSegmentsCountDataPoint adds a data point to elasticsearch.index.segments.count metric. func (mb *MetricsBuilder) RecordElasticsearchIndexSegmentsCountDataPoint(ts pcommon.Timestamp, val int64, indexAggregationTypeAttributeValue AttributeIndexAggregationType) { mb.metricElasticsearchIndexSegmentsCount.recordDataPoint(mb.startTime, ts, val, indexAggregationTypeAttributeValue.String()) } // RecordElasticsearchIndexSegmentsMemoryDataPoint adds a data point to elasticsearch.index.segments.memory metric. func (mb *MetricsBuilder) RecordElasticsearchIndexSegmentsMemoryDataPoint(ts pcommon.Timestamp, val int64, indexAggregationTypeAttributeValue AttributeIndexAggregationType, segmentsMemoryObjectTypeAttributeValue AttributeSegmentsMemoryObjectType) { mb.metricElasticsearchIndexSegmentsMemory.recordDataPoint(mb.startTime, ts, val, indexAggregationTypeAttributeValue.String(), segmentsMemoryObjectTypeAttributeValue.String()) } // RecordElasticsearchIndexSegmentsSizeDataPoint adds a data point to elasticsearch.index.segments.size metric. func (mb *MetricsBuilder) RecordElasticsearchIndexSegmentsSizeDataPoint(ts pcommon.Timestamp, val int64, indexAggregationTypeAttributeValue AttributeIndexAggregationType) { mb.metricElasticsearchIndexSegmentsSize.recordDataPoint(mb.startTime, ts, val, indexAggregationTypeAttributeValue.String()) } // RecordElasticsearchIndexShardsSizeDataPoint adds a data point to elasticsearch.index.shards.size metric. func (mb *MetricsBuilder) RecordElasticsearchIndexShardsSizeDataPoint(ts pcommon.Timestamp, val int64, indexAggregationTypeAttributeValue AttributeIndexAggregationType) { mb.metricElasticsearchIndexShardsSize.recordDataPoint(mb.startTime, ts, val, indexAggregationTypeAttributeValue.String()) } // RecordElasticsearchIndexTranslogOperationsDataPoint adds a data point to elasticsearch.index.translog.operations metric. func (mb *MetricsBuilder) RecordElasticsearchIndexTranslogOperationsDataPoint(ts pcommon.Timestamp, val int64, indexAggregationTypeAttributeValue AttributeIndexAggregationType) { mb.metricElasticsearchIndexTranslogOperations.recordDataPoint(mb.startTime, ts, val, indexAggregationTypeAttributeValue.String()) } // RecordElasticsearchIndexTranslogSizeDataPoint adds a data point to elasticsearch.index.translog.size metric. func (mb *MetricsBuilder) RecordElasticsearchIndexTranslogSizeDataPoint(ts pcommon.Timestamp, val int64, indexAggregationTypeAttributeValue AttributeIndexAggregationType) { mb.metricElasticsearchIndexTranslogSize.recordDataPoint(mb.startTime, ts, val, indexAggregationTypeAttributeValue.String()) } // RecordElasticsearchIndexingPressureMemoryLimitDataPoint adds a data point to elasticsearch.indexing_pressure.memory.limit metric. func (mb *MetricsBuilder) RecordElasticsearchIndexingPressureMemoryLimitDataPoint(ts pcommon.Timestamp, val int64) { mb.metricElasticsearchIndexingPressureMemoryLimit.recordDataPoint(mb.startTime, ts, val) } // RecordElasticsearchIndexingPressureMemoryTotalPrimaryRejectionsDataPoint adds a data point to elasticsearch.indexing_pressure.memory.total.primary_rejections metric. func (mb *MetricsBuilder) RecordElasticsearchIndexingPressureMemoryTotalPrimaryRejectionsDataPoint(ts pcommon.Timestamp, val int64) { mb.metricElasticsearchIndexingPressureMemoryTotalPrimaryRejections.recordDataPoint(mb.startTime, ts, val) } // RecordElasticsearchIndexingPressureMemoryTotalReplicaRejectionsDataPoint adds a data point to elasticsearch.indexing_pressure.memory.total.replica_rejections metric. func (mb *MetricsBuilder) RecordElasticsearchIndexingPressureMemoryTotalReplicaRejectionsDataPoint(ts pcommon.Timestamp, val int64) { mb.metricElasticsearchIndexingPressureMemoryTotalReplicaRejections.recordDataPoint(mb.startTime, ts, val) } // RecordElasticsearchMemoryIndexingPressureDataPoint adds a data point to elasticsearch.memory.indexing_pressure metric. func (mb *MetricsBuilder) RecordElasticsearchMemoryIndexingPressureDataPoint(ts pcommon.Timestamp, val int64, indexingPressureStageAttributeValue AttributeIndexingPressureStage) { mb.metricElasticsearchMemoryIndexingPressure.recordDataPoint(mb.startTime, ts, val, indexingPressureStageAttributeValue.String()) } // RecordElasticsearchNodeCacheCountDataPoint adds a data point to elasticsearch.node.cache.count metric. func (mb *MetricsBuilder) RecordElasticsearchNodeCacheCountDataPoint(ts pcommon.Timestamp, val int64, queryCacheCountTypeAttributeValue AttributeQueryCacheCountType) { mb.metricElasticsearchNodeCacheCount.recordDataPoint(mb.startTime, ts, val, queryCacheCountTypeAttributeValue.String()) } // RecordElasticsearchNodeCacheEvictionsDataPoint adds a data point to elasticsearch.node.cache.evictions metric. func (mb *MetricsBuilder) RecordElasticsearchNodeCacheEvictionsDataPoint(ts pcommon.Timestamp, val int64, cacheNameAttributeValue AttributeCacheName) { mb.metricElasticsearchNodeCacheEvictions.recordDataPoint(mb.startTime, ts, val, cacheNameAttributeValue.String()) } // RecordElasticsearchNodeCacheMemoryUsageDataPoint adds a data point to elasticsearch.node.cache.memory.usage metric. func (mb *MetricsBuilder) RecordElasticsearchNodeCacheMemoryUsageDataPoint(ts pcommon.Timestamp, val int64, cacheNameAttributeValue AttributeCacheName) { mb.metricElasticsearchNodeCacheMemoryUsage.recordDataPoint(mb.startTime, ts, val, cacheNameAttributeValue.String()) } // RecordElasticsearchNodeCacheSizeDataPoint adds a data point to elasticsearch.node.cache.size metric. func (mb *MetricsBuilder) RecordElasticsearchNodeCacheSizeDataPoint(ts pcommon.Timestamp, val int64) { mb.metricElasticsearchNodeCacheSize.recordDataPoint(mb.startTime, ts, val) } // RecordElasticsearchNodeClusterConnectionsDataPoint adds a data point to elasticsearch.node.cluster.connections metric. func (mb *MetricsBuilder) RecordElasticsearchNodeClusterConnectionsDataPoint(ts pcommon.Timestamp, val int64) { mb.metricElasticsearchNodeClusterConnections.recordDataPoint(mb.startTime, ts, val) } // RecordElasticsearchNodeClusterIoDataPoint adds a data point to elasticsearch.node.cluster.io metric. func (mb *MetricsBuilder) RecordElasticsearchNodeClusterIoDataPoint(ts pcommon.Timestamp, val int64, directionAttributeValue AttributeDirection) { mb.metricElasticsearchNodeClusterIo.recordDataPoint(mb.startTime, ts, val, directionAttributeValue.String()) } // RecordElasticsearchNodeDiskIoReadDataPoint adds a data point to elasticsearch.node.disk.io.read metric. func (mb *MetricsBuilder) RecordElasticsearchNodeDiskIoReadDataPoint(ts pcommon.Timestamp, val int64) { mb.metricElasticsearchNodeDiskIoRead.recordDataPoint(mb.startTime, ts, val) } // RecordElasticsearchNodeDiskIoWriteDataPoint adds a data point to elasticsearch.node.disk.io.write metric. func (mb *MetricsBuilder) RecordElasticsearchNodeDiskIoWriteDataPoint(ts pcommon.Timestamp, val int64) { mb.metricElasticsearchNodeDiskIoWrite.recordDataPoint(mb.startTime, ts, val) } // RecordElasticsearchNodeDocumentsDataPoint adds a data point to elasticsearch.node.documents metric. func (mb *MetricsBuilder) RecordElasticsearchNodeDocumentsDataPoint(ts pcommon.Timestamp, val int64, documentStateAttributeValue AttributeDocumentState) { mb.metricElasticsearchNodeDocuments.recordDataPoint(mb.startTime, ts, val, documentStateAttributeValue.String()) } // RecordElasticsearchNodeFsDiskAvailableDataPoint adds a data point to elasticsearch.node.fs.disk.available metric. func (mb *MetricsBuilder) RecordElasticsearchNodeFsDiskAvailableDataPoint(ts pcommon.Timestamp, val int64) { mb.metricElasticsearchNodeFsDiskAvailable.recordDataPoint(mb.startTime, ts, val) } // RecordElasticsearchNodeFsDiskFreeDataPoint adds a data point to elasticsearch.node.fs.disk.free metric. func (mb *MetricsBuilder) RecordElasticsearchNodeFsDiskFreeDataPoint(ts pcommon.Timestamp, val int64) { mb.metricElasticsearchNodeFsDiskFree.recordDataPoint(mb.startTime, ts, val) } // RecordElasticsearchNodeFsDiskTotalDataPoint adds a data point to elasticsearch.node.fs.disk.total metric. func (mb *MetricsBuilder) RecordElasticsearchNodeFsDiskTotalDataPoint(ts pcommon.Timestamp, val int64) { mb.metricElasticsearchNodeFsDiskTotal.recordDataPoint(mb.startTime, ts, val) } // RecordElasticsearchNodeHTTPConnectionsDataPoint adds a data point to elasticsearch.node.http.connections metric. func (mb *MetricsBuilder) RecordElasticsearchNodeHTTPConnectionsDataPoint(ts pcommon.Timestamp, val int64) { mb.metricElasticsearchNodeHTTPConnections.recordDataPoint(mb.startTime, ts, val) } // RecordElasticsearchNodeIngestDocumentsDataPoint adds a data point to elasticsearch.node.ingest.documents metric. func (mb *MetricsBuilder) RecordElasticsearchNodeIngestDocumentsDataPoint(ts pcommon.Timestamp, val int64) { mb.metricElasticsearchNodeIngestDocuments.recordDataPoint(mb.startTime, ts, val) } // RecordElasticsearchNodeIngestDocumentsCurrentDataPoint adds a data point to elasticsearch.node.ingest.documents.current metric. func (mb *MetricsBuilder) RecordElasticsearchNodeIngestDocumentsCurrentDataPoint(ts pcommon.Timestamp, val int64) { mb.metricElasticsearchNodeIngestDocumentsCurrent.recordDataPoint(mb.startTime, ts, val) } // RecordElasticsearchNodeIngestOperationsFailedDataPoint adds a data point to elasticsearch.node.ingest.operations.failed metric. func (mb *MetricsBuilder) RecordElasticsearchNodeIngestOperationsFailedDataPoint(ts pcommon.Timestamp, val int64) { mb.metricElasticsearchNodeIngestOperationsFailed.recordDataPoint(mb.startTime, ts, val) } // RecordElasticsearchNodeOpenFilesDataPoint adds a data point to elasticsearch.node.open_files metric. func (mb *MetricsBuilder) RecordElasticsearchNodeOpenFilesDataPoint(ts pcommon.Timestamp, val int64) { mb.metricElasticsearchNodeOpenFiles.recordDataPoint(mb.startTime, ts, val) } // RecordElasticsearchNodeOperationsCompletedDataPoint adds a data point to elasticsearch.node.operations.completed metric. func (mb *MetricsBuilder) RecordElasticsearchNodeOperationsCompletedDataPoint(ts pcommon.Timestamp, val int64, operationAttributeValue AttributeOperation) { mb.metricElasticsearchNodeOperationsCompleted.recordDataPoint(mb.startTime, ts, val, operationAttributeValue.String()) } // RecordElasticsearchNodeOperationsCurrentDataPoint adds a data point to elasticsearch.node.operations.current metric. func (mb *MetricsBuilder) RecordElasticsearchNodeOperationsCurrentDataPoint(ts pcommon.Timestamp, val int64, operationAttributeValue AttributeOperation) { mb.metricElasticsearchNodeOperationsCurrent.recordDataPoint(mb.startTime, ts, val, operationAttributeValue.String()) } // RecordElasticsearchNodeOperationsGetCompletedDataPoint adds a data point to elasticsearch.node.operations.get.completed metric. func (mb *MetricsBuilder) RecordElasticsearchNodeOperationsGetCompletedDataPoint(ts pcommon.Timestamp, val int64, getResultAttributeValue AttributeGetResult) { mb.metricElasticsearchNodeOperationsGetCompleted.recordDataPoint(mb.startTime, ts, val, getResultAttributeValue.String()) } // RecordElasticsearchNodeOperationsGetTimeDataPoint adds a data point to elasticsearch.node.operations.get.time metric. func (mb *MetricsBuilder) RecordElasticsearchNodeOperationsGetTimeDataPoint(ts pcommon.Timestamp, val int64, getResultAttributeValue AttributeGetResult) { mb.metricElasticsearchNodeOperationsGetTime.recordDataPoint(mb.startTime, ts, val, getResultAttributeValue.String()) } // RecordElasticsearchNodeOperationsTimeDataPoint adds a data point to elasticsearch.node.operations.time metric. func (mb *MetricsBuilder) RecordElasticsearchNodeOperationsTimeDataPoint(ts pcommon.Timestamp, val int64, operationAttributeValue AttributeOperation) { mb.metricElasticsearchNodeOperationsTime.recordDataPoint(mb.startTime, ts, val, operationAttributeValue.String()) } // RecordElasticsearchNodePipelineIngestDocumentsCurrentDataPoint adds a data point to elasticsearch.node.pipeline.ingest.documents.current metric. func (mb *MetricsBuilder) RecordElasticsearchNodePipelineIngestDocumentsCurrentDataPoint(ts pcommon.Timestamp, val int64, ingestPipelineNameAttributeValue string) { mb.metricElasticsearchNodePipelineIngestDocumentsCurrent.recordDataPoint(mb.startTime, ts, val, ingestPipelineNameAttributeValue) } // RecordElasticsearchNodePipelineIngestDocumentsPreprocessedDataPoint adds a data point to elasticsearch.node.pipeline.ingest.documents.preprocessed metric. func (mb *MetricsBuilder) RecordElasticsearchNodePipelineIngestDocumentsPreprocessedDataPoint(ts pcommon.Timestamp, val int64, ingestPipelineNameAttributeValue string) { mb.metricElasticsearchNodePipelineIngestDocumentsPreprocessed.recordDataPoint(mb.startTime, ts, val, ingestPipelineNameAttributeValue) } // RecordElasticsearchNodePipelineIngestOperationsFailedDataPoint adds a data point to elasticsearch.node.pipeline.ingest.operations.failed metric. func (mb *MetricsBuilder) RecordElasticsearchNodePipelineIngestOperationsFailedDataPoint(ts pcommon.Timestamp, val int64, ingestPipelineNameAttributeValue string) { mb.metricElasticsearchNodePipelineIngestOperationsFailed.recordDataPoint(mb.startTime, ts, val, ingestPipelineNameAttributeValue) } // RecordElasticsearchNodeScriptCacheEvictionsDataPoint adds a data point to elasticsearch.node.script.cache_evictions metric. func (mb *MetricsBuilder) RecordElasticsearchNodeScriptCacheEvictionsDataPoint(ts pcommon.Timestamp, val int64) { mb.metricElasticsearchNodeScriptCacheEvictions.recordDataPoint(mb.startTime, ts, val) } // RecordElasticsearchNodeScriptCompilationLimitTriggeredDataPoint adds a data point to elasticsearch.node.script.compilation_limit_triggered metric. func (mb *MetricsBuilder) RecordElasticsearchNodeScriptCompilationLimitTriggeredDataPoint(ts pcommon.Timestamp, val int64) { mb.metricElasticsearchNodeScriptCompilationLimitTriggered.recordDataPoint(mb.startTime, ts, val) } // RecordElasticsearchNodeScriptCompilationsDataPoint adds a data point to elasticsearch.node.script.compilations metric. func (mb *MetricsBuilder) RecordElasticsearchNodeScriptCompilationsDataPoint(ts pcommon.Timestamp, val int64) { mb.metricElasticsearchNodeScriptCompilations.recordDataPoint(mb.startTime, ts, val) } // RecordElasticsearchNodeSegmentsMemoryDataPoint adds a data point to elasticsearch.node.segments.memory metric. func (mb *MetricsBuilder) RecordElasticsearchNodeSegmentsMemoryDataPoint(ts pcommon.Timestamp, val int64, segmentsMemoryObjectTypeAttributeValue AttributeSegmentsMemoryObjectType) { mb.metricElasticsearchNodeSegmentsMemory.recordDataPoint(mb.startTime, ts, val, segmentsMemoryObjectTypeAttributeValue.String()) } // RecordElasticsearchNodeShardsDataSetSizeDataPoint adds a data point to elasticsearch.node.shards.data_set.size metric. func (mb *MetricsBuilder) RecordElasticsearchNodeShardsDataSetSizeDataPoint(ts pcommon.Timestamp, val int64) { mb.metricElasticsearchNodeShardsDataSetSize.recordDataPoint(mb.startTime, ts, val) } // RecordElasticsearchNodeShardsReservedSizeDataPoint adds a data point to elasticsearch.node.shards.reserved.size metric. func (mb *MetricsBuilder) RecordElasticsearchNodeShardsReservedSizeDataPoint(ts pcommon.Timestamp, val int64) { mb.metricElasticsearchNodeShardsReservedSize.recordDataPoint(mb.startTime, ts, val) } // RecordElasticsearchNodeShardsSizeDataPoint adds a data point to elasticsearch.node.shards.size metric. func (mb *MetricsBuilder) RecordElasticsearchNodeShardsSizeDataPoint(ts pcommon.Timestamp, val int64) { mb.metricElasticsearchNodeShardsSize.recordDataPoint(mb.startTime, ts, val) } // RecordElasticsearchNodeThreadPoolTasksFinishedDataPoint adds a data point to elasticsearch.node.thread_pool.tasks.finished metric. func (mb *MetricsBuilder) RecordElasticsearchNodeThreadPoolTasksFinishedDataPoint(ts pcommon.Timestamp, val int64, threadPoolNameAttributeValue string, taskStateAttributeValue AttributeTaskState) { mb.metricElasticsearchNodeThreadPoolTasksFinished.recordDataPoint(mb.startTime, ts, val, threadPoolNameAttributeValue, taskStateAttributeValue.String()) } // RecordElasticsearchNodeThreadPoolTasksQueuedDataPoint adds a data point to elasticsearch.node.thread_pool.tasks.queued metric. func (mb *MetricsBuilder) RecordElasticsearchNodeThreadPoolTasksQueuedDataPoint(ts pcommon.Timestamp, val int64, threadPoolNameAttributeValue string) { mb.metricElasticsearchNodeThreadPoolTasksQueued.recordDataPoint(mb.startTime, ts, val, threadPoolNameAttributeValue) } // RecordElasticsearchNodeThreadPoolThreadsDataPoint adds a data point to elasticsearch.node.thread_pool.threads metric. func (mb *MetricsBuilder) RecordElasticsearchNodeThreadPoolThreadsDataPoint(ts pcommon.Timestamp, val int64, threadPoolNameAttributeValue string, threadStateAttributeValue AttributeThreadState) { mb.metricElasticsearchNodeThreadPoolThreads.recordDataPoint(mb.startTime, ts, val, threadPoolNameAttributeValue, threadStateAttributeValue.String()) } // RecordElasticsearchNodeTranslogOperationsDataPoint adds a data point to elasticsearch.node.translog.operations metric. func (mb *MetricsBuilder) RecordElasticsearchNodeTranslogOperationsDataPoint(ts pcommon.Timestamp, val int64) { mb.metricElasticsearchNodeTranslogOperations.recordDataPoint(mb.startTime, ts, val) } // RecordElasticsearchNodeTranslogSizeDataPoint adds a data point to elasticsearch.node.translog.size metric. func (mb *MetricsBuilder) RecordElasticsearchNodeTranslogSizeDataPoint(ts pcommon.Timestamp, val int64) { mb.metricElasticsearchNodeTranslogSize.recordDataPoint(mb.startTime, ts, val) } // RecordElasticsearchNodeTranslogUncommittedSizeDataPoint adds a data point to elasticsearch.node.translog.uncommitted.size metric. func (mb *MetricsBuilder) RecordElasticsearchNodeTranslogUncommittedSizeDataPoint(ts pcommon.Timestamp, val int64) { mb.metricElasticsearchNodeTranslogUncommittedSize.recordDataPoint(mb.startTime, ts, val) } // RecordElasticsearchOsCPULoadAvg15mDataPoint adds a data point to elasticsearch.os.cpu.load_avg.15m metric. func (mb *MetricsBuilder) RecordElasticsearchOsCPULoadAvg15mDataPoint(ts pcommon.Timestamp, val float64) { mb.metricElasticsearchOsCPULoadAvg15m.recordDataPoint(mb.startTime, ts, val) } // RecordElasticsearchOsCPULoadAvg1mDataPoint adds a data point to elasticsearch.os.cpu.load_avg.1m metric. func (mb *MetricsBuilder) RecordElasticsearchOsCPULoadAvg1mDataPoint(ts pcommon.Timestamp, val float64) { mb.metricElasticsearchOsCPULoadAvg1m.recordDataPoint(mb.startTime, ts, val) } // RecordElasticsearchOsCPULoadAvg5mDataPoint adds a data point to elasticsearch.os.cpu.load_avg.5m metric. func (mb *MetricsBuilder) RecordElasticsearchOsCPULoadAvg5mDataPoint(ts pcommon.Timestamp, val float64) { mb.metricElasticsearchOsCPULoadAvg5m.recordDataPoint(mb.startTime, ts, val) } // RecordElasticsearchOsCPUUsageDataPoint adds a data point to elasticsearch.os.cpu.usage metric. func (mb *MetricsBuilder) RecordElasticsearchOsCPUUsageDataPoint(ts pcommon.Timestamp, val int64) { mb.metricElasticsearchOsCPUUsage.recordDataPoint(mb.startTime, ts, val) } // RecordElasticsearchOsMemoryDataPoint adds a data point to elasticsearch.os.memory metric. func (mb *MetricsBuilder) RecordElasticsearchOsMemoryDataPoint(ts pcommon.Timestamp, val int64, memoryStateAttributeValue AttributeMemoryState) { mb.metricElasticsearchOsMemory.recordDataPoint(mb.startTime, ts, val, memoryStateAttributeValue.String()) } // RecordElasticsearchProcessCPUTimeDataPoint adds a data point to elasticsearch.process.cpu.time metric. func (mb *MetricsBuilder) RecordElasticsearchProcessCPUTimeDataPoint(ts pcommon.Timestamp, val int64) { mb.metricElasticsearchProcessCPUTime.recordDataPoint(mb.startTime, ts, val) } // RecordElasticsearchProcessCPUUsageDataPoint adds a data point to elasticsearch.process.cpu.usage metric. func (mb *MetricsBuilder) RecordElasticsearchProcessCPUUsageDataPoint(ts pcommon.Timestamp, val float64) { mb.metricElasticsearchProcessCPUUsage.recordDataPoint(mb.startTime, ts, val) } // RecordElasticsearchProcessMemoryVirtualDataPoint adds a data point to elasticsearch.process.memory.virtual metric. func (mb *MetricsBuilder) RecordElasticsearchProcessMemoryVirtualDataPoint(ts pcommon.Timestamp, val int64) { mb.metricElasticsearchProcessMemoryVirtual.recordDataPoint(mb.startTime, ts, val) } // RecordJvmClassesLoadedDataPoint adds a data point to jvm.classes.loaded metric. func (mb *MetricsBuilder) RecordJvmClassesLoadedDataPoint(ts pcommon.Timestamp, val int64) { mb.metricJvmClassesLoaded.recordDataPoint(mb.startTime, ts, val) } // RecordJvmGcCollectionsCountDataPoint adds a data point to jvm.gc.collections.count metric. func (mb *MetricsBuilder) RecordJvmGcCollectionsCountDataPoint(ts pcommon.Timestamp, val int64, collectorNameAttributeValue string) { mb.metricJvmGcCollectionsCount.recordDataPoint(mb.startTime, ts, val, collectorNameAttributeValue) } // RecordJvmGcCollectionsElapsedDataPoint adds a data point to jvm.gc.collections.elapsed metric. func (mb *MetricsBuilder) RecordJvmGcCollectionsElapsedDataPoint(ts pcommon.Timestamp, val int64, collectorNameAttributeValue string) { mb.metricJvmGcCollectionsElapsed.recordDataPoint(mb.startTime, ts, val, collectorNameAttributeValue) } // RecordJvmMemoryHeapCommittedDataPoint adds a data point to jvm.memory.heap.committed metric. func (mb *MetricsBuilder) RecordJvmMemoryHeapCommittedDataPoint(ts pcommon.Timestamp, val int64) { mb.metricJvmMemoryHeapCommitted.recordDataPoint(mb.startTime, ts, val) } // RecordJvmMemoryHeapMaxDataPoint adds a data point to jvm.memory.heap.max metric. func (mb *MetricsBuilder) RecordJvmMemoryHeapMaxDataPoint(ts pcommon.Timestamp, val int64) { mb.metricJvmMemoryHeapMax.recordDataPoint(mb.startTime, ts, val) } // RecordJvmMemoryHeapUsedDataPoint adds a data point to jvm.memory.heap.used metric. func (mb *MetricsBuilder) RecordJvmMemoryHeapUsedDataPoint(ts pcommon.Timestamp, val int64) { mb.metricJvmMemoryHeapUsed.recordDataPoint(mb.startTime, ts, val) } // RecordJvmMemoryHeapUtilizationDataPoint adds a data point to jvm.memory.heap.utilization metric. func (mb *MetricsBuilder) RecordJvmMemoryHeapUtilizationDataPoint(ts pcommon.Timestamp, val float64) { mb.metricJvmMemoryHeapUtilization.recordDataPoint(mb.startTime, ts, val) } // RecordJvmMemoryNonheapCommittedDataPoint adds a data point to jvm.memory.nonheap.committed metric. func (mb *MetricsBuilder) RecordJvmMemoryNonheapCommittedDataPoint(ts pcommon.Timestamp, val int64) { mb.metricJvmMemoryNonheapCommitted.recordDataPoint(mb.startTime, ts, val) } // RecordJvmMemoryNonheapUsedDataPoint adds a data point to jvm.memory.nonheap.used metric. func (mb *MetricsBuilder) RecordJvmMemoryNonheapUsedDataPoint(ts pcommon.Timestamp, val int64) { mb.metricJvmMemoryNonheapUsed.recordDataPoint(mb.startTime, ts, val) } // RecordJvmMemoryPoolMaxDataPoint adds a data point to jvm.memory.pool.max metric. func (mb *MetricsBuilder) RecordJvmMemoryPoolMaxDataPoint(ts pcommon.Timestamp, val int64, memoryPoolNameAttributeValue string) { mb.metricJvmMemoryPoolMax.recordDataPoint(mb.startTime, ts, val, memoryPoolNameAttributeValue) } // RecordJvmMemoryPoolUsedDataPoint adds a data point to jvm.memory.pool.used metric. func (mb *MetricsBuilder) RecordJvmMemoryPoolUsedDataPoint(ts pcommon.Timestamp, val int64, memoryPoolNameAttributeValue string) { mb.metricJvmMemoryPoolUsed.recordDataPoint(mb.startTime, ts, val, memoryPoolNameAttributeValue) } // RecordJvmThreadsCountDataPoint adds a data point to jvm.threads.count metric. func (mb *MetricsBuilder) RecordJvmThreadsCountDataPoint(ts pcommon.Timestamp, val int64) { mb.metricJvmThreadsCount.recordDataPoint(mb.startTime, ts, val) } // Reset resets metrics builder to its initial state. It should be used when external metrics source is restarted, // and metrics builder should update its startTime and reset it's internal state accordingly. func (mb *MetricsBuilder) Reset(options ...MetricBuilderOption) { mb.startTime = pcommon.NewTimestampFromTime(time.Now()) for _, op := range options { op.apply(mb) } }