receiver/k8sclusterreceiver/internal/metadata/generated_metrics.go
// Code generated by mdatagen. DO NOT EDIT.
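// Package metadata contains mdatagen-generated helpers for building and
// recording the k8sclusterreceiver's metrics.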
package metadata
import (
"time"
"go.opentelemetry.io/collector/component"
"go.opentelemetry.io/collector/filter"
"go.opentelemetry.io/collector/pdata/pcommon"
"go.opentelemetry.io/collector/pdata/pmetric"
"go.opentelemetry.io/collector/receiver"
conventions "go.opentelemetry.io/collector/semconv/v1.18.0"
)
var MetricsInfo = metricsInfo{
K8sContainerCPULimit: metricInfo{
Name: "k8s.container.cpu_limit",
},
K8sContainerCPURequest: metricInfo{
Name: "k8s.container.cpu_request",
},
K8sContainerEphemeralstorageLimit: metricInfo{
Name: "k8s.container.ephemeralstorage_limit",
},
K8sContainerEphemeralstorageRequest: metricInfo{
Name: "k8s.container.ephemeralstorage_request",
},
K8sContainerMemoryLimit: metricInfo{
Name: "k8s.container.memory_limit",
},
K8sContainerMemoryRequest: metricInfo{
Name: "k8s.container.memory_request",
},
K8sContainerReady: metricInfo{
Name: "k8s.container.ready",
},
K8sContainerRestarts: metricInfo{
Name: "k8s.container.restarts",
},
K8sContainerStorageLimit: metricInfo{
Name: "k8s.container.storage_limit",
},
K8sContainerStorageRequest: metricInfo{
Name: "k8s.container.storage_request",
},
K8sCronjobActiveJobs: metricInfo{
Name: "k8s.cronjob.active_jobs",
},
K8sDaemonsetCurrentScheduledNodes: metricInfo{
Name: "k8s.daemonset.current_scheduled_nodes",
},
K8sDaemonsetDesiredScheduledNodes: metricInfo{
Name: "k8s.daemonset.desired_scheduled_nodes",
},
K8sDaemonsetMisscheduledNodes: metricInfo{
Name: "k8s.daemonset.misscheduled_nodes",
},
K8sDaemonsetReadyNodes: metricInfo{
Name: "k8s.daemonset.ready_nodes",
},
K8sDeploymentAvailable: metricInfo{
Name: "k8s.deployment.available",
},
K8sDeploymentDesired: metricInfo{
Name: "k8s.deployment.desired",
},
K8sHpaCurrentReplicas: metricInfo{
Name: "k8s.hpa.current_replicas",
},
K8sHpaDesiredReplicas: metricInfo{
Name: "k8s.hpa.desired_replicas",
},
K8sHpaMaxReplicas: metricInfo{
Name: "k8s.hpa.max_replicas",
},
K8sHpaMinReplicas: metricInfo{
Name: "k8s.hpa.min_replicas",
},
K8sJobActivePods: metricInfo{
Name: "k8s.job.active_pods",
},
K8sJobDesiredSuccessfulPods: metricInfo{
Name: "k8s.job.desired_successful_pods",
},
K8sJobFailedPods: metricInfo{
Name: "k8s.job.failed_pods",
},
K8sJobMaxParallelPods: metricInfo{
Name: "k8s.job.max_parallel_pods",
},
K8sJobSuccessfulPods: metricInfo{
Name: "k8s.job.successful_pods",
},
K8sNamespacePhase: metricInfo{
Name: "k8s.namespace.phase",
},
K8sNodeCondition: metricInfo{
Name: "k8s.node.condition",
},
K8sPodPhase: metricInfo{
Name: "k8s.pod.phase",
},
K8sPodStatusReason: metricInfo{
Name: "k8s.pod.status_reason",
},
K8sReplicasetAvailable: metricInfo{
Name: "k8s.replicaset.available",
},
K8sReplicasetDesired: metricInfo{
Name: "k8s.replicaset.desired",
},
K8sReplicationControllerAvailable: metricInfo{
Name: "k8s.replication_controller.available",
},
K8sReplicationControllerDesired: metricInfo{
Name: "k8s.replication_controller.desired",
},
K8sResourceQuotaHardLimit: metricInfo{
Name: "k8s.resource_quota.hard_limit",
},
K8sResourceQuotaUsed: metricInfo{
Name: "k8s.resource_quota.used",
},
K8sStatefulsetCurrentPods: metricInfo{
Name: "k8s.statefulset.current_pods",
},
K8sStatefulsetDesiredPods: metricInfo{
Name: "k8s.statefulset.desired_pods",
},
K8sStatefulsetReadyPods: metricInfo{
Name: "k8s.statefulset.ready_pods",
},
K8sStatefulsetUpdatedPods: metricInfo{
Name: "k8s.statefulset.updated_pods",
},
OpenshiftAppliedclusterquotaLimit: metricInfo{
Name: "openshift.appliedclusterquota.limit",
},
OpenshiftAppliedclusterquotaUsed: metricInfo{
Name: "openshift.appliedclusterquota.used",
},
OpenshiftClusterquotaLimit: metricInfo{
Name: "openshift.clusterquota.limit",
},
OpenshiftClusterquotaUsed: metricInfo{
Name: "openshift.clusterquota.used",
},
}
type metricsInfo struct {
K8sContainerCPULimit metricInfo
K8sContainerCPURequest metricInfo
K8sContainerEphemeralstorageLimit metricInfo
K8sContainerEphemeralstorageRequest metricInfo
K8sContainerMemoryLimit metricInfo
K8sContainerMemoryRequest metricInfo
K8sContainerReady metricInfo
K8sContainerRestarts metricInfo
K8sContainerStorageLimit metricInfo
K8sContainerStorageRequest metricInfo
K8sCronjobActiveJobs metricInfo
K8sDaemonsetCurrentScheduledNodes metricInfo
K8sDaemonsetDesiredScheduledNodes metricInfo
K8sDaemonsetMisscheduledNodes metricInfo
K8sDaemonsetReadyNodes metricInfo
K8sDeploymentAvailable metricInfo
K8sDeploymentDesired metricInfo
K8sHpaCurrentReplicas metricInfo
K8sHpaDesiredReplicas metricInfo
K8sHpaMaxReplicas metricInfo
K8sHpaMinReplicas metricInfo
K8sJobActivePods metricInfo
K8sJobDesiredSuccessfulPods metricInfo
K8sJobFailedPods metricInfo
K8sJobMaxParallelPods metricInfo
K8sJobSuccessfulPods metricInfo
K8sNamespacePhase metricInfo
K8sNodeCondition metricInfo
K8sPodPhase metricInfo
K8sPodStatusReason metricInfo
K8sReplicasetAvailable metricInfo
K8sReplicasetDesired metricInfo
K8sReplicationControllerAvailable metricInfo
K8sReplicationControllerDesired metricInfo
K8sResourceQuotaHardLimit metricInfo
K8sResourceQuotaUsed metricInfo
K8sStatefulsetCurrentPods metricInfo
K8sStatefulsetDesiredPods metricInfo
K8sStatefulsetReadyPods metricInfo
K8sStatefulsetUpdatedPods metricInfo
OpenshiftAppliedclusterquotaLimit metricInfo
OpenshiftAppliedclusterquotaUsed metricInfo
OpenshiftClusterquotaLimit metricInfo
OpenshiftClusterquotaUsed metricInfo
}
type metricInfo struct {
Name string
}
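// MetricsInfo and metricInfo expose the canonical metric names defined above
// so callers inside this package (scrapers, tests) can reference them instead
// of hard-coding strings. A minimal, illustrative sketch (the helper below is
// hypothetical; only the Name fields come from the generated catalog):
//
//	func isContainerResourceMetric(name string) bool {
//		return name == MetricsInfo.K8sContainerCPULimit.Name ||
//			name == MetricsInfo.K8sContainerMemoryLimit.Name
//	}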
type metricK8sContainerCPULimit struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills k8s.container.cpu_limit metric with initial data.
func (m *metricK8sContainerCPULimit) init() {
m.data.SetName("k8s.container.cpu_limit")
m.data.SetDescription("Maximum resource limit set for the container. See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#resourcerequirements-v1-core for details")
m.data.SetUnit("{cpu}")
m.data.SetEmptyGauge()
}
func (m *metricK8sContainerCPULimit) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleValue(val)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricK8sContainerCPULimit) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricK8sContainerCPULimit) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricK8sContainerCPULimit(cfg MetricConfig) metricK8sContainerCPULimit {
m := metricK8sContainerCPULimit{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
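// Each metric type below follows the same lifecycle as
// metricK8sContainerCPULimit above: newMetric* allocates the data buffer only
// when the metric is enabled, recordDataPoint appends a data point (and is a
// no-op when disabled), and emit moves the buffered metric into the
// destination slice and re-initializes the buffer for the next collection
// cycle. A minimal sketch of that flow, assuming a caller already has a
// MetricConfig cfg and a destination pmetric.MetricSlice dest (both
// illustrative):
//
//	m := newMetricK8sContainerCPULimit(cfg)
//	now := pcommon.NewTimestampFromTime(time.Now())
//	m.recordDataPoint(now, now, 0.5) // buffer a single gauge point
//	m.emit(dest)                     // append to dest and reset the buffer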
type metricK8sContainerCPURequest struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills k8s.container.cpu_request metric with initial data.
func (m *metricK8sContainerCPURequest) init() {
m.data.SetName("k8s.container.cpu_request")
m.data.SetDescription("Resource requested for the container. See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#resourcerequirements-v1-core for details")
m.data.SetUnit("{cpu}")
m.data.SetEmptyGauge()
}
func (m *metricK8sContainerCPURequest) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleValue(val)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricK8sContainerCPURequest) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricK8sContainerCPURequest) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricK8sContainerCPURequest(cfg MetricConfig) metricK8sContainerCPURequest {
m := metricK8sContainerCPURequest{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricK8sContainerEphemeralstorageLimit struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills k8s.container.ephemeralstorage_limit metric with initial data.
func (m *metricK8sContainerEphemeralstorageLimit) init() {
m.data.SetName("k8s.container.ephemeralstorage_limit")
m.data.SetDescription("Maximum resource limit set for the container. See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#resourcerequirements-v1-core for details")
m.data.SetUnit("By")
m.data.SetEmptyGauge()
}
func (m *metricK8sContainerEphemeralstorageLimit) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricK8sContainerEphemeralstorageLimit) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricK8sContainerEphemeralstorageLimit) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricK8sContainerEphemeralstorageLimit(cfg MetricConfig) metricK8sContainerEphemeralstorageLimit {
m := metricK8sContainerEphemeralstorageLimit{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricK8sContainerEphemeralstorageRequest struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills k8s.container.ephemeralstorage_request metric with initial data.
func (m *metricK8sContainerEphemeralstorageRequest) init() {
m.data.SetName("k8s.container.ephemeralstorage_request")
m.data.SetDescription("Resource requested for the container. See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#resourcerequirements-v1-core for details")
m.data.SetUnit("By")
m.data.SetEmptyGauge()
}
func (m *metricK8sContainerEphemeralstorageRequest) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricK8sContainerEphemeralstorageRequest) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricK8sContainerEphemeralstorageRequest) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricK8sContainerEphemeralstorageRequest(cfg MetricConfig) metricK8sContainerEphemeralstorageRequest {
m := metricK8sContainerEphemeralstorageRequest{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricK8sContainerMemoryLimit struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills k8s.container.memory_limit metric with initial data.
func (m *metricK8sContainerMemoryLimit) init() {
m.data.SetName("k8s.container.memory_limit")
m.data.SetDescription("Maximum resource limit set for the container. See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#resourcerequirements-v1-core for details")
m.data.SetUnit("By")
m.data.SetEmptyGauge()
}
func (m *metricK8sContainerMemoryLimit) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricK8sContainerMemoryLimit) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricK8sContainerMemoryLimit) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricK8sContainerMemoryLimit(cfg MetricConfig) metricK8sContainerMemoryLimit {
m := metricK8sContainerMemoryLimit{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricK8sContainerMemoryRequest struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills k8s.container.memory_request metric with initial data.
func (m *metricK8sContainerMemoryRequest) init() {
m.data.SetName("k8s.container.memory_request")
m.data.SetDescription("Resource requested for the container. See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#resourcerequirements-v1-core for details")
m.data.SetUnit("By")
m.data.SetEmptyGauge()
}
func (m *metricK8sContainerMemoryRequest) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricK8sContainerMemoryRequest) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricK8sContainerMemoryRequest) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricK8sContainerMemoryRequest(cfg MetricConfig) metricK8sContainerMemoryRequest {
m := metricK8sContainerMemoryRequest{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricK8sContainerReady struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills k8s.container.ready metric with initial data.
func (m *metricK8sContainerReady) init() {
m.data.SetName("k8s.container.ready")
m.data.SetDescription("Whether a container has passed its readiness probe (0 for no, 1 for yes)")
m.data.SetUnit("")
m.data.SetEmptyGauge()
}
func (m *metricK8sContainerReady) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricK8sContainerReady) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricK8sContainerReady) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricK8sContainerReady(cfg MetricConfig) metricK8sContainerReady {
m := metricK8sContainerReady{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricK8sContainerRestarts struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills k8s.container.restarts metric with initial data.
func (m *metricK8sContainerRestarts) init() {
m.data.SetName("k8s.container.restarts")
m.data.SetDescription("How many times the container has restarted in the recent past. This value is pulled directly from the K8s API and the value can go indefinitely high and be reset to 0 at any time depending on how your kubelet is configured to prune dead containers. It is best to not depend too much on the exact value but rather look at it as either == 0, in which case you can conclude there were no restarts in the recent past, or > 0, in which case you can conclude there were restarts in the recent past, and not try and analyze the value beyond that.")
m.data.SetUnit("{restart}")
m.data.SetEmptyGauge()
}
func (m *metricK8sContainerRestarts) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricK8sContainerRestarts) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricK8sContainerRestarts) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricK8sContainerRestarts(cfg MetricConfig) metricK8sContainerRestarts {
m := metricK8sContainerRestarts{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricK8sContainerStorageLimit struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills k8s.container.storage_limit metric with initial data.
func (m *metricK8sContainerStorageLimit) init() {
m.data.SetName("k8s.container.storage_limit")
m.data.SetDescription("Maximum resource limit set for the container. See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#resourcerequirements-v1-core for details")
m.data.SetUnit("By")
m.data.SetEmptyGauge()
}
func (m *metricK8sContainerStorageLimit) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricK8sContainerStorageLimit) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricK8sContainerStorageLimit) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricK8sContainerStorageLimit(cfg MetricConfig) metricK8sContainerStorageLimit {
m := metricK8sContainerStorageLimit{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricK8sContainerStorageRequest struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills k8s.container.storage_request metric with initial data.
func (m *metricK8sContainerStorageRequest) init() {
m.data.SetName("k8s.container.storage_request")
m.data.SetDescription("Resource requested for the container. See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#resourcerequirements-v1-core for details")
m.data.SetUnit("By")
m.data.SetEmptyGauge()
}
func (m *metricK8sContainerStorageRequest) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricK8sContainerStorageRequest) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricK8sContainerStorageRequest) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricK8sContainerStorageRequest(cfg MetricConfig) metricK8sContainerStorageRequest {
m := metricK8sContainerStorageRequest{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricK8sCronjobActiveJobs struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills k8s.cronjob.active_jobs metric with initial data.
func (m *metricK8sCronjobActiveJobs) init() {
m.data.SetName("k8s.cronjob.active_jobs")
m.data.SetDescription("The number of actively running jobs for a cronjob")
m.data.SetUnit("{job}")
m.data.SetEmptyGauge()
}
func (m *metricK8sCronjobActiveJobs) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricK8sCronjobActiveJobs) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricK8sCronjobActiveJobs) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricK8sCronjobActiveJobs(cfg MetricConfig) metricK8sCronjobActiveJobs {
m := metricK8sCronjobActiveJobs{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricK8sDaemonsetCurrentScheduledNodes struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills k8s.daemonset.current_scheduled_nodes metric with initial data.
func (m *metricK8sDaemonsetCurrentScheduledNodes) init() {
m.data.SetName("k8s.daemonset.current_scheduled_nodes")
m.data.SetDescription("Number of nodes that are running at least 1 daemon pod and are supposed to run the daemon pod")
m.data.SetUnit("{node}")
m.data.SetEmptyGauge()
}
func (m *metricK8sDaemonsetCurrentScheduledNodes) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricK8sDaemonsetCurrentScheduledNodes) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricK8sDaemonsetCurrentScheduledNodes) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricK8sDaemonsetCurrentScheduledNodes(cfg MetricConfig) metricK8sDaemonsetCurrentScheduledNodes {
m := metricK8sDaemonsetCurrentScheduledNodes{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricK8sDaemonsetDesiredScheduledNodes struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills k8s.daemonset.desired_scheduled_nodes metric with initial data.
func (m *metricK8sDaemonsetDesiredScheduledNodes) init() {
m.data.SetName("k8s.daemonset.desired_scheduled_nodes")
m.data.SetDescription("Number of nodes that should be running the daemon pod (including nodes currently running the daemon pod)")
m.data.SetUnit("{node}")
m.data.SetEmptyGauge()
}
func (m *metricK8sDaemonsetDesiredScheduledNodes) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricK8sDaemonsetDesiredScheduledNodes) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricK8sDaemonsetDesiredScheduledNodes) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricK8sDaemonsetDesiredScheduledNodes(cfg MetricConfig) metricK8sDaemonsetDesiredScheduledNodes {
m := metricK8sDaemonsetDesiredScheduledNodes{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricK8sDaemonsetMisscheduledNodes struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills k8s.daemonset.misscheduled_nodes metric with initial data.
func (m *metricK8sDaemonsetMisscheduledNodes) init() {
m.data.SetName("k8s.daemonset.misscheduled_nodes")
m.data.SetDescription("Number of nodes that are running the daemon pod, but are not supposed to run the daemon pod")
m.data.SetUnit("{node}")
m.data.SetEmptyGauge()
}
func (m *metricK8sDaemonsetMisscheduledNodes) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricK8sDaemonsetMisscheduledNodes) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricK8sDaemonsetMisscheduledNodes) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricK8sDaemonsetMisscheduledNodes(cfg MetricConfig) metricK8sDaemonsetMisscheduledNodes {
m := metricK8sDaemonsetMisscheduledNodes{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricK8sDaemonsetReadyNodes struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills k8s.daemonset.ready_nodes metric with initial data.
func (m *metricK8sDaemonsetReadyNodes) init() {
m.data.SetName("k8s.daemonset.ready_nodes")
m.data.SetDescription("Number of nodes that should be running the daemon pod and have one or more of the daemon pod running and ready")
m.data.SetUnit("{node}")
m.data.SetEmptyGauge()
}
func (m *metricK8sDaemonsetReadyNodes) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricK8sDaemonsetReadyNodes) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricK8sDaemonsetReadyNodes) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricK8sDaemonsetReadyNodes(cfg MetricConfig) metricK8sDaemonsetReadyNodes {
m := metricK8sDaemonsetReadyNodes{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricK8sDeploymentAvailable struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills k8s.deployment.available metric with initial data.
func (m *metricK8sDeploymentAvailable) init() {
m.data.SetName("k8s.deployment.available")
m.data.SetDescription("Total number of available pods (ready for at least minReadySeconds) targeted by this deployment")
m.data.SetUnit("{pod}")
m.data.SetEmptyGauge()
}
func (m *metricK8sDeploymentAvailable) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricK8sDeploymentAvailable) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricK8sDeploymentAvailable) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricK8sDeploymentAvailable(cfg MetricConfig) metricK8sDeploymentAvailable {
m := metricK8sDeploymentAvailable{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricK8sDeploymentDesired struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills k8s.deployment.desired metric with initial data.
func (m *metricK8sDeploymentDesired) init() {
m.data.SetName("k8s.deployment.desired")
m.data.SetDescription("Number of desired pods in this deployment")
m.data.SetUnit("{pod}")
m.data.SetEmptyGauge()
}
func (m *metricK8sDeploymentDesired) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricK8sDeploymentDesired) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricK8sDeploymentDesired) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricK8sDeploymentDesired(cfg MetricConfig) metricK8sDeploymentDesired {
m := metricK8sDeploymentDesired{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricK8sHpaCurrentReplicas struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills k8s.hpa.current_replicas metric with initial data.
func (m *metricK8sHpaCurrentReplicas) init() {
m.data.SetName("k8s.hpa.current_replicas")
m.data.SetDescription("Current number of pod replicas managed by this autoscaler.")
m.data.SetUnit("{pod}")
m.data.SetEmptyGauge()
}
func (m *metricK8sHpaCurrentReplicas) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricK8sHpaCurrentReplicas) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricK8sHpaCurrentReplicas) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricK8sHpaCurrentReplicas(cfg MetricConfig) metricK8sHpaCurrentReplicas {
m := metricK8sHpaCurrentReplicas{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricK8sHpaDesiredReplicas struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills k8s.hpa.desired_replicas metric with initial data.
func (m *metricK8sHpaDesiredReplicas) init() {
m.data.SetName("k8s.hpa.desired_replicas")
m.data.SetDescription("Desired number of pod replicas managed by this autoscaler.")
m.data.SetUnit("{pod}")
m.data.SetEmptyGauge()
}
func (m *metricK8sHpaDesiredReplicas) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricK8sHpaDesiredReplicas) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricK8sHpaDesiredReplicas) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricK8sHpaDesiredReplicas(cfg MetricConfig) metricK8sHpaDesiredReplicas {
m := metricK8sHpaDesiredReplicas{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricK8sHpaMaxReplicas struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills k8s.hpa.max_replicas metric with initial data.
func (m *metricK8sHpaMaxReplicas) init() {
m.data.SetName("k8s.hpa.max_replicas")
m.data.SetDescription("Maximum number of replicas to which the autoscaler can scale up.")
m.data.SetUnit("{pod}")
m.data.SetEmptyGauge()
}
func (m *metricK8sHpaMaxReplicas) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricK8sHpaMaxReplicas) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricK8sHpaMaxReplicas) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricK8sHpaMaxReplicas(cfg MetricConfig) metricK8sHpaMaxReplicas {
m := metricK8sHpaMaxReplicas{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricK8sHpaMinReplicas struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills k8s.hpa.min_replicas metric with initial data.
func (m *metricK8sHpaMinReplicas) init() {
m.data.SetName("k8s.hpa.min_replicas")
m.data.SetDescription("Minimum number of replicas to which the autoscaler can scale up.")
m.data.SetUnit("{pod}")
m.data.SetEmptyGauge()
}
func (m *metricK8sHpaMinReplicas) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricK8sHpaMinReplicas) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricK8sHpaMinReplicas) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricK8sHpaMinReplicas(cfg MetricConfig) metricK8sHpaMinReplicas {
m := metricK8sHpaMinReplicas{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricK8sJobActivePods struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills k8s.job.active_pods metric with initial data.
func (m *metricK8sJobActivePods) init() {
m.data.SetName("k8s.job.active_pods")
m.data.SetDescription("The number of actively running pods for a job")
m.data.SetUnit("{pod}")
m.data.SetEmptyGauge()
}
func (m *metricK8sJobActivePods) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricK8sJobActivePods) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricK8sJobActivePods) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricK8sJobActivePods(cfg MetricConfig) metricK8sJobActivePods {
m := metricK8sJobActivePods{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricK8sJobDesiredSuccessfulPods struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills k8s.job.desired_successful_pods metric with initial data.
func (m *metricK8sJobDesiredSuccessfulPods) init() {
m.data.SetName("k8s.job.desired_successful_pods")
m.data.SetDescription("The desired number of successfully finished pods the job should be run with")
m.data.SetUnit("{pod}")
m.data.SetEmptyGauge()
}
func (m *metricK8sJobDesiredSuccessfulPods) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricK8sJobDesiredSuccessfulPods) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricK8sJobDesiredSuccessfulPods) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricK8sJobDesiredSuccessfulPods(cfg MetricConfig) metricK8sJobDesiredSuccessfulPods {
m := metricK8sJobDesiredSuccessfulPods{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricK8sJobFailedPods struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills k8s.job.failed_pods metric with initial data.
func (m *metricK8sJobFailedPods) init() {
m.data.SetName("k8s.job.failed_pods")
m.data.SetDescription("The number of pods which reached phase Failed for a job")
m.data.SetUnit("{pod}")
m.data.SetEmptyGauge()
}
func (m *metricK8sJobFailedPods) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricK8sJobFailedPods) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricK8sJobFailedPods) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricK8sJobFailedPods(cfg MetricConfig) metricK8sJobFailedPods {
m := metricK8sJobFailedPods{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricK8sJobMaxParallelPods struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills k8s.job.max_parallel_pods metric with initial data.
func (m *metricK8sJobMaxParallelPods) init() {
m.data.SetName("k8s.job.max_parallel_pods")
m.data.SetDescription("The max desired number of pods the job should run at any given time")
m.data.SetUnit("{pod}")
m.data.SetEmptyGauge()
}
func (m *metricK8sJobMaxParallelPods) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricK8sJobMaxParallelPods) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricK8sJobMaxParallelPods) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricK8sJobMaxParallelPods(cfg MetricConfig) metricK8sJobMaxParallelPods {
m := metricK8sJobMaxParallelPods{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricK8sJobSuccessfulPods struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills k8s.job.successful_pods metric with initial data.
func (m *metricK8sJobSuccessfulPods) init() {
m.data.SetName("k8s.job.successful_pods")
m.data.SetDescription("The number of pods which reached phase Succeeded for a job")
m.data.SetUnit("{pod}")
m.data.SetEmptyGauge()
}
func (m *metricK8sJobSuccessfulPods) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricK8sJobSuccessfulPods) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricK8sJobSuccessfulPods) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricK8sJobSuccessfulPods(cfg MetricConfig) metricK8sJobSuccessfulPods {
m := metricK8sJobSuccessfulPods{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricK8sNamespacePhase struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills k8s.namespace.phase metric with initial data.
func (m *metricK8sNamespacePhase) init() {
m.data.SetName("k8s.namespace.phase")
m.data.SetDescription("The current phase of namespaces (1 for active and 0 for terminating)")
m.data.SetUnit("")
m.data.SetEmptyGauge()
}
func (m *metricK8sNamespacePhase) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricK8sNamespacePhase) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricK8sNamespacePhase) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricK8sNamespacePhase(cfg MetricConfig) metricK8sNamespacePhase {
m := metricK8sNamespacePhase{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricK8sNodeCondition struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills k8s.node.condition metric with initial data.
func (m *metricK8sNodeCondition) init() {
m.data.SetName("k8s.node.condition")
m.data.SetDescription("The condition of a particular Node.")
m.data.SetUnit("{condition}")
m.data.SetEmptyGauge()
m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricK8sNodeCondition) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, conditionAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
dp.Attributes().PutStr("condition", conditionAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricK8sNodeCondition) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricK8sNodeCondition) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricK8sNodeCondition(cfg MetricConfig) metricK8sNodeCondition {
m := metricK8sNodeCondition{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
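// k8s.node.condition is the first metric in this file that carries a data
// point attribute ("condition"), so its recordDataPoint takes the attribute
// value and its init pre-sizes the data point slice with the previously
// observed capacity. A minimal, illustrative sketch (attribute value and
// timestamps are placeholders):
//
//	m := newMetricK8sNodeCondition(cfg)
//	now := pcommon.NewTimestampFromTime(time.Now())
//	m.recordDataPoint(now, now, 1, "Ready") // 1 indicates the condition holds
//	m.emit(dest)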
type metricK8sPodPhase struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills k8s.pod.phase metric with initial data.
func (m *metricK8sPodPhase) init() {
m.data.SetName("k8s.pod.phase")
m.data.SetDescription("Current phase of the pod (1 - Pending, 2 - Running, 3 - Succeeded, 4 - Failed, 5 - Unknown)")
m.data.SetUnit("")
m.data.SetEmptyGauge()
}
func (m *metricK8sPodPhase) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricK8sPodPhase) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricK8sPodPhase) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricK8sPodPhase(cfg MetricConfig) metricK8sPodPhase {
m := metricK8sPodPhase{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricK8sPodStatusReason struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills k8s.pod.status_reason metric with initial data.
func (m *metricK8sPodStatusReason) init() {
m.data.SetName("k8s.pod.status_reason")
m.data.SetDescription("Current status reason of the pod (1 - Evicted, 2 - NodeAffinity, 3 - NodeLost, 4 - Shutdown, 5 - UnexpectedAdmissionError, 6 - Unknown)")
m.data.SetUnit("")
m.data.SetEmptyGauge()
}
func (m *metricK8sPodStatusReason) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricK8sPodStatusReason) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricK8sPodStatusReason) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricK8sPodStatusReason(cfg MetricConfig) metricK8sPodStatusReason {
m := metricK8sPodStatusReason{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricK8sReplicasetAvailable struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills k8s.replicaset.available metric with initial data.
func (m *metricK8sReplicasetAvailable) init() {
m.data.SetName("k8s.replicaset.available")
m.data.SetDescription("Total number of available pods (ready for at least minReadySeconds) targeted by this replicaset")
m.data.SetUnit("{pod}")
m.data.SetEmptyGauge()
}
func (m *metricK8sReplicasetAvailable) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricK8sReplicasetAvailable) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricK8sReplicasetAvailable) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricK8sReplicasetAvailable(cfg MetricConfig) metricK8sReplicasetAvailable {
m := metricK8sReplicasetAvailable{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricK8sReplicasetDesired struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills k8s.replicaset.desired metric with initial data.
func (m *metricK8sReplicasetDesired) init() {
m.data.SetName("k8s.replicaset.desired")
m.data.SetDescription("Number of desired pods in this replicaset")
m.data.SetUnit("{pod}")
m.data.SetEmptyGauge()
}
func (m *metricK8sReplicasetDesired) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricK8sReplicasetDesired) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricK8sReplicasetDesired) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricK8sReplicasetDesired(cfg MetricConfig) metricK8sReplicasetDesired {
m := metricK8sReplicasetDesired{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricK8sReplicationControllerAvailable struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills k8s.replication_controller.available metric with initial data.
func (m *metricK8sReplicationControllerAvailable) init() {
m.data.SetName("k8s.replication_controller.available")
m.data.SetDescription("Total number of available pods (ready for at least minReadySeconds) targeted by this replication_controller")
m.data.SetUnit("{pod}")
m.data.SetEmptyGauge()
}
func (m *metricK8sReplicationControllerAvailable) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricK8sReplicationControllerAvailable) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricK8sReplicationControllerAvailable) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricK8sReplicationControllerAvailable(cfg MetricConfig) metricK8sReplicationControllerAvailable {
m := metricK8sReplicationControllerAvailable{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricK8sReplicationControllerDesired struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills k8s.replication_controller.desired metric with initial data.
func (m *metricK8sReplicationControllerDesired) init() {
m.data.SetName("k8s.replication_controller.desired")
m.data.SetDescription("Number of desired pods in this replication_controller")
m.data.SetUnit("{pod}")
m.data.SetEmptyGauge()
}
func (m *metricK8sReplicationControllerDesired) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricK8sReplicationControllerDesired) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricK8sReplicationControllerDesired) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricK8sReplicationControllerDesired(cfg MetricConfig) metricK8sReplicationControllerDesired {
m := metricK8sReplicationControllerDesired{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricK8sResourceQuotaHardLimit struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills k8s.resource_quota.hard_limit metric with initial data.
func (m *metricK8sResourceQuotaHardLimit) init() {
m.data.SetName("k8s.resource_quota.hard_limit")
m.data.SetDescription("The upper limit for a particular resource in a specific namespace. Will only be sent if a quota is specified. CPU requests/limits will be sent as millicores")
m.data.SetUnit("{resource}")
m.data.SetEmptyGauge()
m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricK8sResourceQuotaHardLimit) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, resourceAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
dp.Attributes().PutStr("resource", resourceAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricK8sResourceQuotaHardLimit) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricK8sResourceQuotaHardLimit) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricK8sResourceQuotaHardLimit(cfg MetricConfig) metricK8sResourceQuotaHardLimit {
m := metricK8sResourceQuotaHardLimit{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
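// Note: unlike the attribute-less gauges above, metrics that carry data point attributes (such as
// k8s.resource_quota.hard_limit) also call EnsureCapacity(m.capacity) in init, pre-sizing the data
// point slice to the largest count observed on a previous scrape, and recordDataPoint stamps the
// attribute value (here "resource") onto each individual data point.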
type metricK8sResourceQuotaUsed struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills k8s.resource_quota.used metric with initial data.
func (m *metricK8sResourceQuotaUsed) init() {
m.data.SetName("k8s.resource_quota.used")
m.data.SetDescription("The usage for a particular resource in a specific namespace. Will only be sent if a quota is specified. CPU requests/limits will be sent as millicores")
m.data.SetUnit("{resource}")
m.data.SetEmptyGauge()
m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricK8sResourceQuotaUsed) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, resourceAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
dp.Attributes().PutStr("resource", resourceAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricK8sResourceQuotaUsed) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricK8sResourceQuotaUsed) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricK8sResourceQuotaUsed(cfg MetricConfig) metricK8sResourceQuotaUsed {
m := metricK8sResourceQuotaUsed{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricK8sStatefulsetCurrentPods struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills k8s.statefulset.current_pods metric with initial data.
func (m *metricK8sStatefulsetCurrentPods) init() {
m.data.SetName("k8s.statefulset.current_pods")
m.data.SetDescription("The number of pods created by the StatefulSet controller from the StatefulSet version")
m.data.SetUnit("{pod}")
m.data.SetEmptyGauge()
}
func (m *metricK8sStatefulsetCurrentPods) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricK8sStatefulsetCurrentPods) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricK8sStatefulsetCurrentPods) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricK8sStatefulsetCurrentPods(cfg MetricConfig) metricK8sStatefulsetCurrentPods {
m := metricK8sStatefulsetCurrentPods{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricK8sStatefulsetDesiredPods struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills k8s.statefulset.desired_pods metric with initial data.
func (m *metricK8sStatefulsetDesiredPods) init() {
m.data.SetName("k8s.statefulset.desired_pods")
m.data.SetDescription("Number of desired pods in the stateful set (the `spec.replicas` field)")
m.data.SetUnit("{pod}")
m.data.SetEmptyGauge()
}
func (m *metricK8sStatefulsetDesiredPods) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricK8sStatefulsetDesiredPods) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricK8sStatefulsetDesiredPods) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricK8sStatefulsetDesiredPods(cfg MetricConfig) metricK8sStatefulsetDesiredPods {
m := metricK8sStatefulsetDesiredPods{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricK8sStatefulsetReadyPods struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills k8s.statefulset.ready_pods metric with initial data.
func (m *metricK8sStatefulsetReadyPods) init() {
m.data.SetName("k8s.statefulset.ready_pods")
m.data.SetDescription("Number of pods created by the stateful set that have the `Ready` condition")
m.data.SetUnit("{pod}")
m.data.SetEmptyGauge()
}
func (m *metricK8sStatefulsetReadyPods) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricK8sStatefulsetReadyPods) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricK8sStatefulsetReadyPods) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricK8sStatefulsetReadyPods(cfg MetricConfig) metricK8sStatefulsetReadyPods {
m := metricK8sStatefulsetReadyPods{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricK8sStatefulsetUpdatedPods struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills k8s.statefulset.updated_pods metric with initial data.
func (m *metricK8sStatefulsetUpdatedPods) init() {
m.data.SetName("k8s.statefulset.updated_pods")
m.data.SetDescription("Number of pods created by the StatefulSet controller from the StatefulSet version")
m.data.SetUnit("{pod}")
m.data.SetEmptyGauge()
}
func (m *metricK8sStatefulsetUpdatedPods) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricK8sStatefulsetUpdatedPods) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricK8sStatefulsetUpdatedPods) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricK8sStatefulsetUpdatedPods(cfg MetricConfig) metricK8sStatefulsetUpdatedPods {
m := metricK8sStatefulsetUpdatedPods{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricOpenshiftAppliedclusterquotaLimit struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills openshift.appliedclusterquota.limit metric with initial data.
func (m *metricOpenshiftAppliedclusterquotaLimit) init() {
m.data.SetName("openshift.appliedclusterquota.limit")
m.data.SetDescription("The upper limit for a particular resource in a specific namespace.")
m.data.SetUnit("{resource}")
m.data.SetEmptyGauge()
m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricOpenshiftAppliedclusterquotaLimit) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, k8sNamespaceNameAttributeValue string, resourceAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
dp.Attributes().PutStr("k8s.namespace.name", k8sNamespaceNameAttributeValue)
dp.Attributes().PutStr("resource", resourceAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricOpenshiftAppliedclusterquotaLimit) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricOpenshiftAppliedclusterquotaLimit) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricOpenshiftAppliedclusterquotaLimit(cfg MetricConfig) metricOpenshiftAppliedclusterquotaLimit {
m := metricOpenshiftAppliedclusterquotaLimit{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricOpenshiftAppliedclusterquotaUsed struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills openshift.appliedclusterquota.used metric with initial data.
func (m *metricOpenshiftAppliedclusterquotaUsed) init() {
m.data.SetName("openshift.appliedclusterquota.used")
m.data.SetDescription("The usage for a particular resource in a specific namespace.")
m.data.SetUnit("{resource}")
m.data.SetEmptyGauge()
m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricOpenshiftAppliedclusterquotaUsed) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, k8sNamespaceNameAttributeValue string, resourceAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
dp.Attributes().PutStr("k8s.namespace.name", k8sNamespaceNameAttributeValue)
dp.Attributes().PutStr("resource", resourceAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricOpenshiftAppliedclusterquotaUsed) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricOpenshiftAppliedclusterquotaUsed) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricOpenshiftAppliedclusterquotaUsed(cfg MetricConfig) metricOpenshiftAppliedclusterquotaUsed {
m := metricOpenshiftAppliedclusterquotaUsed{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricOpenshiftClusterquotaLimit struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills openshift.clusterquota.limit metric with initial data.
func (m *metricOpenshiftClusterquotaLimit) init() {
m.data.SetName("openshift.clusterquota.limit")
m.data.SetDescription("The configured upper limit for a particular resource.")
m.data.SetUnit("{resource}")
m.data.SetEmptyGauge()
m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricOpenshiftClusterquotaLimit) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, resourceAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
dp.Attributes().PutStr("resource", resourceAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricOpenshiftClusterquotaLimit) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricOpenshiftClusterquotaLimit) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricOpenshiftClusterquotaLimit(cfg MetricConfig) metricOpenshiftClusterquotaLimit {
m := metricOpenshiftClusterquotaLimit{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricOpenshiftClusterquotaUsed struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills openshift.clusterquota.used metric with initial data.
func (m *metricOpenshiftClusterquotaUsed) init() {
m.data.SetName("openshift.clusterquota.used")
m.data.SetDescription("The usage for a particular resource with a configured limit.")
m.data.SetUnit("{resource}")
m.data.SetEmptyGauge()
m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricOpenshiftClusterquotaUsed) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, resourceAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
dp.Attributes().PutStr("resource", resourceAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricOpenshiftClusterquotaUsed) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricOpenshiftClusterquotaUsed) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricOpenshiftClusterquotaUsed(cfg MetricConfig) metricOpenshiftClusterquotaUsed {
m := metricOpenshiftClusterquotaUsed{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
// MetricsBuilder provides an interface for scrapers to report metrics while taking care of all the transformations
// required to produce the metric representation defined in metadata and user config.
type MetricsBuilder struct {
config MetricsBuilderConfig // config of the metrics builder.
startTime pcommon.Timestamp // start time that will be applied to all recorded data points.
metricsCapacity int // maximum observed number of metrics per resource.
metricsBuffer pmetric.Metrics // accumulates metrics data before emitting.
buildInfo component.BuildInfo // contains version information.
resourceAttributeIncludeFilter map[string]filter.Filter
resourceAttributeExcludeFilter map[string]filter.Filter
metricK8sContainerCPULimit metricK8sContainerCPULimit
metricK8sContainerCPURequest metricK8sContainerCPURequest
metricK8sContainerEphemeralstorageLimit metricK8sContainerEphemeralstorageLimit
metricK8sContainerEphemeralstorageRequest metricK8sContainerEphemeralstorageRequest
metricK8sContainerMemoryLimit metricK8sContainerMemoryLimit
metricK8sContainerMemoryRequest metricK8sContainerMemoryRequest
metricK8sContainerReady metricK8sContainerReady
metricK8sContainerRestarts metricK8sContainerRestarts
metricK8sContainerStorageLimit metricK8sContainerStorageLimit
metricK8sContainerStorageRequest metricK8sContainerStorageRequest
metricK8sCronjobActiveJobs metricK8sCronjobActiveJobs
metricK8sDaemonsetCurrentScheduledNodes metricK8sDaemonsetCurrentScheduledNodes
metricK8sDaemonsetDesiredScheduledNodes metricK8sDaemonsetDesiredScheduledNodes
metricK8sDaemonsetMisscheduledNodes metricK8sDaemonsetMisscheduledNodes
metricK8sDaemonsetReadyNodes metricK8sDaemonsetReadyNodes
metricK8sDeploymentAvailable metricK8sDeploymentAvailable
metricK8sDeploymentDesired metricK8sDeploymentDesired
metricK8sHpaCurrentReplicas metricK8sHpaCurrentReplicas
metricK8sHpaDesiredReplicas metricK8sHpaDesiredReplicas
metricK8sHpaMaxReplicas metricK8sHpaMaxReplicas
metricK8sHpaMinReplicas metricK8sHpaMinReplicas
metricK8sJobActivePods metricK8sJobActivePods
metricK8sJobDesiredSuccessfulPods metricK8sJobDesiredSuccessfulPods
metricK8sJobFailedPods metricK8sJobFailedPods
metricK8sJobMaxParallelPods metricK8sJobMaxParallelPods
metricK8sJobSuccessfulPods metricK8sJobSuccessfulPods
metricK8sNamespacePhase metricK8sNamespacePhase
metricK8sNodeCondition metricK8sNodeCondition
metricK8sPodPhase metricK8sPodPhase
metricK8sPodStatusReason metricK8sPodStatusReason
metricK8sReplicasetAvailable metricK8sReplicasetAvailable
metricK8sReplicasetDesired metricK8sReplicasetDesired
metricK8sReplicationControllerAvailable metricK8sReplicationControllerAvailable
metricK8sReplicationControllerDesired metricK8sReplicationControllerDesired
metricK8sResourceQuotaHardLimit metricK8sResourceQuotaHardLimit
metricK8sResourceQuotaUsed metricK8sResourceQuotaUsed
metricK8sStatefulsetCurrentPods metricK8sStatefulsetCurrentPods
metricK8sStatefulsetDesiredPods metricK8sStatefulsetDesiredPods
metricK8sStatefulsetReadyPods metricK8sStatefulsetReadyPods
metricK8sStatefulsetUpdatedPods metricK8sStatefulsetUpdatedPods
metricOpenshiftAppliedclusterquotaLimit metricOpenshiftAppliedclusterquotaLimit
metricOpenshiftAppliedclusterquotaUsed metricOpenshiftAppliedclusterquotaUsed
metricOpenshiftClusterquotaLimit metricOpenshiftClusterquotaLimit
metricOpenshiftClusterquotaUsed metricOpenshiftClusterquotaUsed
}
// MetricBuilderOption applies changes to the default metrics builder.
type MetricBuilderOption interface {
apply(*MetricsBuilder)
}
type metricBuilderOptionFunc func(mb *MetricsBuilder)
func (mbof metricBuilderOptionFunc) apply(mb *MetricsBuilder) {
mbof(mb)
}
// WithStartTime sets startTime on the metrics builder.
func WithStartTime(startTime pcommon.Timestamp) MetricBuilderOption {
return metricBuilderOptionFunc(func(mb *MetricsBuilder) {
mb.startTime = startTime
})
}
func NewMetricsBuilder(mbc MetricsBuilderConfig, settings receiver.Settings, options ...MetricBuilderOption) *MetricsBuilder {
mb := &MetricsBuilder{
config: mbc,
startTime: pcommon.NewTimestampFromTime(time.Now()),
metricsBuffer: pmetric.NewMetrics(),
buildInfo: settings.BuildInfo,
metricK8sContainerCPULimit: newMetricK8sContainerCPULimit(mbc.Metrics.K8sContainerCPULimit),
metricK8sContainerCPURequest: newMetricK8sContainerCPURequest(mbc.Metrics.K8sContainerCPURequest),
metricK8sContainerEphemeralstorageLimit: newMetricK8sContainerEphemeralstorageLimit(mbc.Metrics.K8sContainerEphemeralstorageLimit),
metricK8sContainerEphemeralstorageRequest: newMetricK8sContainerEphemeralstorageRequest(mbc.Metrics.K8sContainerEphemeralstorageRequest),
metricK8sContainerMemoryLimit: newMetricK8sContainerMemoryLimit(mbc.Metrics.K8sContainerMemoryLimit),
metricK8sContainerMemoryRequest: newMetricK8sContainerMemoryRequest(mbc.Metrics.K8sContainerMemoryRequest),
metricK8sContainerReady: newMetricK8sContainerReady(mbc.Metrics.K8sContainerReady),
metricK8sContainerRestarts: newMetricK8sContainerRestarts(mbc.Metrics.K8sContainerRestarts),
metricK8sContainerStorageLimit: newMetricK8sContainerStorageLimit(mbc.Metrics.K8sContainerStorageLimit),
metricK8sContainerStorageRequest: newMetricK8sContainerStorageRequest(mbc.Metrics.K8sContainerStorageRequest),
metricK8sCronjobActiveJobs: newMetricK8sCronjobActiveJobs(mbc.Metrics.K8sCronjobActiveJobs),
metricK8sDaemonsetCurrentScheduledNodes: newMetricK8sDaemonsetCurrentScheduledNodes(mbc.Metrics.K8sDaemonsetCurrentScheduledNodes),
metricK8sDaemonsetDesiredScheduledNodes: newMetricK8sDaemonsetDesiredScheduledNodes(mbc.Metrics.K8sDaemonsetDesiredScheduledNodes),
metricK8sDaemonsetMisscheduledNodes: newMetricK8sDaemonsetMisscheduledNodes(mbc.Metrics.K8sDaemonsetMisscheduledNodes),
metricK8sDaemonsetReadyNodes: newMetricK8sDaemonsetReadyNodes(mbc.Metrics.K8sDaemonsetReadyNodes),
metricK8sDeploymentAvailable: newMetricK8sDeploymentAvailable(mbc.Metrics.K8sDeploymentAvailable),
metricK8sDeploymentDesired: newMetricK8sDeploymentDesired(mbc.Metrics.K8sDeploymentDesired),
metricK8sHpaCurrentReplicas: newMetricK8sHpaCurrentReplicas(mbc.Metrics.K8sHpaCurrentReplicas),
metricK8sHpaDesiredReplicas: newMetricK8sHpaDesiredReplicas(mbc.Metrics.K8sHpaDesiredReplicas),
metricK8sHpaMaxReplicas: newMetricK8sHpaMaxReplicas(mbc.Metrics.K8sHpaMaxReplicas),
metricK8sHpaMinReplicas: newMetricK8sHpaMinReplicas(mbc.Metrics.K8sHpaMinReplicas),
metricK8sJobActivePods: newMetricK8sJobActivePods(mbc.Metrics.K8sJobActivePods),
metricK8sJobDesiredSuccessfulPods: newMetricK8sJobDesiredSuccessfulPods(mbc.Metrics.K8sJobDesiredSuccessfulPods),
metricK8sJobFailedPods: newMetricK8sJobFailedPods(mbc.Metrics.K8sJobFailedPods),
metricK8sJobMaxParallelPods: newMetricK8sJobMaxParallelPods(mbc.Metrics.K8sJobMaxParallelPods),
metricK8sJobSuccessfulPods: newMetricK8sJobSuccessfulPods(mbc.Metrics.K8sJobSuccessfulPods),
metricK8sNamespacePhase: newMetricK8sNamespacePhase(mbc.Metrics.K8sNamespacePhase),
metricK8sNodeCondition: newMetricK8sNodeCondition(mbc.Metrics.K8sNodeCondition),
metricK8sPodPhase: newMetricK8sPodPhase(mbc.Metrics.K8sPodPhase),
metricK8sPodStatusReason: newMetricK8sPodStatusReason(mbc.Metrics.K8sPodStatusReason),
metricK8sReplicasetAvailable: newMetricK8sReplicasetAvailable(mbc.Metrics.K8sReplicasetAvailable),
metricK8sReplicasetDesired: newMetricK8sReplicasetDesired(mbc.Metrics.K8sReplicasetDesired),
metricK8sReplicationControllerAvailable: newMetricK8sReplicationControllerAvailable(mbc.Metrics.K8sReplicationControllerAvailable),
metricK8sReplicationControllerDesired: newMetricK8sReplicationControllerDesired(mbc.Metrics.K8sReplicationControllerDesired),
metricK8sResourceQuotaHardLimit: newMetricK8sResourceQuotaHardLimit(mbc.Metrics.K8sResourceQuotaHardLimit),
metricK8sResourceQuotaUsed: newMetricK8sResourceQuotaUsed(mbc.Metrics.K8sResourceQuotaUsed),
metricK8sStatefulsetCurrentPods: newMetricK8sStatefulsetCurrentPods(mbc.Metrics.K8sStatefulsetCurrentPods),
metricK8sStatefulsetDesiredPods: newMetricK8sStatefulsetDesiredPods(mbc.Metrics.K8sStatefulsetDesiredPods),
metricK8sStatefulsetReadyPods: newMetricK8sStatefulsetReadyPods(mbc.Metrics.K8sStatefulsetReadyPods),
metricK8sStatefulsetUpdatedPods: newMetricK8sStatefulsetUpdatedPods(mbc.Metrics.K8sStatefulsetUpdatedPods),
metricOpenshiftAppliedclusterquotaLimit: newMetricOpenshiftAppliedclusterquotaLimit(mbc.Metrics.OpenshiftAppliedclusterquotaLimit),
metricOpenshiftAppliedclusterquotaUsed: newMetricOpenshiftAppliedclusterquotaUsed(mbc.Metrics.OpenshiftAppliedclusterquotaUsed),
metricOpenshiftClusterquotaLimit: newMetricOpenshiftClusterquotaLimit(mbc.Metrics.OpenshiftClusterquotaLimit),
metricOpenshiftClusterquotaUsed: newMetricOpenshiftClusterquotaUsed(mbc.Metrics.OpenshiftClusterquotaUsed),
resourceAttributeIncludeFilter: make(map[string]filter.Filter),
resourceAttributeExcludeFilter: make(map[string]filter.Filter),
}
if mbc.ResourceAttributes.ContainerID.MetricsInclude != nil {
mb.resourceAttributeIncludeFilter["container.id"] = filter.CreateFilter(mbc.ResourceAttributes.ContainerID.MetricsInclude)
}
if mbc.ResourceAttributes.ContainerID.MetricsExclude != nil {
mb.resourceAttributeExcludeFilter["container.id"] = filter.CreateFilter(mbc.ResourceAttributes.ContainerID.MetricsExclude)
}
if mbc.ResourceAttributes.ContainerImageName.MetricsInclude != nil {
mb.resourceAttributeIncludeFilter["container.image.name"] = filter.CreateFilter(mbc.ResourceAttributes.ContainerImageName.MetricsInclude)
}
if mbc.ResourceAttributes.ContainerImageName.MetricsExclude != nil {
mb.resourceAttributeExcludeFilter["container.image.name"] = filter.CreateFilter(mbc.ResourceAttributes.ContainerImageName.MetricsExclude)
}
if mbc.ResourceAttributes.ContainerImageTag.MetricsInclude != nil {
mb.resourceAttributeIncludeFilter["container.image.tag"] = filter.CreateFilter(mbc.ResourceAttributes.ContainerImageTag.MetricsInclude)
}
if mbc.ResourceAttributes.ContainerImageTag.MetricsExclude != nil {
mb.resourceAttributeExcludeFilter["container.image.tag"] = filter.CreateFilter(mbc.ResourceAttributes.ContainerImageTag.MetricsExclude)
}
if mbc.ResourceAttributes.ContainerRuntime.MetricsInclude != nil {
mb.resourceAttributeIncludeFilter["container.runtime"] = filter.CreateFilter(mbc.ResourceAttributes.ContainerRuntime.MetricsInclude)
}
if mbc.ResourceAttributes.ContainerRuntime.MetricsExclude != nil {
mb.resourceAttributeExcludeFilter["container.runtime"] = filter.CreateFilter(mbc.ResourceAttributes.ContainerRuntime.MetricsExclude)
}
if mbc.ResourceAttributes.ContainerRuntimeVersion.MetricsInclude != nil {
mb.resourceAttributeIncludeFilter["container.runtime.version"] = filter.CreateFilter(mbc.ResourceAttributes.ContainerRuntimeVersion.MetricsInclude)
}
if mbc.ResourceAttributes.ContainerRuntimeVersion.MetricsExclude != nil {
mb.resourceAttributeExcludeFilter["container.runtime.version"] = filter.CreateFilter(mbc.ResourceAttributes.ContainerRuntimeVersion.MetricsExclude)
}
if mbc.ResourceAttributes.K8sContainerName.MetricsInclude != nil {
mb.resourceAttributeIncludeFilter["k8s.container.name"] = filter.CreateFilter(mbc.ResourceAttributes.K8sContainerName.MetricsInclude)
}
if mbc.ResourceAttributes.K8sContainerName.MetricsExclude != nil {
mb.resourceAttributeExcludeFilter["k8s.container.name"] = filter.CreateFilter(mbc.ResourceAttributes.K8sContainerName.MetricsExclude)
}
if mbc.ResourceAttributes.K8sContainerStatusLastTerminatedReason.MetricsInclude != nil {
mb.resourceAttributeIncludeFilter["k8s.container.status.last_terminated_reason"] = filter.CreateFilter(mbc.ResourceAttributes.K8sContainerStatusLastTerminatedReason.MetricsInclude)
}
if mbc.ResourceAttributes.K8sContainerStatusLastTerminatedReason.MetricsExclude != nil {
mb.resourceAttributeExcludeFilter["k8s.container.status.last_terminated_reason"] = filter.CreateFilter(mbc.ResourceAttributes.K8sContainerStatusLastTerminatedReason.MetricsExclude)
}
if mbc.ResourceAttributes.K8sCronjobName.MetricsInclude != nil {
mb.resourceAttributeIncludeFilter["k8s.cronjob.name"] = filter.CreateFilter(mbc.ResourceAttributes.K8sCronjobName.MetricsInclude)
}
if mbc.ResourceAttributes.K8sCronjobName.MetricsExclude != nil {
mb.resourceAttributeExcludeFilter["k8s.cronjob.name"] = filter.CreateFilter(mbc.ResourceAttributes.K8sCronjobName.MetricsExclude)
}
if mbc.ResourceAttributes.K8sCronjobUID.MetricsInclude != nil {
mb.resourceAttributeIncludeFilter["k8s.cronjob.uid"] = filter.CreateFilter(mbc.ResourceAttributes.K8sCronjobUID.MetricsInclude)
}
if mbc.ResourceAttributes.K8sCronjobUID.MetricsExclude != nil {
mb.resourceAttributeExcludeFilter["k8s.cronjob.uid"] = filter.CreateFilter(mbc.ResourceAttributes.K8sCronjobUID.MetricsExclude)
}
if mbc.ResourceAttributes.K8sDaemonsetName.MetricsInclude != nil {
mb.resourceAttributeIncludeFilter["k8s.daemonset.name"] = filter.CreateFilter(mbc.ResourceAttributes.K8sDaemonsetName.MetricsInclude)
}
if mbc.ResourceAttributes.K8sDaemonsetName.MetricsExclude != nil {
mb.resourceAttributeExcludeFilter["k8s.daemonset.name"] = filter.CreateFilter(mbc.ResourceAttributes.K8sDaemonsetName.MetricsExclude)
}
if mbc.ResourceAttributes.K8sDaemonsetUID.MetricsInclude != nil {
mb.resourceAttributeIncludeFilter["k8s.daemonset.uid"] = filter.CreateFilter(mbc.ResourceAttributes.K8sDaemonsetUID.MetricsInclude)
}
if mbc.ResourceAttributes.K8sDaemonsetUID.MetricsExclude != nil {
mb.resourceAttributeExcludeFilter["k8s.daemonset.uid"] = filter.CreateFilter(mbc.ResourceAttributes.K8sDaemonsetUID.MetricsExclude)
}
if mbc.ResourceAttributes.K8sDeploymentName.MetricsInclude != nil {
mb.resourceAttributeIncludeFilter["k8s.deployment.name"] = filter.CreateFilter(mbc.ResourceAttributes.K8sDeploymentName.MetricsInclude)
}
if mbc.ResourceAttributes.K8sDeploymentName.MetricsExclude != nil {
mb.resourceAttributeExcludeFilter["k8s.deployment.name"] = filter.CreateFilter(mbc.ResourceAttributes.K8sDeploymentName.MetricsExclude)
}
if mbc.ResourceAttributes.K8sDeploymentUID.MetricsInclude != nil {
mb.resourceAttributeIncludeFilter["k8s.deployment.uid"] = filter.CreateFilter(mbc.ResourceAttributes.K8sDeploymentUID.MetricsInclude)
}
if mbc.ResourceAttributes.K8sDeploymentUID.MetricsExclude != nil {
mb.resourceAttributeExcludeFilter["k8s.deployment.uid"] = filter.CreateFilter(mbc.ResourceAttributes.K8sDeploymentUID.MetricsExclude)
}
if mbc.ResourceAttributes.K8sHpaName.MetricsInclude != nil {
mb.resourceAttributeIncludeFilter["k8s.hpa.name"] = filter.CreateFilter(mbc.ResourceAttributes.K8sHpaName.MetricsInclude)
}
if mbc.ResourceAttributes.K8sHpaName.MetricsExclude != nil {
mb.resourceAttributeExcludeFilter["k8s.hpa.name"] = filter.CreateFilter(mbc.ResourceAttributes.K8sHpaName.MetricsExclude)
}
if mbc.ResourceAttributes.K8sHpaUID.MetricsInclude != nil {
mb.resourceAttributeIncludeFilter["k8s.hpa.uid"] = filter.CreateFilter(mbc.ResourceAttributes.K8sHpaUID.MetricsInclude)
}
if mbc.ResourceAttributes.K8sHpaUID.MetricsExclude != nil {
mb.resourceAttributeExcludeFilter["k8s.hpa.uid"] = filter.CreateFilter(mbc.ResourceAttributes.K8sHpaUID.MetricsExclude)
}
if mbc.ResourceAttributes.K8sJobName.MetricsInclude != nil {
mb.resourceAttributeIncludeFilter["k8s.job.name"] = filter.CreateFilter(mbc.ResourceAttributes.K8sJobName.MetricsInclude)
}
if mbc.ResourceAttributes.K8sJobName.MetricsExclude != nil {
mb.resourceAttributeExcludeFilter["k8s.job.name"] = filter.CreateFilter(mbc.ResourceAttributes.K8sJobName.MetricsExclude)
}
if mbc.ResourceAttributes.K8sJobUID.MetricsInclude != nil {
mb.resourceAttributeIncludeFilter["k8s.job.uid"] = filter.CreateFilter(mbc.ResourceAttributes.K8sJobUID.MetricsInclude)
}
if mbc.ResourceAttributes.K8sJobUID.MetricsExclude != nil {
mb.resourceAttributeExcludeFilter["k8s.job.uid"] = filter.CreateFilter(mbc.ResourceAttributes.K8sJobUID.MetricsExclude)
}
if mbc.ResourceAttributes.K8sKubeletVersion.MetricsInclude != nil {
mb.resourceAttributeIncludeFilter["k8s.kubelet.version"] = filter.CreateFilter(mbc.ResourceAttributes.K8sKubeletVersion.MetricsInclude)
}
if mbc.ResourceAttributes.K8sKubeletVersion.MetricsExclude != nil {
mb.resourceAttributeExcludeFilter["k8s.kubelet.version"] = filter.CreateFilter(mbc.ResourceAttributes.K8sKubeletVersion.MetricsExclude)
}
if mbc.ResourceAttributes.K8sNamespaceName.MetricsInclude != nil {
mb.resourceAttributeIncludeFilter["k8s.namespace.name"] = filter.CreateFilter(mbc.ResourceAttributes.K8sNamespaceName.MetricsInclude)
}
if mbc.ResourceAttributes.K8sNamespaceName.MetricsExclude != nil {
mb.resourceAttributeExcludeFilter["k8s.namespace.name"] = filter.CreateFilter(mbc.ResourceAttributes.K8sNamespaceName.MetricsExclude)
}
if mbc.ResourceAttributes.K8sNamespaceUID.MetricsInclude != nil {
mb.resourceAttributeIncludeFilter["k8s.namespace.uid"] = filter.CreateFilter(mbc.ResourceAttributes.K8sNamespaceUID.MetricsInclude)
}
if mbc.ResourceAttributes.K8sNamespaceUID.MetricsExclude != nil {
mb.resourceAttributeExcludeFilter["k8s.namespace.uid"] = filter.CreateFilter(mbc.ResourceAttributes.K8sNamespaceUID.MetricsExclude)
}
if mbc.ResourceAttributes.K8sNodeName.MetricsInclude != nil {
mb.resourceAttributeIncludeFilter["k8s.node.name"] = filter.CreateFilter(mbc.ResourceAttributes.K8sNodeName.MetricsInclude)
}
if mbc.ResourceAttributes.K8sNodeName.MetricsExclude != nil {
mb.resourceAttributeExcludeFilter["k8s.node.name"] = filter.CreateFilter(mbc.ResourceAttributes.K8sNodeName.MetricsExclude)
}
if mbc.ResourceAttributes.K8sNodeUID.MetricsInclude != nil {
mb.resourceAttributeIncludeFilter["k8s.node.uid"] = filter.CreateFilter(mbc.ResourceAttributes.K8sNodeUID.MetricsInclude)
}
if mbc.ResourceAttributes.K8sNodeUID.MetricsExclude != nil {
mb.resourceAttributeExcludeFilter["k8s.node.uid"] = filter.CreateFilter(mbc.ResourceAttributes.K8sNodeUID.MetricsExclude)
}
if mbc.ResourceAttributes.K8sPodName.MetricsInclude != nil {
mb.resourceAttributeIncludeFilter["k8s.pod.name"] = filter.CreateFilter(mbc.ResourceAttributes.K8sPodName.MetricsInclude)
}
if mbc.ResourceAttributes.K8sPodName.MetricsExclude != nil {
mb.resourceAttributeExcludeFilter["k8s.pod.name"] = filter.CreateFilter(mbc.ResourceAttributes.K8sPodName.MetricsExclude)
}
if mbc.ResourceAttributes.K8sPodQosClass.MetricsInclude != nil {
mb.resourceAttributeIncludeFilter["k8s.pod.qos_class"] = filter.CreateFilter(mbc.ResourceAttributes.K8sPodQosClass.MetricsInclude)
}
if mbc.ResourceAttributes.K8sPodQosClass.MetricsExclude != nil {
mb.resourceAttributeExcludeFilter["k8s.pod.qos_class"] = filter.CreateFilter(mbc.ResourceAttributes.K8sPodQosClass.MetricsExclude)
}
if mbc.ResourceAttributes.K8sPodUID.MetricsInclude != nil {
mb.resourceAttributeIncludeFilter["k8s.pod.uid"] = filter.CreateFilter(mbc.ResourceAttributes.K8sPodUID.MetricsInclude)
}
if mbc.ResourceAttributes.K8sPodUID.MetricsExclude != nil {
mb.resourceAttributeExcludeFilter["k8s.pod.uid"] = filter.CreateFilter(mbc.ResourceAttributes.K8sPodUID.MetricsExclude)
}
if mbc.ResourceAttributes.K8sReplicasetName.MetricsInclude != nil {
mb.resourceAttributeIncludeFilter["k8s.replicaset.name"] = filter.CreateFilter(mbc.ResourceAttributes.K8sReplicasetName.MetricsInclude)
}
if mbc.ResourceAttributes.K8sReplicasetName.MetricsExclude != nil {
mb.resourceAttributeExcludeFilter["k8s.replicaset.name"] = filter.CreateFilter(mbc.ResourceAttributes.K8sReplicasetName.MetricsExclude)
}
if mbc.ResourceAttributes.K8sReplicasetUID.MetricsInclude != nil {
mb.resourceAttributeIncludeFilter["k8s.replicaset.uid"] = filter.CreateFilter(mbc.ResourceAttributes.K8sReplicasetUID.MetricsInclude)
}
if mbc.ResourceAttributes.K8sReplicasetUID.MetricsExclude != nil {
mb.resourceAttributeExcludeFilter["k8s.replicaset.uid"] = filter.CreateFilter(mbc.ResourceAttributes.K8sReplicasetUID.MetricsExclude)
}
if mbc.ResourceAttributes.K8sReplicationcontrollerName.MetricsInclude != nil {
mb.resourceAttributeIncludeFilter["k8s.replicationcontroller.name"] = filter.CreateFilter(mbc.ResourceAttributes.K8sReplicationcontrollerName.MetricsInclude)
}
if mbc.ResourceAttributes.K8sReplicationcontrollerName.MetricsExclude != nil {
mb.resourceAttributeExcludeFilter["k8s.replicationcontroller.name"] = filter.CreateFilter(mbc.ResourceAttributes.K8sReplicationcontrollerName.MetricsExclude)
}
if mbc.ResourceAttributes.K8sReplicationcontrollerUID.MetricsInclude != nil {
mb.resourceAttributeIncludeFilter["k8s.replicationcontroller.uid"] = filter.CreateFilter(mbc.ResourceAttributes.K8sReplicationcontrollerUID.MetricsInclude)
}
if mbc.ResourceAttributes.K8sReplicationcontrollerUID.MetricsExclude != nil {
mb.resourceAttributeExcludeFilter["k8s.replicationcontroller.uid"] = filter.CreateFilter(mbc.ResourceAttributes.K8sReplicationcontrollerUID.MetricsExclude)
}
if mbc.ResourceAttributes.K8sResourcequotaName.MetricsInclude != nil {
mb.resourceAttributeIncludeFilter["k8s.resourcequota.name"] = filter.CreateFilter(mbc.ResourceAttributes.K8sResourcequotaName.MetricsInclude)
}
if mbc.ResourceAttributes.K8sResourcequotaName.MetricsExclude != nil {
mb.resourceAttributeExcludeFilter["k8s.resourcequota.name"] = filter.CreateFilter(mbc.ResourceAttributes.K8sResourcequotaName.MetricsExclude)
}
if mbc.ResourceAttributes.K8sResourcequotaUID.MetricsInclude != nil {
mb.resourceAttributeIncludeFilter["k8s.resourcequota.uid"] = filter.CreateFilter(mbc.ResourceAttributes.K8sResourcequotaUID.MetricsInclude)
}
if mbc.ResourceAttributes.K8sResourcequotaUID.MetricsExclude != nil {
mb.resourceAttributeExcludeFilter["k8s.resourcequota.uid"] = filter.CreateFilter(mbc.ResourceAttributes.K8sResourcequotaUID.MetricsExclude)
}
if mbc.ResourceAttributes.K8sStatefulsetName.MetricsInclude != nil {
mb.resourceAttributeIncludeFilter["k8s.statefulset.name"] = filter.CreateFilter(mbc.ResourceAttributes.K8sStatefulsetName.MetricsInclude)
}
if mbc.ResourceAttributes.K8sStatefulsetName.MetricsExclude != nil {
mb.resourceAttributeExcludeFilter["k8s.statefulset.name"] = filter.CreateFilter(mbc.ResourceAttributes.K8sStatefulsetName.MetricsExclude)
}
if mbc.ResourceAttributes.K8sStatefulsetUID.MetricsInclude != nil {
mb.resourceAttributeIncludeFilter["k8s.statefulset.uid"] = filter.CreateFilter(mbc.ResourceAttributes.K8sStatefulsetUID.MetricsInclude)
}
if mbc.ResourceAttributes.K8sStatefulsetUID.MetricsExclude != nil {
mb.resourceAttributeExcludeFilter["k8s.statefulset.uid"] = filter.CreateFilter(mbc.ResourceAttributes.K8sStatefulsetUID.MetricsExclude)
}
if mbc.ResourceAttributes.OpenshiftClusterquotaName.MetricsInclude != nil {
mb.resourceAttributeIncludeFilter["openshift.clusterquota.name"] = filter.CreateFilter(mbc.ResourceAttributes.OpenshiftClusterquotaName.MetricsInclude)
}
if mbc.ResourceAttributes.OpenshiftClusterquotaName.MetricsExclude != nil {
mb.resourceAttributeExcludeFilter["openshift.clusterquota.name"] = filter.CreateFilter(mbc.ResourceAttributes.OpenshiftClusterquotaName.MetricsExclude)
}
if mbc.ResourceAttributes.OpenshiftClusterquotaUID.MetricsInclude != nil {
mb.resourceAttributeIncludeFilter["openshift.clusterquota.uid"] = filter.CreateFilter(mbc.ResourceAttributes.OpenshiftClusterquotaUID.MetricsInclude)
}
if mbc.ResourceAttributes.OpenshiftClusterquotaUID.MetricsExclude != nil {
mb.resourceAttributeExcludeFilter["openshift.clusterquota.uid"] = filter.CreateFilter(mbc.ResourceAttributes.OpenshiftClusterquotaUID.MetricsExclude)
}
if mbc.ResourceAttributes.OsDescription.MetricsInclude != nil {
mb.resourceAttributeIncludeFilter["os.description"] = filter.CreateFilter(mbc.ResourceAttributes.OsDescription.MetricsInclude)
}
if mbc.ResourceAttributes.OsDescription.MetricsExclude != nil {
mb.resourceAttributeExcludeFilter["os.description"] = filter.CreateFilter(mbc.ResourceAttributes.OsDescription.MetricsExclude)
}
if mbc.ResourceAttributes.OsType.MetricsInclude != nil {
mb.resourceAttributeIncludeFilter["os.type"] = filter.CreateFilter(mbc.ResourceAttributes.OsType.MetricsInclude)
}
if mbc.ResourceAttributes.OsType.MetricsExclude != nil {
mb.resourceAttributeExcludeFilter["os.type"] = filter.CreateFilter(mbc.ResourceAttributes.OsType.MetricsExclude)
}
for _, op := range options {
op.apply(mb)
}
return mb
}
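// Note: the include/exclude maps built above are keyed by resource attribute name and are consulted
// in EmitForResource: if an include filter exists for an attribute and the resource's value does not
// match it, or an exclude filter matches, the whole resource and all metrics recorded for it are
// dropped. Minimal sketch of enabling such a filter programmatically (assuming the field names from
// the companion generated_config.go):
//
//	cfg := DefaultMetricsBuilderConfig()
//	cfg.ResourceAttributes.K8sNamespaceName.MetricsInclude = []filter.Config{{Strict: "default"}}
//	mb := NewMetricsBuilder(cfg, settings)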
// NewResourceBuilder returns a new resource builder that should be used to build a resource associated with the emitted metrics.
func (mb *MetricsBuilder) NewResourceBuilder() *ResourceBuilder {
return NewResourceBuilder(mb.config.ResourceAttributes)
}
// updateCapacity tracks the maximum observed number of metrics per resource, which is used to pre-allocate the metrics slice capacity.
func (mb *MetricsBuilder) updateCapacity(rm pmetric.ResourceMetrics) {
if mb.metricsCapacity < rm.ScopeMetrics().At(0).Metrics().Len() {
mb.metricsCapacity = rm.ScopeMetrics().At(0).Metrics().Len()
}
}
// ResourceMetricsOption applies changes to provided resource metrics.
type ResourceMetricsOption interface {
apply(pmetric.ResourceMetrics)
}
type resourceMetricsOptionFunc func(pmetric.ResourceMetrics)
func (rmof resourceMetricsOptionFunc) apply(rm pmetric.ResourceMetrics) {
rmof(rm)
}
// WithResource sets the provided resource on the emitted ResourceMetrics.
// It's recommended to use ResourceBuilder to create the resource.
func WithResource(res pcommon.Resource) ResourceMetricsOption {
return resourceMetricsOptionFunc(func(rm pmetric.ResourceMetrics) {
res.CopyTo(rm.Resource())
})
}
// WithStartTimeOverride overrides the start time for all data points of the emitted resource metrics.
// This option should only be used if a different start time has to be set on metrics coming from different resources.
func WithStartTimeOverride(start pcommon.Timestamp) ResourceMetricsOption {
return resourceMetricsOptionFunc(func(rm pmetric.ResourceMetrics) {
var dps pmetric.NumberDataPointSlice
metrics := rm.ScopeMetrics().At(0).Metrics()
for i := 0; i < metrics.Len(); i++ {
switch metrics.At(i).Type() {
case pmetric.MetricTypeGauge:
dps = metrics.At(i).Gauge().DataPoints()
case pmetric.MetricTypeSum:
dps = metrics.At(i).Sum().DataPoints()
}
for j := 0; j < dps.Len(); j++ {
dps.At(j).SetStartTimestamp(start)
}
}
})
}
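// Note: WithStartTimeOverride is typically combined with WithResource when the observed resource has
// its own start time (for example, a pod's creation time) that should replace the builder's process
// start time. Illustrative usage, where rb and podStartTime are hypothetical values:
//
//	mb.EmitForResource(WithResource(rb.Emit()), WithStartTimeOverride(podStartTime))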
// EmitForResource saves all the generated metrics under a new resource and updates the internal state to be ready for
// recording another set of data points as part of another resource. This function can be helpful when one scraper
// needs to emit metrics from several resources. Otherwise calling this function is not required;
// calling `Emit` alone is sufficient.
// Resource attributes should be provided as ResourceMetricsOption arguments.
func (mb *MetricsBuilder) EmitForResource(options ...ResourceMetricsOption) {
rm := pmetric.NewResourceMetrics()
rm.SetSchemaUrl(conventions.SchemaURL)
ils := rm.ScopeMetrics().AppendEmpty()
ils.Scope().SetName(ScopeName)
ils.Scope().SetVersion(mb.buildInfo.Version)
ils.Metrics().EnsureCapacity(mb.metricsCapacity)
mb.metricK8sContainerCPULimit.emit(ils.Metrics())
mb.metricK8sContainerCPURequest.emit(ils.Metrics())
mb.metricK8sContainerEphemeralstorageLimit.emit(ils.Metrics())
mb.metricK8sContainerEphemeralstorageRequest.emit(ils.Metrics())
mb.metricK8sContainerMemoryLimit.emit(ils.Metrics())
mb.metricK8sContainerMemoryRequest.emit(ils.Metrics())
mb.metricK8sContainerReady.emit(ils.Metrics())
mb.metricK8sContainerRestarts.emit(ils.Metrics())
mb.metricK8sContainerStorageLimit.emit(ils.Metrics())
mb.metricK8sContainerStorageRequest.emit(ils.Metrics())
mb.metricK8sCronjobActiveJobs.emit(ils.Metrics())
mb.metricK8sDaemonsetCurrentScheduledNodes.emit(ils.Metrics())
mb.metricK8sDaemonsetDesiredScheduledNodes.emit(ils.Metrics())
mb.metricK8sDaemonsetMisscheduledNodes.emit(ils.Metrics())
mb.metricK8sDaemonsetReadyNodes.emit(ils.Metrics())
mb.metricK8sDeploymentAvailable.emit(ils.Metrics())
mb.metricK8sDeploymentDesired.emit(ils.Metrics())
mb.metricK8sHpaCurrentReplicas.emit(ils.Metrics())
mb.metricK8sHpaDesiredReplicas.emit(ils.Metrics())
mb.metricK8sHpaMaxReplicas.emit(ils.Metrics())
mb.metricK8sHpaMinReplicas.emit(ils.Metrics())
mb.metricK8sJobActivePods.emit(ils.Metrics())
mb.metricK8sJobDesiredSuccessfulPods.emit(ils.Metrics())
mb.metricK8sJobFailedPods.emit(ils.Metrics())
mb.metricK8sJobMaxParallelPods.emit(ils.Metrics())
mb.metricK8sJobSuccessfulPods.emit(ils.Metrics())
mb.metricK8sNamespacePhase.emit(ils.Metrics())
mb.metricK8sNodeCondition.emit(ils.Metrics())
mb.metricK8sPodPhase.emit(ils.Metrics())
mb.metricK8sPodStatusReason.emit(ils.Metrics())
mb.metricK8sReplicasetAvailable.emit(ils.Metrics())
mb.metricK8sReplicasetDesired.emit(ils.Metrics())
mb.metricK8sReplicationControllerAvailable.emit(ils.Metrics())
mb.metricK8sReplicationControllerDesired.emit(ils.Metrics())
mb.metricK8sResourceQuotaHardLimit.emit(ils.Metrics())
mb.metricK8sResourceQuotaUsed.emit(ils.Metrics())
mb.metricK8sStatefulsetCurrentPods.emit(ils.Metrics())
mb.metricK8sStatefulsetDesiredPods.emit(ils.Metrics())
mb.metricK8sStatefulsetReadyPods.emit(ils.Metrics())
mb.metricK8sStatefulsetUpdatedPods.emit(ils.Metrics())
mb.metricOpenshiftAppliedclusterquotaLimit.emit(ils.Metrics())
mb.metricOpenshiftAppliedclusterquotaUsed.emit(ils.Metrics())
mb.metricOpenshiftClusterquotaLimit.emit(ils.Metrics())
mb.metricOpenshiftClusterquotaUsed.emit(ils.Metrics())
for _, op := range options {
op.apply(rm)
}
for attr, filter := range mb.resourceAttributeIncludeFilter {
if val, ok := rm.Resource().Attributes().Get(attr); ok && !filter.Matches(val.AsString()) {
return
}
}
for attr, filter := range mb.resourceAttributeExcludeFilter {
if val, ok := rm.Resource().Attributes().Get(attr); ok && filter.Matches(val.AsString()) {
return
}
}
if ils.Metrics().Len() > 0 {
mb.updateCapacity(rm)
rm.MoveTo(mb.metricsBuffer.ResourceMetrics().AppendEmpty())
}
}
// Emit returns all the metrics accumulated by the metrics builder and updates the internal state to be ready for
// recording another set of metrics. This function applies all the transformations required to
// produce the metric representation defined in metadata and user config, e.g. delta or cumulative.
func (mb *MetricsBuilder) Emit(options ...ResourceMetricsOption) pmetric.Metrics {
mb.EmitForResource(options...)
metrics := mb.metricsBuffer
mb.metricsBuffer = pmetric.NewMetrics()
return metrics
}
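// exampleScrape is an illustrative sketch and is not part of the mdatagen output. It shows the
// intended end-to-end flow: record data points for one resource, attach the resource built by the
// generated ResourceBuilder, and flush the accumulated batch with Emit. DefaultMetricsBuilderConfig
// and the SetK8sReplicasetName setter are assumed to come from the companion generated_config.go
// and generated_resource.go files.
func exampleScrape(settings receiver.Settings) pmetric.Metrics {
	mb := NewMetricsBuilder(DefaultMetricsBuilderConfig(), settings)
	now := pcommon.NewTimestampFromTime(time.Now())
	// Record one data point per enabled metric for this scrape; disabled metrics drop the point.
	mb.RecordK8sReplicasetDesiredDataPoint(now, 3)
	mb.RecordK8sReplicasetAvailableDataPoint(now, 2)
	// Group the recorded points under a single resource, then return the accumulated metrics.
	rb := mb.NewResourceBuilder()
	rb.SetK8sReplicasetName("example-replicaset")
	mb.EmitForResource(WithResource(rb.Emit()))
	return mb.Emit()
}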
// RecordK8sContainerCPULimitDataPoint adds a data point to k8s.container.cpu_limit metric.
func (mb *MetricsBuilder) RecordK8sContainerCPULimitDataPoint(ts pcommon.Timestamp, val float64) {
mb.metricK8sContainerCPULimit.recordDataPoint(mb.startTime, ts, val)
}
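// Note: every Record*DataPoint method in this file follows the same pattern: it forwards the
// builder's startTime as the data point start timestamp and delegates to the corresponding
// unexported metric helper, which silently drops the point when the metric is disabled in config.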
// RecordK8sContainerCPURequestDataPoint adds a data point to k8s.container.cpu_request metric.
func (mb *MetricsBuilder) RecordK8sContainerCPURequestDataPoint(ts pcommon.Timestamp, val float64) {
mb.metricK8sContainerCPURequest.recordDataPoint(mb.startTime, ts, val)
}
// RecordK8sContainerEphemeralstorageLimitDataPoint adds a data point to k8s.container.ephemeralstorage_limit metric.
func (mb *MetricsBuilder) RecordK8sContainerEphemeralstorageLimitDataPoint(ts pcommon.Timestamp, val int64) {
mb.metricK8sContainerEphemeralstorageLimit.recordDataPoint(mb.startTime, ts, val)
}
// RecordK8sContainerEphemeralstorageRequestDataPoint adds a data point to k8s.container.ephemeralstorage_request metric.
func (mb *MetricsBuilder) RecordK8sContainerEphemeralstorageRequestDataPoint(ts pcommon.Timestamp, val int64) {
mb.metricK8sContainerEphemeralstorageRequest.recordDataPoint(mb.startTime, ts, val)
}
// RecordK8sContainerMemoryLimitDataPoint adds a data point to k8s.container.memory_limit metric.
func (mb *MetricsBuilder) RecordK8sContainerMemoryLimitDataPoint(ts pcommon.Timestamp, val int64) {
mb.metricK8sContainerMemoryLimit.recordDataPoint(mb.startTime, ts, val)
}
// RecordK8sContainerMemoryRequestDataPoint adds a data point to k8s.container.memory_request metric.
func (mb *MetricsBuilder) RecordK8sContainerMemoryRequestDataPoint(ts pcommon.Timestamp, val int64) {
mb.metricK8sContainerMemoryRequest.recordDataPoint(mb.startTime, ts, val)
}
// RecordK8sContainerReadyDataPoint adds a data point to k8s.container.ready metric.
func (mb *MetricsBuilder) RecordK8sContainerReadyDataPoint(ts pcommon.Timestamp, val int64) {
mb.metricK8sContainerReady.recordDataPoint(mb.startTime, ts, val)
}
// RecordK8sContainerRestartsDataPoint adds a data point to k8s.container.restarts metric.
func (mb *MetricsBuilder) RecordK8sContainerRestartsDataPoint(ts pcommon.Timestamp, val int64) {
mb.metricK8sContainerRestarts.recordDataPoint(mb.startTime, ts, val)
}
// RecordK8sContainerStorageLimitDataPoint adds a data point to k8s.container.storage_limit metric.
func (mb *MetricsBuilder) RecordK8sContainerStorageLimitDataPoint(ts pcommon.Timestamp, val int64) {
mb.metricK8sContainerStorageLimit.recordDataPoint(mb.startTime, ts, val)
}
// RecordK8sContainerStorageRequestDataPoint adds a data point to k8s.container.storage_request metric.
func (mb *MetricsBuilder) RecordK8sContainerStorageRequestDataPoint(ts pcommon.Timestamp, val int64) {
mb.metricK8sContainerStorageRequest.recordDataPoint(mb.startTime, ts, val)
}
// RecordK8sCronjobActiveJobsDataPoint adds a data point to k8s.cronjob.active_jobs metric.
func (mb *MetricsBuilder) RecordK8sCronjobActiveJobsDataPoint(ts pcommon.Timestamp, val int64) {
mb.metricK8sCronjobActiveJobs.recordDataPoint(mb.startTime, ts, val)
}
// RecordK8sDaemonsetCurrentScheduledNodesDataPoint adds a data point to k8s.daemonset.current_scheduled_nodes metric.
func (mb *MetricsBuilder) RecordK8sDaemonsetCurrentScheduledNodesDataPoint(ts pcommon.Timestamp, val int64) {
mb.metricK8sDaemonsetCurrentScheduledNodes.recordDataPoint(mb.startTime, ts, val)
}
// RecordK8sDaemonsetDesiredScheduledNodesDataPoint adds a data point to k8s.daemonset.desired_scheduled_nodes metric.
func (mb *MetricsBuilder) RecordK8sDaemonsetDesiredScheduledNodesDataPoint(ts pcommon.Timestamp, val int64) {
mb.metricK8sDaemonsetDesiredScheduledNodes.recordDataPoint(mb.startTime, ts, val)
}
// RecordK8sDaemonsetMisscheduledNodesDataPoint adds a data point to k8s.daemonset.misscheduled_nodes metric.
func (mb *MetricsBuilder) RecordK8sDaemonsetMisscheduledNodesDataPoint(ts pcommon.Timestamp, val int64) {
mb.metricK8sDaemonsetMisscheduledNodes.recordDataPoint(mb.startTime, ts, val)
}
// RecordK8sDaemonsetReadyNodesDataPoint adds a data point to k8s.daemonset.ready_nodes metric.
func (mb *MetricsBuilder) RecordK8sDaemonsetReadyNodesDataPoint(ts pcommon.Timestamp, val int64) {
mb.metricK8sDaemonsetReadyNodes.recordDataPoint(mb.startTime, ts, val)
}
// RecordK8sDeploymentAvailableDataPoint adds a data point to k8s.deployment.available metric.
func (mb *MetricsBuilder) RecordK8sDeploymentAvailableDataPoint(ts pcommon.Timestamp, val int64) {
mb.metricK8sDeploymentAvailable.recordDataPoint(mb.startTime, ts, val)
}
// RecordK8sDeploymentDesiredDataPoint adds a data point to k8s.deployment.desired metric.
func (mb *MetricsBuilder) RecordK8sDeploymentDesiredDataPoint(ts pcommon.Timestamp, val int64) {
mb.metricK8sDeploymentDesired.recordDataPoint(mb.startTime, ts, val)
}
// RecordK8sHpaCurrentReplicasDataPoint adds a data point to k8s.hpa.current_replicas metric.
func (mb *MetricsBuilder) RecordK8sHpaCurrentReplicasDataPoint(ts pcommon.Timestamp, val int64) {
mb.metricK8sHpaCurrentReplicas.recordDataPoint(mb.startTime, ts, val)
}
// RecordK8sHpaDesiredReplicasDataPoint adds a data point to k8s.hpa.desired_replicas metric.
func (mb *MetricsBuilder) RecordK8sHpaDesiredReplicasDataPoint(ts pcommon.Timestamp, val int64) {
mb.metricK8sHpaDesiredReplicas.recordDataPoint(mb.startTime, ts, val)
}
// RecordK8sHpaMaxReplicasDataPoint adds a data point to k8s.hpa.max_replicas metric.
func (mb *MetricsBuilder) RecordK8sHpaMaxReplicasDataPoint(ts pcommon.Timestamp, val int64) {
mb.metricK8sHpaMaxReplicas.recordDataPoint(mb.startTime, ts, val)
}
// RecordK8sHpaMinReplicasDataPoint adds a data point to k8s.hpa.min_replicas metric.
func (mb *MetricsBuilder) RecordK8sHpaMinReplicasDataPoint(ts pcommon.Timestamp, val int64) {
mb.metricK8sHpaMinReplicas.recordDataPoint(mb.startTime, ts, val)
}
// RecordK8sJobActivePodsDataPoint adds a data point to k8s.job.active_pods metric.
func (mb *MetricsBuilder) RecordK8sJobActivePodsDataPoint(ts pcommon.Timestamp, val int64) {
mb.metricK8sJobActivePods.recordDataPoint(mb.startTime, ts, val)
}
// RecordK8sJobDesiredSuccessfulPodsDataPoint adds a data point to k8s.job.desired_successful_pods metric.
func (mb *MetricsBuilder) RecordK8sJobDesiredSuccessfulPodsDataPoint(ts pcommon.Timestamp, val int64) {
mb.metricK8sJobDesiredSuccessfulPods.recordDataPoint(mb.startTime, ts, val)
}
// RecordK8sJobFailedPodsDataPoint adds a data point to k8s.job.failed_pods metric.
func (mb *MetricsBuilder) RecordK8sJobFailedPodsDataPoint(ts pcommon.Timestamp, val int64) {
mb.metricK8sJobFailedPods.recordDataPoint(mb.startTime, ts, val)
}
// RecordK8sJobMaxParallelPodsDataPoint adds a data point to k8s.job.max_parallel_pods metric.
func (mb *MetricsBuilder) RecordK8sJobMaxParallelPodsDataPoint(ts pcommon.Timestamp, val int64) {
mb.metricK8sJobMaxParallelPods.recordDataPoint(mb.startTime, ts, val)
}
// RecordK8sJobSuccessfulPodsDataPoint adds a data point to k8s.job.successful_pods metric.
func (mb *MetricsBuilder) RecordK8sJobSuccessfulPodsDataPoint(ts pcommon.Timestamp, val int64) {
mb.metricK8sJobSuccessfulPods.recordDataPoint(mb.startTime, ts, val)
}
// RecordK8sNamespacePhaseDataPoint adds a data point to k8s.namespace.phase metric.
func (mb *MetricsBuilder) RecordK8sNamespacePhaseDataPoint(ts pcommon.Timestamp, val int64) {
mb.metricK8sNamespacePhase.recordDataPoint(mb.startTime, ts, val)
}
// RecordK8sNodeConditionDataPoint adds a data point to k8s.node.condition metric.
func (mb *MetricsBuilder) RecordK8sNodeConditionDataPoint(ts pcommon.Timestamp, val int64, conditionAttributeValue string) {
mb.metricK8sNodeCondition.recordDataPoint(mb.startTime, ts, val, conditionAttributeValue)
}
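// Usage sketch (illustrative, not generated by mdatagen): k8s.node.condition
// carries a condition attribute, so one data point is recorded per condition
// type. The conditions map and the 1/0/-1 value encoding are assumptions for
// this sketch, standing in for whatever convention the caller uses.
func exampleRecordNodeConditions(mb *MetricsBuilder, ts pcommon.Timestamp, conditions map[string]int64) {
	for conditionType, status := range conditions {
		// status: 1 = true, 0 = false, -1 = unknown (assumed convention).
		mb.RecordK8sNodeConditionDataPoint(ts, status, conditionType)
	}
}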
// RecordK8sPodPhaseDataPoint adds a data point to k8s.pod.phase metric.
func (mb *MetricsBuilder) RecordK8sPodPhaseDataPoint(ts pcommon.Timestamp, val int64) {
mb.metricK8sPodPhase.recordDataPoint(mb.startTime, ts, val)
}
// RecordK8sPodStatusReasonDataPoint adds a data point to k8s.pod.status_reason metric.
func (mb *MetricsBuilder) RecordK8sPodStatusReasonDataPoint(ts pcommon.Timestamp, val int64) {
mb.metricK8sPodStatusReason.recordDataPoint(mb.startTime, ts, val)
}
// RecordK8sReplicasetAvailableDataPoint adds a data point to k8s.replicaset.available metric.
func (mb *MetricsBuilder) RecordK8sReplicasetAvailableDataPoint(ts pcommon.Timestamp, val int64) {
mb.metricK8sReplicasetAvailable.recordDataPoint(mb.startTime, ts, val)
}
// RecordK8sReplicasetDesiredDataPoint adds a data point to k8s.replicaset.desired metric.
func (mb *MetricsBuilder) RecordK8sReplicasetDesiredDataPoint(ts pcommon.Timestamp, val int64) {
mb.metricK8sReplicasetDesired.recordDataPoint(mb.startTime, ts, val)
}
// RecordK8sReplicationControllerAvailableDataPoint adds a data point to k8s.replication_controller.available metric.
func (mb *MetricsBuilder) RecordK8sReplicationControllerAvailableDataPoint(ts pcommon.Timestamp, val int64) {
mb.metricK8sReplicationControllerAvailable.recordDataPoint(mb.startTime, ts, val)
}
// RecordK8sReplicationControllerDesiredDataPoint adds a data point to k8s.replication_controller.desired metric.
func (mb *MetricsBuilder) RecordK8sReplicationControllerDesiredDataPoint(ts pcommon.Timestamp, val int64) {
mb.metricK8sReplicationControllerDesired.recordDataPoint(mb.startTime, ts, val)
}
// RecordK8sResourceQuotaHardLimitDataPoint adds a data point to k8s.resource_quota.hard_limit metric.
func (mb *MetricsBuilder) RecordK8sResourceQuotaHardLimitDataPoint(ts pcommon.Timestamp, val int64, resourceAttributeValue string) {
mb.metricK8sResourceQuotaHardLimit.recordDataPoint(mb.startTime, ts, val, resourceAttributeValue)
}
// RecordK8sResourceQuotaUsedDataPoint adds a data point to k8s.resource_quota.used metric.
func (mb *MetricsBuilder) RecordK8sResourceQuotaUsedDataPoint(ts pcommon.Timestamp, val int64, resourceAttributeValue string) {
mb.metricK8sResourceQuotaUsed.recordDataPoint(mb.startTime, ts, val, resourceAttributeValue)
}
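// Usage sketch (illustrative, not generated by mdatagen): the resource quota
// metrics take a resource attribute (for example "requests.cpu"), so a caller
// usually iterates per-resource maps. The hard/used map inputs below are
// assumptions standing in for the quota status a scraper would read.
func exampleRecordResourceQuota(mb *MetricsBuilder, ts pcommon.Timestamp, hard, used map[string]int64) {
	for resource, limit := range hard {
		mb.RecordK8sResourceQuotaHardLimitDataPoint(ts, limit, resource)
	}
	for resource, usage := range used {
		mb.RecordK8sResourceQuotaUsedDataPoint(ts, usage, resource)
	}
}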
// RecordK8sStatefulsetCurrentPodsDataPoint adds a data point to k8s.statefulset.current_pods metric.
func (mb *MetricsBuilder) RecordK8sStatefulsetCurrentPodsDataPoint(ts pcommon.Timestamp, val int64) {
mb.metricK8sStatefulsetCurrentPods.recordDataPoint(mb.startTime, ts, val)
}
// RecordK8sStatefulsetDesiredPodsDataPoint adds a data point to k8s.statefulset.desired_pods metric.
func (mb *MetricsBuilder) RecordK8sStatefulsetDesiredPodsDataPoint(ts pcommon.Timestamp, val int64) {
mb.metricK8sStatefulsetDesiredPods.recordDataPoint(mb.startTime, ts, val)
}
// RecordK8sStatefulsetReadyPodsDataPoint adds a data point to k8s.statefulset.ready_pods metric.
func (mb *MetricsBuilder) RecordK8sStatefulsetReadyPodsDataPoint(ts pcommon.Timestamp, val int64) {
mb.metricK8sStatefulsetReadyPods.recordDataPoint(mb.startTime, ts, val)
}
// RecordK8sStatefulsetUpdatedPodsDataPoint adds a data point to k8s.statefulset.updated_pods metric.
func (mb *MetricsBuilder) RecordK8sStatefulsetUpdatedPodsDataPoint(ts pcommon.Timestamp, val int64) {
mb.metricK8sStatefulsetUpdatedPods.recordDataPoint(mb.startTime, ts, val)
}
// RecordOpenshiftAppliedclusterquotaLimitDataPoint adds a data point to openshift.appliedclusterquota.limit metric.
func (mb *MetricsBuilder) RecordOpenshiftAppliedclusterquotaLimitDataPoint(ts pcommon.Timestamp, val int64, k8sNamespaceNameAttributeValue string, resourceAttributeValue string) {
mb.metricOpenshiftAppliedclusterquotaLimit.recordDataPoint(mb.startTime, ts, val, k8sNamespaceNameAttributeValue, resourceAttributeValue)
}
// RecordOpenshiftAppliedclusterquotaUsedDataPoint adds a data point to openshift.appliedclusterquota.used metric.
func (mb *MetricsBuilder) RecordOpenshiftAppliedclusterquotaUsedDataPoint(ts pcommon.Timestamp, val int64, k8sNamespaceNameAttributeValue string, resourceAttributeValue string) {
mb.metricOpenshiftAppliedclusterquotaUsed.recordDataPoint(mb.startTime, ts, val, k8sNamespaceNameAttributeValue, resourceAttributeValue)
}
// RecordOpenshiftClusterquotaLimitDataPoint adds a data point to openshift.clusterquota.limit metric.
func (mb *MetricsBuilder) RecordOpenshiftClusterquotaLimitDataPoint(ts pcommon.Timestamp, val int64, resourceAttributeValue string) {
mb.metricOpenshiftClusterquotaLimit.recordDataPoint(mb.startTime, ts, val, resourceAttributeValue)
}
// RecordOpenshiftClusterquotaUsedDataPoint adds a data point to openshift.clusterquota.used metric.
func (mb *MetricsBuilder) RecordOpenshiftClusterquotaUsedDataPoint(ts pcommon.Timestamp, val int64, resourceAttributeValue string) {
mb.metricOpenshiftClusterquotaUsed.recordDataPoint(mb.startTime, ts, val, resourceAttributeValue)
}
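// Usage sketch (illustrative, not generated by mdatagen): the OpenShift quota
// metrics are keyed by a resource attribute, and the appliedclusterquota
// variants additionally carry the namespace the quota applies to, so recording
// is typically a nested loop. The perNamespaceLimits input is an assumption
// for illustration.
func exampleRecordAppliedClusterQuotaLimits(mb *MetricsBuilder, ts pcommon.Timestamp, perNamespaceLimits map[string]map[string]int64) {
	for namespace, limits := range perNamespaceLimits {
		for resource, limit := range limits {
			mb.RecordOpenshiftAppliedclusterquotaLimitDataPoint(ts, limit, namespace, resource)
		}
	}
}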
// Reset resets the metrics builder to its initial state. It should be used when the external metrics source is restarted,
// so that the metrics builder updates its startTime and resets its internal state accordingly.
func (mb *MetricsBuilder) Reset(options ...MetricBuilderOption) {
mb.startTime = pcommon.NewTimestampFromTime(time.Now())
for _, op := range options {
op.apply(mb)
}
}
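// Usage sketch (illustrative, not generated by mdatagen): when a restart of
// the monitored source is detected, pending data is flushed and Reset starts a
// new interval with a fresh start timestamp. Emit is assumed to be the
// builder's generated flush method defined earlier in this file; Reset also
// accepts MetricBuilderOption values, as its signature above shows.
func exampleHandleSourceRestart(mb *MetricsBuilder) pmetric.Metrics {
	// Flush whatever was recorded before the restart was detected.
	before := mb.Emit()
	// Begin a new stream of cumulative metrics from "now".
	mb.Reset()
	return before
}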