receiver/dockerstatsreceiver/internal/metadata/generated_metrics.go
// Code generated by mdatagen. DO NOT EDIT.
package metadata
import (
"time"
"go.opentelemetry.io/collector/component"
"go.opentelemetry.io/collector/filter"
"go.opentelemetry.io/collector/pdata/pcommon"
"go.opentelemetry.io/collector/pdata/pmetric"
"go.opentelemetry.io/collector/receiver"
conventions "go.opentelemetry.io/collector/semconv/v1.27.0"
)
var MetricsInfo = metricsInfo{
ContainerBlockioIoMergedRecursive: metricInfo{
Name: "container.blockio.io_merged_recursive",
},
ContainerBlockioIoQueuedRecursive: metricInfo{
Name: "container.blockio.io_queued_recursive",
},
ContainerBlockioIoServiceBytesRecursive: metricInfo{
Name: "container.blockio.io_service_bytes_recursive",
},
ContainerBlockioIoServiceTimeRecursive: metricInfo{
Name: "container.blockio.io_service_time_recursive",
},
ContainerBlockioIoServicedRecursive: metricInfo{
Name: "container.blockio.io_serviced_recursive",
},
ContainerBlockioIoTimeRecursive: metricInfo{
Name: "container.blockio.io_time_recursive",
},
ContainerBlockioIoWaitTimeRecursive: metricInfo{
Name: "container.blockio.io_wait_time_recursive",
},
ContainerBlockioSectorsRecursive: metricInfo{
Name: "container.blockio.sectors_recursive",
},
ContainerCPULimit: metricInfo{
Name: "container.cpu.limit",
},
ContainerCPULogicalCount: metricInfo{
Name: "container.cpu.logical.count",
},
ContainerCPUShares: metricInfo{
Name: "container.cpu.shares",
},
ContainerCPUThrottlingDataPeriods: metricInfo{
Name: "container.cpu.throttling_data.periods",
},
ContainerCPUThrottlingDataThrottledPeriods: metricInfo{
Name: "container.cpu.throttling_data.throttled_periods",
},
ContainerCPUThrottlingDataThrottledTime: metricInfo{
Name: "container.cpu.throttling_data.throttled_time",
},
ContainerCPUUsageKernelmode: metricInfo{
Name: "container.cpu.usage.kernelmode",
},
ContainerCPUUsagePercpu: metricInfo{
Name: "container.cpu.usage.percpu",
},
ContainerCPUUsageSystem: metricInfo{
Name: "container.cpu.usage.system",
},
ContainerCPUUsageTotal: metricInfo{
Name: "container.cpu.usage.total",
},
ContainerCPUUsageUsermode: metricInfo{
Name: "container.cpu.usage.usermode",
},
ContainerCPUUtilization: metricInfo{
Name: "container.cpu.utilization",
},
ContainerMemoryActiveAnon: metricInfo{
Name: "container.memory.active_anon",
},
ContainerMemoryActiveFile: metricInfo{
Name: "container.memory.active_file",
},
ContainerMemoryAnon: metricInfo{
Name: "container.memory.anon",
},
ContainerMemoryCache: metricInfo{
Name: "container.memory.cache",
},
ContainerMemoryDirty: metricInfo{
Name: "container.memory.dirty",
},
ContainerMemoryFails: metricInfo{
Name: "container.memory.fails",
},
ContainerMemoryFile: metricInfo{
Name: "container.memory.file",
},
ContainerMemoryHierarchicalMemoryLimit: metricInfo{
Name: "container.memory.hierarchical_memory_limit",
},
ContainerMemoryHierarchicalMemswLimit: metricInfo{
Name: "container.memory.hierarchical_memsw_limit",
},
ContainerMemoryInactiveAnon: metricInfo{
Name: "container.memory.inactive_anon",
},
ContainerMemoryInactiveFile: metricInfo{
Name: "container.memory.inactive_file",
},
ContainerMemoryMappedFile: metricInfo{
Name: "container.memory.mapped_file",
},
ContainerMemoryPercent: metricInfo{
Name: "container.memory.percent",
},
ContainerMemoryPgfault: metricInfo{
Name: "container.memory.pgfault",
},
ContainerMemoryPgmajfault: metricInfo{
Name: "container.memory.pgmajfault",
},
ContainerMemoryPgpgin: metricInfo{
Name: "container.memory.pgpgin",
},
ContainerMemoryPgpgout: metricInfo{
Name: "container.memory.pgpgout",
},
ContainerMemoryRss: metricInfo{
Name: "container.memory.rss",
},
ContainerMemoryRssHuge: metricInfo{
Name: "container.memory.rss_huge",
},
ContainerMemoryTotalActiveAnon: metricInfo{
Name: "container.memory.total_active_anon",
},
ContainerMemoryTotalActiveFile: metricInfo{
Name: "container.memory.total_active_file",
},
ContainerMemoryTotalCache: metricInfo{
Name: "container.memory.total_cache",
},
ContainerMemoryTotalDirty: metricInfo{
Name: "container.memory.total_dirty",
},
ContainerMemoryTotalInactiveAnon: metricInfo{
Name: "container.memory.total_inactive_anon",
},
ContainerMemoryTotalInactiveFile: metricInfo{
Name: "container.memory.total_inactive_file",
},
ContainerMemoryTotalMappedFile: metricInfo{
Name: "container.memory.total_mapped_file",
},
ContainerMemoryTotalPgfault: metricInfo{
Name: "container.memory.total_pgfault",
},
ContainerMemoryTotalPgmajfault: metricInfo{
Name: "container.memory.total_pgmajfault",
},
ContainerMemoryTotalPgpgin: metricInfo{
Name: "container.memory.total_pgpgin",
},
ContainerMemoryTotalPgpgout: metricInfo{
Name: "container.memory.total_pgpgout",
},
ContainerMemoryTotalRss: metricInfo{
Name: "container.memory.total_rss",
},
ContainerMemoryTotalRssHuge: metricInfo{
Name: "container.memory.total_rss_huge",
},
ContainerMemoryTotalUnevictable: metricInfo{
Name: "container.memory.total_unevictable",
},
ContainerMemoryTotalWriteback: metricInfo{
Name: "container.memory.total_writeback",
},
ContainerMemoryUnevictable: metricInfo{
Name: "container.memory.unevictable",
},
ContainerMemoryUsageLimit: metricInfo{
Name: "container.memory.usage.limit",
},
ContainerMemoryUsageMax: metricInfo{
Name: "container.memory.usage.max",
},
ContainerMemoryUsageTotal: metricInfo{
Name: "container.memory.usage.total",
},
ContainerMemoryWriteback: metricInfo{
Name: "container.memory.writeback",
},
ContainerNetworkIoUsageRxBytes: metricInfo{
Name: "container.network.io.usage.rx_bytes",
},
ContainerNetworkIoUsageRxDropped: metricInfo{
Name: "container.network.io.usage.rx_dropped",
},
ContainerNetworkIoUsageRxErrors: metricInfo{
Name: "container.network.io.usage.rx_errors",
},
ContainerNetworkIoUsageRxPackets: metricInfo{
Name: "container.network.io.usage.rx_packets",
},
ContainerNetworkIoUsageTxBytes: metricInfo{
Name: "container.network.io.usage.tx_bytes",
},
ContainerNetworkIoUsageTxDropped: metricInfo{
Name: "container.network.io.usage.tx_dropped",
},
ContainerNetworkIoUsageTxErrors: metricInfo{
Name: "container.network.io.usage.tx_errors",
},
ContainerNetworkIoUsageTxPackets: metricInfo{
Name: "container.network.io.usage.tx_packets",
},
ContainerPidsCount: metricInfo{
Name: "container.pids.count",
},
ContainerPidsLimit: metricInfo{
Name: "container.pids.limit",
},
ContainerRestarts: metricInfo{
Name: "container.restarts",
},
ContainerUptime: metricInfo{
Name: "container.uptime",
},
}
type metricsInfo struct {
ContainerBlockioIoMergedRecursive metricInfo
ContainerBlockioIoQueuedRecursive metricInfo
ContainerBlockioIoServiceBytesRecursive metricInfo
ContainerBlockioIoServiceTimeRecursive metricInfo
ContainerBlockioIoServicedRecursive metricInfo
ContainerBlockioIoTimeRecursive metricInfo
ContainerBlockioIoWaitTimeRecursive metricInfo
ContainerBlockioSectorsRecursive metricInfo
ContainerCPULimit metricInfo
ContainerCPULogicalCount metricInfo
ContainerCPUShares metricInfo
ContainerCPUThrottlingDataPeriods metricInfo
ContainerCPUThrottlingDataThrottledPeriods metricInfo
ContainerCPUThrottlingDataThrottledTime metricInfo
ContainerCPUUsageKernelmode metricInfo
ContainerCPUUsagePercpu metricInfo
ContainerCPUUsageSystem metricInfo
ContainerCPUUsageTotal metricInfo
ContainerCPUUsageUsermode metricInfo
ContainerCPUUtilization metricInfo
ContainerMemoryActiveAnon metricInfo
ContainerMemoryActiveFile metricInfo
ContainerMemoryAnon metricInfo
ContainerMemoryCache metricInfo
ContainerMemoryDirty metricInfo
ContainerMemoryFails metricInfo
ContainerMemoryFile metricInfo
ContainerMemoryHierarchicalMemoryLimit metricInfo
ContainerMemoryHierarchicalMemswLimit metricInfo
ContainerMemoryInactiveAnon metricInfo
ContainerMemoryInactiveFile metricInfo
ContainerMemoryMappedFile metricInfo
ContainerMemoryPercent metricInfo
ContainerMemoryPgfault metricInfo
ContainerMemoryPgmajfault metricInfo
ContainerMemoryPgpgin metricInfo
ContainerMemoryPgpgout metricInfo
ContainerMemoryRss metricInfo
ContainerMemoryRssHuge metricInfo
ContainerMemoryTotalActiveAnon metricInfo
ContainerMemoryTotalActiveFile metricInfo
ContainerMemoryTotalCache metricInfo
ContainerMemoryTotalDirty metricInfo
ContainerMemoryTotalInactiveAnon metricInfo
ContainerMemoryTotalInactiveFile metricInfo
ContainerMemoryTotalMappedFile metricInfo
ContainerMemoryTotalPgfault metricInfo
ContainerMemoryTotalPgmajfault metricInfo
ContainerMemoryTotalPgpgin metricInfo
ContainerMemoryTotalPgpgout metricInfo
ContainerMemoryTotalRss metricInfo
ContainerMemoryTotalRssHuge metricInfo
ContainerMemoryTotalUnevictable metricInfo
ContainerMemoryTotalWriteback metricInfo
ContainerMemoryUnevictable metricInfo
ContainerMemoryUsageLimit metricInfo
ContainerMemoryUsageMax metricInfo
ContainerMemoryUsageTotal metricInfo
ContainerMemoryWriteback metricInfo
ContainerNetworkIoUsageRxBytes metricInfo
ContainerNetworkIoUsageRxDropped metricInfo
ContainerNetworkIoUsageRxErrors metricInfo
ContainerNetworkIoUsageRxPackets metricInfo
ContainerNetworkIoUsageTxBytes metricInfo
ContainerNetworkIoUsageTxDropped metricInfo
ContainerNetworkIoUsageTxErrors metricInfo
ContainerNetworkIoUsageTxPackets metricInfo
ContainerPidsCount metricInfo
ContainerPidsLimit metricInfo
ContainerRestarts metricInfo
ContainerUptime metricInfo
}
type metricInfo struct {
Name string
}
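// Illustrative sketch, not produced by mdatagen: MetricsInfo exposes the
// canonical metric names declared above, which callers can use for lookups or
// for building include/exclude filters. exampleLookupMetricNames is a
// hypothetical helper that relies only on the declarations in this file.
func exampleLookupMetricNames() []string {
	return []string{
		MetricsInfo.ContainerCPUUtilization.Name,
		MetricsInfo.ContainerMemoryUsageTotal.Name,
		MetricsInfo.ContainerNetworkIoUsageRxBytes.Name,
	}
}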
type metricContainerBlockioIoMergedRecursive struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills container.blockio.io_merged_recursive metric with initial data.
func (m *metricContainerBlockioIoMergedRecursive) init() {
m.data.SetName("container.blockio.io_merged_recursive")
m.data.SetDescription("Number of bios/requests merged into requests belonging to this cgroup and its descendant cgroups (Only available with cgroups v1).")
m.data.SetUnit("{operations}")
m.data.SetEmptySum()
m.data.Sum().SetIsMonotonic(true)
m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricContainerBlockioIoMergedRecursive) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, deviceMajorAttributeValue string, deviceMinorAttributeValue string, operationAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Sum().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
dp.Attributes().PutStr("device_major", deviceMajorAttributeValue)
dp.Attributes().PutStr("device_minor", deviceMinorAttributeValue)
dp.Attributes().PutStr("operation", operationAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricContainerBlockioIoMergedRecursive) updateCapacity() {
if m.data.Sum().DataPoints().Len() > m.capacity {
m.capacity = m.data.Sum().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricContainerBlockioIoMergedRecursive) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricContainerBlockioIoMergedRecursive(cfg MetricConfig) metricContainerBlockioIoMergedRecursive {
m := metricContainerBlockioIoMergedRecursive{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
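// Hypothetical usage sketch, not part of the generated API: every generated
// metric follows the same lifecycle of construct -> recordDataPoint -> emit.
// MetricConfig is assumed to be the struct from generated_config.go (only its
// Enabled field is used here), and the attribute values "8", "0", "read" are
// placeholders for device_major, device_minor, and operation.
func exampleBlockioIoMergedLifecycle() pmetric.MetricSlice {
	now := pcommon.NewTimestampFromTime(time.Now())
	m := newMetricContainerBlockioIoMergedRecursive(MetricConfig{Enabled: true})
	// One data point per (device_major, device_minor, operation) combination.
	m.recordDataPoint(now, now, 42, "8", "0", "read")
	out := pmetric.NewMetricSlice()
	m.emit(out) // moves the buffered metric into out and re-initializes the buffer
	return out
}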
type metricContainerBlockioIoQueuedRecursive struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills container.blockio.io_queued_recursive metric with initial data.
func (m *metricContainerBlockioIoQueuedRecursive) init() {
m.data.SetName("container.blockio.io_queued_recursive")
m.data.SetDescription("Number of requests queued up for this cgroup and its descendant cgroups (Only available with cgroups v1).")
m.data.SetUnit("{operations}")
m.data.SetEmptySum()
m.data.Sum().SetIsMonotonic(true)
m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricContainerBlockioIoQueuedRecursive) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, deviceMajorAttributeValue string, deviceMinorAttributeValue string, operationAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Sum().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
dp.Attributes().PutStr("device_major", deviceMajorAttributeValue)
dp.Attributes().PutStr("device_minor", deviceMinorAttributeValue)
dp.Attributes().PutStr("operation", operationAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricContainerBlockioIoQueuedRecursive) updateCapacity() {
if m.data.Sum().DataPoints().Len() > m.capacity {
m.capacity = m.data.Sum().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricContainerBlockioIoQueuedRecursive) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricContainerBlockioIoQueuedRecursive(cfg MetricConfig) metricContainerBlockioIoQueuedRecursive {
m := metricContainerBlockioIoQueuedRecursive{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricContainerBlockioIoServiceBytesRecursive struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills container.blockio.io_service_bytes_recursive metric with initial data.
func (m *metricContainerBlockioIoServiceBytesRecursive) init() {
m.data.SetName("container.blockio.io_service_bytes_recursive")
m.data.SetDescription("Number of bytes transferred to/from the disk by the group and descendant groups.")
m.data.SetUnit("By")
m.data.SetEmptySum()
m.data.Sum().SetIsMonotonic(true)
m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricContainerBlockioIoServiceBytesRecursive) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, deviceMajorAttributeValue string, deviceMinorAttributeValue string, operationAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Sum().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
dp.Attributes().PutStr("device_major", deviceMajorAttributeValue)
dp.Attributes().PutStr("device_minor", deviceMinorAttributeValue)
dp.Attributes().PutStr("operation", operationAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricContainerBlockioIoServiceBytesRecursive) updateCapacity() {
if m.data.Sum().DataPoints().Len() > m.capacity {
m.capacity = m.data.Sum().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricContainerBlockioIoServiceBytesRecursive) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricContainerBlockioIoServiceBytesRecursive(cfg MetricConfig) metricContainerBlockioIoServiceBytesRecursive {
m := metricContainerBlockioIoServiceBytesRecursive{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricContainerBlockioIoServiceTimeRecursive struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills container.blockio.io_service_time_recursive metric with initial data.
func (m *metricContainerBlockioIoServiceTimeRecursive) init() {
m.data.SetName("container.blockio.io_service_time_recursive")
m.data.SetDescription("Total amount of time in nanoseconds between request dispatch and request completion for the IOs done by this cgroup and descendant cgroups (Only available with cgroups v1).")
m.data.SetUnit("ns")
m.data.SetEmptySum()
m.data.Sum().SetIsMonotonic(true)
m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricContainerBlockioIoServiceTimeRecursive) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, deviceMajorAttributeValue string, deviceMinorAttributeValue string, operationAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Sum().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
dp.Attributes().PutStr("device_major", deviceMajorAttributeValue)
dp.Attributes().PutStr("device_minor", deviceMinorAttributeValue)
dp.Attributes().PutStr("operation", operationAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricContainerBlockioIoServiceTimeRecursive) updateCapacity() {
if m.data.Sum().DataPoints().Len() > m.capacity {
m.capacity = m.data.Sum().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricContainerBlockioIoServiceTimeRecursive) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricContainerBlockioIoServiceTimeRecursive(cfg MetricConfig) metricContainerBlockioIoServiceTimeRecursive {
m := metricContainerBlockioIoServiceTimeRecursive{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricContainerBlockioIoServicedRecursive struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills container.blockio.io_serviced_recursive metric with initial data.
func (m *metricContainerBlockioIoServicedRecursive) init() {
m.data.SetName("container.blockio.io_serviced_recursive")
m.data.SetDescription("Number of IOs (bio) issued to the disk by the group and descendant groups (Only available with cgroups v1).")
m.data.SetUnit("{operations}")
m.data.SetEmptySum()
m.data.Sum().SetIsMonotonic(true)
m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricContainerBlockioIoServicedRecursive) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, deviceMajorAttributeValue string, deviceMinorAttributeValue string, operationAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Sum().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
dp.Attributes().PutStr("device_major", deviceMajorAttributeValue)
dp.Attributes().PutStr("device_minor", deviceMinorAttributeValue)
dp.Attributes().PutStr("operation", operationAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricContainerBlockioIoServicedRecursive) updateCapacity() {
if m.data.Sum().DataPoints().Len() > m.capacity {
m.capacity = m.data.Sum().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricContainerBlockioIoServicedRecursive) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricContainerBlockioIoServicedRecursive(cfg MetricConfig) metricContainerBlockioIoServicedRecursive {
m := metricContainerBlockioIoServicedRecursive{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricContainerBlockioIoTimeRecursive struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills container.blockio.io_time_recursive metric with initial data.
func (m *metricContainerBlockioIoTimeRecursive) init() {
m.data.SetName("container.blockio.io_time_recursive")
m.data.SetDescription("Disk time allocated to cgroup (and descendant cgroups) per device in milliseconds (Only available with cgroups v1).")
m.data.SetUnit("ms")
m.data.SetEmptySum()
m.data.Sum().SetIsMonotonic(true)
m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricContainerBlockioIoTimeRecursive) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, deviceMajorAttributeValue string, deviceMinorAttributeValue string, operationAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Sum().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
dp.Attributes().PutStr("device_major", deviceMajorAttributeValue)
dp.Attributes().PutStr("device_minor", deviceMinorAttributeValue)
dp.Attributes().PutStr("operation", operationAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricContainerBlockioIoTimeRecursive) updateCapacity() {
if m.data.Sum().DataPoints().Len() > m.capacity {
m.capacity = m.data.Sum().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricContainerBlockioIoTimeRecursive) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricContainerBlockioIoTimeRecursive(cfg MetricConfig) metricContainerBlockioIoTimeRecursive {
m := metricContainerBlockioIoTimeRecursive{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricContainerBlockioIoWaitTimeRecursive struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills container.blockio.io_wait_time_recursive metric with initial data.
func (m *metricContainerBlockioIoWaitTimeRecursive) init() {
m.data.SetName("container.blockio.io_wait_time_recursive")
m.data.SetDescription("Total amount of time the IOs for this cgroup (and descendant cgroups) spent waiting in the scheduler queues for service (Only available with cgroups v1).")
m.data.SetUnit("ns")
m.data.SetEmptySum()
m.data.Sum().SetIsMonotonic(true)
m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricContainerBlockioIoWaitTimeRecursive) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, deviceMajorAttributeValue string, deviceMinorAttributeValue string, operationAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Sum().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
dp.Attributes().PutStr("device_major", deviceMajorAttributeValue)
dp.Attributes().PutStr("device_minor", deviceMinorAttributeValue)
dp.Attributes().PutStr("operation", operationAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricContainerBlockioIoWaitTimeRecursive) updateCapacity() {
if m.data.Sum().DataPoints().Len() > m.capacity {
m.capacity = m.data.Sum().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricContainerBlockioIoWaitTimeRecursive) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricContainerBlockioIoWaitTimeRecursive(cfg MetricConfig) metricContainerBlockioIoWaitTimeRecursive {
m := metricContainerBlockioIoWaitTimeRecursive{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricContainerBlockioSectorsRecursive struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills container.blockio.sectors_recursive metric with initial data.
func (m *metricContainerBlockioSectorsRecursive) init() {
m.data.SetName("container.blockio.sectors_recursive")
m.data.SetDescription("Number of sectors transferred to/from disk by the group and descendant groups (Only available with cgroups v1).")
m.data.SetUnit("{sectors}")
m.data.SetEmptySum()
m.data.Sum().SetIsMonotonic(true)
m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricContainerBlockioSectorsRecursive) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, deviceMajorAttributeValue string, deviceMinorAttributeValue string, operationAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Sum().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
dp.Attributes().PutStr("device_major", deviceMajorAttributeValue)
dp.Attributes().PutStr("device_minor", deviceMinorAttributeValue)
dp.Attributes().PutStr("operation", operationAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricContainerBlockioSectorsRecursive) updateCapacity() {
if m.data.Sum().DataPoints().Len() > m.capacity {
m.capacity = m.data.Sum().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricContainerBlockioSectorsRecursive) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricContainerBlockioSectorsRecursive(cfg MetricConfig) metricContainerBlockioSectorsRecursive {
m := metricContainerBlockioSectorsRecursive{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricContainerCPULimit struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills container.cpu.limit metric with initial data.
func (m *metricContainerCPULimit) init() {
m.data.SetName("container.cpu.limit")
m.data.SetDescription("CPU limit set for the container.")
m.data.SetUnit("{cpus}")
m.data.SetEmptyGauge()
}
func (m *metricContainerCPULimit) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleValue(val)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricContainerCPULimit) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricContainerCPULimit) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricContainerCPULimit(cfg MetricConfig) metricContainerCPULimit {
m := metricContainerCPULimit{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
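// Hypothetical sketch, not generated by mdatagen: gauge-typed metrics such as
// container.cpu.limit follow the same construct/record/emit pattern as the
// sums above, but record float64 values and carry no data-point attributes.
// The 1.5 value below is an arbitrary example limit.
func exampleCPULimitGauge() pmetric.MetricSlice {
	now := pcommon.NewTimestampFromTime(time.Now())
	m := newMetricContainerCPULimit(MetricConfig{Enabled: true})
	m.recordDataPoint(now, now, 1.5) // e.g. a 1.5-CPU limit
	out := pmetric.NewMetricSlice()
	m.emit(out)
	return out
}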
type metricContainerCPULogicalCount struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills container.cpu.logical.count metric with initial data.
func (m *metricContainerCPULogicalCount) init() {
m.data.SetName("container.cpu.logical.count")
m.data.SetDescription("Number of cores available to the container.")
m.data.SetUnit("{cpus}")
m.data.SetEmptyGauge()
}
func (m *metricContainerCPULogicalCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricContainerCPULogicalCount) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricContainerCPULogicalCount) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricContainerCPULogicalCount(cfg MetricConfig) metricContainerCPULogicalCount {
m := metricContainerCPULogicalCount{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricContainerCPUShares struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills container.cpu.shares metric with initial data.
func (m *metricContainerCPUShares) init() {
m.data.SetName("container.cpu.shares")
m.data.SetDescription("CPU shares set for the container.")
m.data.SetUnit("1")
m.data.SetEmptyGauge()
}
func (m *metricContainerCPUShares) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricContainerCPUShares) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricContainerCPUShares) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricContainerCPUShares(cfg MetricConfig) metricContainerCPUShares {
m := metricContainerCPUShares{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricContainerCPUThrottlingDataPeriods struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills container.cpu.throttling_data.periods metric with initial data.
func (m *metricContainerCPUThrottlingDataPeriods) init() {
m.data.SetName("container.cpu.throttling_data.periods")
m.data.SetDescription("Number of periods with throttling active.")
m.data.SetUnit("{periods}")
m.data.SetEmptySum()
m.data.Sum().SetIsMonotonic(true)
m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
}
func (m *metricContainerCPUThrottlingDataPeriods) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
if !m.config.Enabled {
return
}
dp := m.data.Sum().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricContainerCPUThrottlingDataPeriods) updateCapacity() {
if m.data.Sum().DataPoints().Len() > m.capacity {
m.capacity = m.data.Sum().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricContainerCPUThrottlingDataPeriods) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricContainerCPUThrottlingDataPeriods(cfg MetricConfig) metricContainerCPUThrottlingDataPeriods {
m := metricContainerCPUThrottlingDataPeriods{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricContainerCPUThrottlingDataThrottledPeriods struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills container.cpu.throttling_data.throttled_periods metric with initial data.
func (m *metricContainerCPUThrottlingDataThrottledPeriods) init() {
m.data.SetName("container.cpu.throttling_data.throttled_periods")
m.data.SetDescription("Number of periods when the container hits its throttling limit.")
m.data.SetUnit("{periods}")
m.data.SetEmptySum()
m.data.Sum().SetIsMonotonic(true)
m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
}
func (m *metricContainerCPUThrottlingDataThrottledPeriods) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
if !m.config.Enabled {
return
}
dp := m.data.Sum().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricContainerCPUThrottlingDataThrottledPeriods) updateCapacity() {
if m.data.Sum().DataPoints().Len() > m.capacity {
m.capacity = m.data.Sum().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricContainerCPUThrottlingDataThrottledPeriods) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricContainerCPUThrottlingDataThrottledPeriods(cfg MetricConfig) metricContainerCPUThrottlingDataThrottledPeriods {
m := metricContainerCPUThrottlingDataThrottledPeriods{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricContainerCPUThrottlingDataThrottledTime struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills container.cpu.throttling_data.throttled_time metric with initial data.
func (m *metricContainerCPUThrottlingDataThrottledTime) init() {
m.data.SetName("container.cpu.throttling_data.throttled_time")
m.data.SetDescription("Aggregate time the container was throttled.")
m.data.SetUnit("ns")
m.data.SetEmptySum()
m.data.Sum().SetIsMonotonic(true)
m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
}
func (m *metricContainerCPUThrottlingDataThrottledTime) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
if !m.config.Enabled {
return
}
dp := m.data.Sum().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricContainerCPUThrottlingDataThrottledTime) updateCapacity() {
if m.data.Sum().DataPoints().Len() > m.capacity {
m.capacity = m.data.Sum().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricContainerCPUThrottlingDataThrottledTime) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricContainerCPUThrottlingDataThrottledTime(cfg MetricConfig) metricContainerCPUThrottlingDataThrottledTime {
m := metricContainerCPUThrottlingDataThrottledTime{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricContainerCPUUsageKernelmode struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills container.cpu.usage.kernelmode metric with initial data.
func (m *metricContainerCPUUsageKernelmode) init() {
m.data.SetName("container.cpu.usage.kernelmode")
m.data.SetDescription("Time spent by tasks of the cgroup in kernel mode (Linux). Time spent by all container processes in kernel mode (Windows).")
m.data.SetUnit("ns")
m.data.SetEmptySum()
m.data.Sum().SetIsMonotonic(true)
m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
}
func (m *metricContainerCPUUsageKernelmode) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
if !m.config.Enabled {
return
}
dp := m.data.Sum().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricContainerCPUUsageKernelmode) updateCapacity() {
if m.data.Sum().DataPoints().Len() > m.capacity {
m.capacity = m.data.Sum().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricContainerCPUUsageKernelmode) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricContainerCPUUsageKernelmode(cfg MetricConfig) metricContainerCPUUsageKernelmode {
m := metricContainerCPUUsageKernelmode{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricContainerCPUUsagePercpu struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills container.cpu.usage.percpu metric with initial data.
func (m *metricContainerCPUUsagePercpu) init() {
m.data.SetName("container.cpu.usage.percpu")
m.data.SetDescription("Per-core CPU usage by the container (Only available with cgroups v1).")
m.data.SetUnit("ns")
m.data.SetEmptySum()
m.data.Sum().SetIsMonotonic(true)
m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricContainerCPUUsagePercpu) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, coreAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Sum().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
dp.Attributes().PutStr("core", coreAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricContainerCPUUsagePercpu) updateCapacity() {
if m.data.Sum().DataPoints().Len() > m.capacity {
m.capacity = m.data.Sum().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricContainerCPUUsagePercpu) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricContainerCPUUsagePercpu(cfg MetricConfig) metricContainerCPUUsagePercpu {
m := metricContainerCPUUsagePercpu{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricContainerCPUUsageSystem struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills container.cpu.usage.system metric with initial data.
func (m *metricContainerCPUUsageSystem) init() {
m.data.SetName("container.cpu.usage.system")
m.data.SetDescription("System CPU usage, as reported by docker.")
m.data.SetUnit("ns")
m.data.SetEmptySum()
m.data.Sum().SetIsMonotonic(true)
m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
}
func (m *metricContainerCPUUsageSystem) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
if !m.config.Enabled {
return
}
dp := m.data.Sum().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricContainerCPUUsageSystem) updateCapacity() {
if m.data.Sum().DataPoints().Len() > m.capacity {
m.capacity = m.data.Sum().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricContainerCPUUsageSystem) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricContainerCPUUsageSystem(cfg MetricConfig) metricContainerCPUUsageSystem {
m := metricContainerCPUUsageSystem{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricContainerCPUUsageTotal struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills container.cpu.usage.total metric with initial data.
func (m *metricContainerCPUUsageTotal) init() {
m.data.SetName("container.cpu.usage.total")
m.data.SetDescription("Total CPU time consumed.")
m.data.SetUnit("ns")
m.data.SetEmptySum()
m.data.Sum().SetIsMonotonic(true)
m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
}
func (m *metricContainerCPUUsageTotal) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
if !m.config.Enabled {
return
}
dp := m.data.Sum().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricContainerCPUUsageTotal) updateCapacity() {
if m.data.Sum().DataPoints().Len() > m.capacity {
m.capacity = m.data.Sum().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricContainerCPUUsageTotal) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricContainerCPUUsageTotal(cfg MetricConfig) metricContainerCPUUsageTotal {
m := metricContainerCPUUsageTotal{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricContainerCPUUsageUsermode struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills container.cpu.usage.usermode metric with initial data.
func (m *metricContainerCPUUsageUsermode) init() {
m.data.SetName("container.cpu.usage.usermode")
m.data.SetDescription("Time spent by tasks of the cgroup in user mode (Linux). Time spent by all container processes in user mode (Windows).")
m.data.SetUnit("ns")
m.data.SetEmptySum()
m.data.Sum().SetIsMonotonic(true)
m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
}
func (m *metricContainerCPUUsageUsermode) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
if !m.config.Enabled {
return
}
dp := m.data.Sum().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricContainerCPUUsageUsermode) updateCapacity() {
if m.data.Sum().DataPoints().Len() > m.capacity {
m.capacity = m.data.Sum().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricContainerCPUUsageUsermode) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricContainerCPUUsageUsermode(cfg MetricConfig) metricContainerCPUUsageUsermode {
m := metricContainerCPUUsageUsermode{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricContainerCPUUtilization struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills container.cpu.utilization metric with initial data.
func (m *metricContainerCPUUtilization) init() {
m.data.SetName("container.cpu.utilization")
m.data.SetDescription("Percent of CPU used by the container.")
m.data.SetUnit("1")
m.data.SetEmptyGauge()
}
func (m *metricContainerCPUUtilization) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleValue(val)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricContainerCPUUtilization) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricContainerCPUUtilization) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricContainerCPUUtilization(cfg MetricConfig) metricContainerCPUUtilization {
m := metricContainerCPUUtilization{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricContainerMemoryActiveAnon struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills container.memory.active_anon metric with initial data.
func (m *metricContainerMemoryActiveAnon) init() {
m.data.SetName("container.memory.active_anon")
m.data.SetDescription("The amount of anonymous memory that has been identified as active by the kernel.")
m.data.SetUnit("By")
m.data.SetEmptySum()
m.data.Sum().SetIsMonotonic(false)
m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
}
func (m *metricContainerMemoryActiveAnon) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
if !m.config.Enabled {
return
}
dp := m.data.Sum().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricContainerMemoryActiveAnon) updateCapacity() {
if m.data.Sum().DataPoints().Len() > m.capacity {
m.capacity = m.data.Sum().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricContainerMemoryActiveAnon) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricContainerMemoryActiveAnon(cfg MetricConfig) metricContainerMemoryActiveAnon {
m := metricContainerMemoryActiveAnon{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricContainerMemoryActiveFile struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills container.memory.active_file metric with initial data.
func (m *metricContainerMemoryActiveFile) init() {
m.data.SetName("container.memory.active_file")
m.data.SetDescription("Cache memory that has been identified as active by the kernel.")
m.data.SetUnit("By")
m.data.SetEmptySum()
m.data.Sum().SetIsMonotonic(false)
m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
}
func (m *metricContainerMemoryActiveFile) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
if !m.config.Enabled {
return
}
dp := m.data.Sum().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricContainerMemoryActiveFile) updateCapacity() {
if m.data.Sum().DataPoints().Len() > m.capacity {
m.capacity = m.data.Sum().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricContainerMemoryActiveFile) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricContainerMemoryActiveFile(cfg MetricConfig) metricContainerMemoryActiveFile {
m := metricContainerMemoryActiveFile{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricContainerMemoryAnon struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills container.memory.anon metric with initial data.
func (m *metricContainerMemoryAnon) init() {
m.data.SetName("container.memory.anon")
m.data.SetDescription("Amount of memory used in anonymous mappings such as brk(), sbrk(), and mmap(MAP_ANONYMOUS) (Only available with cgroups v2).")
m.data.SetUnit("By")
m.data.SetEmptySum()
m.data.Sum().SetIsMonotonic(false)
m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
}
func (m *metricContainerMemoryAnon) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
if !m.config.Enabled {
return
}
dp := m.data.Sum().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricContainerMemoryAnon) updateCapacity() {
if m.data.Sum().DataPoints().Len() > m.capacity {
m.capacity = m.data.Sum().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricContainerMemoryAnon) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricContainerMemoryAnon(cfg MetricConfig) metricContainerMemoryAnon {
m := metricContainerMemoryAnon{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricContainerMemoryCache struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills container.memory.cache metric with initial data.
func (m *metricContainerMemoryCache) init() {
m.data.SetName("container.memory.cache")
m.data.SetDescription("The amount of memory used by the processes of this control group that can be associated precisely with a block on a block device (Only available with cgroups v1).")
m.data.SetUnit("By")
m.data.SetEmptySum()
m.data.Sum().SetIsMonotonic(false)
m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
}
func (m *metricContainerMemoryCache) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
if !m.config.Enabled {
return
}
dp := m.data.Sum().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricContainerMemoryCache) updateCapacity() {
if m.data.Sum().DataPoints().Len() > m.capacity {
m.capacity = m.data.Sum().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricContainerMemoryCache) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricContainerMemoryCache(cfg MetricConfig) metricContainerMemoryCache {
m := metricContainerMemoryCache{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricContainerMemoryDirty struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills container.memory.dirty metric with initial data.
func (m *metricContainerMemoryDirty) init() {
m.data.SetName("container.memory.dirty")
m.data.SetDescription("Bytes that are waiting to get written back to the disk, from this cgroup (Only available with cgroups v1).")
m.data.SetUnit("By")
m.data.SetEmptySum()
m.data.Sum().SetIsMonotonic(false)
m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
}
func (m *metricContainerMemoryDirty) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
if !m.config.Enabled {
return
}
dp := m.data.Sum().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricContainerMemoryDirty) updateCapacity() {
if m.data.Sum().DataPoints().Len() > m.capacity {
m.capacity = m.data.Sum().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricContainerMemoryDirty) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricContainerMemoryDirty(cfg MetricConfig) metricContainerMemoryDirty {
m := metricContainerMemoryDirty{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricContainerMemoryFails struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills container.memory.fails metric with initial data.
func (m *metricContainerMemoryFails) init() {
m.data.SetName("container.memory.fails")
m.data.SetDescription("Number of times the memory limit was hit.")
m.data.SetUnit("{fails}")
m.data.SetEmptySum()
m.data.Sum().SetIsMonotonic(true)
m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
}
func (m *metricContainerMemoryFails) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
if !m.config.Enabled {
return
}
dp := m.data.Sum().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricContainerMemoryFails) updateCapacity() {
if m.data.Sum().DataPoints().Len() > m.capacity {
m.capacity = m.data.Sum().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricContainerMemoryFails) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricContainerMemoryFails(cfg MetricConfig) metricContainerMemoryFails {
m := metricContainerMemoryFails{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
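// Editor's note: an illustrative sketch, not generated code. When a metric is
// disabled in its MetricConfig, the constructor never allocates the pmetric.Metric
// buffer, recordDataPoint returns before touching it, and emit short-circuits on
// the Enabled check, so a disabled metric adds essentially no work per scrape.
// The function name exampleDisabledMemoryFails is hypothetical.
func exampleDisabledMemoryFails(dest pmetric.MetricSlice, start, now pcommon.Timestamp) int {
	m := newMetricContainerMemoryFails(MetricConfig{Enabled: false})
	m.recordDataPoint(start, now, 7) // no-op: the Enabled guard returns early
	m.emit(dest)                     // no-op: Enabled is false, nothing was buffered
	return dest.Len()                // unchanged by the calls above
}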
type metricContainerMemoryFile struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills container.memory.file metric with initial data.
func (m *metricContainerMemoryFile) init() {
m.data.SetName("container.memory.file")
m.data.SetDescription("Amount of memory used to cache filesystem data, including tmpfs and shared memory (Only available with cgroups v2).")
m.data.SetUnit("By")
m.data.SetEmptySum()
m.data.Sum().SetIsMonotonic(false)
m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
}
func (m *metricContainerMemoryFile) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
if !m.config.Enabled {
return
}
dp := m.data.Sum().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricContainerMemoryFile) updateCapacity() {
if m.data.Sum().DataPoints().Len() > m.capacity {
m.capacity = m.data.Sum().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricContainerMemoryFile) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricContainerMemoryFile(cfg MetricConfig) metricContainerMemoryFile {
m := metricContainerMemoryFile{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricContainerMemoryHierarchicalMemoryLimit struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills container.memory.hierarchical_memory_limit metric with initial data.
func (m *metricContainerMemoryHierarchicalMemoryLimit) init() {
m.data.SetName("container.memory.hierarchical_memory_limit")
m.data.SetDescription("The maximum amount of physical memory that can be used by the processes of this control group (Only available with cgroups v1).")
m.data.SetUnit("By")
m.data.SetEmptySum()
m.data.Sum().SetIsMonotonic(false)
m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
}
func (m *metricContainerMemoryHierarchicalMemoryLimit) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
if !m.config.Enabled {
return
}
dp := m.data.Sum().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricContainerMemoryHierarchicalMemoryLimit) updateCapacity() {
if m.data.Sum().DataPoints().Len() > m.capacity {
m.capacity = m.data.Sum().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricContainerMemoryHierarchicalMemoryLimit) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricContainerMemoryHierarchicalMemoryLimit(cfg MetricConfig) metricContainerMemoryHierarchicalMemoryLimit {
m := metricContainerMemoryHierarchicalMemoryLimit{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricContainerMemoryHierarchicalMemswLimit struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills container.memory.hierarchical_memsw_limit metric with initial data.
func (m *metricContainerMemoryHierarchicalMemswLimit) init() {
m.data.SetName("container.memory.hierarchical_memsw_limit")
m.data.SetDescription("The maximum amount of RAM + swap that can be used by the processes of this control group (Only available with cgroups v1).")
m.data.SetUnit("By")
m.data.SetEmptySum()
m.data.Sum().SetIsMonotonic(false)
m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
}
func (m *metricContainerMemoryHierarchicalMemswLimit) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
if !m.config.Enabled {
return
}
dp := m.data.Sum().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricContainerMemoryHierarchicalMemswLimit) updateCapacity() {
if m.data.Sum().DataPoints().Len() > m.capacity {
m.capacity = m.data.Sum().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricContainerMemoryHierarchicalMemswLimit) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricContainerMemoryHierarchicalMemswLimit(cfg MetricConfig) metricContainerMemoryHierarchicalMemswLimit {
m := metricContainerMemoryHierarchicalMemswLimit{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricContainerMemoryInactiveAnon struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills container.memory.inactive_anon metric with initial data.
func (m *metricContainerMemoryInactiveAnon) init() {
m.data.SetName("container.memory.inactive_anon")
m.data.SetDescription("The amount of anonymous memory that has been identified as inactive by the kernel.")
m.data.SetUnit("By")
m.data.SetEmptySum()
m.data.Sum().SetIsMonotonic(false)
m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
}
func (m *metricContainerMemoryInactiveAnon) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
if !m.config.Enabled {
return
}
dp := m.data.Sum().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricContainerMemoryInactiveAnon) updateCapacity() {
if m.data.Sum().DataPoints().Len() > m.capacity {
m.capacity = m.data.Sum().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricContainerMemoryInactiveAnon) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricContainerMemoryInactiveAnon(cfg MetricConfig) metricContainerMemoryInactiveAnon {
m := metricContainerMemoryInactiveAnon{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricContainerMemoryInactiveFile struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills container.memory.inactive_file metric with initial data.
func (m *metricContainerMemoryInactiveFile) init() {
m.data.SetName("container.memory.inactive_file")
m.data.SetDescription("Cache memory that has been identified as inactive by the kernel.")
m.data.SetUnit("By")
m.data.SetEmptySum()
m.data.Sum().SetIsMonotonic(false)
m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
}
func (m *metricContainerMemoryInactiveFile) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
if !m.config.Enabled {
return
}
dp := m.data.Sum().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricContainerMemoryInactiveFile) updateCapacity() {
if m.data.Sum().DataPoints().Len() > m.capacity {
m.capacity = m.data.Sum().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricContainerMemoryInactiveFile) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricContainerMemoryInactiveFile(cfg MetricConfig) metricContainerMemoryInactiveFile {
m := metricContainerMemoryInactiveFile{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricContainerMemoryMappedFile struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills container.memory.mapped_file metric with initial data.
func (m *metricContainerMemoryMappedFile) init() {
m.data.SetName("container.memory.mapped_file")
m.data.SetDescription("Indicates the amount of memory mapped by the processes in the control group (Only available with cgroups v1).")
m.data.SetUnit("By")
m.data.SetEmptySum()
m.data.Sum().SetIsMonotonic(false)
m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
}
func (m *metricContainerMemoryMappedFile) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
if !m.config.Enabled {
return
}
dp := m.data.Sum().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricContainerMemoryMappedFile) updateCapacity() {
if m.data.Sum().DataPoints().Len() > m.capacity {
m.capacity = m.data.Sum().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricContainerMemoryMappedFile) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricContainerMemoryMappedFile(cfg MetricConfig) metricContainerMemoryMappedFile {
m := metricContainerMemoryMappedFile{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricContainerMemoryPercent struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills container.memory.percent metric with initial data.
func (m *metricContainerMemoryPercent) init() {
m.data.SetName("container.memory.percent")
m.data.SetDescription("Percentage of memory used.")
m.data.SetUnit("1")
m.data.SetEmptyGauge()
}
func (m *metricContainerMemoryPercent) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleValue(val)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricContainerMemoryPercent) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricContainerMemoryPercent) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricContainerMemoryPercent(cfg MetricConfig) metricContainerMemoryPercent {
m := metricContainerMemoryPercent{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
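// Editor's note: illustrative only, not generated code. container.memory.percent
// is the one gauge among these builders; its recordDataPoint takes a float64 and
// the data point lands under Gauge() rather than Sum(). The helper name
// exampleRecordMemoryPercent and the use of time.Now() for both timestamps are
// assumptions made for the sketch.
func exampleRecordMemoryPercent(dest pmetric.MetricSlice, usedPercent float64) {
	now := pcommon.NewTimestampFromTime(time.Now())
	m := newMetricContainerMemoryPercent(MetricConfig{Enabled: true})
	m.recordDataPoint(now, now, usedPercent) // double-valued gauge data point
	m.emit(dest)
}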
type metricContainerMemoryPgfault struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills container.memory.pgfault metric with initial data.
func (m *metricContainerMemoryPgfault) init() {
m.data.SetName("container.memory.pgfault")
m.data.SetDescription("Indicate the number of times that a process of the cgroup triggered a page fault.")
m.data.SetUnit("{faults}")
m.data.SetEmptySum()
m.data.Sum().SetIsMonotonic(true)
m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
}
func (m *metricContainerMemoryPgfault) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
if !m.config.Enabled {
return
}
dp := m.data.Sum().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricContainerMemoryPgfault) updateCapacity() {
if m.data.Sum().DataPoints().Len() > m.capacity {
m.capacity = m.data.Sum().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricContainerMemoryPgfault) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricContainerMemoryPgfault(cfg MetricConfig) metricContainerMemoryPgfault {
m := metricContainerMemoryPgfault{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricContainerMemoryPgmajfault struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills container.memory.pgmajfault metric with initial data.
func (m *metricContainerMemoryPgmajfault) init() {
m.data.SetName("container.memory.pgmajfault")
m.data.SetDescription("Indicate the number of times that a process of the cgroup triggered a major fault.")
m.data.SetUnit("{faults}")
m.data.SetEmptySum()
m.data.Sum().SetIsMonotonic(true)
m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
}
func (m *metricContainerMemoryPgmajfault) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
if !m.config.Enabled {
return
}
dp := m.data.Sum().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricContainerMemoryPgmajfault) updateCapacity() {
if m.data.Sum().DataPoints().Len() > m.capacity {
m.capacity = m.data.Sum().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricContainerMemoryPgmajfault) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricContainerMemoryPgmajfault(cfg MetricConfig) metricContainerMemoryPgmajfault {
m := metricContainerMemoryPgmajfault{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricContainerMemoryPgpgin struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills container.memory.pgpgin metric with initial data.
func (m *metricContainerMemoryPgpgin) init() {
m.data.SetName("container.memory.pgpgin")
m.data.SetDescription("Number of pages read from disk by the cgroup (Only available with cgroups v1).")
m.data.SetUnit("{operations}")
m.data.SetEmptySum()
m.data.Sum().SetIsMonotonic(true)
m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
}
func (m *metricContainerMemoryPgpgin) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
if !m.config.Enabled {
return
}
dp := m.data.Sum().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricContainerMemoryPgpgin) updateCapacity() {
if m.data.Sum().DataPoints().Len() > m.capacity {
m.capacity = m.data.Sum().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricContainerMemoryPgpgin) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricContainerMemoryPgpgin(cfg MetricConfig) metricContainerMemoryPgpgin {
m := metricContainerMemoryPgpgin{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricContainerMemoryPgpgout struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills container.memory.pgpgout metric with initial data.
func (m *metricContainerMemoryPgpgout) init() {
m.data.SetName("container.memory.pgpgout")
m.data.SetDescription("Number of pages written to disk by the cgroup (Only available with cgroups v1).")
m.data.SetUnit("{operations}")
m.data.SetEmptySum()
m.data.Sum().SetIsMonotonic(true)
m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
}
func (m *metricContainerMemoryPgpgout) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
if !m.config.Enabled {
return
}
dp := m.data.Sum().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricContainerMemoryPgpgout) updateCapacity() {
if m.data.Sum().DataPoints().Len() > m.capacity {
m.capacity = m.data.Sum().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricContainerMemoryPgpgout) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricContainerMemoryPgpgout(cfg MetricConfig) metricContainerMemoryPgpgout {
m := metricContainerMemoryPgpgout{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricContainerMemoryRss struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills container.memory.rss metric with initial data.
func (m *metricContainerMemoryRss) init() {
m.data.SetName("container.memory.rss")
m.data.SetDescription("The amount of memory that doesn’t correspond to anything on disk: stacks, heaps, and anonymous memory maps (Only available with cgroups v1).")
m.data.SetUnit("By")
m.data.SetEmptySum()
m.data.Sum().SetIsMonotonic(false)
m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
}
func (m *metricContainerMemoryRss) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
if !m.config.Enabled {
return
}
dp := m.data.Sum().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricContainerMemoryRss) updateCapacity() {
if m.data.Sum().DataPoints().Len() > m.capacity {
m.capacity = m.data.Sum().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricContainerMemoryRss) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricContainerMemoryRss(cfg MetricConfig) metricContainerMemoryRss {
m := metricContainerMemoryRss{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricContainerMemoryRssHuge struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills container.memory.rss_huge metric with initial data.
func (m *metricContainerMemoryRssHuge) init() {
m.data.SetName("container.memory.rss_huge")
m.data.SetDescription("Number of bytes of anonymous transparent hugepages in this cgroup (Only available with cgroups v1).")
m.data.SetUnit("By")
m.data.SetEmptySum()
m.data.Sum().SetIsMonotonic(false)
m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
}
func (m *metricContainerMemoryRssHuge) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
if !m.config.Enabled {
return
}
dp := m.data.Sum().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricContainerMemoryRssHuge) updateCapacity() {
if m.data.Sum().DataPoints().Len() > m.capacity {
m.capacity = m.data.Sum().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricContainerMemoryRssHuge) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricContainerMemoryRssHuge(cfg MetricConfig) metricContainerMemoryRssHuge {
m := metricContainerMemoryRssHuge{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricContainerMemoryTotalActiveAnon struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills container.memory.total_active_anon metric with initial data.
func (m *metricContainerMemoryTotalActiveAnon) init() {
m.data.SetName("container.memory.total_active_anon")
m.data.SetDescription("The amount of anonymous memory that has been identified as active by the kernel. Includes descendant cgroups (Only available with cgroups v1).")
m.data.SetUnit("By")
m.data.SetEmptySum()
m.data.Sum().SetIsMonotonic(false)
m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
}
func (m *metricContainerMemoryTotalActiveAnon) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
if !m.config.Enabled {
return
}
dp := m.data.Sum().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricContainerMemoryTotalActiveAnon) updateCapacity() {
if m.data.Sum().DataPoints().Len() > m.capacity {
m.capacity = m.data.Sum().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricContainerMemoryTotalActiveAnon) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricContainerMemoryTotalActiveAnon(cfg MetricConfig) metricContainerMemoryTotalActiveAnon {
m := metricContainerMemoryTotalActiveAnon{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricContainerMemoryTotalActiveFile struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills container.memory.total_active_file metric with initial data.
func (m *metricContainerMemoryTotalActiveFile) init() {
m.data.SetName("container.memory.total_active_file")
m.data.SetDescription("Cache memory that has been identified as active by the kernel. Includes descendant cgroups (Only available with cgroups v1).")
m.data.SetUnit("By")
m.data.SetEmptySum()
m.data.Sum().SetIsMonotonic(false)
m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
}
func (m *metricContainerMemoryTotalActiveFile) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
if !m.config.Enabled {
return
}
dp := m.data.Sum().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricContainerMemoryTotalActiveFile) updateCapacity() {
if m.data.Sum().DataPoints().Len() > m.capacity {
m.capacity = m.data.Sum().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricContainerMemoryTotalActiveFile) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricContainerMemoryTotalActiveFile(cfg MetricConfig) metricContainerMemoryTotalActiveFile {
m := metricContainerMemoryTotalActiveFile{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricContainerMemoryTotalCache struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills container.memory.total_cache metric with initial data.
func (m *metricContainerMemoryTotalCache) init() {
m.data.SetName("container.memory.total_cache")
m.data.SetDescription("Total amount of memory used by the processes of this cgroup (and descendants) that can be associated with a block on a block device. Also accounts for memory used by tmpfs (Only available with cgroups v1).")
m.data.SetUnit("By")
m.data.SetEmptySum()
m.data.Sum().SetIsMonotonic(false)
m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
}
func (m *metricContainerMemoryTotalCache) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
if !m.config.Enabled {
return
}
dp := m.data.Sum().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricContainerMemoryTotalCache) updateCapacity() {
if m.data.Sum().DataPoints().Len() > m.capacity {
m.capacity = m.data.Sum().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricContainerMemoryTotalCache) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricContainerMemoryTotalCache(cfg MetricConfig) metricContainerMemoryTotalCache {
m := metricContainerMemoryTotalCache{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricContainerMemoryTotalDirty struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills container.memory.total_dirty metric with initial data.
func (m *metricContainerMemoryTotalDirty) init() {
m.data.SetName("container.memory.total_dirty")
m.data.SetDescription("Bytes that are waiting to get written back to the disk, from this cgroup and descendants (Only available with cgroups v1).")
m.data.SetUnit("By")
m.data.SetEmptySum()
m.data.Sum().SetIsMonotonic(false)
m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
}
func (m *metricContainerMemoryTotalDirty) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
if !m.config.Enabled {
return
}
dp := m.data.Sum().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricContainerMemoryTotalDirty) updateCapacity() {
if m.data.Sum().DataPoints().Len() > m.capacity {
m.capacity = m.data.Sum().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricContainerMemoryTotalDirty) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricContainerMemoryTotalDirty(cfg MetricConfig) metricContainerMemoryTotalDirty {
m := metricContainerMemoryTotalDirty{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
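// Editor's note: an illustrative sketch, not generated code. In practice the
// MetricsBuilder in this package owns one builder per metric and calls emit on
// each of them into the same pmetric.MetricSlice during a scrape; the function
// below only approximates that wiring with two of the builders defined in this
// file. The function name exampleEmitSeveral is hypothetical.
func exampleEmitSeveral(start, now pcommon.Timestamp, cacheBytes, dirtyBytes int64) pmetric.MetricSlice {
	dest := pmetric.NewMetricSlice()
	cache := newMetricContainerMemoryTotalCache(MetricConfig{Enabled: true})
	dirty := newMetricContainerMemoryTotalDirty(MetricConfig{Enabled: true})
	cache.recordDataPoint(start, now, cacheBytes)
	dirty.recordDataPoint(start, now, dirtyBytes)
	cache.emit(dest) // appends container.memory.total_cache
	dirty.emit(dest) // appends container.memory.total_dirty
	return dest      // two metrics, one data point each
}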
type metricContainerMemoryTotalInactiveAnon struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills container.memory.total_inactive_anon metric with initial data.
func (m *metricContainerMemoryTotalInactiveAnon) init() {
m.data.SetName("container.memory.total_inactive_anon")
m.data.SetDescription("The amount of anonymous memory that has been identified as inactive by the kernel. Includes descendant cgroups (Only available with cgroups v1).")
m.data.SetUnit("By")
m.data.SetEmptySum()
m.data.Sum().SetIsMonotonic(false)
m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
}
func (m *metricContainerMemoryTotalInactiveAnon) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
if !m.config.Enabled {
return
}
dp := m.data.Sum().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricContainerMemoryTotalInactiveAnon) updateCapacity() {
if m.data.Sum().DataPoints().Len() > m.capacity {
m.capacity = m.data.Sum().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricContainerMemoryTotalInactiveAnon) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricContainerMemoryTotalInactiveAnon(cfg MetricConfig) metricContainerMemoryTotalInactiveAnon {
m := metricContainerMemoryTotalInactiveAnon{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricContainerMemoryTotalInactiveFile struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills container.memory.total_inactive_file metric with initial data.
func (m *metricContainerMemoryTotalInactiveFile) init() {
m.data.SetName("container.memory.total_inactive_file")
m.data.SetDescription("Cache memory that has been identified as inactive by the kernel. Includes descendant cgroups (Only available with cgroups v1).")
m.data.SetUnit("By")
m.data.SetEmptySum()
m.data.Sum().SetIsMonotonic(false)
m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
}
func (m *metricContainerMemoryTotalInactiveFile) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
if !m.config.Enabled {
return
}
dp := m.data.Sum().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricContainerMemoryTotalInactiveFile) updateCapacity() {
if m.data.Sum().DataPoints().Len() > m.capacity {
m.capacity = m.data.Sum().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricContainerMemoryTotalInactiveFile) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricContainerMemoryTotalInactiveFile(cfg MetricConfig) metricContainerMemoryTotalInactiveFile {
m := metricContainerMemoryTotalInactiveFile{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricContainerMemoryTotalMappedFile struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills container.memory.total_mapped_file metric with initial data.
func (m *metricContainerMemoryTotalMappedFile) init() {
m.data.SetName("container.memory.total_mapped_file")
m.data.SetDescription("Indicates the amount of memory mapped by the processes in the control group and descendant groups (Only available with cgroups v1).")
m.data.SetUnit("By")
m.data.SetEmptySum()
m.data.Sum().SetIsMonotonic(false)
m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
}
func (m *metricContainerMemoryTotalMappedFile) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
if !m.config.Enabled {
return
}
dp := m.data.Sum().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricContainerMemoryTotalMappedFile) updateCapacity() {
if m.data.Sum().DataPoints().Len() > m.capacity {
m.capacity = m.data.Sum().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricContainerMemoryTotalMappedFile) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricContainerMemoryTotalMappedFile(cfg MetricConfig) metricContainerMemoryTotalMappedFile {
m := metricContainerMemoryTotalMappedFile{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricContainerMemoryTotalPgfault struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills container.memory.total_pgfault metric with initial data.
func (m *metricContainerMemoryTotalPgfault) init() {
m.data.SetName("container.memory.total_pgfault")
m.data.SetDescription("Indicate the number of times that a process of the cgroup (or descendant cgroups) triggered a page fault (Only available with cgroups v1).")
m.data.SetUnit("{faults}")
m.data.SetEmptySum()
m.data.Sum().SetIsMonotonic(true)
m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
}
func (m *metricContainerMemoryTotalPgfault) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
if !m.config.Enabled {
return
}
dp := m.data.Sum().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricContainerMemoryTotalPgfault) updateCapacity() {
if m.data.Sum().DataPoints().Len() > m.capacity {
m.capacity = m.data.Sum().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricContainerMemoryTotalPgfault) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricContainerMemoryTotalPgfault(cfg MetricConfig) metricContainerMemoryTotalPgfault {
m := metricContainerMemoryTotalPgfault{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricContainerMemoryTotalPgmajfault struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills container.memory.total_pgmajfault metric with initial data.
func (m *metricContainerMemoryTotalPgmajfault) init() {
m.data.SetName("container.memory.total_pgmajfault")
m.data.SetDescription("Indicate the number of times that a process of the cgroup (or descendant cgroups) triggered a major fault (Only available with cgroups v1).")
m.data.SetUnit("{faults}")
m.data.SetEmptySum()
m.data.Sum().SetIsMonotonic(true)
m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
}
func (m *metricContainerMemoryTotalPgmajfault) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
if !m.config.Enabled {
return
}
dp := m.data.Sum().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricContainerMemoryTotalPgmajfault) updateCapacity() {
if m.data.Sum().DataPoints().Len() > m.capacity {
m.capacity = m.data.Sum().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricContainerMemoryTotalPgmajfault) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricContainerMemoryTotalPgmajfault(cfg MetricConfig) metricContainerMemoryTotalPgmajfault {
m := metricContainerMemoryTotalPgmajfault{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricContainerMemoryTotalPgpgin struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills container.memory.total_pgpgin metric with initial data.
func (m *metricContainerMemoryTotalPgpgin) init() {
m.data.SetName("container.memory.total_pgpgin")
m.data.SetDescription("Number of pages read from disk by the cgroup and descendant groups (Only available with cgroups v1).")
m.data.SetUnit("{operations}")
m.data.SetEmptySum()
m.data.Sum().SetIsMonotonic(true)
m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
}
func (m *metricContainerMemoryTotalPgpgin) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
if !m.config.Enabled {
return
}
dp := m.data.Sum().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricContainerMemoryTotalPgpgin) updateCapacity() {
if m.data.Sum().DataPoints().Len() > m.capacity {
m.capacity = m.data.Sum().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricContainerMemoryTotalPgpgin) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricContainerMemoryTotalPgpgin(cfg MetricConfig) metricContainerMemoryTotalPgpgin {
m := metricContainerMemoryTotalPgpgin{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricContainerMemoryTotalPgpgout struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills container.memory.total_pgpgout metric with initial data.
func (m *metricContainerMemoryTotalPgpgout) init() {
m.data.SetName("container.memory.total_pgpgout")
m.data.SetDescription("Number of pages written to disk by the cgroup and descendant groups (Only available with cgroups v1).")
m.data.SetUnit("{operations}")
m.data.SetEmptySum()
m.data.Sum().SetIsMonotonic(true)
m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
}
func (m *metricContainerMemoryTotalPgpgout) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
if !m.config.Enabled {
return
}
dp := m.data.Sum().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricContainerMemoryTotalPgpgout) updateCapacity() {
if m.data.Sum().DataPoints().Len() > m.capacity {
m.capacity = m.data.Sum().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricContainerMemoryTotalPgpgout) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricContainerMemoryTotalPgpgout(cfg MetricConfig) metricContainerMemoryTotalPgpgout {
m := metricContainerMemoryTotalPgpgout{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricContainerMemoryTotalRss struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills container.memory.total_rss metric with initial data.
func (m *metricContainerMemoryTotalRss) init() {
m.data.SetName("container.memory.total_rss")
m.data.SetDescription("The amount of memory that doesn’t correspond to anything on disk: stacks, heaps, and anonymous memory maps. Includes descendant cgroups (Only available with cgroups v1).")
m.data.SetUnit("By")
m.data.SetEmptySum()
m.data.Sum().SetIsMonotonic(false)
m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
}
func (m *metricContainerMemoryTotalRss) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
if !m.config.Enabled {
return
}
dp := m.data.Sum().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricContainerMemoryTotalRss) updateCapacity() {
if m.data.Sum().DataPoints().Len() > m.capacity {
m.capacity = m.data.Sum().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricContainerMemoryTotalRss) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricContainerMemoryTotalRss(cfg MetricConfig) metricContainerMemoryTotalRss {
m := metricContainerMemoryTotalRss{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricContainerMemoryTotalRssHuge struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills container.memory.total_rss_huge metric with initial data.
func (m *metricContainerMemoryTotalRssHuge) init() {
m.data.SetName("container.memory.total_rss_huge")
m.data.SetDescription("Number of bytes of anonymous transparent hugepages in this cgroup and descendant cgroups (Only available with cgroups v1).")
m.data.SetUnit("By")
m.data.SetEmptySum()
m.data.Sum().SetIsMonotonic(false)
m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
}
func (m *metricContainerMemoryTotalRssHuge) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
if !m.config.Enabled {
return
}
dp := m.data.Sum().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricContainerMemoryTotalRssHuge) updateCapacity() {
if m.data.Sum().DataPoints().Len() > m.capacity {
m.capacity = m.data.Sum().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricContainerMemoryTotalRssHuge) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricContainerMemoryTotalRssHuge(cfg MetricConfig) metricContainerMemoryTotalRssHuge {
m := metricContainerMemoryTotalRssHuge{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricContainerMemoryTotalUnevictable struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills container.memory.total_unevictable metric with initial data.
func (m *metricContainerMemoryTotalUnevictable) init() {
m.data.SetName("container.memory.total_unevictable")
m.data.SetDescription("The amount of memory that cannot be reclaimed. Includes descendant cgroups (Only available with cgroups v1).")
m.data.SetUnit("By")
m.data.SetEmptySum()
m.data.Sum().SetIsMonotonic(false)
m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
}
func (m *metricContainerMemoryTotalUnevictable) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
if !m.config.Enabled {
return
}
dp := m.data.Sum().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricContainerMemoryTotalUnevictable) updateCapacity() {
if m.data.Sum().DataPoints().Len() > m.capacity {
m.capacity = m.data.Sum().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricContainerMemoryTotalUnevictable) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricContainerMemoryTotalUnevictable(cfg MetricConfig) metricContainerMemoryTotalUnevictable {
m := metricContainerMemoryTotalUnevictable{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricContainerMemoryTotalWriteback struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills container.memory.total_writeback metric with initial data.
func (m *metricContainerMemoryTotalWriteback) init() {
m.data.SetName("container.memory.total_writeback")
m.data.SetDescription("Number of bytes of file/anon cache that are queued for syncing to disk in this cgroup and descendants (Only available with cgroups v1).")
m.data.SetUnit("By")
m.data.SetEmptySum()
m.data.Sum().SetIsMonotonic(false)
m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
}
func (m *metricContainerMemoryTotalWriteback) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
if !m.config.Enabled {
return
}
dp := m.data.Sum().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricContainerMemoryTotalWriteback) updateCapacity() {
if m.data.Sum().DataPoints().Len() > m.capacity {
m.capacity = m.data.Sum().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricContainerMemoryTotalWriteback) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricContainerMemoryTotalWriteback(cfg MetricConfig) metricContainerMemoryTotalWriteback {
m := metricContainerMemoryTotalWriteback{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricContainerMemoryUnevictable struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills container.memory.unevictable metric with initial data.
func (m *metricContainerMemoryUnevictable) init() {
m.data.SetName("container.memory.unevictable")
m.data.SetDescription("The amount of memory that cannot be reclaimed.")
m.data.SetUnit("By")
m.data.SetEmptySum()
m.data.Sum().SetIsMonotonic(false)
m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
}
func (m *metricContainerMemoryUnevictable) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
if !m.config.Enabled {
return
}
dp := m.data.Sum().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricContainerMemoryUnevictable) updateCapacity() {
if m.data.Sum().DataPoints().Len() > m.capacity {
m.capacity = m.data.Sum().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricContainerMemoryUnevictable) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricContainerMemoryUnevictable(cfg MetricConfig) metricContainerMemoryUnevictable {
m := metricContainerMemoryUnevictable{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricContainerMemoryUsageLimit struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills container.memory.usage.limit metric with initial data.
func (m *metricContainerMemoryUsageLimit) init() {
m.data.SetName("container.memory.usage.limit")
m.data.SetDescription("Memory limit of the container.")
m.data.SetUnit("By")
m.data.SetEmptySum()
m.data.Sum().SetIsMonotonic(false)
m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
}
func (m *metricContainerMemoryUsageLimit) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
if !m.config.Enabled {
return
}
dp := m.data.Sum().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricContainerMemoryUsageLimit) updateCapacity() {
if m.data.Sum().DataPoints().Len() > m.capacity {
m.capacity = m.data.Sum().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricContainerMemoryUsageLimit) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricContainerMemoryUsageLimit(cfg MetricConfig) metricContainerMemoryUsageLimit {
m := metricContainerMemoryUsageLimit{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricContainerMemoryUsageMax struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills container.memory.usage.max metric with initial data.
func (m *metricContainerMemoryUsageMax) init() {
m.data.SetName("container.memory.usage.max")
m.data.SetDescription("Maximum memory usage.")
m.data.SetUnit("By")
m.data.SetEmptySum()
m.data.Sum().SetIsMonotonic(false)
m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
}
func (m *metricContainerMemoryUsageMax) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
if !m.config.Enabled {
return
}
dp := m.data.Sum().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricContainerMemoryUsageMax) updateCapacity() {
if m.data.Sum().DataPoints().Len() > m.capacity {
m.capacity = m.data.Sum().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricContainerMemoryUsageMax) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricContainerMemoryUsageMax(cfg MetricConfig) metricContainerMemoryUsageMax {
m := metricContainerMemoryUsageMax{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricContainerMemoryUsageTotal struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills container.memory.usage.total metric with initial data.
func (m *metricContainerMemoryUsageTotal) init() {
m.data.SetName("container.memory.usage.total")
m.data.SetDescription("Memory usage of the container. This excludes the cache.")
m.data.SetUnit("By")
m.data.SetEmptySum()
m.data.Sum().SetIsMonotonic(false)
m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
}
func (m *metricContainerMemoryUsageTotal) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
if !m.config.Enabled {
return
}
dp := m.data.Sum().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricContainerMemoryUsageTotal) updateCapacity() {
if m.data.Sum().DataPoints().Len() > m.capacity {
m.capacity = m.data.Sum().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricContainerMemoryUsageTotal) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricContainerMemoryUsageTotal(cfg MetricConfig) metricContainerMemoryUsageTotal {
m := metricContainerMemoryUsageTotal{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
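// Editor's note: an illustrative sketch, not generated code. emit uses MoveTo,
// which transfers the buffered metric into the destination slice and leaves the
// builder's internal pmetric.Metric empty; init then restores the name, unit and
// sum settings, so the same builder can be reused across scrapes without touching
// what was already emitted. The function name exampleReuseAcrossScrapes is
// hypothetical.
func exampleReuseAcrossScrapes(start, scrape1, scrape2 pcommon.Timestamp, used1, used2 int64) pmetric.MetricSlice {
	dest := pmetric.NewMetricSlice()
	m := newMetricContainerMemoryUsageTotal(MetricConfig{Enabled: true})
	m.recordDataPoint(start, scrape1, used1)
	m.emit(dest) // first container.memory.usage.total metric, one data point
	m.recordDataPoint(start, scrape2, used2)
	m.emit(dest) // second, independent metric for the next scrape
	return dest  // holds two metrics after both emits
}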
type metricContainerMemoryWriteback struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills container.memory.writeback metric with initial data.
func (m *metricContainerMemoryWriteback) init() {
m.data.SetName("container.memory.writeback")
m.data.SetDescription("Number of bytes of file/anon cache that are queued for syncing to disk in this cgroup (Only available with cgroups v1).")
m.data.SetUnit("By")
m.data.SetEmptySum()
m.data.Sum().SetIsMonotonic(false)
m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
}
func (m *metricContainerMemoryWriteback) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
if !m.config.Enabled {
return
}
dp := m.data.Sum().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricContainerMemoryWriteback) updateCapacity() {
if m.data.Sum().DataPoints().Len() > m.capacity {
m.capacity = m.data.Sum().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricContainerMemoryWriteback) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricContainerMemoryWriteback(cfg MetricConfig) metricContainerMemoryWriteback {
m := metricContainerMemoryWriteback{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricContainerNetworkIoUsageRxBytes struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills container.network.io.usage.rx_bytes metric with initial data.
func (m *metricContainerNetworkIoUsageRxBytes) init() {
m.data.SetName("container.network.io.usage.rx_bytes")
m.data.SetDescription("Bytes received by the container.")
m.data.SetUnit("By")
m.data.SetEmptySum()
m.data.Sum().SetIsMonotonic(true)
m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricContainerNetworkIoUsageRxBytes) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, interfaceAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Sum().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
dp.Attributes().PutStr("interface", interfaceAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricContainerNetworkIoUsageRxBytes) updateCapacity() {
if m.data.Sum().DataPoints().Len() > m.capacity {
m.capacity = m.data.Sum().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricContainerNetworkIoUsageRxBytes) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricContainerNetworkIoUsageRxBytes(cfg MetricConfig) metricContainerNetworkIoUsageRxBytes {
m := metricContainerNetworkIoUsageRxBytes{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
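// Illustrative sketch: attribute-bearing metrics such as the network I/O series take
// the attribute value as an extra recordDataPoint argument and store it on the data
// point; init also pre-allocates the data point slice to the last observed capacity.
// The interface name below is hypothetical.
//
//	m := newMetricContainerNetworkIoUsageRxBytes(MetricConfig{Enabled: true})
//	m.recordDataPoint(start, now, 1500, "eth0") // sets attribute interface="eth0"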
type metricContainerNetworkIoUsageRxDropped struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills container.network.io.usage.rx_dropped metric with initial data.
func (m *metricContainerNetworkIoUsageRxDropped) init() {
m.data.SetName("container.network.io.usage.rx_dropped")
m.data.SetDescription("Incoming packets dropped.")
m.data.SetUnit("{packets}")
m.data.SetEmptySum()
m.data.Sum().SetIsMonotonic(true)
m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricContainerNetworkIoUsageRxDropped) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, interfaceAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Sum().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
dp.Attributes().PutStr("interface", interfaceAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricContainerNetworkIoUsageRxDropped) updateCapacity() {
if m.data.Sum().DataPoints().Len() > m.capacity {
m.capacity = m.data.Sum().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricContainerNetworkIoUsageRxDropped) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricContainerNetworkIoUsageRxDropped(cfg MetricConfig) metricContainerNetworkIoUsageRxDropped {
m := metricContainerNetworkIoUsageRxDropped{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricContainerNetworkIoUsageRxErrors struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills container.network.io.usage.rx_errors metric with initial data.
func (m *metricContainerNetworkIoUsageRxErrors) init() {
m.data.SetName("container.network.io.usage.rx_errors")
m.data.SetDescription("Received errors.")
m.data.SetUnit("{errors}")
m.data.SetEmptySum()
m.data.Sum().SetIsMonotonic(true)
m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricContainerNetworkIoUsageRxErrors) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, interfaceAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Sum().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
dp.Attributes().PutStr("interface", interfaceAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricContainerNetworkIoUsageRxErrors) updateCapacity() {
if m.data.Sum().DataPoints().Len() > m.capacity {
m.capacity = m.data.Sum().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricContainerNetworkIoUsageRxErrors) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricContainerNetworkIoUsageRxErrors(cfg MetricConfig) metricContainerNetworkIoUsageRxErrors {
m := metricContainerNetworkIoUsageRxErrors{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricContainerNetworkIoUsageRxPackets struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills container.network.io.usage.rx_packets metric with initial data.
func (m *metricContainerNetworkIoUsageRxPackets) init() {
m.data.SetName("container.network.io.usage.rx_packets")
m.data.SetDescription("Packets received.")
m.data.SetUnit("{packets}")
m.data.SetEmptySum()
m.data.Sum().SetIsMonotonic(true)
m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricContainerNetworkIoUsageRxPackets) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, interfaceAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Sum().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
dp.Attributes().PutStr("interface", interfaceAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricContainerNetworkIoUsageRxPackets) updateCapacity() {
if m.data.Sum().DataPoints().Len() > m.capacity {
m.capacity = m.data.Sum().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricContainerNetworkIoUsageRxPackets) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricContainerNetworkIoUsageRxPackets(cfg MetricConfig) metricContainerNetworkIoUsageRxPackets {
m := metricContainerNetworkIoUsageRxPackets{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricContainerNetworkIoUsageTxBytes struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills container.network.io.usage.tx_bytes metric with initial data.
func (m *metricContainerNetworkIoUsageTxBytes) init() {
m.data.SetName("container.network.io.usage.tx_bytes")
m.data.SetDescription("Bytes sent.")
m.data.SetUnit("By")
m.data.SetEmptySum()
m.data.Sum().SetIsMonotonic(true)
m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricContainerNetworkIoUsageTxBytes) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, interfaceAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Sum().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
dp.Attributes().PutStr("interface", interfaceAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricContainerNetworkIoUsageTxBytes) updateCapacity() {
if m.data.Sum().DataPoints().Len() > m.capacity {
m.capacity = m.data.Sum().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricContainerNetworkIoUsageTxBytes) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricContainerNetworkIoUsageTxBytes(cfg MetricConfig) metricContainerNetworkIoUsageTxBytes {
m := metricContainerNetworkIoUsageTxBytes{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricContainerNetworkIoUsageTxDropped struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills container.network.io.usage.tx_dropped metric with initial data.
func (m *metricContainerNetworkIoUsageTxDropped) init() {
m.data.SetName("container.network.io.usage.tx_dropped")
m.data.SetDescription("Outgoing packets dropped.")
m.data.SetUnit("{packets}")
m.data.SetEmptySum()
m.data.Sum().SetIsMonotonic(true)
m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricContainerNetworkIoUsageTxDropped) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, interfaceAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Sum().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
dp.Attributes().PutStr("interface", interfaceAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricContainerNetworkIoUsageTxDropped) updateCapacity() {
if m.data.Sum().DataPoints().Len() > m.capacity {
m.capacity = m.data.Sum().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricContainerNetworkIoUsageTxDropped) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricContainerNetworkIoUsageTxDropped(cfg MetricConfig) metricContainerNetworkIoUsageTxDropped {
m := metricContainerNetworkIoUsageTxDropped{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricContainerNetworkIoUsageTxErrors struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills container.network.io.usage.tx_errors metric with initial data.
func (m *metricContainerNetworkIoUsageTxErrors) init() {
m.data.SetName("container.network.io.usage.tx_errors")
m.data.SetDescription("Sent errors.")
m.data.SetUnit("{errors}")
m.data.SetEmptySum()
m.data.Sum().SetIsMonotonic(true)
m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricContainerNetworkIoUsageTxErrors) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, interfaceAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Sum().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
dp.Attributes().PutStr("interface", interfaceAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricContainerNetworkIoUsageTxErrors) updateCapacity() {
if m.data.Sum().DataPoints().Len() > m.capacity {
m.capacity = m.data.Sum().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricContainerNetworkIoUsageTxErrors) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricContainerNetworkIoUsageTxErrors(cfg MetricConfig) metricContainerNetworkIoUsageTxErrors {
m := metricContainerNetworkIoUsageTxErrors{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricContainerNetworkIoUsageTxPackets struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills container.network.io.usage.tx_packets metric with initial data.
func (m *metricContainerNetworkIoUsageTxPackets) init() {
m.data.SetName("container.network.io.usage.tx_packets")
m.data.SetDescription("Packets sent.")
m.data.SetUnit("{packets}")
m.data.SetEmptySum()
m.data.Sum().SetIsMonotonic(true)
m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricContainerNetworkIoUsageTxPackets) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, interfaceAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Sum().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
dp.Attributes().PutStr("interface", interfaceAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricContainerNetworkIoUsageTxPackets) updateCapacity() {
if m.data.Sum().DataPoints().Len() > m.capacity {
m.capacity = m.data.Sum().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricContainerNetworkIoUsageTxPackets) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricContainerNetworkIoUsageTxPackets(cfg MetricConfig) metricContainerNetworkIoUsageTxPackets {
m := metricContainerNetworkIoUsageTxPackets{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricContainerPidsCount struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills container.pids.count metric with initial data.
func (m *metricContainerPidsCount) init() {
m.data.SetName("container.pids.count")
m.data.SetDescription("Number of pids in the container's cgroup.")
m.data.SetUnit("{pids}")
m.data.SetEmptySum()
m.data.Sum().SetIsMonotonic(false)
m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
}
func (m *metricContainerPidsCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
if !m.config.Enabled {
return
}
dp := m.data.Sum().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricContainerPidsCount) updateCapacity() {
if m.data.Sum().DataPoints().Len() > m.capacity {
m.capacity = m.data.Sum().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricContainerPidsCount) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricContainerPidsCount(cfg MetricConfig) metricContainerPidsCount {
m := metricContainerPidsCount{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricContainerPidsLimit struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills container.pids.limit metric with initial data.
func (m *metricContainerPidsLimit) init() {
m.data.SetName("container.pids.limit")
m.data.SetDescription("Maximum number of pids in the container's cgroup.")
m.data.SetUnit("{pids}")
m.data.SetEmptySum()
m.data.Sum().SetIsMonotonic(false)
m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
}
func (m *metricContainerPidsLimit) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
if !m.config.Enabled {
return
}
dp := m.data.Sum().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricContainerPidsLimit) updateCapacity() {
if m.data.Sum().DataPoints().Len() > m.capacity {
m.capacity = m.data.Sum().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricContainerPidsLimit) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricContainerPidsLimit(cfg MetricConfig) metricContainerPidsLimit {
m := metricContainerPidsLimit{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricContainerRestarts struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills container.restarts metric with initial data.
func (m *metricContainerRestarts) init() {
m.data.SetName("container.restarts")
m.data.SetDescription("Number of restarts for the container.")
m.data.SetUnit("{restarts}")
m.data.SetEmptySum()
m.data.Sum().SetIsMonotonic(true)
m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
}
func (m *metricContainerRestarts) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
if !m.config.Enabled {
return
}
dp := m.data.Sum().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricContainerRestarts) updateCapacity() {
if m.data.Sum().DataPoints().Len() > m.capacity {
m.capacity = m.data.Sum().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricContainerRestarts) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricContainerRestarts(cfg MetricConfig) metricContainerRestarts {
m := metricContainerRestarts{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricContainerUptime struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills container.uptime metric with initial data.
func (m *metricContainerUptime) init() {
m.data.SetName("container.uptime")
m.data.SetDescription("Time elapsed since container start time.")
m.data.SetUnit("s")
m.data.SetEmptyGauge()
}
func (m *metricContainerUptime) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleValue(val)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricContainerUptime) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricContainerUptime) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricContainerUptime(cfg MetricConfig) metricContainerUptime {
m := metricContainerUptime{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
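// Illustrative sketch: container.uptime is the gauge variant of the same pattern;
// it records float64 values and carries no aggregation temporality.
//
//	u := newMetricContainerUptime(MetricConfig{Enabled: true})
//	u.recordDataPoint(start, now, 12.5) // hypothetical uptime in seconds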
// MetricsBuilder provides an interface for scrapers to report metrics while taking care of all the transformations
// required to produce the metric representation defined in the metadata and user config.
type MetricsBuilder struct {
config MetricsBuilderConfig // config of the metrics builder.
startTime pcommon.Timestamp // start time that will be applied to all recorded data points.
metricsCapacity int // maximum observed number of metrics per resource.
metricsBuffer pmetric.Metrics // accumulates metrics data before emitting.
buildInfo component.BuildInfo // contains version information.
resourceAttributeIncludeFilter map[string]filter.Filter
resourceAttributeExcludeFilter map[string]filter.Filter
metricContainerBlockioIoMergedRecursive metricContainerBlockioIoMergedRecursive
metricContainerBlockioIoQueuedRecursive metricContainerBlockioIoQueuedRecursive
metricContainerBlockioIoServiceBytesRecursive metricContainerBlockioIoServiceBytesRecursive
metricContainerBlockioIoServiceTimeRecursive metricContainerBlockioIoServiceTimeRecursive
metricContainerBlockioIoServicedRecursive metricContainerBlockioIoServicedRecursive
metricContainerBlockioIoTimeRecursive metricContainerBlockioIoTimeRecursive
metricContainerBlockioIoWaitTimeRecursive metricContainerBlockioIoWaitTimeRecursive
metricContainerBlockioSectorsRecursive metricContainerBlockioSectorsRecursive
metricContainerCPULimit metricContainerCPULimit
metricContainerCPULogicalCount metricContainerCPULogicalCount
metricContainerCPUShares metricContainerCPUShares
metricContainerCPUThrottlingDataPeriods metricContainerCPUThrottlingDataPeriods
metricContainerCPUThrottlingDataThrottledPeriods metricContainerCPUThrottlingDataThrottledPeriods
metricContainerCPUThrottlingDataThrottledTime metricContainerCPUThrottlingDataThrottledTime
metricContainerCPUUsageKernelmode metricContainerCPUUsageKernelmode
metricContainerCPUUsagePercpu metricContainerCPUUsagePercpu
metricContainerCPUUsageSystem metricContainerCPUUsageSystem
metricContainerCPUUsageTotal metricContainerCPUUsageTotal
metricContainerCPUUsageUsermode metricContainerCPUUsageUsermode
metricContainerCPUUtilization metricContainerCPUUtilization
metricContainerMemoryActiveAnon metricContainerMemoryActiveAnon
metricContainerMemoryActiveFile metricContainerMemoryActiveFile
metricContainerMemoryAnon metricContainerMemoryAnon
metricContainerMemoryCache metricContainerMemoryCache
metricContainerMemoryDirty metricContainerMemoryDirty
metricContainerMemoryFails metricContainerMemoryFails
metricContainerMemoryFile metricContainerMemoryFile
metricContainerMemoryHierarchicalMemoryLimit metricContainerMemoryHierarchicalMemoryLimit
metricContainerMemoryHierarchicalMemswLimit metricContainerMemoryHierarchicalMemswLimit
metricContainerMemoryInactiveAnon metricContainerMemoryInactiveAnon
metricContainerMemoryInactiveFile metricContainerMemoryInactiveFile
metricContainerMemoryMappedFile metricContainerMemoryMappedFile
metricContainerMemoryPercent metricContainerMemoryPercent
metricContainerMemoryPgfault metricContainerMemoryPgfault
metricContainerMemoryPgmajfault metricContainerMemoryPgmajfault
metricContainerMemoryPgpgin metricContainerMemoryPgpgin
metricContainerMemoryPgpgout metricContainerMemoryPgpgout
metricContainerMemoryRss metricContainerMemoryRss
metricContainerMemoryRssHuge metricContainerMemoryRssHuge
metricContainerMemoryTotalActiveAnon metricContainerMemoryTotalActiveAnon
metricContainerMemoryTotalActiveFile metricContainerMemoryTotalActiveFile
metricContainerMemoryTotalCache metricContainerMemoryTotalCache
metricContainerMemoryTotalDirty metricContainerMemoryTotalDirty
metricContainerMemoryTotalInactiveAnon metricContainerMemoryTotalInactiveAnon
metricContainerMemoryTotalInactiveFile metricContainerMemoryTotalInactiveFile
metricContainerMemoryTotalMappedFile metricContainerMemoryTotalMappedFile
metricContainerMemoryTotalPgfault metricContainerMemoryTotalPgfault
metricContainerMemoryTotalPgmajfault metricContainerMemoryTotalPgmajfault
metricContainerMemoryTotalPgpgin metricContainerMemoryTotalPgpgin
metricContainerMemoryTotalPgpgout metricContainerMemoryTotalPgpgout
metricContainerMemoryTotalRss metricContainerMemoryTotalRss
metricContainerMemoryTotalRssHuge metricContainerMemoryTotalRssHuge
metricContainerMemoryTotalUnevictable metricContainerMemoryTotalUnevictable
metricContainerMemoryTotalWriteback metricContainerMemoryTotalWriteback
metricContainerMemoryUnevictable metricContainerMemoryUnevictable
metricContainerMemoryUsageLimit metricContainerMemoryUsageLimit
metricContainerMemoryUsageMax metricContainerMemoryUsageMax
metricContainerMemoryUsageTotal metricContainerMemoryUsageTotal
metricContainerMemoryWriteback metricContainerMemoryWriteback
metricContainerNetworkIoUsageRxBytes metricContainerNetworkIoUsageRxBytes
metricContainerNetworkIoUsageRxDropped metricContainerNetworkIoUsageRxDropped
metricContainerNetworkIoUsageRxErrors metricContainerNetworkIoUsageRxErrors
metricContainerNetworkIoUsageRxPackets metricContainerNetworkIoUsageRxPackets
metricContainerNetworkIoUsageTxBytes metricContainerNetworkIoUsageTxBytes
metricContainerNetworkIoUsageTxDropped metricContainerNetworkIoUsageTxDropped
metricContainerNetworkIoUsageTxErrors metricContainerNetworkIoUsageTxErrors
metricContainerNetworkIoUsageTxPackets metricContainerNetworkIoUsageTxPackets
metricContainerPidsCount metricContainerPidsCount
metricContainerPidsLimit metricContainerPidsLimit
metricContainerRestarts metricContainerRestarts
metricContainerUptime metricContainerUptime
}
// MetricBuilderOption applies changes to the default metrics builder.
type MetricBuilderOption interface {
apply(*MetricsBuilder)
}
type metricBuilderOptionFunc func(mb *MetricsBuilder)
func (mbof metricBuilderOptionFunc) apply(mb *MetricsBuilder) {
mbof(mb)
}
// WithStartTime sets startTime on the metrics builder.
func WithStartTime(startTime pcommon.Timestamp) MetricBuilderOption {
return metricBuilderOptionFunc(func(mb *MetricsBuilder) {
mb.startTime = startTime
})
}
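// NewMetricsBuilder creates a MetricsBuilder with the provided config and receiver settings,
// wires up the per-metric helpers and resource attribute filters, and applies any MetricBuilderOption values.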
func NewMetricsBuilder(mbc MetricsBuilderConfig, settings receiver.Settings, options ...MetricBuilderOption) *MetricsBuilder {
mb := &MetricsBuilder{
config: mbc,
startTime: pcommon.NewTimestampFromTime(time.Now()),
metricsBuffer: pmetric.NewMetrics(),
buildInfo: settings.BuildInfo,
metricContainerBlockioIoMergedRecursive: newMetricContainerBlockioIoMergedRecursive(mbc.Metrics.ContainerBlockioIoMergedRecursive),
metricContainerBlockioIoQueuedRecursive: newMetricContainerBlockioIoQueuedRecursive(mbc.Metrics.ContainerBlockioIoQueuedRecursive),
metricContainerBlockioIoServiceBytesRecursive: newMetricContainerBlockioIoServiceBytesRecursive(mbc.Metrics.ContainerBlockioIoServiceBytesRecursive),
metricContainerBlockioIoServiceTimeRecursive: newMetricContainerBlockioIoServiceTimeRecursive(mbc.Metrics.ContainerBlockioIoServiceTimeRecursive),
metricContainerBlockioIoServicedRecursive: newMetricContainerBlockioIoServicedRecursive(mbc.Metrics.ContainerBlockioIoServicedRecursive),
metricContainerBlockioIoTimeRecursive: newMetricContainerBlockioIoTimeRecursive(mbc.Metrics.ContainerBlockioIoTimeRecursive),
metricContainerBlockioIoWaitTimeRecursive: newMetricContainerBlockioIoWaitTimeRecursive(mbc.Metrics.ContainerBlockioIoWaitTimeRecursive),
metricContainerBlockioSectorsRecursive: newMetricContainerBlockioSectorsRecursive(mbc.Metrics.ContainerBlockioSectorsRecursive),
metricContainerCPULimit: newMetricContainerCPULimit(mbc.Metrics.ContainerCPULimit),
metricContainerCPULogicalCount: newMetricContainerCPULogicalCount(mbc.Metrics.ContainerCPULogicalCount),
metricContainerCPUShares: newMetricContainerCPUShares(mbc.Metrics.ContainerCPUShares),
metricContainerCPUThrottlingDataPeriods: newMetricContainerCPUThrottlingDataPeriods(mbc.Metrics.ContainerCPUThrottlingDataPeriods),
metricContainerCPUThrottlingDataThrottledPeriods: newMetricContainerCPUThrottlingDataThrottledPeriods(mbc.Metrics.ContainerCPUThrottlingDataThrottledPeriods),
metricContainerCPUThrottlingDataThrottledTime: newMetricContainerCPUThrottlingDataThrottledTime(mbc.Metrics.ContainerCPUThrottlingDataThrottledTime),
metricContainerCPUUsageKernelmode: newMetricContainerCPUUsageKernelmode(mbc.Metrics.ContainerCPUUsageKernelmode),
metricContainerCPUUsagePercpu: newMetricContainerCPUUsagePercpu(mbc.Metrics.ContainerCPUUsagePercpu),
metricContainerCPUUsageSystem: newMetricContainerCPUUsageSystem(mbc.Metrics.ContainerCPUUsageSystem),
metricContainerCPUUsageTotal: newMetricContainerCPUUsageTotal(mbc.Metrics.ContainerCPUUsageTotal),
metricContainerCPUUsageUsermode: newMetricContainerCPUUsageUsermode(mbc.Metrics.ContainerCPUUsageUsermode),
metricContainerCPUUtilization: newMetricContainerCPUUtilization(mbc.Metrics.ContainerCPUUtilization),
metricContainerMemoryActiveAnon: newMetricContainerMemoryActiveAnon(mbc.Metrics.ContainerMemoryActiveAnon),
metricContainerMemoryActiveFile: newMetricContainerMemoryActiveFile(mbc.Metrics.ContainerMemoryActiveFile),
metricContainerMemoryAnon: newMetricContainerMemoryAnon(mbc.Metrics.ContainerMemoryAnon),
metricContainerMemoryCache: newMetricContainerMemoryCache(mbc.Metrics.ContainerMemoryCache),
metricContainerMemoryDirty: newMetricContainerMemoryDirty(mbc.Metrics.ContainerMemoryDirty),
metricContainerMemoryFails: newMetricContainerMemoryFails(mbc.Metrics.ContainerMemoryFails),
metricContainerMemoryFile: newMetricContainerMemoryFile(mbc.Metrics.ContainerMemoryFile),
metricContainerMemoryHierarchicalMemoryLimit: newMetricContainerMemoryHierarchicalMemoryLimit(mbc.Metrics.ContainerMemoryHierarchicalMemoryLimit),
metricContainerMemoryHierarchicalMemswLimit: newMetricContainerMemoryHierarchicalMemswLimit(mbc.Metrics.ContainerMemoryHierarchicalMemswLimit),
metricContainerMemoryInactiveAnon: newMetricContainerMemoryInactiveAnon(mbc.Metrics.ContainerMemoryInactiveAnon),
metricContainerMemoryInactiveFile: newMetricContainerMemoryInactiveFile(mbc.Metrics.ContainerMemoryInactiveFile),
metricContainerMemoryMappedFile: newMetricContainerMemoryMappedFile(mbc.Metrics.ContainerMemoryMappedFile),
metricContainerMemoryPercent: newMetricContainerMemoryPercent(mbc.Metrics.ContainerMemoryPercent),
metricContainerMemoryPgfault: newMetricContainerMemoryPgfault(mbc.Metrics.ContainerMemoryPgfault),
metricContainerMemoryPgmajfault: newMetricContainerMemoryPgmajfault(mbc.Metrics.ContainerMemoryPgmajfault),
metricContainerMemoryPgpgin: newMetricContainerMemoryPgpgin(mbc.Metrics.ContainerMemoryPgpgin),
metricContainerMemoryPgpgout: newMetricContainerMemoryPgpgout(mbc.Metrics.ContainerMemoryPgpgout),
metricContainerMemoryRss: newMetricContainerMemoryRss(mbc.Metrics.ContainerMemoryRss),
metricContainerMemoryRssHuge: newMetricContainerMemoryRssHuge(mbc.Metrics.ContainerMemoryRssHuge),
metricContainerMemoryTotalActiveAnon: newMetricContainerMemoryTotalActiveAnon(mbc.Metrics.ContainerMemoryTotalActiveAnon),
metricContainerMemoryTotalActiveFile: newMetricContainerMemoryTotalActiveFile(mbc.Metrics.ContainerMemoryTotalActiveFile),
metricContainerMemoryTotalCache: newMetricContainerMemoryTotalCache(mbc.Metrics.ContainerMemoryTotalCache),
metricContainerMemoryTotalDirty: newMetricContainerMemoryTotalDirty(mbc.Metrics.ContainerMemoryTotalDirty),
metricContainerMemoryTotalInactiveAnon: newMetricContainerMemoryTotalInactiveAnon(mbc.Metrics.ContainerMemoryTotalInactiveAnon),
metricContainerMemoryTotalInactiveFile: newMetricContainerMemoryTotalInactiveFile(mbc.Metrics.ContainerMemoryTotalInactiveFile),
metricContainerMemoryTotalMappedFile: newMetricContainerMemoryTotalMappedFile(mbc.Metrics.ContainerMemoryTotalMappedFile),
metricContainerMemoryTotalPgfault: newMetricContainerMemoryTotalPgfault(mbc.Metrics.ContainerMemoryTotalPgfault),
metricContainerMemoryTotalPgmajfault: newMetricContainerMemoryTotalPgmajfault(mbc.Metrics.ContainerMemoryTotalPgmajfault),
metricContainerMemoryTotalPgpgin: newMetricContainerMemoryTotalPgpgin(mbc.Metrics.ContainerMemoryTotalPgpgin),
metricContainerMemoryTotalPgpgout: newMetricContainerMemoryTotalPgpgout(mbc.Metrics.ContainerMemoryTotalPgpgout),
metricContainerMemoryTotalRss: newMetricContainerMemoryTotalRss(mbc.Metrics.ContainerMemoryTotalRss),
metricContainerMemoryTotalRssHuge: newMetricContainerMemoryTotalRssHuge(mbc.Metrics.ContainerMemoryTotalRssHuge),
metricContainerMemoryTotalUnevictable: newMetricContainerMemoryTotalUnevictable(mbc.Metrics.ContainerMemoryTotalUnevictable),
metricContainerMemoryTotalWriteback: newMetricContainerMemoryTotalWriteback(mbc.Metrics.ContainerMemoryTotalWriteback),
metricContainerMemoryUnevictable: newMetricContainerMemoryUnevictable(mbc.Metrics.ContainerMemoryUnevictable),
metricContainerMemoryUsageLimit: newMetricContainerMemoryUsageLimit(mbc.Metrics.ContainerMemoryUsageLimit),
metricContainerMemoryUsageMax: newMetricContainerMemoryUsageMax(mbc.Metrics.ContainerMemoryUsageMax),
metricContainerMemoryUsageTotal: newMetricContainerMemoryUsageTotal(mbc.Metrics.ContainerMemoryUsageTotal),
metricContainerMemoryWriteback: newMetricContainerMemoryWriteback(mbc.Metrics.ContainerMemoryWriteback),
metricContainerNetworkIoUsageRxBytes: newMetricContainerNetworkIoUsageRxBytes(mbc.Metrics.ContainerNetworkIoUsageRxBytes),
metricContainerNetworkIoUsageRxDropped: newMetricContainerNetworkIoUsageRxDropped(mbc.Metrics.ContainerNetworkIoUsageRxDropped),
metricContainerNetworkIoUsageRxErrors: newMetricContainerNetworkIoUsageRxErrors(mbc.Metrics.ContainerNetworkIoUsageRxErrors),
metricContainerNetworkIoUsageRxPackets: newMetricContainerNetworkIoUsageRxPackets(mbc.Metrics.ContainerNetworkIoUsageRxPackets),
metricContainerNetworkIoUsageTxBytes: newMetricContainerNetworkIoUsageTxBytes(mbc.Metrics.ContainerNetworkIoUsageTxBytes),
metricContainerNetworkIoUsageTxDropped: newMetricContainerNetworkIoUsageTxDropped(mbc.Metrics.ContainerNetworkIoUsageTxDropped),
metricContainerNetworkIoUsageTxErrors: newMetricContainerNetworkIoUsageTxErrors(mbc.Metrics.ContainerNetworkIoUsageTxErrors),
metricContainerNetworkIoUsageTxPackets: newMetricContainerNetworkIoUsageTxPackets(mbc.Metrics.ContainerNetworkIoUsageTxPackets),
metricContainerPidsCount: newMetricContainerPidsCount(mbc.Metrics.ContainerPidsCount),
metricContainerPidsLimit: newMetricContainerPidsLimit(mbc.Metrics.ContainerPidsLimit),
metricContainerRestarts: newMetricContainerRestarts(mbc.Metrics.ContainerRestarts),
metricContainerUptime: newMetricContainerUptime(mbc.Metrics.ContainerUptime),
resourceAttributeIncludeFilter: make(map[string]filter.Filter),
resourceAttributeExcludeFilter: make(map[string]filter.Filter),
}
if mbc.ResourceAttributes.ContainerCommandLine.MetricsInclude != nil {
mb.resourceAttributeIncludeFilter["container.command_line"] = filter.CreateFilter(mbc.ResourceAttributes.ContainerCommandLine.MetricsInclude)
}
if mbc.ResourceAttributes.ContainerCommandLine.MetricsExclude != nil {
mb.resourceAttributeExcludeFilter["container.command_line"] = filter.CreateFilter(mbc.ResourceAttributes.ContainerCommandLine.MetricsExclude)
}
if mbc.ResourceAttributes.ContainerHostname.MetricsInclude != nil {
mb.resourceAttributeIncludeFilter["container.hostname"] = filter.CreateFilter(mbc.ResourceAttributes.ContainerHostname.MetricsInclude)
}
if mbc.ResourceAttributes.ContainerHostname.MetricsExclude != nil {
mb.resourceAttributeExcludeFilter["container.hostname"] = filter.CreateFilter(mbc.ResourceAttributes.ContainerHostname.MetricsExclude)
}
if mbc.ResourceAttributes.ContainerID.MetricsInclude != nil {
mb.resourceAttributeIncludeFilter["container.id"] = filter.CreateFilter(mbc.ResourceAttributes.ContainerID.MetricsInclude)
}
if mbc.ResourceAttributes.ContainerID.MetricsExclude != nil {
mb.resourceAttributeExcludeFilter["container.id"] = filter.CreateFilter(mbc.ResourceAttributes.ContainerID.MetricsExclude)
}
if mbc.ResourceAttributes.ContainerImageID.MetricsInclude != nil {
mb.resourceAttributeIncludeFilter["container.image.id"] = filter.CreateFilter(mbc.ResourceAttributes.ContainerImageID.MetricsInclude)
}
if mbc.ResourceAttributes.ContainerImageID.MetricsExclude != nil {
mb.resourceAttributeExcludeFilter["container.image.id"] = filter.CreateFilter(mbc.ResourceAttributes.ContainerImageID.MetricsExclude)
}
if mbc.ResourceAttributes.ContainerImageName.MetricsInclude != nil {
mb.resourceAttributeIncludeFilter["container.image.name"] = filter.CreateFilter(mbc.ResourceAttributes.ContainerImageName.MetricsInclude)
}
if mbc.ResourceAttributes.ContainerImageName.MetricsExclude != nil {
mb.resourceAttributeExcludeFilter["container.image.name"] = filter.CreateFilter(mbc.ResourceAttributes.ContainerImageName.MetricsExclude)
}
if mbc.ResourceAttributes.ContainerName.MetricsInclude != nil {
mb.resourceAttributeIncludeFilter["container.name"] = filter.CreateFilter(mbc.ResourceAttributes.ContainerName.MetricsInclude)
}
if mbc.ResourceAttributes.ContainerName.MetricsExclude != nil {
mb.resourceAttributeExcludeFilter["container.name"] = filter.CreateFilter(mbc.ResourceAttributes.ContainerName.MetricsExclude)
}
if mbc.ResourceAttributes.ContainerRuntime.MetricsInclude != nil {
mb.resourceAttributeIncludeFilter["container.runtime"] = filter.CreateFilter(mbc.ResourceAttributes.ContainerRuntime.MetricsInclude)
}
if mbc.ResourceAttributes.ContainerRuntime.MetricsExclude != nil {
mb.resourceAttributeExcludeFilter["container.runtime"] = filter.CreateFilter(mbc.ResourceAttributes.ContainerRuntime.MetricsExclude)
}
for _, op := range options {
op.apply(mb)
}
return mb
}
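// Illustrative sketch: constructing a builder inside a scraper. The variables mbc
// (a MetricsBuilderConfig, e.g. from DefaultMetricsBuilderConfig in the sibling
// generated_config.go) and set (the receiver.Settings handed to the factory) are
// assumed to be in scope; the start time option is optional.
//
//	mb := NewMetricsBuilder(mbc, set,
//		WithStartTime(pcommon.NewTimestampFromTime(scrapeStartTime))) // scrapeStartTime: assumed time.Time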
// NewResourceBuilder returns a new resource builder that should be used to build a resource associated with the emitted metrics.
func (mb *MetricsBuilder) NewResourceBuilder() *ResourceBuilder {
return NewResourceBuilder(mb.config.ResourceAttributes)
}
// updateCapacity tracks the maximum observed number of metrics per resource, which is used as the slice capacity for subsequently emitted resources.
func (mb *MetricsBuilder) updateCapacity(rm pmetric.ResourceMetrics) {
if mb.metricsCapacity < rm.ScopeMetrics().At(0).Metrics().Len() {
mb.metricsCapacity = rm.ScopeMetrics().At(0).Metrics().Len()
}
}
// ResourceMetricsOption applies changes to provided resource metrics.
type ResourceMetricsOption interface {
apply(pmetric.ResourceMetrics)
}
type resourceMetricsOptionFunc func(pmetric.ResourceMetrics)
func (rmof resourceMetricsOptionFunc) apply(rm pmetric.ResourceMetrics) {
rmof(rm)
}
// WithResource sets the provided resource on the emitted ResourceMetrics.
// It's recommended to use ResourceBuilder to create the resource.
func WithResource(res pcommon.Resource) ResourceMetricsOption {
return resourceMetricsOptionFunc(func(rm pmetric.ResourceMetrics) {
res.CopyTo(rm.Resource())
})
}
// WithStartTimeOverride overrides the start time for all data points in the emitted resource metrics.
// This option should only be used if a different start time has to be set on metrics coming from different resources.
func WithStartTimeOverride(start pcommon.Timestamp) ResourceMetricsOption {
return resourceMetricsOptionFunc(func(rm pmetric.ResourceMetrics) {
var dps pmetric.NumberDataPointSlice
metrics := rm.ScopeMetrics().At(0).Metrics()
for i := 0; i < metrics.Len(); i++ {
switch metrics.At(i).Type() {
case pmetric.MetricTypeGauge:
dps = metrics.At(i).Gauge().DataPoints()
case pmetric.MetricTypeSum:
dps = metrics.At(i).Sum().DataPoints()
}
for j := 0; j < dps.Len(); j++ {
dps.At(j).SetStartTimestamp(start)
}
}
})
}
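// Illustrative sketch: resource metrics options are typically built from the generated
// ResourceBuilder (see NewResourceBuilder above and the sibling generated_resource.go).
// The setter names and attribute values below are assumptions.
//
//	rb := mb.NewResourceBuilder()
//	rb.SetContainerID("4a1fdeadbeef")
//	rb.SetContainerName("my-container")
//	opts := []ResourceMetricsOption{
//		WithResource(rb.Emit()),
//		WithStartTimeOverride(pcommon.NewTimestampFromTime(containerStartTime)), // containerStartTime: assumed time.Time
//	}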
// EmitForResource saves all the generated metrics under a new resource and updates the internal state to be ready for
// recording another set of data points as part of another resource. This function can be helpful when one scraper
// needs to emit metrics from several resources. Otherwise calling this function is not required;
// the `Emit` function can be called directly instead.
// Resource attributes should be provided as ResourceMetricsOption arguments.
func (mb *MetricsBuilder) EmitForResource(options ...ResourceMetricsOption) {
rm := pmetric.NewResourceMetrics()
rm.SetSchemaUrl(conventions.SchemaURL)
ils := rm.ScopeMetrics().AppendEmpty()
ils.Scope().SetName(ScopeName)
ils.Scope().SetVersion(mb.buildInfo.Version)
ils.Metrics().EnsureCapacity(mb.metricsCapacity)
mb.metricContainerBlockioIoMergedRecursive.emit(ils.Metrics())
mb.metricContainerBlockioIoQueuedRecursive.emit(ils.Metrics())
mb.metricContainerBlockioIoServiceBytesRecursive.emit(ils.Metrics())
mb.metricContainerBlockioIoServiceTimeRecursive.emit(ils.Metrics())
mb.metricContainerBlockioIoServicedRecursive.emit(ils.Metrics())
mb.metricContainerBlockioIoTimeRecursive.emit(ils.Metrics())
mb.metricContainerBlockioIoWaitTimeRecursive.emit(ils.Metrics())
mb.metricContainerBlockioSectorsRecursive.emit(ils.Metrics())
mb.metricContainerCPULimit.emit(ils.Metrics())
mb.metricContainerCPULogicalCount.emit(ils.Metrics())
mb.metricContainerCPUShares.emit(ils.Metrics())
mb.metricContainerCPUThrottlingDataPeriods.emit(ils.Metrics())
mb.metricContainerCPUThrottlingDataThrottledPeriods.emit(ils.Metrics())
mb.metricContainerCPUThrottlingDataThrottledTime.emit(ils.Metrics())
mb.metricContainerCPUUsageKernelmode.emit(ils.Metrics())
mb.metricContainerCPUUsagePercpu.emit(ils.Metrics())
mb.metricContainerCPUUsageSystem.emit(ils.Metrics())
mb.metricContainerCPUUsageTotal.emit(ils.Metrics())
mb.metricContainerCPUUsageUsermode.emit(ils.Metrics())
mb.metricContainerCPUUtilization.emit(ils.Metrics())
mb.metricContainerMemoryActiveAnon.emit(ils.Metrics())
mb.metricContainerMemoryActiveFile.emit(ils.Metrics())
mb.metricContainerMemoryAnon.emit(ils.Metrics())
mb.metricContainerMemoryCache.emit(ils.Metrics())
mb.metricContainerMemoryDirty.emit(ils.Metrics())
mb.metricContainerMemoryFails.emit(ils.Metrics())
mb.metricContainerMemoryFile.emit(ils.Metrics())
mb.metricContainerMemoryHierarchicalMemoryLimit.emit(ils.Metrics())
mb.metricContainerMemoryHierarchicalMemswLimit.emit(ils.Metrics())
mb.metricContainerMemoryInactiveAnon.emit(ils.Metrics())
mb.metricContainerMemoryInactiveFile.emit(ils.Metrics())
mb.metricContainerMemoryMappedFile.emit(ils.Metrics())
mb.metricContainerMemoryPercent.emit(ils.Metrics())
mb.metricContainerMemoryPgfault.emit(ils.Metrics())
mb.metricContainerMemoryPgmajfault.emit(ils.Metrics())
mb.metricContainerMemoryPgpgin.emit(ils.Metrics())
mb.metricContainerMemoryPgpgout.emit(ils.Metrics())
mb.metricContainerMemoryRss.emit(ils.Metrics())
mb.metricContainerMemoryRssHuge.emit(ils.Metrics())
mb.metricContainerMemoryTotalActiveAnon.emit(ils.Metrics())
mb.metricContainerMemoryTotalActiveFile.emit(ils.Metrics())
mb.metricContainerMemoryTotalCache.emit(ils.Metrics())
mb.metricContainerMemoryTotalDirty.emit(ils.Metrics())
mb.metricContainerMemoryTotalInactiveAnon.emit(ils.Metrics())
mb.metricContainerMemoryTotalInactiveFile.emit(ils.Metrics())
mb.metricContainerMemoryTotalMappedFile.emit(ils.Metrics())
mb.metricContainerMemoryTotalPgfault.emit(ils.Metrics())
mb.metricContainerMemoryTotalPgmajfault.emit(ils.Metrics())
mb.metricContainerMemoryTotalPgpgin.emit(ils.Metrics())
mb.metricContainerMemoryTotalPgpgout.emit(ils.Metrics())
mb.metricContainerMemoryTotalRss.emit(ils.Metrics())
mb.metricContainerMemoryTotalRssHuge.emit(ils.Metrics())
mb.metricContainerMemoryTotalUnevictable.emit(ils.Metrics())
mb.metricContainerMemoryTotalWriteback.emit(ils.Metrics())
mb.metricContainerMemoryUnevictable.emit(ils.Metrics())
mb.metricContainerMemoryUsageLimit.emit(ils.Metrics())
mb.metricContainerMemoryUsageMax.emit(ils.Metrics())
mb.metricContainerMemoryUsageTotal.emit(ils.Metrics())
mb.metricContainerMemoryWriteback.emit(ils.Metrics())
mb.metricContainerNetworkIoUsageRxBytes.emit(ils.Metrics())
mb.metricContainerNetworkIoUsageRxDropped.emit(ils.Metrics())
mb.metricContainerNetworkIoUsageRxErrors.emit(ils.Metrics())
mb.metricContainerNetworkIoUsageRxPackets.emit(ils.Metrics())
mb.metricContainerNetworkIoUsageTxBytes.emit(ils.Metrics())
mb.metricContainerNetworkIoUsageTxDropped.emit(ils.Metrics())
mb.metricContainerNetworkIoUsageTxErrors.emit(ils.Metrics())
mb.metricContainerNetworkIoUsageTxPackets.emit(ils.Metrics())
mb.metricContainerPidsCount.emit(ils.Metrics())
mb.metricContainerPidsLimit.emit(ils.Metrics())
mb.metricContainerRestarts.emit(ils.Metrics())
mb.metricContainerUptime.emit(ils.Metrics())
for _, op := range options {
op.apply(rm)
}
for attr, filter := range mb.resourceAttributeIncludeFilter {
if val, ok := rm.Resource().Attributes().Get(attr); ok && !filter.Matches(val.AsString()) {
return
}
}
for attr, filter := range mb.resourceAttributeExcludeFilter {
if val, ok := rm.Resource().Attributes().Get(attr); ok && filter.Matches(val.AsString()) {
return
}
}
if ils.Metrics().Len() > 0 {
mb.updateCapacity(rm)
rm.MoveTo(mb.metricsBuffer.ResourceMetrics().AppendEmpty())
}
}
// Emit returns all the metrics accumulated by the metrics builder and updates the internal state to be ready for
// recording another set of metrics. This function is responsible for applying all the transformations required to
// produce the metric representation defined in the metadata and user config, e.g. delta or cumulative.
func (mb *MetricsBuilder) Emit(options ...ResourceMetricsOption) pmetric.Metrics {
mb.EmitForResource(options...)
metrics := mb.metricsBuffer
mb.metricsBuffer = pmetric.NewMetrics()
return metrics
}
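// Illustrative sketch of a full scrape cycle: record data points for each container,
// emit them under that container's resource, and hand the accumulated batch to the
// pipeline once per scrape. The containers slice and its fields are hypothetical.
//
//	for _, c := range containers {
//		now := pcommon.NewTimestampFromTime(time.Now())
//		mb.RecordContainerMemoryUsageTotalDataPoint(now, c.memUsage)
//		mb.RecordContainerNetworkIoUsageRxBytesDataPoint(now, c.rxBytes, "eth0")
//		mb.EmitForResource(WithResource(c.resource))
//	}
//	md := mb.Emit() // pmetric.Metrics ready to be passed to the next consumer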
// RecordContainerBlockioIoMergedRecursiveDataPoint adds a data point to container.blockio.io_merged_recursive metric.
func (mb *MetricsBuilder) RecordContainerBlockioIoMergedRecursiveDataPoint(ts pcommon.Timestamp, val int64, deviceMajorAttributeValue string, deviceMinorAttributeValue string, operationAttributeValue string) {
mb.metricContainerBlockioIoMergedRecursive.recordDataPoint(mb.startTime, ts, val, deviceMajorAttributeValue, deviceMinorAttributeValue, operationAttributeValue)
}
// RecordContainerBlockioIoQueuedRecursiveDataPoint adds a data point to container.blockio.io_queued_recursive metric.
func (mb *MetricsBuilder) RecordContainerBlockioIoQueuedRecursiveDataPoint(ts pcommon.Timestamp, val int64, deviceMajorAttributeValue string, deviceMinorAttributeValue string, operationAttributeValue string) {
mb.metricContainerBlockioIoQueuedRecursive.recordDataPoint(mb.startTime, ts, val, deviceMajorAttributeValue, deviceMinorAttributeValue, operationAttributeValue)
}
// RecordContainerBlockioIoServiceBytesRecursiveDataPoint adds a data point to container.blockio.io_service_bytes_recursive metric.
func (mb *MetricsBuilder) RecordContainerBlockioIoServiceBytesRecursiveDataPoint(ts pcommon.Timestamp, val int64, deviceMajorAttributeValue string, deviceMinorAttributeValue string, operationAttributeValue string) {
mb.metricContainerBlockioIoServiceBytesRecursive.recordDataPoint(mb.startTime, ts, val, deviceMajorAttributeValue, deviceMinorAttributeValue, operationAttributeValue)
}
// RecordContainerBlockioIoServiceTimeRecursiveDataPoint adds a data point to container.blockio.io_service_time_recursive metric.
func (mb *MetricsBuilder) RecordContainerBlockioIoServiceTimeRecursiveDataPoint(ts pcommon.Timestamp, val int64, deviceMajorAttributeValue string, deviceMinorAttributeValue string, operationAttributeValue string) {
mb.metricContainerBlockioIoServiceTimeRecursive.recordDataPoint(mb.startTime, ts, val, deviceMajorAttributeValue, deviceMinorAttributeValue, operationAttributeValue)
}
// RecordContainerBlockioIoServicedRecursiveDataPoint adds a data point to container.blockio.io_serviced_recursive metric.
func (mb *MetricsBuilder) RecordContainerBlockioIoServicedRecursiveDataPoint(ts pcommon.Timestamp, val int64, deviceMajorAttributeValue string, deviceMinorAttributeValue string, operationAttributeValue string) {
mb.metricContainerBlockioIoServicedRecursive.recordDataPoint(mb.startTime, ts, val, deviceMajorAttributeValue, deviceMinorAttributeValue, operationAttributeValue)
}
// RecordContainerBlockioIoTimeRecursiveDataPoint adds a data point to container.blockio.io_time_recursive metric.
func (mb *MetricsBuilder) RecordContainerBlockioIoTimeRecursiveDataPoint(ts pcommon.Timestamp, val int64, deviceMajorAttributeValue string, deviceMinorAttributeValue string, operationAttributeValue string) {
mb.metricContainerBlockioIoTimeRecursive.recordDataPoint(mb.startTime, ts, val, deviceMajorAttributeValue, deviceMinorAttributeValue, operationAttributeValue)
}
// RecordContainerBlockioIoWaitTimeRecursiveDataPoint adds a data point to container.blockio.io_wait_time_recursive metric.
func (mb *MetricsBuilder) RecordContainerBlockioIoWaitTimeRecursiveDataPoint(ts pcommon.Timestamp, val int64, deviceMajorAttributeValue string, deviceMinorAttributeValue string, operationAttributeValue string) {
mb.metricContainerBlockioIoWaitTimeRecursive.recordDataPoint(mb.startTime, ts, val, deviceMajorAttributeValue, deviceMinorAttributeValue, operationAttributeValue)
}
// RecordContainerBlockioSectorsRecursiveDataPoint adds a data point to container.blockio.sectors_recursive metric.
func (mb *MetricsBuilder) RecordContainerBlockioSectorsRecursiveDataPoint(ts pcommon.Timestamp, val int64, deviceMajorAttributeValue string, deviceMinorAttributeValue string, operationAttributeValue string) {
mb.metricContainerBlockioSectorsRecursive.recordDataPoint(mb.startTime, ts, val, deviceMajorAttributeValue, deviceMinorAttributeValue, operationAttributeValue)
}
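// Illustrative sketch: the blockio record methods take the device major/minor numbers
// and the operation as string attribute values. The arguments below are hypothetical.
//
//	mb.RecordContainerBlockioIoServiceBytesRecursiveDataPoint(now, 4096, "8", "0", "read")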
// RecordContainerCPULimitDataPoint adds a data point to container.cpu.limit metric.
func (mb *MetricsBuilder) RecordContainerCPULimitDataPoint(ts pcommon.Timestamp, val float64) {
mb.metricContainerCPULimit.recordDataPoint(mb.startTime, ts, val)
}
// RecordContainerCPULogicalCountDataPoint adds a data point to container.cpu.logical.count metric.
func (mb *MetricsBuilder) RecordContainerCPULogicalCountDataPoint(ts pcommon.Timestamp, val int64) {
mb.metricContainerCPULogicalCount.recordDataPoint(mb.startTime, ts, val)
}
// RecordContainerCPUSharesDataPoint adds a data point to container.cpu.shares metric.
func (mb *MetricsBuilder) RecordContainerCPUSharesDataPoint(ts pcommon.Timestamp, val int64) {
mb.metricContainerCPUShares.recordDataPoint(mb.startTime, ts, val)
}
// RecordContainerCPUThrottlingDataPeriodsDataPoint adds a data point to container.cpu.throttling_data.periods metric.
func (mb *MetricsBuilder) RecordContainerCPUThrottlingDataPeriodsDataPoint(ts pcommon.Timestamp, val int64) {
mb.metricContainerCPUThrottlingDataPeriods.recordDataPoint(mb.startTime, ts, val)
}
// RecordContainerCPUThrottlingDataThrottledPeriodsDataPoint adds a data point to container.cpu.throttling_data.throttled_periods metric.
func (mb *MetricsBuilder) RecordContainerCPUThrottlingDataThrottledPeriodsDataPoint(ts pcommon.Timestamp, val int64) {
mb.metricContainerCPUThrottlingDataThrottledPeriods.recordDataPoint(mb.startTime, ts, val)
}
// RecordContainerCPUThrottlingDataThrottledTimeDataPoint adds a data point to container.cpu.throttling_data.throttled_time metric.
func (mb *MetricsBuilder) RecordContainerCPUThrottlingDataThrottledTimeDataPoint(ts pcommon.Timestamp, val int64) {
mb.metricContainerCPUThrottlingDataThrottledTime.recordDataPoint(mb.startTime, ts, val)
}
// RecordContainerCPUUsageKernelmodeDataPoint adds a data point to container.cpu.usage.kernelmode metric.
func (mb *MetricsBuilder) RecordContainerCPUUsageKernelmodeDataPoint(ts pcommon.Timestamp, val int64) {
mb.metricContainerCPUUsageKernelmode.recordDataPoint(mb.startTime, ts, val)
}
// RecordContainerCPUUsagePercpuDataPoint adds a data point to container.cpu.usage.percpu metric.
func (mb *MetricsBuilder) RecordContainerCPUUsagePercpuDataPoint(ts pcommon.Timestamp, val int64, coreAttributeValue string) {
mb.metricContainerCPUUsagePercpu.recordDataPoint(mb.startTime, ts, val, coreAttributeValue)
}
// RecordContainerCPUUsageSystemDataPoint adds a data point to container.cpu.usage.system metric.
func (mb *MetricsBuilder) RecordContainerCPUUsageSystemDataPoint(ts pcommon.Timestamp, val int64) {
mb.metricContainerCPUUsageSystem.recordDataPoint(mb.startTime, ts, val)
}
// RecordContainerCPUUsageTotalDataPoint adds a data point to container.cpu.usage.total metric.
func (mb *MetricsBuilder) RecordContainerCPUUsageTotalDataPoint(ts pcommon.Timestamp, val int64) {
mb.metricContainerCPUUsageTotal.recordDataPoint(mb.startTime, ts, val)
}
// RecordContainerCPUUsageUsermodeDataPoint adds a data point to container.cpu.usage.usermode metric.
func (mb *MetricsBuilder) RecordContainerCPUUsageUsermodeDataPoint(ts pcommon.Timestamp, val int64) {
mb.metricContainerCPUUsageUsermode.recordDataPoint(mb.startTime, ts, val)
}
// RecordContainerCPUUtilizationDataPoint adds a data point to container.cpu.utilization metric.
func (mb *MetricsBuilder) RecordContainerCPUUtilizationDataPoint(ts pcommon.Timestamp, val float64) {
mb.metricContainerCPUUtilization.recordDataPoint(mb.startTime, ts, val)
}
// RecordContainerMemoryActiveAnonDataPoint adds a data point to container.memory.active_anon metric.
func (mb *MetricsBuilder) RecordContainerMemoryActiveAnonDataPoint(ts pcommon.Timestamp, val int64) {
mb.metricContainerMemoryActiveAnon.recordDataPoint(mb.startTime, ts, val)
}
// RecordContainerMemoryActiveFileDataPoint adds a data point to container.memory.active_file metric.
func (mb *MetricsBuilder) RecordContainerMemoryActiveFileDataPoint(ts pcommon.Timestamp, val int64) {
mb.metricContainerMemoryActiveFile.recordDataPoint(mb.startTime, ts, val)
}
// RecordContainerMemoryAnonDataPoint adds a data point to container.memory.anon metric.
func (mb *MetricsBuilder) RecordContainerMemoryAnonDataPoint(ts pcommon.Timestamp, val int64) {
mb.metricContainerMemoryAnon.recordDataPoint(mb.startTime, ts, val)
}
// RecordContainerMemoryCacheDataPoint adds a data point to container.memory.cache metric.
func (mb *MetricsBuilder) RecordContainerMemoryCacheDataPoint(ts pcommon.Timestamp, val int64) {
mb.metricContainerMemoryCache.recordDataPoint(mb.startTime, ts, val)
}
// RecordContainerMemoryDirtyDataPoint adds a data point to container.memory.dirty metric.
func (mb *MetricsBuilder) RecordContainerMemoryDirtyDataPoint(ts pcommon.Timestamp, val int64) {
mb.metricContainerMemoryDirty.recordDataPoint(mb.startTime, ts, val)
}
// RecordContainerMemoryFailsDataPoint adds a data point to container.memory.fails metric.
func (mb *MetricsBuilder) RecordContainerMemoryFailsDataPoint(ts pcommon.Timestamp, val int64) {
mb.metricContainerMemoryFails.recordDataPoint(mb.startTime, ts, val)
}
// RecordContainerMemoryFileDataPoint adds a data point to container.memory.file metric.
func (mb *MetricsBuilder) RecordContainerMemoryFileDataPoint(ts pcommon.Timestamp, val int64) {
mb.metricContainerMemoryFile.recordDataPoint(mb.startTime, ts, val)
}
// RecordContainerMemoryHierarchicalMemoryLimitDataPoint adds a data point to container.memory.hierarchical_memory_limit metric.
func (mb *MetricsBuilder) RecordContainerMemoryHierarchicalMemoryLimitDataPoint(ts pcommon.Timestamp, val int64) {
mb.metricContainerMemoryHierarchicalMemoryLimit.recordDataPoint(mb.startTime, ts, val)
}
// RecordContainerMemoryHierarchicalMemswLimitDataPoint adds a data point to container.memory.hierarchical_memsw_limit metric.
func (mb *MetricsBuilder) RecordContainerMemoryHierarchicalMemswLimitDataPoint(ts pcommon.Timestamp, val int64) {
mb.metricContainerMemoryHierarchicalMemswLimit.recordDataPoint(mb.startTime, ts, val)
}
// RecordContainerMemoryInactiveAnonDataPoint adds a data point to container.memory.inactive_anon metric.
func (mb *MetricsBuilder) RecordContainerMemoryInactiveAnonDataPoint(ts pcommon.Timestamp, val int64) {
mb.metricContainerMemoryInactiveAnon.recordDataPoint(mb.startTime, ts, val)
}
// RecordContainerMemoryInactiveFileDataPoint adds a data point to container.memory.inactive_file metric.
func (mb *MetricsBuilder) RecordContainerMemoryInactiveFileDataPoint(ts pcommon.Timestamp, val int64) {
mb.metricContainerMemoryInactiveFile.recordDataPoint(mb.startTime, ts, val)
}
// RecordContainerMemoryMappedFileDataPoint adds a data point to container.memory.mapped_file metric.
func (mb *MetricsBuilder) RecordContainerMemoryMappedFileDataPoint(ts pcommon.Timestamp, val int64) {
mb.metricContainerMemoryMappedFile.recordDataPoint(mb.startTime, ts, val)
}
// RecordContainerMemoryPercentDataPoint adds a data point to container.memory.percent metric.
func (mb *MetricsBuilder) RecordContainerMemoryPercentDataPoint(ts pcommon.Timestamp, val float64) {
mb.metricContainerMemoryPercent.recordDataPoint(mb.startTime, ts, val)
}
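// recordMemoryPercentExample is an illustrative sketch, not part of the
// generated builder. It assumes memory usage and limit are available as byte
// counts and derives the percentage before recording; how the receiver itself
// computes this value may differ.
func recordMemoryPercentExample(mb *MetricsBuilder, ts pcommon.Timestamp, usageBytes, limitBytes int64) {
	if limitBytes <= 0 {
		// Without a positive limit the percentage is undefined; skip the data point.
		return
	}
	mb.RecordContainerMemoryPercentDataPoint(ts, 100*float64(usageBytes)/float64(limitBytes))
}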
// RecordContainerMemoryPgfaultDataPoint adds a data point to container.memory.pgfault metric.
func (mb *MetricsBuilder) RecordContainerMemoryPgfaultDataPoint(ts pcommon.Timestamp, val int64) {
mb.metricContainerMemoryPgfault.recordDataPoint(mb.startTime, ts, val)
}
// RecordContainerMemoryPgmajfaultDataPoint adds a data point to container.memory.pgmajfault metric.
func (mb *MetricsBuilder) RecordContainerMemoryPgmajfaultDataPoint(ts pcommon.Timestamp, val int64) {
mb.metricContainerMemoryPgmajfault.recordDataPoint(mb.startTime, ts, val)
}
// RecordContainerMemoryPgpginDataPoint adds a data point to container.memory.pgpgin metric.
func (mb *MetricsBuilder) RecordContainerMemoryPgpginDataPoint(ts pcommon.Timestamp, val int64) {
mb.metricContainerMemoryPgpgin.recordDataPoint(mb.startTime, ts, val)
}
// RecordContainerMemoryPgpgoutDataPoint adds a data point to container.memory.pgpgout metric.
func (mb *MetricsBuilder) RecordContainerMemoryPgpgoutDataPoint(ts pcommon.Timestamp, val int64) {
mb.metricContainerMemoryPgpgout.recordDataPoint(mb.startTime, ts, val)
}
// RecordContainerMemoryRssDataPoint adds a data point to container.memory.rss metric.
func (mb *MetricsBuilder) RecordContainerMemoryRssDataPoint(ts pcommon.Timestamp, val int64) {
mb.metricContainerMemoryRss.recordDataPoint(mb.startTime, ts, val)
}
// RecordContainerMemoryRssHugeDataPoint adds a data point to container.memory.rss_huge metric.
func (mb *MetricsBuilder) RecordContainerMemoryRssHugeDataPoint(ts pcommon.Timestamp, val int64) {
mb.metricContainerMemoryRssHuge.recordDataPoint(mb.startTime, ts, val)
}
// RecordContainerMemoryTotalActiveAnonDataPoint adds a data point to container.memory.total_active_anon metric.
func (mb *MetricsBuilder) RecordContainerMemoryTotalActiveAnonDataPoint(ts pcommon.Timestamp, val int64) {
mb.metricContainerMemoryTotalActiveAnon.recordDataPoint(mb.startTime, ts, val)
}
// RecordContainerMemoryTotalActiveFileDataPoint adds a data point to container.memory.total_active_file metric.
func (mb *MetricsBuilder) RecordContainerMemoryTotalActiveFileDataPoint(ts pcommon.Timestamp, val int64) {
mb.metricContainerMemoryTotalActiveFile.recordDataPoint(mb.startTime, ts, val)
}
// RecordContainerMemoryTotalCacheDataPoint adds a data point to container.memory.total_cache metric.
func (mb *MetricsBuilder) RecordContainerMemoryTotalCacheDataPoint(ts pcommon.Timestamp, val int64) {
mb.metricContainerMemoryTotalCache.recordDataPoint(mb.startTime, ts, val)
}
// RecordContainerMemoryTotalDirtyDataPoint adds a data point to container.memory.total_dirty metric.
func (mb *MetricsBuilder) RecordContainerMemoryTotalDirtyDataPoint(ts pcommon.Timestamp, val int64) {
mb.metricContainerMemoryTotalDirty.recordDataPoint(mb.startTime, ts, val)
}
// RecordContainerMemoryTotalInactiveAnonDataPoint adds a data point to container.memory.total_inactive_anon metric.
func (mb *MetricsBuilder) RecordContainerMemoryTotalInactiveAnonDataPoint(ts pcommon.Timestamp, val int64) {
mb.metricContainerMemoryTotalInactiveAnon.recordDataPoint(mb.startTime, ts, val)
}
// RecordContainerMemoryTotalInactiveFileDataPoint adds a data point to container.memory.total_inactive_file metric.
func (mb *MetricsBuilder) RecordContainerMemoryTotalInactiveFileDataPoint(ts pcommon.Timestamp, val int64) {
mb.metricContainerMemoryTotalInactiveFile.recordDataPoint(mb.startTime, ts, val)
}
// RecordContainerMemoryTotalMappedFileDataPoint adds a data point to container.memory.total_mapped_file metric.
func (mb *MetricsBuilder) RecordContainerMemoryTotalMappedFileDataPoint(ts pcommon.Timestamp, val int64) {
mb.metricContainerMemoryTotalMappedFile.recordDataPoint(mb.startTime, ts, val)
}
// RecordContainerMemoryTotalPgfaultDataPoint adds a data point to container.memory.total_pgfault metric.
func (mb *MetricsBuilder) RecordContainerMemoryTotalPgfaultDataPoint(ts pcommon.Timestamp, val int64) {
mb.metricContainerMemoryTotalPgfault.recordDataPoint(mb.startTime, ts, val)
}
// RecordContainerMemoryTotalPgmajfaultDataPoint adds a data point to container.memory.total_pgmajfault metric.
func (mb *MetricsBuilder) RecordContainerMemoryTotalPgmajfaultDataPoint(ts pcommon.Timestamp, val int64) {
mb.metricContainerMemoryTotalPgmajfault.recordDataPoint(mb.startTime, ts, val)
}
// RecordContainerMemoryTotalPgpginDataPoint adds a data point to container.memory.total_pgpgin metric.
func (mb *MetricsBuilder) RecordContainerMemoryTotalPgpginDataPoint(ts pcommon.Timestamp, val int64) {
mb.metricContainerMemoryTotalPgpgin.recordDataPoint(mb.startTime, ts, val)
}
// RecordContainerMemoryTotalPgpgoutDataPoint adds a data point to container.memory.total_pgpgout metric.
func (mb *MetricsBuilder) RecordContainerMemoryTotalPgpgoutDataPoint(ts pcommon.Timestamp, val int64) {
mb.metricContainerMemoryTotalPgpgout.recordDataPoint(mb.startTime, ts, val)
}
// RecordContainerMemoryTotalRssDataPoint adds a data point to container.memory.total_rss metric.
func (mb *MetricsBuilder) RecordContainerMemoryTotalRssDataPoint(ts pcommon.Timestamp, val int64) {
mb.metricContainerMemoryTotalRss.recordDataPoint(mb.startTime, ts, val)
}
// RecordContainerMemoryTotalRssHugeDataPoint adds a data point to container.memory.total_rss_huge metric.
func (mb *MetricsBuilder) RecordContainerMemoryTotalRssHugeDataPoint(ts pcommon.Timestamp, val int64) {
mb.metricContainerMemoryTotalRssHuge.recordDataPoint(mb.startTime, ts, val)
}
// RecordContainerMemoryTotalUnevictableDataPoint adds a data point to container.memory.total_unevictable metric.
func (mb *MetricsBuilder) RecordContainerMemoryTotalUnevictableDataPoint(ts pcommon.Timestamp, val int64) {
mb.metricContainerMemoryTotalUnevictable.recordDataPoint(mb.startTime, ts, val)
}
// RecordContainerMemoryTotalWritebackDataPoint adds a data point to container.memory.total_writeback metric.
func (mb *MetricsBuilder) RecordContainerMemoryTotalWritebackDataPoint(ts pcommon.Timestamp, val int64) {
mb.metricContainerMemoryTotalWriteback.recordDataPoint(mb.startTime, ts, val)
}
// RecordContainerMemoryUnevictableDataPoint adds a data point to container.memory.unevictable metric.
func (mb *MetricsBuilder) RecordContainerMemoryUnevictableDataPoint(ts pcommon.Timestamp, val int64) {
mb.metricContainerMemoryUnevictable.recordDataPoint(mb.startTime, ts, val)
}
// RecordContainerMemoryUsageLimitDataPoint adds a data point to container.memory.usage.limit metric.
func (mb *MetricsBuilder) RecordContainerMemoryUsageLimitDataPoint(ts pcommon.Timestamp, val int64) {
mb.metricContainerMemoryUsageLimit.recordDataPoint(mb.startTime, ts, val)
}
// RecordContainerMemoryUsageMaxDataPoint adds a data point to container.memory.usage.max metric.
func (mb *MetricsBuilder) RecordContainerMemoryUsageMaxDataPoint(ts pcommon.Timestamp, val int64) {
mb.metricContainerMemoryUsageMax.recordDataPoint(mb.startTime, ts, val)
}
// RecordContainerMemoryUsageTotalDataPoint adds a data point to container.memory.usage.total metric.
func (mb *MetricsBuilder) RecordContainerMemoryUsageTotalDataPoint(ts pcommon.Timestamp, val int64) {
mb.metricContainerMemoryUsageTotal.recordDataPoint(mb.startTime, ts, val)
}
// RecordContainerMemoryWritebackDataPoint adds a data point to container.memory.writeback metric.
func (mb *MetricsBuilder) RecordContainerMemoryWritebackDataPoint(ts pcommon.Timestamp, val int64) {
mb.metricContainerMemoryWriteback.recordDataPoint(mb.startTime, ts, val)
}
// RecordContainerNetworkIoUsageRxBytesDataPoint adds a data point to container.network.io.usage.rx_bytes metric.
func (mb *MetricsBuilder) RecordContainerNetworkIoUsageRxBytesDataPoint(ts pcommon.Timestamp, val int64, interfaceAttributeValue string) {
mb.metricContainerNetworkIoUsageRxBytes.recordDataPoint(mb.startTime, ts, val, interfaceAttributeValue)
}
// RecordContainerNetworkIoUsageRxDroppedDataPoint adds a data point to container.network.io.usage.rx_dropped metric.
func (mb *MetricsBuilder) RecordContainerNetworkIoUsageRxDroppedDataPoint(ts pcommon.Timestamp, val int64, interfaceAttributeValue string) {
mb.metricContainerNetworkIoUsageRxDropped.recordDataPoint(mb.startTime, ts, val, interfaceAttributeValue)
}
// RecordContainerNetworkIoUsageRxErrorsDataPoint adds a data point to container.network.io.usage.rx_errors metric.
func (mb *MetricsBuilder) RecordContainerNetworkIoUsageRxErrorsDataPoint(ts pcommon.Timestamp, val int64, interfaceAttributeValue string) {
mb.metricContainerNetworkIoUsageRxErrors.recordDataPoint(mb.startTime, ts, val, interfaceAttributeValue)
}
// RecordContainerNetworkIoUsageRxPacketsDataPoint adds a data point to container.network.io.usage.rx_packets metric.
func (mb *MetricsBuilder) RecordContainerNetworkIoUsageRxPacketsDataPoint(ts pcommon.Timestamp, val int64, interfaceAttributeValue string) {
mb.metricContainerNetworkIoUsageRxPackets.recordDataPoint(mb.startTime, ts, val, interfaceAttributeValue)
}
// RecordContainerNetworkIoUsageTxBytesDataPoint adds a data point to container.network.io.usage.tx_bytes metric.
func (mb *MetricsBuilder) RecordContainerNetworkIoUsageTxBytesDataPoint(ts pcommon.Timestamp, val int64, interfaceAttributeValue string) {
mb.metricContainerNetworkIoUsageTxBytes.recordDataPoint(mb.startTime, ts, val, interfaceAttributeValue)
}
// RecordContainerNetworkIoUsageTxDroppedDataPoint adds a data point to container.network.io.usage.tx_dropped metric.
func (mb *MetricsBuilder) RecordContainerNetworkIoUsageTxDroppedDataPoint(ts pcommon.Timestamp, val int64, interfaceAttributeValue string) {
mb.metricContainerNetworkIoUsageTxDropped.recordDataPoint(mb.startTime, ts, val, interfaceAttributeValue)
}
// RecordContainerNetworkIoUsageTxErrorsDataPoint adds a data point to container.network.io.usage.tx_errors metric.
func (mb *MetricsBuilder) RecordContainerNetworkIoUsageTxErrorsDataPoint(ts pcommon.Timestamp, val int64, interfaceAttributeValue string) {
mb.metricContainerNetworkIoUsageTxErrors.recordDataPoint(mb.startTime, ts, val, interfaceAttributeValue)
}
// RecordContainerNetworkIoUsageTxPacketsDataPoint adds a data point to container.network.io.usage.tx_packets metric.
func (mb *MetricsBuilder) RecordContainerNetworkIoUsageTxPacketsDataPoint(ts pcommon.Timestamp, val int64, interfaceAttributeValue string) {
mb.metricContainerNetworkIoUsageTxPackets.recordDataPoint(mb.startTime, ts, val, interfaceAttributeValue)
}
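// The two declarations below are an illustrative sketch, not part of the
// generated builder. They assume per-interface counters arrive in a simple
// struct keyed by interface name; exampleNetworkCounters is a hypothetical
// type for this example, not one used by the receiver.
type exampleNetworkCounters struct {
	RxBytes, TxBytes int64
}

func recordNetworkStatsExample(mb *MetricsBuilder, ts pcommon.Timestamp, perInterface map[string]exampleNetworkCounters) {
	for iface, counters := range perInterface {
		// Every network metric carries the interface name as an attribute,
		// so each interface yields its own set of data points.
		mb.RecordContainerNetworkIoUsageRxBytesDataPoint(ts, counters.RxBytes, iface)
		mb.RecordContainerNetworkIoUsageTxBytesDataPoint(ts, counters.TxBytes, iface)
	}
}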
// RecordContainerPidsCountDataPoint adds a data point to container.pids.count metric.
func (mb *MetricsBuilder) RecordContainerPidsCountDataPoint(ts pcommon.Timestamp, val int64) {
mb.metricContainerPidsCount.recordDataPoint(mb.startTime, ts, val)
}
// RecordContainerPidsLimitDataPoint adds a data point to container.pids.limit metric.
func (mb *MetricsBuilder) RecordContainerPidsLimitDataPoint(ts pcommon.Timestamp, val int64) {
mb.metricContainerPidsLimit.recordDataPoint(mb.startTime, ts, val)
}
// RecordContainerRestartsDataPoint adds a data point to container.restarts metric.
func (mb *MetricsBuilder) RecordContainerRestartsDataPoint(ts pcommon.Timestamp, val int64) {
mb.metricContainerRestarts.recordDataPoint(mb.startTime, ts, val)
}
// RecordContainerUptimeDataPoint adds a data point to container.uptime metric.
func (mb *MetricsBuilder) RecordContainerUptimeDataPoint(ts pcommon.Timestamp, val float64) {
mb.metricContainerUptime.recordDataPoint(mb.startTime, ts, val)
}
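// recordUptimeExample is an illustrative sketch, not part of the generated
// builder. It assumes uptime is derived from the container's start time as a
// duration in seconds; where that start time comes from is an assumption for
// this example.
func recordUptimeExample(mb *MetricsBuilder, ts pcommon.Timestamp, startedAt time.Time) {
	mb.RecordContainerUptimeDataPoint(ts, time.Since(startedAt).Seconds())
}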
// Reset resets the metrics builder to its initial state. It should be used when the external metrics source is restarted,
// so that the metrics builder updates its startTime and resets its internal state accordingly.
func (mb *MetricsBuilder) Reset(options ...MetricBuilderOption) {
mb.startTime = pcommon.NewTimestampFromTime(time.Now())
for _, op := range options {
op.apply(mb)
}
}
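// exampleScrapeCycle is an illustrative sketch, not part of the generated
// code. It shows one possible scrape cycle: record values observed at a single
// timestamp, calling Reset first if the metrics source (the Docker daemon in
// this receiver's case) is known to have restarted so the builder picks up a
// new start time. Constructing the builder and emitting the accumulated
// metrics are assumed to happen elsewhere.
func exampleScrapeCycle(mb *MetricsBuilder, sourceRestarted bool, cpuTotal, memUsage int64) {
	if sourceRestarted {
		// A restart invalidates cumulative counters, so the start time is reset.
		mb.Reset()
	}
	now := pcommon.NewTimestampFromTime(time.Now())
	mb.RecordContainerCPUUsageTotalDataPoint(now, cpuTotal)
	mb.RecordContainerMemoryUsageTotalDataPoint(now, memUsage)
}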