receiver/bigipreceiver/internal/metadata/generated_metrics.go
// Code generated by mdatagen. DO NOT EDIT.
package metadata
import (
"time"
"go.opentelemetry.io/collector/component"
"go.opentelemetry.io/collector/filter"
"go.opentelemetry.io/collector/pdata/pcommon"
"go.opentelemetry.io/collector/pdata/pmetric"
"go.opentelemetry.io/collector/receiver"
)
// AttributeActiveStatus specifies the value active.status attribute.
type AttributeActiveStatus int
const (
_ AttributeActiveStatus = iota
AttributeActiveStatusActive
AttributeActiveStatusInactive
)
// String returns the string representation of the AttributeActiveStatus.
func (av AttributeActiveStatus) String() string {
switch av {
case AttributeActiveStatusActive:
return "active"
case AttributeActiveStatusInactive:
return "inactive"
}
return ""
}
// MapAttributeActiveStatus is a helper map of string to AttributeActiveStatus attribute value.
var MapAttributeActiveStatus = map[string]AttributeActiveStatus{
"active": AttributeActiveStatusActive,
"inactive": AttributeActiveStatusInactive,
}
// AttributeAvailabilityStatus specifies the value availability.status attribute.
type AttributeAvailabilityStatus int
const (
_ AttributeAvailabilityStatus = iota
AttributeAvailabilityStatusOffline
AttributeAvailabilityStatusUnknown
AttributeAvailabilityStatusAvailable
)
// String returns the string representation of the AttributeAvailabilityStatus.
func (av AttributeAvailabilityStatus) String() string {
switch av {
case AttributeAvailabilityStatusOffline:
return "offline"
case AttributeAvailabilityStatusUnknown:
return "unknown"
case AttributeAvailabilityStatusAvailable:
return "available"
}
return ""
}
// MapAttributeAvailabilityStatus is a helper map of string to AttributeAvailabilityStatus attribute value.
var MapAttributeAvailabilityStatus = map[string]AttributeAvailabilityStatus{
"offline": AttributeAvailabilityStatusOffline,
"unknown": AttributeAvailabilityStatusUnknown,
"available": AttributeAvailabilityStatusAvailable,
}
// AttributeDirection specifies the value direction attribute.
type AttributeDirection int
const (
_ AttributeDirection = iota
AttributeDirectionSent
AttributeDirectionReceived
)
// String returns the string representation of the AttributeDirection.
func (av AttributeDirection) String() string {
switch av {
case AttributeDirectionSent:
return "sent"
case AttributeDirectionReceived:
return "received"
}
return ""
}
// MapAttributeDirection is a helper map of string to AttributeDirection attribute value.
var MapAttributeDirection = map[string]AttributeDirection{
"sent": AttributeDirectionSent,
"received": AttributeDirectionReceived,
}
// AttributeEnabledStatus specifies the value enabled.status attribute.
type AttributeEnabledStatus int
const (
_ AttributeEnabledStatus = iota
AttributeEnabledStatusDisabled
AttributeEnabledStatusEnabled
)
// String returns the string representation of the AttributeEnabledStatus.
func (av AttributeEnabledStatus) String() string {
switch av {
case AttributeEnabledStatusDisabled:
return "disabled"
case AttributeEnabledStatusEnabled:
return "enabled"
}
return ""
}
// MapAttributeEnabledStatus is a helper map of string to AttributeEnabledStatus attribute value.
var MapAttributeEnabledStatus = map[string]AttributeEnabledStatus{
"disabled": AttributeEnabledStatusDisabled,
"enabled": AttributeEnabledStatusEnabled,
}
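// The String methods and Map helpers above form a round trip between the raw
// status strings reported by the BIG-IP API and the typed attribute constants.
// A minimal sketch of how a scraper might use them (illustrative only, not part
// of the generated code; the raw input value is a hypothetical API field):
//
//	raw := "offline" // e.g. a status string taken from an API response
//	if status, ok := MapAttributeAvailabilityStatus[raw]; ok {
//		_ = status.String() // "offline", suitable for recording as the "status" attribute
//	} else {
//		// unrecognized string: skip the data point or fall back to a default
//	}
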
var MetricsInfo = metricsInfo{
BigipNodeAvailability: metricInfo{
Name: "bigip.node.availability",
},
BigipNodeConnectionCount: metricInfo{
Name: "bigip.node.connection.count",
},
BigipNodeDataTransmitted: metricInfo{
Name: "bigip.node.data.transmitted",
},
BigipNodeEnabled: metricInfo{
Name: "bigip.node.enabled",
},
BigipNodePacketCount: metricInfo{
Name: "bigip.node.packet.count",
},
BigipNodeRequestCount: metricInfo{
Name: "bigip.node.request.count",
},
BigipNodeSessionCount: metricInfo{
Name: "bigip.node.session.count",
},
BigipPoolAvailability: metricInfo{
Name: "bigip.pool.availability",
},
BigipPoolConnectionCount: metricInfo{
Name: "bigip.pool.connection.count",
},
BigipPoolDataTransmitted: metricInfo{
Name: "bigip.pool.data.transmitted",
},
BigipPoolEnabled: metricInfo{
Name: "bigip.pool.enabled",
},
BigipPoolMemberCount: metricInfo{
Name: "bigip.pool.member.count",
},
BigipPoolPacketCount: metricInfo{
Name: "bigip.pool.packet.count",
},
BigipPoolRequestCount: metricInfo{
Name: "bigip.pool.request.count",
},
BigipPoolMemberAvailability: metricInfo{
Name: "bigip.pool_member.availability",
},
BigipPoolMemberConnectionCount: metricInfo{
Name: "bigip.pool_member.connection.count",
},
BigipPoolMemberDataTransmitted: metricInfo{
Name: "bigip.pool_member.data.transmitted",
},
BigipPoolMemberEnabled: metricInfo{
Name: "bigip.pool_member.enabled",
},
BigipPoolMemberPacketCount: metricInfo{
Name: "bigip.pool_member.packet.count",
},
BigipPoolMemberRequestCount: metricInfo{
Name: "bigip.pool_member.request.count",
},
BigipPoolMemberSessionCount: metricInfo{
Name: "bigip.pool_member.session.count",
},
BigipVirtualServerAvailability: metricInfo{
Name: "bigip.virtual_server.availability",
},
BigipVirtualServerConnectionCount: metricInfo{
Name: "bigip.virtual_server.connection.count",
},
BigipVirtualServerDataTransmitted: metricInfo{
Name: "bigip.virtual_server.data.transmitted",
},
BigipVirtualServerEnabled: metricInfo{
Name: "bigip.virtual_server.enabled",
},
BigipVirtualServerPacketCount: metricInfo{
Name: "bigip.virtual_server.packet.count",
},
BigipVirtualServerRequestCount: metricInfo{
Name: "bigip.virtual_server.request.count",
},
}
type metricsInfo struct {
BigipNodeAvailability metricInfo
BigipNodeConnectionCount metricInfo
BigipNodeDataTransmitted metricInfo
BigipNodeEnabled metricInfo
BigipNodePacketCount metricInfo
BigipNodeRequestCount metricInfo
BigipNodeSessionCount metricInfo
BigipPoolAvailability metricInfo
BigipPoolConnectionCount metricInfo
BigipPoolDataTransmitted metricInfo
BigipPoolEnabled metricInfo
BigipPoolMemberCount metricInfo
BigipPoolPacketCount metricInfo
BigipPoolRequestCount metricInfo
BigipPoolMemberAvailability metricInfo
BigipPoolMemberConnectionCount metricInfo
BigipPoolMemberDataTransmitted metricInfo
BigipPoolMemberEnabled metricInfo
BigipPoolMemberPacketCount metricInfo
BigipPoolMemberRequestCount metricInfo
BigipPoolMemberSessionCount metricInfo
BigipVirtualServerAvailability metricInfo
BigipVirtualServerConnectionCount metricInfo
BigipVirtualServerDataTransmitted metricInfo
BigipVirtualServerEnabled metricInfo
BigipVirtualServerPacketCount metricInfo
BigipVirtualServerRequestCount metricInfo
}
type metricInfo struct {
Name string
}
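// MetricsInfo above exposes the canonical metric names as plain strings, which
// avoids hard-coding name literals when, for example, asserting on emitted
// metrics in tests. A minimal sketch (illustrative only, not generated code):
//
//	name := MetricsInfo.BigipNodeAvailability.Name // "bigip.node.availability"
//	_ = name
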
type metricBigipNodeAvailability struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills bigip.node.availability metric with initial data.
func (m *metricBigipNodeAvailability) init() {
m.data.SetName("bigip.node.availability")
m.data.SetDescription("Availability of the node.")
m.data.SetUnit("1")
m.data.SetEmptyGauge()
m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricBigipNodeAvailability) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, availabilityStatusAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
dp.Attributes().PutStr("status", availabilityStatusAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricBigipNodeAvailability) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricBigipNodeAvailability) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricBigipNodeAvailability(cfg MetricConfig) metricBigipNodeAvailability {
m := metricBigipNodeAvailability{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
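// Every metric helper in this file follows the same lifecycle: construct it with
// its MetricConfig, record zero or more data points during a scrape, then emit()
// moves the buffered metric into a pmetric.MetricSlice and re-initializes the
// buffer for the next scrape. A minimal sketch for the gauge above (illustrative
// only; the MetricConfig literal, timestamps, and value are assumptions, not
// generated code):
//
//	m := newMetricBigipNodeAvailability(MetricConfig{Enabled: true})
//	now := pcommon.NewTimestampFromTime(time.Now())
//	m.recordDataPoint(now, now, 1, AttributeAvailabilityStatusAvailable.String())
//	dest := pmetric.NewMetricSlice()
//	m.emit(dest) // dest now holds one "bigip.node.availability" metric with one data point
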
type metricBigipNodeConnectionCount struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills bigip.node.connection.count metric with initial data.
func (m *metricBigipNodeConnectionCount) init() {
m.data.SetName("bigip.node.connection.count")
m.data.SetDescription("Current number of connections to the node.")
m.data.SetUnit("{connections}")
m.data.SetEmptySum()
m.data.Sum().SetIsMonotonic(false)
m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
}
func (m *metricBigipNodeConnectionCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
if !m.config.Enabled {
return
}
dp := m.data.Sum().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricBigipNodeConnectionCount) updateCapacity() {
if m.data.Sum().DataPoints().Len() > m.capacity {
m.capacity = m.data.Sum().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricBigipNodeConnectionCount) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricBigipNodeConnectionCount(cfg MetricConfig) metricBigipNodeConnectionCount {
m := metricBigipNodeConnectionCount{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricBigipNodeDataTransmitted struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills bigip.node.data.transmitted metric with initial data.
func (m *metricBigipNodeDataTransmitted) init() {
m.data.SetName("bigip.node.data.transmitted")
m.data.SetDescription("Amount of data transmitted to and from the node.")
m.data.SetUnit("By")
m.data.SetEmptySum()
m.data.Sum().SetIsMonotonic(true)
m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricBigipNodeDataTransmitted) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, directionAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Sum().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
dp.Attributes().PutStr("direction", directionAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricBigipNodeDataTransmitted) updateCapacity() {
if m.data.Sum().DataPoints().Len() > m.capacity {
m.capacity = m.data.Sum().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricBigipNodeDataTransmitted) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricBigipNodeDataTransmitted(cfg MetricConfig) metricBigipNodeDataTransmitted {
m := metricBigipNodeDataTransmitted{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricBigipNodeEnabled struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills bigip.node.enabled metric with initial data.
func (m *metricBigipNodeEnabled) init() {
m.data.SetName("bigip.node.enabled")
m.data.SetDescription("Enabled state of of the node.")
m.data.SetUnit("1")
m.data.SetEmptyGauge()
m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricBigipNodeEnabled) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, enabledStatusAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
dp.Attributes().PutStr("status", enabledStatusAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricBigipNodeEnabled) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricBigipNodeEnabled) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricBigipNodeEnabled(cfg MetricConfig) metricBigipNodeEnabled {
m := metricBigipNodeEnabled{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricBigipNodePacketCount struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills bigip.node.packet.count metric with initial data.
func (m *metricBigipNodePacketCount) init() {
m.data.SetName("bigip.node.packet.count")
m.data.SetDescription("Number of packets transmitted to and from the node.")
m.data.SetUnit("{packets}")
m.data.SetEmptySum()
m.data.Sum().SetIsMonotonic(true)
m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricBigipNodePacketCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, directionAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Sum().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
dp.Attributes().PutStr("direction", directionAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricBigipNodePacketCount) updateCapacity() {
if m.data.Sum().DataPoints().Len() > m.capacity {
m.capacity = m.data.Sum().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricBigipNodePacketCount) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricBigipNodePacketCount(cfg MetricConfig) metricBigipNodePacketCount {
m := metricBigipNodePacketCount{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricBigipNodeRequestCount struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills bigip.node.request.count metric with initial data.
func (m *metricBigipNodeRequestCount) init() {
m.data.SetName("bigip.node.request.count")
m.data.SetDescription("Number of requests to the node.")
m.data.SetUnit("{requests}")
m.data.SetEmptySum()
m.data.Sum().SetIsMonotonic(true)
m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
}
func (m *metricBigipNodeRequestCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
if !m.config.Enabled {
return
}
dp := m.data.Sum().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricBigipNodeRequestCount) updateCapacity() {
if m.data.Sum().DataPoints().Len() > m.capacity {
m.capacity = m.data.Sum().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricBigipNodeRequestCount) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricBigipNodeRequestCount(cfg MetricConfig) metricBigipNodeRequestCount {
m := metricBigipNodeRequestCount{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricBigipNodeSessionCount struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills bigip.node.session.count metric with initial data.
func (m *metricBigipNodeSessionCount) init() {
m.data.SetName("bigip.node.session.count")
m.data.SetDescription("Current number of sessions for the node.")
m.data.SetUnit("{sessions}")
m.data.SetEmptySum()
m.data.Sum().SetIsMonotonic(false)
m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
}
func (m *metricBigipNodeSessionCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
if !m.config.Enabled {
return
}
dp := m.data.Sum().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricBigipNodeSessionCount) updateCapacity() {
if m.data.Sum().DataPoints().Len() > m.capacity {
m.capacity = m.data.Sum().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricBigipNodeSessionCount) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricBigipNodeSessionCount(cfg MetricConfig) metricBigipNodeSessionCount {
m := metricBigipNodeSessionCount{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricBigipPoolAvailability struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills bigip.pool.availability metric with initial data.
func (m *metricBigipPoolAvailability) init() {
m.data.SetName("bigip.pool.availability")
m.data.SetDescription("Availability of the pool.")
m.data.SetUnit("1")
m.data.SetEmptyGauge()
m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricBigipPoolAvailability) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, availabilityStatusAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
dp.Attributes().PutStr("status", availabilityStatusAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricBigipPoolAvailability) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricBigipPoolAvailability) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricBigipPoolAvailability(cfg MetricConfig) metricBigipPoolAvailability {
m := metricBigipPoolAvailability{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricBigipPoolConnectionCount struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills bigip.pool.connection.count metric with initial data.
func (m *metricBigipPoolConnectionCount) init() {
m.data.SetName("bigip.pool.connection.count")
m.data.SetDescription("Current number of connections to the pool.")
m.data.SetUnit("{connections}")
m.data.SetEmptySum()
m.data.Sum().SetIsMonotonic(false)
m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
}
func (m *metricBigipPoolConnectionCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
if !m.config.Enabled {
return
}
dp := m.data.Sum().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricBigipPoolConnectionCount) updateCapacity() {
if m.data.Sum().DataPoints().Len() > m.capacity {
m.capacity = m.data.Sum().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricBigipPoolConnectionCount) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricBigipPoolConnectionCount(cfg MetricConfig) metricBigipPoolConnectionCount {
m := metricBigipPoolConnectionCount{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricBigipPoolDataTransmitted struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills bigip.pool.data.transmitted metric with initial data.
func (m *metricBigipPoolDataTransmitted) init() {
m.data.SetName("bigip.pool.data.transmitted")
m.data.SetDescription("Amount of data transmitted to and from the pool.")
m.data.SetUnit("By")
m.data.SetEmptySum()
m.data.Sum().SetIsMonotonic(true)
m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricBigipPoolDataTransmitted) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, directionAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Sum().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
dp.Attributes().PutStr("direction", directionAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricBigipPoolDataTransmitted) updateCapacity() {
if m.data.Sum().DataPoints().Len() > m.capacity {
m.capacity = m.data.Sum().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricBigipPoolDataTransmitted) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricBigipPoolDataTransmitted(cfg MetricConfig) metricBigipPoolDataTransmitted {
m := metricBigipPoolDataTransmitted{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricBigipPoolEnabled struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills bigip.pool.enabled metric with initial data.
func (m *metricBigipPoolEnabled) init() {
m.data.SetName("bigip.pool.enabled")
m.data.SetDescription("Enabled state of of the pool.")
m.data.SetUnit("1")
m.data.SetEmptyGauge()
m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricBigipPoolEnabled) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, enabledStatusAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
dp.Attributes().PutStr("status", enabledStatusAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricBigipPoolEnabled) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricBigipPoolEnabled) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricBigipPoolEnabled(cfg MetricConfig) metricBigipPoolEnabled {
m := metricBigipPoolEnabled{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricBigipPoolMemberCount struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills bigip.pool.member.count metric with initial data.
func (m *metricBigipPoolMemberCount) init() {
m.data.SetName("bigip.pool.member.count")
m.data.SetDescription("Total number of pool members.")
m.data.SetUnit("{members}")
m.data.SetEmptySum()
m.data.Sum().SetIsMonotonic(false)
m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricBigipPoolMemberCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, activeStatusAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Sum().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
dp.Attributes().PutStr("status", activeStatusAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricBigipPoolMemberCount) updateCapacity() {
if m.data.Sum().DataPoints().Len() > m.capacity {
m.capacity = m.data.Sum().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricBigipPoolMemberCount) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricBigipPoolMemberCount(cfg MetricConfig) metricBigipPoolMemberCount {
m := metricBigipPoolMemberCount{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricBigipPoolPacketCount struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills bigip.pool.packet.count metric with initial data.
func (m *metricBigipPoolPacketCount) init() {
m.data.SetName("bigip.pool.packet.count")
m.data.SetDescription("Number of packets transmitted to and from the pool.")
m.data.SetUnit("{packets}")
m.data.SetEmptySum()
m.data.Sum().SetIsMonotonic(true)
m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricBigipPoolPacketCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, directionAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Sum().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
dp.Attributes().PutStr("direction", directionAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricBigipPoolPacketCount) updateCapacity() {
if m.data.Sum().DataPoints().Len() > m.capacity {
m.capacity = m.data.Sum().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricBigipPoolPacketCount) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricBigipPoolPacketCount(cfg MetricConfig) metricBigipPoolPacketCount {
m := metricBigipPoolPacketCount{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricBigipPoolRequestCount struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills bigip.pool.request.count metric with initial data.
func (m *metricBigipPoolRequestCount) init() {
m.data.SetName("bigip.pool.request.count")
m.data.SetDescription("Number of requests to the pool.")
m.data.SetUnit("{requests}")
m.data.SetEmptySum()
m.data.Sum().SetIsMonotonic(true)
m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
}
func (m *metricBigipPoolRequestCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
if !m.config.Enabled {
return
}
dp := m.data.Sum().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricBigipPoolRequestCount) updateCapacity() {
if m.data.Sum().DataPoints().Len() > m.capacity {
m.capacity = m.data.Sum().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricBigipPoolRequestCount) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricBigipPoolRequestCount(cfg MetricConfig) metricBigipPoolRequestCount {
m := metricBigipPoolRequestCount{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricBigipPoolMemberAvailability struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills bigip.pool_member.availability metric with initial data.
func (m *metricBigipPoolMemberAvailability) init() {
m.data.SetName("bigip.pool_member.availability")
m.data.SetDescription("Availability of the pool member.")
m.data.SetUnit("1")
m.data.SetEmptyGauge()
m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricBigipPoolMemberAvailability) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, availabilityStatusAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
dp.Attributes().PutStr("status", availabilityStatusAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricBigipPoolMemberAvailability) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricBigipPoolMemberAvailability) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricBigipPoolMemberAvailability(cfg MetricConfig) metricBigipPoolMemberAvailability {
m := metricBigipPoolMemberAvailability{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricBigipPoolMemberConnectionCount struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills bigip.pool_member.connection.count metric with initial data.
func (m *metricBigipPoolMemberConnectionCount) init() {
m.data.SetName("bigip.pool_member.connection.count")
m.data.SetDescription("Current number of connections to the pool member.")
m.data.SetUnit("{connections}")
m.data.SetEmptySum()
m.data.Sum().SetIsMonotonic(false)
m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
}
func (m *metricBigipPoolMemberConnectionCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
if !m.config.Enabled {
return
}
dp := m.data.Sum().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricBigipPoolMemberConnectionCount) updateCapacity() {
if m.data.Sum().DataPoints().Len() > m.capacity {
m.capacity = m.data.Sum().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricBigipPoolMemberConnectionCount) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricBigipPoolMemberConnectionCount(cfg MetricConfig) metricBigipPoolMemberConnectionCount {
m := metricBigipPoolMemberConnectionCount{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricBigipPoolMemberDataTransmitted struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills bigip.pool_member.data.transmitted metric with initial data.
func (m *metricBigipPoolMemberDataTransmitted) init() {
m.data.SetName("bigip.pool_member.data.transmitted")
m.data.SetDescription("Amount of data transmitted to and from the pool member.")
m.data.SetUnit("By")
m.data.SetEmptySum()
m.data.Sum().SetIsMonotonic(true)
m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricBigipPoolMemberDataTransmitted) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, directionAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Sum().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
dp.Attributes().PutStr("direction", directionAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricBigipPoolMemberDataTransmitted) updateCapacity() {
if m.data.Sum().DataPoints().Len() > m.capacity {
m.capacity = m.data.Sum().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricBigipPoolMemberDataTransmitted) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricBigipPoolMemberDataTransmitted(cfg MetricConfig) metricBigipPoolMemberDataTransmitted {
m := metricBigipPoolMemberDataTransmitted{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricBigipPoolMemberEnabled struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills bigip.pool_member.enabled metric with initial data.
func (m *metricBigipPoolMemberEnabled) init() {
m.data.SetName("bigip.pool_member.enabled")
m.data.SetDescription("Enabled state of of the pool member.")
m.data.SetUnit("1")
m.data.SetEmptyGauge()
m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricBigipPoolMemberEnabled) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, enabledStatusAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
dp.Attributes().PutStr("status", enabledStatusAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricBigipPoolMemberEnabled) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricBigipPoolMemberEnabled) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricBigipPoolMemberEnabled(cfg MetricConfig) metricBigipPoolMemberEnabled {
m := metricBigipPoolMemberEnabled{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricBigipPoolMemberPacketCount struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills bigip.pool_member.packet.count metric with initial data.
func (m *metricBigipPoolMemberPacketCount) init() {
m.data.SetName("bigip.pool_member.packet.count")
m.data.SetDescription("Number of packets transmitted to and from the pool member.")
m.data.SetUnit("{packets}")
m.data.SetEmptySum()
m.data.Sum().SetIsMonotonic(true)
m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricBigipPoolMemberPacketCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, directionAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Sum().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
dp.Attributes().PutStr("direction", directionAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricBigipPoolMemberPacketCount) updateCapacity() {
if m.data.Sum().DataPoints().Len() > m.capacity {
m.capacity = m.data.Sum().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricBigipPoolMemberPacketCount) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricBigipPoolMemberPacketCount(cfg MetricConfig) metricBigipPoolMemberPacketCount {
m := metricBigipPoolMemberPacketCount{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricBigipPoolMemberRequestCount struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills bigip.pool_member.request.count metric with initial data.
func (m *metricBigipPoolMemberRequestCount) init() {
m.data.SetName("bigip.pool_member.request.count")
m.data.SetDescription("Number of requests to the pool member.")
m.data.SetUnit("{requests}")
m.data.SetEmptySum()
m.data.Sum().SetIsMonotonic(true)
m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
}
func (m *metricBigipPoolMemberRequestCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
if !m.config.Enabled {
return
}
dp := m.data.Sum().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricBigipPoolMemberRequestCount) updateCapacity() {
if m.data.Sum().DataPoints().Len() > m.capacity {
m.capacity = m.data.Sum().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricBigipPoolMemberRequestCount) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricBigipPoolMemberRequestCount(cfg MetricConfig) metricBigipPoolMemberRequestCount {
m := metricBigipPoolMemberRequestCount{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricBigipPoolMemberSessionCount struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills bigip.pool_member.session.count metric with initial data.
func (m *metricBigipPoolMemberSessionCount) init() {
m.data.SetName("bigip.pool_member.session.count")
m.data.SetDescription("Current number of sessions for the pool member.")
m.data.SetUnit("{sessions}")
m.data.SetEmptySum()
m.data.Sum().SetIsMonotonic(false)
m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
}
func (m *metricBigipPoolMemberSessionCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
if !m.config.Enabled {
return
}
dp := m.data.Sum().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricBigipPoolMemberSessionCount) updateCapacity() {
if m.data.Sum().DataPoints().Len() > m.capacity {
m.capacity = m.data.Sum().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricBigipPoolMemberSessionCount) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricBigipPoolMemberSessionCount(cfg MetricConfig) metricBigipPoolMemberSessionCount {
m := metricBigipPoolMemberSessionCount{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricBigipVirtualServerAvailability struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills bigip.virtual_server.availability metric with initial data.
func (m *metricBigipVirtualServerAvailability) init() {
m.data.SetName("bigip.virtual_server.availability")
m.data.SetDescription("Availability of the virtual server.")
m.data.SetUnit("1")
m.data.SetEmptyGauge()
m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricBigipVirtualServerAvailability) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, availabilityStatusAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
dp.Attributes().PutStr("status", availabilityStatusAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricBigipVirtualServerAvailability) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricBigipVirtualServerAvailability) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricBigipVirtualServerAvailability(cfg MetricConfig) metricBigipVirtualServerAvailability {
m := metricBigipVirtualServerAvailability{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricBigipVirtualServerConnectionCount struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills bigip.virtual_server.connection.count metric with initial data.
func (m *metricBigipVirtualServerConnectionCount) init() {
m.data.SetName("bigip.virtual_server.connection.count")
m.data.SetDescription("Current number of connections to the virtual server.")
m.data.SetUnit("{connections}")
m.data.SetEmptySum()
m.data.Sum().SetIsMonotonic(false)
m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
}
func (m *metricBigipVirtualServerConnectionCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
if !m.config.Enabled {
return
}
dp := m.data.Sum().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricBigipVirtualServerConnectionCount) updateCapacity() {
if m.data.Sum().DataPoints().Len() > m.capacity {
m.capacity = m.data.Sum().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricBigipVirtualServerConnectionCount) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricBigipVirtualServerConnectionCount(cfg MetricConfig) metricBigipVirtualServerConnectionCount {
m := metricBigipVirtualServerConnectionCount{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricBigipVirtualServerDataTransmitted struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills bigip.virtual_server.data.transmitted metric with initial data.
func (m *metricBigipVirtualServerDataTransmitted) init() {
m.data.SetName("bigip.virtual_server.data.transmitted")
m.data.SetDescription("Amount of data transmitted to and from the virtual server.")
m.data.SetUnit("By")
m.data.SetEmptySum()
m.data.Sum().SetIsMonotonic(true)
m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricBigipVirtualServerDataTransmitted) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, directionAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Sum().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
dp.Attributes().PutStr("direction", directionAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricBigipVirtualServerDataTransmitted) updateCapacity() {
if m.data.Sum().DataPoints().Len() > m.capacity {
m.capacity = m.data.Sum().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricBigipVirtualServerDataTransmitted) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricBigipVirtualServerDataTransmitted(cfg MetricConfig) metricBigipVirtualServerDataTransmitted {
m := metricBigipVirtualServerDataTransmitted{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricBigipVirtualServerEnabled struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills bigip.virtual_server.enabled metric with initial data.
func (m *metricBigipVirtualServerEnabled) init() {
m.data.SetName("bigip.virtual_server.enabled")
m.data.SetDescription("Enabled state of of the virtual server.")
m.data.SetUnit("1")
m.data.SetEmptyGauge()
m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricBigipVirtualServerEnabled) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, enabledStatusAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
dp.Attributes().PutStr("status", enabledStatusAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricBigipVirtualServerEnabled) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricBigipVirtualServerEnabled) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricBigipVirtualServerEnabled(cfg MetricConfig) metricBigipVirtualServerEnabled {
m := metricBigipVirtualServerEnabled{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricBigipVirtualServerPacketCount struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills bigip.virtual_server.packet.count metric with initial data.
func (m *metricBigipVirtualServerPacketCount) init() {
m.data.SetName("bigip.virtual_server.packet.count")
m.data.SetDescription("Number of packets transmitted to and from the virtual server.")
m.data.SetUnit("{packets}")
m.data.SetEmptySum()
m.data.Sum().SetIsMonotonic(true)
m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricBigipVirtualServerPacketCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, directionAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Sum().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
dp.Attributes().PutStr("direction", directionAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricBigipVirtualServerPacketCount) updateCapacity() {
if m.data.Sum().DataPoints().Len() > m.capacity {
m.capacity = m.data.Sum().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricBigipVirtualServerPacketCount) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricBigipVirtualServerPacketCount(cfg MetricConfig) metricBigipVirtualServerPacketCount {
m := metricBigipVirtualServerPacketCount{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricBigipVirtualServerRequestCount struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills bigip.virtual_server.request.count metric with initial data.
func (m *metricBigipVirtualServerRequestCount) init() {
m.data.SetName("bigip.virtual_server.request.count")
m.data.SetDescription("Number of requests to the virtual server.")
m.data.SetUnit("{requests}")
m.data.SetEmptySum()
m.data.Sum().SetIsMonotonic(true)
m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
}
func (m *metricBigipVirtualServerRequestCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
if !m.config.Enabled {
return
}
dp := m.data.Sum().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricBigipVirtualServerRequestCount) updateCapacity() {
if m.data.Sum().DataPoints().Len() > m.capacity {
m.capacity = m.data.Sum().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricBigipVirtualServerRequestCount) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricBigipVirtualServerRequestCount(cfg MetricConfig) metricBigipVirtualServerRequestCount {
m := metricBigipVirtualServerRequestCount{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
// MetricsBuilder provides an interface for scrapers to report metrics while taking care of all the transformations
// required to produce metric representation defined in metadata and user config.
type MetricsBuilder struct {
config MetricsBuilderConfig // config of the metrics builder.
startTime pcommon.Timestamp // start time that will be applied to all recorded data points.
metricsCapacity int // maximum observed number of metrics per resource.
metricsBuffer pmetric.Metrics // accumulates metrics data before emitting.
buildInfo component.BuildInfo // contains version information.
resourceAttributeIncludeFilter map[string]filter.Filter
resourceAttributeExcludeFilter map[string]filter.Filter
metricBigipNodeAvailability metricBigipNodeAvailability
metricBigipNodeConnectionCount metricBigipNodeConnectionCount
metricBigipNodeDataTransmitted metricBigipNodeDataTransmitted
metricBigipNodeEnabled metricBigipNodeEnabled
metricBigipNodePacketCount metricBigipNodePacketCount
metricBigipNodeRequestCount metricBigipNodeRequestCount
metricBigipNodeSessionCount metricBigipNodeSessionCount
metricBigipPoolAvailability metricBigipPoolAvailability
metricBigipPoolConnectionCount metricBigipPoolConnectionCount
metricBigipPoolDataTransmitted metricBigipPoolDataTransmitted
metricBigipPoolEnabled metricBigipPoolEnabled
metricBigipPoolMemberCount metricBigipPoolMemberCount
metricBigipPoolPacketCount metricBigipPoolPacketCount
metricBigipPoolRequestCount metricBigipPoolRequestCount
metricBigipPoolMemberAvailability metricBigipPoolMemberAvailability
metricBigipPoolMemberConnectionCount metricBigipPoolMemberConnectionCount
metricBigipPoolMemberDataTransmitted metricBigipPoolMemberDataTransmitted
metricBigipPoolMemberEnabled metricBigipPoolMemberEnabled
metricBigipPoolMemberPacketCount metricBigipPoolMemberPacketCount
metricBigipPoolMemberRequestCount metricBigipPoolMemberRequestCount
metricBigipPoolMemberSessionCount metricBigipPoolMemberSessionCount
metricBigipVirtualServerAvailability metricBigipVirtualServerAvailability
metricBigipVirtualServerConnectionCount metricBigipVirtualServerConnectionCount
metricBigipVirtualServerDataTransmitted metricBigipVirtualServerDataTransmitted
metricBigipVirtualServerEnabled metricBigipVirtualServerEnabled
metricBigipVirtualServerPacketCount metricBigipVirtualServerPacketCount
metricBigipVirtualServerRequestCount metricBigipVirtualServerRequestCount
}
// MetricBuilderOption applies changes to default metrics builder.
type MetricBuilderOption interface {
apply(*MetricsBuilder)
}
type metricBuilderOptionFunc func(mb *MetricsBuilder)
func (mbof metricBuilderOptionFunc) apply(mb *MetricsBuilder) {
mbof(mb)
}
// WithStartTime sets startTime on the metrics builder.
func WithStartTime(startTime pcommon.Timestamp) MetricBuilderOption {
return metricBuilderOptionFunc(func(mb *MetricsBuilder) {
mb.startTime = startTime
})
}
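// WithStartTime pins all cumulative data points to a fixed start timestamp
// instead of the builder's construction time. A minimal usage sketch
// (illustrative only; the config and settings values are assumed to be
// supplied by the caller, e.g. from the receiver's factory):
//
//	func newBuilder(mbc MetricsBuilderConfig, set receiver.Settings, start time.Time) *MetricsBuilder {
//		return NewMetricsBuilder(mbc, set, WithStartTime(pcommon.NewTimestampFromTime(start)))
//	}
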
func NewMetricsBuilder(mbc MetricsBuilderConfig, settings receiver.Settings, options ...MetricBuilderOption) *MetricsBuilder {
mb := &MetricsBuilder{
config: mbc,
startTime: pcommon.NewTimestampFromTime(time.Now()),
metricsBuffer: pmetric.NewMetrics(),
buildInfo: settings.BuildInfo,
metricBigipNodeAvailability: newMetricBigipNodeAvailability(mbc.Metrics.BigipNodeAvailability),
metricBigipNodeConnectionCount: newMetricBigipNodeConnectionCount(mbc.Metrics.BigipNodeConnectionCount),
metricBigipNodeDataTransmitted: newMetricBigipNodeDataTransmitted(mbc.Metrics.BigipNodeDataTransmitted),
metricBigipNodeEnabled: newMetricBigipNodeEnabled(mbc.Metrics.BigipNodeEnabled),
metricBigipNodePacketCount: newMetricBigipNodePacketCount(mbc.Metrics.BigipNodePacketCount),
metricBigipNodeRequestCount: newMetricBigipNodeRequestCount(mbc.Metrics.BigipNodeRequestCount),
metricBigipNodeSessionCount: newMetricBigipNodeSessionCount(mbc.Metrics.BigipNodeSessionCount),
metricBigipPoolAvailability: newMetricBigipPoolAvailability(mbc.Metrics.BigipPoolAvailability),
metricBigipPoolConnectionCount: newMetricBigipPoolConnectionCount(mbc.Metrics.BigipPoolConnectionCount),
metricBigipPoolDataTransmitted: newMetricBigipPoolDataTransmitted(mbc.Metrics.BigipPoolDataTransmitted),
metricBigipPoolEnabled: newMetricBigipPoolEnabled(mbc.Metrics.BigipPoolEnabled),
metricBigipPoolMemberCount: newMetricBigipPoolMemberCount(mbc.Metrics.BigipPoolMemberCount),
metricBigipPoolPacketCount: newMetricBigipPoolPacketCount(mbc.Metrics.BigipPoolPacketCount),
metricBigipPoolRequestCount: newMetricBigipPoolRequestCount(mbc.Metrics.BigipPoolRequestCount),
metricBigipPoolMemberAvailability: newMetricBigipPoolMemberAvailability(mbc.Metrics.BigipPoolMemberAvailability),
metricBigipPoolMemberConnectionCount: newMetricBigipPoolMemberConnectionCount(mbc.Metrics.BigipPoolMemberConnectionCount),
metricBigipPoolMemberDataTransmitted: newMetricBigipPoolMemberDataTransmitted(mbc.Metrics.BigipPoolMemberDataTransmitted),
metricBigipPoolMemberEnabled: newMetricBigipPoolMemberEnabled(mbc.Metrics.BigipPoolMemberEnabled),
metricBigipPoolMemberPacketCount: newMetricBigipPoolMemberPacketCount(mbc.Metrics.BigipPoolMemberPacketCount),
metricBigipPoolMemberRequestCount: newMetricBigipPoolMemberRequestCount(mbc.Metrics.BigipPoolMemberRequestCount),
metricBigipPoolMemberSessionCount: newMetricBigipPoolMemberSessionCount(mbc.Metrics.BigipPoolMemberSessionCount),
metricBigipVirtualServerAvailability: newMetricBigipVirtualServerAvailability(mbc.Metrics.BigipVirtualServerAvailability),
metricBigipVirtualServerConnectionCount: newMetricBigipVirtualServerConnectionCount(mbc.Metrics.BigipVirtualServerConnectionCount),
metricBigipVirtualServerDataTransmitted: newMetricBigipVirtualServerDataTransmitted(mbc.Metrics.BigipVirtualServerDataTransmitted),
metricBigipVirtualServerEnabled: newMetricBigipVirtualServerEnabled(mbc.Metrics.BigipVirtualServerEnabled),
metricBigipVirtualServerPacketCount: newMetricBigipVirtualServerPacketCount(mbc.Metrics.BigipVirtualServerPacketCount),
metricBigipVirtualServerRequestCount: newMetricBigipVirtualServerRequestCount(mbc.Metrics.BigipVirtualServerRequestCount),
resourceAttributeIncludeFilter: make(map[string]filter.Filter),
resourceAttributeExcludeFilter: make(map[string]filter.Filter),
}
if mbc.ResourceAttributes.BigipNodeIPAddress.MetricsInclude != nil {
mb.resourceAttributeIncludeFilter["bigip.node.ip_address"] = filter.CreateFilter(mbc.ResourceAttributes.BigipNodeIPAddress.MetricsInclude)
}
if mbc.ResourceAttributes.BigipNodeIPAddress.MetricsExclude != nil {
mb.resourceAttributeExcludeFilter["bigip.node.ip_address"] = filter.CreateFilter(mbc.ResourceAttributes.BigipNodeIPAddress.MetricsExclude)
}
if mbc.ResourceAttributes.BigipNodeName.MetricsInclude != nil {
mb.resourceAttributeIncludeFilter["bigip.node.name"] = filter.CreateFilter(mbc.ResourceAttributes.BigipNodeName.MetricsInclude)
}
if mbc.ResourceAttributes.BigipNodeName.MetricsExclude != nil {
mb.resourceAttributeExcludeFilter["bigip.node.name"] = filter.CreateFilter(mbc.ResourceAttributes.BigipNodeName.MetricsExclude)
}
if mbc.ResourceAttributes.BigipPoolName.MetricsInclude != nil {
mb.resourceAttributeIncludeFilter["bigip.pool.name"] = filter.CreateFilter(mbc.ResourceAttributes.BigipPoolName.MetricsInclude)
}
if mbc.ResourceAttributes.BigipPoolName.MetricsExclude != nil {
mb.resourceAttributeExcludeFilter["bigip.pool.name"] = filter.CreateFilter(mbc.ResourceAttributes.BigipPoolName.MetricsExclude)
}
if mbc.ResourceAttributes.BigipPoolMemberIPAddress.MetricsInclude != nil {
mb.resourceAttributeIncludeFilter["bigip.pool_member.ip_address"] = filter.CreateFilter(mbc.ResourceAttributes.BigipPoolMemberIPAddress.MetricsInclude)
}
if mbc.ResourceAttributes.BigipPoolMemberIPAddress.MetricsExclude != nil {
mb.resourceAttributeExcludeFilter["bigip.pool_member.ip_address"] = filter.CreateFilter(mbc.ResourceAttributes.BigipPoolMemberIPAddress.MetricsExclude)
}
if mbc.ResourceAttributes.BigipPoolMemberName.MetricsInclude != nil {
mb.resourceAttributeIncludeFilter["bigip.pool_member.name"] = filter.CreateFilter(mbc.ResourceAttributes.BigipPoolMemberName.MetricsInclude)
}
if mbc.ResourceAttributes.BigipPoolMemberName.MetricsExclude != nil {
mb.resourceAttributeExcludeFilter["bigip.pool_member.name"] = filter.CreateFilter(mbc.ResourceAttributes.BigipPoolMemberName.MetricsExclude)
}
if mbc.ResourceAttributes.BigipVirtualServerDestination.MetricsInclude != nil {
mb.resourceAttributeIncludeFilter["bigip.virtual_server.destination"] = filter.CreateFilter(mbc.ResourceAttributes.BigipVirtualServerDestination.MetricsInclude)
}
if mbc.ResourceAttributes.BigipVirtualServerDestination.MetricsExclude != nil {
mb.resourceAttributeExcludeFilter["bigip.virtual_server.destination"] = filter.CreateFilter(mbc.ResourceAttributes.BigipVirtualServerDestination.MetricsExclude)
}
if mbc.ResourceAttributes.BigipVirtualServerName.MetricsInclude != nil {
mb.resourceAttributeIncludeFilter["bigip.virtual_server.name"] = filter.CreateFilter(mbc.ResourceAttributes.BigipVirtualServerName.MetricsInclude)
}
if mbc.ResourceAttributes.BigipVirtualServerName.MetricsExclude != nil {
mb.resourceAttributeExcludeFilter["bigip.virtual_server.name"] = filter.CreateFilter(mbc.ResourceAttributes.BigipVirtualServerName.MetricsExclude)
}
for _, op := range options {
op.apply(mb)
}
return mb
}
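// exampleScrapeCycle is an illustrative sketch (not generated by mdatagen) of a single
// scrape with the builder above: construct it once, record data points each cycle, and emit
// the accumulated metrics. DefaultMetricsBuilderConfig is assumed to be the config helper
// generated alongside this file in generated_config.go, and settings is assumed to come
// from the receiver factory; the recorded values are illustrative only.
func exampleScrapeCycle(settings receiver.Settings) pmetric.Metrics {
	mb := NewMetricsBuilder(DefaultMetricsBuilderConfig(), settings)
	now := pcommon.NewTimestampFromTime(time.Now())

	// Record one data point per metric scraped from the BIG-IP API; recording into a
	// disabled metric is a no-op.
	mb.RecordBigipNodeConnectionCountDataPoint(now, 42)
	mb.RecordBigipNodeAvailabilityDataPoint(now, 1, AttributeAvailabilityStatusAvailable)

	// Emit moves everything recorded so far into a fresh pmetric.Metrics and resets the
	// builder for the next cycle.
	return mb.Emit()
}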
// NewResourceBuilder returns a new resource builder that should be used to build a resource associated with the emitted metrics.
func (mb *MetricsBuilder) NewResourceBuilder() *ResourceBuilder {
return NewResourceBuilder(mb.config.ResourceAttributes)
}
// updateCapacity updates the maximum observed number of metrics per resource, which is used as the metrics slice capacity.
func (mb *MetricsBuilder) updateCapacity(rm pmetric.ResourceMetrics) {
if mb.metricsCapacity < rm.ScopeMetrics().At(0).Metrics().Len() {
mb.metricsCapacity = rm.ScopeMetrics().At(0).Metrics().Len()
}
}
// ResourceMetricsOption applies changes to the provided resource metrics.
type ResourceMetricsOption interface {
apply(pmetric.ResourceMetrics)
}
type resourceMetricsOptionFunc func(pmetric.ResourceMetrics)
func (rmof resourceMetricsOptionFunc) apply(rm pmetric.ResourceMetrics) {
rmof(rm)
}
// WithResource sets the provided resource on the emitted ResourceMetrics.
// It's recommended to use ResourceBuilder to create the resource.
func WithResource(res pcommon.Resource) ResourceMetricsOption {
return resourceMetricsOptionFunc(func(rm pmetric.ResourceMetrics) {
res.CopyTo(rm.Resource())
})
}
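// exampleReuseResource is an illustrative sketch (not generated by mdatagen) showing that
// WithResource copies the provided resource onto each emitted ResourceMetrics, so a scraper
// that always reports the same entity can build the resource once and reuse it on every
// scrape. The SetBigipPoolName setter is assumed to be generated in generated_resource.go
// from this receiver's resource attributes.
func exampleReuseResource(mb *MetricsBuilder) pcommon.Resource {
	rb := mb.NewResourceBuilder()
	rb.SetBigipPoolName("/Common/web-pool") // assumed generated setter
	res := rb.Emit()
	// The same res value can be passed to EmitForResource(WithResource(res)) on each scrape.
	return res
}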
// WithStartTimeOverride overrides the start time for all data points of the emitted resource metrics.
// This option should only be used if a different start time has to be set on metrics coming from different resources.
func WithStartTimeOverride(start pcommon.Timestamp) ResourceMetricsOption {
return resourceMetricsOptionFunc(func(rm pmetric.ResourceMetrics) {
var dps pmetric.NumberDataPointSlice
metrics := rm.ScopeMetrics().At(0).Metrics()
for i := 0; i < metrics.Len(); i++ {
switch metrics.At(i).Type() {
case pmetric.MetricTypeGauge:
dps = metrics.At(i).Gauge().DataPoints()
case pmetric.MetricTypeSum:
dps = metrics.At(i).Sum().DataPoints()
}
for j := 0; j < dps.Len(); j++ {
dps.At(j).SetStartTimestamp(start)
}
}
})
}
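// exampleStartTimeOverride is an illustrative sketch (not generated by mdatagen) of the case
// the comment above describes: a monitored resource restarted at its own time, so its
// cumulative data points need a start timestamp different from the builder's default.
// poolResource and poolStart are assumed to be discovered by the scraper.
func exampleStartTimeOverride(mb *MetricsBuilder, poolResource pcommon.Resource, poolStart time.Time) {
	mb.EmitForResource(
		WithResource(poolResource),
		WithStartTimeOverride(pcommon.NewTimestampFromTime(poolStart)),
	)
}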
// EmitForResource saves all the generated metrics under a new resource and updates the internal state to be ready for
// recording another set of data points as part of another resource. This function can be helpful when one scraper
// needs to emit metrics from several resources. Otherwise, calling this function is not required; the `Emit` function
// can be called instead.
// Resource attributes should be provided as ResourceMetricsOption arguments.
func (mb *MetricsBuilder) EmitForResource(options ...ResourceMetricsOption) {
rm := pmetric.NewResourceMetrics()
ils := rm.ScopeMetrics().AppendEmpty()
ils.Scope().SetName(ScopeName)
ils.Scope().SetVersion(mb.buildInfo.Version)
ils.Metrics().EnsureCapacity(mb.metricsCapacity)
mb.metricBigipNodeAvailability.emit(ils.Metrics())
mb.metricBigipNodeConnectionCount.emit(ils.Metrics())
mb.metricBigipNodeDataTransmitted.emit(ils.Metrics())
mb.metricBigipNodeEnabled.emit(ils.Metrics())
mb.metricBigipNodePacketCount.emit(ils.Metrics())
mb.metricBigipNodeRequestCount.emit(ils.Metrics())
mb.metricBigipNodeSessionCount.emit(ils.Metrics())
mb.metricBigipPoolAvailability.emit(ils.Metrics())
mb.metricBigipPoolConnectionCount.emit(ils.Metrics())
mb.metricBigipPoolDataTransmitted.emit(ils.Metrics())
mb.metricBigipPoolEnabled.emit(ils.Metrics())
mb.metricBigipPoolMemberCount.emit(ils.Metrics())
mb.metricBigipPoolPacketCount.emit(ils.Metrics())
mb.metricBigipPoolRequestCount.emit(ils.Metrics())
mb.metricBigipPoolMemberAvailability.emit(ils.Metrics())
mb.metricBigipPoolMemberConnectionCount.emit(ils.Metrics())
mb.metricBigipPoolMemberDataTransmitted.emit(ils.Metrics())
mb.metricBigipPoolMemberEnabled.emit(ils.Metrics())
mb.metricBigipPoolMemberPacketCount.emit(ils.Metrics())
mb.metricBigipPoolMemberRequestCount.emit(ils.Metrics())
mb.metricBigipPoolMemberSessionCount.emit(ils.Metrics())
mb.metricBigipVirtualServerAvailability.emit(ils.Metrics())
mb.metricBigipVirtualServerConnectionCount.emit(ils.Metrics())
mb.metricBigipVirtualServerDataTransmitted.emit(ils.Metrics())
mb.metricBigipVirtualServerEnabled.emit(ils.Metrics())
mb.metricBigipVirtualServerPacketCount.emit(ils.Metrics())
mb.metricBigipVirtualServerRequestCount.emit(ils.Metrics())
for _, op := range options {
op.apply(rm)
}
for attr, filter := range mb.resourceAttributeIncludeFilter {
if val, ok := rm.Resource().Attributes().Get(attr); ok && !filter.Matches(val.AsString()) {
return
}
}
for attr, filter := range mb.resourceAttributeExcludeFilter {
if val, ok := rm.Resource().Attributes().Get(attr); ok && filter.Matches(val.AsString()) {
return
}
}
if ils.Metrics().Len() > 0 {
mb.updateCapacity(rm)
rm.MoveTo(mb.metricsBuffer.ResourceMetrics().AppendEmpty())
}
}
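// exampleEmitPerNode is an illustrative sketch (not generated by mdatagen) of the pattern
// described above: one scraper emitting metrics for several resources. Each iteration
// records the data points for a single node and then flushes them under that node's
// resource, keeping the buffered metrics grouped per resource. The nodeStats type and its
// fields are hypothetical stand-ins for data scraped from the BIG-IP API, and the
// SetBigipNodeName setter is assumed from generated_resource.go.
type nodeStats struct {
	name        string
	connections int64
}

func exampleEmitPerNode(mb *MetricsBuilder, now pcommon.Timestamp, nodes []nodeStats) pmetric.Metrics {
	for _, n := range nodes {
		mb.RecordBigipNodeConnectionCountDataPoint(now, n.connections)

		rb := mb.NewResourceBuilder()
		rb.SetBigipNodeName(n.name) // assumed generated setter
		mb.EmitForResource(WithResource(rb.Emit()))
	}
	// A single Emit at the end returns everything buffered by the per-resource calls.
	return mb.Emit()
}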
// Emit returns all the metrics accumulated by the metrics builder and updates the internal state to be ready for
// recording another set of metrics. This function will be responsible for applying all the transformations required to
// produce the metric representation defined in the metadata and user config, e.g. delta or cumulative.
func (mb *MetricsBuilder) Emit(options ...ResourceMetricsOption) pmetric.Metrics {
mb.EmitForResource(options...)
metrics := mb.metricsBuffer
mb.metricsBuffer = pmetric.NewMetrics()
return metrics
}
// RecordBigipNodeAvailabilityDataPoint adds a data point to bigip.node.availability metric.
func (mb *MetricsBuilder) RecordBigipNodeAvailabilityDataPoint(ts pcommon.Timestamp, val int64, availabilityStatusAttributeValue AttributeAvailabilityStatus) {
mb.metricBigipNodeAvailability.recordDataPoint(mb.startTime, ts, val, availabilityStatusAttributeValue.String())
}
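// exampleRecordAvailability is an illustrative sketch (not generated by mdatagen) of how a
// scraper typically feeds the availability Record functions: the status string returned by
// the BIG-IP API is mapped to the generated enum via MapAttributeAvailabilityStatus, and
// unrecognized strings are skipped rather than recorded with an empty attribute. The value
// 1 is illustrative.
func exampleRecordAvailability(mb *MetricsBuilder, now pcommon.Timestamp, apiStatus string) {
	status, ok := MapAttributeAvailabilityStatus[apiStatus]
	if !ok {
		return // unrecognized status; nothing to record
	}
	mb.RecordBigipNodeAvailabilityDataPoint(now, 1, status)
}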
// RecordBigipNodeConnectionCountDataPoint adds a data point to bigip.node.connection.count metric.
func (mb *MetricsBuilder) RecordBigipNodeConnectionCountDataPoint(ts pcommon.Timestamp, val int64) {
mb.metricBigipNodeConnectionCount.recordDataPoint(mb.startTime, ts, val)
}
// RecordBigipNodeDataTransmittedDataPoint adds a data point to bigip.node.data.transmitted metric.
func (mb *MetricsBuilder) RecordBigipNodeDataTransmittedDataPoint(ts pcommon.Timestamp, val int64, directionAttributeValue AttributeDirection) {
mb.metricBigipNodeDataTransmitted.recordDataPoint(mb.startTime, ts, val, directionAttributeValue.String())
}
// RecordBigipNodeEnabledDataPoint adds a data point to bigip.node.enabled metric.
func (mb *MetricsBuilder) RecordBigipNodeEnabledDataPoint(ts pcommon.Timestamp, val int64, enabledStatusAttributeValue AttributeEnabledStatus) {
mb.metricBigipNodeEnabled.recordDataPoint(mb.startTime, ts, val, enabledStatusAttributeValue.String())
}
// RecordBigipNodePacketCountDataPoint adds a data point to bigip.node.packet.count metric.
func (mb *MetricsBuilder) RecordBigipNodePacketCountDataPoint(ts pcommon.Timestamp, val int64, directionAttributeValue AttributeDirection) {
mb.metricBigipNodePacketCount.recordDataPoint(mb.startTime, ts, val, directionAttributeValue.String())
}
// RecordBigipNodeRequestCountDataPoint adds a data point to bigip.node.request.count metric.
func (mb *MetricsBuilder) RecordBigipNodeRequestCountDataPoint(ts pcommon.Timestamp, val int64) {
mb.metricBigipNodeRequestCount.recordDataPoint(mb.startTime, ts, val)
}
// RecordBigipNodeSessionCountDataPoint adds a data point to bigip.node.session.count metric.
func (mb *MetricsBuilder) RecordBigipNodeSessionCountDataPoint(ts pcommon.Timestamp, val int64) {
mb.metricBigipNodeSessionCount.recordDataPoint(mb.startTime, ts, val)
}
// RecordBigipPoolAvailabilityDataPoint adds a data point to bigip.pool.availability metric.
func (mb *MetricsBuilder) RecordBigipPoolAvailabilityDataPoint(ts pcommon.Timestamp, val int64, availabilityStatusAttributeValue AttributeAvailabilityStatus) {
mb.metricBigipPoolAvailability.recordDataPoint(mb.startTime, ts, val, availabilityStatusAttributeValue.String())
}
// RecordBigipPoolConnectionCountDataPoint adds a data point to bigip.pool.connection.count metric.
func (mb *MetricsBuilder) RecordBigipPoolConnectionCountDataPoint(ts pcommon.Timestamp, val int64) {
mb.metricBigipPoolConnectionCount.recordDataPoint(mb.startTime, ts, val)
}
// RecordBigipPoolDataTransmittedDataPoint adds a data point to bigip.pool.data.transmitted metric.
func (mb *MetricsBuilder) RecordBigipPoolDataTransmittedDataPoint(ts pcommon.Timestamp, val int64, directionAttributeValue AttributeDirection) {
mb.metricBigipPoolDataTransmitted.recordDataPoint(mb.startTime, ts, val, directionAttributeValue.String())
}
// RecordBigipPoolEnabledDataPoint adds a data point to bigip.pool.enabled metric.
func (mb *MetricsBuilder) RecordBigipPoolEnabledDataPoint(ts pcommon.Timestamp, val int64, enabledStatusAttributeValue AttributeEnabledStatus) {
mb.metricBigipPoolEnabled.recordDataPoint(mb.startTime, ts, val, enabledStatusAttributeValue.String())
}
// RecordBigipPoolMemberCountDataPoint adds a data point to bigip.pool.member.count metric.
func (mb *MetricsBuilder) RecordBigipPoolMemberCountDataPoint(ts pcommon.Timestamp, val int64, activeStatusAttributeValue AttributeActiveStatus) {
mb.metricBigipPoolMemberCount.recordDataPoint(mb.startTime, ts, val, activeStatusAttributeValue.String())
}
// RecordBigipPoolPacketCountDataPoint adds a data point to bigip.pool.packet.count metric.
func (mb *MetricsBuilder) RecordBigipPoolPacketCountDataPoint(ts pcommon.Timestamp, val int64, directionAttributeValue AttributeDirection) {
mb.metricBigipPoolPacketCount.recordDataPoint(mb.startTime, ts, val, directionAttributeValue.String())
}
// RecordBigipPoolRequestCountDataPoint adds a data point to bigip.pool.request.count metric.
func (mb *MetricsBuilder) RecordBigipPoolRequestCountDataPoint(ts pcommon.Timestamp, val int64) {
mb.metricBigipPoolRequestCount.recordDataPoint(mb.startTime, ts, val)
}
// RecordBigipPoolMemberAvailabilityDataPoint adds a data point to bigip.pool_member.availability metric.
func (mb *MetricsBuilder) RecordBigipPoolMemberAvailabilityDataPoint(ts pcommon.Timestamp, val int64, availabilityStatusAttributeValue AttributeAvailabilityStatus) {
mb.metricBigipPoolMemberAvailability.recordDataPoint(mb.startTime, ts, val, availabilityStatusAttributeValue.String())
}
// RecordBigipPoolMemberConnectionCountDataPoint adds a data point to bigip.pool_member.connection.count metric.
func (mb *MetricsBuilder) RecordBigipPoolMemberConnectionCountDataPoint(ts pcommon.Timestamp, val int64) {
mb.metricBigipPoolMemberConnectionCount.recordDataPoint(mb.startTime, ts, val)
}
// RecordBigipPoolMemberDataTransmittedDataPoint adds a data point to bigip.pool_member.data.transmitted metric.
func (mb *MetricsBuilder) RecordBigipPoolMemberDataTransmittedDataPoint(ts pcommon.Timestamp, val int64, directionAttributeValue AttributeDirection) {
mb.metricBigipPoolMemberDataTransmitted.recordDataPoint(mb.startTime, ts, val, directionAttributeValue.String())
}
// RecordBigipPoolMemberEnabledDataPoint adds a data point to bigip.pool_member.enabled metric.
func (mb *MetricsBuilder) RecordBigipPoolMemberEnabledDataPoint(ts pcommon.Timestamp, val int64, enabledStatusAttributeValue AttributeEnabledStatus) {
mb.metricBigipPoolMemberEnabled.recordDataPoint(mb.startTime, ts, val, enabledStatusAttributeValue.String())
}
// RecordBigipPoolMemberPacketCountDataPoint adds a data point to bigip.pool_member.packet.count metric.
func (mb *MetricsBuilder) RecordBigipPoolMemberPacketCountDataPoint(ts pcommon.Timestamp, val int64, directionAttributeValue AttributeDirection) {
mb.metricBigipPoolMemberPacketCount.recordDataPoint(mb.startTime, ts, val, directionAttributeValue.String())
}
// RecordBigipPoolMemberRequestCountDataPoint adds a data point to bigip.pool_member.request.count metric.
func (mb *MetricsBuilder) RecordBigipPoolMemberRequestCountDataPoint(ts pcommon.Timestamp, val int64) {
mb.metricBigipPoolMemberRequestCount.recordDataPoint(mb.startTime, ts, val)
}
// RecordBigipPoolMemberSessionCountDataPoint adds a data point to bigip.pool_member.session.count metric.
func (mb *MetricsBuilder) RecordBigipPoolMemberSessionCountDataPoint(ts pcommon.Timestamp, val int64) {
mb.metricBigipPoolMemberSessionCount.recordDataPoint(mb.startTime, ts, val)
}
// RecordBigipVirtualServerAvailabilityDataPoint adds a data point to bigip.virtual_server.availability metric.
func (mb *MetricsBuilder) RecordBigipVirtualServerAvailabilityDataPoint(ts pcommon.Timestamp, val int64, availabilityStatusAttributeValue AttributeAvailabilityStatus) {
mb.metricBigipVirtualServerAvailability.recordDataPoint(mb.startTime, ts, val, availabilityStatusAttributeValue.String())
}
// RecordBigipVirtualServerConnectionCountDataPoint adds a data point to bigip.virtual_server.connection.count metric.
func (mb *MetricsBuilder) RecordBigipVirtualServerConnectionCountDataPoint(ts pcommon.Timestamp, val int64) {
mb.metricBigipVirtualServerConnectionCount.recordDataPoint(mb.startTime, ts, val)
}
// RecordBigipVirtualServerDataTransmittedDataPoint adds a data point to bigip.virtual_server.data.transmitted metric.
func (mb *MetricsBuilder) RecordBigipVirtualServerDataTransmittedDataPoint(ts pcommon.Timestamp, val int64, directionAttributeValue AttributeDirection) {
mb.metricBigipVirtualServerDataTransmitted.recordDataPoint(mb.startTime, ts, val, directionAttributeValue.String())
}
// RecordBigipVirtualServerEnabledDataPoint adds a data point to bigip.virtual_server.enabled metric.
func (mb *MetricsBuilder) RecordBigipVirtualServerEnabledDataPoint(ts pcommon.Timestamp, val int64, enabledStatusAttributeValue AttributeEnabledStatus) {
mb.metricBigipVirtualServerEnabled.recordDataPoint(mb.startTime, ts, val, enabledStatusAttributeValue.String())
}
// RecordBigipVirtualServerPacketCountDataPoint adds a data point to bigip.virtual_server.packet.count metric.
func (mb *MetricsBuilder) RecordBigipVirtualServerPacketCountDataPoint(ts pcommon.Timestamp, val int64, directionAttributeValue AttributeDirection) {
mb.metricBigipVirtualServerPacketCount.recordDataPoint(mb.startTime, ts, val, directionAttributeValue.String())
}
// RecordBigipVirtualServerRequestCountDataPoint adds a data point to bigip.virtual_server.request.count metric.
func (mb *MetricsBuilder) RecordBigipVirtualServerRequestCountDataPoint(ts pcommon.Timestamp, val int64) {
mb.metricBigipVirtualServerRequestCount.recordDataPoint(mb.startTime, ts, val)
}
// Reset resets the metrics builder to its initial state. It should be used when the external metrics source is restarted,
// so that the metrics builder updates its startTime and resets its internal state accordingly.
func (mb *MetricsBuilder) Reset(options ...MetricBuilderOption) {
mb.startTime = pcommon.NewTimestampFromTime(time.Now())
for _, op := range options {
op.apply(mb)
}
}
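// exampleReset is an illustrative sketch (not generated by mdatagen): when the scraper
// detects that the monitored BIG-IP instance restarted, Reset re-stamps startTime so
// cumulative metrics do not appear to decrease. restartTime is assumed to be discovered by
// the scraper; without the option, Reset would simply use time.Now().
func exampleReset(mb *MetricsBuilder, restartTime time.Time) {
	mb.Reset(WithStartTime(pcommon.NewTimestampFromTime(restartTime)))
}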