components/otelopscol/receiver/varnishreceiver/internal/metadata/generated_metrics.go (841 lines of code) (raw):
// Code generated by mdatagen. DO NOT EDIT.
package metadata
import (
"time"
"go.opentelemetry.io/collector/component"
"go.opentelemetry.io/collector/filter"
"go.opentelemetry.io/collector/pdata/pcommon"
"go.opentelemetry.io/collector/pdata/pmetric"
"go.opentelemetry.io/collector/receiver"
)
// AttributeBackendConnectionType specifies the value backend_connection_type attribute.
type AttributeBackendConnectionType int

const (
	_ AttributeBackendConnectionType = iota
	AttributeBackendConnectionTypeSuccess
	AttributeBackendConnectionTypeRecycle
	AttributeBackendConnectionTypeReuse
	AttributeBackendConnectionTypeFail
	AttributeBackendConnectionTypeUnhealthy
	AttributeBackendConnectionTypeBusy
	AttributeBackendConnectionTypeRetry
)

// String returns the string representation of the AttributeBackendConnectionType,
// or the empty string for values outside the declared range.
func (av AttributeBackendConnectionType) String() string {
	names := [...]string{
		AttributeBackendConnectionTypeSuccess:   "success",
		AttributeBackendConnectionTypeRecycle:   "recycle",
		AttributeBackendConnectionTypeReuse:     "reuse",
		AttributeBackendConnectionTypeFail:      "fail",
		AttributeBackendConnectionTypeUnhealthy: "unhealthy",
		AttributeBackendConnectionTypeBusy:      "busy",
		AttributeBackendConnectionTypeRetry:     "retry",
	}
	if av < 1 || int(av) >= len(names) {
		return ""
	}
	return names[av]
}

// MapAttributeBackendConnectionType is a helper map of string to AttributeBackendConnectionType attribute value.
var MapAttributeBackendConnectionType = map[string]AttributeBackendConnectionType{
	"success":   AttributeBackendConnectionTypeSuccess,
	"recycle":   AttributeBackendConnectionTypeRecycle,
	"reuse":     AttributeBackendConnectionTypeReuse,
	"fail":      AttributeBackendConnectionTypeFail,
	"unhealthy": AttributeBackendConnectionTypeUnhealthy,
	"busy":      AttributeBackendConnectionTypeBusy,
	"retry":     AttributeBackendConnectionTypeRetry,
}
// AttributeCacheOperations specifies the value cache_operations attribute.
type AttributeCacheOperations int

const (
	_ AttributeCacheOperations = iota
	AttributeCacheOperationsHit
	AttributeCacheOperationsMiss
	AttributeCacheOperationsHitPass
)

// String returns the string representation of the AttributeCacheOperations,
// or the empty string for values outside the declared range.
func (av AttributeCacheOperations) String() string {
	names := [...]string{
		AttributeCacheOperationsHit:     "hit",
		AttributeCacheOperationsMiss:    "miss",
		AttributeCacheOperationsHitPass: "hit_pass",
	}
	if av < 1 || int(av) >= len(names) {
		return ""
	}
	return names[av]
}

// MapAttributeCacheOperations is a helper map of string to AttributeCacheOperations attribute value.
var MapAttributeCacheOperations = map[string]AttributeCacheOperations{
	"hit":      AttributeCacheOperationsHit,
	"miss":     AttributeCacheOperationsMiss,
	"hit_pass": AttributeCacheOperationsHitPass,
}
// AttributeSessionType specifies the value session_type attribute.
type AttributeSessionType int

const (
	_ AttributeSessionType = iota
	AttributeSessionTypeAccepted
	AttributeSessionTypeDropped
	AttributeSessionTypeFailed
)

// String returns the string representation of the AttributeSessionType,
// or the empty string for values outside the declared range.
func (av AttributeSessionType) String() string {
	names := [...]string{
		AttributeSessionTypeAccepted: "accepted",
		AttributeSessionTypeDropped:  "dropped",
		AttributeSessionTypeFailed:   "failed",
	}
	if av < 1 || int(av) >= len(names) {
		return ""
	}
	return names[av]
}

// MapAttributeSessionType is a helper map of string to AttributeSessionType attribute value.
var MapAttributeSessionType = map[string]AttributeSessionType{
	"accepted": AttributeSessionTypeAccepted,
	"dropped":  AttributeSessionTypeDropped,
	"failed":   AttributeSessionTypeFailed,
}
// AttributeState specifies the value state attribute.
type AttributeState int

const (
	_ AttributeState = iota
	AttributeStateReceived
	AttributeStateDropped
)

// String returns the string representation of the AttributeState,
// or the empty string for values outside the declared range.
func (av AttributeState) String() string {
	names := [...]string{
		AttributeStateReceived: "received",
		AttributeStateDropped:  "dropped",
	}
	if av < 1 || int(av) >= len(names) {
		return ""
	}
	return names[av]
}

// MapAttributeState is a helper map of string to AttributeState attribute value.
var MapAttributeState = map[string]AttributeState{
	"received": AttributeStateReceived,
	"dropped":  AttributeStateDropped,
}
// AttributeThreadOperations specifies the value thread_operations attribute.
type AttributeThreadOperations int

const (
	_ AttributeThreadOperations = iota
	AttributeThreadOperationsCreated
	AttributeThreadOperationsDestroyed
	AttributeThreadOperationsFailed
)

// String returns the string representation of the AttributeThreadOperations,
// or the empty string for values outside the declared range.
func (av AttributeThreadOperations) String() string {
	names := [...]string{
		AttributeThreadOperationsCreated:   "created",
		AttributeThreadOperationsDestroyed: "destroyed",
		AttributeThreadOperationsFailed:    "failed",
	}
	if av < 1 || int(av) >= len(names) {
		return ""
	}
	return names[av]
}

// MapAttributeThreadOperations is a helper map of string to AttributeThreadOperations attribute value.
var MapAttributeThreadOperations = map[string]AttributeThreadOperations{
	"created":   AttributeThreadOperationsCreated,
	"destroyed": AttributeThreadOperationsDestroyed,
	"failed":    AttributeThreadOperationsFailed,
}
// MetricsInfo holds the canonical metric name for every metric this receiver
// can produce, keyed by the metric's generated identifier.
var MetricsInfo = metricsInfo{
	VarnishBackendConnectionCount: metricInfo{
		Name: "varnish.backend.connection.count",
	},
	VarnishBackendRequestCount: metricInfo{
		Name: "varnish.backend.request.count",
	},
	VarnishCacheOperationCount: metricInfo{
		Name: "varnish.cache.operation.count",
	},
	VarnishClientRequestCount: metricInfo{
		Name: "varnish.client.request.count",
	},
	VarnishClientRequestErrorCount: metricInfo{
		Name: "varnish.client.request.error.count",
	},
	VarnishObjectCount: metricInfo{
		Name: "varnish.object.count",
	},
	VarnishObjectExpired: metricInfo{
		Name: "varnish.object.expired",
	},
	VarnishObjectMoved: metricInfo{
		Name: "varnish.object.moved",
	},
	VarnishObjectNuked: metricInfo{
		Name: "varnish.object.nuked",
	},
	VarnishSessionCount: metricInfo{
		Name: "varnish.session.count",
	},
	VarnishThreadOperationCount: metricInfo{
		Name: "varnish.thread.operation.count",
	},
}

// metricsInfo groups one metricInfo entry per generated metric.
type metricsInfo struct {
	VarnishBackendConnectionCount  metricInfo
	VarnishBackendRequestCount     metricInfo
	VarnishCacheOperationCount     metricInfo
	VarnishClientRequestCount      metricInfo
	VarnishClientRequestErrorCount metricInfo
	VarnishObjectCount             metricInfo
	VarnishObjectExpired           metricInfo
	VarnishObjectMoved             metricInfo
	VarnishObjectNuked             metricInfo
	VarnishSessionCount            metricInfo
	VarnishThreadOperationCount    metricInfo
}

// metricInfo describes a single metric's static metadata.
type metricInfo struct {
	Name string // full dotted metric name as emitted in the telemetry payload.
}
type metricVarnishBackendConnectionCount struct {
	data     pmetric.Metric // data buffer for generated metric.
	config   MetricConfig   // metric config provided by user.
	capacity int            // max observed number of data points added to the metric.
}

// init resets the buffer to an empty varnish.backend.connection.count metric
// with its static metadata set and data-point capacity pre-allocated.
func (m *metricVarnishBackendConnectionCount) init() {
	m.data.SetName("varnish.backend.connection.count")
	m.data.SetDescription("The backend connection type count.")
	m.data.SetUnit("{connections}")
	sum := m.data.SetEmptySum()
	sum.SetIsMonotonic(true)
	sum.SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
	sum.DataPoints().EnsureCapacity(m.capacity)
}

// recordDataPoint appends one data point with the given timestamps, value, and
// backend connection type attribute. No-op when the metric is disabled.
func (m *metricVarnishBackendConnectionCount) recordDataPoint(start, ts pcommon.Timestamp, val int64, backendConnectionTypeAttributeValue string) {
	if !m.config.Enabled {
		return
	}
	dp := m.data.Sum().DataPoints().AppendEmpty()
	dp.SetStartTimestamp(start)
	dp.SetTimestamp(ts)
	dp.SetIntValue(val)
	dp.Attributes().PutStr("kind", backendConnectionTypeAttributeValue)
}

// updateCapacity tracks the largest data-point count observed so far so the
// next init can pre-allocate the slice to that size.
func (m *metricVarnishBackendConnectionCount) updateCapacity() {
	if n := m.data.Sum().DataPoints().Len(); n > m.capacity {
		m.capacity = n
	}
}

// emit moves accumulated data points into metrics and re-initializes the
// buffer for the next cycle; disabled or empty metrics emit nothing.
func (m *metricVarnishBackendConnectionCount) emit(metrics pmetric.MetricSlice) {
	if !m.config.Enabled || m.data.Sum().DataPoints().Len() == 0 {
		return
	}
	m.updateCapacity()
	m.data.MoveTo(metrics.AppendEmpty())
	m.init()
}

// newMetricVarnishBackendConnectionCount builds the metric; the data buffer is
// allocated only when the metric is enabled in config.
func newMetricVarnishBackendConnectionCount(cfg MetricConfig) metricVarnishBackendConnectionCount {
	m := metricVarnishBackendConnectionCount{config: cfg}
	if !cfg.Enabled {
		return m
	}
	m.data = pmetric.NewMetric()
	m.init()
	return m
}
type metricVarnishBackendRequestCount struct {
	data     pmetric.Metric // data buffer for generated metric.
	config   MetricConfig   // metric config provided by user.
	capacity int            // max observed number of data points added to the metric.
}

// init resets the buffer to an empty varnish.backend.request.count metric
// with its static metadata set.
func (m *metricVarnishBackendRequestCount) init() {
	m.data.SetName("varnish.backend.request.count")
	m.data.SetDescription("The backend requests count.")
	m.data.SetUnit("{requests}")
	sum := m.data.SetEmptySum()
	sum.SetIsMonotonic(true)
	sum.SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
}

// recordDataPoint appends one data point with the given timestamps and value.
// No-op when the metric is disabled.
func (m *metricVarnishBackendRequestCount) recordDataPoint(start, ts pcommon.Timestamp, val int64) {
	if !m.config.Enabled {
		return
	}
	dp := m.data.Sum().DataPoints().AppendEmpty()
	dp.SetStartTimestamp(start)
	dp.SetTimestamp(ts)
	dp.SetIntValue(val)
}

// updateCapacity tracks the largest data-point count observed so far so the
// next init can pre-allocate the slice to that size.
func (m *metricVarnishBackendRequestCount) updateCapacity() {
	if n := m.data.Sum().DataPoints().Len(); n > m.capacity {
		m.capacity = n
	}
}

// emit moves accumulated data points into metrics and re-initializes the
// buffer for the next cycle; disabled or empty metrics emit nothing.
func (m *metricVarnishBackendRequestCount) emit(metrics pmetric.MetricSlice) {
	if !m.config.Enabled || m.data.Sum().DataPoints().Len() == 0 {
		return
	}
	m.updateCapacity()
	m.data.MoveTo(metrics.AppendEmpty())
	m.init()
}

// newMetricVarnishBackendRequestCount builds the metric; the data buffer is
// allocated only when the metric is enabled in config.
func newMetricVarnishBackendRequestCount(cfg MetricConfig) metricVarnishBackendRequestCount {
	m := metricVarnishBackendRequestCount{config: cfg}
	if !cfg.Enabled {
		return m
	}
	m.data = pmetric.NewMetric()
	m.init()
	return m
}
type metricVarnishCacheOperationCount struct {
	data     pmetric.Metric // data buffer for generated metric.
	config   MetricConfig   // metric config provided by user.
	capacity int            // max observed number of data points added to the metric.
}

// init resets the buffer to an empty varnish.cache.operation.count metric
// with its static metadata set and data-point capacity pre-allocated.
func (m *metricVarnishCacheOperationCount) init() {
	m.data.SetName("varnish.cache.operation.count")
	m.data.SetDescription("The cache operation type count.")
	m.data.SetUnit("{operations}")
	sum := m.data.SetEmptySum()
	sum.SetIsMonotonic(true)
	sum.SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
	sum.DataPoints().EnsureCapacity(m.capacity)
}

// recordDataPoint appends one data point with the given timestamps, value, and
// cache operation attribute. No-op when the metric is disabled.
func (m *metricVarnishCacheOperationCount) recordDataPoint(start, ts pcommon.Timestamp, val int64, cacheOperationsAttributeValue string) {
	if !m.config.Enabled {
		return
	}
	dp := m.data.Sum().DataPoints().AppendEmpty()
	dp.SetStartTimestamp(start)
	dp.SetTimestamp(ts)
	dp.SetIntValue(val)
	dp.Attributes().PutStr("operation", cacheOperationsAttributeValue)
}

// updateCapacity tracks the largest data-point count observed so far so the
// next init can pre-allocate the slice to that size.
func (m *metricVarnishCacheOperationCount) updateCapacity() {
	if n := m.data.Sum().DataPoints().Len(); n > m.capacity {
		m.capacity = n
	}
}

// emit moves accumulated data points into metrics and re-initializes the
// buffer for the next cycle; disabled or empty metrics emit nothing.
func (m *metricVarnishCacheOperationCount) emit(metrics pmetric.MetricSlice) {
	if !m.config.Enabled || m.data.Sum().DataPoints().Len() == 0 {
		return
	}
	m.updateCapacity()
	m.data.MoveTo(metrics.AppendEmpty())
	m.init()
}

// newMetricVarnishCacheOperationCount builds the metric; the data buffer is
// allocated only when the metric is enabled in config.
func newMetricVarnishCacheOperationCount(cfg MetricConfig) metricVarnishCacheOperationCount {
	m := metricVarnishCacheOperationCount{config: cfg}
	if !cfg.Enabled {
		return m
	}
	m.data = pmetric.NewMetric()
	m.init()
	return m
}
type metricVarnishClientRequestCount struct {
	data     pmetric.Metric // data buffer for generated metric.
	config   MetricConfig   // metric config provided by user.
	capacity int            // max observed number of data points added to the metric.
}

// init resets the buffer to an empty varnish.client.request.count metric
// with its static metadata set and data-point capacity pre-allocated.
func (m *metricVarnishClientRequestCount) init() {
	m.data.SetName("varnish.client.request.count")
	m.data.SetDescription("The client request count.")
	m.data.SetUnit("{requests}")
	sum := m.data.SetEmptySum()
	sum.SetIsMonotonic(true)
	sum.SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
	sum.DataPoints().EnsureCapacity(m.capacity)
}

// recordDataPoint appends one data point with the given timestamps, value, and
// state attribute. No-op when the metric is disabled.
func (m *metricVarnishClientRequestCount) recordDataPoint(start, ts pcommon.Timestamp, val int64, stateAttributeValue string) {
	if !m.config.Enabled {
		return
	}
	dp := m.data.Sum().DataPoints().AppendEmpty()
	dp.SetStartTimestamp(start)
	dp.SetTimestamp(ts)
	dp.SetIntValue(val)
	dp.Attributes().PutStr("state", stateAttributeValue)
}

// updateCapacity tracks the largest data-point count observed so far so the
// next init can pre-allocate the slice to that size.
func (m *metricVarnishClientRequestCount) updateCapacity() {
	if n := m.data.Sum().DataPoints().Len(); n > m.capacity {
		m.capacity = n
	}
}

// emit moves accumulated data points into metrics and re-initializes the
// buffer for the next cycle; disabled or empty metrics emit nothing.
func (m *metricVarnishClientRequestCount) emit(metrics pmetric.MetricSlice) {
	if !m.config.Enabled || m.data.Sum().DataPoints().Len() == 0 {
		return
	}
	m.updateCapacity()
	m.data.MoveTo(metrics.AppendEmpty())
	m.init()
}

// newMetricVarnishClientRequestCount builds the metric; the data buffer is
// allocated only when the metric is enabled in config.
func newMetricVarnishClientRequestCount(cfg MetricConfig) metricVarnishClientRequestCount {
	m := metricVarnishClientRequestCount{config: cfg}
	if !cfg.Enabled {
		return m
	}
	m.data = pmetric.NewMetric()
	m.init()
	return m
}
type metricVarnishClientRequestErrorCount struct {
	data     pmetric.Metric // data buffer for generated metric.
	config   MetricConfig   // metric config provided by user.
	capacity int            // max observed number of data points added to the metric.
}

// init resets the buffer to an empty varnish.client.request.error.count metric
// with its static metadata set and data-point capacity pre-allocated.
func (m *metricVarnishClientRequestErrorCount) init() {
	m.data.SetName("varnish.client.request.error.count")
	m.data.SetDescription("The client request errors received by status code.")
	m.data.SetUnit("{requests}")
	sum := m.data.SetEmptySum()
	sum.SetIsMonotonic(true)
	sum.SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
	sum.DataPoints().EnsureCapacity(m.capacity)
}

// recordDataPoint appends one data point with the given timestamps, value, and
// HTTP status code attribute. No-op when the metric is disabled.
func (m *metricVarnishClientRequestErrorCount) recordDataPoint(start, ts pcommon.Timestamp, val int64, httpStatusCodeAttributeValue string) {
	if !m.config.Enabled {
		return
	}
	dp := m.data.Sum().DataPoints().AppendEmpty()
	dp.SetStartTimestamp(start)
	dp.SetTimestamp(ts)
	dp.SetIntValue(val)
	dp.Attributes().PutStr("status_code", httpStatusCodeAttributeValue)
}

// updateCapacity tracks the largest data-point count observed so far so the
// next init can pre-allocate the slice to that size.
func (m *metricVarnishClientRequestErrorCount) updateCapacity() {
	if n := m.data.Sum().DataPoints().Len(); n > m.capacity {
		m.capacity = n
	}
}

// emit moves accumulated data points into metrics and re-initializes the
// buffer for the next cycle; disabled or empty metrics emit nothing.
func (m *metricVarnishClientRequestErrorCount) emit(metrics pmetric.MetricSlice) {
	if !m.config.Enabled || m.data.Sum().DataPoints().Len() == 0 {
		return
	}
	m.updateCapacity()
	m.data.MoveTo(metrics.AppendEmpty())
	m.init()
}

// newMetricVarnishClientRequestErrorCount builds the metric; the data buffer
// is allocated only when the metric is enabled in config.
func newMetricVarnishClientRequestErrorCount(cfg MetricConfig) metricVarnishClientRequestErrorCount {
	m := metricVarnishClientRequestErrorCount{config: cfg}
	if !cfg.Enabled {
		return m
	}
	m.data = pmetric.NewMetric()
	m.init()
	return m
}
type metricVarnishObjectCount struct {
	data     pmetric.Metric // data buffer for generated metric.
	config   MetricConfig   // metric config provided by user.
	capacity int            // max observed number of data points added to the metric.
}

// init resets the buffer to an empty varnish.object.count metric with its
// static metadata set. Note this sum is non-monotonic (a gauge-like count).
func (m *metricVarnishObjectCount) init() {
	m.data.SetName("varnish.object.count")
	m.data.SetDescription("The HTTP objects in the cache count.")
	m.data.SetUnit("{objects}")
	sum := m.data.SetEmptySum()
	sum.SetIsMonotonic(false)
	sum.SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
}

// recordDataPoint appends one data point with the given timestamps and value.
// No-op when the metric is disabled.
func (m *metricVarnishObjectCount) recordDataPoint(start, ts pcommon.Timestamp, val int64) {
	if !m.config.Enabled {
		return
	}
	dp := m.data.Sum().DataPoints().AppendEmpty()
	dp.SetStartTimestamp(start)
	dp.SetTimestamp(ts)
	dp.SetIntValue(val)
}

// updateCapacity tracks the largest data-point count observed so far so the
// next init can pre-allocate the slice to that size.
func (m *metricVarnishObjectCount) updateCapacity() {
	if n := m.data.Sum().DataPoints().Len(); n > m.capacity {
		m.capacity = n
	}
}

// emit moves accumulated data points into metrics and re-initializes the
// buffer for the next cycle; disabled or empty metrics emit nothing.
func (m *metricVarnishObjectCount) emit(metrics pmetric.MetricSlice) {
	if !m.config.Enabled || m.data.Sum().DataPoints().Len() == 0 {
		return
	}
	m.updateCapacity()
	m.data.MoveTo(metrics.AppendEmpty())
	m.init()
}

// newMetricVarnishObjectCount builds the metric; the data buffer is allocated
// only when the metric is enabled in config.
func newMetricVarnishObjectCount(cfg MetricConfig) metricVarnishObjectCount {
	m := metricVarnishObjectCount{config: cfg}
	if !cfg.Enabled {
		return m
	}
	m.data = pmetric.NewMetric()
	m.init()
	return m
}
type metricVarnishObjectExpired struct {
	data     pmetric.Metric // data buffer for generated metric.
	config   MetricConfig   // metric config provided by user.
	capacity int            // max observed number of data points added to the metric.
}

// init resets the buffer to an empty varnish.object.expired metric with its
// static metadata set.
func (m *metricVarnishObjectExpired) init() {
	m.data.SetName("varnish.object.expired")
	m.data.SetDescription("The expired objects from old age count.")
	m.data.SetUnit("{objects}")
	sum := m.data.SetEmptySum()
	sum.SetIsMonotonic(true)
	sum.SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
}

// recordDataPoint appends one data point with the given timestamps and value.
// No-op when the metric is disabled.
func (m *metricVarnishObjectExpired) recordDataPoint(start, ts pcommon.Timestamp, val int64) {
	if !m.config.Enabled {
		return
	}
	dp := m.data.Sum().DataPoints().AppendEmpty()
	dp.SetStartTimestamp(start)
	dp.SetTimestamp(ts)
	dp.SetIntValue(val)
}

// updateCapacity tracks the largest data-point count observed so far so the
// next init can pre-allocate the slice to that size.
func (m *metricVarnishObjectExpired) updateCapacity() {
	if n := m.data.Sum().DataPoints().Len(); n > m.capacity {
		m.capacity = n
	}
}

// emit moves accumulated data points into metrics and re-initializes the
// buffer for the next cycle; disabled or empty metrics emit nothing.
func (m *metricVarnishObjectExpired) emit(metrics pmetric.MetricSlice) {
	if !m.config.Enabled || m.data.Sum().DataPoints().Len() == 0 {
		return
	}
	m.updateCapacity()
	m.data.MoveTo(metrics.AppendEmpty())
	m.init()
}

// newMetricVarnishObjectExpired builds the metric; the data buffer is
// allocated only when the metric is enabled in config.
func newMetricVarnishObjectExpired(cfg MetricConfig) metricVarnishObjectExpired {
	m := metricVarnishObjectExpired{config: cfg}
	if !cfg.Enabled {
		return m
	}
	m.data = pmetric.NewMetric()
	m.init()
	return m
}
type metricVarnishObjectMoved struct {
	data     pmetric.Metric // data buffer for generated metric.
	config   MetricConfig   // metric config provided by user.
	capacity int            // max observed number of data points added to the metric.
}

// init resets the buffer to an empty varnish.object.moved metric with its
// static metadata set.
func (m *metricVarnishObjectMoved) init() {
	m.data.SetName("varnish.object.moved")
	m.data.SetDescription("The moved operations done on the LRU list count.")
	m.data.SetUnit("{objects}")
	sum := m.data.SetEmptySum()
	sum.SetIsMonotonic(true)
	sum.SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
}

// recordDataPoint appends one data point with the given timestamps and value.
// No-op when the metric is disabled.
func (m *metricVarnishObjectMoved) recordDataPoint(start, ts pcommon.Timestamp, val int64) {
	if !m.config.Enabled {
		return
	}
	dp := m.data.Sum().DataPoints().AppendEmpty()
	dp.SetStartTimestamp(start)
	dp.SetTimestamp(ts)
	dp.SetIntValue(val)
}

// updateCapacity tracks the largest data-point count observed so far so the
// next init can pre-allocate the slice to that size.
func (m *metricVarnishObjectMoved) updateCapacity() {
	if n := m.data.Sum().DataPoints().Len(); n > m.capacity {
		m.capacity = n
	}
}

// emit moves accumulated data points into metrics and re-initializes the
// buffer for the next cycle; disabled or empty metrics emit nothing.
func (m *metricVarnishObjectMoved) emit(metrics pmetric.MetricSlice) {
	if !m.config.Enabled || m.data.Sum().DataPoints().Len() == 0 {
		return
	}
	m.updateCapacity()
	m.data.MoveTo(metrics.AppendEmpty())
	m.init()
}

// newMetricVarnishObjectMoved builds the metric; the data buffer is allocated
// only when the metric is enabled in config.
func newMetricVarnishObjectMoved(cfg MetricConfig) metricVarnishObjectMoved {
	m := metricVarnishObjectMoved{config: cfg}
	if !cfg.Enabled {
		return m
	}
	m.data = pmetric.NewMetric()
	m.init()
	return m
}
type metricVarnishObjectNuked struct {
	data     pmetric.Metric // data buffer for generated metric.
	config   MetricConfig   // metric config provided by user.
	capacity int            // max observed number of data points added to the metric.
}

// init resets the buffer to an empty varnish.object.nuked metric with its
// static metadata set.
func (m *metricVarnishObjectNuked) init() {
	m.data.SetName("varnish.object.nuked")
	m.data.SetDescription("The objects that have been forcefully evicted from storage count.")
	m.data.SetUnit("{objects}")
	sum := m.data.SetEmptySum()
	sum.SetIsMonotonic(true)
	sum.SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
}

// recordDataPoint appends one data point with the given timestamps and value.
// No-op when the metric is disabled.
func (m *metricVarnishObjectNuked) recordDataPoint(start, ts pcommon.Timestamp, val int64) {
	if !m.config.Enabled {
		return
	}
	dp := m.data.Sum().DataPoints().AppendEmpty()
	dp.SetStartTimestamp(start)
	dp.SetTimestamp(ts)
	dp.SetIntValue(val)
}

// updateCapacity tracks the largest data-point count observed so far so the
// next init can pre-allocate the slice to that size.
func (m *metricVarnishObjectNuked) updateCapacity() {
	if n := m.data.Sum().DataPoints().Len(); n > m.capacity {
		m.capacity = n
	}
}

// emit moves accumulated data points into metrics and re-initializes the
// buffer for the next cycle; disabled or empty metrics emit nothing.
func (m *metricVarnishObjectNuked) emit(metrics pmetric.MetricSlice) {
	if !m.config.Enabled || m.data.Sum().DataPoints().Len() == 0 {
		return
	}
	m.updateCapacity()
	m.data.MoveTo(metrics.AppendEmpty())
	m.init()
}

// newMetricVarnishObjectNuked builds the metric; the data buffer is allocated
// only when the metric is enabled in config.
func newMetricVarnishObjectNuked(cfg MetricConfig) metricVarnishObjectNuked {
	m := metricVarnishObjectNuked{config: cfg}
	if !cfg.Enabled {
		return m
	}
	m.data = pmetric.NewMetric()
	m.init()
	return m
}
type metricVarnishSessionCount struct {
	data     pmetric.Metric // data buffer for generated metric.
	config   MetricConfig   // metric config provided by user.
	capacity int            // max observed number of data points added to the metric.
}

// init resets the buffer to an empty varnish.session.count metric with its
// static metadata set and data-point capacity pre-allocated.
func (m *metricVarnishSessionCount) init() {
	m.data.SetName("varnish.session.count")
	m.data.SetDescription("The session connection type count.")
	m.data.SetUnit("{connections}")
	sum := m.data.SetEmptySum()
	sum.SetIsMonotonic(true)
	sum.SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
	sum.DataPoints().EnsureCapacity(m.capacity)
}

// recordDataPoint appends one data point with the given timestamps, value, and
// session type attribute. No-op when the metric is disabled.
func (m *metricVarnishSessionCount) recordDataPoint(start, ts pcommon.Timestamp, val int64, sessionTypeAttributeValue string) {
	if !m.config.Enabled {
		return
	}
	dp := m.data.Sum().DataPoints().AppendEmpty()
	dp.SetStartTimestamp(start)
	dp.SetTimestamp(ts)
	dp.SetIntValue(val)
	dp.Attributes().PutStr("kind", sessionTypeAttributeValue)
}

// updateCapacity tracks the largest data-point count observed so far so the
// next init can pre-allocate the slice to that size.
func (m *metricVarnishSessionCount) updateCapacity() {
	if n := m.data.Sum().DataPoints().Len(); n > m.capacity {
		m.capacity = n
	}
}

// emit moves accumulated data points into metrics and re-initializes the
// buffer for the next cycle; disabled or empty metrics emit nothing.
func (m *metricVarnishSessionCount) emit(metrics pmetric.MetricSlice) {
	if !m.config.Enabled || m.data.Sum().DataPoints().Len() == 0 {
		return
	}
	m.updateCapacity()
	m.data.MoveTo(metrics.AppendEmpty())
	m.init()
}

// newMetricVarnishSessionCount builds the metric; the data buffer is allocated
// only when the metric is enabled in config.
func newMetricVarnishSessionCount(cfg MetricConfig) metricVarnishSessionCount {
	m := metricVarnishSessionCount{config: cfg}
	if !cfg.Enabled {
		return m
	}
	m.data = pmetric.NewMetric()
	m.init()
	return m
}
type metricVarnishThreadOperationCount struct {
	data     pmetric.Metric // data buffer for generated metric.
	config   MetricConfig   // metric config provided by user.
	capacity int            // max observed number of data points added to the metric.
}

// init resets the buffer to an empty varnish.thread.operation.count metric
// with its static metadata set and data-point capacity pre-allocated.
func (m *metricVarnishThreadOperationCount) init() {
	m.data.SetName("varnish.thread.operation.count")
	m.data.SetDescription("The thread operation type count.")
	m.data.SetUnit("{operations}")
	sum := m.data.SetEmptySum()
	sum.SetIsMonotonic(true)
	sum.SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
	sum.DataPoints().EnsureCapacity(m.capacity)
}

// recordDataPoint appends one data point with the given timestamps, value, and
// thread operation attribute. No-op when the metric is disabled.
func (m *metricVarnishThreadOperationCount) recordDataPoint(start, ts pcommon.Timestamp, val int64, threadOperationsAttributeValue string) {
	if !m.config.Enabled {
		return
	}
	dp := m.data.Sum().DataPoints().AppendEmpty()
	dp.SetStartTimestamp(start)
	dp.SetTimestamp(ts)
	dp.SetIntValue(val)
	dp.Attributes().PutStr("operation", threadOperationsAttributeValue)
}

// updateCapacity tracks the largest data-point count observed so far so the
// next init can pre-allocate the slice to that size.
func (m *metricVarnishThreadOperationCount) updateCapacity() {
	if n := m.data.Sum().DataPoints().Len(); n > m.capacity {
		m.capacity = n
	}
}

// emit moves accumulated data points into metrics and re-initializes the
// buffer for the next cycle; disabled or empty metrics emit nothing.
func (m *metricVarnishThreadOperationCount) emit(metrics pmetric.MetricSlice) {
	if !m.config.Enabled || m.data.Sum().DataPoints().Len() == 0 {
		return
	}
	m.updateCapacity()
	m.data.MoveTo(metrics.AppendEmpty())
	m.init()
}

// newMetricVarnishThreadOperationCount builds the metric; the data buffer is
// allocated only when the metric is enabled in config.
func newMetricVarnishThreadOperationCount(cfg MetricConfig) metricVarnishThreadOperationCount {
	m := metricVarnishThreadOperationCount{config: cfg}
	if !cfg.Enabled {
		return m
	}
	m.data = pmetric.NewMetric()
	m.init()
	return m
}
// MetricsBuilder provides an interface for scrapers to report metrics while taking care of all the transformations
// required to produce metric representation defined in metadata and user config.
// Construct with NewMetricsBuilder; record data points via the Record* methods,
// then call EmitForResource/Emit to flush.
type MetricsBuilder struct {
	config          MetricsBuilderConfig // config of the metrics builder.
	startTime       pcommon.Timestamp    // start time that will be applied to all recorded data points.
	metricsCapacity int                  // maximum observed number of metrics per resource.
	metricsBuffer   pmetric.Metrics      // accumulates metrics data before emitting.
	buildInfo       component.BuildInfo  // contains version information.
	// Per-resource-attribute filters: a resource whose attribute fails an
	// include filter (or matches an exclude filter) is dropped in EmitForResource.
	resourceAttributeIncludeFilter map[string]filter.Filter
	resourceAttributeExcludeFilter map[string]filter.Filter
	// One buffered metric per generated metric definition.
	metricVarnishBackendConnectionCount  metricVarnishBackendConnectionCount
	metricVarnishBackendRequestCount     metricVarnishBackendRequestCount
	metricVarnishCacheOperationCount     metricVarnishCacheOperationCount
	metricVarnishClientRequestCount      metricVarnishClientRequestCount
	metricVarnishClientRequestErrorCount metricVarnishClientRequestErrorCount
	metricVarnishObjectCount             metricVarnishObjectCount
	metricVarnishObjectExpired           metricVarnishObjectExpired
	metricVarnishObjectMoved             metricVarnishObjectMoved
	metricVarnishObjectNuked             metricVarnishObjectNuked
	metricVarnishSessionCount            metricVarnishSessionCount
	metricVarnishThreadOperationCount    metricVarnishThreadOperationCount
}
// MetricBuilderOption applies changes to default metrics builder.
type MetricBuilderOption interface {
	apply(*MetricsBuilder)
}

// metricBuilderOptionFunc adapts a plain function to the MetricBuilderOption interface.
type metricBuilderOptionFunc func(mb *MetricsBuilder)

// apply invokes the wrapped function on the builder.
func (mbof metricBuilderOptionFunc) apply(mb *MetricsBuilder) {
	mbof(mb)
}

// WithStartTime sets startTime on the metrics builder.
func WithStartTime(startTime pcommon.Timestamp) MetricBuilderOption {
	return metricBuilderOptionFunc(func(mb *MetricsBuilder) {
		mb.startTime = startTime
	})
}
// NewMetricsBuilder creates a MetricsBuilder for the varnish receiver from the
// given config and collector settings, then applies any provided options
// (e.g. WithStartTime).
func NewMetricsBuilder(mbc MetricsBuilderConfig, settings receiver.Settings, options ...MetricBuilderOption) *MetricsBuilder {
	mb := &MetricsBuilder{
		config: mbc,
		// Default start time is "now"; override with WithStartTime if needed.
		startTime:                            pcommon.NewTimestampFromTime(time.Now()),
		metricsBuffer:                        pmetric.NewMetrics(),
		buildInfo:                            settings.BuildInfo,
		metricVarnishBackendConnectionCount:  newMetricVarnishBackendConnectionCount(mbc.Metrics.VarnishBackendConnectionCount),
		metricVarnishBackendRequestCount:     newMetricVarnishBackendRequestCount(mbc.Metrics.VarnishBackendRequestCount),
		metricVarnishCacheOperationCount:     newMetricVarnishCacheOperationCount(mbc.Metrics.VarnishCacheOperationCount),
		metricVarnishClientRequestCount:      newMetricVarnishClientRequestCount(mbc.Metrics.VarnishClientRequestCount),
		metricVarnishClientRequestErrorCount: newMetricVarnishClientRequestErrorCount(mbc.Metrics.VarnishClientRequestErrorCount),
		metricVarnishObjectCount:             newMetricVarnishObjectCount(mbc.Metrics.VarnishObjectCount),
		metricVarnishObjectExpired:           newMetricVarnishObjectExpired(mbc.Metrics.VarnishObjectExpired),
		metricVarnishObjectMoved:             newMetricVarnishObjectMoved(mbc.Metrics.VarnishObjectMoved),
		metricVarnishObjectNuked:             newMetricVarnishObjectNuked(mbc.Metrics.VarnishObjectNuked),
		metricVarnishSessionCount:            newMetricVarnishSessionCount(mbc.Metrics.VarnishSessionCount),
		metricVarnishThreadOperationCount:    newMetricVarnishThreadOperationCount(mbc.Metrics.VarnishThreadOperationCount),
		resourceAttributeIncludeFilter:       make(map[string]filter.Filter),
		resourceAttributeExcludeFilter:       make(map[string]filter.Filter),
	}
	// Optional include/exclude filters on the varnish.cache.name resource
	// attribute; non-matching resources are dropped in EmitForResource.
	if mbc.ResourceAttributes.VarnishCacheName.MetricsInclude != nil {
		mb.resourceAttributeIncludeFilter["varnish.cache.name"] = filter.CreateFilter(mbc.ResourceAttributes.VarnishCacheName.MetricsInclude)
	}
	if mbc.ResourceAttributes.VarnishCacheName.MetricsExclude != nil {
		mb.resourceAttributeExcludeFilter["varnish.cache.name"] = filter.CreateFilter(mbc.ResourceAttributes.VarnishCacheName.MetricsExclude)
	}
	for _, op := range options {
		op.apply(mb)
	}
	return mb
}
// NewResourceBuilder returns a new resource builder that should be used to build a resource associated with for the emitted metrics.
// The builder is seeded with the resource-attribute config held by mb.
func (mb *MetricsBuilder) NewResourceBuilder() *ResourceBuilder {
	return NewResourceBuilder(mb.config.ResourceAttributes)
}
// updateCapacity records the largest number of metrics seen for a single
// resource so that future scope-metric slices can be pre-allocated to that size.
func (mb *MetricsBuilder) updateCapacity(rm pmetric.ResourceMetrics) {
	if n := rm.ScopeMetrics().At(0).Metrics().Len(); n > mb.metricsCapacity {
		mb.metricsCapacity = n
	}
}
// ResourceMetricsOption applies changes to provided resource metrics.
type ResourceMetricsOption interface {
	apply(pmetric.ResourceMetrics)
}

// resourceMetricsOptionFunc adapts a plain function to the ResourceMetricsOption interface.
type resourceMetricsOptionFunc func(pmetric.ResourceMetrics)

// apply invokes the wrapped function on the resource metrics.
func (rmof resourceMetricsOptionFunc) apply(rm pmetric.ResourceMetrics) {
	rmof(rm)
}

// WithResource sets the provided resource on the emitted ResourceMetrics.
// It's recommended to use ResourceBuilder to create the resource.
func WithResource(res pcommon.Resource) ResourceMetricsOption {
	return resourceMetricsOptionFunc(func(rm pmetric.ResourceMetrics) {
		res.CopyTo(rm.Resource())
	})
}
// WithStartTimeOverride overrides start time for all the resource metrics data points.
// This option should be only used if different start time has to be set on metrics coming from different resources.
func WithStartTimeOverride(start pcommon.Timestamp) ResourceMetricsOption {
	return resourceMetricsOptionFunc(func(rm pmetric.ResourceMetrics) {
		var dps pmetric.NumberDataPointSlice
		metrics := rm.ScopeMetrics().At(0).Metrics()
		for i := 0; i < metrics.Len(); i++ {
			switch metrics.At(i).Type() {
			case pmetric.MetricTypeGauge:
				dps = metrics.At(i).Gauge().DataPoints()
			case pmetric.MetricTypeSum:
				dps = metrics.At(i).Sum().DataPoints()
			}
			// NOTE(review): for metric types other than gauge/sum, dps keeps the
			// previous iteration's slice and is re-stamped (harmless since setting
			// the same start twice is idempotent); every metric generated in this
			// package is a sum, so that case does not arise here.
			for j := 0; j < dps.Len(); j++ {
				dps.At(j).SetStartTimestamp(start)
			}
		}
	})
}
// EmitForResource saves all the generated metrics under a new resource and updates the internal state to be ready for
// recording another set of data points as part of another resource. This function can be helpful when one scraper
// needs to emit metrics from several resources. Otherwise calling this function is not required,
// just `Emit` function can be called instead.
// Resource attributes should be provided as ResourceMetricsOption arguments.
func (mb *MetricsBuilder) EmitForResource(options ...ResourceMetricsOption) {
	rm := pmetric.NewResourceMetrics()
	ils := rm.ScopeMetrics().AppendEmpty()
	ils.Scope().SetName(ScopeName)
	ils.Scope().SetVersion(mb.buildInfo.Version)
	ils.Metrics().EnsureCapacity(mb.metricsCapacity)
	// Flush every buffered metric into this resource's scope metrics.
	mb.metricVarnishBackendConnectionCount.emit(ils.Metrics())
	mb.metricVarnishBackendRequestCount.emit(ils.Metrics())
	mb.metricVarnishCacheOperationCount.emit(ils.Metrics())
	mb.metricVarnishClientRequestCount.emit(ils.Metrics())
	mb.metricVarnishClientRequestErrorCount.emit(ils.Metrics())
	mb.metricVarnishObjectCount.emit(ils.Metrics())
	mb.metricVarnishObjectExpired.emit(ils.Metrics())
	mb.metricVarnishObjectMoved.emit(ils.Metrics())
	mb.metricVarnishObjectNuked.emit(ils.Metrics())
	mb.metricVarnishSessionCount.emit(ils.Metrics())
	mb.metricVarnishThreadOperationCount.emit(ils.Metrics())
	// Apply options (e.g. WithResource) after emitting so the resource
	// attributes are in place before filtering below.
	for _, op := range options {
		op.apply(rm)
	}
	// Drop the whole resource if a filtered attribute fails an include filter...
	for attr, filter := range mb.resourceAttributeIncludeFilter {
		if val, ok := rm.Resource().Attributes().Get(attr); ok && !filter.Matches(val.AsString()) {
			return
		}
	}
	// ...or matches an exclude filter.
	for attr, filter := range mb.resourceAttributeExcludeFilter {
		if val, ok := rm.Resource().Attributes().Get(attr); ok && filter.Matches(val.AsString()) {
			return
		}
	}
	// Only buffer resources that actually carry metrics.
	if ils.Metrics().Len() > 0 {
		mb.updateCapacity(rm)
		rm.MoveTo(mb.metricsBuffer.ResourceMetrics().AppendEmpty())
	}
}
// Emit returns all the metrics accumulated by the metrics builder and updates the internal state to be ready for
// recording another set of metrics. This function will be responsible for applying all the transformations required to
// produce metric representation defined in metadata and user config, e.g. delta or cumulative.
func (mb *MetricsBuilder) Emit(options ...ResourceMetricsOption) pmetric.Metrics {
	mb.EmitForResource(options...)
	// Hand the accumulated buffer to the caller and start a fresh one.
	out := mb.metricsBuffer
	mb.metricsBuffer = pmetric.NewMetrics()
	return out
}
// RecordVarnishBackendConnectionCountDataPoint adds a data point to varnish.backend.connection.count metric.
func (mb *MetricsBuilder) RecordVarnishBackendConnectionCountDataPoint(ts pcommon.Timestamp, val int64, backendConnectionTypeAttributeValue AttributeBackendConnectionType) {
	connType := backendConnectionTypeAttributeValue.String()
	mb.metricVarnishBackendConnectionCount.recordDataPoint(mb.startTime, ts, val, connType)
}
// RecordVarnishBackendRequestCountDataPoint adds a data point to varnish.backend.request.count metric.
// The builder's startTime is used as the data point's start timestamp.
func (mb *MetricsBuilder) RecordVarnishBackendRequestCountDataPoint(ts pcommon.Timestamp, val int64) {
	mb.metricVarnishBackendRequestCount.recordDataPoint(mb.startTime, ts, val)
}
// RecordVarnishCacheOperationCountDataPoint adds a data point to varnish.cache.operation.count metric.
func (mb *MetricsBuilder) RecordVarnishCacheOperationCountDataPoint(ts pcommon.Timestamp, val int64, cacheOperationsAttributeValue AttributeCacheOperations) {
	op := cacheOperationsAttributeValue.String()
	mb.metricVarnishCacheOperationCount.recordDataPoint(mb.startTime, ts, val, op)
}
// RecordVarnishClientRequestCountDataPoint adds a data point to varnish.client.request.count metric.
func (mb *MetricsBuilder) RecordVarnishClientRequestCountDataPoint(ts pcommon.Timestamp, val int64, stateAttributeValue AttributeState) {
	state := stateAttributeValue.String()
	mb.metricVarnishClientRequestCount.recordDataPoint(mb.startTime, ts, val, state)
}
// RecordVarnishClientRequestErrorCountDataPoint adds a data point to varnish.client.request.error.count metric.
// Unlike the enum-typed attributes, the HTTP status code is passed through as
// a raw string and is not validated here.
func (mb *MetricsBuilder) RecordVarnishClientRequestErrorCountDataPoint(ts pcommon.Timestamp, val int64, httpStatusCodeAttributeValue string) {
	mb.metricVarnishClientRequestErrorCount.recordDataPoint(mb.startTime, ts, val, httpStatusCodeAttributeValue)
}
// RecordVarnishObjectCountDataPoint adds a data point to varnish.object.count metric.
// The builder's startTime is used as the data point's start timestamp.
func (mb *MetricsBuilder) RecordVarnishObjectCountDataPoint(ts pcommon.Timestamp, val int64) {
	mb.metricVarnishObjectCount.recordDataPoint(mb.startTime, ts, val)
}
// RecordVarnishObjectExpiredDataPoint adds a data point to varnish.object.expired metric.
// The builder's startTime is used as the data point's start timestamp.
func (mb *MetricsBuilder) RecordVarnishObjectExpiredDataPoint(ts pcommon.Timestamp, val int64) {
	mb.metricVarnishObjectExpired.recordDataPoint(mb.startTime, ts, val)
}
// RecordVarnishObjectMovedDataPoint adds a data point to varnish.object.moved metric.
// The builder's startTime is used as the data point's start timestamp.
func (mb *MetricsBuilder) RecordVarnishObjectMovedDataPoint(ts pcommon.Timestamp, val int64) {
	mb.metricVarnishObjectMoved.recordDataPoint(mb.startTime, ts, val)
}
// RecordVarnishObjectNukedDataPoint adds a data point to varnish.object.nuked metric.
// The builder's startTime is used as the data point's start timestamp.
func (mb *MetricsBuilder) RecordVarnishObjectNukedDataPoint(ts pcommon.Timestamp, val int64) {
	mb.metricVarnishObjectNuked.recordDataPoint(mb.startTime, ts, val)
}
// RecordVarnishSessionCountDataPoint adds a data point to varnish.session.count metric.
func (mb *MetricsBuilder) RecordVarnishSessionCountDataPoint(ts pcommon.Timestamp, val int64, sessionTypeAttributeValue AttributeSessionType) {
	sessType := sessionTypeAttributeValue.String()
	mb.metricVarnishSessionCount.recordDataPoint(mb.startTime, ts, val, sessType)
}
// RecordVarnishThreadOperationCountDataPoint adds a data point to varnish.thread.operation.count metric.
func (mb *MetricsBuilder) RecordVarnishThreadOperationCountDataPoint(ts pcommon.Timestamp, val int64, threadOperationsAttributeValue AttributeThreadOperations) {
	op := threadOperationsAttributeValue.String()
	mb.metricVarnishThreadOperationCount.recordDataPoint(mb.startTime, ts, val, op)
}
// Reset resets metrics builder to its initial state. It should be used when external metrics source is restarted,
// and metrics builder should update its startTime and reset its internal state accordingly.
func (mb *MetricsBuilder) Reset(options ...MetricBuilderOption) {
	// A fresh start timestamp marks the beginning of a new cumulative series.
	mb.startTime = pcommon.NewTimestampFromTime(time.Now())
	for _, opt := range options {
		opt.apply(mb)
	}
}