// receiver/saphanareceiver/internal/metadata/generated_metrics.go
// Code generated by mdatagen. DO NOT EDIT.
package metadata
import (
"fmt"
"strconv"
"time"
"go.opentelemetry.io/collector/component"
"go.opentelemetry.io/collector/filter"
"go.opentelemetry.io/collector/pdata/pcommon"
"go.opentelemetry.io/collector/pdata/pmetric"
"go.opentelemetry.io/collector/receiver"
)
// AttributeActivePendingRequestState specifies the value of the active_pending_request_state attribute.
type AttributeActivePendingRequestState int
const (
_ AttributeActivePendingRequestState = iota
AttributeActivePendingRequestStateActive
AttributeActivePendingRequestStatePending
)
// String returns the string representation of the AttributeActivePendingRequestState.
func (av AttributeActivePendingRequestState) String() string {
switch av {
case AttributeActivePendingRequestStateActive:
return "active"
case AttributeActivePendingRequestStatePending:
return "pending"
}
return ""
}
// MapAttributeActivePendingRequestState is a helper map of string to AttributeActivePendingRequestState attribute value.
var MapAttributeActivePendingRequestState = map[string]AttributeActivePendingRequestState{
"active": AttributeActivePendingRequestStateActive,
"pending": AttributeActivePendingRequestStatePending,
}
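// Example (illustrative, not part of the generated API): the helper map is the
// typical way to validate a raw string before using the typed constant, and
// String provides the reverse mapping. The same pattern applies to every
// attribute type below:
//
//	if state, ok := MapAttributeActivePendingRequestState["active"]; ok {
//		_ = state.String() // "active"
//	}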
// AttributeColumnMemorySubtype specifies the value of the column_memory_subtype attribute.
type AttributeColumnMemorySubtype int
const (
_ AttributeColumnMemorySubtype = iota
AttributeColumnMemorySubtypeData
AttributeColumnMemorySubtypeDict
AttributeColumnMemorySubtypeIndex
AttributeColumnMemorySubtypeMisc
)
// String returns the string representation of the AttributeColumnMemorySubtype.
func (av AttributeColumnMemorySubtype) String() string {
switch av {
case AttributeColumnMemorySubtypeData:
return "data"
case AttributeColumnMemorySubtypeDict:
return "dict"
case AttributeColumnMemorySubtypeIndex:
return "index"
case AttributeColumnMemorySubtypeMisc:
return "misc"
}
return ""
}
// MapAttributeColumnMemorySubtype is a helper map of string to AttributeColumnMemorySubtype attribute value.
var MapAttributeColumnMemorySubtype = map[string]AttributeColumnMemorySubtype{
"data": AttributeColumnMemorySubtypeData,
"dict": AttributeColumnMemorySubtypeDict,
"index": AttributeColumnMemorySubtypeIndex,
"misc": AttributeColumnMemorySubtypeMisc,
}
// AttributeColumnMemoryType specifies the value of the column_memory_type attribute.
type AttributeColumnMemoryType int
const (
_ AttributeColumnMemoryType = iota
AttributeColumnMemoryTypeMain
AttributeColumnMemoryTypeDelta
)
// String returns the string representation of the AttributeColumnMemoryType.
func (av AttributeColumnMemoryType) String() string {
switch av {
case AttributeColumnMemoryTypeMain:
return "main"
case AttributeColumnMemoryTypeDelta:
return "delta"
}
return ""
}
// MapAttributeColumnMemoryType is a helper map of string to AttributeColumnMemoryType attribute value.
var MapAttributeColumnMemoryType = map[string]AttributeColumnMemoryType{
"main": AttributeColumnMemoryTypeMain,
"delta": AttributeColumnMemoryTypeDelta,
}
// AttributeConnectionStatus specifies the value of the connection_status attribute.
type AttributeConnectionStatus int
const (
_ AttributeConnectionStatus = iota
AttributeConnectionStatusRunning
AttributeConnectionStatusIdle
AttributeConnectionStatusQueueing
)
// String returns the string representation of the AttributeConnectionStatus.
func (av AttributeConnectionStatus) String() string {
switch av {
case AttributeConnectionStatusRunning:
return "running"
case AttributeConnectionStatusIdle:
return "idle"
case AttributeConnectionStatusQueueing:
return "queueing"
}
return ""
}
// MapAttributeConnectionStatus is a helper map of string to AttributeConnectionStatus attribute value.
var MapAttributeConnectionStatus = map[string]AttributeConnectionStatus{
"running": AttributeConnectionStatusRunning,
"idle": AttributeConnectionStatusIdle,
"queueing": AttributeConnectionStatusQueueing,
}
// AttributeCPUType specifies the value of the cpu_type attribute.
type AttributeCPUType int
const (
_ AttributeCPUType = iota
AttributeCPUTypeUser
AttributeCPUTypeSystem
AttributeCPUTypeIoWait
AttributeCPUTypeIdle
)
// String returns the string representation of the AttributeCPUType.
func (av AttributeCPUType) String() string {
switch av {
case AttributeCPUTypeUser:
return "user"
case AttributeCPUTypeSystem:
return "system"
case AttributeCPUTypeIoWait:
return "io_wait"
case AttributeCPUTypeIdle:
return "idle"
}
return ""
}
// MapAttributeCPUType is a helper map of string to AttributeCPUType attribute value.
var MapAttributeCPUType = map[string]AttributeCPUType{
"user": AttributeCPUTypeUser,
"system": AttributeCPUTypeSystem,
"io_wait": AttributeCPUTypeIoWait,
"idle": AttributeCPUTypeIdle,
}
// AttributeDiskStateUsedFree specifies the value of the disk_state_used_free attribute.
type AttributeDiskStateUsedFree int
const (
_ AttributeDiskStateUsedFree = iota
AttributeDiskStateUsedFreeUsed
AttributeDiskStateUsedFreeFree
)
// String returns the string representation of the AttributeDiskStateUsedFree.
func (av AttributeDiskStateUsedFree) String() string {
switch av {
case AttributeDiskStateUsedFreeUsed:
return "used"
case AttributeDiskStateUsedFreeFree:
return "free"
}
return ""
}
// MapAttributeDiskStateUsedFree is a helper map of string to AttributeDiskStateUsedFree attribute value.
var MapAttributeDiskStateUsedFree = map[string]AttributeDiskStateUsedFree{
"used": AttributeDiskStateUsedFreeUsed,
"free": AttributeDiskStateUsedFreeFree,
}
// AttributeHostSwapState specifies the value of the host_swap_state attribute.
type AttributeHostSwapState int
const (
_ AttributeHostSwapState = iota
AttributeHostSwapStateUsed
AttributeHostSwapStateFree
)
// String returns the string representation of the AttributeHostSwapState.
func (av AttributeHostSwapState) String() string {
switch av {
case AttributeHostSwapStateUsed:
return "used"
case AttributeHostSwapStateFree:
return "free"
}
return ""
}
// MapAttributeHostSwapState is a helper map of string to AttributeHostSwapState attribute value.
var MapAttributeHostSwapState = map[string]AttributeHostSwapState{
"used": AttributeHostSwapStateUsed,
"free": AttributeHostSwapStateFree,
}
// AttributeInternalExternalRequestType specifies the value of the internal_external_request_type attribute.
type AttributeInternalExternalRequestType int
const (
_ AttributeInternalExternalRequestType = iota
AttributeInternalExternalRequestTypeInternal
AttributeInternalExternalRequestTypeExternal
)
// String returns the string representation of the AttributeInternalExternalRequestType.
func (av AttributeInternalExternalRequestType) String() string {
switch av {
case AttributeInternalExternalRequestTypeInternal:
return "internal"
case AttributeInternalExternalRequestTypeExternal:
return "external"
}
return ""
}
// MapAttributeInternalExternalRequestType is a helper map of string to AttributeInternalExternalRequestType attribute value.
var MapAttributeInternalExternalRequestType = map[string]AttributeInternalExternalRequestType{
"internal": AttributeInternalExternalRequestTypeInternal,
"external": AttributeInternalExternalRequestTypeExternal,
}
// AttributeMemoryStateUsedFree specifies the value of the memory_state_used_free attribute.
type AttributeMemoryStateUsedFree int
const (
_ AttributeMemoryStateUsedFree = iota
AttributeMemoryStateUsedFreeUsed
AttributeMemoryStateUsedFreeFree
)
// String returns the string representation of the AttributeMemoryStateUsedFree.
func (av AttributeMemoryStateUsedFree) String() string {
switch av {
case AttributeMemoryStateUsedFreeUsed:
return "used"
case AttributeMemoryStateUsedFreeFree:
return "free"
}
return ""
}
// MapAttributeMemoryStateUsedFree is a helper map of string to AttributeMemoryStateUsedFree attribute value.
var MapAttributeMemoryStateUsedFree = map[string]AttributeMemoryStateUsedFree{
"used": AttributeMemoryStateUsedFreeUsed,
"free": AttributeMemoryStateUsedFreeFree,
}
// AttributeRowMemoryType specifies the value of the row_memory_type attribute.
type AttributeRowMemoryType int
const (
_ AttributeRowMemoryType = iota
AttributeRowMemoryTypeFixed
AttributeRowMemoryTypeVariable
)
// String returns the string representation of the AttributeRowMemoryType.
func (av AttributeRowMemoryType) String() string {
switch av {
case AttributeRowMemoryTypeFixed:
return "fixed"
case AttributeRowMemoryTypeVariable:
return "variable"
}
return ""
}
// MapAttributeRowMemoryType is a helper map of string to AttributeRowMemoryType attribute value.
var MapAttributeRowMemoryType = map[string]AttributeRowMemoryType{
"fixed": AttributeRowMemoryTypeFixed,
"variable": AttributeRowMemoryTypeVariable,
}
// AttributeSchemaMemoryType specifies the value of the schema_memory_type attribute.
type AttributeSchemaMemoryType int
const (
_ AttributeSchemaMemoryType = iota
AttributeSchemaMemoryTypeMain
AttributeSchemaMemoryTypeDelta
AttributeSchemaMemoryTypeHistoryMain
AttributeSchemaMemoryTypeHistoryDelta
)
// String returns the string representation of the AttributeSchemaMemoryType.
func (av AttributeSchemaMemoryType) String() string {
switch av {
case AttributeSchemaMemoryTypeMain:
return "main"
case AttributeSchemaMemoryTypeDelta:
return "delta"
case AttributeSchemaMemoryTypeHistoryMain:
return "history_main"
case AttributeSchemaMemoryTypeHistoryDelta:
return "history_delta"
}
return ""
}
// MapAttributeSchemaMemoryType is a helper map of string to AttributeSchemaMemoryType attribute value.
var MapAttributeSchemaMemoryType = map[string]AttributeSchemaMemoryType{
"main": AttributeSchemaMemoryTypeMain,
"delta": AttributeSchemaMemoryTypeDelta,
"history_main": AttributeSchemaMemoryTypeHistoryMain,
"history_delta": AttributeSchemaMemoryTypeHistoryDelta,
}
// AttributeSchemaOperationType specifies the value of the schema_operation_type attribute.
type AttributeSchemaOperationType int
const (
_ AttributeSchemaOperationType = iota
AttributeSchemaOperationTypeRead
AttributeSchemaOperationTypeWrite
AttributeSchemaOperationTypeMerge
)
// String returns the string representation of the AttributeSchemaOperationType.
func (av AttributeSchemaOperationType) String() string {
switch av {
case AttributeSchemaOperationTypeRead:
return "read"
case AttributeSchemaOperationTypeWrite:
return "write"
case AttributeSchemaOperationTypeMerge:
return "merge"
}
return ""
}
// MapAttributeSchemaOperationType is a helper map of string to AttributeSchemaOperationType attribute value.
var MapAttributeSchemaOperationType = map[string]AttributeSchemaOperationType{
"read": AttributeSchemaOperationTypeRead,
"write": AttributeSchemaOperationTypeWrite,
"merge": AttributeSchemaOperationTypeMerge,
}
// AttributeSchemaRecordType specifies the value of the schema_record_type attribute.
type AttributeSchemaRecordType int
const (
_ AttributeSchemaRecordType = iota
AttributeSchemaRecordTypeMain
AttributeSchemaRecordTypeDelta
AttributeSchemaRecordTypeHistoryMain
AttributeSchemaRecordTypeHistoryDelta
)
// String returns the string representation of the AttributeSchemaRecordType.
func (av AttributeSchemaRecordType) String() string {
switch av {
case AttributeSchemaRecordTypeMain:
return "main"
case AttributeSchemaRecordTypeDelta:
return "delta"
case AttributeSchemaRecordTypeHistoryMain:
return "history_main"
case AttributeSchemaRecordTypeHistoryDelta:
return "history_delta"
}
return ""
}
// MapAttributeSchemaRecordType is a helper map of string to AttributeSchemaRecordType attribute value.
var MapAttributeSchemaRecordType = map[string]AttributeSchemaRecordType{
"main": AttributeSchemaRecordTypeMain,
"delta": AttributeSchemaRecordTypeDelta,
"history_main": AttributeSchemaRecordTypeHistoryMain,
"history_delta": AttributeSchemaRecordTypeHistoryDelta,
}
// AttributeServiceMemoryUsedType specifies the value of the service_memory_used_type attribute.
type AttributeServiceMemoryUsedType int
const (
_ AttributeServiceMemoryUsedType = iota
AttributeServiceMemoryUsedTypeLogical
AttributeServiceMemoryUsedTypePhysical
)
// String returns the string representation of the AttributeServiceMemoryUsedType.
func (av AttributeServiceMemoryUsedType) String() string {
switch av {
case AttributeServiceMemoryUsedTypeLogical:
return "logical"
case AttributeServiceMemoryUsedTypePhysical:
return "physical"
}
return ""
}
// MapAttributeServiceMemoryUsedType is a helper map of string to AttributeServiceMemoryUsedType attribute value.
var MapAttributeServiceMemoryUsedType = map[string]AttributeServiceMemoryUsedType{
"logical": AttributeServiceMemoryUsedTypeLogical,
"physical": AttributeServiceMemoryUsedTypePhysical,
}
// AttributeServiceStatus specifies the value of the service_status attribute.
type AttributeServiceStatus int
const (
_ AttributeServiceStatus = iota
AttributeServiceStatusActive
AttributeServiceStatusInactive
)
// String returns the string representation of the AttributeServiceStatus.
func (av AttributeServiceStatus) String() string {
switch av {
case AttributeServiceStatusActive:
return "active"
case AttributeServiceStatusInactive:
return "inactive"
}
return ""
}
// MapAttributeServiceStatus is a helper map of string to AttributeServiceStatus attribute value.
var MapAttributeServiceStatus = map[string]AttributeServiceStatus{
"active": AttributeServiceStatusActive,
"inactive": AttributeServiceStatusInactive,
}
// AttributeThreadStatus specifies the value of the thread_status attribute.
type AttributeThreadStatus int
const (
_ AttributeThreadStatus = iota
AttributeThreadStatusActive
AttributeThreadStatusInactive
)
// String returns the string representation of the AttributeThreadStatus.
func (av AttributeThreadStatus) String() string {
switch av {
case AttributeThreadStatusActive:
return "active"
case AttributeThreadStatusInactive:
return "inactive"
}
return ""
}
// MapAttributeThreadStatus is a helper map of string to AttributeThreadStatus attribute value.
var MapAttributeThreadStatus = map[string]AttributeThreadStatus{
"active": AttributeThreadStatusActive,
"inactive": AttributeThreadStatusInactive,
}
// AttributeTransactionType specifies the value of the transaction_type attribute.
type AttributeTransactionType int
const (
_ AttributeTransactionType = iota
AttributeTransactionTypeUpdate
AttributeTransactionTypeCommit
AttributeTransactionTypeRollback
)
// String returns the string representation of the AttributeTransactionType.
func (av AttributeTransactionType) String() string {
switch av {
case AttributeTransactionTypeUpdate:
return "update"
case AttributeTransactionTypeCommit:
return "commit"
case AttributeTransactionTypeRollback:
return "rollback"
}
return ""
}
// MapAttributeTransactionType is a helper map of string to AttributeTransactionType attribute value.
var MapAttributeTransactionType = map[string]AttributeTransactionType{
"update": AttributeTransactionTypeUpdate,
"commit": AttributeTransactionTypeCommit,
"rollback": AttributeTransactionTypeRollback,
}
// AttributeVolumeOperationType specifies the value of the volume_operation_type attribute.
type AttributeVolumeOperationType int
const (
_ AttributeVolumeOperationType = iota
AttributeVolumeOperationTypeRead
AttributeVolumeOperationTypeWrite
)
// String returns the string representation of the AttributeVolumeOperationType.
func (av AttributeVolumeOperationType) String() string {
switch av {
case AttributeVolumeOperationTypeRead:
return "read"
case AttributeVolumeOperationTypeWrite:
return "write"
}
return ""
}
// MapAttributeVolumeOperationType is a helper map of string to AttributeVolumeOperationType attribute value.
var MapAttributeVolumeOperationType = map[string]AttributeVolumeOperationType{
"read": AttributeVolumeOperationTypeRead,
"write": AttributeVolumeOperationTypeWrite,
}
var MetricsInfo = metricsInfo{
SaphanaAlertCount: metricInfo{
Name: "saphana.alert.count",
},
SaphanaBackupLatest: metricInfo{
Name: "saphana.backup.latest",
},
SaphanaColumnMemoryUsed: metricInfo{
Name: "saphana.column.memory.used",
},
SaphanaComponentMemoryUsed: metricInfo{
Name: "saphana.component.memory.used",
},
SaphanaConnectionCount: metricInfo{
Name: "saphana.connection.count",
},
SaphanaCPUUsed: metricInfo{
Name: "saphana.cpu.used",
},
SaphanaDiskSizeCurrent: metricInfo{
Name: "saphana.disk.size.current",
},
SaphanaHostMemoryCurrent: metricInfo{
Name: "saphana.host.memory.current",
},
SaphanaHostSwapCurrent: metricInfo{
Name: "saphana.host.swap.current",
},
SaphanaInstanceCodeSize: metricInfo{
Name: "saphana.instance.code_size",
},
SaphanaInstanceMemoryCurrent: metricInfo{
Name: "saphana.instance.memory.current",
},
SaphanaInstanceMemorySharedAllocated: metricInfo{
Name: "saphana.instance.memory.shared.allocated",
},
SaphanaInstanceMemoryUsedPeak: metricInfo{
Name: "saphana.instance.memory.used.peak",
},
SaphanaLicenseExpirationTime: metricInfo{
Name: "saphana.license.expiration.time",
},
SaphanaLicenseLimit: metricInfo{
Name: "saphana.license.limit",
},
SaphanaLicensePeak: metricInfo{
Name: "saphana.license.peak",
},
SaphanaNetworkRequestAverageTime: metricInfo{
Name: "saphana.network.request.average_time",
},
SaphanaNetworkRequestCount: metricInfo{
Name: "saphana.network.request.count",
},
SaphanaNetworkRequestFinishedCount: metricInfo{
Name: "saphana.network.request.finished.count",
},
SaphanaReplicationAverageTime: metricInfo{
Name: "saphana.replication.average_time",
},
SaphanaReplicationBacklogSize: metricInfo{
Name: "saphana.replication.backlog.size",
},
SaphanaReplicationBacklogTime: metricInfo{
Name: "saphana.replication.backlog.time",
},
SaphanaRowStoreMemoryUsed: metricInfo{
Name: "saphana.row_store.memory.used",
},
SaphanaSchemaMemoryUsedCurrent: metricInfo{
Name: "saphana.schema.memory.used.current",
},
SaphanaSchemaMemoryUsedMax: metricInfo{
Name: "saphana.schema.memory.used.max",
},
SaphanaSchemaOperationCount: metricInfo{
Name: "saphana.schema.operation.count",
},
SaphanaSchemaRecordCompressedCount: metricInfo{
Name: "saphana.schema.record.compressed.count",
},
SaphanaSchemaRecordCount: metricInfo{
Name: "saphana.schema.record.count",
},
SaphanaServiceCodeSize: metricInfo{
Name: "saphana.service.code_size",
},
SaphanaServiceCount: metricInfo{
Name: "saphana.service.count",
},
SaphanaServiceMemoryCompactorsAllocated: metricInfo{
Name: "saphana.service.memory.compactors.allocated",
},
SaphanaServiceMemoryCompactorsFreeable: metricInfo{
Name: "saphana.service.memory.compactors.freeable",
},
SaphanaServiceMemoryEffectiveLimit: metricInfo{
Name: "saphana.service.memory.effective_limit",
},
SaphanaServiceMemoryHeapCurrent: metricInfo{
Name: "saphana.service.memory.heap.current",
},
SaphanaServiceMemoryLimit: metricInfo{
Name: "saphana.service.memory.limit",
},
SaphanaServiceMemorySharedCurrent: metricInfo{
Name: "saphana.service.memory.shared.current",
},
SaphanaServiceMemoryUsed: metricInfo{
Name: "saphana.service.memory.used",
},
SaphanaServiceStackSize: metricInfo{
Name: "saphana.service.stack_size",
},
SaphanaServiceThreadCount: metricInfo{
Name: "saphana.service.thread.count",
},
SaphanaTransactionBlocked: metricInfo{
Name: "saphana.transaction.blocked",
},
SaphanaTransactionCount: metricInfo{
Name: "saphana.transaction.count",
},
SaphanaUptime: metricInfo{
Name: "saphana.uptime",
},
SaphanaVolumeOperationCount: metricInfo{
Name: "saphana.volume.operation.count",
},
SaphanaVolumeOperationSize: metricInfo{
Name: "saphana.volume.operation.size",
},
SaphanaVolumeOperationTime: metricInfo{
Name: "saphana.volume.operation.time",
},
}
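// Example (illustrative): MetricsInfo exposes the canonical metric names, so
// callers can reference them without hardcoding strings:
//
//	name := MetricsInfo.SaphanaAlertCount.Name // "saphana.alert.count"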
type metricsInfo struct {
SaphanaAlertCount metricInfo
SaphanaBackupLatest metricInfo
SaphanaColumnMemoryUsed metricInfo
SaphanaComponentMemoryUsed metricInfo
SaphanaConnectionCount metricInfo
SaphanaCPUUsed metricInfo
SaphanaDiskSizeCurrent metricInfo
SaphanaHostMemoryCurrent metricInfo
SaphanaHostSwapCurrent metricInfo
SaphanaInstanceCodeSize metricInfo
SaphanaInstanceMemoryCurrent metricInfo
SaphanaInstanceMemorySharedAllocated metricInfo
SaphanaInstanceMemoryUsedPeak metricInfo
SaphanaLicenseExpirationTime metricInfo
SaphanaLicenseLimit metricInfo
SaphanaLicensePeak metricInfo
SaphanaNetworkRequestAverageTime metricInfo
SaphanaNetworkRequestCount metricInfo
SaphanaNetworkRequestFinishedCount metricInfo
SaphanaReplicationAverageTime metricInfo
SaphanaReplicationBacklogSize metricInfo
SaphanaReplicationBacklogTime metricInfo
SaphanaRowStoreMemoryUsed metricInfo
SaphanaSchemaMemoryUsedCurrent metricInfo
SaphanaSchemaMemoryUsedMax metricInfo
SaphanaSchemaOperationCount metricInfo
SaphanaSchemaRecordCompressedCount metricInfo
SaphanaSchemaRecordCount metricInfo
SaphanaServiceCodeSize metricInfo
SaphanaServiceCount metricInfo
SaphanaServiceMemoryCompactorsAllocated metricInfo
SaphanaServiceMemoryCompactorsFreeable metricInfo
SaphanaServiceMemoryEffectiveLimit metricInfo
SaphanaServiceMemoryHeapCurrent metricInfo
SaphanaServiceMemoryLimit metricInfo
SaphanaServiceMemorySharedCurrent metricInfo
SaphanaServiceMemoryUsed metricInfo
SaphanaServiceStackSize metricInfo
SaphanaServiceThreadCount metricInfo
SaphanaTransactionBlocked metricInfo
SaphanaTransactionCount metricInfo
SaphanaUptime metricInfo
SaphanaVolumeOperationCount metricInfo
SaphanaVolumeOperationSize metricInfo
SaphanaVolumeOperationTime metricInfo
}
type metricInfo struct {
Name string
}
type metricSaphanaAlertCount struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills saphana.alert.count metric with initial data.
func (m *metricSaphanaAlertCount) init() {
m.data.SetName("saphana.alert.count")
m.data.SetDescription("Number of current alerts.")
m.data.SetUnit("{alerts}")
m.data.SetEmptySum()
m.data.Sum().SetIsMonotonic(false)
m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricSaphanaAlertCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, alertRatingAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Sum().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
dp.Attributes().PutStr("rating", alertRatingAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricSaphanaAlertCount) updateCapacity() {
if m.data.Sum().DataPoints().Len() > m.capacity {
m.capacity = m.data.Sum().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricSaphanaAlertCount) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricSaphanaAlertCount(cfg MetricConfig) metricSaphanaAlertCount {
m := metricSaphanaAlertCount{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
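// Example (illustrative sketch, not part of the generated API): the rest of
// this generated package typically drives each metric through the same
// lifecycle: construct with a config, buffer data points, then emit into a
// pmetric.MetricSlice, which moves the data out and re-initializes the buffer
// for the next scrape. The rating value "4" below is hypothetical:
//
//	m := newMetricSaphanaAlertCount(MetricConfig{Enabled: true})
//	now := pcommon.NewTimestampFromTime(time.Now())
//	m.recordDataPoint(now, now, 3, "4")
//	ms := pmetric.NewMetricSlice()
//	m.emit(ms) // ms now holds one "saphana.alert.count" metric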
type metricSaphanaBackupLatest struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills saphana.backup.latest metric with initial data.
func (m *metricSaphanaBackupLatest) init() {
m.data.SetName("saphana.backup.latest")
m.data.SetDescription("The age of the latest backup by start time.")
m.data.SetUnit("s")
m.data.SetEmptyGauge()
}
func (m *metricSaphanaBackupLatest) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricSaphanaBackupLatest) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricSaphanaBackupLatest) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricSaphanaBackupLatest(cfg MetricConfig) metricSaphanaBackupLatest {
m := metricSaphanaBackupLatest{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
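// Note (illustrative): gauge metrics such as saphana.backup.latest carry no
// monotonicity or aggregation temporality; each recorded point is a
// standalone observation:
//
//	m := newMetricSaphanaBackupLatest(MetricConfig{Enabled: true})
//	now := pcommon.NewTimestampFromTime(time.Now())
//	m.recordDataPoint(now, now, 3600) // latest backup started 3600s ago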
type metricSaphanaColumnMemoryUsed struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills saphana.column.memory.used metric with initial data.
func (m *metricSaphanaColumnMemoryUsed) init() {
m.data.SetName("saphana.column.memory.used")
m.data.SetDescription("The memory used in all columns.")
m.data.SetUnit("By")
m.data.SetEmptySum()
m.data.Sum().SetIsMonotonic(false)
m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricSaphanaColumnMemoryUsed) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, columnMemoryTypeAttributeValue string, columnMemorySubtypeAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Sum().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
dp.Attributes().PutStr("type", columnMemoryTypeAttributeValue)
dp.Attributes().PutStr("subtype", columnMemorySubtypeAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricSaphanaColumnMemoryUsed) updateCapacity() {
if m.data.Sum().DataPoints().Len() > m.capacity {
m.capacity = m.data.Sum().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricSaphanaColumnMemoryUsed) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricSaphanaColumnMemoryUsed(cfg MetricConfig) metricSaphanaColumnMemoryUsed {
m := metricSaphanaColumnMemoryUsed{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricSaphanaComponentMemoryUsed struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills saphana.component.memory.used metric with initial data.
func (m *metricSaphanaComponentMemoryUsed) init() {
m.data.SetName("saphana.component.memory.used")
m.data.SetDescription("The memory used in components.")
m.data.SetUnit("By")
m.data.SetEmptySum()
m.data.Sum().SetIsMonotonic(false)
m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricSaphanaComponentMemoryUsed) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, componentAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Sum().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
dp.Attributes().PutStr("component", componentAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricSaphanaComponentMemoryUsed) updateCapacity() {
if m.data.Sum().DataPoints().Len() > m.capacity {
m.capacity = m.data.Sum().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricSaphanaComponentMemoryUsed) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricSaphanaComponentMemoryUsed(cfg MetricConfig) metricSaphanaComponentMemoryUsed {
m := metricSaphanaComponentMemoryUsed{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricSaphanaConnectionCount struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills saphana.connection.count metric with initial data.
func (m *metricSaphanaConnectionCount) init() {
m.data.SetName("saphana.connection.count")
m.data.SetDescription("The number of current connections.")
m.data.SetUnit("{connections}")
m.data.SetEmptySum()
m.data.Sum().SetIsMonotonic(false)
m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricSaphanaConnectionCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, connectionStatusAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Sum().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
dp.Attributes().PutStr("status", connectionStatusAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricSaphanaConnectionCount) updateCapacity() {
if m.data.Sum().DataPoints().Len() > m.capacity {
m.capacity = m.data.Sum().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricSaphanaConnectionCount) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricSaphanaConnectionCount(cfg MetricConfig) metricSaphanaConnectionCount {
m := metricSaphanaConnectionCount{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricSaphanaCPUUsed struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills saphana.cpu.used metric with initial data.
func (m *metricSaphanaCPUUsed) init() {
m.data.SetName("saphana.cpu.used")
m.data.SetDescription("Total CPU time spent.")
m.data.SetUnit("ms")
m.data.SetEmptySum()
m.data.Sum().SetIsMonotonic(true)
m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricSaphanaCPUUsed) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, cpuTypeAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Sum().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
dp.Attributes().PutStr("type", cpuTypeAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricSaphanaCPUUsed) updateCapacity() {
if m.data.Sum().DataPoints().Len() > m.capacity {
m.capacity = m.data.Sum().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricSaphanaCPUUsed) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricSaphanaCPUUsed(cfg MetricConfig) metricSaphanaCPUUsed {
m := metricSaphanaCPUUsed{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
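// Note (illustrative): unlike most sums in this file, saphana.cpu.used is a
// monotonic cumulative sum, so a backend may derive a rate from two
// consecutive points; curVal, prevVal, curTS, and prevTS are hypothetical:
//
//	cpuMsPerSec := float64(curVal-prevVal) / curTS.AsTime().Sub(prevTS.AsTime()).Seconds()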
type metricSaphanaDiskSizeCurrent struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills saphana.disk.size.current metric with initial data.
func (m *metricSaphanaDiskSizeCurrent) init() {
m.data.SetName("saphana.disk.size.current")
m.data.SetDescription("The disk size.")
m.data.SetUnit("By")
m.data.SetEmptySum()
m.data.Sum().SetIsMonotonic(false)
m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricSaphanaDiskSizeCurrent) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, pathAttributeValue string, diskUsageTypeAttributeValue string, diskStateUsedFreeAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Sum().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
dp.Attributes().PutStr("path", pathAttributeValue)
dp.Attributes().PutStr("usage_type", diskUsageTypeAttributeValue)
dp.Attributes().PutStr("state", diskStateUsedFreeAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricSaphanaDiskSizeCurrent) updateCapacity() {
if m.data.Sum().DataPoints().Len() > m.capacity {
m.capacity = m.data.Sum().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricSaphanaDiskSizeCurrent) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricSaphanaDiskSizeCurrent(cfg MetricConfig) metricSaphanaDiskSizeCurrent {
m := metricSaphanaDiskSizeCurrent{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricSaphanaHostMemoryCurrent struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills saphana.host.memory.current metric with initial data.
func (m *metricSaphanaHostMemoryCurrent) init() {
m.data.SetName("saphana.host.memory.current")
m.data.SetDescription("The amount of physical memory on the host.")
m.data.SetUnit("By")
m.data.SetEmptySum()
m.data.Sum().SetIsMonotonic(false)
m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricSaphanaHostMemoryCurrent) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, memoryStateUsedFreeAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Sum().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
dp.Attributes().PutStr("state", memoryStateUsedFreeAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricSaphanaHostMemoryCurrent) updateCapacity() {
if m.data.Sum().DataPoints().Len() > m.capacity {
m.capacity = m.data.Sum().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricSaphanaHostMemoryCurrent) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricSaphanaHostMemoryCurrent(cfg MetricConfig) metricSaphanaHostMemoryCurrent {
m := metricSaphanaHostMemoryCurrent{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricSaphanaHostSwapCurrent struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills saphana.host.swap.current metric with initial data.
func (m *metricSaphanaHostSwapCurrent) init() {
m.data.SetName("saphana.host.swap.current")
m.data.SetDescription("The amount of swap space on the host.")
m.data.SetUnit("By")
m.data.SetEmptySum()
m.data.Sum().SetIsMonotonic(false)
m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricSaphanaHostSwapCurrent) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, hostSwapStateAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Sum().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
dp.Attributes().PutStr("state", hostSwapStateAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricSaphanaHostSwapCurrent) updateCapacity() {
if m.data.Sum().DataPoints().Len() > m.capacity {
m.capacity = m.data.Sum().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricSaphanaHostSwapCurrent) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricSaphanaHostSwapCurrent(cfg MetricConfig) metricSaphanaHostSwapCurrent {
m := metricSaphanaHostSwapCurrent{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricSaphanaInstanceCodeSize struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills saphana.instance.code_size metric with initial data.
func (m *metricSaphanaInstanceCodeSize) init() {
m.data.SetName("saphana.instance.code_size")
m.data.SetDescription("The instance code size, including shared libraries of SAP HANA processes.")
m.data.SetUnit("By")
m.data.SetEmptySum()
m.data.Sum().SetIsMonotonic(false)
m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
}
func (m *metricSaphanaInstanceCodeSize) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
if !m.config.Enabled {
return
}
dp := m.data.Sum().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricSaphanaInstanceCodeSize) updateCapacity() {
if m.data.Sum().DataPoints().Len() > m.capacity {
m.capacity = m.data.Sum().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricSaphanaInstanceCodeSize) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricSaphanaInstanceCodeSize(cfg MetricConfig) metricSaphanaInstanceCodeSize {
m := metricSaphanaInstanceCodeSize{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricSaphanaInstanceMemoryCurrent struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills saphana.instance.memory.current metric with initial data.
func (m *metricSaphanaInstanceMemoryCurrent) init() {
m.data.SetName("saphana.instance.memory.current")
m.data.SetDescription("The size of the memory pool for all SAP HANA processes.")
m.data.SetUnit("By")
m.data.SetEmptySum()
m.data.Sum().SetIsMonotonic(false)
m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricSaphanaInstanceMemoryCurrent) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, memoryStateUsedFreeAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Sum().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
dp.Attributes().PutStr("state", memoryStateUsedFreeAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricSaphanaInstanceMemoryCurrent) updateCapacity() {
if m.data.Sum().DataPoints().Len() > m.capacity {
m.capacity = m.data.Sum().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricSaphanaInstanceMemoryCurrent) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricSaphanaInstanceMemoryCurrent(cfg MetricConfig) metricSaphanaInstanceMemoryCurrent {
m := metricSaphanaInstanceMemoryCurrent{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricSaphanaInstanceMemorySharedAllocated struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills saphana.instance.memory.shared.allocated metric with initial data.
func (m *metricSaphanaInstanceMemorySharedAllocated) init() {
m.data.SetName("saphana.instance.memory.shared.allocated")
m.data.SetDescription("The shared memory size of SAP HANA processes.")
m.data.SetUnit("By")
m.data.SetEmptySum()
m.data.Sum().SetIsMonotonic(false)
m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
}
func (m *metricSaphanaInstanceMemorySharedAllocated) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
if !m.config.Enabled {
return
}
dp := m.data.Sum().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricSaphanaInstanceMemorySharedAllocated) updateCapacity() {
if m.data.Sum().DataPoints().Len() > m.capacity {
m.capacity = m.data.Sum().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricSaphanaInstanceMemorySharedAllocated) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricSaphanaInstanceMemorySharedAllocated(cfg MetricConfig) metricSaphanaInstanceMemorySharedAllocated {
m := metricSaphanaInstanceMemorySharedAllocated{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricSaphanaInstanceMemoryUsedPeak struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills saphana.instance.memory.used.peak metric with initial data.
func (m *metricSaphanaInstanceMemoryUsedPeak) init() {
m.data.SetName("saphana.instance.memory.used.peak")
m.data.SetDescription("The peak memory from the memory pool used by SAP HANA processes since the instance started (this is a sample-based value).")
m.data.SetUnit("By")
m.data.SetEmptySum()
m.data.Sum().SetIsMonotonic(false)
m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
}
func (m *metricSaphanaInstanceMemoryUsedPeak) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
if !m.config.Enabled {
return
}
dp := m.data.Sum().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricSaphanaInstanceMemoryUsedPeak) updateCapacity() {
if m.data.Sum().DataPoints().Len() > m.capacity {
m.capacity = m.data.Sum().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricSaphanaInstanceMemoryUsedPeak) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricSaphanaInstanceMemoryUsedPeak(cfg MetricConfig) metricSaphanaInstanceMemoryUsedPeak {
m := metricSaphanaInstanceMemoryUsedPeak{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricSaphanaLicenseExpirationTime struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills saphana.license.expiration.time metric with initial data.
func (m *metricSaphanaLicenseExpirationTime) init() {
m.data.SetName("saphana.license.expiration.time")
m.data.SetDescription("The amount of time remaining before license expiration.")
m.data.SetUnit("s")
m.data.SetEmptyGauge()
m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricSaphanaLicenseExpirationTime) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, systemAttributeValue string, productAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
dp.Attributes().PutStr("system", systemAttributeValue)
dp.Attributes().PutStr("product", productAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricSaphanaLicenseExpirationTime) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricSaphanaLicenseExpirationTime) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricSaphanaLicenseExpirationTime(cfg MetricConfig) metricSaphanaLicenseExpirationTime {
m := metricSaphanaLicenseExpirationTime{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricSaphanaLicenseLimit struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills saphana.license.limit metric with initial data.
func (m *metricSaphanaLicenseLimit) init() {
m.data.SetName("saphana.license.limit")
m.data.SetDescription("The allowed product usage as specified by the license (for example, main memory).")
m.data.SetUnit("{licenses}")
m.data.SetEmptySum()
m.data.Sum().SetIsMonotonic(false)
m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricSaphanaLicenseLimit) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, systemAttributeValue string, productAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Sum().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
dp.Attributes().PutStr("system", systemAttributeValue)
dp.Attributes().PutStr("product", productAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricSaphanaLicenseLimit) updateCapacity() {
if m.data.Sum().DataPoints().Len() > m.capacity {
m.capacity = m.data.Sum().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricSaphanaLicenseLimit) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricSaphanaLicenseLimit(cfg MetricConfig) metricSaphanaLicenseLimit {
m := metricSaphanaLicenseLimit{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricSaphanaLicensePeak struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills saphana.license.peak metric with initial data.
func (m *metricSaphanaLicensePeak) init() {
m.data.SetName("saphana.license.peak")
m.data.SetDescription("The peak product usage value during last 13 months, measured periodically.")
m.data.SetUnit("{licenses}")
m.data.SetEmptySum()
m.data.Sum().SetIsMonotonic(false)
m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricSaphanaLicensePeak) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, systemAttributeValue string, productAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Sum().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
dp.Attributes().PutStr("system", systemAttributeValue)
dp.Attributes().PutStr("product", productAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricSaphanaLicensePeak) updateCapacity() {
if m.data.Sum().DataPoints().Len() > m.capacity {
m.capacity = m.data.Sum().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricSaphanaLicensePeak) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricSaphanaLicensePeak(cfg MetricConfig) metricSaphanaLicensePeak {
m := metricSaphanaLicensePeak{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricSaphanaNetworkRequestAverageTime struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills saphana.network.request.average_time metric with initial data.
func (m *metricSaphanaNetworkRequestAverageTime) init() {
m.data.SetName("saphana.network.request.average_time")
m.data.SetDescription("The average response time calculated over recent requests")
m.data.SetUnit("ms")
m.data.SetEmptyGauge()
}
func (m *metricSaphanaNetworkRequestAverageTime) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleValue(val)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricSaphanaNetworkRequestAverageTime) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricSaphanaNetworkRequestAverageTime) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricSaphanaNetworkRequestAverageTime(cfg MetricConfig) metricSaphanaNetworkRequestAverageTime {
m := metricSaphanaNetworkRequestAverageTime{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
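// Example (illustrative): float-valued metrics record through SetDoubleValue,
// so fractional averages survive intact:
//
//	m := newMetricSaphanaNetworkRequestAverageTime(MetricConfig{Enabled: true})
//	now := pcommon.NewTimestampFromTime(time.Now())
//	m.recordDataPoint(now, now, 12.5) // 12.5ms average response time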
type metricSaphanaNetworkRequestCount struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills saphana.network.request.count metric with initial data.
func (m *metricSaphanaNetworkRequestCount) init() {
m.data.SetName("saphana.network.request.count")
m.data.SetDescription("The number of active and pending service requests.")
m.data.SetUnit("{requests}")
m.data.SetEmptySum()
m.data.Sum().SetIsMonotonic(false)
m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricSaphanaNetworkRequestCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, activePendingRequestStateAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Sum().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
dp.Attributes().PutStr("state", activePendingRequestStateAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricSaphanaNetworkRequestCount) updateCapacity() {
if m.data.Sum().DataPoints().Len() > m.capacity {
m.capacity = m.data.Sum().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricSaphanaNetworkRequestCount) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricSaphanaNetworkRequestCount(cfg MetricConfig) metricSaphanaNetworkRequestCount {
m := metricSaphanaNetworkRequestCount{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricSaphanaNetworkRequestFinishedCount struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills saphana.network.request.finished.count metric with initial data.
func (m *metricSaphanaNetworkRequestFinishedCount) init() {
m.data.SetName("saphana.network.request.finished.count")
m.data.SetDescription("The number of service requests that have completed.")
m.data.SetUnit("{requests}")
m.data.SetEmptySum()
m.data.Sum().SetIsMonotonic(true)
m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricSaphanaNetworkRequestFinishedCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, internalExternalRequestTypeAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Sum().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
dp.Attributes().PutStr("type", internalExternalRequestTypeAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricSaphanaNetworkRequestFinishedCount) updateCapacity() {
if m.data.Sum().DataPoints().Len() > m.capacity {
m.capacity = m.data.Sum().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricSaphanaNetworkRequestFinishedCount) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricSaphanaNetworkRequestFinishedCount(cfg MetricConfig) metricSaphanaNetworkRequestFinishedCount {
m := metricSaphanaNetworkRequestFinishedCount{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricSaphanaReplicationAverageTime struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills saphana.replication.average_time metric with initial data.
func (m *metricSaphanaReplicationAverageTime) init() {
m.data.SetName("saphana.replication.average_time")
m.data.SetDescription("The average amount of time consumed replicating a log.")
m.data.SetUnit("us")
m.data.SetEmptyGauge()
m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricSaphanaReplicationAverageTime) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, primaryHostAttributeValue string, secondaryHostAttributeValue string, portAttributeValue string, replicationModeAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleValue(val)
dp.Attributes().PutStr("primary", primaryHostAttributeValue)
dp.Attributes().PutStr("secondary", secondaryHostAttributeValue)
dp.Attributes().PutStr("port", portAttributeValue)
dp.Attributes().PutStr("mode", replicationModeAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricSaphanaReplicationAverageTime) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricSaphanaReplicationAverageTime) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricSaphanaReplicationAverageTime(cfg MetricConfig) metricSaphanaReplicationAverageTime {
m := metricSaphanaReplicationAverageTime{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricSaphanaReplicationBacklogSize struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills saphana.replication.backlog.size metric with initial data.
func (m *metricSaphanaReplicationBacklogSize) init() {
m.data.SetName("saphana.replication.backlog.size")
m.data.SetDescription("The current replication backlog size.")
m.data.SetUnit("By")
m.data.SetEmptySum()
m.data.Sum().SetIsMonotonic(false)
m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricSaphanaReplicationBacklogSize) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, primaryHostAttributeValue string, secondaryHostAttributeValue string, portAttributeValue string, replicationModeAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Sum().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
dp.Attributes().PutStr("primary", primaryHostAttributeValue)
dp.Attributes().PutStr("secondary", secondaryHostAttributeValue)
dp.Attributes().PutStr("port", portAttributeValue)
dp.Attributes().PutStr("mode", replicationModeAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricSaphanaReplicationBacklogSize) updateCapacity() {
if m.data.Sum().DataPoints().Len() > m.capacity {
m.capacity = m.data.Sum().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricSaphanaReplicationBacklogSize) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricSaphanaReplicationBacklogSize(cfg MetricConfig) metricSaphanaReplicationBacklogSize {
m := metricSaphanaReplicationBacklogSize{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricSaphanaReplicationBacklogTime struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills saphana.replication.backlog.time metric with initial data.
func (m *metricSaphanaReplicationBacklogTime) init() {
m.data.SetName("saphana.replication.backlog.time")
m.data.SetDescription("The current replication backlog.")
m.data.SetUnit("us")
m.data.SetEmptySum()
m.data.Sum().SetIsMonotonic(false)
m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricSaphanaReplicationBacklogTime) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, primaryHostAttributeValue string, secondaryHostAttributeValue string, portAttributeValue string, replicationModeAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Sum().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
dp.Attributes().PutStr("primary", primaryHostAttributeValue)
dp.Attributes().PutStr("secondary", secondaryHostAttributeValue)
dp.Attributes().PutStr("port", portAttributeValue)
dp.Attributes().PutStr("mode", replicationModeAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricSaphanaReplicationBacklogTime) updateCapacity() {
if m.data.Sum().DataPoints().Len() > m.capacity {
m.capacity = m.data.Sum().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricSaphanaReplicationBacklogTime) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricSaphanaReplicationBacklogTime(cfg MetricConfig) metricSaphanaReplicationBacklogTime {
m := metricSaphanaReplicationBacklogTime{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricSaphanaRowStoreMemoryUsed struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills saphana.row_store.memory.used metric with initial data.
func (m *metricSaphanaRowStoreMemoryUsed) init() {
m.data.SetName("saphana.row_store.memory.used")
m.data.SetDescription("The used memory for all row tables.")
m.data.SetUnit("By")
m.data.SetEmptySum()
m.data.Sum().SetIsMonotonic(false)
m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricSaphanaRowStoreMemoryUsed) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, rowMemoryTypeAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Sum().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
dp.Attributes().PutStr("type", rowMemoryTypeAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricSaphanaRowStoreMemoryUsed) updateCapacity() {
if m.data.Sum().DataPoints().Len() > m.capacity {
m.capacity = m.data.Sum().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricSaphanaRowStoreMemoryUsed) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricSaphanaRowStoreMemoryUsed(cfg MetricConfig) metricSaphanaRowStoreMemoryUsed {
m := metricSaphanaRowStoreMemoryUsed{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricSaphanaSchemaMemoryUsedCurrent struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills saphana.schema.memory.used.current metric with initial data.
func (m *metricSaphanaSchemaMemoryUsedCurrent) init() {
m.data.SetName("saphana.schema.memory.used.current")
m.data.SetDescription("The memory size for all tables in schema.")
m.data.SetUnit("By")
m.data.SetEmptySum()
m.data.Sum().SetIsMonotonic(false)
m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricSaphanaSchemaMemoryUsedCurrent) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, schemaAttributeValue string, schemaMemoryTypeAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Sum().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
dp.Attributes().PutStr("schema", schemaAttributeValue)
dp.Attributes().PutStr("type", schemaMemoryTypeAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricSaphanaSchemaMemoryUsedCurrent) updateCapacity() {
if m.data.Sum().DataPoints().Len() > m.capacity {
m.capacity = m.data.Sum().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricSaphanaSchemaMemoryUsedCurrent) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricSaphanaSchemaMemoryUsedCurrent(cfg MetricConfig) metricSaphanaSchemaMemoryUsedCurrent {
m := metricSaphanaSchemaMemoryUsedCurrent{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricSaphanaSchemaMemoryUsedMax struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills saphana.schema.memory.used.max metric with initial data.
func (m *metricSaphanaSchemaMemoryUsedMax) init() {
m.data.SetName("saphana.schema.memory.used.max")
m.data.SetDescription("The estimated maximum memory consumption for all fully loaded tables in schema (data for open transactions is not included).")
m.data.SetUnit("By")
m.data.SetEmptySum()
m.data.Sum().SetIsMonotonic(false)
m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricSaphanaSchemaMemoryUsedMax) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, schemaAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Sum().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
dp.Attributes().PutStr("schema", schemaAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricSaphanaSchemaMemoryUsedMax) updateCapacity() {
if m.data.Sum().DataPoints().Len() > m.capacity {
m.capacity = m.data.Sum().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricSaphanaSchemaMemoryUsedMax) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricSaphanaSchemaMemoryUsedMax(cfg MetricConfig) metricSaphanaSchemaMemoryUsedMax {
m := metricSaphanaSchemaMemoryUsedMax{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricSaphanaSchemaOperationCount struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills saphana.schema.operation.count metric with initial data.
func (m *metricSaphanaSchemaOperationCount) init() {
m.data.SetName("saphana.schema.operation.count")
m.data.SetDescription("The number of operations done on all tables in schema.")
m.data.SetUnit("{operations}")
m.data.SetEmptySum()
m.data.Sum().SetIsMonotonic(true)
m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricSaphanaSchemaOperationCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, schemaAttributeValue string, schemaOperationTypeAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Sum().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
dp.Attributes().PutStr("schema", schemaAttributeValue)
dp.Attributes().PutStr("type", schemaOperationTypeAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricSaphanaSchemaOperationCount) updateCapacity() {
if m.data.Sum().DataPoints().Len() > m.capacity {
m.capacity = m.data.Sum().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricSaphanaSchemaOperationCount) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricSaphanaSchemaOperationCount(cfg MetricConfig) metricSaphanaSchemaOperationCount {
m := metricSaphanaSchemaOperationCount{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricSaphanaSchemaRecordCompressedCount struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills saphana.schema.record.compressed.count metric with initial data.
func (m *metricSaphanaSchemaRecordCompressedCount) init() {
m.data.SetName("saphana.schema.record.compressed.count")
m.data.SetDescription("The number of entries in main during the last optimize compression run for all tables in schema.")
m.data.SetUnit("{records}")
m.data.SetEmptySum()
m.data.Sum().SetIsMonotonic(false)
m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricSaphanaSchemaRecordCompressedCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, schemaAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Sum().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
dp.Attributes().PutStr("schema", schemaAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricSaphanaSchemaRecordCompressedCount) updateCapacity() {
if m.data.Sum().DataPoints().Len() > m.capacity {
m.capacity = m.data.Sum().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricSaphanaSchemaRecordCompressedCount) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricSaphanaSchemaRecordCompressedCount(cfg MetricConfig) metricSaphanaSchemaRecordCompressedCount {
m := metricSaphanaSchemaRecordCompressedCount{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricSaphanaSchemaRecordCount struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills saphana.schema.record.count metric with initial data.
func (m *metricSaphanaSchemaRecordCount) init() {
m.data.SetName("saphana.schema.record.count")
m.data.SetDescription("The number of records for all tables in schema.")
m.data.SetUnit("{records}")
m.data.SetEmptySum()
m.data.Sum().SetIsMonotonic(false)
m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricSaphanaSchemaRecordCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, schemaAttributeValue string, schemaRecordTypeAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Sum().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
dp.Attributes().PutStr("schema", schemaAttributeValue)
dp.Attributes().PutStr("type", schemaRecordTypeAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricSaphanaSchemaRecordCount) updateCapacity() {
if m.data.Sum().DataPoints().Len() > m.capacity {
m.capacity = m.data.Sum().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricSaphanaSchemaRecordCount) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricSaphanaSchemaRecordCount(cfg MetricConfig) metricSaphanaSchemaRecordCount {
m := metricSaphanaSchemaRecordCount{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricSaphanaServiceCodeSize struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills saphana.service.code_size metric with initial data.
func (m *metricSaphanaServiceCodeSize) init() {
m.data.SetName("saphana.service.code_size")
m.data.SetDescription("The service code size, including shared libraries.")
m.data.SetUnit("By")
m.data.SetEmptySum()
m.data.Sum().SetIsMonotonic(false)
m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricSaphanaServiceCodeSize) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, serviceAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Sum().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
dp.Attributes().PutStr("service", serviceAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricSaphanaServiceCodeSize) updateCapacity() {
if m.data.Sum().DataPoints().Len() > m.capacity {
m.capacity = m.data.Sum().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricSaphanaServiceCodeSize) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricSaphanaServiceCodeSize(cfg MetricConfig) metricSaphanaServiceCodeSize {
m := metricSaphanaServiceCodeSize{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricSaphanaServiceCount struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills saphana.service.count metric with initial data.
func (m *metricSaphanaServiceCount) init() {
m.data.SetName("saphana.service.count")
m.data.SetDescription("The number of services in a given status.")
m.data.SetUnit("{services}")
m.data.SetEmptySum()
m.data.Sum().SetIsMonotonic(false)
m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricSaphanaServiceCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, serviceStatusAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Sum().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
dp.Attributes().PutStr("status", serviceStatusAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricSaphanaServiceCount) updateCapacity() {
if m.data.Sum().DataPoints().Len() > m.capacity {
m.capacity = m.data.Sum().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricSaphanaServiceCount) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricSaphanaServiceCount(cfg MetricConfig) metricSaphanaServiceCount {
m := metricSaphanaServiceCount{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricSaphanaServiceMemoryCompactorsAllocated struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills saphana.service.memory.compactors.allocated metric with initial data.
func (m *metricSaphanaServiceMemoryCompactorsAllocated) init() {
m.data.SetName("saphana.service.memory.compactors.allocated")
m.data.SetDescription("The part of the memory pool that can potentially (if unpinned) be freed during a memory shortage.")
m.data.SetUnit("By")
m.data.SetEmptySum()
m.data.Sum().SetIsMonotonic(false)
m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricSaphanaServiceMemoryCompactorsAllocated) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, serviceAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Sum().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
dp.Attributes().PutStr("service", serviceAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricSaphanaServiceMemoryCompactorsAllocated) updateCapacity() {
if m.data.Sum().DataPoints().Len() > m.capacity {
m.capacity = m.data.Sum().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricSaphanaServiceMemoryCompactorsAllocated) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricSaphanaServiceMemoryCompactorsAllocated(cfg MetricConfig) metricSaphanaServiceMemoryCompactorsAllocated {
m := metricSaphanaServiceMemoryCompactorsAllocated{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricSaphanaServiceMemoryCompactorsFreeable struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills saphana.service.memory.compactors.freeable metric with initial data.
func (m *metricSaphanaServiceMemoryCompactorsFreeable) init() {
m.data.SetName("saphana.service.memory.compactors.freeable")
m.data.SetDescription("The memory that can be freed during a memory shortage.")
m.data.SetUnit("By")
m.data.SetEmptySum()
m.data.Sum().SetIsMonotonic(false)
m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricSaphanaServiceMemoryCompactorsFreeable) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, serviceAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Sum().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
dp.Attributes().PutStr("service", serviceAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricSaphanaServiceMemoryCompactorsFreeable) updateCapacity() {
if m.data.Sum().DataPoints().Len() > m.capacity {
m.capacity = m.data.Sum().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricSaphanaServiceMemoryCompactorsFreeable) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricSaphanaServiceMemoryCompactorsFreeable(cfg MetricConfig) metricSaphanaServiceMemoryCompactorsFreeable {
m := metricSaphanaServiceMemoryCompactorsFreeable{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricSaphanaServiceMemoryEffectiveLimit struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills saphana.service.memory.effective_limit metric with initial data.
func (m *metricSaphanaServiceMemoryEffectiveLimit) init() {
m.data.SetName("saphana.service.memory.effective_limit")
m.data.SetDescription("The effective maximum memory pool size, calculated considering the pool sizes of other processes.")
m.data.SetUnit("By")
m.data.SetEmptySum()
m.data.Sum().SetIsMonotonic(false)
m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricSaphanaServiceMemoryEffectiveLimit) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, serviceAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Sum().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
dp.Attributes().PutStr("service", serviceAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricSaphanaServiceMemoryEffectiveLimit) updateCapacity() {
if m.data.Sum().DataPoints().Len() > m.capacity {
m.capacity = m.data.Sum().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricSaphanaServiceMemoryEffectiveLimit) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricSaphanaServiceMemoryEffectiveLimit(cfg MetricConfig) metricSaphanaServiceMemoryEffectiveLimit {
m := metricSaphanaServiceMemoryEffectiveLimit{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricSaphanaServiceMemoryHeapCurrent struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills saphana.service.memory.heap.current metric with initial data.
func (m *metricSaphanaServiceMemoryHeapCurrent) init() {
m.data.SetName("saphana.service.memory.heap.current")
m.data.SetDescription("The size of the heap portion of the memory pool.")
m.data.SetUnit("By")
m.data.SetEmptySum()
m.data.Sum().SetIsMonotonic(false)
m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricSaphanaServiceMemoryHeapCurrent) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, serviceAttributeValue string, memoryStateUsedFreeAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Sum().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
dp.Attributes().PutStr("service", serviceAttributeValue)
dp.Attributes().PutStr("state", memoryStateUsedFreeAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricSaphanaServiceMemoryHeapCurrent) updateCapacity() {
if m.data.Sum().DataPoints().Len() > m.capacity {
m.capacity = m.data.Sum().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricSaphanaServiceMemoryHeapCurrent) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricSaphanaServiceMemoryHeapCurrent(cfg MetricConfig) metricSaphanaServiceMemoryHeapCurrent {
m := metricSaphanaServiceMemoryHeapCurrent{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricSaphanaServiceMemoryLimit struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills saphana.service.memory.limit metric with initial data.
func (m *metricSaphanaServiceMemoryLimit) init() {
m.data.SetName("saphana.service.memory.limit")
m.data.SetDescription("The configured maximum memory pool size.")
m.data.SetUnit("By")
m.data.SetEmptySum()
m.data.Sum().SetIsMonotonic(false)
m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricSaphanaServiceMemoryLimit) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, serviceAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Sum().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
dp.Attributes().PutStr("service", serviceAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricSaphanaServiceMemoryLimit) updateCapacity() {
if m.data.Sum().DataPoints().Len() > m.capacity {
m.capacity = m.data.Sum().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricSaphanaServiceMemoryLimit) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricSaphanaServiceMemoryLimit(cfg MetricConfig) metricSaphanaServiceMemoryLimit {
m := metricSaphanaServiceMemoryLimit{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricSaphanaServiceMemorySharedCurrent struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills saphana.service.memory.shared.current metric with initial data.
func (m *metricSaphanaServiceMemorySharedCurrent) init() {
m.data.SetName("saphana.service.memory.shared.current")
m.data.SetDescription("The size of the shared portion of the memory pool.")
m.data.SetUnit("By")
m.data.SetEmptySum()
m.data.Sum().SetIsMonotonic(false)
m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricSaphanaServiceMemorySharedCurrent) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, serviceAttributeValue string, memoryStateUsedFreeAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Sum().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
dp.Attributes().PutStr("service", serviceAttributeValue)
dp.Attributes().PutStr("state", memoryStateUsedFreeAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricSaphanaServiceMemorySharedCurrent) updateCapacity() {
if m.data.Sum().DataPoints().Len() > m.capacity {
m.capacity = m.data.Sum().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricSaphanaServiceMemorySharedCurrent) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricSaphanaServiceMemorySharedCurrent(cfg MetricConfig) metricSaphanaServiceMemorySharedCurrent {
m := metricSaphanaServiceMemorySharedCurrent{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricSaphanaServiceMemoryUsed struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills saphana.service.memory.used metric with initial data.
func (m *metricSaphanaServiceMemoryUsed) init() {
m.data.SetName("saphana.service.memory.used")
m.data.SetDescription("The used memory from the operating system perspective.")
m.data.SetUnit("By")
m.data.SetEmptySum()
m.data.Sum().SetIsMonotonic(false)
m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricSaphanaServiceMemoryUsed) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, serviceAttributeValue string, serviceMemoryUsedTypeAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Sum().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
dp.Attributes().PutStr("service", serviceAttributeValue)
dp.Attributes().PutStr("type", serviceMemoryUsedTypeAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricSaphanaServiceMemoryUsed) updateCapacity() {
if m.data.Sum().DataPoints().Len() > m.capacity {
m.capacity = m.data.Sum().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricSaphanaServiceMemoryUsed) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricSaphanaServiceMemoryUsed(cfg MetricConfig) metricSaphanaServiceMemoryUsed {
m := metricSaphanaServiceMemoryUsed{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricSaphanaServiceStackSize struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills saphana.service.stack_size metric with initial data.
func (m *metricSaphanaServiceStackSize) init() {
m.data.SetName("saphana.service.stack_size")
m.data.SetDescription("The service stack size.")
m.data.SetUnit("By")
m.data.SetEmptySum()
m.data.Sum().SetIsMonotonic(false)
m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricSaphanaServiceStackSize) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, serviceAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Sum().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
dp.Attributes().PutStr("service", serviceAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricSaphanaServiceStackSize) updateCapacity() {
if m.data.Sum().DataPoints().Len() > m.capacity {
m.capacity = m.data.Sum().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricSaphanaServiceStackSize) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricSaphanaServiceStackSize(cfg MetricConfig) metricSaphanaServiceStackSize {
m := metricSaphanaServiceStackSize{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricSaphanaServiceThreadCount struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills saphana.service.thread.count metric with initial data.
func (m *metricSaphanaServiceThreadCount) init() {
m.data.SetName("saphana.service.thread.count")
m.data.SetDescription("The number of service threads in a given status.")
m.data.SetUnit("{threads}")
m.data.SetEmptySum()
m.data.Sum().SetIsMonotonic(false)
m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricSaphanaServiceThreadCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, threadStatusAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Sum().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
dp.Attributes().PutStr("status", threadStatusAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricSaphanaServiceThreadCount) updateCapacity() {
if m.data.Sum().DataPoints().Len() > m.capacity {
m.capacity = m.data.Sum().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricSaphanaServiceThreadCount) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricSaphanaServiceThreadCount(cfg MetricConfig) metricSaphanaServiceThreadCount {
m := metricSaphanaServiceThreadCount{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricSaphanaTransactionBlocked struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills saphana.transaction.blocked metric with initial data.
func (m *metricSaphanaTransactionBlocked) init() {
m.data.SetName("saphana.transaction.blocked")
m.data.SetDescription("The number of transactions waiting for a lock.")
m.data.SetUnit("{transactions}")
m.data.SetEmptySum()
m.data.Sum().SetIsMonotonic(false)
m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricSaphanaTransactionBlocked) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
if !m.config.Enabled {
return
}
dp := m.data.Sum().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricSaphanaTransactionBlocked) updateCapacity() {
if m.data.Sum().DataPoints().Len() > m.capacity {
m.capacity = m.data.Sum().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricSaphanaTransactionBlocked) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricSaphanaTransactionBlocked(cfg MetricConfig) metricSaphanaTransactionBlocked {
m := metricSaphanaTransactionBlocked{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricSaphanaTransactionCount struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills saphana.transaction.count metric with initial data.
func (m *metricSaphanaTransactionCount) init() {
m.data.SetName("saphana.transaction.count")
m.data.SetDescription("The number of transactions.")
m.data.SetUnit("{transactions}")
m.data.SetEmptySum()
m.data.Sum().SetIsMonotonic(true)
m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricSaphanaTransactionCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, transactionTypeAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Sum().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
dp.Attributes().PutStr("type", transactionTypeAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricSaphanaTransactionCount) updateCapacity() {
if m.data.Sum().DataPoints().Len() > m.capacity {
m.capacity = m.data.Sum().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricSaphanaTransactionCount) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricSaphanaTransactionCount(cfg MetricConfig) metricSaphanaTransactionCount {
m := metricSaphanaTransactionCount{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricSaphanaUptime struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills saphana.uptime metric with initial data.
func (m *metricSaphanaUptime) init() {
m.data.SetName("saphana.uptime")
m.data.SetDescription("The uptime of the database.")
m.data.SetUnit("s")
m.data.SetEmptySum()
m.data.Sum().SetIsMonotonic(false)
m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricSaphanaUptime) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, systemAttributeValue string, databaseAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Sum().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
dp.Attributes().PutStr("system", systemAttributeValue)
dp.Attributes().PutStr("database", databaseAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricSaphanaUptime) updateCapacity() {
if m.data.Sum().DataPoints().Len() > m.capacity {
m.capacity = m.data.Sum().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricSaphanaUptime) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricSaphanaUptime(cfg MetricConfig) metricSaphanaUptime {
m := metricSaphanaUptime{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricSaphanaVolumeOperationCount struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills saphana.volume.operation.count metric with initial data.
func (m *metricSaphanaVolumeOperationCount) init() {
m.data.SetName("saphana.volume.operation.count")
m.data.SetDescription("The number of operations executed.")
m.data.SetUnit("{operations}")
m.data.SetEmptySum()
m.data.Sum().SetIsMonotonic(true)
m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricSaphanaVolumeOperationCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, pathAttributeValue string, diskUsageTypeAttributeValue string, volumeOperationTypeAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Sum().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
dp.Attributes().PutStr("path", pathAttributeValue)
dp.Attributes().PutStr("usage_type", diskUsageTypeAttributeValue)
dp.Attributes().PutStr("type", volumeOperationTypeAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricSaphanaVolumeOperationCount) updateCapacity() {
if m.data.Sum().DataPoints().Len() > m.capacity {
m.capacity = m.data.Sum().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricSaphanaVolumeOperationCount) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricSaphanaVolumeOperationCount(cfg MetricConfig) metricSaphanaVolumeOperationCount {
m := metricSaphanaVolumeOperationCount{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricSaphanaVolumeOperationSize struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills saphana.volume.operation.size metric with initial data.
func (m *metricSaphanaVolumeOperationSize) init() {
m.data.SetName("saphana.volume.operation.size")
m.data.SetDescription("The size of operations executed.")
m.data.SetUnit("By")
m.data.SetEmptySum()
m.data.Sum().SetIsMonotonic(true)
m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricSaphanaVolumeOperationSize) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, pathAttributeValue string, diskUsageTypeAttributeValue string, volumeOperationTypeAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Sum().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
dp.Attributes().PutStr("path", pathAttributeValue)
dp.Attributes().PutStr("usage_type", diskUsageTypeAttributeValue)
dp.Attributes().PutStr("type", volumeOperationTypeAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricSaphanaVolumeOperationSize) updateCapacity() {
if m.data.Sum().DataPoints().Len() > m.capacity {
m.capacity = m.data.Sum().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricSaphanaVolumeOperationSize) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricSaphanaVolumeOperationSize(cfg MetricConfig) metricSaphanaVolumeOperationSize {
m := metricSaphanaVolumeOperationSize{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricSaphanaVolumeOperationTime struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills saphana.volume.operation.time metric with initial data.
func (m *metricSaphanaVolumeOperationTime) init() {
m.data.SetName("saphana.volume.operation.time")
m.data.SetDescription("The time spent executing operations.")
m.data.SetUnit("ms")
m.data.SetEmptySum()
m.data.Sum().SetIsMonotonic(true)
m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricSaphanaVolumeOperationTime) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, pathAttributeValue string, diskUsageTypeAttributeValue string, volumeOperationTypeAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Sum().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
dp.Attributes().PutStr("path", pathAttributeValue)
dp.Attributes().PutStr("usage_type", diskUsageTypeAttributeValue)
dp.Attributes().PutStr("type", volumeOperationTypeAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricSaphanaVolumeOperationTime) updateCapacity() {
if m.data.Sum().DataPoints().Len() > m.capacity {
m.capacity = m.data.Sum().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricSaphanaVolumeOperationTime) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricSaphanaVolumeOperationTime(cfg MetricConfig) metricSaphanaVolumeOperationTime {
m := metricSaphanaVolumeOperationTime{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
// MetricsBuilder provides an interface for scrapers to report metrics while taking care of all the transformations
// required to produce the metric representation defined in metadata and user config.
type MetricsBuilder struct {
config MetricsBuilderConfig // config of the metrics builder.
startTime pcommon.Timestamp // start time that will be applied to all recorded data points.
metricsCapacity int // maximum observed number of metrics per resource.
metricsBuffer pmetric.Metrics // accumulates metrics data before emitting.
buildInfo component.BuildInfo // contains version information.
resourceAttributeIncludeFilter map[string]filter.Filter
resourceAttributeExcludeFilter map[string]filter.Filter
metricSaphanaAlertCount metricSaphanaAlertCount
metricSaphanaBackupLatest metricSaphanaBackupLatest
metricSaphanaColumnMemoryUsed metricSaphanaColumnMemoryUsed
metricSaphanaComponentMemoryUsed metricSaphanaComponentMemoryUsed
metricSaphanaConnectionCount metricSaphanaConnectionCount
metricSaphanaCPUUsed metricSaphanaCPUUsed
metricSaphanaDiskSizeCurrent metricSaphanaDiskSizeCurrent
metricSaphanaHostMemoryCurrent metricSaphanaHostMemoryCurrent
metricSaphanaHostSwapCurrent metricSaphanaHostSwapCurrent
metricSaphanaInstanceCodeSize metricSaphanaInstanceCodeSize
metricSaphanaInstanceMemoryCurrent metricSaphanaInstanceMemoryCurrent
metricSaphanaInstanceMemorySharedAllocated metricSaphanaInstanceMemorySharedAllocated
metricSaphanaInstanceMemoryUsedPeak metricSaphanaInstanceMemoryUsedPeak
metricSaphanaLicenseExpirationTime metricSaphanaLicenseExpirationTime
metricSaphanaLicenseLimit metricSaphanaLicenseLimit
metricSaphanaLicensePeak metricSaphanaLicensePeak
metricSaphanaNetworkRequestAverageTime metricSaphanaNetworkRequestAverageTime
metricSaphanaNetworkRequestCount metricSaphanaNetworkRequestCount
metricSaphanaNetworkRequestFinishedCount metricSaphanaNetworkRequestFinishedCount
metricSaphanaReplicationAverageTime metricSaphanaReplicationAverageTime
metricSaphanaReplicationBacklogSize metricSaphanaReplicationBacklogSize
metricSaphanaReplicationBacklogTime metricSaphanaReplicationBacklogTime
metricSaphanaRowStoreMemoryUsed metricSaphanaRowStoreMemoryUsed
metricSaphanaSchemaMemoryUsedCurrent metricSaphanaSchemaMemoryUsedCurrent
metricSaphanaSchemaMemoryUsedMax metricSaphanaSchemaMemoryUsedMax
metricSaphanaSchemaOperationCount metricSaphanaSchemaOperationCount
metricSaphanaSchemaRecordCompressedCount metricSaphanaSchemaRecordCompressedCount
metricSaphanaSchemaRecordCount metricSaphanaSchemaRecordCount
metricSaphanaServiceCodeSize metricSaphanaServiceCodeSize
metricSaphanaServiceCount metricSaphanaServiceCount
metricSaphanaServiceMemoryCompactorsAllocated metricSaphanaServiceMemoryCompactorsAllocated
metricSaphanaServiceMemoryCompactorsFreeable metricSaphanaServiceMemoryCompactorsFreeable
metricSaphanaServiceMemoryEffectiveLimit metricSaphanaServiceMemoryEffectiveLimit
metricSaphanaServiceMemoryHeapCurrent metricSaphanaServiceMemoryHeapCurrent
metricSaphanaServiceMemoryLimit metricSaphanaServiceMemoryLimit
metricSaphanaServiceMemorySharedCurrent metricSaphanaServiceMemorySharedCurrent
metricSaphanaServiceMemoryUsed metricSaphanaServiceMemoryUsed
metricSaphanaServiceStackSize metricSaphanaServiceStackSize
metricSaphanaServiceThreadCount metricSaphanaServiceThreadCount
metricSaphanaTransactionBlocked metricSaphanaTransactionBlocked
metricSaphanaTransactionCount metricSaphanaTransactionCount
metricSaphanaUptime metricSaphanaUptime
metricSaphanaVolumeOperationCount metricSaphanaVolumeOperationCount
metricSaphanaVolumeOperationSize metricSaphanaVolumeOperationSize
metricSaphanaVolumeOperationTime metricSaphanaVolumeOperationTime
}
// MetricBuilderOption applies changes to the default metrics builder.
type MetricBuilderOption interface {
apply(*MetricsBuilder)
}
type metricBuilderOptionFunc func(mb *MetricsBuilder)
func (mbof metricBuilderOptionFunc) apply(mb *MetricsBuilder) {
mbof(mb)
}
// WithStartTime sets startTime on the metrics builder.
func WithStartTime(startTime pcommon.Timestamp) MetricBuilderOption {
return metricBuilderOptionFunc(func(mb *MetricsBuilder) {
mb.startTime = startTime
})
}
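// NewMetricsBuilder creates a MetricsBuilder with the given configuration and receiver settings,
// applying any provided options.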
func NewMetricsBuilder(mbc MetricsBuilderConfig, settings receiver.Settings, options ...MetricBuilderOption) *MetricsBuilder {
mb := &MetricsBuilder{
config: mbc,
startTime: pcommon.NewTimestampFromTime(time.Now()),
metricsBuffer: pmetric.NewMetrics(),
buildInfo: settings.BuildInfo,
metricSaphanaAlertCount: newMetricSaphanaAlertCount(mbc.Metrics.SaphanaAlertCount),
metricSaphanaBackupLatest: newMetricSaphanaBackupLatest(mbc.Metrics.SaphanaBackupLatest),
metricSaphanaColumnMemoryUsed: newMetricSaphanaColumnMemoryUsed(mbc.Metrics.SaphanaColumnMemoryUsed),
metricSaphanaComponentMemoryUsed: newMetricSaphanaComponentMemoryUsed(mbc.Metrics.SaphanaComponentMemoryUsed),
metricSaphanaConnectionCount: newMetricSaphanaConnectionCount(mbc.Metrics.SaphanaConnectionCount),
metricSaphanaCPUUsed: newMetricSaphanaCPUUsed(mbc.Metrics.SaphanaCPUUsed),
metricSaphanaDiskSizeCurrent: newMetricSaphanaDiskSizeCurrent(mbc.Metrics.SaphanaDiskSizeCurrent),
metricSaphanaHostMemoryCurrent: newMetricSaphanaHostMemoryCurrent(mbc.Metrics.SaphanaHostMemoryCurrent),
metricSaphanaHostSwapCurrent: newMetricSaphanaHostSwapCurrent(mbc.Metrics.SaphanaHostSwapCurrent),
metricSaphanaInstanceCodeSize: newMetricSaphanaInstanceCodeSize(mbc.Metrics.SaphanaInstanceCodeSize),
metricSaphanaInstanceMemoryCurrent: newMetricSaphanaInstanceMemoryCurrent(mbc.Metrics.SaphanaInstanceMemoryCurrent),
metricSaphanaInstanceMemorySharedAllocated: newMetricSaphanaInstanceMemorySharedAllocated(mbc.Metrics.SaphanaInstanceMemorySharedAllocated),
metricSaphanaInstanceMemoryUsedPeak: newMetricSaphanaInstanceMemoryUsedPeak(mbc.Metrics.SaphanaInstanceMemoryUsedPeak),
metricSaphanaLicenseExpirationTime: newMetricSaphanaLicenseExpirationTime(mbc.Metrics.SaphanaLicenseExpirationTime),
metricSaphanaLicenseLimit: newMetricSaphanaLicenseLimit(mbc.Metrics.SaphanaLicenseLimit),
metricSaphanaLicensePeak: newMetricSaphanaLicensePeak(mbc.Metrics.SaphanaLicensePeak),
metricSaphanaNetworkRequestAverageTime: newMetricSaphanaNetworkRequestAverageTime(mbc.Metrics.SaphanaNetworkRequestAverageTime),
metricSaphanaNetworkRequestCount: newMetricSaphanaNetworkRequestCount(mbc.Metrics.SaphanaNetworkRequestCount),
metricSaphanaNetworkRequestFinishedCount: newMetricSaphanaNetworkRequestFinishedCount(mbc.Metrics.SaphanaNetworkRequestFinishedCount),
metricSaphanaReplicationAverageTime: newMetricSaphanaReplicationAverageTime(mbc.Metrics.SaphanaReplicationAverageTime),
metricSaphanaReplicationBacklogSize: newMetricSaphanaReplicationBacklogSize(mbc.Metrics.SaphanaReplicationBacklogSize),
metricSaphanaReplicationBacklogTime: newMetricSaphanaReplicationBacklogTime(mbc.Metrics.SaphanaReplicationBacklogTime),
metricSaphanaRowStoreMemoryUsed: newMetricSaphanaRowStoreMemoryUsed(mbc.Metrics.SaphanaRowStoreMemoryUsed),
metricSaphanaSchemaMemoryUsedCurrent: newMetricSaphanaSchemaMemoryUsedCurrent(mbc.Metrics.SaphanaSchemaMemoryUsedCurrent),
metricSaphanaSchemaMemoryUsedMax: newMetricSaphanaSchemaMemoryUsedMax(mbc.Metrics.SaphanaSchemaMemoryUsedMax),
metricSaphanaSchemaOperationCount: newMetricSaphanaSchemaOperationCount(mbc.Metrics.SaphanaSchemaOperationCount),
metricSaphanaSchemaRecordCompressedCount: newMetricSaphanaSchemaRecordCompressedCount(mbc.Metrics.SaphanaSchemaRecordCompressedCount),
metricSaphanaSchemaRecordCount: newMetricSaphanaSchemaRecordCount(mbc.Metrics.SaphanaSchemaRecordCount),
metricSaphanaServiceCodeSize: newMetricSaphanaServiceCodeSize(mbc.Metrics.SaphanaServiceCodeSize),
metricSaphanaServiceCount: newMetricSaphanaServiceCount(mbc.Metrics.SaphanaServiceCount),
metricSaphanaServiceMemoryCompactorsAllocated: newMetricSaphanaServiceMemoryCompactorsAllocated(mbc.Metrics.SaphanaServiceMemoryCompactorsAllocated),
metricSaphanaServiceMemoryCompactorsFreeable: newMetricSaphanaServiceMemoryCompactorsFreeable(mbc.Metrics.SaphanaServiceMemoryCompactorsFreeable),
metricSaphanaServiceMemoryEffectiveLimit: newMetricSaphanaServiceMemoryEffectiveLimit(mbc.Metrics.SaphanaServiceMemoryEffectiveLimit),
metricSaphanaServiceMemoryHeapCurrent: newMetricSaphanaServiceMemoryHeapCurrent(mbc.Metrics.SaphanaServiceMemoryHeapCurrent),
metricSaphanaServiceMemoryLimit: newMetricSaphanaServiceMemoryLimit(mbc.Metrics.SaphanaServiceMemoryLimit),
metricSaphanaServiceMemorySharedCurrent: newMetricSaphanaServiceMemorySharedCurrent(mbc.Metrics.SaphanaServiceMemorySharedCurrent),
metricSaphanaServiceMemoryUsed: newMetricSaphanaServiceMemoryUsed(mbc.Metrics.SaphanaServiceMemoryUsed),
metricSaphanaServiceStackSize: newMetricSaphanaServiceStackSize(mbc.Metrics.SaphanaServiceStackSize),
metricSaphanaServiceThreadCount: newMetricSaphanaServiceThreadCount(mbc.Metrics.SaphanaServiceThreadCount),
metricSaphanaTransactionBlocked: newMetricSaphanaTransactionBlocked(mbc.Metrics.SaphanaTransactionBlocked),
metricSaphanaTransactionCount: newMetricSaphanaTransactionCount(mbc.Metrics.SaphanaTransactionCount),
metricSaphanaUptime: newMetricSaphanaUptime(mbc.Metrics.SaphanaUptime),
metricSaphanaVolumeOperationCount: newMetricSaphanaVolumeOperationCount(mbc.Metrics.SaphanaVolumeOperationCount),
metricSaphanaVolumeOperationSize: newMetricSaphanaVolumeOperationSize(mbc.Metrics.SaphanaVolumeOperationSize),
metricSaphanaVolumeOperationTime: newMetricSaphanaVolumeOperationTime(mbc.Metrics.SaphanaVolumeOperationTime),
resourceAttributeIncludeFilter: make(map[string]filter.Filter),
resourceAttributeExcludeFilter: make(map[string]filter.Filter),
}
if mbc.ResourceAttributes.DbSystem.MetricsInclude != nil {
mb.resourceAttributeIncludeFilter["db.system"] = filter.CreateFilter(mbc.ResourceAttributes.DbSystem.MetricsInclude)
}
if mbc.ResourceAttributes.DbSystem.MetricsExclude != nil {
mb.resourceAttributeExcludeFilter["db.system"] = filter.CreateFilter(mbc.ResourceAttributes.DbSystem.MetricsExclude)
}
if mbc.ResourceAttributes.SaphanaHost.MetricsInclude != nil {
mb.resourceAttributeIncludeFilter["saphana.host"] = filter.CreateFilter(mbc.ResourceAttributes.SaphanaHost.MetricsInclude)
}
if mbc.ResourceAttributes.SaphanaHost.MetricsExclude != nil {
mb.resourceAttributeExcludeFilter["saphana.host"] = filter.CreateFilter(mbc.ResourceAttributes.SaphanaHost.MetricsExclude)
}
for _, op := range options {
op.apply(mb)
}
return mb
}
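// An illustrative sketch (not generated by mdatagen): when include/exclude
// filters are configured for a resource attribute, EmitForResource silently
// drops the accumulated batch for any resource whose attribute value fails
// the filter. The host value below is hypothetical.
//
//	res := pcommon.NewResource()
//	res.Attributes().PutStr("saphana.host", "hana-01") // kept only if it passes the filters
//	mb.EmitForResource(WithResource(res))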
// NewResourceBuilder returns a new resource builder that should be used to build a resource associated with the emitted metrics.
func (mb *MetricsBuilder) NewResourceBuilder() *ResourceBuilder {
return NewResourceBuilder(mb.config.ResourceAttributes)
}
// updateCapacity tracks the maximum number of metrics seen per resource so far, which is used as the slice capacity for subsequently emitted resources.
func (mb *MetricsBuilder) updateCapacity(rm pmetric.ResourceMetrics) {
if mb.metricsCapacity < rm.ScopeMetrics().At(0).Metrics().Len() {
mb.metricsCapacity = rm.ScopeMetrics().At(0).Metrics().Len()
}
}
// ResourceMetricsOption applies changes to provided resource metrics.
type ResourceMetricsOption interface {
apply(pmetric.ResourceMetrics)
}
type resourceMetricsOptionFunc func(pmetric.ResourceMetrics)
func (rmof resourceMetricsOptionFunc) apply(rm pmetric.ResourceMetrics) {
rmof(rm)
}
// WithResource sets the provided resource on the emitted ResourceMetrics.
// It's recommended to use ResourceBuilder to create the resource.
func WithResource(res pcommon.Resource) ResourceMetricsOption {
return resourceMetricsOptionFunc(func(rm pmetric.ResourceMetrics) {
res.CopyTo(rm.Resource())
})
}
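// An illustrative usage sketch (not generated by mdatagen): build the
// resource with the package's ResourceBuilder and attach it via WithResource.
// The setter names are assumptions derived from the declared resource
// attributes (db.system, saphana.host); see generated_resource.go for the
// actual API.
//
//	rb := mb.NewResourceBuilder()
//	rb.SetDbSystem("saphana")
//	rb.SetSaphanaHost("hana-01") // hypothetical host value
//	mb.EmitForResource(WithResource(rb.Emit()))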
// WithStartTimeOverride overrides the start time for all the resource metrics data points.
// This option should only be used if a different start time has to be set on metrics coming from different resources.
func WithStartTimeOverride(start pcommon.Timestamp) ResourceMetricsOption {
return resourceMetricsOptionFunc(func(rm pmetric.ResourceMetrics) {
metrics := rm.ScopeMetrics().At(0).Metrics()
for i := 0; i < metrics.Len(); i++ {
// Scope dps to each metric and skip types without number data points,
// so a stale or zero-value slice is never reused across iterations.
var dps pmetric.NumberDataPointSlice
switch metrics.At(i).Type() {
case pmetric.MetricTypeGauge:
dps = metrics.At(i).Gauge().DataPoints()
case pmetric.MetricTypeSum:
dps = metrics.At(i).Sum().DataPoints()
default:
continue
}
for j := 0; j < dps.Len(); j++ {
dps.At(j).SetStartTimestamp(start)
}
}
})
}
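// An illustrative sketch (not generated by mdatagen): pin a distinct start
// time for the batch emitted for one resource, e.g. after a single HANA host
// was restarted. restartTime and res are hypothetical.
//
//	start := pcommon.NewTimestampFromTime(restartTime)
//	mb.EmitForResource(WithResource(res), WithStartTimeOverride(start))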
// EmitForResource saves all the generated metrics under a new resource and updates the internal state to be ready for
// recording another set of data points as part of another resource. This function can be helpful when one scraper
// needs to emit metrics from several resources. Otherwise, calling this function is not required;
// the `Emit` function can be called instead.
// Resource attributes should be provided as ResourceMetricsOption arguments.
func (mb *MetricsBuilder) EmitForResource(options ...ResourceMetricsOption) {
rm := pmetric.NewResourceMetrics()
ils := rm.ScopeMetrics().AppendEmpty()
ils.Scope().SetName(ScopeName)
ils.Scope().SetVersion(mb.buildInfo.Version)
ils.Metrics().EnsureCapacity(mb.metricsCapacity)
mb.metricSaphanaAlertCount.emit(ils.Metrics())
mb.metricSaphanaBackupLatest.emit(ils.Metrics())
mb.metricSaphanaColumnMemoryUsed.emit(ils.Metrics())
mb.metricSaphanaComponentMemoryUsed.emit(ils.Metrics())
mb.metricSaphanaConnectionCount.emit(ils.Metrics())
mb.metricSaphanaCPUUsed.emit(ils.Metrics())
mb.metricSaphanaDiskSizeCurrent.emit(ils.Metrics())
mb.metricSaphanaHostMemoryCurrent.emit(ils.Metrics())
mb.metricSaphanaHostSwapCurrent.emit(ils.Metrics())
mb.metricSaphanaInstanceCodeSize.emit(ils.Metrics())
mb.metricSaphanaInstanceMemoryCurrent.emit(ils.Metrics())
mb.metricSaphanaInstanceMemorySharedAllocated.emit(ils.Metrics())
mb.metricSaphanaInstanceMemoryUsedPeak.emit(ils.Metrics())
mb.metricSaphanaLicenseExpirationTime.emit(ils.Metrics())
mb.metricSaphanaLicenseLimit.emit(ils.Metrics())
mb.metricSaphanaLicensePeak.emit(ils.Metrics())
mb.metricSaphanaNetworkRequestAverageTime.emit(ils.Metrics())
mb.metricSaphanaNetworkRequestCount.emit(ils.Metrics())
mb.metricSaphanaNetworkRequestFinishedCount.emit(ils.Metrics())
mb.metricSaphanaReplicationAverageTime.emit(ils.Metrics())
mb.metricSaphanaReplicationBacklogSize.emit(ils.Metrics())
mb.metricSaphanaReplicationBacklogTime.emit(ils.Metrics())
mb.metricSaphanaRowStoreMemoryUsed.emit(ils.Metrics())
mb.metricSaphanaSchemaMemoryUsedCurrent.emit(ils.Metrics())
mb.metricSaphanaSchemaMemoryUsedMax.emit(ils.Metrics())
mb.metricSaphanaSchemaOperationCount.emit(ils.Metrics())
mb.metricSaphanaSchemaRecordCompressedCount.emit(ils.Metrics())
mb.metricSaphanaSchemaRecordCount.emit(ils.Metrics())
mb.metricSaphanaServiceCodeSize.emit(ils.Metrics())
mb.metricSaphanaServiceCount.emit(ils.Metrics())
mb.metricSaphanaServiceMemoryCompactorsAllocated.emit(ils.Metrics())
mb.metricSaphanaServiceMemoryCompactorsFreeable.emit(ils.Metrics())
mb.metricSaphanaServiceMemoryEffectiveLimit.emit(ils.Metrics())
mb.metricSaphanaServiceMemoryHeapCurrent.emit(ils.Metrics())
mb.metricSaphanaServiceMemoryLimit.emit(ils.Metrics())
mb.metricSaphanaServiceMemorySharedCurrent.emit(ils.Metrics())
mb.metricSaphanaServiceMemoryUsed.emit(ils.Metrics())
mb.metricSaphanaServiceStackSize.emit(ils.Metrics())
mb.metricSaphanaServiceThreadCount.emit(ils.Metrics())
mb.metricSaphanaTransactionBlocked.emit(ils.Metrics())
mb.metricSaphanaTransactionCount.emit(ils.Metrics())
mb.metricSaphanaUptime.emit(ils.Metrics())
mb.metricSaphanaVolumeOperationCount.emit(ils.Metrics())
mb.metricSaphanaVolumeOperationSize.emit(ils.Metrics())
mb.metricSaphanaVolumeOperationTime.emit(ils.Metrics())
for _, op := range options {
op.apply(rm)
}
for attr, f := range mb.resourceAttributeIncludeFilter {
if val, ok := rm.Resource().Attributes().Get(attr); ok && !f.Matches(val.AsString()) {
return
}
}
for attr, f := range mb.resourceAttributeExcludeFilter {
if val, ok := rm.Resource().Attributes().Get(attr); ok && f.Matches(val.AsString()) {
return
}
}
if ils.Metrics().Len() > 0 {
mb.updateCapacity(rm)
rm.MoveTo(mb.metricsBuffer.ResourceMetrics().AppendEmpty())
}
}
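// An illustrative sketch (not generated by mdatagen): a scraper that collects
// the same metrics from several tenant databases emits one ResourceMetrics
// per tenant by calling EmitForResource inside the loop. tenants, now, and
// scrapeUptime are hypothetical.
//
//	for _, t := range tenants {
//		_ = mb.RecordSaphanaUptimeDataPoint(now, scrapeUptime(t), t.System, t.Database)
//		res := pcommon.NewResource()
//		res.Attributes().PutStr("saphana.host", t.Host)
//		mb.EmitForResource(WithResource(res))
//	}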
// Emit returns all the metrics accumulated by the metrics builder and updates the internal state to be ready for
// recording another set of metrics. This function is responsible for applying all the transformations required to
// produce the metric representation defined in the metadata and user config, e.g. delta or cumulative.
func (mb *MetricsBuilder) Emit(options ...ResourceMetricsOption) pmetric.Metrics {
mb.EmitForResource(options...)
metrics := mb.metricsBuffer
mb.metricsBuffer = pmetric.NewMetrics()
return metrics
}
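// An illustrative sketch (not generated by mdatagen) of the tail of a typical
// scrape: Emit flushes the buffer accumulated by prior Record calls and
// leaves the builder ready for the next cycle. The scraper type and its mb
// field are hypothetical.
//
//	func (s *scraper) scrape(_ context.Context) (pmetric.Metrics, error) {
//		now := pcommon.NewTimestampFromTime(time.Now())
//		_ = s.mb.RecordSaphanaTransactionBlockedDataPoint(now, "0")
//		return s.mb.Emit(), nil
//	}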
// RecordSaphanaAlertCountDataPoint adds a data point to saphana.alert.count metric.
func (mb *MetricsBuilder) RecordSaphanaAlertCountDataPoint(ts pcommon.Timestamp, inputVal string, alertRatingAttributeValue string) error {
val, err := strconv.ParseInt(inputVal, 10, 64)
if err != nil {
return fmt.Errorf("failed to parse int64 for SaphanaAlertCount, value was %s: %w", inputVal, err)
}
mb.metricSaphanaAlertCount.recordDataPoint(mb.startTime, ts, val, alertRatingAttributeValue)
return nil
}
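// Every Record*DataPoint helper below follows the same pattern: the value
// arrives as a string from a HANA monitoring view and is parsed here, so
// callers should collect the returned error instead of discarding it. An
// illustrative sketch (not generated by mdatagen); rows, now, and errs are
// hypothetical:
//
//	for _, row := range rows {
//		if err := mb.RecordSaphanaAlertCountDataPoint(now, row.Count, row.Rating); err != nil {
//			errs = append(errs, err)
//		}
//	}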
// RecordSaphanaBackupLatestDataPoint adds a data point to saphana.backup.latest metric.
func (mb *MetricsBuilder) RecordSaphanaBackupLatestDataPoint(ts pcommon.Timestamp, inputVal string) error {
val, err := strconv.ParseInt(inputVal, 10, 64)
if err != nil {
return fmt.Errorf("failed to parse int64 for SaphanaBackupLatest, value was %s: %w", inputVal, err)
}
mb.metricSaphanaBackupLatest.recordDataPoint(mb.startTime, ts, val)
return nil
}
// RecordSaphanaColumnMemoryUsedDataPoint adds a data point to saphana.column.memory.used metric.
func (mb *MetricsBuilder) RecordSaphanaColumnMemoryUsedDataPoint(ts pcommon.Timestamp, inputVal string, columnMemoryTypeAttributeValue AttributeColumnMemoryType, columnMemorySubtypeAttributeValue AttributeColumnMemorySubtype) error {
val, err := strconv.ParseInt(inputVal, 10, 64)
if err != nil {
return fmt.Errorf("failed to parse int64 for SaphanaColumnMemoryUsed, value was %s: %w", inputVal, err)
}
mb.metricSaphanaColumnMemoryUsed.recordDataPoint(mb.startTime, ts, val, columnMemoryTypeAttributeValue.String(), columnMemorySubtypeAttributeValue.String())
return nil
}
// RecordSaphanaComponentMemoryUsedDataPoint adds a data point to saphana.component.memory.used metric.
func (mb *MetricsBuilder) RecordSaphanaComponentMemoryUsedDataPoint(ts pcommon.Timestamp, inputVal string, componentAttributeValue string) error {
val, err := strconv.ParseInt(inputVal, 10, 64)
if err != nil {
return fmt.Errorf("failed to parse int64 for SaphanaComponentMemoryUsed, value was %s: %w", inputVal, err)
}
mb.metricSaphanaComponentMemoryUsed.recordDataPoint(mb.startTime, ts, val, componentAttributeValue)
return nil
}
// RecordSaphanaConnectionCountDataPoint adds a data point to saphana.connection.count metric.
func (mb *MetricsBuilder) RecordSaphanaConnectionCountDataPoint(ts pcommon.Timestamp, inputVal string, connectionStatusAttributeValue AttributeConnectionStatus) error {
val, err := strconv.ParseInt(inputVal, 10, 64)
if err != nil {
return fmt.Errorf("failed to parse int64 for SaphanaConnectionCount, value was %s: %w", inputVal, err)
}
mb.metricSaphanaConnectionCount.recordDataPoint(mb.startTime, ts, val, connectionStatusAttributeValue.String())
return nil
}
// RecordSaphanaCPUUsedDataPoint adds a data point to saphana.cpu.used metric.
func (mb *MetricsBuilder) RecordSaphanaCPUUsedDataPoint(ts pcommon.Timestamp, inputVal string, cpuTypeAttributeValue AttributeCPUType) error {
val, err := strconv.ParseInt(inputVal, 10, 64)
if err != nil {
return fmt.Errorf("failed to parse int64 for SaphanaCPUUsed, value was %s: %w", inputVal, err)
}
mb.metricSaphanaCPUUsed.recordDataPoint(mb.startTime, ts, val, cpuTypeAttributeValue.String())
return nil
}
// RecordSaphanaDiskSizeCurrentDataPoint adds a data point to saphana.disk.size.current metric.
func (mb *MetricsBuilder) RecordSaphanaDiskSizeCurrentDataPoint(ts pcommon.Timestamp, inputVal string, pathAttributeValue string, diskUsageTypeAttributeValue string, diskStateUsedFreeAttributeValue AttributeDiskStateUsedFree) error {
val, err := strconv.ParseInt(inputVal, 10, 64)
if err != nil {
return fmt.Errorf("failed to parse int64 for SaphanaDiskSizeCurrent, value was %s: %w", inputVal, err)
}
mb.metricSaphanaDiskSizeCurrent.recordDataPoint(mb.startTime, ts, val, pathAttributeValue, diskUsageTypeAttributeValue, diskStateUsedFreeAttributeValue.String())
return nil
}
// RecordSaphanaHostMemoryCurrentDataPoint adds a data point to saphana.host.memory.current metric.
func (mb *MetricsBuilder) RecordSaphanaHostMemoryCurrentDataPoint(ts pcommon.Timestamp, inputVal string, memoryStateUsedFreeAttributeValue AttributeMemoryStateUsedFree) error {
val, err := strconv.ParseInt(inputVal, 10, 64)
if err != nil {
return fmt.Errorf("failed to parse int64 for SaphanaHostMemoryCurrent, value was %s: %w", inputVal, err)
}
mb.metricSaphanaHostMemoryCurrent.recordDataPoint(mb.startTime, ts, val, memoryStateUsedFreeAttributeValue.String())
return nil
}
// RecordSaphanaHostSwapCurrentDataPoint adds a data point to saphana.host.swap.current metric.
func (mb *MetricsBuilder) RecordSaphanaHostSwapCurrentDataPoint(ts pcommon.Timestamp, inputVal string, hostSwapStateAttributeValue AttributeHostSwapState) error {
val, err := strconv.ParseInt(inputVal, 10, 64)
if err != nil {
return fmt.Errorf("failed to parse int64 for SaphanaHostSwapCurrent, value was %s: %w", inputVal, err)
}
mb.metricSaphanaHostSwapCurrent.recordDataPoint(mb.startTime, ts, val, hostSwapStateAttributeValue.String())
return nil
}
// RecordSaphanaInstanceCodeSizeDataPoint adds a data point to saphana.instance.code_size metric.
func (mb *MetricsBuilder) RecordSaphanaInstanceCodeSizeDataPoint(ts pcommon.Timestamp, inputVal string) error {
val, err := strconv.ParseInt(inputVal, 10, 64)
if err != nil {
return fmt.Errorf("failed to parse int64 for SaphanaInstanceCodeSize, value was %s: %w", inputVal, err)
}
mb.metricSaphanaInstanceCodeSize.recordDataPoint(mb.startTime, ts, val)
return nil
}
// RecordSaphanaInstanceMemoryCurrentDataPoint adds a data point to saphana.instance.memory.current metric.
func (mb *MetricsBuilder) RecordSaphanaInstanceMemoryCurrentDataPoint(ts pcommon.Timestamp, inputVal string, memoryStateUsedFreeAttributeValue AttributeMemoryStateUsedFree) error {
val, err := strconv.ParseInt(inputVal, 10, 64)
if err != nil {
return fmt.Errorf("failed to parse int64 for SaphanaInstanceMemoryCurrent, value was %s: %w", inputVal, err)
}
mb.metricSaphanaInstanceMemoryCurrent.recordDataPoint(mb.startTime, ts, val, memoryStateUsedFreeAttributeValue.String())
return nil
}
// RecordSaphanaInstanceMemorySharedAllocatedDataPoint adds a data point to saphana.instance.memory.shared.allocated metric.
func (mb *MetricsBuilder) RecordSaphanaInstanceMemorySharedAllocatedDataPoint(ts pcommon.Timestamp, inputVal string) error {
val, err := strconv.ParseInt(inputVal, 10, 64)
if err != nil {
return fmt.Errorf("failed to parse int64 for SaphanaInstanceMemorySharedAllocated, value was %s: %w", inputVal, err)
}
mb.metricSaphanaInstanceMemorySharedAllocated.recordDataPoint(mb.startTime, ts, val)
return nil
}
// RecordSaphanaInstanceMemoryUsedPeakDataPoint adds a data point to saphana.instance.memory.used.peak metric.
func (mb *MetricsBuilder) RecordSaphanaInstanceMemoryUsedPeakDataPoint(ts pcommon.Timestamp, inputVal string) error {
val, err := strconv.ParseInt(inputVal, 10, 64)
if err != nil {
return fmt.Errorf("failed to parse int64 for SaphanaInstanceMemoryUsedPeak, value was %s: %w", inputVal, err)
}
mb.metricSaphanaInstanceMemoryUsedPeak.recordDataPoint(mb.startTime, ts, val)
return nil
}
// RecordSaphanaLicenseExpirationTimeDataPoint adds a data point to saphana.license.expiration.time metric.
func (mb *MetricsBuilder) RecordSaphanaLicenseExpirationTimeDataPoint(ts pcommon.Timestamp, inputVal string, systemAttributeValue string, productAttributeValue string) error {
val, err := strconv.ParseInt(inputVal, 10, 64)
if err != nil {
return fmt.Errorf("failed to parse int64 for SaphanaLicenseExpirationTime, value was %s: %w", inputVal, err)
}
mb.metricSaphanaLicenseExpirationTime.recordDataPoint(mb.startTime, ts, val, systemAttributeValue, productAttributeValue)
return nil
}
// RecordSaphanaLicenseLimitDataPoint adds a data point to saphana.license.limit metric.
func (mb *MetricsBuilder) RecordSaphanaLicenseLimitDataPoint(ts pcommon.Timestamp, inputVal string, systemAttributeValue string, productAttributeValue string) error {
val, err := strconv.ParseInt(inputVal, 10, 64)
if err != nil {
return fmt.Errorf("failed to parse int64 for SaphanaLicenseLimit, value was %s: %w", inputVal, err)
}
mb.metricSaphanaLicenseLimit.recordDataPoint(mb.startTime, ts, val, systemAttributeValue, productAttributeValue)
return nil
}
// RecordSaphanaLicensePeakDataPoint adds a data point to saphana.license.peak metric.
func (mb *MetricsBuilder) RecordSaphanaLicensePeakDataPoint(ts pcommon.Timestamp, inputVal string, systemAttributeValue string, productAttributeValue string) error {
val, err := strconv.ParseInt(inputVal, 10, 64)
if err != nil {
return fmt.Errorf("failed to parse int64 for SaphanaLicensePeak, value was %s: %w", inputVal, err)
}
mb.metricSaphanaLicensePeak.recordDataPoint(mb.startTime, ts, val, systemAttributeValue, productAttributeValue)
return nil
}
// RecordSaphanaNetworkRequestAverageTimeDataPoint adds a data point to saphana.network.request.average_time metric.
func (mb *MetricsBuilder) RecordSaphanaNetworkRequestAverageTimeDataPoint(ts pcommon.Timestamp, inputVal string) error {
val, err := strconv.ParseFloat(inputVal, 64)
if err != nil {
return fmt.Errorf("failed to parse float64 for SaphanaNetworkRequestAverageTime, value was %s: %w", inputVal, err)
}
mb.metricSaphanaNetworkRequestAverageTime.recordDataPoint(mb.startTime, ts, val)
return nil
}
// RecordSaphanaNetworkRequestCountDataPoint adds a data point to saphana.network.request.count metric.
func (mb *MetricsBuilder) RecordSaphanaNetworkRequestCountDataPoint(ts pcommon.Timestamp, inputVal string, activePendingRequestStateAttributeValue AttributeActivePendingRequestState) error {
val, err := strconv.ParseInt(inputVal, 10, 64)
if err != nil {
return fmt.Errorf("failed to parse int64 for SaphanaNetworkRequestCount, value was %s: %w", inputVal, err)
}
mb.metricSaphanaNetworkRequestCount.recordDataPoint(mb.startTime, ts, val, activePendingRequestStateAttributeValue.String())
return nil
}
// RecordSaphanaNetworkRequestFinishedCountDataPoint adds a data point to saphana.network.request.finished.count metric.
func (mb *MetricsBuilder) RecordSaphanaNetworkRequestFinishedCountDataPoint(ts pcommon.Timestamp, inputVal string, internalExternalRequestTypeAttributeValue AttributeInternalExternalRequestType) error {
val, err := strconv.ParseInt(inputVal, 10, 64)
if err != nil {
return fmt.Errorf("failed to parse int64 for SaphanaNetworkRequestFinishedCount, value was %s: %w", inputVal, err)
}
mb.metricSaphanaNetworkRequestFinishedCount.recordDataPoint(mb.startTime, ts, val, internalExternalRequestTypeAttributeValue.String())
return nil
}
// RecordSaphanaReplicationAverageTimeDataPoint adds a data point to saphana.replication.average_time metric.
func (mb *MetricsBuilder) RecordSaphanaReplicationAverageTimeDataPoint(ts pcommon.Timestamp, inputVal string, primaryHostAttributeValue string, secondaryHostAttributeValue string, portAttributeValue string, replicationModeAttributeValue string) error {
val, err := strconv.ParseFloat(inputVal, 64)
if err != nil {
return fmt.Errorf("failed to parse float64 for SaphanaReplicationAverageTime, value was %s: %w", inputVal, err)
}
mb.metricSaphanaReplicationAverageTime.recordDataPoint(mb.startTime, ts, val, primaryHostAttributeValue, secondaryHostAttributeValue, portAttributeValue, replicationModeAttributeValue)
return nil
}
// RecordSaphanaReplicationBacklogSizeDataPoint adds a data point to saphana.replication.backlog.size metric.
func (mb *MetricsBuilder) RecordSaphanaReplicationBacklogSizeDataPoint(ts pcommon.Timestamp, inputVal string, primaryHostAttributeValue string, secondaryHostAttributeValue string, portAttributeValue string, replicationModeAttributeValue string) error {
val, err := strconv.ParseInt(inputVal, 10, 64)
if err != nil {
return fmt.Errorf("failed to parse int64 for SaphanaReplicationBacklogSize, value was %s: %w", inputVal, err)
}
mb.metricSaphanaReplicationBacklogSize.recordDataPoint(mb.startTime, ts, val, primaryHostAttributeValue, secondaryHostAttributeValue, portAttributeValue, replicationModeAttributeValue)
return nil
}
// RecordSaphanaReplicationBacklogTimeDataPoint adds a data point to saphana.replication.backlog.time metric.
func (mb *MetricsBuilder) RecordSaphanaReplicationBacklogTimeDataPoint(ts pcommon.Timestamp, inputVal string, primaryHostAttributeValue string, secondaryHostAttributeValue string, portAttributeValue string, replicationModeAttributeValue string) error {
val, err := strconv.ParseInt(inputVal, 10, 64)
if err != nil {
return fmt.Errorf("failed to parse int64 for SaphanaReplicationBacklogTime, value was %s: %w", inputVal, err)
}
mb.metricSaphanaReplicationBacklogTime.recordDataPoint(mb.startTime, ts, val, primaryHostAttributeValue, secondaryHostAttributeValue, portAttributeValue, replicationModeAttributeValue)
return nil
}
// RecordSaphanaRowStoreMemoryUsedDataPoint adds a data point to saphana.row_store.memory.used metric.
func (mb *MetricsBuilder) RecordSaphanaRowStoreMemoryUsedDataPoint(ts pcommon.Timestamp, inputVal string, rowMemoryTypeAttributeValue AttributeRowMemoryType) error {
val, err := strconv.ParseInt(inputVal, 10, 64)
if err != nil {
return fmt.Errorf("failed to parse int64 for SaphanaRowStoreMemoryUsed, value was %s: %w", inputVal, err)
}
mb.metricSaphanaRowStoreMemoryUsed.recordDataPoint(mb.startTime, ts, val, rowMemoryTypeAttributeValue.String())
return nil
}
// RecordSaphanaSchemaMemoryUsedCurrentDataPoint adds a data point to saphana.schema.memory.used.current metric.
func (mb *MetricsBuilder) RecordSaphanaSchemaMemoryUsedCurrentDataPoint(ts pcommon.Timestamp, inputVal string, schemaAttributeValue string, schemaMemoryTypeAttributeValue AttributeSchemaMemoryType) error {
val, err := strconv.ParseInt(inputVal, 10, 64)
if err != nil {
return fmt.Errorf("failed to parse int64 for SaphanaSchemaMemoryUsedCurrent, value was %s: %w", inputVal, err)
}
mb.metricSaphanaSchemaMemoryUsedCurrent.recordDataPoint(mb.startTime, ts, val, schemaAttributeValue, schemaMemoryTypeAttributeValue.String())
return nil
}
// RecordSaphanaSchemaMemoryUsedMaxDataPoint adds a data point to saphana.schema.memory.used.max metric.
func (mb *MetricsBuilder) RecordSaphanaSchemaMemoryUsedMaxDataPoint(ts pcommon.Timestamp, inputVal string, schemaAttributeValue string) error {
val, err := strconv.ParseInt(inputVal, 10, 64)
if err != nil {
return fmt.Errorf("failed to parse int64 for SaphanaSchemaMemoryUsedMax, value was %s: %w", inputVal, err)
}
mb.metricSaphanaSchemaMemoryUsedMax.recordDataPoint(mb.startTime, ts, val, schemaAttributeValue)
return nil
}
// RecordSaphanaSchemaOperationCountDataPoint adds a data point to saphana.schema.operation.count metric.
func (mb *MetricsBuilder) RecordSaphanaSchemaOperationCountDataPoint(ts pcommon.Timestamp, inputVal string, schemaAttributeValue string, schemaOperationTypeAttributeValue AttributeSchemaOperationType) error {
val, err := strconv.ParseInt(inputVal, 10, 64)
if err != nil {
return fmt.Errorf("failed to parse int64 for SaphanaSchemaOperationCount, value was %s: %w", inputVal, err)
}
mb.metricSaphanaSchemaOperationCount.recordDataPoint(mb.startTime, ts, val, schemaAttributeValue, schemaOperationTypeAttributeValue.String())
return nil
}
// RecordSaphanaSchemaRecordCompressedCountDataPoint adds a data point to saphana.schema.record.compressed.count metric.
func (mb *MetricsBuilder) RecordSaphanaSchemaRecordCompressedCountDataPoint(ts pcommon.Timestamp, inputVal string, schemaAttributeValue string) error {
val, err := strconv.ParseInt(inputVal, 10, 64)
if err != nil {
return fmt.Errorf("failed to parse int64 for SaphanaSchemaRecordCompressedCount, value was %s: %w", inputVal, err)
}
mb.metricSaphanaSchemaRecordCompressedCount.recordDataPoint(mb.startTime, ts, val, schemaAttributeValue)
return nil
}
// RecordSaphanaSchemaRecordCountDataPoint adds a data point to saphana.schema.record.count metric.
func (mb *MetricsBuilder) RecordSaphanaSchemaRecordCountDataPoint(ts pcommon.Timestamp, inputVal string, schemaAttributeValue string, schemaRecordTypeAttributeValue AttributeSchemaRecordType) error {
val, err := strconv.ParseInt(inputVal, 10, 64)
if err != nil {
return fmt.Errorf("failed to parse int64 for SaphanaSchemaRecordCount, value was %s: %w", inputVal, err)
}
mb.metricSaphanaSchemaRecordCount.recordDataPoint(mb.startTime, ts, val, schemaAttributeValue, schemaRecordTypeAttributeValue.String())
return nil
}
// RecordSaphanaServiceCodeSizeDataPoint adds a data point to saphana.service.code_size metric.
func (mb *MetricsBuilder) RecordSaphanaServiceCodeSizeDataPoint(ts pcommon.Timestamp, inputVal string, serviceAttributeValue string) error {
val, err := strconv.ParseInt(inputVal, 10, 64)
if err != nil {
return fmt.Errorf("failed to parse int64 for SaphanaServiceCodeSize, value was %s: %w", inputVal, err)
}
mb.metricSaphanaServiceCodeSize.recordDataPoint(mb.startTime, ts, val, serviceAttributeValue)
return nil
}
// RecordSaphanaServiceCountDataPoint adds a data point to saphana.service.count metric.
func (mb *MetricsBuilder) RecordSaphanaServiceCountDataPoint(ts pcommon.Timestamp, inputVal string, serviceStatusAttributeValue AttributeServiceStatus) error {
val, err := strconv.ParseInt(inputVal, 10, 64)
if err != nil {
return fmt.Errorf("failed to parse int64 for SaphanaServiceCount, value was %s: %w", inputVal, err)
}
mb.metricSaphanaServiceCount.recordDataPoint(mb.startTime, ts, val, serviceStatusAttributeValue.String())
return nil
}
// RecordSaphanaServiceMemoryCompactorsAllocatedDataPoint adds a data point to saphana.service.memory.compactors.allocated metric.
func (mb *MetricsBuilder) RecordSaphanaServiceMemoryCompactorsAllocatedDataPoint(ts pcommon.Timestamp, inputVal string, serviceAttributeValue string) error {
val, err := strconv.ParseInt(inputVal, 10, 64)
if err != nil {
return fmt.Errorf("failed to parse int64 for SaphanaServiceMemoryCompactorsAllocated, value was %s: %w", inputVal, err)
}
mb.metricSaphanaServiceMemoryCompactorsAllocated.recordDataPoint(mb.startTime, ts, val, serviceAttributeValue)
return nil
}
// RecordSaphanaServiceMemoryCompactorsFreeableDataPoint adds a data point to saphana.service.memory.compactors.freeable metric.
func (mb *MetricsBuilder) RecordSaphanaServiceMemoryCompactorsFreeableDataPoint(ts pcommon.Timestamp, inputVal string, serviceAttributeValue string) error {
val, err := strconv.ParseInt(inputVal, 10, 64)
if err != nil {
return fmt.Errorf("failed to parse int64 for SaphanaServiceMemoryCompactorsFreeable, value was %s: %w", inputVal, err)
}
mb.metricSaphanaServiceMemoryCompactorsFreeable.recordDataPoint(mb.startTime, ts, val, serviceAttributeValue)
return nil
}
// RecordSaphanaServiceMemoryEffectiveLimitDataPoint adds a data point to saphana.service.memory.effective_limit metric.
func (mb *MetricsBuilder) RecordSaphanaServiceMemoryEffectiveLimitDataPoint(ts pcommon.Timestamp, inputVal string, serviceAttributeValue string) error {
val, err := strconv.ParseInt(inputVal, 10, 64)
if err != nil {
return fmt.Errorf("failed to parse int64 for SaphanaServiceMemoryEffectiveLimit, value was %s: %w", inputVal, err)
}
mb.metricSaphanaServiceMemoryEffectiveLimit.recordDataPoint(mb.startTime, ts, val, serviceAttributeValue)
return nil
}
// RecordSaphanaServiceMemoryHeapCurrentDataPoint adds a data point to saphana.service.memory.heap.current metric.
func (mb *MetricsBuilder) RecordSaphanaServiceMemoryHeapCurrentDataPoint(ts pcommon.Timestamp, inputVal string, serviceAttributeValue string, memoryStateUsedFreeAttributeValue AttributeMemoryStateUsedFree) error {
val, err := strconv.ParseInt(inputVal, 10, 64)
if err != nil {
return fmt.Errorf("failed to parse int64 for SaphanaServiceMemoryHeapCurrent, value was %s: %w", inputVal, err)
}
mb.metricSaphanaServiceMemoryHeapCurrent.recordDataPoint(mb.startTime, ts, val, serviceAttributeValue, memoryStateUsedFreeAttributeValue.String())
return nil
}
// RecordSaphanaServiceMemoryLimitDataPoint adds a data point to saphana.service.memory.limit metric.
func (mb *MetricsBuilder) RecordSaphanaServiceMemoryLimitDataPoint(ts pcommon.Timestamp, inputVal string, serviceAttributeValue string) error {
val, err := strconv.ParseInt(inputVal, 10, 64)
if err != nil {
return fmt.Errorf("failed to parse int64 for SaphanaServiceMemoryLimit, value was %s: %w", inputVal, err)
}
mb.metricSaphanaServiceMemoryLimit.recordDataPoint(mb.startTime, ts, val, serviceAttributeValue)
return nil
}
// RecordSaphanaServiceMemorySharedCurrentDataPoint adds a data point to saphana.service.memory.shared.current metric.
func (mb *MetricsBuilder) RecordSaphanaServiceMemorySharedCurrentDataPoint(ts pcommon.Timestamp, inputVal string, serviceAttributeValue string, memoryStateUsedFreeAttributeValue AttributeMemoryStateUsedFree) error {
val, err := strconv.ParseInt(inputVal, 10, 64)
if err != nil {
return fmt.Errorf("failed to parse int64 for SaphanaServiceMemorySharedCurrent, value was %s: %w", inputVal, err)
}
mb.metricSaphanaServiceMemorySharedCurrent.recordDataPoint(mb.startTime, ts, val, serviceAttributeValue, memoryStateUsedFreeAttributeValue.String())
return nil
}
// RecordSaphanaServiceMemoryUsedDataPoint adds a data point to saphana.service.memory.used metric.
func (mb *MetricsBuilder) RecordSaphanaServiceMemoryUsedDataPoint(ts pcommon.Timestamp, inputVal string, serviceAttributeValue string, serviceMemoryUsedTypeAttributeValue AttributeServiceMemoryUsedType) error {
val, err := strconv.ParseInt(inputVal, 10, 64)
if err != nil {
return fmt.Errorf("failed to parse int64 for SaphanaServiceMemoryUsed, value was %s: %w", inputVal, err)
}
mb.metricSaphanaServiceMemoryUsed.recordDataPoint(mb.startTime, ts, val, serviceAttributeValue, serviceMemoryUsedTypeAttributeValue.String())
return nil
}
// RecordSaphanaServiceStackSizeDataPoint adds a data point to saphana.service.stack_size metric.
func (mb *MetricsBuilder) RecordSaphanaServiceStackSizeDataPoint(ts pcommon.Timestamp, inputVal string, serviceAttributeValue string) error {
val, err := strconv.ParseInt(inputVal, 10, 64)
if err != nil {
return fmt.Errorf("failed to parse int64 for SaphanaServiceStackSize, value was %s: %w", inputVal, err)
}
mb.metricSaphanaServiceStackSize.recordDataPoint(mb.startTime, ts, val, serviceAttributeValue)
return nil
}
// RecordSaphanaServiceThreadCountDataPoint adds a data point to saphana.service.thread.count metric.
func (mb *MetricsBuilder) RecordSaphanaServiceThreadCountDataPoint(ts pcommon.Timestamp, inputVal string, threadStatusAttributeValue AttributeThreadStatus) error {
val, err := strconv.ParseInt(inputVal, 10, 64)
if err != nil {
return fmt.Errorf("failed to parse int64 for SaphanaServiceThreadCount, value was %s: %w", inputVal, err)
}
mb.metricSaphanaServiceThreadCount.recordDataPoint(mb.startTime, ts, val, threadStatusAttributeValue.String())
return nil
}
// RecordSaphanaTransactionBlockedDataPoint adds a data point to saphana.transaction.blocked metric.
func (mb *MetricsBuilder) RecordSaphanaTransactionBlockedDataPoint(ts pcommon.Timestamp, inputVal string) error {
val, err := strconv.ParseInt(inputVal, 10, 64)
if err != nil {
return fmt.Errorf("failed to parse int64 for SaphanaTransactionBlocked, value was %s: %w", inputVal, err)
}
mb.metricSaphanaTransactionBlocked.recordDataPoint(mb.startTime, ts, val)
return nil
}
// RecordSaphanaTransactionCountDataPoint adds a data point to saphana.transaction.count metric.
func (mb *MetricsBuilder) RecordSaphanaTransactionCountDataPoint(ts pcommon.Timestamp, inputVal string, transactionTypeAttributeValue AttributeTransactionType) error {
val, err := strconv.ParseInt(inputVal, 10, 64)
if err != nil {
return fmt.Errorf("failed to parse int64 for SaphanaTransactionCount, value was %s: %w", inputVal, err)
}
mb.metricSaphanaTransactionCount.recordDataPoint(mb.startTime, ts, val, transactionTypeAttributeValue.String())
return nil
}
// RecordSaphanaUptimeDataPoint adds a data point to saphana.uptime metric.
func (mb *MetricsBuilder) RecordSaphanaUptimeDataPoint(ts pcommon.Timestamp, inputVal string, systemAttributeValue string, databaseAttributeValue string) error {
val, err := strconv.ParseInt(inputVal, 10, 64)
if err != nil {
return fmt.Errorf("failed to parse int64 for SaphanaUptime, value was %s: %w", inputVal, err)
}
mb.metricSaphanaUptime.recordDataPoint(mb.startTime, ts, val, systemAttributeValue, databaseAttributeValue)
return nil
}
// RecordSaphanaVolumeOperationCountDataPoint adds a data point to saphana.volume.operation.count metric.
func (mb *MetricsBuilder) RecordSaphanaVolumeOperationCountDataPoint(ts pcommon.Timestamp, inputVal string, pathAttributeValue string, diskUsageTypeAttributeValue string, volumeOperationTypeAttributeValue AttributeVolumeOperationType) error {
val, err := strconv.ParseInt(inputVal, 10, 64)
if err != nil {
return fmt.Errorf("failed to parse int64 for SaphanaVolumeOperationCount, value was %s: %w", inputVal, err)
}
mb.metricSaphanaVolumeOperationCount.recordDataPoint(mb.startTime, ts, val, pathAttributeValue, diskUsageTypeAttributeValue, volumeOperationTypeAttributeValue.String())
return nil
}
// RecordSaphanaVolumeOperationSizeDataPoint adds a data point to saphana.volume.operation.size metric.
func (mb *MetricsBuilder) RecordSaphanaVolumeOperationSizeDataPoint(ts pcommon.Timestamp, inputVal string, pathAttributeValue string, diskUsageTypeAttributeValue string, volumeOperationTypeAttributeValue AttributeVolumeOperationType) error {
val, err := strconv.ParseInt(inputVal, 10, 64)
if err != nil {
return fmt.Errorf("failed to parse int64 for SaphanaVolumeOperationSize, value was %s: %w", inputVal, err)
}
mb.metricSaphanaVolumeOperationSize.recordDataPoint(mb.startTime, ts, val, pathAttributeValue, diskUsageTypeAttributeValue, volumeOperationTypeAttributeValue.String())
return nil
}
// RecordSaphanaVolumeOperationTimeDataPoint adds a data point to saphana.volume.operation.time metric.
func (mb *MetricsBuilder) RecordSaphanaVolumeOperationTimeDataPoint(ts pcommon.Timestamp, inputVal string, pathAttributeValue string, diskUsageTypeAttributeValue string, volumeOperationTypeAttributeValue AttributeVolumeOperationType) error {
val, err := strconv.ParseInt(inputVal, 10, 64)
if err != nil {
return fmt.Errorf("failed to parse int64 for SaphanaVolumeOperationTime, value was %s: %w", inputVal, err)
}
mb.metricSaphanaVolumeOperationTime.recordDataPoint(mb.startTime, ts, val, pathAttributeValue, diskUsageTypeAttributeValue, volumeOperationTypeAttributeValue.String())
return nil
}
// Reset resets the metrics builder to its initial state. It should be used when the external metrics source is
// restarted, so that the metrics builder updates its startTime and resets its internal state accordingly.
func (mb *MetricsBuilder) Reset(options ...MetricBuilderOption) {
mb.startTime = pcommon.NewTimestampFromTime(time.Now())
for _, op := range options {
op.apply(mb)
}
}
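// An illustrative sketch (not generated by mdatagen): after reconnecting to a
// restarted HANA instance, reset the builder so cumulative metrics restart
// from a fresh startTime. WithStartTime is assumed to be the
// MetricBuilderOption defined earlier in this file; reconnectedAt is
// hypothetical.
//
//	mb.Reset(WithStartTime(pcommon.NewTimestampFromTime(reconnectedAt)))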