// receiver/oracledbreceiver/internal/metadata/generated_metrics.go
// Code generated by mdatagen. DO NOT EDIT.
package metadata
import (
"fmt"
"strconv"
"time"
"go.opentelemetry.io/collector/component"
"go.opentelemetry.io/collector/filter"
"go.opentelemetry.io/collector/pdata/pcommon"
"go.opentelemetry.io/collector/pdata/pmetric"
"go.opentelemetry.io/collector/receiver"
)
// MetricsInfo exposes the canonical name of every metric this receiver can
// emit, so callers can reference metric names without hard-coding strings.
var MetricsInfo = metricsInfo{
OracledbConsistentGets: metricInfo{
Name: "oracledb.consistent_gets",
},
OracledbCPUTime: metricInfo{
Name: "oracledb.cpu_time",
},
OracledbDbBlockGets: metricInfo{
Name: "oracledb.db_block_gets",
},
OracledbDdlStatementsParallelized: metricInfo{
Name: "oracledb.ddl_statements_parallelized",
},
OracledbDmlLocksLimit: metricInfo{
Name: "oracledb.dml_locks.limit",
},
OracledbDmlLocksUsage: metricInfo{
Name: "oracledb.dml_locks.usage",
},
OracledbDmlStatementsParallelized: metricInfo{
Name: "oracledb.dml_statements_parallelized",
},
OracledbEnqueueDeadlocks: metricInfo{
Name: "oracledb.enqueue_deadlocks",
},
OracledbEnqueueLocksLimit: metricInfo{
Name: "oracledb.enqueue_locks.limit",
},
OracledbEnqueueLocksUsage: metricInfo{
Name: "oracledb.enqueue_locks.usage",
},
OracledbEnqueueResourcesLimit: metricInfo{
Name: "oracledb.enqueue_resources.limit",
},
OracledbEnqueueResourcesUsage: metricInfo{
Name: "oracledb.enqueue_resources.usage",
},
OracledbExchangeDeadlocks: metricInfo{
Name: "oracledb.exchange_deadlocks",
},
OracledbExecutions: metricInfo{
Name: "oracledb.executions",
},
OracledbHardParses: metricInfo{
Name: "oracledb.hard_parses",
},
OracledbLogicalReads: metricInfo{
Name: "oracledb.logical_reads",
},
OracledbParallelOperationsDowngraded1To25Pct: metricInfo{
Name: "oracledb.parallel_operations_downgraded_1_to_25_pct",
},
OracledbParallelOperationsDowngraded25To50Pct: metricInfo{
Name: "oracledb.parallel_operations_downgraded_25_to_50_pct",
},
OracledbParallelOperationsDowngraded50To75Pct: metricInfo{
Name: "oracledb.parallel_operations_downgraded_50_to_75_pct",
},
OracledbParallelOperationsDowngraded75To99Pct: metricInfo{
Name: "oracledb.parallel_operations_downgraded_75_to_99_pct",
},
OracledbParallelOperationsDowngradedToSerial: metricInfo{
Name: "oracledb.parallel_operations_downgraded_to_serial",
},
OracledbParallelOperationsNotDowngraded: metricInfo{
Name: "oracledb.parallel_operations_not_downgraded",
},
OracledbParseCalls: metricInfo{
Name: "oracledb.parse_calls",
},
OracledbPgaMemory: metricInfo{
Name: "oracledb.pga_memory",
},
OracledbPhysicalReadIoRequests: metricInfo{
Name: "oracledb.physical_read_io_requests",
},
OracledbPhysicalReads: metricInfo{
Name: "oracledb.physical_reads",
},
OracledbPhysicalReadsDirect: metricInfo{
Name: "oracledb.physical_reads_direct",
},
OracledbPhysicalWriteIoRequests: metricInfo{
Name: "oracledb.physical_write_io_requests",
},
OracledbPhysicalWrites: metricInfo{
Name: "oracledb.physical_writes",
},
OracledbPhysicalWritesDirect: metricInfo{
Name: "oracledb.physical_writes_direct",
},
OracledbProcessesLimit: metricInfo{
Name: "oracledb.processes.limit",
},
OracledbProcessesUsage: metricInfo{
Name: "oracledb.processes.usage",
},
OracledbQueriesParallelized: metricInfo{
Name: "oracledb.queries_parallelized",
},
OracledbSessionsLimit: metricInfo{
Name: "oracledb.sessions.limit",
},
OracledbSessionsUsage: metricInfo{
Name: "oracledb.sessions.usage",
},
OracledbTablespaceSizeLimit: metricInfo{
Name: "oracledb.tablespace_size.limit",
},
OracledbTablespaceSizeUsage: metricInfo{
Name: "oracledb.tablespace_size.usage",
},
OracledbTransactionsLimit: metricInfo{
Name: "oracledb.transactions.limit",
},
OracledbTransactionsUsage: metricInfo{
Name: "oracledb.transactions.usage",
},
OracledbUserCommits: metricInfo{
Name: "oracledb.user_commits",
},
OracledbUserRollbacks: metricInfo{
Name: "oracledb.user_rollbacks",
},
}
// metricsInfo groups one metricInfo entry per metric defined in this
// receiver's metadata; it is the type backing the exported MetricsInfo var.
type metricsInfo struct {
OracledbConsistentGets metricInfo
OracledbCPUTime metricInfo
OracledbDbBlockGets metricInfo
OracledbDdlStatementsParallelized metricInfo
OracledbDmlLocksLimit metricInfo
OracledbDmlLocksUsage metricInfo
OracledbDmlStatementsParallelized metricInfo
OracledbEnqueueDeadlocks metricInfo
OracledbEnqueueLocksLimit metricInfo
OracledbEnqueueLocksUsage metricInfo
OracledbEnqueueResourcesLimit metricInfo
OracledbEnqueueResourcesUsage metricInfo
OracledbExchangeDeadlocks metricInfo
OracledbExecutions metricInfo
OracledbHardParses metricInfo
OracledbLogicalReads metricInfo
OracledbParallelOperationsDowngraded1To25Pct metricInfo
OracledbParallelOperationsDowngraded25To50Pct metricInfo
OracledbParallelOperationsDowngraded50To75Pct metricInfo
OracledbParallelOperationsDowngraded75To99Pct metricInfo
OracledbParallelOperationsDowngradedToSerial metricInfo
OracledbParallelOperationsNotDowngraded metricInfo
OracledbParseCalls metricInfo
OracledbPgaMemory metricInfo
OracledbPhysicalReadIoRequests metricInfo
OracledbPhysicalReads metricInfo
OracledbPhysicalReadsDirect metricInfo
OracledbPhysicalWriteIoRequests metricInfo
OracledbPhysicalWrites metricInfo
OracledbPhysicalWritesDirect metricInfo
OracledbProcessesLimit metricInfo
OracledbProcessesUsage metricInfo
OracledbQueriesParallelized metricInfo
OracledbSessionsLimit metricInfo
OracledbSessionsUsage metricInfo
OracledbTablespaceSizeLimit metricInfo
OracledbTablespaceSizeUsage metricInfo
OracledbTransactionsLimit metricInfo
OracledbTransactionsUsage metricInfo
OracledbUserCommits metricInfo
OracledbUserRollbacks metricInfo
}
// metricInfo holds the canonical name of a single metric.
type metricInfo struct {
Name string
}
// metricOracledbConsistentGets buffers data points for the
// oracledb.consistent_gets metric until they are emitted.
type metricOracledbConsistentGets struct {
	data     pmetric.Metric // data buffer for generated metric.
	config   MetricConfig   // metric config provided by user.
	capacity int            // max observed number of data points added to the metric.
}

// init fills oracledb.consistent_gets metric with initial data.
func (m *metricOracledbConsistentGets) init() {
	m.data.SetName("oracledb.consistent_gets")
	m.data.SetDescription("Number of times a consistent read was requested for a block from the buffer cache.")
	m.data.SetUnit("{gets}")
	sum := m.data.SetEmptySum()
	sum.SetIsMonotonic(true)
	sum.SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
}

// recordDataPoint appends one data point; it is a no-op when the metric is disabled.
func (m *metricOracledbConsistentGets) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
	if !m.config.Enabled {
		return
	}
	point := m.data.Sum().DataPoints().AppendEmpty()
	point.SetStartTimestamp(start)
	point.SetTimestamp(ts)
	point.SetIntValue(val)
}

// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricOracledbConsistentGets) updateCapacity() {
	if n := m.data.Sum().DataPoints().Len(); n > m.capacity {
		m.capacity = n
	}
}

// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricOracledbConsistentGets) emit(metrics pmetric.MetricSlice) {
	if !m.config.Enabled || m.data.Sum().DataPoints().Len() == 0 {
		return
	}
	m.updateCapacity()
	m.data.MoveTo(metrics.AppendEmpty())
	m.init()
}

// newMetricOracledbConsistentGets builds the metric wrapper, allocating its
// buffer only when the metric is enabled.
func newMetricOracledbConsistentGets(cfg MetricConfig) metricOracledbConsistentGets {
	m := metricOracledbConsistentGets{config: cfg}
	if !cfg.Enabled {
		return m
	}
	m.data = pmetric.NewMetric()
	m.init()
	return m
}
// metricOracledbCPUTime buffers data points for the oracledb.cpu_time metric
// until they are emitted.
type metricOracledbCPUTime struct {
	data     pmetric.Metric // data buffer for generated metric.
	config   MetricConfig   // metric config provided by user.
	capacity int            // max observed number of data points added to the metric.
}

// init fills oracledb.cpu_time metric with initial data.
func (m *metricOracledbCPUTime) init() {
	m.data.SetName("oracledb.cpu_time")
	m.data.SetDescription("Cumulative CPU time, in seconds")
	m.data.SetUnit("s")
	sum := m.data.SetEmptySum()
	sum.SetIsMonotonic(true)
	sum.SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
}

// recordDataPoint appends one data point; it is a no-op when the metric is disabled.
func (m *metricOracledbCPUTime) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64) {
	if !m.config.Enabled {
		return
	}
	point := m.data.Sum().DataPoints().AppendEmpty()
	point.SetStartTimestamp(start)
	point.SetTimestamp(ts)
	point.SetDoubleValue(val)
}

// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricOracledbCPUTime) updateCapacity() {
	if n := m.data.Sum().DataPoints().Len(); n > m.capacity {
		m.capacity = n
	}
}

// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricOracledbCPUTime) emit(metrics pmetric.MetricSlice) {
	if !m.config.Enabled || m.data.Sum().DataPoints().Len() == 0 {
		return
	}
	m.updateCapacity()
	m.data.MoveTo(metrics.AppendEmpty())
	m.init()
}

// newMetricOracledbCPUTime builds the metric wrapper, allocating its buffer
// only when the metric is enabled.
func newMetricOracledbCPUTime(cfg MetricConfig) metricOracledbCPUTime {
	m := metricOracledbCPUTime{config: cfg}
	if !cfg.Enabled {
		return m
	}
	m.data = pmetric.NewMetric()
	m.init()
	return m
}
// metricOracledbDbBlockGets buffers data points for the
// oracledb.db_block_gets metric until they are emitted.
type metricOracledbDbBlockGets struct {
	data     pmetric.Metric // data buffer for generated metric.
	config   MetricConfig   // metric config provided by user.
	capacity int            // max observed number of data points added to the metric.
}

// init fills oracledb.db_block_gets metric with initial data.
func (m *metricOracledbDbBlockGets) init() {
	m.data.SetName("oracledb.db_block_gets")
	m.data.SetDescription("Number of times a current block was requested from the buffer cache.")
	m.data.SetUnit("{gets}")
	sum := m.data.SetEmptySum()
	sum.SetIsMonotonic(true)
	sum.SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
}

// recordDataPoint appends one data point; it is a no-op when the metric is disabled.
func (m *metricOracledbDbBlockGets) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
	if !m.config.Enabled {
		return
	}
	point := m.data.Sum().DataPoints().AppendEmpty()
	point.SetStartTimestamp(start)
	point.SetTimestamp(ts)
	point.SetIntValue(val)
}

// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricOracledbDbBlockGets) updateCapacity() {
	if n := m.data.Sum().DataPoints().Len(); n > m.capacity {
		m.capacity = n
	}
}

// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricOracledbDbBlockGets) emit(metrics pmetric.MetricSlice) {
	if !m.config.Enabled || m.data.Sum().DataPoints().Len() == 0 {
		return
	}
	m.updateCapacity()
	m.data.MoveTo(metrics.AppendEmpty())
	m.init()
}

// newMetricOracledbDbBlockGets builds the metric wrapper, allocating its
// buffer only when the metric is enabled.
func newMetricOracledbDbBlockGets(cfg MetricConfig) metricOracledbDbBlockGets {
	m := metricOracledbDbBlockGets{config: cfg}
	if !cfg.Enabled {
		return m
	}
	m.data = pmetric.NewMetric()
	m.init()
	return m
}
// metricOracledbDdlStatementsParallelized buffers data points for the
// oracledb.ddl_statements_parallelized metric until they are emitted.
type metricOracledbDdlStatementsParallelized struct {
	data     pmetric.Metric // data buffer for generated metric.
	config   MetricConfig   // metric config provided by user.
	capacity int            // max observed number of data points added to the metric.
}

// init fills oracledb.ddl_statements_parallelized metric with initial data.
func (m *metricOracledbDdlStatementsParallelized) init() {
	m.data.SetName("oracledb.ddl_statements_parallelized")
	m.data.SetDescription("Number of DDL statements that were executed in parallel")
	m.data.SetUnit("{statements}")
	sum := m.data.SetEmptySum()
	sum.SetIsMonotonic(true)
	sum.SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
}

// recordDataPoint appends one data point; it is a no-op when the metric is disabled.
func (m *metricOracledbDdlStatementsParallelized) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
	if !m.config.Enabled {
		return
	}
	point := m.data.Sum().DataPoints().AppendEmpty()
	point.SetStartTimestamp(start)
	point.SetTimestamp(ts)
	point.SetIntValue(val)
}

// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricOracledbDdlStatementsParallelized) updateCapacity() {
	if n := m.data.Sum().DataPoints().Len(); n > m.capacity {
		m.capacity = n
	}
}

// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricOracledbDdlStatementsParallelized) emit(metrics pmetric.MetricSlice) {
	if !m.config.Enabled || m.data.Sum().DataPoints().Len() == 0 {
		return
	}
	m.updateCapacity()
	m.data.MoveTo(metrics.AppendEmpty())
	m.init()
}

// newMetricOracledbDdlStatementsParallelized builds the metric wrapper,
// allocating its buffer only when the metric is enabled.
func newMetricOracledbDdlStatementsParallelized(cfg MetricConfig) metricOracledbDdlStatementsParallelized {
	m := metricOracledbDdlStatementsParallelized{config: cfg}
	if !cfg.Enabled {
		return m
	}
	m.data = pmetric.NewMetric()
	m.init()
	return m
}
// metricOracledbDmlLocksLimit buffers data points for the
// oracledb.dml_locks.limit gauge until they are emitted.
type metricOracledbDmlLocksLimit struct {
	data     pmetric.Metric // data buffer for generated metric.
	config   MetricConfig   // metric config provided by user.
	capacity int            // max observed number of data points added to the metric.
}

// init fills oracledb.dml_locks.limit metric with initial data.
func (m *metricOracledbDmlLocksLimit) init() {
	m.data.SetName("oracledb.dml_locks.limit")
	m.data.SetDescription("Maximum limit of active DML (Data Manipulation Language) locks, -1 if unlimited.")
	m.data.SetUnit("{locks}")
	m.data.SetEmptyGauge()
}

// recordDataPoint appends one data point; it is a no-op when the metric is disabled.
func (m *metricOracledbDmlLocksLimit) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
	if !m.config.Enabled {
		return
	}
	point := m.data.Gauge().DataPoints().AppendEmpty()
	point.SetStartTimestamp(start)
	point.SetTimestamp(ts)
	point.SetIntValue(val)
}

// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricOracledbDmlLocksLimit) updateCapacity() {
	if n := m.data.Gauge().DataPoints().Len(); n > m.capacity {
		m.capacity = n
	}
}

// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricOracledbDmlLocksLimit) emit(metrics pmetric.MetricSlice) {
	if !m.config.Enabled || m.data.Gauge().DataPoints().Len() == 0 {
		return
	}
	m.updateCapacity()
	m.data.MoveTo(metrics.AppendEmpty())
	m.init()
}

// newMetricOracledbDmlLocksLimit builds the metric wrapper, allocating its
// buffer only when the metric is enabled.
func newMetricOracledbDmlLocksLimit(cfg MetricConfig) metricOracledbDmlLocksLimit {
	m := metricOracledbDmlLocksLimit{config: cfg}
	if !cfg.Enabled {
		return m
	}
	m.data = pmetric.NewMetric()
	m.init()
	return m
}
// metricOracledbDmlLocksUsage buffers data points for the
// oracledb.dml_locks.usage gauge until they are emitted.
type metricOracledbDmlLocksUsage struct {
	data     pmetric.Metric // data buffer for generated metric.
	config   MetricConfig   // metric config provided by user.
	capacity int            // max observed number of data points added to the metric.
}

// init fills oracledb.dml_locks.usage metric with initial data.
func (m *metricOracledbDmlLocksUsage) init() {
	m.data.SetName("oracledb.dml_locks.usage")
	m.data.SetDescription("Current count of active DML (Data Manipulation Language) locks.")
	m.data.SetUnit("{locks}")
	m.data.SetEmptyGauge()
}

// recordDataPoint appends one data point; it is a no-op when the metric is disabled.
func (m *metricOracledbDmlLocksUsage) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
	if !m.config.Enabled {
		return
	}
	point := m.data.Gauge().DataPoints().AppendEmpty()
	point.SetStartTimestamp(start)
	point.SetTimestamp(ts)
	point.SetIntValue(val)
}

// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricOracledbDmlLocksUsage) updateCapacity() {
	if n := m.data.Gauge().DataPoints().Len(); n > m.capacity {
		m.capacity = n
	}
}

// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricOracledbDmlLocksUsage) emit(metrics pmetric.MetricSlice) {
	if !m.config.Enabled || m.data.Gauge().DataPoints().Len() == 0 {
		return
	}
	m.updateCapacity()
	m.data.MoveTo(metrics.AppendEmpty())
	m.init()
}

// newMetricOracledbDmlLocksUsage builds the metric wrapper, allocating its
// buffer only when the metric is enabled.
func newMetricOracledbDmlLocksUsage(cfg MetricConfig) metricOracledbDmlLocksUsage {
	m := metricOracledbDmlLocksUsage{config: cfg}
	if !cfg.Enabled {
		return m
	}
	m.data = pmetric.NewMetric()
	m.init()
	return m
}
// metricOracledbDmlStatementsParallelized buffers data points for the
// oracledb.dml_statements_parallelized metric until they are emitted.
type metricOracledbDmlStatementsParallelized struct {
	data     pmetric.Metric // data buffer for generated metric.
	config   MetricConfig   // metric config provided by user.
	capacity int            // max observed number of data points added to the metric.
}

// init fills oracledb.dml_statements_parallelized metric with initial data.
func (m *metricOracledbDmlStatementsParallelized) init() {
	m.data.SetName("oracledb.dml_statements_parallelized")
	m.data.SetDescription("Number of DML statements that were executed in parallel")
	m.data.SetUnit("{statements}")
	sum := m.data.SetEmptySum()
	sum.SetIsMonotonic(true)
	sum.SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
}

// recordDataPoint appends one data point; it is a no-op when the metric is disabled.
func (m *metricOracledbDmlStatementsParallelized) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
	if !m.config.Enabled {
		return
	}
	point := m.data.Sum().DataPoints().AppendEmpty()
	point.SetStartTimestamp(start)
	point.SetTimestamp(ts)
	point.SetIntValue(val)
}

// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricOracledbDmlStatementsParallelized) updateCapacity() {
	if n := m.data.Sum().DataPoints().Len(); n > m.capacity {
		m.capacity = n
	}
}

// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricOracledbDmlStatementsParallelized) emit(metrics pmetric.MetricSlice) {
	if !m.config.Enabled || m.data.Sum().DataPoints().Len() == 0 {
		return
	}
	m.updateCapacity()
	m.data.MoveTo(metrics.AppendEmpty())
	m.init()
}

// newMetricOracledbDmlStatementsParallelized builds the metric wrapper,
// allocating its buffer only when the metric is enabled.
func newMetricOracledbDmlStatementsParallelized(cfg MetricConfig) metricOracledbDmlStatementsParallelized {
	m := metricOracledbDmlStatementsParallelized{config: cfg}
	if !cfg.Enabled {
		return m
	}
	m.data = pmetric.NewMetric()
	m.init()
	return m
}
// metricOracledbEnqueueDeadlocks buffers data points for the
// oracledb.enqueue_deadlocks metric until they are emitted.
type metricOracledbEnqueueDeadlocks struct {
	data     pmetric.Metric // data buffer for generated metric.
	config   MetricConfig   // metric config provided by user.
	capacity int            // max observed number of data points added to the metric.
}

// init fills oracledb.enqueue_deadlocks metric with initial data.
func (m *metricOracledbEnqueueDeadlocks) init() {
	m.data.SetName("oracledb.enqueue_deadlocks")
	m.data.SetDescription("Total number of deadlocks between table or row locks in different sessions.")
	m.data.SetUnit("{deadlocks}")
	sum := m.data.SetEmptySum()
	sum.SetIsMonotonic(true)
	sum.SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
}

// recordDataPoint appends one data point; it is a no-op when the metric is disabled.
func (m *metricOracledbEnqueueDeadlocks) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
	if !m.config.Enabled {
		return
	}
	point := m.data.Sum().DataPoints().AppendEmpty()
	point.SetStartTimestamp(start)
	point.SetTimestamp(ts)
	point.SetIntValue(val)
}

// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricOracledbEnqueueDeadlocks) updateCapacity() {
	if n := m.data.Sum().DataPoints().Len(); n > m.capacity {
		m.capacity = n
	}
}

// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricOracledbEnqueueDeadlocks) emit(metrics pmetric.MetricSlice) {
	if !m.config.Enabled || m.data.Sum().DataPoints().Len() == 0 {
		return
	}
	m.updateCapacity()
	m.data.MoveTo(metrics.AppendEmpty())
	m.init()
}

// newMetricOracledbEnqueueDeadlocks builds the metric wrapper, allocating its
// buffer only when the metric is enabled.
func newMetricOracledbEnqueueDeadlocks(cfg MetricConfig) metricOracledbEnqueueDeadlocks {
	m := metricOracledbEnqueueDeadlocks{config: cfg}
	if !cfg.Enabled {
		return m
	}
	m.data = pmetric.NewMetric()
	m.init()
	return m
}
// metricOracledbEnqueueLocksLimit buffers data points for the
// oracledb.enqueue_locks.limit gauge until they are emitted.
type metricOracledbEnqueueLocksLimit struct {
	data     pmetric.Metric // data buffer for generated metric.
	config   MetricConfig   // metric config provided by user.
	capacity int            // max observed number of data points added to the metric.
}

// init fills oracledb.enqueue_locks.limit metric with initial data.
func (m *metricOracledbEnqueueLocksLimit) init() {
	m.data.SetName("oracledb.enqueue_locks.limit")
	m.data.SetDescription("Maximum limit of active enqueue locks, -1 if unlimited.")
	m.data.SetUnit("{locks}")
	m.data.SetEmptyGauge()
}

// recordDataPoint appends one data point; it is a no-op when the metric is disabled.
func (m *metricOracledbEnqueueLocksLimit) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
	if !m.config.Enabled {
		return
	}
	point := m.data.Gauge().DataPoints().AppendEmpty()
	point.SetStartTimestamp(start)
	point.SetTimestamp(ts)
	point.SetIntValue(val)
}

// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricOracledbEnqueueLocksLimit) updateCapacity() {
	if n := m.data.Gauge().DataPoints().Len(); n > m.capacity {
		m.capacity = n
	}
}

// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricOracledbEnqueueLocksLimit) emit(metrics pmetric.MetricSlice) {
	if !m.config.Enabled || m.data.Gauge().DataPoints().Len() == 0 {
		return
	}
	m.updateCapacity()
	m.data.MoveTo(metrics.AppendEmpty())
	m.init()
}

// newMetricOracledbEnqueueLocksLimit builds the metric wrapper, allocating
// its buffer only when the metric is enabled.
func newMetricOracledbEnqueueLocksLimit(cfg MetricConfig) metricOracledbEnqueueLocksLimit {
	m := metricOracledbEnqueueLocksLimit{config: cfg}
	if !cfg.Enabled {
		return m
	}
	m.data = pmetric.NewMetric()
	m.init()
	return m
}
// metricOracledbEnqueueLocksUsage buffers data points for the
// oracledb.enqueue_locks.usage gauge until they are emitted.
type metricOracledbEnqueueLocksUsage struct {
	data     pmetric.Metric // data buffer for generated metric.
	config   MetricConfig   // metric config provided by user.
	capacity int            // max observed number of data points added to the metric.
}

// init fills oracledb.enqueue_locks.usage metric with initial data.
func (m *metricOracledbEnqueueLocksUsage) init() {
	m.data.SetName("oracledb.enqueue_locks.usage")
	m.data.SetDescription("Current count of active enqueue locks.")
	m.data.SetUnit("{locks}")
	m.data.SetEmptyGauge()
}

// recordDataPoint appends one data point; it is a no-op when the metric is disabled.
func (m *metricOracledbEnqueueLocksUsage) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
	if !m.config.Enabled {
		return
	}
	point := m.data.Gauge().DataPoints().AppendEmpty()
	point.SetStartTimestamp(start)
	point.SetTimestamp(ts)
	point.SetIntValue(val)
}

// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricOracledbEnqueueLocksUsage) updateCapacity() {
	if n := m.data.Gauge().DataPoints().Len(); n > m.capacity {
		m.capacity = n
	}
}

// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricOracledbEnqueueLocksUsage) emit(metrics pmetric.MetricSlice) {
	if !m.config.Enabled || m.data.Gauge().DataPoints().Len() == 0 {
		return
	}
	m.updateCapacity()
	m.data.MoveTo(metrics.AppendEmpty())
	m.init()
}

// newMetricOracledbEnqueueLocksUsage builds the metric wrapper, allocating
// its buffer only when the metric is enabled.
func newMetricOracledbEnqueueLocksUsage(cfg MetricConfig) metricOracledbEnqueueLocksUsage {
	m := metricOracledbEnqueueLocksUsage{config: cfg}
	if !cfg.Enabled {
		return m
	}
	m.data = pmetric.NewMetric()
	m.init()
	return m
}
// metricOracledbEnqueueResourcesLimit buffers data points for the
// oracledb.enqueue_resources.limit gauge until they are emitted.
type metricOracledbEnqueueResourcesLimit struct {
	data     pmetric.Metric // data buffer for generated metric.
	config   MetricConfig   // metric config provided by user.
	capacity int            // max observed number of data points added to the metric.
}

// init fills oracledb.enqueue_resources.limit metric with initial data.
func (m *metricOracledbEnqueueResourcesLimit) init() {
	m.data.SetName("oracledb.enqueue_resources.limit")
	m.data.SetDescription("Maximum limit of active enqueue resources, -1 if unlimited.")
	m.data.SetUnit("{resources}")
	m.data.SetEmptyGauge()
}

// recordDataPoint appends one data point; it is a no-op when the metric is disabled.
func (m *metricOracledbEnqueueResourcesLimit) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
	if !m.config.Enabled {
		return
	}
	point := m.data.Gauge().DataPoints().AppendEmpty()
	point.SetStartTimestamp(start)
	point.SetTimestamp(ts)
	point.SetIntValue(val)
}

// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricOracledbEnqueueResourcesLimit) updateCapacity() {
	if n := m.data.Gauge().DataPoints().Len(); n > m.capacity {
		m.capacity = n
	}
}

// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricOracledbEnqueueResourcesLimit) emit(metrics pmetric.MetricSlice) {
	if !m.config.Enabled || m.data.Gauge().DataPoints().Len() == 0 {
		return
	}
	m.updateCapacity()
	m.data.MoveTo(metrics.AppendEmpty())
	m.init()
}

// newMetricOracledbEnqueueResourcesLimit builds the metric wrapper,
// allocating its buffer only when the metric is enabled.
func newMetricOracledbEnqueueResourcesLimit(cfg MetricConfig) metricOracledbEnqueueResourcesLimit {
	m := metricOracledbEnqueueResourcesLimit{config: cfg}
	if !cfg.Enabled {
		return m
	}
	m.data = pmetric.NewMetric()
	m.init()
	return m
}
// metricOracledbEnqueueResourcesUsage buffers data points for the
// oracledb.enqueue_resources.usage gauge until they are emitted.
type metricOracledbEnqueueResourcesUsage struct {
	data     pmetric.Metric // data buffer for generated metric.
	config   MetricConfig   // metric config provided by user.
	capacity int            // max observed number of data points added to the metric.
}

// init fills oracledb.enqueue_resources.usage metric with initial data.
func (m *metricOracledbEnqueueResourcesUsage) init() {
	m.data.SetName("oracledb.enqueue_resources.usage")
	m.data.SetDescription("Current count of active enqueue resources.")
	m.data.SetUnit("{resources}")
	m.data.SetEmptyGauge()
}

// recordDataPoint appends one data point; it is a no-op when the metric is disabled.
func (m *metricOracledbEnqueueResourcesUsage) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
	if !m.config.Enabled {
		return
	}
	point := m.data.Gauge().DataPoints().AppendEmpty()
	point.SetStartTimestamp(start)
	point.SetTimestamp(ts)
	point.SetIntValue(val)
}

// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricOracledbEnqueueResourcesUsage) updateCapacity() {
	if n := m.data.Gauge().DataPoints().Len(); n > m.capacity {
		m.capacity = n
	}
}

// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricOracledbEnqueueResourcesUsage) emit(metrics pmetric.MetricSlice) {
	if !m.config.Enabled || m.data.Gauge().DataPoints().Len() == 0 {
		return
	}
	m.updateCapacity()
	m.data.MoveTo(metrics.AppendEmpty())
	m.init()
}

// newMetricOracledbEnqueueResourcesUsage builds the metric wrapper,
// allocating its buffer only when the metric is enabled.
func newMetricOracledbEnqueueResourcesUsage(cfg MetricConfig) metricOracledbEnqueueResourcesUsage {
	m := metricOracledbEnqueueResourcesUsage{config: cfg}
	if !cfg.Enabled {
		return m
	}
	m.data = pmetric.NewMetric()
	m.init()
	return m
}
// metricOracledbExchangeDeadlocks buffers data points for the
// oracledb.exchange_deadlocks metric until they are emitted.
type metricOracledbExchangeDeadlocks struct {
	data     pmetric.Metric // data buffer for generated metric.
	config   MetricConfig   // metric config provided by user.
	capacity int            // max observed number of data points added to the metric.
}

// init fills oracledb.exchange_deadlocks metric with initial data.
func (m *metricOracledbExchangeDeadlocks) init() {
	m.data.SetName("oracledb.exchange_deadlocks")
	m.data.SetDescription("Number of times that a process detected a potential deadlock when exchanging two buffers and raised an internal, restartable error. Index scans are the only operations that perform exchanges.")
	m.data.SetUnit("{deadlocks}")
	sum := m.data.SetEmptySum()
	sum.SetIsMonotonic(true)
	sum.SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
}

// recordDataPoint appends one data point; it is a no-op when the metric is disabled.
func (m *metricOracledbExchangeDeadlocks) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
	if !m.config.Enabled {
		return
	}
	point := m.data.Sum().DataPoints().AppendEmpty()
	point.SetStartTimestamp(start)
	point.SetTimestamp(ts)
	point.SetIntValue(val)
}

// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricOracledbExchangeDeadlocks) updateCapacity() {
	if n := m.data.Sum().DataPoints().Len(); n > m.capacity {
		m.capacity = n
	}
}

// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricOracledbExchangeDeadlocks) emit(metrics pmetric.MetricSlice) {
	if !m.config.Enabled || m.data.Sum().DataPoints().Len() == 0 {
		return
	}
	m.updateCapacity()
	m.data.MoveTo(metrics.AppendEmpty())
	m.init()
}

// newMetricOracledbExchangeDeadlocks builds the metric wrapper, allocating
// its buffer only when the metric is enabled.
func newMetricOracledbExchangeDeadlocks(cfg MetricConfig) metricOracledbExchangeDeadlocks {
	m := metricOracledbExchangeDeadlocks{config: cfg}
	if !cfg.Enabled {
		return m
	}
	m.data = pmetric.NewMetric()
	m.init()
	return m
}
// metricOracledbExecutions buffers data points for the oracledb.executions
// metric until they are emitted.
type metricOracledbExecutions struct {
	data     pmetric.Metric // data buffer for generated metric.
	config   MetricConfig   // metric config provided by user.
	capacity int            // max observed number of data points added to the metric.
}

// init fills oracledb.executions metric with initial data.
func (m *metricOracledbExecutions) init() {
	m.data.SetName("oracledb.executions")
	m.data.SetDescription("Total number of calls (user and recursive) that executed SQL statements")
	m.data.SetUnit("{executions}")
	sum := m.data.SetEmptySum()
	sum.SetIsMonotonic(true)
	sum.SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
}

// recordDataPoint appends one data point; it is a no-op when the metric is disabled.
func (m *metricOracledbExecutions) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
	if !m.config.Enabled {
		return
	}
	point := m.data.Sum().DataPoints().AppendEmpty()
	point.SetStartTimestamp(start)
	point.SetTimestamp(ts)
	point.SetIntValue(val)
}

// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricOracledbExecutions) updateCapacity() {
	if n := m.data.Sum().DataPoints().Len(); n > m.capacity {
		m.capacity = n
	}
}

// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricOracledbExecutions) emit(metrics pmetric.MetricSlice) {
	if !m.config.Enabled || m.data.Sum().DataPoints().Len() == 0 {
		return
	}
	m.updateCapacity()
	m.data.MoveTo(metrics.AppendEmpty())
	m.init()
}

// newMetricOracledbExecutions builds the metric wrapper, allocating its
// buffer only when the metric is enabled.
func newMetricOracledbExecutions(cfg MetricConfig) metricOracledbExecutions {
	m := metricOracledbExecutions{config: cfg}
	if !cfg.Enabled {
		return m
	}
	m.data = pmetric.NewMetric()
	m.init()
	return m
}
// metricOracledbHardParses buffers data points for the oracledb.hard_parses
// metric until they are emitted.
type metricOracledbHardParses struct {
	data     pmetric.Metric // data buffer for generated metric.
	config   MetricConfig   // metric config provided by user.
	capacity int            // max observed number of data points added to the metric.
}

// init fills oracledb.hard_parses metric with initial data.
func (m *metricOracledbHardParses) init() {
	m.data.SetName("oracledb.hard_parses")
	m.data.SetDescription("Number of hard parses")
	m.data.SetUnit("{parses}")
	sum := m.data.SetEmptySum()
	sum.SetIsMonotonic(true)
	sum.SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
}

// recordDataPoint appends one data point; it is a no-op when the metric is disabled.
func (m *metricOracledbHardParses) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
	if !m.config.Enabled {
		return
	}
	point := m.data.Sum().DataPoints().AppendEmpty()
	point.SetStartTimestamp(start)
	point.SetTimestamp(ts)
	point.SetIntValue(val)
}

// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricOracledbHardParses) updateCapacity() {
	if n := m.data.Sum().DataPoints().Len(); n > m.capacity {
		m.capacity = n
	}
}

// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricOracledbHardParses) emit(metrics pmetric.MetricSlice) {
	if !m.config.Enabled || m.data.Sum().DataPoints().Len() == 0 {
		return
	}
	m.updateCapacity()
	m.data.MoveTo(metrics.AppendEmpty())
	m.init()
}

// newMetricOracledbHardParses builds the metric wrapper, allocating its
// buffer only when the metric is enabled.
func newMetricOracledbHardParses(cfg MetricConfig) metricOracledbHardParses {
	m := metricOracledbHardParses{config: cfg}
	if !cfg.Enabled {
		return m
	}
	m.data = pmetric.NewMetric()
	m.init()
	return m
}
// metricOracledbLogicalReads buffers data points for the
// oracledb.logical_reads metric until they are emitted.
type metricOracledbLogicalReads struct {
	data     pmetric.Metric // data buffer for generated metric.
	config   MetricConfig   // metric config provided by user.
	capacity int            // max observed number of data points added to the metric.
}

// init fills oracledb.logical_reads metric with initial data.
func (m *metricOracledbLogicalReads) init() {
	m.data.SetName("oracledb.logical_reads")
	m.data.SetDescription("Number of logical reads")
	m.data.SetUnit("{reads}")
	sum := m.data.SetEmptySum()
	sum.SetIsMonotonic(true)
	sum.SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
}

// recordDataPoint appends one data point; it is a no-op when the metric is disabled.
func (m *metricOracledbLogicalReads) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
	if !m.config.Enabled {
		return
	}
	point := m.data.Sum().DataPoints().AppendEmpty()
	point.SetStartTimestamp(start)
	point.SetTimestamp(ts)
	point.SetIntValue(val)
}

// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricOracledbLogicalReads) updateCapacity() {
	if n := m.data.Sum().DataPoints().Len(); n > m.capacity {
		m.capacity = n
	}
}

// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricOracledbLogicalReads) emit(metrics pmetric.MetricSlice) {
	if !m.config.Enabled || m.data.Sum().DataPoints().Len() == 0 {
		return
	}
	m.updateCapacity()
	m.data.MoveTo(metrics.AppendEmpty())
	m.init()
}

// newMetricOracledbLogicalReads builds the metric wrapper, allocating its
// buffer only when the metric is enabled.
func newMetricOracledbLogicalReads(cfg MetricConfig) metricOracledbLogicalReads {
	m := metricOracledbLogicalReads{config: cfg}
	if !cfg.Enabled {
		return m
	}
	m.data = pmetric.NewMetric()
	m.init()
	return m
}
type metricOracledbParallelOperationsDowngraded1To25Pct struct {
	data     pmetric.Metric // pending metric payload; handed off to a slice on emit.
	config   MetricConfig   // user-provided config deciding whether this metric is recorded.
	capacity int            // high-water mark of data points observed so far.
}

// init resets m.data to an empty
// oracledb.parallel_operations_downgraded_1_to_25_pct metric: a cumulative,
// monotonic integer sum.
func (m *metricOracledbParallelOperationsDowngraded1To25Pct) init() {
	m.data.SetName("oracledb.parallel_operations_downgraded_1_to_25_pct")
	m.data.SetDescription("Number of times parallel execution was requested and the degree of parallelism was reduced down to 1-25% because of insufficient parallel execution servers")
	m.data.SetUnit("{executions}")
	m.data.SetEmptySum()
	sum := m.data.Sum()
	sum.SetIsMonotonic(true)
	sum.SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
}

// recordDataPoint appends one int data point with the given timestamps.
// It is a no-op when the metric is disabled.
func (m *metricOracledbParallelOperationsDowngraded1To25Pct) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
	if !m.config.Enabled {
		return
	}
	point := m.data.Sum().DataPoints().AppendEmpty()
	point.SetStartTimestamp(start)
	point.SetTimestamp(ts)
	point.SetIntValue(val)
}

// updateCapacity records the largest data-point count seen so far.
func (m *metricOracledbParallelOperationsDowngraded1To25Pct) updateCapacity() {
	if n := m.data.Sum().DataPoints().Len(); n > m.capacity {
		m.capacity = n
	}
}

// emit moves accumulated data points onto metrics and re-initializes the
// internal buffer for the next collection cycle. Disabled or empty metrics
// are left untouched.
func (m *metricOracledbParallelOperationsDowngraded1To25Pct) emit(metrics pmetric.MetricSlice) {
	if !m.config.Enabled || m.data.Sum().DataPoints().Len() == 0 {
		return
	}
	m.updateCapacity()
	m.data.MoveTo(metrics.AppendEmpty())
	m.init()
}

// newMetricOracledbParallelOperationsDowngraded1To25Pct returns a recorder
// for the metric, initialized only when enabled in cfg.
func newMetricOracledbParallelOperationsDowngraded1To25Pct(cfg MetricConfig) metricOracledbParallelOperationsDowngraded1To25Pct {
	m := metricOracledbParallelOperationsDowngraded1To25Pct{config: cfg}
	if !cfg.Enabled {
		return m
	}
	m.data = pmetric.NewMetric()
	m.init()
	return m
}
type metricOracledbParallelOperationsDowngraded25To50Pct struct {
	data     pmetric.Metric // pending metric payload; handed off to a slice on emit.
	config   MetricConfig   // user-provided config deciding whether this metric is recorded.
	capacity int            // high-water mark of data points observed so far.
}

// init resets m.data to an empty
// oracledb.parallel_operations_downgraded_25_to_50_pct metric: a cumulative,
// monotonic integer sum.
func (m *metricOracledbParallelOperationsDowngraded25To50Pct) init() {
	m.data.SetName("oracledb.parallel_operations_downgraded_25_to_50_pct")
	m.data.SetDescription("Number of times parallel execution was requested and the degree of parallelism was reduced down to 25-50% because of insufficient parallel execution servers")
	m.data.SetUnit("{executions}")
	m.data.SetEmptySum()
	sum := m.data.Sum()
	sum.SetIsMonotonic(true)
	sum.SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
}

// recordDataPoint appends one int data point with the given timestamps.
// It is a no-op when the metric is disabled.
func (m *metricOracledbParallelOperationsDowngraded25To50Pct) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
	if !m.config.Enabled {
		return
	}
	point := m.data.Sum().DataPoints().AppendEmpty()
	point.SetStartTimestamp(start)
	point.SetTimestamp(ts)
	point.SetIntValue(val)
}

// updateCapacity records the largest data-point count seen so far.
func (m *metricOracledbParallelOperationsDowngraded25To50Pct) updateCapacity() {
	if n := m.data.Sum().DataPoints().Len(); n > m.capacity {
		m.capacity = n
	}
}

// emit moves accumulated data points onto metrics and re-initializes the
// internal buffer for the next collection cycle. Disabled or empty metrics
// are left untouched.
func (m *metricOracledbParallelOperationsDowngraded25To50Pct) emit(metrics pmetric.MetricSlice) {
	if !m.config.Enabled || m.data.Sum().DataPoints().Len() == 0 {
		return
	}
	m.updateCapacity()
	m.data.MoveTo(metrics.AppendEmpty())
	m.init()
}

// newMetricOracledbParallelOperationsDowngraded25To50Pct returns a recorder
// for the metric, initialized only when enabled in cfg.
func newMetricOracledbParallelOperationsDowngraded25To50Pct(cfg MetricConfig) metricOracledbParallelOperationsDowngraded25To50Pct {
	m := metricOracledbParallelOperationsDowngraded25To50Pct{config: cfg}
	if !cfg.Enabled {
		return m
	}
	m.data = pmetric.NewMetric()
	m.init()
	return m
}
type metricOracledbParallelOperationsDowngraded50To75Pct struct {
	data     pmetric.Metric // pending metric payload; handed off to a slice on emit.
	config   MetricConfig   // user-provided config deciding whether this metric is recorded.
	capacity int            // high-water mark of data points observed so far.
}

// init resets m.data to an empty
// oracledb.parallel_operations_downgraded_50_to_75_pct metric: a cumulative,
// monotonic integer sum.
func (m *metricOracledbParallelOperationsDowngraded50To75Pct) init() {
	m.data.SetName("oracledb.parallel_operations_downgraded_50_to_75_pct")
	m.data.SetDescription("Number of times parallel execution was requested and the degree of parallelism was reduced down to 50-75% because of insufficient parallel execution servers")
	m.data.SetUnit("{executions}")
	m.data.SetEmptySum()
	sum := m.data.Sum()
	sum.SetIsMonotonic(true)
	sum.SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
}

// recordDataPoint appends one int data point with the given timestamps.
// It is a no-op when the metric is disabled.
func (m *metricOracledbParallelOperationsDowngraded50To75Pct) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
	if !m.config.Enabled {
		return
	}
	point := m.data.Sum().DataPoints().AppendEmpty()
	point.SetStartTimestamp(start)
	point.SetTimestamp(ts)
	point.SetIntValue(val)
}

// updateCapacity records the largest data-point count seen so far.
func (m *metricOracledbParallelOperationsDowngraded50To75Pct) updateCapacity() {
	if n := m.data.Sum().DataPoints().Len(); n > m.capacity {
		m.capacity = n
	}
}

// emit moves accumulated data points onto metrics and re-initializes the
// internal buffer for the next collection cycle. Disabled or empty metrics
// are left untouched.
func (m *metricOracledbParallelOperationsDowngraded50To75Pct) emit(metrics pmetric.MetricSlice) {
	if !m.config.Enabled || m.data.Sum().DataPoints().Len() == 0 {
		return
	}
	m.updateCapacity()
	m.data.MoveTo(metrics.AppendEmpty())
	m.init()
}

// newMetricOracledbParallelOperationsDowngraded50To75Pct returns a recorder
// for the metric, initialized only when enabled in cfg.
func newMetricOracledbParallelOperationsDowngraded50To75Pct(cfg MetricConfig) metricOracledbParallelOperationsDowngraded50To75Pct {
	m := metricOracledbParallelOperationsDowngraded50To75Pct{config: cfg}
	if !cfg.Enabled {
		return m
	}
	m.data = pmetric.NewMetric()
	m.init()
	return m
}
type metricOracledbParallelOperationsDowngraded75To99Pct struct {
	data     pmetric.Metric // pending metric payload; handed off to a slice on emit.
	config   MetricConfig   // user-provided config deciding whether this metric is recorded.
	capacity int            // high-water mark of data points observed so far.
}

// init resets m.data to an empty
// oracledb.parallel_operations_downgraded_75_to_99_pct metric: a cumulative,
// monotonic integer sum.
func (m *metricOracledbParallelOperationsDowngraded75To99Pct) init() {
	m.data.SetName("oracledb.parallel_operations_downgraded_75_to_99_pct")
	m.data.SetDescription("Number of times parallel execution was requested and the degree of parallelism was reduced down to 75-99% because of insufficient parallel execution servers")
	m.data.SetUnit("{executions}")
	m.data.SetEmptySum()
	sum := m.data.Sum()
	sum.SetIsMonotonic(true)
	sum.SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
}

// recordDataPoint appends one int data point with the given timestamps.
// It is a no-op when the metric is disabled.
func (m *metricOracledbParallelOperationsDowngraded75To99Pct) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
	if !m.config.Enabled {
		return
	}
	point := m.data.Sum().DataPoints().AppendEmpty()
	point.SetStartTimestamp(start)
	point.SetTimestamp(ts)
	point.SetIntValue(val)
}

// updateCapacity records the largest data-point count seen so far.
func (m *metricOracledbParallelOperationsDowngraded75To99Pct) updateCapacity() {
	if n := m.data.Sum().DataPoints().Len(); n > m.capacity {
		m.capacity = n
	}
}

// emit moves accumulated data points onto metrics and re-initializes the
// internal buffer for the next collection cycle. Disabled or empty metrics
// are left untouched.
func (m *metricOracledbParallelOperationsDowngraded75To99Pct) emit(metrics pmetric.MetricSlice) {
	if !m.config.Enabled || m.data.Sum().DataPoints().Len() == 0 {
		return
	}
	m.updateCapacity()
	m.data.MoveTo(metrics.AppendEmpty())
	m.init()
}

// newMetricOracledbParallelOperationsDowngraded75To99Pct returns a recorder
// for the metric, initialized only when enabled in cfg.
func newMetricOracledbParallelOperationsDowngraded75To99Pct(cfg MetricConfig) metricOracledbParallelOperationsDowngraded75To99Pct {
	m := metricOracledbParallelOperationsDowngraded75To99Pct{config: cfg}
	if !cfg.Enabled {
		return m
	}
	m.data = pmetric.NewMetric()
	m.init()
	return m
}
type metricOracledbParallelOperationsDowngradedToSerial struct {
	data     pmetric.Metric // pending metric payload; handed off to a slice on emit.
	config   MetricConfig   // user-provided config deciding whether this metric is recorded.
	capacity int            // high-water mark of data points observed so far.
}

// init resets m.data to an empty
// oracledb.parallel_operations_downgraded_to_serial metric: a cumulative,
// monotonic integer sum.
func (m *metricOracledbParallelOperationsDowngradedToSerial) init() {
	m.data.SetName("oracledb.parallel_operations_downgraded_to_serial")
	m.data.SetDescription("Number of times parallel execution was requested but execution was serial because of insufficient parallel execution servers")
	m.data.SetUnit("{executions}")
	m.data.SetEmptySum()
	sum := m.data.Sum()
	sum.SetIsMonotonic(true)
	sum.SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
}

// recordDataPoint appends one int data point with the given timestamps.
// It is a no-op when the metric is disabled.
func (m *metricOracledbParallelOperationsDowngradedToSerial) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
	if !m.config.Enabled {
		return
	}
	point := m.data.Sum().DataPoints().AppendEmpty()
	point.SetStartTimestamp(start)
	point.SetTimestamp(ts)
	point.SetIntValue(val)
}

// updateCapacity records the largest data-point count seen so far.
func (m *metricOracledbParallelOperationsDowngradedToSerial) updateCapacity() {
	if n := m.data.Sum().DataPoints().Len(); n > m.capacity {
		m.capacity = n
	}
}

// emit moves accumulated data points onto metrics and re-initializes the
// internal buffer for the next collection cycle. Disabled or empty metrics
// are left untouched.
func (m *metricOracledbParallelOperationsDowngradedToSerial) emit(metrics pmetric.MetricSlice) {
	if !m.config.Enabled || m.data.Sum().DataPoints().Len() == 0 {
		return
	}
	m.updateCapacity()
	m.data.MoveTo(metrics.AppendEmpty())
	m.init()
}

// newMetricOracledbParallelOperationsDowngradedToSerial returns a recorder
// for the metric, initialized only when enabled in cfg.
func newMetricOracledbParallelOperationsDowngradedToSerial(cfg MetricConfig) metricOracledbParallelOperationsDowngradedToSerial {
	m := metricOracledbParallelOperationsDowngradedToSerial{config: cfg}
	if !cfg.Enabled {
		return m
	}
	m.data = pmetric.NewMetric()
	m.init()
	return m
}
type metricOracledbParallelOperationsNotDowngraded struct {
	data     pmetric.Metric // pending metric payload; handed off to a slice on emit.
	config   MetricConfig   // user-provided config deciding whether this metric is recorded.
	capacity int            // high-water mark of data points observed so far.
}

// init resets m.data to an empty oracledb.parallel_operations_not_downgraded
// metric: a cumulative, monotonic integer sum.
func (m *metricOracledbParallelOperationsNotDowngraded) init() {
	m.data.SetName("oracledb.parallel_operations_not_downgraded")
	m.data.SetDescription("Number of times parallel execution was executed at the requested degree of parallelism")
	m.data.SetUnit("{executions}")
	m.data.SetEmptySum()
	sum := m.data.Sum()
	sum.SetIsMonotonic(true)
	sum.SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
}

// recordDataPoint appends one int data point with the given timestamps.
// It is a no-op when the metric is disabled.
func (m *metricOracledbParallelOperationsNotDowngraded) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
	if !m.config.Enabled {
		return
	}
	point := m.data.Sum().DataPoints().AppendEmpty()
	point.SetStartTimestamp(start)
	point.SetTimestamp(ts)
	point.SetIntValue(val)
}

// updateCapacity records the largest data-point count seen so far.
func (m *metricOracledbParallelOperationsNotDowngraded) updateCapacity() {
	if n := m.data.Sum().DataPoints().Len(); n > m.capacity {
		m.capacity = n
	}
}

// emit moves accumulated data points onto metrics and re-initializes the
// internal buffer for the next collection cycle. Disabled or empty metrics
// are left untouched.
func (m *metricOracledbParallelOperationsNotDowngraded) emit(metrics pmetric.MetricSlice) {
	if !m.config.Enabled || m.data.Sum().DataPoints().Len() == 0 {
		return
	}
	m.updateCapacity()
	m.data.MoveTo(metrics.AppendEmpty())
	m.init()
}

// newMetricOracledbParallelOperationsNotDowngraded returns a recorder for
// the metric, initialized only when enabled in cfg.
func newMetricOracledbParallelOperationsNotDowngraded(cfg MetricConfig) metricOracledbParallelOperationsNotDowngraded {
	m := metricOracledbParallelOperationsNotDowngraded{config: cfg}
	if !cfg.Enabled {
		return m
	}
	m.data = pmetric.NewMetric()
	m.init()
	return m
}
type metricOracledbParseCalls struct {
	data     pmetric.Metric // pending metric payload; handed off to a slice on emit.
	config   MetricConfig   // user-provided config deciding whether this metric is recorded.
	capacity int            // high-water mark of data points observed so far.
}

// init resets m.data to an empty oracledb.parse_calls metric: a cumulative,
// monotonic integer sum.
func (m *metricOracledbParseCalls) init() {
	m.data.SetName("oracledb.parse_calls")
	m.data.SetDescription("Total number of parse calls.")
	m.data.SetUnit("{parses}")
	m.data.SetEmptySum()
	sum := m.data.Sum()
	sum.SetIsMonotonic(true)
	sum.SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
}

// recordDataPoint appends one int data point with the given timestamps.
// It is a no-op when the metric is disabled.
func (m *metricOracledbParseCalls) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
	if !m.config.Enabled {
		return
	}
	point := m.data.Sum().DataPoints().AppendEmpty()
	point.SetStartTimestamp(start)
	point.SetTimestamp(ts)
	point.SetIntValue(val)
}

// updateCapacity records the largest data-point count seen so far.
func (m *metricOracledbParseCalls) updateCapacity() {
	if n := m.data.Sum().DataPoints().Len(); n > m.capacity {
		m.capacity = n
	}
}

// emit moves accumulated data points onto metrics and re-initializes the
// internal buffer for the next collection cycle. Disabled or empty metrics
// are left untouched.
func (m *metricOracledbParseCalls) emit(metrics pmetric.MetricSlice) {
	if !m.config.Enabled || m.data.Sum().DataPoints().Len() == 0 {
		return
	}
	m.updateCapacity()
	m.data.MoveTo(metrics.AppendEmpty())
	m.init()
}

// newMetricOracledbParseCalls returns a recorder for the oracledb.parse_calls
// metric, initialized only when enabled in cfg.
func newMetricOracledbParseCalls(cfg MetricConfig) metricOracledbParseCalls {
	m := metricOracledbParseCalls{config: cfg}
	if !cfg.Enabled {
		return m
	}
	m.data = pmetric.NewMetric()
	m.init()
	return m
}
type metricOracledbPgaMemory struct {
	data     pmetric.Metric // pending metric payload; handed off to a slice on emit.
	config   MetricConfig   // user-provided config deciding whether this metric is recorded.
	capacity int            // high-water mark of data points observed so far.
}

// init resets m.data to an empty oracledb.pga_memory metric: a cumulative,
// monotonic integer sum.
func (m *metricOracledbPgaMemory) init() {
	m.data.SetName("oracledb.pga_memory")
	m.data.SetDescription("Session PGA (Program Global Area) memory")
	m.data.SetUnit("By")
	m.data.SetEmptySum()
	sum := m.data.Sum()
	sum.SetIsMonotonic(true)
	sum.SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
}

// recordDataPoint appends one int data point with the given timestamps.
// It is a no-op when the metric is disabled.
func (m *metricOracledbPgaMemory) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
	if !m.config.Enabled {
		return
	}
	point := m.data.Sum().DataPoints().AppendEmpty()
	point.SetStartTimestamp(start)
	point.SetTimestamp(ts)
	point.SetIntValue(val)
}

// updateCapacity records the largest data-point count seen so far.
func (m *metricOracledbPgaMemory) updateCapacity() {
	if n := m.data.Sum().DataPoints().Len(); n > m.capacity {
		m.capacity = n
	}
}

// emit moves accumulated data points onto metrics and re-initializes the
// internal buffer for the next collection cycle. Disabled or empty metrics
// are left untouched.
func (m *metricOracledbPgaMemory) emit(metrics pmetric.MetricSlice) {
	if !m.config.Enabled || m.data.Sum().DataPoints().Len() == 0 {
		return
	}
	m.updateCapacity()
	m.data.MoveTo(metrics.AppendEmpty())
	m.init()
}

// newMetricOracledbPgaMemory returns a recorder for the oracledb.pga_memory
// metric, initialized only when enabled in cfg.
func newMetricOracledbPgaMemory(cfg MetricConfig) metricOracledbPgaMemory {
	m := metricOracledbPgaMemory{config: cfg}
	if !cfg.Enabled {
		return m
	}
	m.data = pmetric.NewMetric()
	m.init()
	return m
}
type metricOracledbPhysicalReadIoRequests struct {
	data     pmetric.Metric // pending metric payload; handed off to a slice on emit.
	config   MetricConfig   // user-provided config deciding whether this metric is recorded.
	capacity int            // high-water mark of data points observed so far.
}

// init resets m.data to an empty oracledb.physical_read_io_requests metric:
// a cumulative, monotonic integer sum.
func (m *metricOracledbPhysicalReadIoRequests) init() {
	m.data.SetName("oracledb.physical_read_io_requests")
	m.data.SetDescription("Number of read requests for application activity")
	m.data.SetUnit("{requests}")
	m.data.SetEmptySum()
	sum := m.data.Sum()
	sum.SetIsMonotonic(true)
	sum.SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
}

// recordDataPoint appends one int data point with the given timestamps.
// It is a no-op when the metric is disabled.
func (m *metricOracledbPhysicalReadIoRequests) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
	if !m.config.Enabled {
		return
	}
	point := m.data.Sum().DataPoints().AppendEmpty()
	point.SetStartTimestamp(start)
	point.SetTimestamp(ts)
	point.SetIntValue(val)
}

// updateCapacity records the largest data-point count seen so far.
func (m *metricOracledbPhysicalReadIoRequests) updateCapacity() {
	if n := m.data.Sum().DataPoints().Len(); n > m.capacity {
		m.capacity = n
	}
}

// emit moves accumulated data points onto metrics and re-initializes the
// internal buffer for the next collection cycle. Disabled or empty metrics
// are left untouched.
func (m *metricOracledbPhysicalReadIoRequests) emit(metrics pmetric.MetricSlice) {
	if !m.config.Enabled || m.data.Sum().DataPoints().Len() == 0 {
		return
	}
	m.updateCapacity()
	m.data.MoveTo(metrics.AppendEmpty())
	m.init()
}

// newMetricOracledbPhysicalReadIoRequests returns a recorder for the
// oracledb.physical_read_io_requests metric, initialized only when enabled.
func newMetricOracledbPhysicalReadIoRequests(cfg MetricConfig) metricOracledbPhysicalReadIoRequests {
	m := metricOracledbPhysicalReadIoRequests{config: cfg}
	if !cfg.Enabled {
		return m
	}
	m.data = pmetric.NewMetric()
	m.init()
	return m
}
type metricOracledbPhysicalReads struct {
	data     pmetric.Metric // pending metric payload; handed off to a slice on emit.
	config   MetricConfig   // user-provided config deciding whether this metric is recorded.
	capacity int            // high-water mark of data points observed so far.
}

// init resets m.data to an empty oracledb.physical_reads metric: a
// cumulative, monotonic integer sum.
func (m *metricOracledbPhysicalReads) init() {
	m.data.SetName("oracledb.physical_reads")
	m.data.SetDescription("Number of physical reads")
	m.data.SetUnit("{reads}")
	m.data.SetEmptySum()
	sum := m.data.Sum()
	sum.SetIsMonotonic(true)
	sum.SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
}

// recordDataPoint appends one int data point with the given timestamps.
// It is a no-op when the metric is disabled.
func (m *metricOracledbPhysicalReads) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
	if !m.config.Enabled {
		return
	}
	point := m.data.Sum().DataPoints().AppendEmpty()
	point.SetStartTimestamp(start)
	point.SetTimestamp(ts)
	point.SetIntValue(val)
}

// updateCapacity records the largest data-point count seen so far.
func (m *metricOracledbPhysicalReads) updateCapacity() {
	if n := m.data.Sum().DataPoints().Len(); n > m.capacity {
		m.capacity = n
	}
}

// emit moves accumulated data points onto metrics and re-initializes the
// internal buffer for the next collection cycle. Disabled or empty metrics
// are left untouched.
func (m *metricOracledbPhysicalReads) emit(metrics pmetric.MetricSlice) {
	if !m.config.Enabled || m.data.Sum().DataPoints().Len() == 0 {
		return
	}
	m.updateCapacity()
	m.data.MoveTo(metrics.AppendEmpty())
	m.init()
}

// newMetricOracledbPhysicalReads returns a recorder for the
// oracledb.physical_reads metric, initialized only when enabled in cfg.
func newMetricOracledbPhysicalReads(cfg MetricConfig) metricOracledbPhysicalReads {
	m := metricOracledbPhysicalReads{config: cfg}
	if !cfg.Enabled {
		return m
	}
	m.data = pmetric.NewMetric()
	m.init()
	return m
}
type metricOracledbPhysicalReadsDirect struct {
	data     pmetric.Metric // pending metric payload; handed off to a slice on emit.
	config   MetricConfig   // user-provided config deciding whether this metric is recorded.
	capacity int            // high-water mark of data points observed so far.
}

// init resets m.data to an empty oracledb.physical_reads_direct metric: a
// cumulative, monotonic integer sum.
func (m *metricOracledbPhysicalReadsDirect) init() {
	m.data.SetName("oracledb.physical_reads_direct")
	m.data.SetDescription("Number of reads directly from disk, bypassing the buffer cache")
	m.data.SetUnit("{reads}")
	m.data.SetEmptySum()
	sum := m.data.Sum()
	sum.SetIsMonotonic(true)
	sum.SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
}

// recordDataPoint appends one int data point with the given timestamps.
// It is a no-op when the metric is disabled.
func (m *metricOracledbPhysicalReadsDirect) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
	if !m.config.Enabled {
		return
	}
	point := m.data.Sum().DataPoints().AppendEmpty()
	point.SetStartTimestamp(start)
	point.SetTimestamp(ts)
	point.SetIntValue(val)
}

// updateCapacity records the largest data-point count seen so far.
func (m *metricOracledbPhysicalReadsDirect) updateCapacity() {
	if n := m.data.Sum().DataPoints().Len(); n > m.capacity {
		m.capacity = n
	}
}

// emit moves accumulated data points onto metrics and re-initializes the
// internal buffer for the next collection cycle. Disabled or empty metrics
// are left untouched.
func (m *metricOracledbPhysicalReadsDirect) emit(metrics pmetric.MetricSlice) {
	if !m.config.Enabled || m.data.Sum().DataPoints().Len() == 0 {
		return
	}
	m.updateCapacity()
	m.data.MoveTo(metrics.AppendEmpty())
	m.init()
}

// newMetricOracledbPhysicalReadsDirect returns a recorder for the
// oracledb.physical_reads_direct metric, initialized only when enabled.
func newMetricOracledbPhysicalReadsDirect(cfg MetricConfig) metricOracledbPhysicalReadsDirect {
	m := metricOracledbPhysicalReadsDirect{config: cfg}
	if !cfg.Enabled {
		return m
	}
	m.data = pmetric.NewMetric()
	m.init()
	return m
}
type metricOracledbPhysicalWriteIoRequests struct {
	data     pmetric.Metric // pending metric payload; handed off to a slice on emit.
	config   MetricConfig   // user-provided config deciding whether this metric is recorded.
	capacity int            // high-water mark of data points observed so far.
}

// init resets m.data to an empty oracledb.physical_write_io_requests metric:
// a cumulative, monotonic integer sum.
func (m *metricOracledbPhysicalWriteIoRequests) init() {
	m.data.SetName("oracledb.physical_write_io_requests")
	m.data.SetDescription("Number of write requests for application activity")
	m.data.SetUnit("{requests}")
	m.data.SetEmptySum()
	sum := m.data.Sum()
	sum.SetIsMonotonic(true)
	sum.SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
}

// recordDataPoint appends one int data point with the given timestamps.
// It is a no-op when the metric is disabled.
func (m *metricOracledbPhysicalWriteIoRequests) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
	if !m.config.Enabled {
		return
	}
	point := m.data.Sum().DataPoints().AppendEmpty()
	point.SetStartTimestamp(start)
	point.SetTimestamp(ts)
	point.SetIntValue(val)
}

// updateCapacity records the largest data-point count seen so far.
func (m *metricOracledbPhysicalWriteIoRequests) updateCapacity() {
	if n := m.data.Sum().DataPoints().Len(); n > m.capacity {
		m.capacity = n
	}
}

// emit moves accumulated data points onto metrics and re-initializes the
// internal buffer for the next collection cycle. Disabled or empty metrics
// are left untouched.
func (m *metricOracledbPhysicalWriteIoRequests) emit(metrics pmetric.MetricSlice) {
	if !m.config.Enabled || m.data.Sum().DataPoints().Len() == 0 {
		return
	}
	m.updateCapacity()
	m.data.MoveTo(metrics.AppendEmpty())
	m.init()
}

// newMetricOracledbPhysicalWriteIoRequests returns a recorder for the
// oracledb.physical_write_io_requests metric, initialized only when enabled.
func newMetricOracledbPhysicalWriteIoRequests(cfg MetricConfig) metricOracledbPhysicalWriteIoRequests {
	m := metricOracledbPhysicalWriteIoRequests{config: cfg}
	if !cfg.Enabled {
		return m
	}
	m.data = pmetric.NewMetric()
	m.init()
	return m
}
type metricOracledbPhysicalWrites struct {
	data     pmetric.Metric // pending metric payload; handed off to a slice on emit.
	config   MetricConfig   // user-provided config deciding whether this metric is recorded.
	capacity int            // high-water mark of data points observed so far.
}

// init resets m.data to an empty oracledb.physical_writes metric: a
// cumulative, monotonic integer sum.
func (m *metricOracledbPhysicalWrites) init() {
	m.data.SetName("oracledb.physical_writes")
	m.data.SetDescription("Number of physical writes")
	m.data.SetUnit("{writes}")
	m.data.SetEmptySum()
	sum := m.data.Sum()
	sum.SetIsMonotonic(true)
	sum.SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
}

// recordDataPoint appends one int data point with the given timestamps.
// It is a no-op when the metric is disabled.
func (m *metricOracledbPhysicalWrites) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
	if !m.config.Enabled {
		return
	}
	point := m.data.Sum().DataPoints().AppendEmpty()
	point.SetStartTimestamp(start)
	point.SetTimestamp(ts)
	point.SetIntValue(val)
}

// updateCapacity records the largest data-point count seen so far.
func (m *metricOracledbPhysicalWrites) updateCapacity() {
	if n := m.data.Sum().DataPoints().Len(); n > m.capacity {
		m.capacity = n
	}
}

// emit moves accumulated data points onto metrics and re-initializes the
// internal buffer for the next collection cycle. Disabled or empty metrics
// are left untouched.
func (m *metricOracledbPhysicalWrites) emit(metrics pmetric.MetricSlice) {
	if !m.config.Enabled || m.data.Sum().DataPoints().Len() == 0 {
		return
	}
	m.updateCapacity()
	m.data.MoveTo(metrics.AppendEmpty())
	m.init()
}

// newMetricOracledbPhysicalWrites returns a recorder for the
// oracledb.physical_writes metric, initialized only when enabled in cfg.
func newMetricOracledbPhysicalWrites(cfg MetricConfig) metricOracledbPhysicalWrites {
	m := metricOracledbPhysicalWrites{config: cfg}
	if !cfg.Enabled {
		return m
	}
	m.data = pmetric.NewMetric()
	m.init()
	return m
}
type metricOracledbPhysicalWritesDirect struct {
	data     pmetric.Metric // pending metric payload; handed off to a slice on emit.
	config   MetricConfig   // user-provided config deciding whether this metric is recorded.
	capacity int            // high-water mark of data points observed so far.
}

// init resets m.data to an empty oracledb.physical_writes_direct metric: a
// cumulative, monotonic integer sum.
func (m *metricOracledbPhysicalWritesDirect) init() {
	m.data.SetName("oracledb.physical_writes_direct")
	m.data.SetDescription("Number of writes directly to disk, bypassing the buffer cache")
	m.data.SetUnit("{writes}")
	m.data.SetEmptySum()
	sum := m.data.Sum()
	sum.SetIsMonotonic(true)
	sum.SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
}

// recordDataPoint appends one int data point with the given timestamps.
// It is a no-op when the metric is disabled.
func (m *metricOracledbPhysicalWritesDirect) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
	if !m.config.Enabled {
		return
	}
	point := m.data.Sum().DataPoints().AppendEmpty()
	point.SetStartTimestamp(start)
	point.SetTimestamp(ts)
	point.SetIntValue(val)
}

// updateCapacity records the largest data-point count seen so far.
func (m *metricOracledbPhysicalWritesDirect) updateCapacity() {
	if n := m.data.Sum().DataPoints().Len(); n > m.capacity {
		m.capacity = n
	}
}

// emit moves accumulated data points onto metrics and re-initializes the
// internal buffer for the next collection cycle. Disabled or empty metrics
// are left untouched.
func (m *metricOracledbPhysicalWritesDirect) emit(metrics pmetric.MetricSlice) {
	if !m.config.Enabled || m.data.Sum().DataPoints().Len() == 0 {
		return
	}
	m.updateCapacity()
	m.data.MoveTo(metrics.AppendEmpty())
	m.init()
}

// newMetricOracledbPhysicalWritesDirect returns a recorder for the
// oracledb.physical_writes_direct metric, initialized only when enabled.
func newMetricOracledbPhysicalWritesDirect(cfg MetricConfig) metricOracledbPhysicalWritesDirect {
	m := metricOracledbPhysicalWritesDirect{config: cfg}
	if !cfg.Enabled {
		return m
	}
	m.data = pmetric.NewMetric()
	m.init()
	return m
}
type metricOracledbProcessesLimit struct {
	data     pmetric.Metric // pending metric payload; handed off to a slice on emit.
	config   MetricConfig   // user-provided config deciding whether this metric is recorded.
	capacity int            // high-water mark of data points observed so far.
}

// init resets m.data to an empty oracledb.processes.limit metric: an
// integer gauge.
func (m *metricOracledbProcessesLimit) init() {
	m.data.SetName("oracledb.processes.limit")
	m.data.SetDescription("Maximum limit of active processes, -1 if unlimited.")
	m.data.SetUnit("{processes}")
	m.data.SetEmptyGauge()
}

// recordDataPoint appends one int data point with the given timestamps.
// It is a no-op when the metric is disabled.
func (m *metricOracledbProcessesLimit) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
	if !m.config.Enabled {
		return
	}
	point := m.data.Gauge().DataPoints().AppendEmpty()
	point.SetStartTimestamp(start)
	point.SetTimestamp(ts)
	point.SetIntValue(val)
}

// updateCapacity records the largest data-point count seen so far.
func (m *metricOracledbProcessesLimit) updateCapacity() {
	if n := m.data.Gauge().DataPoints().Len(); n > m.capacity {
		m.capacity = n
	}
}

// emit moves accumulated data points onto metrics and re-initializes the
// internal buffer for the next collection cycle. Disabled or empty metrics
// are left untouched.
func (m *metricOracledbProcessesLimit) emit(metrics pmetric.MetricSlice) {
	if !m.config.Enabled || m.data.Gauge().DataPoints().Len() == 0 {
		return
	}
	m.updateCapacity()
	m.data.MoveTo(metrics.AppendEmpty())
	m.init()
}

// newMetricOracledbProcessesLimit returns a recorder for the
// oracledb.processes.limit metric, initialized only when enabled in cfg.
func newMetricOracledbProcessesLimit(cfg MetricConfig) metricOracledbProcessesLimit {
	m := metricOracledbProcessesLimit{config: cfg}
	if !cfg.Enabled {
		return m
	}
	m.data = pmetric.NewMetric()
	m.init()
	return m
}
type metricOracledbProcessesUsage struct {
	data     pmetric.Metric // pending metric payload; handed off to a slice on emit.
	config   MetricConfig   // user-provided config deciding whether this metric is recorded.
	capacity int            // high-water mark of data points observed so far.
}

// init resets m.data to an empty oracledb.processes.usage metric: an
// integer gauge.
func (m *metricOracledbProcessesUsage) init() {
	m.data.SetName("oracledb.processes.usage")
	m.data.SetDescription("Current count of active processes.")
	m.data.SetUnit("{processes}")
	m.data.SetEmptyGauge()
}

// recordDataPoint appends one int data point with the given timestamps.
// It is a no-op when the metric is disabled.
func (m *metricOracledbProcessesUsage) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
	if !m.config.Enabled {
		return
	}
	point := m.data.Gauge().DataPoints().AppendEmpty()
	point.SetStartTimestamp(start)
	point.SetTimestamp(ts)
	point.SetIntValue(val)
}

// updateCapacity records the largest data-point count seen so far.
func (m *metricOracledbProcessesUsage) updateCapacity() {
	if n := m.data.Gauge().DataPoints().Len(); n > m.capacity {
		m.capacity = n
	}
}

// emit moves accumulated data points onto metrics and re-initializes the
// internal buffer for the next collection cycle. Disabled or empty metrics
// are left untouched.
func (m *metricOracledbProcessesUsage) emit(metrics pmetric.MetricSlice) {
	if !m.config.Enabled || m.data.Gauge().DataPoints().Len() == 0 {
		return
	}
	m.updateCapacity()
	m.data.MoveTo(metrics.AppendEmpty())
	m.init()
}

// newMetricOracledbProcessesUsage returns a recorder for the
// oracledb.processes.usage metric, initialized only when enabled in cfg.
func newMetricOracledbProcessesUsage(cfg MetricConfig) metricOracledbProcessesUsage {
	m := metricOracledbProcessesUsage{config: cfg}
	if !cfg.Enabled {
		return m
	}
	m.data = pmetric.NewMetric()
	m.init()
	return m
}
type metricOracledbQueriesParallelized struct {
	data     pmetric.Metric // pending metric payload; handed off to a slice on emit.
	config   MetricConfig   // user-provided config deciding whether this metric is recorded.
	capacity int            // high-water mark of data points observed so far.
}

// init resets m.data to an empty oracledb.queries_parallelized metric: a
// cumulative, monotonic integer sum.
func (m *metricOracledbQueriesParallelized) init() {
	m.data.SetName("oracledb.queries_parallelized")
	m.data.SetDescription("Number of SELECT statements executed in parallel")
	m.data.SetUnit("{queries}")
	m.data.SetEmptySum()
	sum := m.data.Sum()
	sum.SetIsMonotonic(true)
	sum.SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
}

// recordDataPoint appends one int data point with the given timestamps.
// It is a no-op when the metric is disabled.
func (m *metricOracledbQueriesParallelized) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
	if !m.config.Enabled {
		return
	}
	point := m.data.Sum().DataPoints().AppendEmpty()
	point.SetStartTimestamp(start)
	point.SetTimestamp(ts)
	point.SetIntValue(val)
}

// updateCapacity records the largest data-point count seen so far.
func (m *metricOracledbQueriesParallelized) updateCapacity() {
	if n := m.data.Sum().DataPoints().Len(); n > m.capacity {
		m.capacity = n
	}
}

// emit moves accumulated data points onto metrics and re-initializes the
// internal buffer for the next collection cycle. Disabled or empty metrics
// are left untouched.
func (m *metricOracledbQueriesParallelized) emit(metrics pmetric.MetricSlice) {
	if !m.config.Enabled || m.data.Sum().DataPoints().Len() == 0 {
		return
	}
	m.updateCapacity()
	m.data.MoveTo(metrics.AppendEmpty())
	m.init()
}

// newMetricOracledbQueriesParallelized returns a recorder for the
// oracledb.queries_parallelized metric, initialized only when enabled.
func newMetricOracledbQueriesParallelized(cfg MetricConfig) metricOracledbQueriesParallelized {
	m := metricOracledbQueriesParallelized{config: cfg}
	if !cfg.Enabled {
		return m
	}
	m.data = pmetric.NewMetric()
	m.init()
	return m
}
type metricOracledbSessionsLimit struct {
	data     pmetric.Metric // pending metric payload; handed off to a slice on emit.
	config   MetricConfig   // user-provided config deciding whether this metric is recorded.
	capacity int            // high-water mark of data points observed so far.
}

// init resets m.data to an empty oracledb.sessions.limit metric: an
// integer gauge.
func (m *metricOracledbSessionsLimit) init() {
	m.data.SetName("oracledb.sessions.limit")
	m.data.SetDescription("Maximum limit of active sessions, -1 if unlimited.")
	m.data.SetUnit("{sessions}")
	m.data.SetEmptyGauge()
}

// recordDataPoint appends one int data point with the given timestamps.
// It is a no-op when the metric is disabled.
func (m *metricOracledbSessionsLimit) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
	if !m.config.Enabled {
		return
	}
	point := m.data.Gauge().DataPoints().AppendEmpty()
	point.SetStartTimestamp(start)
	point.SetTimestamp(ts)
	point.SetIntValue(val)
}

// updateCapacity records the largest data-point count seen so far.
func (m *metricOracledbSessionsLimit) updateCapacity() {
	if n := m.data.Gauge().DataPoints().Len(); n > m.capacity {
		m.capacity = n
	}
}

// emit moves accumulated data points onto metrics and re-initializes the
// internal buffer for the next collection cycle. Disabled or empty metrics
// are left untouched.
func (m *metricOracledbSessionsLimit) emit(metrics pmetric.MetricSlice) {
	if !m.config.Enabled || m.data.Gauge().DataPoints().Len() == 0 {
		return
	}
	m.updateCapacity()
	m.data.MoveTo(metrics.AppendEmpty())
	m.init()
}

// newMetricOracledbSessionsLimit returns a recorder for the
// oracledb.sessions.limit metric, initialized only when enabled in cfg.
func newMetricOracledbSessionsLimit(cfg MetricConfig) metricOracledbSessionsLimit {
	m := metricOracledbSessionsLimit{config: cfg}
	if !cfg.Enabled {
		return m
	}
	m.data = pmetric.NewMetric()
	m.init()
	return m
}
type metricOracledbSessionsUsage struct {
	data     pmetric.Metric // pending metric payload; handed off to a slice on emit.
	config   MetricConfig   // user-provided config deciding whether this metric is recorded.
	capacity int            // high-water mark of data points; pre-sizes the slice on init.
}

// init resets m.data to an empty oracledb.sessions.usage metric: an integer
// gauge whose data-point slice is pre-sized from the observed capacity.
func (m *metricOracledbSessionsUsage) init() {
	m.data.SetName("oracledb.sessions.usage")
	m.data.SetDescription("Count of active sessions.")
	m.data.SetUnit("{sessions}")
	m.data.SetEmptyGauge()
	m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}

// recordDataPoint appends one int data point carrying the session_type and
// session_status attributes. It is a no-op when the metric is disabled.
func (m *metricOracledbSessionsUsage) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, sessionTypeAttributeValue string, sessionStatusAttributeValue string) {
	if !m.config.Enabled {
		return
	}
	point := m.data.Gauge().DataPoints().AppendEmpty()
	point.SetStartTimestamp(start)
	point.SetTimestamp(ts)
	point.SetIntValue(val)
	attrs := point.Attributes()
	attrs.PutStr("session_type", sessionTypeAttributeValue)
	attrs.PutStr("session_status", sessionStatusAttributeValue)
}

// updateCapacity records the largest data-point count seen so far.
func (m *metricOracledbSessionsUsage) updateCapacity() {
	if n := m.data.Gauge().DataPoints().Len(); n > m.capacity {
		m.capacity = n
	}
}

// emit moves accumulated data points onto metrics and re-initializes the
// internal buffer for the next collection cycle. Disabled or empty metrics
// are left untouched.
func (m *metricOracledbSessionsUsage) emit(metrics pmetric.MetricSlice) {
	if !m.config.Enabled || m.data.Gauge().DataPoints().Len() == 0 {
		return
	}
	m.updateCapacity()
	m.data.MoveTo(metrics.AppendEmpty())
	m.init()
}

// newMetricOracledbSessionsUsage returns a recorder for the
// oracledb.sessions.usage metric, initialized only when enabled in cfg.
func newMetricOracledbSessionsUsage(cfg MetricConfig) metricOracledbSessionsUsage {
	m := metricOracledbSessionsUsage{config: cfg}
	if !cfg.Enabled {
		return m
	}
	m.data = pmetric.NewMetric()
	m.init()
	return m
}
// metricOracledbTablespaceSizeLimit buffers data points for the
// oracledb.tablespace_size.limit metric between scrapes and emits.
type metricOracledbTablespaceSizeLimit struct {
	data     pmetric.Metric // data buffer for generated metric.
	config   MetricConfig   // metric config provided by user.
	capacity int            // max observed number of data points added to the metric.
}

// init fills oracledb.tablespace_size.limit metric with initial data.
func (m *metricOracledbTablespaceSizeLimit) init() {
	m.data.SetEmptyGauge()
	m.data.SetName("oracledb.tablespace_size.limit")
	m.data.SetDescription("Maximum size of tablespace in bytes, -1 if unlimited.")
	m.data.SetUnit("By")
	m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}

// recordDataPoint appends one data point with the tablespace name attribute;
// it is a no-op when the metric is disabled.
func (m *metricOracledbTablespaceSizeLimit) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, tablespaceNameAttributeValue string) {
	if !m.config.Enabled {
		return
	}
	point := m.data.Gauge().DataPoints().AppendEmpty()
	point.SetStartTimestamp(start)
	point.SetTimestamp(ts)
	point.SetIntValue(val)
	point.Attributes().PutStr("tablespace_name", tablespaceNameAttributeValue)
}

// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricOracledbTablespaceSizeLimit) updateCapacity() {
	if n := m.data.Gauge().DataPoints().Len(); n > m.capacity {
		m.capacity = n
	}
}

// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricOracledbTablespaceSizeLimit) emit(metrics pmetric.MetricSlice) {
	if !m.config.Enabled || m.data.Gauge().DataPoints().Len() == 0 {
		return
	}
	m.updateCapacity()
	m.data.MoveTo(metrics.AppendEmpty())
	m.init()
}

// newMetricOracledbTablespaceSizeLimit builds the wrapper; the buffer is
// allocated only when the metric is enabled.
func newMetricOracledbTablespaceSizeLimit(cfg MetricConfig) metricOracledbTablespaceSizeLimit {
	metric := metricOracledbTablespaceSizeLimit{config: cfg}
	if !cfg.Enabled {
		return metric
	}
	metric.data = pmetric.NewMetric()
	metric.init()
	return metric
}
// metricOracledbTablespaceSizeUsage buffers data points for the
// oracledb.tablespace_size.usage metric between scrapes and emits.
type metricOracledbTablespaceSizeUsage struct {
	data     pmetric.Metric // data buffer for generated metric.
	config   MetricConfig   // metric config provided by user.
	capacity int            // max observed number of data points added to the metric.
}

// init fills oracledb.tablespace_size.usage metric with initial data.
func (m *metricOracledbTablespaceSizeUsage) init() {
	m.data.SetEmptyGauge()
	m.data.SetName("oracledb.tablespace_size.usage")
	m.data.SetDescription("Used tablespace in bytes.")
	m.data.SetUnit("By")
	m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}

// recordDataPoint appends one data point with the tablespace name attribute;
// it is a no-op when the metric is disabled.
func (m *metricOracledbTablespaceSizeUsage) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, tablespaceNameAttributeValue string) {
	if !m.config.Enabled {
		return
	}
	point := m.data.Gauge().DataPoints().AppendEmpty()
	point.SetStartTimestamp(start)
	point.SetTimestamp(ts)
	point.SetIntValue(val)
	point.Attributes().PutStr("tablespace_name", tablespaceNameAttributeValue)
}

// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricOracledbTablespaceSizeUsage) updateCapacity() {
	if n := m.data.Gauge().DataPoints().Len(); n > m.capacity {
		m.capacity = n
	}
}

// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricOracledbTablespaceSizeUsage) emit(metrics pmetric.MetricSlice) {
	if !m.config.Enabled || m.data.Gauge().DataPoints().Len() == 0 {
		return
	}
	m.updateCapacity()
	m.data.MoveTo(metrics.AppendEmpty())
	m.init()
}

// newMetricOracledbTablespaceSizeUsage builds the wrapper; the buffer is
// allocated only when the metric is enabled.
func newMetricOracledbTablespaceSizeUsage(cfg MetricConfig) metricOracledbTablespaceSizeUsage {
	metric := metricOracledbTablespaceSizeUsage{config: cfg}
	if !cfg.Enabled {
		return metric
	}
	metric.data = pmetric.NewMetric()
	metric.init()
	return metric
}
// metricOracledbTransactionsLimit buffers data points for the
// oracledb.transactions.limit metric between scrapes and emits.
type metricOracledbTransactionsLimit struct {
	data     pmetric.Metric // data buffer for generated metric.
	config   MetricConfig   // metric config provided by user.
	capacity int            // max observed number of data points added to the metric.
}

// init fills oracledb.transactions.limit metric with initial data.
func (m *metricOracledbTransactionsLimit) init() {
	m.data.SetEmptyGauge()
	m.data.SetName("oracledb.transactions.limit")
	m.data.SetDescription("Maximum limit of active transactions, -1 if unlimited.")
	m.data.SetUnit("{transactions}")
}

// recordDataPoint appends one data point; it is a no-op when the metric is disabled.
func (m *metricOracledbTransactionsLimit) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
	if !m.config.Enabled {
		return
	}
	point := m.data.Gauge().DataPoints().AppendEmpty()
	point.SetStartTimestamp(start)
	point.SetTimestamp(ts)
	point.SetIntValue(val)
}

// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricOracledbTransactionsLimit) updateCapacity() {
	if n := m.data.Gauge().DataPoints().Len(); n > m.capacity {
		m.capacity = n
	}
}

// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricOracledbTransactionsLimit) emit(metrics pmetric.MetricSlice) {
	if !m.config.Enabled || m.data.Gauge().DataPoints().Len() == 0 {
		return
	}
	m.updateCapacity()
	m.data.MoveTo(metrics.AppendEmpty())
	m.init()
}

// newMetricOracledbTransactionsLimit builds the wrapper; the buffer is
// allocated only when the metric is enabled.
func newMetricOracledbTransactionsLimit(cfg MetricConfig) metricOracledbTransactionsLimit {
	metric := metricOracledbTransactionsLimit{config: cfg}
	if !cfg.Enabled {
		return metric
	}
	metric.data = pmetric.NewMetric()
	metric.init()
	return metric
}
// metricOracledbTransactionsUsage buffers data points for the
// oracledb.transactions.usage metric between scrapes and emits.
type metricOracledbTransactionsUsage struct {
	data     pmetric.Metric // data buffer for generated metric.
	config   MetricConfig   // metric config provided by user.
	capacity int            // max observed number of data points added to the metric.
}

// init fills oracledb.transactions.usage metric with initial data.
func (m *metricOracledbTransactionsUsage) init() {
	m.data.SetEmptyGauge()
	m.data.SetName("oracledb.transactions.usage")
	m.data.SetDescription("Current count of active transactions.")
	m.data.SetUnit("{transactions}")
}

// recordDataPoint appends one data point; it is a no-op when the metric is disabled.
func (m *metricOracledbTransactionsUsage) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
	if !m.config.Enabled {
		return
	}
	point := m.data.Gauge().DataPoints().AppendEmpty()
	point.SetStartTimestamp(start)
	point.SetTimestamp(ts)
	point.SetIntValue(val)
}

// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricOracledbTransactionsUsage) updateCapacity() {
	if n := m.data.Gauge().DataPoints().Len(); n > m.capacity {
		m.capacity = n
	}
}

// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricOracledbTransactionsUsage) emit(metrics pmetric.MetricSlice) {
	if !m.config.Enabled || m.data.Gauge().DataPoints().Len() == 0 {
		return
	}
	m.updateCapacity()
	m.data.MoveTo(metrics.AppendEmpty())
	m.init()
}

// newMetricOracledbTransactionsUsage builds the wrapper; the buffer is
// allocated only when the metric is enabled.
func newMetricOracledbTransactionsUsage(cfg MetricConfig) metricOracledbTransactionsUsage {
	metric := metricOracledbTransactionsUsage{config: cfg}
	if !cfg.Enabled {
		return metric
	}
	metric.data = pmetric.NewMetric()
	metric.init()
	return metric
}
// metricOracledbUserCommits buffers data points for the
// oracledb.user_commits cumulative monotonic sum between scrapes and emits.
type metricOracledbUserCommits struct {
	data     pmetric.Metric // data buffer for generated metric.
	config   MetricConfig   // metric config provided by user.
	capacity int            // max observed number of data points added to the metric.
}

// init fills oracledb.user_commits metric with initial data.
func (m *metricOracledbUserCommits) init() {
	m.data.SetEmptySum()
	m.data.SetName("oracledb.user_commits")
	m.data.SetDescription("Number of user commits. When a user commits a transaction, the redo generated that reflects the changes made to database blocks must be written to disk. Commits often represent the closest thing to a user transaction rate.")
	m.data.SetUnit("{commits}")
	sum := m.data.Sum()
	sum.SetIsMonotonic(true)
	sum.SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
}

// recordDataPoint appends one data point; it is a no-op when the metric is disabled.
func (m *metricOracledbUserCommits) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
	if !m.config.Enabled {
		return
	}
	point := m.data.Sum().DataPoints().AppendEmpty()
	point.SetStartTimestamp(start)
	point.SetTimestamp(ts)
	point.SetIntValue(val)
}

// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricOracledbUserCommits) updateCapacity() {
	if n := m.data.Sum().DataPoints().Len(); n > m.capacity {
		m.capacity = n
	}
}

// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricOracledbUserCommits) emit(metrics pmetric.MetricSlice) {
	if !m.config.Enabled || m.data.Sum().DataPoints().Len() == 0 {
		return
	}
	m.updateCapacity()
	m.data.MoveTo(metrics.AppendEmpty())
	m.init()
}

// newMetricOracledbUserCommits builds the wrapper; the buffer is allocated
// only when the metric is enabled.
func newMetricOracledbUserCommits(cfg MetricConfig) metricOracledbUserCommits {
	metric := metricOracledbUserCommits{config: cfg}
	if !cfg.Enabled {
		return metric
	}
	metric.data = pmetric.NewMetric()
	metric.init()
	return metric
}
// metricOracledbUserRollbacks buffers data points for the
// oracledb.user_rollbacks cumulative monotonic sum between scrapes and emits.
type metricOracledbUserRollbacks struct {
	data     pmetric.Metric // data buffer for generated metric.
	config   MetricConfig   // metric config provided by user.
	capacity int            // max observed number of data points added to the metric.
}

// init fills oracledb.user_rollbacks metric with initial data.
func (m *metricOracledbUserRollbacks) init() {
	m.data.SetEmptySum()
	m.data.SetName("oracledb.user_rollbacks")
	m.data.SetDescription("Number of times users manually issue the ROLLBACK statement or an error occurs during a user's transactions")
	m.data.SetUnit("1")
	sum := m.data.Sum()
	sum.SetIsMonotonic(true)
	sum.SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
}

// recordDataPoint appends one data point; it is a no-op when the metric is disabled.
func (m *metricOracledbUserRollbacks) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
	if !m.config.Enabled {
		return
	}
	point := m.data.Sum().DataPoints().AppendEmpty()
	point.SetStartTimestamp(start)
	point.SetTimestamp(ts)
	point.SetIntValue(val)
}

// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricOracledbUserRollbacks) updateCapacity() {
	if n := m.data.Sum().DataPoints().Len(); n > m.capacity {
		m.capacity = n
	}
}

// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricOracledbUserRollbacks) emit(metrics pmetric.MetricSlice) {
	if !m.config.Enabled || m.data.Sum().DataPoints().Len() == 0 {
		return
	}
	m.updateCapacity()
	m.data.MoveTo(metrics.AppendEmpty())
	m.init()
}

// newMetricOracledbUserRollbacks builds the wrapper; the buffer is allocated
// only when the metric is enabled.
func newMetricOracledbUserRollbacks(cfg MetricConfig) metricOracledbUserRollbacks {
	metric := metricOracledbUserRollbacks{config: cfg}
	if !cfg.Enabled {
		return metric
	}
	metric.data = pmetric.NewMetric()
	metric.init()
	return metric
}
// MetricsBuilder provides an interface for scrapers to report metrics while taking care of all the transformations
// required to produce metric representation defined in metadata and user config.
type MetricsBuilder struct {
	config          MetricsBuilderConfig // config of the metrics builder.
	startTime       pcommon.Timestamp    // start time that will be applied to all recorded data points.
	metricsCapacity int                  // maximum observed number of metrics per resource.
	metricsBuffer   pmetric.Metrics      // accumulates metrics data before emitting.
	buildInfo       component.BuildInfo  // contains version information.
	// Resources whose attribute values fail an include filter (or match an
	// exclude filter) are dropped in EmitForResource.
	resourceAttributeIncludeFilter map[string]filter.Filter
	resourceAttributeExcludeFilter map[string]filter.Filter
	// One buffer per generated metric; each is filled by the corresponding
	// Record*DataPoint method and drained by EmitForResource.
	metricOracledbConsistentGets                        metricOracledbConsistentGets
	metricOracledbCPUTime                               metricOracledbCPUTime
	metricOracledbDbBlockGets                           metricOracledbDbBlockGets
	metricOracledbDdlStatementsParallelized             metricOracledbDdlStatementsParallelized
	metricOracledbDmlLocksLimit                         metricOracledbDmlLocksLimit
	metricOracledbDmlLocksUsage                         metricOracledbDmlLocksUsage
	metricOracledbDmlStatementsParallelized             metricOracledbDmlStatementsParallelized
	metricOracledbEnqueueDeadlocks                      metricOracledbEnqueueDeadlocks
	metricOracledbEnqueueLocksLimit                     metricOracledbEnqueueLocksLimit
	metricOracledbEnqueueLocksUsage                     metricOracledbEnqueueLocksUsage
	metricOracledbEnqueueResourcesLimit                 metricOracledbEnqueueResourcesLimit
	metricOracledbEnqueueResourcesUsage                 metricOracledbEnqueueResourcesUsage
	metricOracledbExchangeDeadlocks                     metricOracledbExchangeDeadlocks
	metricOracledbExecutions                            metricOracledbExecutions
	metricOracledbHardParses                            metricOracledbHardParses
	metricOracledbLogicalReads                          metricOracledbLogicalReads
	metricOracledbParallelOperationsDowngraded1To25Pct  metricOracledbParallelOperationsDowngraded1To25Pct
	metricOracledbParallelOperationsDowngraded25To50Pct metricOracledbParallelOperationsDowngraded25To50Pct
	metricOracledbParallelOperationsDowngraded50To75Pct metricOracledbParallelOperationsDowngraded50To75Pct
	metricOracledbParallelOperationsDowngraded75To99Pct metricOracledbParallelOperationsDowngraded75To99Pct
	metricOracledbParallelOperationsDowngradedToSerial  metricOracledbParallelOperationsDowngradedToSerial
	metricOracledbParallelOperationsNotDowngraded       metricOracledbParallelOperationsNotDowngraded
	metricOracledbParseCalls                            metricOracledbParseCalls
	metricOracledbPgaMemory                             metricOracledbPgaMemory
	metricOracledbPhysicalReadIoRequests                metricOracledbPhysicalReadIoRequests
	metricOracledbPhysicalReads                         metricOracledbPhysicalReads
	metricOracledbPhysicalReadsDirect                   metricOracledbPhysicalReadsDirect
	metricOracledbPhysicalWriteIoRequests               metricOracledbPhysicalWriteIoRequests
	metricOracledbPhysicalWrites                        metricOracledbPhysicalWrites
	metricOracledbPhysicalWritesDirect                  metricOracledbPhysicalWritesDirect
	metricOracledbProcessesLimit                        metricOracledbProcessesLimit
	metricOracledbProcessesUsage                        metricOracledbProcessesUsage
	metricOracledbQueriesParallelized                   metricOracledbQueriesParallelized
	metricOracledbSessionsLimit                         metricOracledbSessionsLimit
	metricOracledbSessionsUsage                         metricOracledbSessionsUsage
	metricOracledbTablespaceSizeLimit                   metricOracledbTablespaceSizeLimit
	metricOracledbTablespaceSizeUsage                   metricOracledbTablespaceSizeUsage
	metricOracledbTransactionsLimit                     metricOracledbTransactionsLimit
	metricOracledbTransactionsUsage                     metricOracledbTransactionsUsage
	metricOracledbUserCommits                           metricOracledbUserCommits
	metricOracledbUserRollbacks                         metricOracledbUserRollbacks
}
// MetricBuilderOption applies changes to default metrics builder.
type MetricBuilderOption interface {
	apply(*MetricsBuilder)
}

// metricBuilderOptionFunc adapts a plain function to the MetricBuilderOption interface.
type metricBuilderOptionFunc func(mb *MetricsBuilder)

// apply invokes the wrapped function on the builder.
func (mbof metricBuilderOptionFunc) apply(mb *MetricsBuilder) {
	mbof(mb)
}
// WithStartTime sets startTime on the metrics builder.
func WithStartTime(startTime pcommon.Timestamp) MetricBuilderOption {
	setStart := func(mb *MetricsBuilder) {
		mb.startTime = startTime
	}
	return metricBuilderOptionFunc(setStart)
}
// NewMetricsBuilder creates a MetricsBuilder for the receiver's generated
// metrics using the provided config and build info. Each per-metric buffer is
// constructed from its MetricConfig (disabled metrics stay unallocated), the
// resource-attribute include/exclude filters are compiled, and any
// MetricBuilderOption values are applied last so they can override defaults
// such as startTime.
func NewMetricsBuilder(mbc MetricsBuilderConfig, settings receiver.Settings, options ...MetricBuilderOption) *MetricsBuilder {
	mb := &MetricsBuilder{
		config: mbc,
		startTime: pcommon.NewTimestampFromTime(time.Now()),
		metricsBuffer: pmetric.NewMetrics(),
		buildInfo: settings.BuildInfo,
		metricOracledbConsistentGets: newMetricOracledbConsistentGets(mbc.Metrics.OracledbConsistentGets),
		metricOracledbCPUTime: newMetricOracledbCPUTime(mbc.Metrics.OracledbCPUTime),
		metricOracledbDbBlockGets: newMetricOracledbDbBlockGets(mbc.Metrics.OracledbDbBlockGets),
		metricOracledbDdlStatementsParallelized: newMetricOracledbDdlStatementsParallelized(mbc.Metrics.OracledbDdlStatementsParallelized),
		metricOracledbDmlLocksLimit: newMetricOracledbDmlLocksLimit(mbc.Metrics.OracledbDmlLocksLimit),
		metricOracledbDmlLocksUsage: newMetricOracledbDmlLocksUsage(mbc.Metrics.OracledbDmlLocksUsage),
		metricOracledbDmlStatementsParallelized: newMetricOracledbDmlStatementsParallelized(mbc.Metrics.OracledbDmlStatementsParallelized),
		metricOracledbEnqueueDeadlocks: newMetricOracledbEnqueueDeadlocks(mbc.Metrics.OracledbEnqueueDeadlocks),
		metricOracledbEnqueueLocksLimit: newMetricOracledbEnqueueLocksLimit(mbc.Metrics.OracledbEnqueueLocksLimit),
		metricOracledbEnqueueLocksUsage: newMetricOracledbEnqueueLocksUsage(mbc.Metrics.OracledbEnqueueLocksUsage),
		metricOracledbEnqueueResourcesLimit: newMetricOracledbEnqueueResourcesLimit(mbc.Metrics.OracledbEnqueueResourcesLimit),
		metricOracledbEnqueueResourcesUsage: newMetricOracledbEnqueueResourcesUsage(mbc.Metrics.OracledbEnqueueResourcesUsage),
		metricOracledbExchangeDeadlocks: newMetricOracledbExchangeDeadlocks(mbc.Metrics.OracledbExchangeDeadlocks),
		metricOracledbExecutions: newMetricOracledbExecutions(mbc.Metrics.OracledbExecutions),
		metricOracledbHardParses: newMetricOracledbHardParses(mbc.Metrics.OracledbHardParses),
		metricOracledbLogicalReads: newMetricOracledbLogicalReads(mbc.Metrics.OracledbLogicalReads),
		metricOracledbParallelOperationsDowngraded1To25Pct: newMetricOracledbParallelOperationsDowngraded1To25Pct(mbc.Metrics.OracledbParallelOperationsDowngraded1To25Pct),
		metricOracledbParallelOperationsDowngraded25To50Pct: newMetricOracledbParallelOperationsDowngraded25To50Pct(mbc.Metrics.OracledbParallelOperationsDowngraded25To50Pct),
		metricOracledbParallelOperationsDowngraded50To75Pct: newMetricOracledbParallelOperationsDowngraded50To75Pct(mbc.Metrics.OracledbParallelOperationsDowngraded50To75Pct),
		metricOracledbParallelOperationsDowngraded75To99Pct: newMetricOracledbParallelOperationsDowngraded75To99Pct(mbc.Metrics.OracledbParallelOperationsDowngraded75To99Pct),
		metricOracledbParallelOperationsDowngradedToSerial: newMetricOracledbParallelOperationsDowngradedToSerial(mbc.Metrics.OracledbParallelOperationsDowngradedToSerial),
		metricOracledbParallelOperationsNotDowngraded: newMetricOracledbParallelOperationsNotDowngraded(mbc.Metrics.OracledbParallelOperationsNotDowngraded),
		metricOracledbParseCalls: newMetricOracledbParseCalls(mbc.Metrics.OracledbParseCalls),
		metricOracledbPgaMemory: newMetricOracledbPgaMemory(mbc.Metrics.OracledbPgaMemory),
		metricOracledbPhysicalReadIoRequests: newMetricOracledbPhysicalReadIoRequests(mbc.Metrics.OracledbPhysicalReadIoRequests),
		metricOracledbPhysicalReads: newMetricOracledbPhysicalReads(mbc.Metrics.OracledbPhysicalReads),
		metricOracledbPhysicalReadsDirect: newMetricOracledbPhysicalReadsDirect(mbc.Metrics.OracledbPhysicalReadsDirect),
		metricOracledbPhysicalWriteIoRequests: newMetricOracledbPhysicalWriteIoRequests(mbc.Metrics.OracledbPhysicalWriteIoRequests),
		metricOracledbPhysicalWrites: newMetricOracledbPhysicalWrites(mbc.Metrics.OracledbPhysicalWrites),
		metricOracledbPhysicalWritesDirect: newMetricOracledbPhysicalWritesDirect(mbc.Metrics.OracledbPhysicalWritesDirect),
		metricOracledbProcessesLimit: newMetricOracledbProcessesLimit(mbc.Metrics.OracledbProcessesLimit),
		metricOracledbProcessesUsage: newMetricOracledbProcessesUsage(mbc.Metrics.OracledbProcessesUsage),
		metricOracledbQueriesParallelized: newMetricOracledbQueriesParallelized(mbc.Metrics.OracledbQueriesParallelized),
		metricOracledbSessionsLimit: newMetricOracledbSessionsLimit(mbc.Metrics.OracledbSessionsLimit),
		metricOracledbSessionsUsage: newMetricOracledbSessionsUsage(mbc.Metrics.OracledbSessionsUsage),
		metricOracledbTablespaceSizeLimit: newMetricOracledbTablespaceSizeLimit(mbc.Metrics.OracledbTablespaceSizeLimit),
		metricOracledbTablespaceSizeUsage: newMetricOracledbTablespaceSizeUsage(mbc.Metrics.OracledbTablespaceSizeUsage),
		metricOracledbTransactionsLimit: newMetricOracledbTransactionsLimit(mbc.Metrics.OracledbTransactionsLimit),
		metricOracledbTransactionsUsage: newMetricOracledbTransactionsUsage(mbc.Metrics.OracledbTransactionsUsage),
		metricOracledbUserCommits: newMetricOracledbUserCommits(mbc.Metrics.OracledbUserCommits),
		metricOracledbUserRollbacks: newMetricOracledbUserRollbacks(mbc.Metrics.OracledbUserRollbacks),
		resourceAttributeIncludeFilter: make(map[string]filter.Filter),
		resourceAttributeExcludeFilter: make(map[string]filter.Filter),
	}
	// Compile resource-attribute filters only when configured, so the maps
	// stay empty (and EmitForResource skips filtering) by default.
	if mbc.ResourceAttributes.OracledbInstanceName.MetricsInclude != nil {
		mb.resourceAttributeIncludeFilter["oracledb.instance.name"] = filter.CreateFilter(mbc.ResourceAttributes.OracledbInstanceName.MetricsInclude)
	}
	if mbc.ResourceAttributes.OracledbInstanceName.MetricsExclude != nil {
		mb.resourceAttributeExcludeFilter["oracledb.instance.name"] = filter.CreateFilter(mbc.ResourceAttributes.OracledbInstanceName.MetricsExclude)
	}
	// Options run last so they may override any of the defaults above.
	for _, op := range options {
		op.apply(mb)
	}
	return mb
}
// NewResourceBuilder returns a new resource builder that should be used to build a resource associated with for the emitted metrics.
func (mb *MetricsBuilder) NewResourceBuilder() *ResourceBuilder {
	rb := NewResourceBuilder(mb.config.ResourceAttributes)
	return rb
}
// updateCapacity updates max length of metrics and resource attributes that will be used for the slice capacity.
func (mb *MetricsBuilder) updateCapacity(rm pmetric.ResourceMetrics) {
	if n := rm.ScopeMetrics().At(0).Metrics().Len(); n > mb.metricsCapacity {
		mb.metricsCapacity = n
	}
}
// ResourceMetricsOption applies changes to provided resource metrics.
type ResourceMetricsOption interface {
	apply(pmetric.ResourceMetrics)
}

// resourceMetricsOptionFunc adapts a plain function to the ResourceMetricsOption interface.
type resourceMetricsOptionFunc func(pmetric.ResourceMetrics)

// apply invokes the wrapped function on the resource metrics.
func (rmof resourceMetricsOptionFunc) apply(rm pmetric.ResourceMetrics) {
	rmof(rm)
}
// WithResource sets the provided resource on the emitted ResourceMetrics.
// It's recommended to use ResourceBuilder to create the resource.
func WithResource(res pcommon.Resource) ResourceMetricsOption {
	copyResource := func(rm pmetric.ResourceMetrics) {
		res.CopyTo(rm.Resource())
	}
	return resourceMetricsOptionFunc(copyResource)
}
// WithStartTimeOverride overrides start time for all the resource metrics data points.
// This option should be only used if different start time has to be set on metrics coming from different resources.
func WithStartTimeOverride(start pcommon.Timestamp) ResourceMetricsOption {
	return resourceMetricsOptionFunc(func(rm pmetric.ResourceMetrics) {
		metrics := rm.ScopeMetrics().At(0).Metrics()
		for i := 0; i < metrics.Len(); i++ {
			// Scope dps to the iteration: with the original outer declaration,
			// a metric of any other type would either reuse the previous
			// metric's data points (re-stamping them) or, if first, call Len()
			// on a zero-value slice.
			var dps pmetric.NumberDataPointSlice
			switch metrics.At(i).Type() {
			case pmetric.MetricTypeGauge:
				dps = metrics.At(i).Gauge().DataPoints()
			case pmetric.MetricTypeSum:
				dps = metrics.At(i).Sum().DataPoints()
			default:
				// This receiver only generates gauges and sums; skip anything else.
				continue
			}
			for j := 0; j < dps.Len(); j++ {
				dps.At(j).SetStartTimestamp(start)
			}
		}
	})
}
// EmitForResource saves all the generated metrics under a new resource and updates the internal state to be ready for
// recording another set of data points as part of another resource. This function can be helpful when one scraper
// needs to emit metrics from several resources. Otherwise calling this function is not required,
// just `Emit` function can be called instead.
// Resource attributes should be provided as ResourceMetricsOption arguments.
func (mb *MetricsBuilder) EmitForResource(options ...ResourceMetricsOption) {
	rm := pmetric.NewResourceMetrics()
	ils := rm.ScopeMetrics().AppendEmpty()
	ils.Scope().SetName(ScopeName)
	ils.Scope().SetVersion(mb.buildInfo.Version)
	ils.Metrics().EnsureCapacity(mb.metricsCapacity)
	// Drain every per-metric buffer into the scope; disabled/empty metrics are
	// no-ops inside emit.
	mb.metricOracledbConsistentGets.emit(ils.Metrics())
	mb.metricOracledbCPUTime.emit(ils.Metrics())
	mb.metricOracledbDbBlockGets.emit(ils.Metrics())
	mb.metricOracledbDdlStatementsParallelized.emit(ils.Metrics())
	mb.metricOracledbDmlLocksLimit.emit(ils.Metrics())
	mb.metricOracledbDmlLocksUsage.emit(ils.Metrics())
	mb.metricOracledbDmlStatementsParallelized.emit(ils.Metrics())
	mb.metricOracledbEnqueueDeadlocks.emit(ils.Metrics())
	mb.metricOracledbEnqueueLocksLimit.emit(ils.Metrics())
	mb.metricOracledbEnqueueLocksUsage.emit(ils.Metrics())
	mb.metricOracledbEnqueueResourcesLimit.emit(ils.Metrics())
	mb.metricOracledbEnqueueResourcesUsage.emit(ils.Metrics())
	mb.metricOracledbExchangeDeadlocks.emit(ils.Metrics())
	mb.metricOracledbExecutions.emit(ils.Metrics())
	mb.metricOracledbHardParses.emit(ils.Metrics())
	mb.metricOracledbLogicalReads.emit(ils.Metrics())
	mb.metricOracledbParallelOperationsDowngraded1To25Pct.emit(ils.Metrics())
	mb.metricOracledbParallelOperationsDowngraded25To50Pct.emit(ils.Metrics())
	mb.metricOracledbParallelOperationsDowngraded50To75Pct.emit(ils.Metrics())
	mb.metricOracledbParallelOperationsDowngraded75To99Pct.emit(ils.Metrics())
	mb.metricOracledbParallelOperationsDowngradedToSerial.emit(ils.Metrics())
	mb.metricOracledbParallelOperationsNotDowngraded.emit(ils.Metrics())
	mb.metricOracledbParseCalls.emit(ils.Metrics())
	mb.metricOracledbPgaMemory.emit(ils.Metrics())
	mb.metricOracledbPhysicalReadIoRequests.emit(ils.Metrics())
	mb.metricOracledbPhysicalReads.emit(ils.Metrics())
	mb.metricOracledbPhysicalReadsDirect.emit(ils.Metrics())
	mb.metricOracledbPhysicalWriteIoRequests.emit(ils.Metrics())
	mb.metricOracledbPhysicalWrites.emit(ils.Metrics())
	mb.metricOracledbPhysicalWritesDirect.emit(ils.Metrics())
	mb.metricOracledbProcessesLimit.emit(ils.Metrics())
	mb.metricOracledbProcessesUsage.emit(ils.Metrics())
	mb.metricOracledbQueriesParallelized.emit(ils.Metrics())
	mb.metricOracledbSessionsLimit.emit(ils.Metrics())
	mb.metricOracledbSessionsUsage.emit(ils.Metrics())
	mb.metricOracledbTablespaceSizeLimit.emit(ils.Metrics())
	mb.metricOracledbTablespaceSizeUsage.emit(ils.Metrics())
	mb.metricOracledbTransactionsLimit.emit(ils.Metrics())
	mb.metricOracledbTransactionsUsage.emit(ils.Metrics())
	mb.metricOracledbUserCommits.emit(ils.Metrics())
	mb.metricOracledbUserRollbacks.emit(ils.Metrics())
	// Options (e.g. WithResource) run after emit so they see the final metrics.
	for _, op := range options {
		op.apply(rm)
	}
	// Drop the whole resource when a configured include filter does not match,
	// or an exclude filter does match, one of its attribute values.
	for attr, filter := range mb.resourceAttributeIncludeFilter {
		if val, ok := rm.Resource().Attributes().Get(attr); ok && !filter.Matches(val.AsString()) {
			return
		}
	}
	for attr, filter := range mb.resourceAttributeExcludeFilter {
		if val, ok := rm.Resource().Attributes().Get(attr); ok && filter.Matches(val.AsString()) {
			return
		}
	}
	if ils.Metrics().Len() > 0 {
		mb.updateCapacity(rm)
		rm.MoveTo(mb.metricsBuffer.ResourceMetrics().AppendEmpty())
	}
}
// Emit returns all the metrics accumulated by the metrics builder and updates the internal state to be ready for
// recording another set of metrics. This function will be responsible for applying all the transformations required to
// produce metric representation defined in metadata and user config, e.g. delta or cumulative.
func (mb *MetricsBuilder) Emit(options ...ResourceMetricsOption) pmetric.Metrics {
	mb.EmitForResource(options...)
	// Hand the accumulated buffer to the caller and start a fresh one.
	out := mb.metricsBuffer
	mb.metricsBuffer = pmetric.NewMetrics()
	return out
}
// RecordOracledbConsistentGetsDataPoint adds a data point to oracledb.consistent_gets metric.
// The raw string value is parsed as a base-10 int64 before recording.
func (mb *MetricsBuilder) RecordOracledbConsistentGetsDataPoint(ts pcommon.Timestamp, inputVal string) error {
	parsed, parseErr := strconv.ParseInt(inputVal, 10, 64)
	if parseErr != nil {
		return fmt.Errorf("failed to parse int64 for OracledbConsistentGets, value was %s: %w", inputVal, parseErr)
	}
	mb.metricOracledbConsistentGets.recordDataPoint(mb.startTime, ts, parsed)
	return nil
}

// RecordOracledbCPUTimeDataPoint adds a data point to oracledb.cpu_time metric.
func (mb *MetricsBuilder) RecordOracledbCPUTimeDataPoint(ts pcommon.Timestamp, val float64) {
	mb.metricOracledbCPUTime.recordDataPoint(mb.startTime, ts, val)
}

// RecordOracledbDbBlockGetsDataPoint adds a data point to oracledb.db_block_gets metric.
// The raw string value is parsed as a base-10 int64 before recording.
func (mb *MetricsBuilder) RecordOracledbDbBlockGetsDataPoint(ts pcommon.Timestamp, inputVal string) error {
	parsed, parseErr := strconv.ParseInt(inputVal, 10, 64)
	if parseErr != nil {
		return fmt.Errorf("failed to parse int64 for OracledbDbBlockGets, value was %s: %w", inputVal, parseErr)
	}
	mb.metricOracledbDbBlockGets.recordDataPoint(mb.startTime, ts, parsed)
	return nil
}

// RecordOracledbDdlStatementsParallelizedDataPoint adds a data point to oracledb.ddl_statements_parallelized metric.
// The raw string value is parsed as a base-10 int64 before recording.
func (mb *MetricsBuilder) RecordOracledbDdlStatementsParallelizedDataPoint(ts pcommon.Timestamp, inputVal string) error {
	parsed, parseErr := strconv.ParseInt(inputVal, 10, 64)
	if parseErr != nil {
		return fmt.Errorf("failed to parse int64 for OracledbDdlStatementsParallelized, value was %s: %w", inputVal, parseErr)
	}
	mb.metricOracledbDdlStatementsParallelized.recordDataPoint(mb.startTime, ts, parsed)
	return nil
}
// RecordOracledbDmlLocksLimitDataPoint adds a data point to oracledb.dml_locks.limit metric.
// The raw string value is parsed as a base-10 int64 before recording.
func (mb *MetricsBuilder) RecordOracledbDmlLocksLimitDataPoint(ts pcommon.Timestamp, inputVal string) error {
	parsed, parseErr := strconv.ParseInt(inputVal, 10, 64)
	if parseErr != nil {
		return fmt.Errorf("failed to parse int64 for OracledbDmlLocksLimit, value was %s: %w", inputVal, parseErr)
	}
	mb.metricOracledbDmlLocksLimit.recordDataPoint(mb.startTime, ts, parsed)
	return nil
}

// RecordOracledbDmlLocksUsageDataPoint adds a data point to oracledb.dml_locks.usage metric.
// The raw string value is parsed as a base-10 int64 before recording.
func (mb *MetricsBuilder) RecordOracledbDmlLocksUsageDataPoint(ts pcommon.Timestamp, inputVal string) error {
	parsed, parseErr := strconv.ParseInt(inputVal, 10, 64)
	if parseErr != nil {
		return fmt.Errorf("failed to parse int64 for OracledbDmlLocksUsage, value was %s: %w", inputVal, parseErr)
	}
	mb.metricOracledbDmlLocksUsage.recordDataPoint(mb.startTime, ts, parsed)
	return nil
}

// RecordOracledbDmlStatementsParallelizedDataPoint adds a data point to oracledb.dml_statements_parallelized metric.
// The raw string value is parsed as a base-10 int64 before recording.
func (mb *MetricsBuilder) RecordOracledbDmlStatementsParallelizedDataPoint(ts pcommon.Timestamp, inputVal string) error {
	parsed, parseErr := strconv.ParseInt(inputVal, 10, 64)
	if parseErr != nil {
		return fmt.Errorf("failed to parse int64 for OracledbDmlStatementsParallelized, value was %s: %w", inputVal, parseErr)
	}
	mb.metricOracledbDmlStatementsParallelized.recordDataPoint(mb.startTime, ts, parsed)
	return nil
}
// RecordOracledbEnqueueDeadlocksDataPoint adds a data point to oracledb.enqueue_deadlocks metric.
// The raw string value is parsed as a base-10 int64 before recording.
func (mb *MetricsBuilder) RecordOracledbEnqueueDeadlocksDataPoint(ts pcommon.Timestamp, inputVal string) error {
	parsed, parseErr := strconv.ParseInt(inputVal, 10, 64)
	if parseErr != nil {
		return fmt.Errorf("failed to parse int64 for OracledbEnqueueDeadlocks, value was %s: %w", inputVal, parseErr)
	}
	mb.metricOracledbEnqueueDeadlocks.recordDataPoint(mb.startTime, ts, parsed)
	return nil
}

// RecordOracledbEnqueueLocksLimitDataPoint adds a data point to oracledb.enqueue_locks.limit metric.
// The raw string value is parsed as a base-10 int64 before recording.
func (mb *MetricsBuilder) RecordOracledbEnqueueLocksLimitDataPoint(ts pcommon.Timestamp, inputVal string) error {
	parsed, parseErr := strconv.ParseInt(inputVal, 10, 64)
	if parseErr != nil {
		return fmt.Errorf("failed to parse int64 for OracledbEnqueueLocksLimit, value was %s: %w", inputVal, parseErr)
	}
	mb.metricOracledbEnqueueLocksLimit.recordDataPoint(mb.startTime, ts, parsed)
	return nil
}

// RecordOracledbEnqueueLocksUsageDataPoint adds a data point to oracledb.enqueue_locks.usage metric.
// The raw string value is parsed as a base-10 int64 before recording.
func (mb *MetricsBuilder) RecordOracledbEnqueueLocksUsageDataPoint(ts pcommon.Timestamp, inputVal string) error {
	parsed, parseErr := strconv.ParseInt(inputVal, 10, 64)
	if parseErr != nil {
		return fmt.Errorf("failed to parse int64 for OracledbEnqueueLocksUsage, value was %s: %w", inputVal, parseErr)
	}
	mb.metricOracledbEnqueueLocksUsage.recordDataPoint(mb.startTime, ts, parsed)
	return nil
}
// RecordOracledbEnqueueResourcesLimitDataPoint parses inputVal as a base-10 int64 and records it
// as a data point on the oracledb.enqueue_resources.limit metric; a parse failure is returned as an error.
func (mb *MetricsBuilder) RecordOracledbEnqueueResourcesLimitDataPoint(ts pcommon.Timestamp, inputVal string) error {
	parsed, parseErr := strconv.ParseInt(inputVal, 10, 64)
	if parseErr != nil {
		return fmt.Errorf("failed to parse int64 for OracledbEnqueueResourcesLimit, value was %s: %w", inputVal, parseErr)
	}
	mb.metricOracledbEnqueueResourcesLimit.recordDataPoint(mb.startTime, ts, parsed)
	return nil
}
// RecordOracledbEnqueueResourcesUsageDataPoint parses inputVal as a base-10 int64 and records it
// as a data point on the oracledb.enqueue_resources.usage metric; a parse failure is returned as an error.
func (mb *MetricsBuilder) RecordOracledbEnqueueResourcesUsageDataPoint(ts pcommon.Timestamp, inputVal string) error {
	parsed, parseErr := strconv.ParseInt(inputVal, 10, 64)
	if parseErr != nil {
		return fmt.Errorf("failed to parse int64 for OracledbEnqueueResourcesUsage, value was %s: %w", inputVal, parseErr)
	}
	mb.metricOracledbEnqueueResourcesUsage.recordDataPoint(mb.startTime, ts, parsed)
	return nil
}
// RecordOracledbExchangeDeadlocksDataPoint parses inputVal as a base-10 int64 and records it
// as a data point on the oracledb.exchange_deadlocks metric; a parse failure is returned as an error.
func (mb *MetricsBuilder) RecordOracledbExchangeDeadlocksDataPoint(ts pcommon.Timestamp, inputVal string) error {
	parsed, parseErr := strconv.ParseInt(inputVal, 10, 64)
	if parseErr != nil {
		return fmt.Errorf("failed to parse int64 for OracledbExchangeDeadlocks, value was %s: %w", inputVal, parseErr)
	}
	mb.metricOracledbExchangeDeadlocks.recordDataPoint(mb.startTime, ts, parsed)
	return nil
}
// RecordOracledbExecutionsDataPoint parses inputVal as a base-10 int64 and records it
// as a data point on the oracledb.executions metric; a parse failure is returned as an error.
func (mb *MetricsBuilder) RecordOracledbExecutionsDataPoint(ts pcommon.Timestamp, inputVal string) error {
	parsed, parseErr := strconv.ParseInt(inputVal, 10, 64)
	if parseErr != nil {
		return fmt.Errorf("failed to parse int64 for OracledbExecutions, value was %s: %w", inputVal, parseErr)
	}
	mb.metricOracledbExecutions.recordDataPoint(mb.startTime, ts, parsed)
	return nil
}
// RecordOracledbHardParsesDataPoint parses inputVal as a base-10 int64 and records it
// as a data point on the oracledb.hard_parses metric; a parse failure is returned as an error.
func (mb *MetricsBuilder) RecordOracledbHardParsesDataPoint(ts pcommon.Timestamp, inputVal string) error {
	parsed, parseErr := strconv.ParseInt(inputVal, 10, 64)
	if parseErr != nil {
		return fmt.Errorf("failed to parse int64 for OracledbHardParses, value was %s: %w", inputVal, parseErr)
	}
	mb.metricOracledbHardParses.recordDataPoint(mb.startTime, ts, parsed)
	return nil
}
// RecordOracledbLogicalReadsDataPoint parses inputVal as a base-10 int64 and records it
// as a data point on the oracledb.logical_reads metric; a parse failure is returned as an error.
func (mb *MetricsBuilder) RecordOracledbLogicalReadsDataPoint(ts pcommon.Timestamp, inputVal string) error {
	parsed, parseErr := strconv.ParseInt(inputVal, 10, 64)
	if parseErr != nil {
		return fmt.Errorf("failed to parse int64 for OracledbLogicalReads, value was %s: %w", inputVal, parseErr)
	}
	mb.metricOracledbLogicalReads.recordDataPoint(mb.startTime, ts, parsed)
	return nil
}
// RecordOracledbParallelOperationsDowngraded1To25PctDataPoint parses inputVal as a base-10 int64 and records it
// as a data point on the oracledb.parallel_operations_downgraded_1_to_25_pct metric; a parse failure is returned as an error.
func (mb *MetricsBuilder) RecordOracledbParallelOperationsDowngraded1To25PctDataPoint(ts pcommon.Timestamp, inputVal string) error {
	parsed, parseErr := strconv.ParseInt(inputVal, 10, 64)
	if parseErr != nil {
		return fmt.Errorf("failed to parse int64 for OracledbParallelOperationsDowngraded1To25Pct, value was %s: %w", inputVal, parseErr)
	}
	mb.metricOracledbParallelOperationsDowngraded1To25Pct.recordDataPoint(mb.startTime, ts, parsed)
	return nil
}
// RecordOracledbParallelOperationsDowngraded25To50PctDataPoint parses inputVal as a base-10 int64 and records it
// as a data point on the oracledb.parallel_operations_downgraded_25_to_50_pct metric; a parse failure is returned as an error.
func (mb *MetricsBuilder) RecordOracledbParallelOperationsDowngraded25To50PctDataPoint(ts pcommon.Timestamp, inputVal string) error {
	parsed, parseErr := strconv.ParseInt(inputVal, 10, 64)
	if parseErr != nil {
		return fmt.Errorf("failed to parse int64 for OracledbParallelOperationsDowngraded25To50Pct, value was %s: %w", inputVal, parseErr)
	}
	mb.metricOracledbParallelOperationsDowngraded25To50Pct.recordDataPoint(mb.startTime, ts, parsed)
	return nil
}
// RecordOracledbParallelOperationsDowngraded50To75PctDataPoint parses inputVal as a base-10 int64 and records it
// as a data point on the oracledb.parallel_operations_downgraded_50_to_75_pct metric; a parse failure is returned as an error.
func (mb *MetricsBuilder) RecordOracledbParallelOperationsDowngraded50To75PctDataPoint(ts pcommon.Timestamp, inputVal string) error {
	parsed, parseErr := strconv.ParseInt(inputVal, 10, 64)
	if parseErr != nil {
		return fmt.Errorf("failed to parse int64 for OracledbParallelOperationsDowngraded50To75Pct, value was %s: %w", inputVal, parseErr)
	}
	mb.metricOracledbParallelOperationsDowngraded50To75Pct.recordDataPoint(mb.startTime, ts, parsed)
	return nil
}
// RecordOracledbParallelOperationsDowngraded75To99PctDataPoint parses inputVal as a base-10 int64 and records it
// as a data point on the oracledb.parallel_operations_downgraded_75_to_99_pct metric; a parse failure is returned as an error.
func (mb *MetricsBuilder) RecordOracledbParallelOperationsDowngraded75To99PctDataPoint(ts pcommon.Timestamp, inputVal string) error {
	parsed, parseErr := strconv.ParseInt(inputVal, 10, 64)
	if parseErr != nil {
		return fmt.Errorf("failed to parse int64 for OracledbParallelOperationsDowngraded75To99Pct, value was %s: %w", inputVal, parseErr)
	}
	mb.metricOracledbParallelOperationsDowngraded75To99Pct.recordDataPoint(mb.startTime, ts, parsed)
	return nil
}
// RecordOracledbParallelOperationsDowngradedToSerialDataPoint parses inputVal as a base-10 int64 and records it
// as a data point on the oracledb.parallel_operations_downgraded_to_serial metric; a parse failure is returned as an error.
func (mb *MetricsBuilder) RecordOracledbParallelOperationsDowngradedToSerialDataPoint(ts pcommon.Timestamp, inputVal string) error {
	parsed, parseErr := strconv.ParseInt(inputVal, 10, 64)
	if parseErr != nil {
		return fmt.Errorf("failed to parse int64 for OracledbParallelOperationsDowngradedToSerial, value was %s: %w", inputVal, parseErr)
	}
	mb.metricOracledbParallelOperationsDowngradedToSerial.recordDataPoint(mb.startTime, ts, parsed)
	return nil
}
// RecordOracledbParallelOperationsNotDowngradedDataPoint parses inputVal as a base-10 int64 and records it
// as a data point on the oracledb.parallel_operations_not_downgraded metric; a parse failure is returned as an error.
func (mb *MetricsBuilder) RecordOracledbParallelOperationsNotDowngradedDataPoint(ts pcommon.Timestamp, inputVal string) error {
	parsed, parseErr := strconv.ParseInt(inputVal, 10, 64)
	if parseErr != nil {
		return fmt.Errorf("failed to parse int64 for OracledbParallelOperationsNotDowngraded, value was %s: %w", inputVal, parseErr)
	}
	mb.metricOracledbParallelOperationsNotDowngraded.recordDataPoint(mb.startTime, ts, parsed)
	return nil
}
// RecordOracledbParseCallsDataPoint parses inputVal as a base-10 int64 and records it
// as a data point on the oracledb.parse_calls metric; a parse failure is returned as an error.
func (mb *MetricsBuilder) RecordOracledbParseCallsDataPoint(ts pcommon.Timestamp, inputVal string) error {
	parsed, parseErr := strconv.ParseInt(inputVal, 10, 64)
	if parseErr != nil {
		return fmt.Errorf("failed to parse int64 for OracledbParseCalls, value was %s: %w", inputVal, parseErr)
	}
	mb.metricOracledbParseCalls.recordDataPoint(mb.startTime, ts, parsed)
	return nil
}
// RecordOracledbPgaMemoryDataPoint parses inputVal as a base-10 int64 and records it
// as a data point on the oracledb.pga_memory metric; a parse failure is returned as an error.
func (mb *MetricsBuilder) RecordOracledbPgaMemoryDataPoint(ts pcommon.Timestamp, inputVal string) error {
	parsed, parseErr := strconv.ParseInt(inputVal, 10, 64)
	if parseErr != nil {
		return fmt.Errorf("failed to parse int64 for OracledbPgaMemory, value was %s: %w", inputVal, parseErr)
	}
	mb.metricOracledbPgaMemory.recordDataPoint(mb.startTime, ts, parsed)
	return nil
}
// RecordOracledbPhysicalReadIoRequestsDataPoint parses inputVal as a base-10 int64 and records it
// as a data point on the oracledb.physical_read_io_requests metric; a parse failure is returned as an error.
func (mb *MetricsBuilder) RecordOracledbPhysicalReadIoRequestsDataPoint(ts pcommon.Timestamp, inputVal string) error {
	parsed, parseErr := strconv.ParseInt(inputVal, 10, 64)
	if parseErr != nil {
		return fmt.Errorf("failed to parse int64 for OracledbPhysicalReadIoRequests, value was %s: %w", inputVal, parseErr)
	}
	mb.metricOracledbPhysicalReadIoRequests.recordDataPoint(mb.startTime, ts, parsed)
	return nil
}
// RecordOracledbPhysicalReadsDataPoint parses inputVal as a base-10 int64 and records it
// as a data point on the oracledb.physical_reads metric; a parse failure is returned as an error.
func (mb *MetricsBuilder) RecordOracledbPhysicalReadsDataPoint(ts pcommon.Timestamp, inputVal string) error {
	parsed, parseErr := strconv.ParseInt(inputVal, 10, 64)
	if parseErr != nil {
		return fmt.Errorf("failed to parse int64 for OracledbPhysicalReads, value was %s: %w", inputVal, parseErr)
	}
	mb.metricOracledbPhysicalReads.recordDataPoint(mb.startTime, ts, parsed)
	return nil
}
// RecordOracledbPhysicalReadsDirectDataPoint parses inputVal as a base-10 int64 and records it
// as a data point on the oracledb.physical_reads_direct metric; a parse failure is returned as an error.
func (mb *MetricsBuilder) RecordOracledbPhysicalReadsDirectDataPoint(ts pcommon.Timestamp, inputVal string) error {
	parsed, parseErr := strconv.ParseInt(inputVal, 10, 64)
	if parseErr != nil {
		return fmt.Errorf("failed to parse int64 for OracledbPhysicalReadsDirect, value was %s: %w", inputVal, parseErr)
	}
	mb.metricOracledbPhysicalReadsDirect.recordDataPoint(mb.startTime, ts, parsed)
	return nil
}
// RecordOracledbPhysicalWriteIoRequestsDataPoint parses inputVal as a base-10 int64 and records it
// as a data point on the oracledb.physical_write_io_requests metric; a parse failure is returned as an error.
func (mb *MetricsBuilder) RecordOracledbPhysicalWriteIoRequestsDataPoint(ts pcommon.Timestamp, inputVal string) error {
	parsed, parseErr := strconv.ParseInt(inputVal, 10, 64)
	if parseErr != nil {
		return fmt.Errorf("failed to parse int64 for OracledbPhysicalWriteIoRequests, value was %s: %w", inputVal, parseErr)
	}
	mb.metricOracledbPhysicalWriteIoRequests.recordDataPoint(mb.startTime, ts, parsed)
	return nil
}
// RecordOracledbPhysicalWritesDataPoint parses inputVal as a base-10 int64 and records it
// as a data point on the oracledb.physical_writes metric; a parse failure is returned as an error.
func (mb *MetricsBuilder) RecordOracledbPhysicalWritesDataPoint(ts pcommon.Timestamp, inputVal string) error {
	parsed, parseErr := strconv.ParseInt(inputVal, 10, 64)
	if parseErr != nil {
		return fmt.Errorf("failed to parse int64 for OracledbPhysicalWrites, value was %s: %w", inputVal, parseErr)
	}
	mb.metricOracledbPhysicalWrites.recordDataPoint(mb.startTime, ts, parsed)
	return nil
}
// RecordOracledbPhysicalWritesDirectDataPoint parses inputVal as a base-10 int64 and records it
// as a data point on the oracledb.physical_writes_direct metric; a parse failure is returned as an error.
func (mb *MetricsBuilder) RecordOracledbPhysicalWritesDirectDataPoint(ts pcommon.Timestamp, inputVal string) error {
	parsed, parseErr := strconv.ParseInt(inputVal, 10, 64)
	if parseErr != nil {
		return fmt.Errorf("failed to parse int64 for OracledbPhysicalWritesDirect, value was %s: %w", inputVal, parseErr)
	}
	mb.metricOracledbPhysicalWritesDirect.recordDataPoint(mb.startTime, ts, parsed)
	return nil
}
// RecordOracledbProcessesLimitDataPoint parses inputVal as a base-10 int64 and records it
// as a data point on the oracledb.processes.limit metric; a parse failure is returned as an error.
func (mb *MetricsBuilder) RecordOracledbProcessesLimitDataPoint(ts pcommon.Timestamp, inputVal string) error {
	parsed, parseErr := strconv.ParseInt(inputVal, 10, 64)
	if parseErr != nil {
		return fmt.Errorf("failed to parse int64 for OracledbProcessesLimit, value was %s: %w", inputVal, parseErr)
	}
	mb.metricOracledbProcessesLimit.recordDataPoint(mb.startTime, ts, parsed)
	return nil
}
// RecordOracledbProcessesUsageDataPoint parses inputVal as a base-10 int64 and records it
// as a data point on the oracledb.processes.usage metric; a parse failure is returned as an error.
func (mb *MetricsBuilder) RecordOracledbProcessesUsageDataPoint(ts pcommon.Timestamp, inputVal string) error {
	parsed, parseErr := strconv.ParseInt(inputVal, 10, 64)
	if parseErr != nil {
		return fmt.Errorf("failed to parse int64 for OracledbProcessesUsage, value was %s: %w", inputVal, parseErr)
	}
	mb.metricOracledbProcessesUsage.recordDataPoint(mb.startTime, ts, parsed)
	return nil
}
// RecordOracledbQueriesParallelizedDataPoint parses inputVal as a base-10 int64 and records it
// as a data point on the oracledb.queries_parallelized metric; a parse failure is returned as an error.
func (mb *MetricsBuilder) RecordOracledbQueriesParallelizedDataPoint(ts pcommon.Timestamp, inputVal string) error {
	parsed, parseErr := strconv.ParseInt(inputVal, 10, 64)
	if parseErr != nil {
		return fmt.Errorf("failed to parse int64 for OracledbQueriesParallelized, value was %s: %w", inputVal, parseErr)
	}
	mb.metricOracledbQueriesParallelized.recordDataPoint(mb.startTime, ts, parsed)
	return nil
}
// RecordOracledbSessionsLimitDataPoint parses inputVal as a base-10 int64 and records it
// as a data point on the oracledb.sessions.limit metric; a parse failure is returned as an error.
func (mb *MetricsBuilder) RecordOracledbSessionsLimitDataPoint(ts pcommon.Timestamp, inputVal string) error {
	parsed, parseErr := strconv.ParseInt(inputVal, 10, 64)
	if parseErr != nil {
		return fmt.Errorf("failed to parse int64 for OracledbSessionsLimit, value was %s: %w", inputVal, parseErr)
	}
	mb.metricOracledbSessionsLimit.recordDataPoint(mb.startTime, ts, parsed)
	return nil
}
// RecordOracledbSessionsUsageDataPoint parses inputVal as a base-10 int64 and records it as a
// data point on the oracledb.sessions.usage metric, tagged with the supplied session type and
// session status attribute values; a parse failure is returned as an error.
func (mb *MetricsBuilder) RecordOracledbSessionsUsageDataPoint(ts pcommon.Timestamp, inputVal string, sessionTypeAttributeValue string, sessionStatusAttributeValue string) error {
	parsed, parseErr := strconv.ParseInt(inputVal, 10, 64)
	if parseErr != nil {
		return fmt.Errorf("failed to parse int64 for OracledbSessionsUsage, value was %s: %w", inputVal, parseErr)
	}
	mb.metricOracledbSessionsUsage.recordDataPoint(mb.startTime, ts, parsed, sessionTypeAttributeValue, sessionStatusAttributeValue)
	return nil
}
// RecordOracledbTablespaceSizeLimitDataPoint adds a data point to oracledb.tablespace_size.limit metric.
// Unlike the string-based Record* helpers in this file, val is already an int64, so there is no
// parsing step and no error return; the point is tagged with the given tablespace name attribute.
func (mb *MetricsBuilder) RecordOracledbTablespaceSizeLimitDataPoint(ts pcommon.Timestamp, val int64, tablespaceNameAttributeValue string) {
	mb.metricOracledbTablespaceSizeLimit.recordDataPoint(mb.startTime, ts, val, tablespaceNameAttributeValue)
}
// RecordOracledbTablespaceSizeUsageDataPoint adds a data point to oracledb.tablespace_size.usage metric.
// Unlike the string-based Record* helpers in this file, val is already an int64, so there is no
// parsing step and no error return; the point is tagged with the given tablespace name attribute.
func (mb *MetricsBuilder) RecordOracledbTablespaceSizeUsageDataPoint(ts pcommon.Timestamp, val int64, tablespaceNameAttributeValue string) {
	mb.metricOracledbTablespaceSizeUsage.recordDataPoint(mb.startTime, ts, val, tablespaceNameAttributeValue)
}
// RecordOracledbTransactionsLimitDataPoint parses inputVal as a base-10 int64 and records it
// as a data point on the oracledb.transactions.limit metric; a parse failure is returned as an error.
func (mb *MetricsBuilder) RecordOracledbTransactionsLimitDataPoint(ts pcommon.Timestamp, inputVal string) error {
	parsed, parseErr := strconv.ParseInt(inputVal, 10, 64)
	if parseErr != nil {
		return fmt.Errorf("failed to parse int64 for OracledbTransactionsLimit, value was %s: %w", inputVal, parseErr)
	}
	mb.metricOracledbTransactionsLimit.recordDataPoint(mb.startTime, ts, parsed)
	return nil
}
// RecordOracledbTransactionsUsageDataPoint parses inputVal as a base-10 int64 and records it
// as a data point on the oracledb.transactions.usage metric; a parse failure is returned as an error.
func (mb *MetricsBuilder) RecordOracledbTransactionsUsageDataPoint(ts pcommon.Timestamp, inputVal string) error {
	parsed, parseErr := strconv.ParseInt(inputVal, 10, 64)
	if parseErr != nil {
		return fmt.Errorf("failed to parse int64 for OracledbTransactionsUsage, value was %s: %w", inputVal, parseErr)
	}
	mb.metricOracledbTransactionsUsage.recordDataPoint(mb.startTime, ts, parsed)
	return nil
}
// RecordOracledbUserCommitsDataPoint parses inputVal as a base-10 int64 and records it
// as a data point on the oracledb.user_commits metric; a parse failure is returned as an error.
func (mb *MetricsBuilder) RecordOracledbUserCommitsDataPoint(ts pcommon.Timestamp, inputVal string) error {
	parsed, parseErr := strconv.ParseInt(inputVal, 10, 64)
	if parseErr != nil {
		return fmt.Errorf("failed to parse int64 for OracledbUserCommits, value was %s: %w", inputVal, parseErr)
	}
	mb.metricOracledbUserCommits.recordDataPoint(mb.startTime, ts, parsed)
	return nil
}
// RecordOracledbUserRollbacksDataPoint parses inputVal as a base-10 int64 and records it
// as a data point on the oracledb.user_rollbacks metric; a parse failure is returned as an error.
func (mb *MetricsBuilder) RecordOracledbUserRollbacksDataPoint(ts pcommon.Timestamp, inputVal string) error {
	parsed, parseErr := strconv.ParseInt(inputVal, 10, 64)
	if parseErr != nil {
		return fmt.Errorf("failed to parse int64 for OracledbUserRollbacks, value was %s: %w", inputVal, parseErr)
	}
	mb.metricOracledbUserRollbacks.recordDataPoint(mb.startTime, ts, parsed)
	return nil
}
// Reset resets the metrics builder to its initial state. It should be used when the external
// metrics source is restarted, so the builder refreshes its startTime and reapplies any options
// to reset its internal state accordingly.
func (mb *MetricsBuilder) Reset(options ...MetricBuilderOption) {
	mb.startTime = pcommon.NewTimestampFromTime(time.Now())
	for _, opt := range options {
		opt.apply(mb)
	}
}