receiver/snowflakereceiver/internal/metadata/generated_metrics.go
// Code generated by mdatagen. DO NOT EDIT.
package metadata
import (
"time"
"go.opentelemetry.io/collector/component"
"go.opentelemetry.io/collector/filter"
"go.opentelemetry.io/collector/pdata/pcommon"
"go.opentelemetry.io/collector/pdata/pmetric"
"go.opentelemetry.io/collector/receiver"
)
var MetricsInfo = metricsInfo{
SnowflakeBillingCloudServiceTotal: metricInfo{
Name: "snowflake.billing.cloud_service.total",
},
SnowflakeBillingTotalCreditTotal: metricInfo{
Name: "snowflake.billing.total_credit.total",
},
SnowflakeBillingVirtualWarehouseTotal: metricInfo{
Name: "snowflake.billing.virtual_warehouse.total",
},
SnowflakeBillingWarehouseCloudServiceTotal: metricInfo{
Name: "snowflake.billing.warehouse.cloud_service.total",
},
SnowflakeBillingWarehouseTotalCreditTotal: metricInfo{
Name: "snowflake.billing.warehouse.total_credit.total",
},
SnowflakeBillingWarehouseVirtualWarehouseTotal: metricInfo{
Name: "snowflake.billing.warehouse.virtual_warehouse.total",
},
SnowflakeDatabaseBytesScannedAvg: metricInfo{
Name: "snowflake.database.bytes_scanned.avg",
},
SnowflakeDatabaseQueryCount: metricInfo{
Name: "snowflake.database.query.count",
},
SnowflakeLoginsTotal: metricInfo{
Name: "snowflake.logins.total",
},
SnowflakePipeCreditsUsedTotal: metricInfo{
Name: "snowflake.pipe.credits_used.total",
},
SnowflakeQueryBlocked: metricInfo{
Name: "snowflake.query.blocked",
},
SnowflakeQueryBytesDeletedAvg: metricInfo{
Name: "snowflake.query.bytes_deleted.avg",
},
SnowflakeQueryBytesSpilledLocalAvg: metricInfo{
Name: "snowflake.query.bytes_spilled.local.avg",
},
SnowflakeQueryBytesSpilledRemoteAvg: metricInfo{
Name: "snowflake.query.bytes_spilled.remote.avg",
},
SnowflakeQueryBytesWrittenAvg: metricInfo{
Name: "snowflake.query.bytes_written.avg",
},
SnowflakeQueryCompilationTimeAvg: metricInfo{
Name: "snowflake.query.compilation_time.avg",
},
SnowflakeQueryDataScannedCacheAvg: metricInfo{
Name: "snowflake.query.data_scanned_cache.avg",
},
SnowflakeQueryExecuted: metricInfo{
Name: "snowflake.query.executed",
},
SnowflakeQueryExecutionTimeAvg: metricInfo{
Name: "snowflake.query.execution_time.avg",
},
SnowflakeQueryPartitionsScannedAvg: metricInfo{
Name: "snowflake.query.partitions_scanned.avg",
},
SnowflakeQueryQueuedOverload: metricInfo{
Name: "snowflake.query.queued_overload",
},
SnowflakeQueryQueuedProvision: metricInfo{
Name: "snowflake.query.queued_provision",
},
SnowflakeQueuedOverloadTimeAvg: metricInfo{
Name: "snowflake.queued_overload_time.avg",
},
SnowflakeQueuedProvisioningTimeAvg: metricInfo{
Name: "snowflake.queued_provisioning_time.avg",
},
SnowflakeQueuedRepairTimeAvg: metricInfo{
Name: "snowflake.queued_repair_time.avg",
},
SnowflakeRowsDeletedAvg: metricInfo{
Name: "snowflake.rows_deleted.avg",
},
SnowflakeRowsInsertedAvg: metricInfo{
Name: "snowflake.rows_inserted.avg",
},
SnowflakeRowsProducedAvg: metricInfo{
Name: "snowflake.rows_produced.avg",
},
SnowflakeRowsUnloadedAvg: metricInfo{
Name: "snowflake.rows_unloaded.avg",
},
SnowflakeRowsUpdatedAvg: metricInfo{
Name: "snowflake.rows_updated.avg",
},
SnowflakeSessionIDCount: metricInfo{
Name: "snowflake.session_id.count",
},
SnowflakeStorageFailsafeBytesTotal: metricInfo{
Name: "snowflake.storage.failsafe_bytes.total",
},
SnowflakeStorageStageBytesTotal: metricInfo{
Name: "snowflake.storage.stage_bytes.total",
},
SnowflakeStorageStorageBytesTotal: metricInfo{
Name: "snowflake.storage.storage_bytes.total",
},
SnowflakeTotalElapsedTimeAvg: metricInfo{
Name: "snowflake.total_elapsed_time.avg",
},
}
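// Editorial note: the sketch below is illustrative and not part of the
// mdatagen output. MetricsInfo gives callers a stable way to reference
// metric names, for example when assembling filter lists:
//
//	names := []string{
//		MetricsInfo.SnowflakeBillingCloudServiceTotal.Name, // "snowflake.billing.cloud_service.total"
//		MetricsInfo.SnowflakeLoginsTotal.Name,              // "snowflake.logins.total"
//	}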
type metricsInfo struct {
SnowflakeBillingCloudServiceTotal metricInfo
SnowflakeBillingTotalCreditTotal metricInfo
SnowflakeBillingVirtualWarehouseTotal metricInfo
SnowflakeBillingWarehouseCloudServiceTotal metricInfo
SnowflakeBillingWarehouseTotalCreditTotal metricInfo
SnowflakeBillingWarehouseVirtualWarehouseTotal metricInfo
SnowflakeDatabaseBytesScannedAvg metricInfo
SnowflakeDatabaseQueryCount metricInfo
SnowflakeLoginsTotal metricInfo
SnowflakePipeCreditsUsedTotal metricInfo
SnowflakeQueryBlocked metricInfo
SnowflakeQueryBytesDeletedAvg metricInfo
SnowflakeQueryBytesSpilledLocalAvg metricInfo
SnowflakeQueryBytesSpilledRemoteAvg metricInfo
SnowflakeQueryBytesWrittenAvg metricInfo
SnowflakeQueryCompilationTimeAvg metricInfo
SnowflakeQueryDataScannedCacheAvg metricInfo
SnowflakeQueryExecuted metricInfo
SnowflakeQueryExecutionTimeAvg metricInfo
SnowflakeQueryPartitionsScannedAvg metricInfo
SnowflakeQueryQueuedOverload metricInfo
SnowflakeQueryQueuedProvision metricInfo
SnowflakeQueuedOverloadTimeAvg metricInfo
SnowflakeQueuedProvisioningTimeAvg metricInfo
SnowflakeQueuedRepairTimeAvg metricInfo
SnowflakeRowsDeletedAvg metricInfo
SnowflakeRowsInsertedAvg metricInfo
SnowflakeRowsProducedAvg metricInfo
SnowflakeRowsUnloadedAvg metricInfo
SnowflakeRowsUpdatedAvg metricInfo
SnowflakeSessionIDCount metricInfo
SnowflakeStorageFailsafeBytesTotal metricInfo
SnowflakeStorageStageBytesTotal metricInfo
SnowflakeStorageStorageBytesTotal metricInfo
SnowflakeTotalElapsedTimeAvg metricInfo
}
type metricInfo struct {
Name string
}
type metricSnowflakeBillingCloudServiceTotal struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills snowflake.billing.cloud_service.total metric with initial data.
func (m *metricSnowflakeBillingCloudServiceTotal) init() {
m.data.SetName("snowflake.billing.cloud_service.total")
m.data.SetDescription("Reported total credits used in the cloud service over the last 24 hour window.")
m.data.SetUnit("{credits}")
m.data.SetEmptyGauge()
m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricSnowflakeBillingCloudServiceTotal) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, serviceTypeAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleValue(val)
dp.Attributes().PutStr("service_type", serviceTypeAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricSnowflakeBillingCloudServiceTotal) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricSnowflakeBillingCloudServiceTotal) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricSnowflakeBillingCloudServiceTotal(cfg MetricConfig) metricSnowflakeBillingCloudServiceTotal {
m := metricSnowflakeBillingCloudServiceTotal{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
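// Editorial note: every metric type in this file follows the same
// record/emit lifecycle. A minimal sketch, assuming the MetricConfig type
// from this package's generated config with Enabled set; the timestamps and
// values are placeholders:
//
//	m := newMetricSnowflakeBillingCloudServiceTotal(MetricConfig{Enabled: true})
//	ts := pcommon.NewTimestampFromTime(time.Now())
//	m.recordDataPoint(ts, ts, 1.5, "WAREHOUSE_METERING")
//	ms := pmetric.NewMetricSlice()
//	m.emit(ms) // moves the buffered gauge into ms and re-inits the internal buffer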
type metricSnowflakeBillingTotalCreditTotal struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills snowflake.billing.total_credit.total metric with initial data.
func (m *metricSnowflakeBillingTotalCreditTotal) init() {
m.data.SetName("snowflake.billing.total_credit.total")
m.data.SetDescription("Reported total credits used across account over the last 24 hour window.")
m.data.SetUnit("{credits}")
m.data.SetEmptyGauge()
m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricSnowflakeBillingTotalCreditTotal) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, serviceTypeAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleValue(val)
dp.Attributes().PutStr("service_type", serviceTypeAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricSnowflakeBillingTotalCreditTotal) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricSnowflakeBillingTotalCreditTotal) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricSnowflakeBillingTotalCreditTotal(cfg MetricConfig) metricSnowflakeBillingTotalCreditTotal {
m := metricSnowflakeBillingTotalCreditTotal{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricSnowflakeBillingVirtualWarehouseTotal struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills snowflake.billing.virtual_warehouse.total metric with initial data.
func (m *metricSnowflakeBillingVirtualWarehouseTotal) init() {
m.data.SetName("snowflake.billing.virtual_warehouse.total")
m.data.SetDescription("Reported total credits used by virtual warehouse service over the last 24 hour window.")
m.data.SetUnit("{credits}")
m.data.SetEmptyGauge()
m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricSnowflakeBillingVirtualWarehouseTotal) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, serviceTypeAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleValue(val)
dp.Attributes().PutStr("service_type", serviceTypeAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricSnowflakeBillingVirtualWarehouseTotal) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricSnowflakeBillingVirtualWarehouseTotal) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricSnowflakeBillingVirtualWarehouseTotal(cfg MetricConfig) metricSnowflakeBillingVirtualWarehouseTotal {
m := metricSnowflakeBillingVirtualWarehouseTotal{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricSnowflakeBillingWarehouseCloudServiceTotal struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills snowflake.billing.warehouse.cloud_service.total metric with initial data.
func (m *metricSnowflakeBillingWarehouseCloudServiceTotal) init() {
m.data.SetName("snowflake.billing.warehouse.cloud_service.total")
m.data.SetDescription("Credits used across cloud service for given warehouse over the last 24 hour window.")
m.data.SetUnit("{credits}")
m.data.SetEmptyGauge()
m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricSnowflakeBillingWarehouseCloudServiceTotal) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, warehouseNameAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleValue(val)
dp.Attributes().PutStr("warehouse_name", warehouseNameAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricSnowflakeBillingWarehouseCloudServiceTotal) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricSnowflakeBillingWarehouseCloudServiceTotal) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricSnowflakeBillingWarehouseCloudServiceTotal(cfg MetricConfig) metricSnowflakeBillingWarehouseCloudServiceTotal {
m := metricSnowflakeBillingWarehouseCloudServiceTotal{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricSnowflakeBillingWarehouseTotalCreditTotal struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills snowflake.billing.warehouse.total_credit.total metric with initial data.
func (m *metricSnowflakeBillingWarehouseTotalCreditTotal) init() {
m.data.SetName("snowflake.billing.warehouse.total_credit.total")
m.data.SetDescription("Total credits used associated with given warehouse over the last 24 hour window.")
m.data.SetUnit("{credits}")
m.data.SetEmptyGauge()
m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricSnowflakeBillingWarehouseTotalCreditTotal) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, warehouseNameAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleValue(val)
dp.Attributes().PutStr("warehouse_name", warehouseNameAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricSnowflakeBillingWarehouseTotalCreditTotal) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricSnowflakeBillingWarehouseTotalCreditTotal) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricSnowflakeBillingWarehouseTotalCreditTotal(cfg MetricConfig) metricSnowflakeBillingWarehouseTotalCreditTotal {
m := metricSnowflakeBillingWarehouseTotalCreditTotal{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricSnowflakeBillingWarehouseVirtualWarehouseTotal struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills snowflake.billing.warehouse.virtual_warehouse.total metric with initial data.
func (m *metricSnowflakeBillingWarehouseVirtualWarehouseTotal) init() {
m.data.SetName("snowflake.billing.warehouse.virtual_warehouse.total")
m.data.SetDescription("Total credits used by virtual warehouse service for given warehouse over the last 24 hour window.")
m.data.SetUnit("{credits}")
m.data.SetEmptyGauge()
m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricSnowflakeBillingWarehouseVirtualWarehouseTotal) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, warehouseNameAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleValue(val)
dp.Attributes().PutStr("warehouse_name", warehouseNameAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricSnowflakeBillingWarehouseVirtualWarehouseTotal) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricSnowflakeBillingWarehouseVirtualWarehouseTotal) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricSnowflakeBillingWarehouseVirtualWarehouseTotal(cfg MetricConfig) metricSnowflakeBillingWarehouseVirtualWarehouseTotal {
m := metricSnowflakeBillingWarehouseVirtualWarehouseTotal{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricSnowflakeDatabaseBytesScannedAvg struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills snowflake.database.bytes_scanned.avg metric with initial data.
func (m *metricSnowflakeDatabaseBytesScannedAvg) init() {
m.data.SetName("snowflake.database.bytes_scanned.avg")
m.data.SetDescription("Average bytes scanned in a database over the last 24 hour window.")
m.data.SetUnit("By")
m.data.SetEmptyGauge()
m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricSnowflakeDatabaseBytesScannedAvg) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, schemaNameAttributeValue string, executionStatusAttributeValue string, errorMessageAttributeValue string, queryTypeAttributeValue string, warehouseNameAttributeValue string, databaseNameAttributeValue string, warehouseSizeAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleValue(val)
dp.Attributes().PutStr("schema_name", schemaNameAttributeValue)
dp.Attributes().PutStr("execution_status", executionStatusAttributeValue)
dp.Attributes().PutStr("error_message", errorMessageAttributeValue)
dp.Attributes().PutStr("query_type", queryTypeAttributeValue)
dp.Attributes().PutStr("warehouse_name", warehouseNameAttributeValue)
dp.Attributes().PutStr("database_name", databaseNameAttributeValue)
dp.Attributes().PutStr("warehouse_size", warehouseSizeAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricSnowflakeDatabaseBytesScannedAvg) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricSnowflakeDatabaseBytesScannedAvg) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricSnowflakeDatabaseBytesScannedAvg(cfg MetricConfig) metricSnowflakeDatabaseBytesScannedAvg {
m := metricSnowflakeDatabaseBytesScannedAvg{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
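// Editorial note: the query-level metrics take seven positional attribute
// arguments, so call sites must preserve the order schema_name,
// execution_status, error_message, query_type, warehouse_name,
// database_name, warehouse_size. A sketch with placeholder values:
//
//	m := newMetricSnowflakeDatabaseBytesScannedAvg(MetricConfig{Enabled: true})
//	ts := pcommon.NewTimestampFromTime(time.Now())
//	m.recordDataPoint(ts, ts, 4096.0,
//		"PUBLIC", "SUCCESS", "", "SELECT", "COMPUTE_WH", "MYDB", "X-Small")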
type metricSnowflakeDatabaseQueryCount struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills snowflake.database.query.count metric with initial data.
func (m *metricSnowflakeDatabaseQueryCount) init() {
m.data.SetName("snowflake.database.query.count")
m.data.SetDescription("Total query count for database over the last 24 hour window.")
m.data.SetUnit("1")
m.data.SetEmptyGauge()
m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricSnowflakeDatabaseQueryCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, schemaNameAttributeValue string, executionStatusAttributeValue string, errorMessageAttributeValue string, queryTypeAttributeValue string, warehouseNameAttributeValue string, databaseNameAttributeValue string, warehouseSizeAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
dp.Attributes().PutStr("schema_name", schemaNameAttributeValue)
dp.Attributes().PutStr("execution_status", executionStatusAttributeValue)
dp.Attributes().PutStr("error_message", errorMessageAttributeValue)
dp.Attributes().PutStr("query_type", queryTypeAttributeValue)
dp.Attributes().PutStr("warehouse_name", warehouseNameAttributeValue)
dp.Attributes().PutStr("database_name", databaseNameAttributeValue)
dp.Attributes().PutStr("warehouse_size", warehouseSizeAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricSnowflakeDatabaseQueryCount) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricSnowflakeDatabaseQueryCount) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricSnowflakeDatabaseQueryCount(cfg MetricConfig) metricSnowflakeDatabaseQueryCount {
m := metricSnowflakeDatabaseQueryCount{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
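// Editorial note: unlike the float64 gauges around it, this metric records
// an int64 via SetIntValue. A sketch with placeholder values:
//
//	m := newMetricSnowflakeDatabaseQueryCount(MetricConfig{Enabled: true})
//	ts := pcommon.NewTimestampFromTime(time.Now())
//	m.recordDataPoint(ts, ts, 42,
//		"PUBLIC", "SUCCESS", "", "SELECT", "COMPUTE_WH", "MYDB", "X-Small")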
type metricSnowflakeLoginsTotal struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills snowflake.logins.total metric with initial data.
func (m *metricSnowflakeLoginsTotal) init() {
m.data.SetName("snowflake.logins.total")
m.data.SetDescription("Total login attempts for account over the last 24 hour window.")
m.data.SetUnit("1")
m.data.SetEmptyGauge()
m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricSnowflakeLoginsTotal) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, errorMessageAttributeValue string, reportedClientTypeAttributeValue string, isSuccessAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
dp.Attributes().PutStr("error_message", errorMessageAttributeValue)
dp.Attributes().PutStr("reported_client_type", reportedClientTypeAttributeValue)
dp.Attributes().PutStr("is_success", isSuccessAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricSnowflakeLoginsTotal) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricSnowflakeLoginsTotal) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricSnowflakeLoginsTotal(cfg MetricConfig) metricSnowflakeLoginsTotal {
m := metricSnowflakeLoginsTotal{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricSnowflakePipeCreditsUsedTotal struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills snowflake.pipe.credits_used.total metric with initial data.
func (m *metricSnowflakePipeCreditsUsedTotal) init() {
m.data.SetName("snowflake.pipe.credits_used.total")
m.data.SetDescription("Snow pipe credits contotaled over the last 24 hour window.")
m.data.SetUnit("{credits}")
m.data.SetEmptyGauge()
m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricSnowflakePipeCreditsUsedTotal) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, pipeNameAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleValue(val)
dp.Attributes().PutStr("pipe_name", pipeNameAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricSnowflakePipeCreditsUsedTotal) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricSnowflakePipeCreditsUsedTotal) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricSnowflakePipeCreditsUsedTotal(cfg MetricConfig) metricSnowflakePipeCreditsUsedTotal {
m := metricSnowflakePipeCreditsUsedTotal{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricSnowflakeQueryBlocked struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills snowflake.query.blocked metric with initial data.
func (m *metricSnowflakeQueryBlocked) init() {
m.data.SetName("snowflake.query.blocked")
m.data.SetDescription("Blocked query count for warehouse over the last 24 hour window.")
m.data.SetUnit("1")
m.data.SetEmptyGauge()
m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricSnowflakeQueryBlocked) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, warehouseNameAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleValue(val)
dp.Attributes().PutStr("warehouse_name", warehouseNameAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricSnowflakeQueryBlocked) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricSnowflakeQueryBlocked) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricSnowflakeQueryBlocked(cfg MetricConfig) metricSnowflakeQueryBlocked {
m := metricSnowflakeQueryBlocked{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricSnowflakeQueryBytesDeletedAvg struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills snowflake.query.bytes_deleted.avg metric with initial data.
func (m *metricSnowflakeQueryBytesDeletedAvg) init() {
m.data.SetName("snowflake.query.bytes_deleted.avg")
m.data.SetDescription("Average bytes deleted in database over the last 24 hour window.")
m.data.SetUnit("By")
m.data.SetEmptyGauge()
m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricSnowflakeQueryBytesDeletedAvg) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, schemaNameAttributeValue string, executionStatusAttributeValue string, errorMessageAttributeValue string, queryTypeAttributeValue string, warehouseNameAttributeValue string, databaseNameAttributeValue string, warehouseSizeAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleValue(val)
dp.Attributes().PutStr("schema_name", schemaNameAttributeValue)
dp.Attributes().PutStr("execution_status", executionStatusAttributeValue)
dp.Attributes().PutStr("error_message", errorMessageAttributeValue)
dp.Attributes().PutStr("query_type", queryTypeAttributeValue)
dp.Attributes().PutStr("warehouse_name", warehouseNameAttributeValue)
dp.Attributes().PutStr("database_name", databaseNameAttributeValue)
dp.Attributes().PutStr("warehouse_size", warehouseSizeAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricSnowflakeQueryBytesDeletedAvg) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricSnowflakeQueryBytesDeletedAvg) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricSnowflakeQueryBytesDeletedAvg(cfg MetricConfig) metricSnowflakeQueryBytesDeletedAvg {
m := metricSnowflakeQueryBytesDeletedAvg{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricSnowflakeQueryBytesSpilledLocalAvg struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills snowflake.query.bytes_spilled.local.avg metric with initial data.
func (m *metricSnowflakeQueryBytesSpilledLocalAvg) init() {
m.data.SetName("snowflake.query.bytes_spilled.local.avg")
m.data.SetDescription("Average bytes spilled (intermediate results do not fit in memory) by local storage over the last 24 hour window.")
m.data.SetUnit("By")
m.data.SetEmptyGauge()
m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricSnowflakeQueryBytesSpilledLocalAvg) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, schemaNameAttributeValue string, executionStatusAttributeValue string, errorMessageAttributeValue string, queryTypeAttributeValue string, warehouseNameAttributeValue string, databaseNameAttributeValue string, warehouseSizeAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleValue(val)
dp.Attributes().PutStr("schema_name", schemaNameAttributeValue)
dp.Attributes().PutStr("execution_status", executionStatusAttributeValue)
dp.Attributes().PutStr("error_message", errorMessageAttributeValue)
dp.Attributes().PutStr("query_type", queryTypeAttributeValue)
dp.Attributes().PutStr("warehouse_name", warehouseNameAttributeValue)
dp.Attributes().PutStr("database_name", databaseNameAttributeValue)
dp.Attributes().PutStr("warehouse_size", warehouseSizeAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricSnowflakeQueryBytesSpilledLocalAvg) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricSnowflakeQueryBytesSpilledLocalAvg) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricSnowflakeQueryBytesSpilledLocalAvg(cfg MetricConfig) metricSnowflakeQueryBytesSpilledLocalAvg {
m := metricSnowflakeQueryBytesSpilledLocalAvg{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricSnowflakeQueryBytesSpilledRemoteAvg struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills snowflake.query.bytes_spilled.remote.avg metric with initial data.
func (m *metricSnowflakeQueryBytesSpilledRemoteAvg) init() {
m.data.SetName("snowflake.query.bytes_spilled.remote.avg")
m.data.SetDescription("Average bytes spilled (intermediate results do not fit in memory) by remote storage over the last 24 hour window.")
m.data.SetUnit("By")
m.data.SetEmptyGauge()
m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricSnowflakeQueryBytesSpilledRemoteAvg) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, schemaNameAttributeValue string, executionStatusAttributeValue string, errorMessageAttributeValue string, queryTypeAttributeValue string, warehouseNameAttributeValue string, databaseNameAttributeValue string, warehouseSizeAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleValue(val)
dp.Attributes().PutStr("schema_name", schemaNameAttributeValue)
dp.Attributes().PutStr("execution_status", executionStatusAttributeValue)
dp.Attributes().PutStr("error_message", errorMessageAttributeValue)
dp.Attributes().PutStr("query_type", queryTypeAttributeValue)
dp.Attributes().PutStr("warehouse_name", warehouseNameAttributeValue)
dp.Attributes().PutStr("database_name", databaseNameAttributeValue)
dp.Attributes().PutStr("warehouse_size", warehouseSizeAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricSnowflakeQueryBytesSpilledRemoteAvg) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricSnowflakeQueryBytesSpilledRemoteAvg) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricSnowflakeQueryBytesSpilledRemoteAvg(cfg MetricConfig) metricSnowflakeQueryBytesSpilledRemoteAvg {
m := metricSnowflakeQueryBytesSpilledRemoteAvg{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricSnowflakeQueryBytesWrittenAvg struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills snowflake.query.bytes_written.avg metric with initial data.
func (m *metricSnowflakeQueryBytesWrittenAvg) init() {
m.data.SetName("snowflake.query.bytes_written.avg")
m.data.SetDescription("Average bytes written by database over the last 24 hour window.")
m.data.SetUnit("By")
m.data.SetEmptyGauge()
m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricSnowflakeQueryBytesWrittenAvg) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, schemaNameAttributeValue string, executionStatusAttributeValue string, errorMessageAttributeValue string, queryTypeAttributeValue string, warehouseNameAttributeValue string, databaseNameAttributeValue string, warehouseSizeAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleValue(val)
dp.Attributes().PutStr("schema_name", schemaNameAttributeValue)
dp.Attributes().PutStr("execution_status", executionStatusAttributeValue)
dp.Attributes().PutStr("error_message", errorMessageAttributeValue)
dp.Attributes().PutStr("query_type", queryTypeAttributeValue)
dp.Attributes().PutStr("warehouse_name", warehouseNameAttributeValue)
dp.Attributes().PutStr("database_name", databaseNameAttributeValue)
dp.Attributes().PutStr("warehouse_size", warehouseSizeAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricSnowflakeQueryBytesWrittenAvg) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricSnowflakeQueryBytesWrittenAvg) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricSnowflakeQueryBytesWrittenAvg(cfg MetricConfig) metricSnowflakeQueryBytesWrittenAvg {
m := metricSnowflakeQueryBytesWrittenAvg{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricSnowflakeQueryCompilationTimeAvg struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills snowflake.query.compilation_time.avg metric with initial data.
func (m *metricSnowflakeQueryCompilationTimeAvg) init() {
m.data.SetName("snowflake.query.compilation_time.avg")
m.data.SetDescription("Average time taken to compile query over the last 24 hour window.")
m.data.SetUnit("s")
m.data.SetEmptyGauge()
m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricSnowflakeQueryCompilationTimeAvg) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, schemaNameAttributeValue string, executionStatusAttributeValue string, errorMessageAttributeValue string, queryTypeAttributeValue string, warehouseNameAttributeValue string, databaseNameAttributeValue string, warehouseSizeAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleValue(val)
dp.Attributes().PutStr("schema_name", schemaNameAttributeValue)
dp.Attributes().PutStr("execution_status", executionStatusAttributeValue)
dp.Attributes().PutStr("error_message", errorMessageAttributeValue)
dp.Attributes().PutStr("query_type", queryTypeAttributeValue)
dp.Attributes().PutStr("warehouse_name", warehouseNameAttributeValue)
dp.Attributes().PutStr("database_name", databaseNameAttributeValue)
dp.Attributes().PutStr("warehouse_size", warehouseSizeAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricSnowflakeQueryCompilationTimeAvg) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricSnowflakeQueryCompilationTimeAvg) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricSnowflakeQueryCompilationTimeAvg(cfg MetricConfig) metricSnowflakeQueryCompilationTimeAvg {
m := metricSnowflakeQueryCompilationTimeAvg{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricSnowflakeQueryDataScannedCacheAvg struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills snowflake.query.data_scanned_cache.avg metric with initial data.
func (m *metricSnowflakeQueryDataScannedCacheAvg) init() {
m.data.SetName("snowflake.query.data_scanned_cache.avg")
m.data.SetDescription("Average percentage of data scanned from cache over the last 24 hour window.")
m.data.SetUnit("1")
m.data.SetEmptyGauge()
m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricSnowflakeQueryDataScannedCacheAvg) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, schemaNameAttributeValue string, executionStatusAttributeValue string, errorMessageAttributeValue string, queryTypeAttributeValue string, warehouseNameAttributeValue string, databaseNameAttributeValue string, warehouseSizeAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleValue(val)
dp.Attributes().PutStr("schema_name", schemaNameAttributeValue)
dp.Attributes().PutStr("execution_status", executionStatusAttributeValue)
dp.Attributes().PutStr("error_message", errorMessageAttributeValue)
dp.Attributes().PutStr("query_type", queryTypeAttributeValue)
dp.Attributes().PutStr("warehouse_name", warehouseNameAttributeValue)
dp.Attributes().PutStr("database_name", databaseNameAttributeValue)
dp.Attributes().PutStr("warehouse_size", warehouseSizeAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricSnowflakeQueryDataScannedCacheAvg) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricSnowflakeQueryDataScannedCacheAvg) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricSnowflakeQueryDataScannedCacheAvg(cfg MetricConfig) metricSnowflakeQueryDataScannedCacheAvg {
m := metricSnowflakeQueryDataScannedCacheAvg{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricSnowflakeQueryExecuted struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills snowflake.query.executed metric with initial data.
func (m *metricSnowflakeQueryExecuted) init() {
m.data.SetName("snowflake.query.executed")
m.data.SetDescription("Executed query count for warehouse over the last 24 hour window.")
m.data.SetUnit("1")
m.data.SetEmptyGauge()
m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricSnowflakeQueryExecuted) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, warehouseNameAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleValue(val)
dp.Attributes().PutStr("warehouse_name", warehouseNameAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricSnowflakeQueryExecuted) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricSnowflakeQueryExecuted) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricSnowflakeQueryExecuted(cfg MetricConfig) metricSnowflakeQueryExecuted {
m := metricSnowflakeQueryExecuted{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricSnowflakeQueryExecutionTimeAvg struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills snowflake.query.execution_time.avg metric with initial data.
func (m *metricSnowflakeQueryExecutionTimeAvg) init() {
m.data.SetName("snowflake.query.execution_time.avg")
m.data.SetDescription("Average time spent executing queries in database over the last 24 hour window.")
m.data.SetUnit("s")
m.data.SetEmptyGauge()
m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricSnowflakeQueryExecutionTimeAvg) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, schemaNameAttributeValue string, executionStatusAttributeValue string, errorMessageAttributeValue string, queryTypeAttributeValue string, warehouseNameAttributeValue string, databaseNameAttributeValue string, warehouseSizeAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleValue(val)
dp.Attributes().PutStr("schema_name", schemaNameAttributeValue)
dp.Attributes().PutStr("execution_status", executionStatusAttributeValue)
dp.Attributes().PutStr("error_message", errorMessageAttributeValue)
dp.Attributes().PutStr("query_type", queryTypeAttributeValue)
dp.Attributes().PutStr("warehouse_name", warehouseNameAttributeValue)
dp.Attributes().PutStr("database_name", databaseNameAttributeValue)
dp.Attributes().PutStr("warehouse_size", warehouseSizeAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricSnowflakeQueryExecutionTimeAvg) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricSnowflakeQueryExecutionTimeAvg) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricSnowflakeQueryExecutionTimeAvg(cfg MetricConfig) metricSnowflakeQueryExecutionTimeAvg {
m := metricSnowflakeQueryExecutionTimeAvg{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricSnowflakeQueryPartitionsScannedAvg struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills snowflake.query.partitions_scanned.avg metric with initial data.
func (m *metricSnowflakeQueryPartitionsScannedAvg) init() {
m.data.SetName("snowflake.query.partitions_scanned.avg")
m.data.SetDescription("Number of partitions scanned during query so far over the last 24 hour window.")
m.data.SetUnit("1")
m.data.SetEmptyGauge()
m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricSnowflakeQueryPartitionsScannedAvg) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, schemaNameAttributeValue string, executionStatusAttributeValue string, errorMessageAttributeValue string, queryTypeAttributeValue string, warehouseNameAttributeValue string, databaseNameAttributeValue string, warehouseSizeAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleValue(val)
dp.Attributes().PutStr("schema_name", schemaNameAttributeValue)
dp.Attributes().PutStr("execution_status", executionStatusAttributeValue)
dp.Attributes().PutStr("error_message", errorMessageAttributeValue)
dp.Attributes().PutStr("query_type", queryTypeAttributeValue)
dp.Attributes().PutStr("warehouse_name", warehouseNameAttributeValue)
dp.Attributes().PutStr("database_name", databaseNameAttributeValue)
dp.Attributes().PutStr("warehouse_size", warehouseSizeAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricSnowflakeQueryPartitionsScannedAvg) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricSnowflakeQueryPartitionsScannedAvg) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricSnowflakeQueryPartitionsScannedAvg(cfg MetricConfig) metricSnowflakeQueryPartitionsScannedAvg {
m := metricSnowflakeQueryPartitionsScannedAvg{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricSnowflakeQueryQueuedOverload struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills snowflake.query.queued_overload metric with initial data.
func (m *metricSnowflakeQueryQueuedOverload) init() {
m.data.SetName("snowflake.query.queued_overload")
m.data.SetDescription("Overloaded query count for warehouse over the last 24 hour window.")
m.data.SetUnit("1")
m.data.SetEmptyGauge()
m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricSnowflakeQueryQueuedOverload) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, warehouseNameAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleValue(val)
dp.Attributes().PutStr("warehouse_name", warehouseNameAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricSnowflakeQueryQueuedOverload) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricSnowflakeQueryQueuedOverload) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricSnowflakeQueryQueuedOverload(cfg MetricConfig) metricSnowflakeQueryQueuedOverload {
m := metricSnowflakeQueryQueuedOverload{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricSnowflakeQueryQueuedProvision struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills snowflake.query.queued_provision metric with initial data.
func (m *metricSnowflakeQueryQueuedProvision) init() {
m.data.SetName("snowflake.query.queued_provision")
m.data.SetDescription("Number of compute resources queued for provisioning over the last 24 hour window.")
m.data.SetUnit("1")
m.data.SetEmptyGauge()
m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricSnowflakeQueryQueuedProvision) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, warehouseNameAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleValue(val)
dp.Attributes().PutStr("warehouse_name", warehouseNameAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricSnowflakeQueryQueuedProvision) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricSnowflakeQueryQueuedProvision) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricSnowflakeQueryQueuedProvision(cfg MetricConfig) metricSnowflakeQueryQueuedProvision {
m := metricSnowflakeQueryQueuedProvision{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricSnowflakeQueuedOverloadTimeAvg struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills snowflake.queued_overload_time.avg metric with initial data.
func (m *metricSnowflakeQueuedOverloadTimeAvg) init() {
m.data.SetName("snowflake.queued_overload_time.avg")
m.data.SetDescription("Average time spent in warehouse queue due to warehouse being overloaded over the last 24 hour window.")
m.data.SetUnit("s")
m.data.SetEmptyGauge()
m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricSnowflakeQueuedOverloadTimeAvg) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, schemaNameAttributeValue string, executionStatusAttributeValue string, errorMessageAttributeValue string, queryTypeAttributeValue string, warehouseNameAttributeValue string, databaseNameAttributeValue string, warehouseSizeAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleValue(val)
dp.Attributes().PutStr("schema_name", schemaNameAttributeValue)
dp.Attributes().PutStr("execution_status", executionStatusAttributeValue)
dp.Attributes().PutStr("error_message", errorMessageAttributeValue)
dp.Attributes().PutStr("query_type", queryTypeAttributeValue)
dp.Attributes().PutStr("warehouse_name", warehouseNameAttributeValue)
dp.Attributes().PutStr("database_name", databaseNameAttributeValue)
dp.Attributes().PutStr("warehouse_size", warehouseSizeAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricSnowflakeQueuedOverloadTimeAvg) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricSnowflakeQueuedOverloadTimeAvg) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricSnowflakeQueuedOverloadTimeAvg(cfg MetricConfig) metricSnowflakeQueuedOverloadTimeAvg {
m := metricSnowflakeQueuedOverloadTimeAvg{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricSnowflakeQueuedProvisioningTimeAvg struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills snowflake.queued_provisioning_time.avg metric with initial data.
func (m *metricSnowflakeQueuedProvisioningTimeAvg) init() {
m.data.SetName("snowflake.queued_provisioning_time.avg")
m.data.SetDescription("Average time spent in warehouse queue waiting for resources to provision over the last 24 hour window.")
m.data.SetUnit("s")
m.data.SetEmptyGauge()
m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricSnowflakeQueuedProvisioningTimeAvg) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, schemaNameAttributeValue string, executionStatusAttributeValue string, errorMessageAttributeValue string, queryTypeAttributeValue string, warehouseNameAttributeValue string, databaseNameAttributeValue string, warehouseSizeAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleValue(val)
dp.Attributes().PutStr("schema_name", schemaNameAttributeValue)
dp.Attributes().PutStr("execution_status", executionStatusAttributeValue)
dp.Attributes().PutStr("error_message", errorMessageAttributeValue)
dp.Attributes().PutStr("query_type", queryTypeAttributeValue)
dp.Attributes().PutStr("warehouse_name", warehouseNameAttributeValue)
dp.Attributes().PutStr("database_name", databaseNameAttributeValue)
dp.Attributes().PutStr("warehouse_size", warehouseSizeAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricSnowflakeQueuedProvisioningTimeAvg) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricSnowflakeQueuedProvisioningTimeAvg) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricSnowflakeQueuedProvisioningTimeAvg(cfg MetricConfig) metricSnowflakeQueuedProvisioningTimeAvg {
m := metricSnowflakeQueuedProvisioningTimeAvg{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricSnowflakeQueuedRepairTimeAvg struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills snowflake.queued_repair_time.avg metric with initial data.
func (m *metricSnowflakeQueuedRepairTimeAvg) init() {
m.data.SetName("snowflake.queued_repair_time.avg")
m.data.SetDescription("Average time spent in warehouse queue waiting for compute resources to be repaired over the last 24 hour window.")
m.data.SetUnit("s")
m.data.SetEmptyGauge()
m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricSnowflakeQueuedRepairTimeAvg) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, schemaNameAttributeValue string, executionStatusAttributeValue string, errorMessageAttributeValue string, queryTypeAttributeValue string, warehouseNameAttributeValue string, databaseNameAttributeValue string, warehouseSizeAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleValue(val)
dp.Attributes().PutStr("schema_name", schemaNameAttributeValue)
dp.Attributes().PutStr("execution_status", executionStatusAttributeValue)
dp.Attributes().PutStr("error_message", errorMessageAttributeValue)
dp.Attributes().PutStr("query_type", queryTypeAttributeValue)
dp.Attributes().PutStr("warehouse_name", warehouseNameAttributeValue)
dp.Attributes().PutStr("database_name", databaseNameAttributeValue)
dp.Attributes().PutStr("warehouse_size", warehouseSizeAttributeValue)
}
// updateCapacity saves the max length of the data point slice, which will be used as the slice capacity.
func (m *metricSnowflakeQueuedRepairTimeAvg) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricSnowflakeQueuedRepairTimeAvg) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricSnowflakeQueuedRepairTimeAvg(cfg MetricConfig) metricSnowflakeQueuedRepairTimeAvg {
m := metricSnowflakeQueuedRepairTimeAvg{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricSnowflakeRowsDeletedAvg struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills snowflake.rows_deleted.avg metric with initial data.
func (m *metricSnowflakeRowsDeletedAvg) init() {
m.data.SetName("snowflake.rows_deleted.avg")
m.data.SetDescription("Number of rows deleted from a table (or tables) over the last 24 hour window.")
m.data.SetUnit("{rows}")
m.data.SetEmptyGauge()
m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricSnowflakeRowsDeletedAvg) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, schemaNameAttributeValue string, executionStatusAttributeValue string, errorMessageAttributeValue string, queryTypeAttributeValue string, warehouseNameAttributeValue string, databaseNameAttributeValue string, warehouseSizeAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleValue(val)
dp.Attributes().PutStr("schema_name", schemaNameAttributeValue)
dp.Attributes().PutStr("execution_status", executionStatusAttributeValue)
dp.Attributes().PutStr("error_message", errorMessageAttributeValue)
dp.Attributes().PutStr("query_type", queryTypeAttributeValue)
dp.Attributes().PutStr("warehouse_name", warehouseNameAttributeValue)
dp.Attributes().PutStr("database_name", databaseNameAttributeValue)
dp.Attributes().PutStr("warehouse_size", warehouseSizeAttributeValue)
}
// updateCapacity saves the max length of the data point slice, which will be used as the slice capacity.
func (m *metricSnowflakeRowsDeletedAvg) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricSnowflakeRowsDeletedAvg) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricSnowflakeRowsDeletedAvg(cfg MetricConfig) metricSnowflakeRowsDeletedAvg {
m := metricSnowflakeRowsDeletedAvg{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricSnowflakeRowsInsertedAvg struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills snowflake.rows_inserted.avg metric with initial data.
func (m *metricSnowflakeRowsInsertedAvg) init() {
m.data.SetName("snowflake.rows_inserted.avg")
m.data.SetDescription("Number of rows inserted into a table (or tables) over the last 24 hour window.")
m.data.SetUnit("{rows}")
m.data.SetEmptyGauge()
m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricSnowflakeRowsInsertedAvg) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, schemaNameAttributeValue string, executionStatusAttributeValue string, errorMessageAttributeValue string, queryTypeAttributeValue string, warehouseNameAttributeValue string, databaseNameAttributeValue string, warehouseSizeAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleValue(val)
dp.Attributes().PutStr("schema_name", schemaNameAttributeValue)
dp.Attributes().PutStr("execution_status", executionStatusAttributeValue)
dp.Attributes().PutStr("error_message", errorMessageAttributeValue)
dp.Attributes().PutStr("query_type", queryTypeAttributeValue)
dp.Attributes().PutStr("warehouse_name", warehouseNameAttributeValue)
dp.Attributes().PutStr("database_name", databaseNameAttributeValue)
dp.Attributes().PutStr("warehouse_size", warehouseSizeAttributeValue)
}
// updateCapacity saves the max length of the data point slice, which will be used as the slice capacity.
func (m *metricSnowflakeRowsInsertedAvg) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricSnowflakeRowsInsertedAvg) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricSnowflakeRowsInsertedAvg(cfg MetricConfig) metricSnowflakeRowsInsertedAvg {
m := metricSnowflakeRowsInsertedAvg{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricSnowflakeRowsProducedAvg struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills snowflake.rows_produced.avg metric with initial data.
func (m *metricSnowflakeRowsProducedAvg) init() {
m.data.SetName("snowflake.rows_produced.avg")
m.data.SetDescription("Average number of rows produced by statement over the last 24 hour window.")
m.data.SetUnit("{rows}")
m.data.SetEmptyGauge()
m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricSnowflakeRowsProducedAvg) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, schemaNameAttributeValue string, executionStatusAttributeValue string, errorMessageAttributeValue string, queryTypeAttributeValue string, warehouseNameAttributeValue string, databaseNameAttributeValue string, warehouseSizeAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleValue(val)
dp.Attributes().PutStr("schema_name", schemaNameAttributeValue)
dp.Attributes().PutStr("execution_status", executionStatusAttributeValue)
dp.Attributes().PutStr("error_message", errorMessageAttributeValue)
dp.Attributes().PutStr("query_type", queryTypeAttributeValue)
dp.Attributes().PutStr("warehouse_name", warehouseNameAttributeValue)
dp.Attributes().PutStr("database_name", databaseNameAttributeValue)
dp.Attributes().PutStr("warehouse_size", warehouseSizeAttributeValue)
}
// updateCapacity saves the max length of the data point slice, which will be used as the slice capacity.
func (m *metricSnowflakeRowsProducedAvg) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricSnowflakeRowsProducedAvg) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricSnowflakeRowsProducedAvg(cfg MetricConfig) metricSnowflakeRowsProducedAvg {
m := metricSnowflakeRowsProducedAvg{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricSnowflakeRowsUnloadedAvg struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills snowflake.rows_unloaded.avg metric with initial data.
func (m *metricSnowflakeRowsUnloadedAvg) init() {
m.data.SetName("snowflake.rows_unloaded.avg")
m.data.SetDescription("Average number of rows unloaded during data export over the last 24 hour window.")
m.data.SetUnit("{rows}")
m.data.SetEmptyGauge()
m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricSnowflakeRowsUnloadedAvg) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, schemaNameAttributeValue string, executionStatusAttributeValue string, errorMessageAttributeValue string, queryTypeAttributeValue string, warehouseNameAttributeValue string, databaseNameAttributeValue string, warehouseSizeAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleValue(val)
dp.Attributes().PutStr("schema_name", schemaNameAttributeValue)
dp.Attributes().PutStr("execution_status", executionStatusAttributeValue)
dp.Attributes().PutStr("error_message", errorMessageAttributeValue)
dp.Attributes().PutStr("query_type", queryTypeAttributeValue)
dp.Attributes().PutStr("warehouse_name", warehouseNameAttributeValue)
dp.Attributes().PutStr("database_name", databaseNameAttributeValue)
dp.Attributes().PutStr("warehouse_size", warehouseSizeAttributeValue)
}
// updateCapacity saves the max length of the data point slice, which will be used as the slice capacity.
func (m *metricSnowflakeRowsUnloadedAvg) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricSnowflakeRowsUnloadedAvg) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricSnowflakeRowsUnloadedAvg(cfg MetricConfig) metricSnowflakeRowsUnloadedAvg {
m := metricSnowflakeRowsUnloadedAvg{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricSnowflakeRowsUpdatedAvg struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills snowflake.rows_updated.avg metric with initial data.
func (m *metricSnowflakeRowsUpdatedAvg) init() {
m.data.SetName("snowflake.rows_updated.avg")
m.data.SetDescription("Average number of rows updated in a table over the last 24 hour window.")
m.data.SetUnit("{rows}")
m.data.SetEmptyGauge()
m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricSnowflakeRowsUpdatedAvg) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, schemaNameAttributeValue string, executionStatusAttributeValue string, errorMessageAttributeValue string, queryTypeAttributeValue string, warehouseNameAttributeValue string, databaseNameAttributeValue string, warehouseSizeAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleValue(val)
dp.Attributes().PutStr("schema_name", schemaNameAttributeValue)
dp.Attributes().PutStr("execution_status", executionStatusAttributeValue)
dp.Attributes().PutStr("error_message", errorMessageAttributeValue)
dp.Attributes().PutStr("query_type", queryTypeAttributeValue)
dp.Attributes().PutStr("warehouse_name", warehouseNameAttributeValue)
dp.Attributes().PutStr("database_name", databaseNameAttributeValue)
dp.Attributes().PutStr("warehouse_size", warehouseSizeAttributeValue)
}
// updateCapacity saves the max length of the data point slice, which will be used as the slice capacity.
func (m *metricSnowflakeRowsUpdatedAvg) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricSnowflakeRowsUpdatedAvg) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricSnowflakeRowsUpdatedAvg(cfg MetricConfig) metricSnowflakeRowsUpdatedAvg {
m := metricSnowflakeRowsUpdatedAvg{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricSnowflakeSessionIDCount struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills snowflake.session_id.count metric with initial data.
func (m *metricSnowflakeSessionIDCount) init() {
m.data.SetName("snowflake.session_id.count")
m.data.SetDescription("Distinct session id's associated with snowflake username over the last 24 hour window.")
m.data.SetUnit("1")
m.data.SetEmptyGauge()
m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricSnowflakeSessionIDCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, userNameAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
dp.Attributes().PutStr("user_name", userNameAttributeValue)
}
// updateCapacity saves the max length of the data point slice, which will be used as the slice capacity.
func (m *metricSnowflakeSessionIDCount) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricSnowflakeSessionIDCount) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricSnowflakeSessionIDCount(cfg MetricConfig) metricSnowflakeSessionIDCount {
m := metricSnowflakeSessionIDCount{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricSnowflakeStorageFailsafeBytesTotal struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills snowflake.storage.failsafe_bytes.total metric with initial data.
func (m *metricSnowflakeStorageFailsafeBytesTotal) init() {
m.data.SetName("snowflake.storage.failsafe_bytes.total")
m.data.SetDescription("Number of bytes of data in Fail-safe.")
m.data.SetUnit("By")
m.data.SetEmptyGauge()
}
func (m *metricSnowflakeStorageFailsafeBytesTotal) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
}
// updateCapacity saves the max length of the data point slice, which will be used as the slice capacity.
func (m *metricSnowflakeStorageFailsafeBytesTotal) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricSnowflakeStorageFailsafeBytesTotal) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricSnowflakeStorageFailsafeBytesTotal(cfg MetricConfig) metricSnowflakeStorageFailsafeBytesTotal {
m := metricSnowflakeStorageFailsafeBytesTotal{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricSnowflakeStorageStageBytesTotal struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills snowflake.storage.stage_bytes.total metric with initial data.
func (m *metricSnowflakeStorageStageBytesTotal) init() {
m.data.SetName("snowflake.storage.stage_bytes.total")
m.data.SetDescription("Number of bytes of stage storage used by files in all internal stages (named, table, user).")
m.data.SetUnit("By")
m.data.SetEmptyGauge()
}
func (m *metricSnowflakeStorageStageBytesTotal) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
}
// updateCapacity saves the max length of the data point slice, which will be used as the slice capacity.
func (m *metricSnowflakeStorageStageBytesTotal) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricSnowflakeStorageStageBytesTotal) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricSnowflakeStorageStageBytesTotal(cfg MetricConfig) metricSnowflakeStorageStageBytesTotal {
m := metricSnowflakeStorageStageBytesTotal{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricSnowflakeStorageStorageBytesTotal struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills snowflake.storage.storage_bytes.total metric with initial data.
func (m *metricSnowflakeStorageStorageBytesTotal) init() {
m.data.SetName("snowflake.storage.storage_bytes.total")
m.data.SetDescription("Number of bytes of table storage used, including bytes for data currently in Time Travel.")
m.data.SetUnit("By")
m.data.SetEmptyGauge()
}
func (m *metricSnowflakeStorageStorageBytesTotal) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetIntValue(val)
}
// updateCapacity saves the max length of the data point slice, which will be used as the slice capacity.
func (m *metricSnowflakeStorageStorageBytesTotal) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricSnowflakeStorageStorageBytesTotal) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricSnowflakeStorageStorageBytesTotal(cfg MetricConfig) metricSnowflakeStorageStorageBytesTotal {
m := metricSnowflakeStorageStorageBytesTotal{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricSnowflakeTotalElapsedTimeAvg struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills snowflake.total_elapsed_time.avg metric with initial data.
func (m *metricSnowflakeTotalElapsedTimeAvg) init() {
m.data.SetName("snowflake.total_elapsed_time.avg")
m.data.SetDescription("Average elapsed time over the last 24 hour window.")
m.data.SetUnit("s")
m.data.SetEmptyGauge()
m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricSnowflakeTotalElapsedTimeAvg) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, schemaNameAttributeValue string, executionStatusAttributeValue string, errorMessageAttributeValue string, queryTypeAttributeValue string, warehouseNameAttributeValue string, databaseNameAttributeValue string, warehouseSizeAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleValue(val)
dp.Attributes().PutStr("schema_name", schemaNameAttributeValue)
dp.Attributes().PutStr("execution_status", executionStatusAttributeValue)
dp.Attributes().PutStr("error_message", errorMessageAttributeValue)
dp.Attributes().PutStr("query_type", queryTypeAttributeValue)
dp.Attributes().PutStr("warehouse_name", warehouseNameAttributeValue)
dp.Attributes().PutStr("database_name", databaseNameAttributeValue)
dp.Attributes().PutStr("warehouse_size", warehouseSizeAttributeValue)
}
// updateCapacity saves the max length of the data point slice, which will be used as the slice capacity.
func (m *metricSnowflakeTotalElapsedTimeAvg) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricSnowflakeTotalElapsedTimeAvg) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricSnowflakeTotalElapsedTimeAvg(cfg MetricConfig) metricSnowflakeTotalElapsedTimeAvg {
m := metricSnowflakeTotalElapsedTimeAvg{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
// MetricsBuilder provides an interface for scrapers to report metrics while taking care of all the transformations
// required to produce the metric representation defined in the metadata and user config.
type MetricsBuilder struct {
config MetricsBuilderConfig // config of the metrics builder.
startTime pcommon.Timestamp // start time that will be applied to all recorded data points.
metricsCapacity int // maximum observed number of metrics per resource.
metricsBuffer pmetric.Metrics // accumulates metrics data before emitting.
buildInfo component.BuildInfo // contains version information.
resourceAttributeIncludeFilter map[string]filter.Filter
resourceAttributeExcludeFilter map[string]filter.Filter
metricSnowflakeBillingCloudServiceTotal metricSnowflakeBillingCloudServiceTotal
metricSnowflakeBillingTotalCreditTotal metricSnowflakeBillingTotalCreditTotal
metricSnowflakeBillingVirtualWarehouseTotal metricSnowflakeBillingVirtualWarehouseTotal
metricSnowflakeBillingWarehouseCloudServiceTotal metricSnowflakeBillingWarehouseCloudServiceTotal
metricSnowflakeBillingWarehouseTotalCreditTotal metricSnowflakeBillingWarehouseTotalCreditTotal
metricSnowflakeBillingWarehouseVirtualWarehouseTotal metricSnowflakeBillingWarehouseVirtualWarehouseTotal
metricSnowflakeDatabaseBytesScannedAvg metricSnowflakeDatabaseBytesScannedAvg
metricSnowflakeDatabaseQueryCount metricSnowflakeDatabaseQueryCount
metricSnowflakeLoginsTotal metricSnowflakeLoginsTotal
metricSnowflakePipeCreditsUsedTotal metricSnowflakePipeCreditsUsedTotal
metricSnowflakeQueryBlocked metricSnowflakeQueryBlocked
metricSnowflakeQueryBytesDeletedAvg metricSnowflakeQueryBytesDeletedAvg
metricSnowflakeQueryBytesSpilledLocalAvg metricSnowflakeQueryBytesSpilledLocalAvg
metricSnowflakeQueryBytesSpilledRemoteAvg metricSnowflakeQueryBytesSpilledRemoteAvg
metricSnowflakeQueryBytesWrittenAvg metricSnowflakeQueryBytesWrittenAvg
metricSnowflakeQueryCompilationTimeAvg metricSnowflakeQueryCompilationTimeAvg
metricSnowflakeQueryDataScannedCacheAvg metricSnowflakeQueryDataScannedCacheAvg
metricSnowflakeQueryExecuted metricSnowflakeQueryExecuted
metricSnowflakeQueryExecutionTimeAvg metricSnowflakeQueryExecutionTimeAvg
metricSnowflakeQueryPartitionsScannedAvg metricSnowflakeQueryPartitionsScannedAvg
metricSnowflakeQueryQueuedOverload metricSnowflakeQueryQueuedOverload
metricSnowflakeQueryQueuedProvision metricSnowflakeQueryQueuedProvision
metricSnowflakeQueuedOverloadTimeAvg metricSnowflakeQueuedOverloadTimeAvg
metricSnowflakeQueuedProvisioningTimeAvg metricSnowflakeQueuedProvisioningTimeAvg
metricSnowflakeQueuedRepairTimeAvg metricSnowflakeQueuedRepairTimeAvg
metricSnowflakeRowsDeletedAvg metricSnowflakeRowsDeletedAvg
metricSnowflakeRowsInsertedAvg metricSnowflakeRowsInsertedAvg
metricSnowflakeRowsProducedAvg metricSnowflakeRowsProducedAvg
metricSnowflakeRowsUnloadedAvg metricSnowflakeRowsUnloadedAvg
metricSnowflakeRowsUpdatedAvg metricSnowflakeRowsUpdatedAvg
metricSnowflakeSessionIDCount metricSnowflakeSessionIDCount
metricSnowflakeStorageFailsafeBytesTotal metricSnowflakeStorageFailsafeBytesTotal
metricSnowflakeStorageStageBytesTotal metricSnowflakeStorageStageBytesTotal
metricSnowflakeStorageStorageBytesTotal metricSnowflakeStorageStorageBytesTotal
metricSnowflakeTotalElapsedTimeAvg metricSnowflakeTotalElapsedTimeAvg
}
// MetricBuilderOption applies changes to default metrics builder.
type MetricBuilderOption interface {
apply(*MetricsBuilder)
}
type metricBuilderOptionFunc func(mb *MetricsBuilder)
func (mbof metricBuilderOptionFunc) apply(mb *MetricsBuilder) {
mbof(mb)
}
// WithStartTime sets startTime on the metrics builder.
func WithStartTime(startTime pcommon.Timestamp) MetricBuilderOption {
return metricBuilderOptionFunc(func(mb *MetricsBuilder) {
mb.startTime = startTime
})
}
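// For example, to anchor all recorded data points to a fixed start time, the
// option can be passed to NewMetricsBuilder below (sketch; settings is a
// receiver.Settings value supplied by the collector, and
// DefaultMetricsBuilderConfig is assumed to come from this package's
// generated config):
//
//	start := pcommon.NewTimestampFromTime(time.Unix(0, 0))
//	mb := NewMetricsBuilder(DefaultMetricsBuilderConfig(), settings, WithStartTime(start))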
func NewMetricsBuilder(mbc MetricsBuilderConfig, settings receiver.Settings, options ...MetricBuilderOption) *MetricsBuilder {
mb := &MetricsBuilder{
config: mbc,
startTime: pcommon.NewTimestampFromTime(time.Now()),
metricsBuffer: pmetric.NewMetrics(),
buildInfo: settings.BuildInfo,
metricSnowflakeBillingCloudServiceTotal: newMetricSnowflakeBillingCloudServiceTotal(mbc.Metrics.SnowflakeBillingCloudServiceTotal),
metricSnowflakeBillingTotalCreditTotal: newMetricSnowflakeBillingTotalCreditTotal(mbc.Metrics.SnowflakeBillingTotalCreditTotal),
metricSnowflakeBillingVirtualWarehouseTotal: newMetricSnowflakeBillingVirtualWarehouseTotal(mbc.Metrics.SnowflakeBillingVirtualWarehouseTotal),
metricSnowflakeBillingWarehouseCloudServiceTotal: newMetricSnowflakeBillingWarehouseCloudServiceTotal(mbc.Metrics.SnowflakeBillingWarehouseCloudServiceTotal),
metricSnowflakeBillingWarehouseTotalCreditTotal: newMetricSnowflakeBillingWarehouseTotalCreditTotal(mbc.Metrics.SnowflakeBillingWarehouseTotalCreditTotal),
metricSnowflakeBillingWarehouseVirtualWarehouseTotal: newMetricSnowflakeBillingWarehouseVirtualWarehouseTotal(mbc.Metrics.SnowflakeBillingWarehouseVirtualWarehouseTotal),
metricSnowflakeDatabaseBytesScannedAvg: newMetricSnowflakeDatabaseBytesScannedAvg(mbc.Metrics.SnowflakeDatabaseBytesScannedAvg),
metricSnowflakeDatabaseQueryCount: newMetricSnowflakeDatabaseQueryCount(mbc.Metrics.SnowflakeDatabaseQueryCount),
metricSnowflakeLoginsTotal: newMetricSnowflakeLoginsTotal(mbc.Metrics.SnowflakeLoginsTotal),
metricSnowflakePipeCreditsUsedTotal: newMetricSnowflakePipeCreditsUsedTotal(mbc.Metrics.SnowflakePipeCreditsUsedTotal),
metricSnowflakeQueryBlocked: newMetricSnowflakeQueryBlocked(mbc.Metrics.SnowflakeQueryBlocked),
metricSnowflakeQueryBytesDeletedAvg: newMetricSnowflakeQueryBytesDeletedAvg(mbc.Metrics.SnowflakeQueryBytesDeletedAvg),
metricSnowflakeQueryBytesSpilledLocalAvg: newMetricSnowflakeQueryBytesSpilledLocalAvg(mbc.Metrics.SnowflakeQueryBytesSpilledLocalAvg),
metricSnowflakeQueryBytesSpilledRemoteAvg: newMetricSnowflakeQueryBytesSpilledRemoteAvg(mbc.Metrics.SnowflakeQueryBytesSpilledRemoteAvg),
metricSnowflakeQueryBytesWrittenAvg: newMetricSnowflakeQueryBytesWrittenAvg(mbc.Metrics.SnowflakeQueryBytesWrittenAvg),
metricSnowflakeQueryCompilationTimeAvg: newMetricSnowflakeQueryCompilationTimeAvg(mbc.Metrics.SnowflakeQueryCompilationTimeAvg),
metricSnowflakeQueryDataScannedCacheAvg: newMetricSnowflakeQueryDataScannedCacheAvg(mbc.Metrics.SnowflakeQueryDataScannedCacheAvg),
metricSnowflakeQueryExecuted: newMetricSnowflakeQueryExecuted(mbc.Metrics.SnowflakeQueryExecuted),
metricSnowflakeQueryExecutionTimeAvg: newMetricSnowflakeQueryExecutionTimeAvg(mbc.Metrics.SnowflakeQueryExecutionTimeAvg),
metricSnowflakeQueryPartitionsScannedAvg: newMetricSnowflakeQueryPartitionsScannedAvg(mbc.Metrics.SnowflakeQueryPartitionsScannedAvg),
metricSnowflakeQueryQueuedOverload: newMetricSnowflakeQueryQueuedOverload(mbc.Metrics.SnowflakeQueryQueuedOverload),
metricSnowflakeQueryQueuedProvision: newMetricSnowflakeQueryQueuedProvision(mbc.Metrics.SnowflakeQueryQueuedProvision),
metricSnowflakeQueuedOverloadTimeAvg: newMetricSnowflakeQueuedOverloadTimeAvg(mbc.Metrics.SnowflakeQueuedOverloadTimeAvg),
metricSnowflakeQueuedProvisioningTimeAvg: newMetricSnowflakeQueuedProvisioningTimeAvg(mbc.Metrics.SnowflakeQueuedProvisioningTimeAvg),
metricSnowflakeQueuedRepairTimeAvg: newMetricSnowflakeQueuedRepairTimeAvg(mbc.Metrics.SnowflakeQueuedRepairTimeAvg),
metricSnowflakeRowsDeletedAvg: newMetricSnowflakeRowsDeletedAvg(mbc.Metrics.SnowflakeRowsDeletedAvg),
metricSnowflakeRowsInsertedAvg: newMetricSnowflakeRowsInsertedAvg(mbc.Metrics.SnowflakeRowsInsertedAvg),
metricSnowflakeRowsProducedAvg: newMetricSnowflakeRowsProducedAvg(mbc.Metrics.SnowflakeRowsProducedAvg),
metricSnowflakeRowsUnloadedAvg: newMetricSnowflakeRowsUnloadedAvg(mbc.Metrics.SnowflakeRowsUnloadedAvg),
metricSnowflakeRowsUpdatedAvg: newMetricSnowflakeRowsUpdatedAvg(mbc.Metrics.SnowflakeRowsUpdatedAvg),
metricSnowflakeSessionIDCount: newMetricSnowflakeSessionIDCount(mbc.Metrics.SnowflakeSessionIDCount),
metricSnowflakeStorageFailsafeBytesTotal: newMetricSnowflakeStorageFailsafeBytesTotal(mbc.Metrics.SnowflakeStorageFailsafeBytesTotal),
metricSnowflakeStorageStageBytesTotal: newMetricSnowflakeStorageStageBytesTotal(mbc.Metrics.SnowflakeStorageStageBytesTotal),
metricSnowflakeStorageStorageBytesTotal: newMetricSnowflakeStorageStorageBytesTotal(mbc.Metrics.SnowflakeStorageStorageBytesTotal),
metricSnowflakeTotalElapsedTimeAvg: newMetricSnowflakeTotalElapsedTimeAvg(mbc.Metrics.SnowflakeTotalElapsedTimeAvg),
resourceAttributeIncludeFilter: make(map[string]filter.Filter),
resourceAttributeExcludeFilter: make(map[string]filter.Filter),
}
if mbc.ResourceAttributes.SnowflakeAccountName.MetricsInclude != nil {
mb.resourceAttributeIncludeFilter["snowflake.account.name"] = filter.CreateFilter(mbc.ResourceAttributes.SnowflakeAccountName.MetricsInclude)
}
if mbc.ResourceAttributes.SnowflakeAccountName.MetricsExclude != nil {
mb.resourceAttributeExcludeFilter["snowflake.account.name"] = filter.CreateFilter(mbc.ResourceAttributes.SnowflakeAccountName.MetricsExclude)
}
for _, op := range options {
op.apply(mb)
}
return mb
}
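// A typical scrape flow with the builder (sketch, assuming the
// DefaultMetricsBuilderConfig helper from the package's generated config and
// a receiver.Settings value named settings):
//
//	mb := NewMetricsBuilder(DefaultMetricsBuilderConfig(), settings)
//	now := pcommon.NewTimestampFromTime(time.Now())
//	mb.RecordSnowflakeStorageStageBytesTotalDataPoint(now, 1024)
//	metrics := mb.Emit() // returns the buffered pmetric.Metrics and resets the buffer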
// NewResourceBuilder returns a new resource builder that should be used to build a resource associated with the emitted metrics.
func (mb *MetricsBuilder) NewResourceBuilder() *ResourceBuilder {
return NewResourceBuilder(mb.config.ResourceAttributes)
}
// updateCapacity updates the max number of metrics per resource, which will be used as the metrics slice capacity.
func (mb *MetricsBuilder) updateCapacity(rm pmetric.ResourceMetrics) {
if mb.metricsCapacity < rm.ScopeMetrics().At(0).Metrics().Len() {
mb.metricsCapacity = rm.ScopeMetrics().At(0).Metrics().Len()
}
}
// ResourceMetricsOption applies changes to provided resource metrics.
type ResourceMetricsOption interface {
apply(pmetric.ResourceMetrics)
}
type resourceMetricsOptionFunc func(pmetric.ResourceMetrics)
func (rmof resourceMetricsOptionFunc) apply(rm pmetric.ResourceMetrics) {
rmof(rm)
}
// WithResource sets the provided resource on the emitted ResourceMetrics.
// It's recommended to use ResourceBuilder to create the resource.
func WithResource(res pcommon.Resource) ResourceMetricsOption {
return resourceMetricsOptionFunc(func(rm pmetric.ResourceMetrics) {
res.CopyTo(rm.Resource())
})
}
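// Sketch of pairing WithResource with the generated ResourceBuilder (the
// setter name assumes the builder generated in this package's
// generated_resource.go for the snowflake.account.name attribute):
//
//	rb := mb.NewResourceBuilder()
//	rb.SetSnowflakeAccountName("my_account")
//	mb.EmitForResource(WithResource(rb.Emit()))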
// WithStartTimeOverride overrides the start time for all the resource metrics data points.
// This option should only be used if a different start time has to be set on metrics coming from different resources.
func WithStartTimeOverride(start pcommon.Timestamp) ResourceMetricsOption {
return resourceMetricsOptionFunc(func(rm pmetric.ResourceMetrics) {
metrics := rm.ScopeMetrics().At(0).Metrics()
for i := 0; i < metrics.Len(); i++ {
var dps pmetric.NumberDataPointSlice
switch metrics.At(i).Type() {
case pmetric.MetricTypeGauge:
dps = metrics.At(i).Gauge().DataPoints()
case pmetric.MetricTypeSum:
dps = metrics.At(i).Sum().DataPoints()
default:
// Skip metric types without number data points; this builder only generates gauges and sums.
continue
}
for j := 0; j < dps.Len(); j++ {
dps.At(j).SetStartTimestamp(start)
}
}
})
}
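// Sketch of overriding the start time for a single resource's metrics at emit
// time (scrapeStart is an illustrative time.Time captured by the caller):
//
//	mb.EmitForResource(WithResource(res), WithStartTimeOverride(pcommon.NewTimestampFromTime(scrapeStart)))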
// EmitForResource saves all the generated metrics under a new resource and updates the internal state to be ready for
// recording another set of data points as part of another resource. This function can be helpful when one scraper
// needs to emit metrics from several resources. Otherwise, calling this function is not required;
// the `Emit` function can be called instead.
// Resource attributes should be provided as ResourceMetricsOption arguments.
func (mb *MetricsBuilder) EmitForResource(options ...ResourceMetricsOption) {
rm := pmetric.NewResourceMetrics()
ils := rm.ScopeMetrics().AppendEmpty()
ils.Scope().SetName(ScopeName)
ils.Scope().SetVersion(mb.buildInfo.Version)
ils.Metrics().EnsureCapacity(mb.metricsCapacity)
mb.metricSnowflakeBillingCloudServiceTotal.emit(ils.Metrics())
mb.metricSnowflakeBillingTotalCreditTotal.emit(ils.Metrics())
mb.metricSnowflakeBillingVirtualWarehouseTotal.emit(ils.Metrics())
mb.metricSnowflakeBillingWarehouseCloudServiceTotal.emit(ils.Metrics())
mb.metricSnowflakeBillingWarehouseTotalCreditTotal.emit(ils.Metrics())
mb.metricSnowflakeBillingWarehouseVirtualWarehouseTotal.emit(ils.Metrics())
mb.metricSnowflakeDatabaseBytesScannedAvg.emit(ils.Metrics())
mb.metricSnowflakeDatabaseQueryCount.emit(ils.Metrics())
mb.metricSnowflakeLoginsTotal.emit(ils.Metrics())
mb.metricSnowflakePipeCreditsUsedTotal.emit(ils.Metrics())
mb.metricSnowflakeQueryBlocked.emit(ils.Metrics())
mb.metricSnowflakeQueryBytesDeletedAvg.emit(ils.Metrics())
mb.metricSnowflakeQueryBytesSpilledLocalAvg.emit(ils.Metrics())
mb.metricSnowflakeQueryBytesSpilledRemoteAvg.emit(ils.Metrics())
mb.metricSnowflakeQueryBytesWrittenAvg.emit(ils.Metrics())
mb.metricSnowflakeQueryCompilationTimeAvg.emit(ils.Metrics())
mb.metricSnowflakeQueryDataScannedCacheAvg.emit(ils.Metrics())
mb.metricSnowflakeQueryExecuted.emit(ils.Metrics())
mb.metricSnowflakeQueryExecutionTimeAvg.emit(ils.Metrics())
mb.metricSnowflakeQueryPartitionsScannedAvg.emit(ils.Metrics())
mb.metricSnowflakeQueryQueuedOverload.emit(ils.Metrics())
mb.metricSnowflakeQueryQueuedProvision.emit(ils.Metrics())
mb.metricSnowflakeQueuedOverloadTimeAvg.emit(ils.Metrics())
mb.metricSnowflakeQueuedProvisioningTimeAvg.emit(ils.Metrics())
mb.metricSnowflakeQueuedRepairTimeAvg.emit(ils.Metrics())
mb.metricSnowflakeRowsDeletedAvg.emit(ils.Metrics())
mb.metricSnowflakeRowsInsertedAvg.emit(ils.Metrics())
mb.metricSnowflakeRowsProducedAvg.emit(ils.Metrics())
mb.metricSnowflakeRowsUnloadedAvg.emit(ils.Metrics())
mb.metricSnowflakeRowsUpdatedAvg.emit(ils.Metrics())
mb.metricSnowflakeSessionIDCount.emit(ils.Metrics())
mb.metricSnowflakeStorageFailsafeBytesTotal.emit(ils.Metrics())
mb.metricSnowflakeStorageStageBytesTotal.emit(ils.Metrics())
mb.metricSnowflakeStorageStorageBytesTotal.emit(ils.Metrics())
mb.metricSnowflakeTotalElapsedTimeAvg.emit(ils.Metrics())
for _, op := range options {
op.apply(rm)
}
for attr, filter := range mb.resourceAttributeIncludeFilter {
if val, ok := rm.Resource().Attributes().Get(attr); ok && !filter.Matches(val.AsString()) {
return
}
}
for attr, filter := range mb.resourceAttributeExcludeFilter {
if val, ok := rm.Resource().Attributes().Get(attr); ok && filter.Matches(val.AsString()) {
return
}
}
if ils.Metrics().Len() > 0 {
mb.updateCapacity(rm)
rm.MoveTo(mb.metricsBuffer.ResourceMetrics().AppendEmpty())
}
}
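// Sketch of the per-resource pattern EmitForResource enables (accounts and
// the recording step are illustrative placeholders):
//
//	for _, account := range accounts {
//		// ... record data points for this account via the Record* methods ...
//		rb := mb.NewResourceBuilder()
//		rb.SetSnowflakeAccountName(account)
//		mb.EmitForResource(WithResource(rb.Emit()))
//	}
//	metrics := mb.Emit()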
// Emit returns all the metrics accumulated by the metrics builder and updates the internal state to be ready for
// recording another set of metrics. This function is responsible for applying all the transformations required to
// produce the metric representation defined in the metadata and user config, e.g. delta or cumulative.
func (mb *MetricsBuilder) Emit(options ...ResourceMetricsOption) pmetric.Metrics {
mb.EmitForResource(options...)
metrics := mb.metricsBuffer
mb.metricsBuffer = pmetric.NewMetrics()
return metrics
}
// RecordSnowflakeBillingCloudServiceTotalDataPoint adds a data point to snowflake.billing.cloud_service.total metric.
func (mb *MetricsBuilder) RecordSnowflakeBillingCloudServiceTotalDataPoint(ts pcommon.Timestamp, val float64, serviceTypeAttributeValue string) {
mb.metricSnowflakeBillingCloudServiceTotal.recordDataPoint(mb.startTime, ts, val, serviceTypeAttributeValue)
}
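// Sketch of a recording call as a scraper might issue it after querying
// Snowflake's billing usage view (the value and service type here are
// illustrative):
//
//	mb.RecordSnowflakeBillingCloudServiceTotalDataPoint(now, 12.5, "WAREHOUSE_METERING")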
// RecordSnowflakeBillingTotalCreditTotalDataPoint adds a data point to snowflake.billing.total_credit.total metric.
func (mb *MetricsBuilder) RecordSnowflakeBillingTotalCreditTotalDataPoint(ts pcommon.Timestamp, val float64, serviceTypeAttributeValue string) {
mb.metricSnowflakeBillingTotalCreditTotal.recordDataPoint(mb.startTime, ts, val, serviceTypeAttributeValue)
}
// RecordSnowflakeBillingVirtualWarehouseTotalDataPoint adds a data point to snowflake.billing.virtual_warehouse.total metric.
func (mb *MetricsBuilder) RecordSnowflakeBillingVirtualWarehouseTotalDataPoint(ts pcommon.Timestamp, val float64, serviceTypeAttributeValue string) {
mb.metricSnowflakeBillingVirtualWarehouseTotal.recordDataPoint(mb.startTime, ts, val, serviceTypeAttributeValue)
}
// RecordSnowflakeBillingWarehouseCloudServiceTotalDataPoint adds a data point to snowflake.billing.warehouse.cloud_service.total metric.
func (mb *MetricsBuilder) RecordSnowflakeBillingWarehouseCloudServiceTotalDataPoint(ts pcommon.Timestamp, val float64, warehouseNameAttributeValue string) {
mb.metricSnowflakeBillingWarehouseCloudServiceTotal.recordDataPoint(mb.startTime, ts, val, warehouseNameAttributeValue)
}
// RecordSnowflakeBillingWarehouseTotalCreditTotalDataPoint adds a data point to snowflake.billing.warehouse.total_credit.total metric.
func (mb *MetricsBuilder) RecordSnowflakeBillingWarehouseTotalCreditTotalDataPoint(ts pcommon.Timestamp, val float64, warehouseNameAttributeValue string) {
mb.metricSnowflakeBillingWarehouseTotalCreditTotal.recordDataPoint(mb.startTime, ts, val, warehouseNameAttributeValue)
}
// RecordSnowflakeBillingWarehouseVirtualWarehouseTotalDataPoint adds a data point to snowflake.billing.warehouse.virtual_warehouse.total metric.
func (mb *MetricsBuilder) RecordSnowflakeBillingWarehouseVirtualWarehouseTotalDataPoint(ts pcommon.Timestamp, val float64, warehouseNameAttributeValue string) {
mb.metricSnowflakeBillingWarehouseVirtualWarehouseTotal.recordDataPoint(mb.startTime, ts, val, warehouseNameAttributeValue)
}
// RecordSnowflakeDatabaseBytesScannedAvgDataPoint adds a data point to snowflake.database.bytes_scanned.avg metric.
func (mb *MetricsBuilder) RecordSnowflakeDatabaseBytesScannedAvgDataPoint(ts pcommon.Timestamp, val float64, schemaNameAttributeValue string, executionStatusAttributeValue string, errorMessageAttributeValue string, queryTypeAttributeValue string, warehouseNameAttributeValue string, databaseNameAttributeValue string, warehouseSizeAttributeValue string) {
mb.metricSnowflakeDatabaseBytesScannedAvg.recordDataPoint(mb.startTime, ts, val, schemaNameAttributeValue, executionStatusAttributeValue, errorMessageAttributeValue, queryTypeAttributeValue, warehouseNameAttributeValue, databaseNameAttributeValue, warehouseSizeAttributeValue)
}
// RecordSnowflakeDatabaseQueryCountDataPoint adds a data point to snowflake.database.query.count metric.
func (mb *MetricsBuilder) RecordSnowflakeDatabaseQueryCountDataPoint(ts pcommon.Timestamp, val int64, schemaNameAttributeValue string, executionStatusAttributeValue string, errorMessageAttributeValue string, queryTypeAttributeValue string, warehouseNameAttributeValue string, databaseNameAttributeValue string, warehouseSizeAttributeValue string) {
mb.metricSnowflakeDatabaseQueryCount.recordDataPoint(mb.startTime, ts, val, schemaNameAttributeValue, executionStatusAttributeValue, errorMessageAttributeValue, queryTypeAttributeValue, warehouseNameAttributeValue, databaseNameAttributeValue, warehouseSizeAttributeValue)
}
// RecordSnowflakeLoginsTotalDataPoint adds a data point to snowflake.logins.total metric.
func (mb *MetricsBuilder) RecordSnowflakeLoginsTotalDataPoint(ts pcommon.Timestamp, val int64, errorMessageAttributeValue string, reportedClientTypeAttributeValue string, isSuccessAttributeValue string) {
mb.metricSnowflakeLoginsTotal.recordDataPoint(mb.startTime, ts, val, errorMessageAttributeValue, reportedClientTypeAttributeValue, isSuccessAttributeValue)
}
// RecordSnowflakePipeCreditsUsedTotalDataPoint adds a data point to snowflake.pipe.credits_used.total metric.
func (mb *MetricsBuilder) RecordSnowflakePipeCreditsUsedTotalDataPoint(ts pcommon.Timestamp, val float64, pipeNameAttributeValue string) {
mb.metricSnowflakePipeCreditsUsedTotal.recordDataPoint(mb.startTime, ts, val, pipeNameAttributeValue)
}
// RecordSnowflakeQueryBlockedDataPoint adds a data point to snowflake.query.blocked metric.
func (mb *MetricsBuilder) RecordSnowflakeQueryBlockedDataPoint(ts pcommon.Timestamp, val float64, warehouseNameAttributeValue string) {
mb.metricSnowflakeQueryBlocked.recordDataPoint(mb.startTime, ts, val, warehouseNameAttributeValue)
}
// RecordSnowflakeQueryBytesDeletedAvgDataPoint adds a data point to snowflake.query.bytes_deleted.avg metric.
func (mb *MetricsBuilder) RecordSnowflakeQueryBytesDeletedAvgDataPoint(ts pcommon.Timestamp, val float64, schemaNameAttributeValue string, executionStatusAttributeValue string, errorMessageAttributeValue string, queryTypeAttributeValue string, warehouseNameAttributeValue string, databaseNameAttributeValue string, warehouseSizeAttributeValue string) {
mb.metricSnowflakeQueryBytesDeletedAvg.recordDataPoint(mb.startTime, ts, val, schemaNameAttributeValue, executionStatusAttributeValue, errorMessageAttributeValue, queryTypeAttributeValue, warehouseNameAttributeValue, databaseNameAttributeValue, warehouseSizeAttributeValue)
}
// RecordSnowflakeQueryBytesSpilledLocalAvgDataPoint adds a data point to snowflake.query.bytes_spilled.local.avg metric.
func (mb *MetricsBuilder) RecordSnowflakeQueryBytesSpilledLocalAvgDataPoint(ts pcommon.Timestamp, val float64, schemaNameAttributeValue string, executionStatusAttributeValue string, errorMessageAttributeValue string, queryTypeAttributeValue string, warehouseNameAttributeValue string, databaseNameAttributeValue string, warehouseSizeAttributeValue string) {
mb.metricSnowflakeQueryBytesSpilledLocalAvg.recordDataPoint(mb.startTime, ts, val, schemaNameAttributeValue, executionStatusAttributeValue, errorMessageAttributeValue, queryTypeAttributeValue, warehouseNameAttributeValue, databaseNameAttributeValue, warehouseSizeAttributeValue)
}
// RecordSnowflakeQueryBytesSpilledRemoteAvgDataPoint adds a data point to snowflake.query.bytes_spilled.remote.avg metric.
func (mb *MetricsBuilder) RecordSnowflakeQueryBytesSpilledRemoteAvgDataPoint(ts pcommon.Timestamp, val float64, schemaNameAttributeValue string, executionStatusAttributeValue string, errorMessageAttributeValue string, queryTypeAttributeValue string, warehouseNameAttributeValue string, databaseNameAttributeValue string, warehouseSizeAttributeValue string) {
mb.metricSnowflakeQueryBytesSpilledRemoteAvg.recordDataPoint(mb.startTime, ts, val, schemaNameAttributeValue, executionStatusAttributeValue, errorMessageAttributeValue, queryTypeAttributeValue, warehouseNameAttributeValue, databaseNameAttributeValue, warehouseSizeAttributeValue)
}
// RecordSnowflakeQueryBytesWrittenAvgDataPoint adds a data point to snowflake.query.bytes_written.avg metric.
func (mb *MetricsBuilder) RecordSnowflakeQueryBytesWrittenAvgDataPoint(ts pcommon.Timestamp, val float64, schemaNameAttributeValue string, executionStatusAttributeValue string, errorMessageAttributeValue string, queryTypeAttributeValue string, warehouseNameAttributeValue string, databaseNameAttributeValue string, warehouseSizeAttributeValue string) {
mb.metricSnowflakeQueryBytesWrittenAvg.recordDataPoint(mb.startTime, ts, val, schemaNameAttributeValue, executionStatusAttributeValue, errorMessageAttributeValue, queryTypeAttributeValue, warehouseNameAttributeValue, databaseNameAttributeValue, warehouseSizeAttributeValue)
}
// RecordSnowflakeQueryCompilationTimeAvgDataPoint adds a data point to snowflake.query.compilation_time.avg metric.
func (mb *MetricsBuilder) RecordSnowflakeQueryCompilationTimeAvgDataPoint(ts pcommon.Timestamp, val float64, schemaNameAttributeValue string, executionStatusAttributeValue string, errorMessageAttributeValue string, queryTypeAttributeValue string, warehouseNameAttributeValue string, databaseNameAttributeValue string, warehouseSizeAttributeValue string) {
mb.metricSnowflakeQueryCompilationTimeAvg.recordDataPoint(mb.startTime, ts, val, schemaNameAttributeValue, executionStatusAttributeValue, errorMessageAttributeValue, queryTypeAttributeValue, warehouseNameAttributeValue, databaseNameAttributeValue, warehouseSizeAttributeValue)
}
// RecordSnowflakeQueryDataScannedCacheAvgDataPoint adds a data point to snowflake.query.data_scanned_cache.avg metric.
func (mb *MetricsBuilder) RecordSnowflakeQueryDataScannedCacheAvgDataPoint(ts pcommon.Timestamp, val float64, schemaNameAttributeValue string, executionStatusAttributeValue string, errorMessageAttributeValue string, queryTypeAttributeValue string, warehouseNameAttributeValue string, databaseNameAttributeValue string, warehouseSizeAttributeValue string) {
mb.metricSnowflakeQueryDataScannedCacheAvg.recordDataPoint(mb.startTime, ts, val, schemaNameAttributeValue, executionStatusAttributeValue, errorMessageAttributeValue, queryTypeAttributeValue, warehouseNameAttributeValue, databaseNameAttributeValue, warehouseSizeAttributeValue)
}
// RecordSnowflakeQueryExecutedDataPoint adds a data point to snowflake.query.executed metric.
func (mb *MetricsBuilder) RecordSnowflakeQueryExecutedDataPoint(ts pcommon.Timestamp, val float64, warehouseNameAttributeValue string) {
mb.metricSnowflakeQueryExecuted.recordDataPoint(mb.startTime, ts, val, warehouseNameAttributeValue)
}
// RecordSnowflakeQueryExecutionTimeAvgDataPoint adds a data point to snowflake.query.execution_time.avg metric.
func (mb *MetricsBuilder) RecordSnowflakeQueryExecutionTimeAvgDataPoint(ts pcommon.Timestamp, val float64, schemaNameAttributeValue string, executionStatusAttributeValue string, errorMessageAttributeValue string, queryTypeAttributeValue string, warehouseNameAttributeValue string, databaseNameAttributeValue string, warehouseSizeAttributeValue string) {
mb.metricSnowflakeQueryExecutionTimeAvg.recordDataPoint(mb.startTime, ts, val, schemaNameAttributeValue, executionStatusAttributeValue, errorMessageAttributeValue, queryTypeAttributeValue, warehouseNameAttributeValue, databaseNameAttributeValue, warehouseSizeAttributeValue)
}
// RecordSnowflakeQueryPartitionsScannedAvgDataPoint adds a data point to snowflake.query.partitions_scanned.avg metric.
func (mb *MetricsBuilder) RecordSnowflakeQueryPartitionsScannedAvgDataPoint(ts pcommon.Timestamp, val float64, schemaNameAttributeValue string, executionStatusAttributeValue string, errorMessageAttributeValue string, queryTypeAttributeValue string, warehouseNameAttributeValue string, databaseNameAttributeValue string, warehouseSizeAttributeValue string) {
mb.metricSnowflakeQueryPartitionsScannedAvg.recordDataPoint(mb.startTime, ts, val, schemaNameAttributeValue, executionStatusAttributeValue, errorMessageAttributeValue, queryTypeAttributeValue, warehouseNameAttributeValue, databaseNameAttributeValue, warehouseSizeAttributeValue)
}
// RecordSnowflakeQueryQueuedOverloadDataPoint adds a data point to snowflake.query.queued_overload metric.
func (mb *MetricsBuilder) RecordSnowflakeQueryQueuedOverloadDataPoint(ts pcommon.Timestamp, val float64, warehouseNameAttributeValue string) {
mb.metricSnowflakeQueryQueuedOverload.recordDataPoint(mb.startTime, ts, val, warehouseNameAttributeValue)
}
// RecordSnowflakeQueryQueuedProvisionDataPoint adds a data point to snowflake.query.queued_provision metric.
func (mb *MetricsBuilder) RecordSnowflakeQueryQueuedProvisionDataPoint(ts pcommon.Timestamp, val float64, warehouseNameAttributeValue string) {
mb.metricSnowflakeQueryQueuedProvision.recordDataPoint(mb.startTime, ts, val, warehouseNameAttributeValue)
}
// RecordSnowflakeQueuedOverloadTimeAvgDataPoint adds a data point to snowflake.queued_overload_time.avg metric.
func (mb *MetricsBuilder) RecordSnowflakeQueuedOverloadTimeAvgDataPoint(ts pcommon.Timestamp, val float64, schemaNameAttributeValue string, executionStatusAttributeValue string, errorMessageAttributeValue string, queryTypeAttributeValue string, warehouseNameAttributeValue string, databaseNameAttributeValue string, warehouseSizeAttributeValue string) {
mb.metricSnowflakeQueuedOverloadTimeAvg.recordDataPoint(mb.startTime, ts, val, schemaNameAttributeValue, executionStatusAttributeValue, errorMessageAttributeValue, queryTypeAttributeValue, warehouseNameAttributeValue, databaseNameAttributeValue, warehouseSizeAttributeValue)
}
// RecordSnowflakeQueuedProvisioningTimeAvgDataPoint adds a data point to snowflake.queued_provisioning_time.avg metric.
func (mb *MetricsBuilder) RecordSnowflakeQueuedProvisioningTimeAvgDataPoint(ts pcommon.Timestamp, val float64, schemaNameAttributeValue string, executionStatusAttributeValue string, errorMessageAttributeValue string, queryTypeAttributeValue string, warehouseNameAttributeValue string, databaseNameAttributeValue string, warehouseSizeAttributeValue string) {
mb.metricSnowflakeQueuedProvisioningTimeAvg.recordDataPoint(mb.startTime, ts, val, schemaNameAttributeValue, executionStatusAttributeValue, errorMessageAttributeValue, queryTypeAttributeValue, warehouseNameAttributeValue, databaseNameAttributeValue, warehouseSizeAttributeValue)
}
// RecordSnowflakeQueuedRepairTimeAvgDataPoint adds a data point to snowflake.queued_repair_time.avg metric.
func (mb *MetricsBuilder) RecordSnowflakeQueuedRepairTimeAvgDataPoint(ts pcommon.Timestamp, val float64, schemaNameAttributeValue string, executionStatusAttributeValue string, errorMessageAttributeValue string, queryTypeAttributeValue string, warehouseNameAttributeValue string, databaseNameAttributeValue string, warehouseSizeAttributeValue string) {
mb.metricSnowflakeQueuedRepairTimeAvg.recordDataPoint(mb.startTime, ts, val, schemaNameAttributeValue, executionStatusAttributeValue, errorMessageAttributeValue, queryTypeAttributeValue, warehouseNameAttributeValue, databaseNameAttributeValue, warehouseSizeAttributeValue)
}
// RecordSnowflakeRowsDeletedAvgDataPoint adds a data point to snowflake.rows_deleted.avg metric.
func (mb *MetricsBuilder) RecordSnowflakeRowsDeletedAvgDataPoint(ts pcommon.Timestamp, val float64, schemaNameAttributeValue string, executionStatusAttributeValue string, errorMessageAttributeValue string, queryTypeAttributeValue string, warehouseNameAttributeValue string, databaseNameAttributeValue string, warehouseSizeAttributeValue string) {
mb.metricSnowflakeRowsDeletedAvg.recordDataPoint(mb.startTime, ts, val, schemaNameAttributeValue, executionStatusAttributeValue, errorMessageAttributeValue, queryTypeAttributeValue, warehouseNameAttributeValue, databaseNameAttributeValue, warehouseSizeAttributeValue)
}
// RecordSnowflakeRowsInsertedAvgDataPoint adds a data point to snowflake.rows_inserted.avg metric.
func (mb *MetricsBuilder) RecordSnowflakeRowsInsertedAvgDataPoint(ts pcommon.Timestamp, val float64, schemaNameAttributeValue string, executionStatusAttributeValue string, errorMessageAttributeValue string, queryTypeAttributeValue string, warehouseNameAttributeValue string, databaseNameAttributeValue string, warehouseSizeAttributeValue string) {
mb.metricSnowflakeRowsInsertedAvg.recordDataPoint(mb.startTime, ts, val, schemaNameAttributeValue, executionStatusAttributeValue, errorMessageAttributeValue, queryTypeAttributeValue, warehouseNameAttributeValue, databaseNameAttributeValue, warehouseSizeAttributeValue)
}
// RecordSnowflakeRowsProducedAvgDataPoint adds a data point to snowflake.rows_produced.avg metric.
func (mb *MetricsBuilder) RecordSnowflakeRowsProducedAvgDataPoint(ts pcommon.Timestamp, val float64, schemaNameAttributeValue string, executionStatusAttributeValue string, errorMessageAttributeValue string, queryTypeAttributeValue string, warehouseNameAttributeValue string, databaseNameAttributeValue string, warehouseSizeAttributeValue string) {
mb.metricSnowflakeRowsProducedAvg.recordDataPoint(mb.startTime, ts, val, schemaNameAttributeValue, executionStatusAttributeValue, errorMessageAttributeValue, queryTypeAttributeValue, warehouseNameAttributeValue, databaseNameAttributeValue, warehouseSizeAttributeValue)
}
// RecordSnowflakeRowsUnloadedAvgDataPoint adds a data point to snowflake.rows_unloaded.avg metric.
func (mb *MetricsBuilder) RecordSnowflakeRowsUnloadedAvgDataPoint(ts pcommon.Timestamp, val float64, schemaNameAttributeValue string, executionStatusAttributeValue string, errorMessageAttributeValue string, queryTypeAttributeValue string, warehouseNameAttributeValue string, databaseNameAttributeValue string, warehouseSizeAttributeValue string) {
mb.metricSnowflakeRowsUnloadedAvg.recordDataPoint(mb.startTime, ts, val, schemaNameAttributeValue, executionStatusAttributeValue, errorMessageAttributeValue, queryTypeAttributeValue, warehouseNameAttributeValue, databaseNameAttributeValue, warehouseSizeAttributeValue)
}
// RecordSnowflakeRowsUpdatedAvgDataPoint adds a data point to snowflake.rows_updated.avg metric.
func (mb *MetricsBuilder) RecordSnowflakeRowsUpdatedAvgDataPoint(ts pcommon.Timestamp, val float64, schemaNameAttributeValue string, executionStatusAttributeValue string, errorMessageAttributeValue string, queryTypeAttributeValue string, warehouseNameAttributeValue string, databaseNameAttributeValue string, warehouseSizeAttributeValue string) {
mb.metricSnowflakeRowsUpdatedAvg.recordDataPoint(mb.startTime, ts, val, schemaNameAttributeValue, executionStatusAttributeValue, errorMessageAttributeValue, queryTypeAttributeValue, warehouseNameAttributeValue, databaseNameAttributeValue, warehouseSizeAttributeValue)
}
// RecordSnowflakeSessionIDCountDataPoint adds a data point to snowflake.session_id.count metric.
func (mb *MetricsBuilder) RecordSnowflakeSessionIDCountDataPoint(ts pcommon.Timestamp, val int64, userNameAttributeValue string) {
mb.metricSnowflakeSessionIDCount.recordDataPoint(mb.startTime, ts, val, userNameAttributeValue)
}
// RecordSnowflakeStorageFailsafeBytesTotalDataPoint adds a data point to snowflake.storage.failsafe_bytes.total metric.
func (mb *MetricsBuilder) RecordSnowflakeStorageFailsafeBytesTotalDataPoint(ts pcommon.Timestamp, val int64) {
mb.metricSnowflakeStorageFailsafeBytesTotal.recordDataPoint(mb.startTime, ts, val)
}
// RecordSnowflakeStorageStageBytesTotalDataPoint adds a data point to snowflake.storage.stage_bytes.total metric.
func (mb *MetricsBuilder) RecordSnowflakeStorageStageBytesTotalDataPoint(ts pcommon.Timestamp, val int64) {
mb.metricSnowflakeStorageStageBytesTotal.recordDataPoint(mb.startTime, ts, val)
}
// RecordSnowflakeStorageStorageBytesTotalDataPoint adds a data point to snowflake.storage.storage_bytes.total metric.
func (mb *MetricsBuilder) RecordSnowflakeStorageStorageBytesTotalDataPoint(ts pcommon.Timestamp, val int64) {
mb.metricSnowflakeStorageStorageBytesTotal.recordDataPoint(mb.startTime, ts, val)
}
// RecordSnowflakeTotalElapsedTimeAvgDataPoint adds a data point to snowflake.total_elapsed_time.avg metric.
func (mb *MetricsBuilder) RecordSnowflakeTotalElapsedTimeAvgDataPoint(ts pcommon.Timestamp, val float64, schemaNameAttributeValue string, executionStatusAttributeValue string, errorMessageAttributeValue string, queryTypeAttributeValue string, warehouseNameAttributeValue string, databaseNameAttributeValue string, warehouseSizeAttributeValue string) {
mb.metricSnowflakeTotalElapsedTimeAvg.recordDataPoint(mb.startTime, ts, val, schemaNameAttributeValue, executionStatusAttributeValue, errorMessageAttributeValue, queryTypeAttributeValue, warehouseNameAttributeValue, databaseNameAttributeValue, warehouseSizeAttributeValue)
}
// Reset resets the metrics builder to its initial state. It should be used when an external metrics source is restarted,
// so that the metrics builder updates its startTime and resets its internal state accordingly.
func (mb *MetricsBuilder) Reset(options ...MetricBuilderOption) {
mb.startTime = pcommon.NewTimestampFromTime(time.Now())
for _, op := range options {
op.apply(mb)
}
}
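// Sketch: after detecting that the monitored Snowflake source restarted, a
// scraper might reset the builder with an explicit start time:
//
//	mb.Reset(WithStartTime(pcommon.NewTimestampFromTime(time.Now())))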