# redshift_monitoring.py
def gather_service_class_stats(cursor, cluster):
    # Collect hourly WLM service class statistics for the last two hours:
    # queued/executed query counts, queue and execution times, and the number
    # of queries with disk-based segments, grouped by service class.
    metrics = []
    runtime = run_command(cursor, '''
        SELECT DATE_TRUNC('hour', a.service_class_start_time) AS metrics_ts,
               TRIM(d.name) AS service_class,
               COUNT(a.query) AS query_count,
               SUM(a.total_exec_time) AS sum_exec_time,
               SUM(CASE WHEN a.total_queue_time > 0 THEN 1 ELSE 0 END) AS count_queued_queries,
               SUM(a.total_queue_time) AS sum_queue_time,
               COUNT(c.is_diskbased) AS count_diskbased_segments
        FROM stl_wlm_query a
             JOIN stv_wlm_classification_config b ON a.service_class = b.action_service_class
             LEFT OUTER JOIN (SELECT query, SUM(CASE WHEN is_diskbased = 't' THEN 1 ELSE 0 END) AS is_diskbased
                              FROM svl_query_summary
                              GROUP BY query) c ON a.query = c.query
             JOIN stv_wlm_service_class_config d ON a.service_class = d.service_class
        WHERE a.service_class > 5
          AND a.service_class_start_time > DATEADD(hour, -2, current_date)
        GROUP BY DATE_TRUNC('hour', a.service_class_start_time),
                 d.name
    ''')
    service_class_info = cursor.fetchall()

    def add_metric(metric_name, service_class_id, metric_value, ts):
        # Build one CloudWatch metric datum, dimensioned by cluster and service class.
        metrics.append({
            'MetricName': metric_name,
            'Dimensions': [{'Name': 'ClusterIdentifier', 'Value': cluster},
                           {'Name': 'ServiceClassID', 'Value': str(service_class_id)}],
            'Timestamp': ts,
            'Value': metric_value
        })

    # Row layout from the query above: 0=metrics_ts, 1=service_class, 2=query_count,
    # 3=sum_exec_time, 4=count_queued_queries, 5=sum_queue_time, 6=count_diskbased_segments
    for service_class in service_class_info:
        add_metric('ServiceClass-Queued', service_class[1], service_class[4], service_class[0])
        add_metric('ServiceClass-QueueTime', service_class[1], service_class[5], service_class[0])
        add_metric('ServiceClass-Executed', service_class[1], service_class[2], service_class[0])
        add_metric('ServiceClass-ExecTime', service_class[1], service_class[3], service_class[0])
        add_metric('ServiceClass-DiskbasedQuerySegments', service_class[1], service_class[6], service_class[0])

    return metrics
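

# The list returned above is already shaped as CloudWatch metric data. A minimal
# publishing sketch follows; it assumes boto3 is available and configured, and the
# function name publish_metrics, the region default, and the 'Redshift' namespace
# are illustrative assumptions rather than taken from this file.
import boto3


def publish_metrics(metrics, region='us-east-1'):
    # put_metric_data caps how many data points one call may carry, so send the
    # metrics list in small chunks.
    cw = boto3.client('cloudwatch', region_name=region)
    for i in range(0, len(metrics), 20):
        cw.put_metric_data(Namespace='Redshift', MetricData=metrics[i:i + 20])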