in source/code/plugins/mysql_workload_lib.rb [132:318]
def get_server_stats
  global_stats = Array.new
  env_var_stats = Array.new
  final_records = Array.new
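
  # Pull server-wide status counters and configuration variables into lookup arrays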
  q_global_status = query(@query_global_status)
  q_global_status.each do |row|
    new_row = transform_row(row)
    global_stats.push(new_row)
  end

  q_variables = query(@query_variables)
  q_variables.each do |row|
    new_row = transform_row(row)
    env_var_stats.push(new_row)
  end
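
  # Currently open client connections (Threads_connected)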
  num_connections = Hash.new
  num_connections_value = get_value(global_stats, "Threads_connected")
  num_connections.store("Number of Connections", num_connections_value)
  final_records.push(num_connections)
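
  # Configured connection limit (max_connections)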
  max_connections = Hash.new
  max_connections_value = get_value(env_var_stats, "max_connections")
  max_connections.store("Maximum Allowed Connections", max_connections_value)
  final_records.push(max_connections)
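
  # Connections that failed or were aborted during connect (Aborted_connects)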
  failed_connections = Hash.new
  failed_connections_value = get_value(global_stats, "Aborted_connects")
  failed_connections.store("Aborted Connections", failed_connections_value)
  final_records.push(failed_connections)
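
  # Server uptime in seconds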
  uptime = Hash.new
  uptime_value = get_value(global_stats, "Uptime")
  uptime.store("Uptime", uptime_value)
  final_records.push(uptime)
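
  # Key cache read efficiency, derived from Key_reads and Key_read_requests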
  key_cache_hit_pct = Hash.new
  key_reads = get_value(global_stats, 'Key_reads')
  key_read_requests = get_value(global_stats, 'Key_read_requests')
  value = 0.0
  if key_read_requests.to_f != 0
    value = (key_reads.to_f / key_read_requests.to_f) * 100
  end
  key_cache_hit_pct.store("Key Cache Hit Pct", value.round(2))
  final_records.push(key_cache_hit_pct)
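
  # Worst-case RAM usage: key buffer plus per-connection read and sort buffers at the connection limit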
  server_max_ram = Hash.new
  key_buffer_size = get_value(env_var_stats, 'key_buffer_size')
  read_buffer_size = get_value(env_var_stats, 'read_buffer_size')
  sort_buffer_size = get_value(env_var_stats, 'sort_buffer_size')
  max_connections_limit = get_value(env_var_stats, 'max_connections')
  value = key_buffer_size.to_f + (read_buffer_size.to_f + sort_buffer_size.to_f) * max_connections_limit.to_f
  server_max_ram.store("MySQL Worst Case RAM Usage", value.round(2))
  final_records.push(server_max_ram)
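
  # Total on-disk size of the databases, one record per row returned by the size query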
  q_size_all_db = query(@query_sizeof_all_databases)
  q_size_all_db.each do |row|
    s_database = Hash.new
    s_database.store("MySQL Server Disk Usage In Bytes", row["Size (Bytes)"])
    final_records.push(s_database)
  end
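
  # Slow Query Pct: Slow_queries as a share of total queries; Queries is rebuilt from
  # Questions and the prepared-statement counters when the server does not report it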
  slow_query_pct = Hash.new
  queries = get_value(global_stats, "Queries")
  if queries == -1
    questions = get_value(global_stats, "Questions")
    com_stmt_close = get_value(global_stats, "Com_stmt_close")
    com_stmt_reset = get_value(global_stats, "Com_stmt_reset")
    com_stmt_prepare = get_value(global_stats, "Com_stmt_prepare")
    queries = questions.to_i + com_stmt_close.to_i + com_stmt_reset.to_i + com_stmt_prepare.to_i
  end
  slow_queries = get_value(global_stats, "Slow_queries")
  value = 0.0
  if queries.to_f != 0
    value = (slow_queries.to_f / queries.to_f) * 100
  end
  slow_query_pct.store("Slow Query Pct", value.round(2))
  final_records.push(slow_query_pct)
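
  # Key cache write efficiency, derived from Key_writes and Key_write_requests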
  key_cache_write_pct = Hash.new
  key_writes = get_value(global_stats, "Key_writes")
  key_write_requests = get_value(global_stats, "Key_write_requests")
  value = 0.0
  if key_write_requests.to_f != 0
    value = (key_writes.to_f / key_write_requests.to_f) * 100
  end
  key_cache_write_pct.store("Key Cache Write Pct", value.round(2))
  final_records.push(key_cache_write_pct)
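
  # Query cache hit percentage: cache hits relative to hits plus non-cached SELECTs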
  query_cache_hit_pct = Hash.new
  qcache_hits = get_value(global_stats, "Qcache_hits")
  com_select = get_value(global_stats, "Com_select")
  value = 0.0
  if (qcache_hits.to_f + com_select.to_f) != 0
    value = qcache_hits.to_f / (qcache_hits.to_f + com_select.to_f) * 100
  end
  query_cache_hit_pct.store("Query Cache Hit Pct", value.round(2))
  final_records.push(query_cache_hit_pct)
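
  # Query cache prunes caused by low memory, as a share of all queries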
  query_cache_prunes_pct = Hash.new
  qcache_prunes = get_value(global_stats, "Qcache_lowmem_prunes")
  value = 0.0
  if queries.to_f != 0
    value = (qcache_prunes.to_f / queries.to_f) * 100
  end
  query_cache_prunes_pct.store("Query Cache Low memory Prunes", value.round(2))
  final_records.push(query_cache_prunes_pct)
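
  # Table cache hit percentage: open tables versus tables opened since startup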
  table_hit_pct = Hash.new
  open_tables = get_value(global_stats, "Open_tables")
  opened_tables = get_value(global_stats, "Opened_tables")
  value = 0.0
  if opened_tables.to_f != 0
    value = open_tables.to_f / opened_tables.to_f * 100
  end
  table_hit_pct.store("Table Cache Hit Pct", value.round(2))
  final_records.push(table_hit_pct)
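
  # Table lock contention: share of lock requests that had to wait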
  table_lock_pct = Hash.new
  table_lock_waited = get_value(global_stats, "Table_locks_waited")
  table_lock_immediate = get_value(global_stats, "Table_locks_immediate")
  value = 0.0
  if (table_lock_waited.to_f + table_lock_immediate.to_f) != 0
    value = table_lock_waited.to_f / (table_lock_waited.to_f + table_lock_immediate.to_f) * 100
  end
  table_lock_pct.store("Table Lock Contention Pct", value.round(2))
  final_records.push(table_lock_pct)
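
  # InnoDB buffer pool hit percentage, derived from buffer pool reads and read requests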
  idb_hit_pct = Hash.new
  innodb_buffer_pool_reads = get_value(global_stats, "Innodb_buffer_pool_reads")
  innodb_buffer_pool_read_requests = get_value(global_stats, "Innodb_buffer_pool_read_requests")
  value = 0.0
  if (innodb_buffer_pool_reads.to_f + innodb_buffer_pool_read_requests.to_f) != 0
    value = innodb_buffer_pool_reads.to_f / (innodb_buffer_pool_reads.to_f + innodb_buffer_pool_read_requests.to_f) * 100
  end
  idb_hit_pct.store("InnoDB Buffer Pool Hit Percent", value.round(2))
  final_records.push(idb_hit_pct)
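
  # InnoDB buffer pool utilization: data pages versus total pages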
  idb_use_pct = Hash.new
  innodb_buffer_pool_pages_data = get_value(global_stats, "Innodb_buffer_pool_pages_data")
  innodb_buffer_pool_pages_total = get_value(global_stats, "Innodb_buffer_pool_pages_total")
  value = 0.0
  if innodb_buffer_pool_pages_total.to_f != 0
    value = innodb_buffer_pool_pages_data.to_f / innodb_buffer_pool_pages_total.to_f * 100
  end
  idb_use_pct.store("InnoDB Buffer Pool Percent Use", value.round(2))
  final_records.push(idb_use_pct)
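
  # Full table scan percentage: sequential row reads versus all Handler row reads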
  full_table_pct = Hash.new
  handler_read_rnd = get_value(global_stats, "Handler_read_rnd")
  handler_read_first = get_value(global_stats, "Handler_read_first")
  handler_read_key = get_value(global_stats, "Handler_read_key")
  handler_read_next = get_value(global_stats, "Handler_read_next")
  handler_read_prev = get_value(global_stats, "Handler_read_prev")
  handler_read_rnd_next = get_value(global_stats, "Handler_read_rnd_next")
  full_scan_reads = handler_read_rnd.to_f + handler_read_rnd_next.to_f
  all_row_access = handler_read_rnd.to_f + handler_read_first.to_f + handler_read_key.to_f + handler_read_next.to_f + handler_read_prev.to_f + handler_read_rnd_next.to_f
  value = 0.0
  if all_row_access != 0
    value = full_scan_reads / all_row_access * 100
  end
  full_table_pct.store("Full Table Scan Pct", value.round(2))
  final_records.push(full_table_pct)

  return final_records
end