# tools/lldbmacros/scheduler.py
def ShowScheduler(cmd_args=None):
    """ Routine to print information of all psets and processors
        Usage: showscheduler
    """
    node = addressof(kern.globals.pset_node0)
    show_grrr = 0
    show_priority_runq = 0
    show_priority_pset_runq = 0
    show_group_pset_runq = 0
    show_clutch = 0
    show_edge = 0
    sched_string = str(kern.globals.sched_string)
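    # Each scheduling policy publishes its name in sched_string; use it to
    # decide which runqueue summaries this configuration actually maintains.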
    if sched_string == "traditional":
        show_priority_runq = 1
    elif sched_string == "traditional_with_pset_runqueue":
        show_priority_pset_runq = 1
    elif sched_string == "grrr":
        show_grrr = 1
    elif sched_string == "multiq":
        show_priority_runq = 1
        show_group_pset_runq = 1
    elif sched_string == "dualq":
        show_priority_pset_runq = 1
        show_priority_runq = 1
    elif sched_string == "amp":
        show_priority_pset_runq = 1
        show_priority_runq = 1
    elif sched_string == "clutch":
        show_clutch = 1
    elif sched_string == "edge":
        show_edge = 1
    else:
        print "Unknown sched_string {:s}".format(sched_string)

    print "Scheduler: {:s}\n".format(sched_string)
    if show_clutch == 0 and show_edge == 0:
        run_buckets = kern.globals.sched_run_buckets
        run_count = run_buckets[GetEnumValue('sched_bucket_t::TH_BUCKET_RUN')]
        fixpri_count = run_buckets[GetEnumValue('sched_bucket_t::TH_BUCKET_FIXPRI')]
        share_fg_count = run_buckets[GetEnumValue('sched_bucket_t::TH_BUCKET_SHARE_FG')]
        share_df_count = run_buckets[GetEnumValue('sched_bucket_t::TH_BUCKET_SHARE_DF')]
        share_ut_count = run_buckets[GetEnumValue('sched_bucket_t::TH_BUCKET_SHARE_UT')]
        share_bg_count = run_buckets[GetEnumValue('sched_bucket_t::TH_BUCKET_SHARE_BG')]
        print "Processors: {g.processor_avail_count:d} Runnable threads: {:d} Fixpri threads: {:d}\n".format(run_count, fixpri_count, g=kern.globals)
        print "FG Timeshare threads: {:d} DF Timeshare threads: {:d} UT Timeshare threads: {:d} BG Timeshare threads: {:d}\n".format(share_fg_count, share_df_count, share_ut_count, share_bg_count)
    processor_offline = GetEnumValue('processor_state_t::PROCESSOR_OFF_LINE')
    processor_idle = GetEnumValue('processor_state_t::PROCESSOR_IDLE')
    processor_dispatching = GetEnumValue('processor_state_t::PROCESSOR_DISPATCHING')
    processor_running = GetEnumValue('processor_state_t::PROCESSOR_RUNNING')
    if show_group_pset_runq:
        if hasattr(kern.globals, "multiq_sanity_check"):
            print "multiq scheduler config: deep-drain {g.deep_drain:d}, ceiling {g.drain_ceiling:d}, depth limit {g.drain_depth_limit:d}, band limit {g.drain_band_limit:d}, sanity check {g.multiq_sanity_check:d}\n".format(g=kern.globals)
        else:
            print "multiq scheduler config: deep-drain {g.deep_drain:d}, ceiling {g.drain_ceiling:d}, depth limit {g.drain_depth_limit:d}, band limit {g.drain_band_limit:d}\n".format(g=kern.globals)
        # Create a group->task mapping
        task_map = {}
        for task in kern.tasks:
            task_map[unsigned(task.sched_group)] = task
        for task in kern.terminated_tasks:
            task_map[unsigned(task.sched_group)] = task

    print " \n"
    while node != 0:
        pset = node.psets
        pset = kern.GetValueFromAddress(unsigned(pset), 'struct processor_set *')

        while pset != 0:
            print "Processor Set {: <#012x} Count {:d} (cpu_id {:<#x}-{:<#x})\n".format(pset,
                unsigned(pset.cpu_set_count), pset.cpu_set_low, pset.cpu_set_hi)

            rt_runq = kern.GetValueFromAddress(unsigned(addressof(pset.rt_runq)), 'struct rt_queue *')
            ShowRTRunQSummary(rt_runq)

            if show_priority_pset_runq:
                runq = kern.GetValueFromAddress(unsigned(addressof(pset.pset_runq)), 'struct run_queue *')
                ShowRunQSummary(runq)

            if show_group_pset_runq:
                print "Main Runq:\n"
                runq = kern.GetValueFromAddress(unsigned(addressof(pset.pset_runq)), 'struct run_queue *')
                ShowGroupSetSummary(runq, task_map)
                print "All Groups:\n"
                # TODO: Possibly output task header for each group
                for group in IterateQueue(kern.globals.sched_groups, "sched_group_t", "sched_groups"):
                    if group.runq.count != 0:
                        task = task_map.get(unsigned(group), "Unknown task!")
                        print "Group {: <#012x} Task {: <#012x}\n".format(unsigned(group), unsigned(task))
                        ShowRunQSummary(group.runq)
            print " \n"

            processor_array = kern.globals.processor_array

            print "Active Processors:\n"
            active_bitmap = int(pset.cpu_state_map[processor_dispatching]) | int(pset.cpu_state_map[processor_running])
            for cpuid in IterateBitmap(active_bitmap):
                processor = processor_array[cpuid]
                if processor != 0:
                    print " " + GetProcessorSummary(processor)
                    ShowActiveThread(processor)

                    if show_priority_runq:
                        runq = processor.runq
                        ShowRunQSummary(runq)
                    if show_grrr:
                        grrr_runq = processor.grrr_runq
                        ShowGrrrSummary(grrr_runq)
            print " \n"

            print "Idle Processors:\n"
            idle_bitmap = int(pset.cpu_state_map[processor_idle]) & int(pset.primary_map)
            for cpuid in IterateBitmap(idle_bitmap):
                processor = processor_array[cpuid]
                if processor != 0:
                    print " " + GetProcessorSummary(processor)
                    ShowActiveThread(processor)

                    if show_priority_runq:
                        ShowRunQSummary(processor.runq)
            print " \n"

            print "Idle Secondary Processors:\n"
            idle_bitmap = int(pset.cpu_state_map[processor_idle]) & ~(int(pset.primary_map))
            for cpuid in IterateBitmap(idle_bitmap):
                processor = processor_array[cpuid]
                if processor != 0:
                    print " " + GetProcessorSummary(processor)
                    ShowActiveThread(processor)

                    if show_priority_runq:
                        # ShowRunQSummary prints directly and returns nothing,
                        # so call it rather than printing its (None) result.
                        ShowRunQSummary(processor.runq)
            print " \n"

            print "Other Processors:\n"
            other_bitmap = 0
            for i in range(processor_offline, processor_idle):
                other_bitmap |= int(pset.cpu_state_map[i])
            other_bitmap &= int(pset.cpu_bitmask)
            for cpuid in IterateBitmap(other_bitmap):
                processor = processor_array[cpuid]
                if processor != 0:
                    print " " + GetProcessorSummary(processor)
                    ShowActiveThread(processor)

                    if show_priority_runq:
                        ShowRunQSummary(processor.runq)
            print " \n"
            if show_clutch or show_edge:
                cluster_type = "SMP"
                if pset.pset_type == 1:
                    cluster_type = "E"
                elif pset.pset_type == 2:
                    cluster_type = "P"
                print "=== Clutch Scheduler Hierarchy Pset{:d} (Type: {:s}) ===\n\n".format(pset.pset_cluster_id, cluster_type)
                ShowSchedClutchForPset(pset)

            pset = pset.pset_list

        node = node.node_list
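    # Global queue of threads that have crashed, for post-mortem inspection.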
print "\nCrashed Threads Queue: ({:<#012x})\n".format(addressof(kern.globals.crashed_threads_queue))
first = True
for thread in ParanoidIterateLinkageChain(kern.globals.crashed_threads_queue, "thread_t", "runq_links"):
if first:
print "\t" + GetThreadSummary.header
first = False
print "\t" + GetThreadSummary(thread)
    def dump_mpsc_thread_queue(name, head):
        head = addressof(head)
        print "\n{:s}: ({:<#012x})\n".format(name, head)
        first = True
        for thread in IterateMPSCQueue(head.mpd_queue, 'struct thread', 'mpsc_links'):
            if first:
                print "\t" + GetThreadSummary.header
                first = False
            print "\t" + GetThreadSummary(thread)
dump_mpsc_thread_queue("Terminate Queue", kern.globals.thread_terminate_queue)
dump_mpsc_thread_queue("Waiting For Kernel Stacks Queue", kern.globals.thread_stack_queue)
dump_mpsc_thread_queue("Thread Exception Queue", kern.globals.thread_exception_queue)
dump_mpsc_thread_queue("Thread Deallocate Queue", kern.globals.thread_deallocate_queue)
print "\n"
print "\n"