in internal/gocore/process.go [275:542]
func (p *Process) readSpans(mheap region, arenas []arena) {
var all int64
var text int64
var readOnly int64
var heap int64
var spanTable int64
var bitmap int64
var data int64
var bss int64 // also includes mmap'd regions
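// Classify every mapping by its permissions. Read-write anonymous
// memory is further attributed to the heap, pointer bitmap, span
// table, data segment, or bss.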
for _, m := range p.proc.Mappings() {
size := m.Size()
all += size
switch m.Perm() {
case core.Read:
readOnly += size
case core.Read | core.Exec:
text += size
case core.Read | core.Write:
if m.CopyOnWrite() {
// TODO: check whether m.file matches the text segment's file;
// that would distinguish the data segment from an mmapped file.
data += size
break
}
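// attribute adds the part of [x, y) that overlaps this mapping
// to *p and removes it from the not-yet-attributed size.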
attribute := func(x, y core.Address, p *int64) {
a := x.Max(m.Min())
b := y.Min(m.Max())
if a < b {
*p += b.Sub(a)
size -= b.Sub(a)
}
}
for _, a := range arenas {
attribute(a.heapMin, a.heapMax, &heap)
attribute(a.bitmapMin, a.bitmapMax, &bitmap)
attribute(a.spanTableMin, a.spanTableMax, &spanTable)
}
// Any other anonymous mapping is bss.
// TODO: how to distinguish original bss from anonymous mmap?
bss += size
default:
panic("weird mapping " + m.Perm().String())
}
}
if mheap.HasField("curArena") { // go1.13.3 and up
// Subtract the unallocated space in the
// current arena from the heap.
ca := mheap.Field("curArena")
unused := int64(ca.Field("end").Uintptr() - ca.Field("base").Uintptr())
heap -= unused
all -= unused
}
pageSize := p.rtConstants["_PageSize"]
// Span types
spanInUse := uint8(p.rtConstants["_MSpanInUse"])
spanManual := uint8(p.rtConstants["_MSpanManual"])
spanDead := uint8(p.rtConstants["_MSpanDead"])
spanFree := uint8(p.rtConstants["_MSpanFree"])
// Process spans.
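// gocore tracks heap metadata in heapInfoSize blocks; spans consist
// of whole pages, so the page size must be a multiple of heapInfoSize
// for a block never to straddle two spans.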
if pageSize%heapInfoSize != 0 {
panic(fmt.Sprintf("page size not a multiple of %d", heapInfoSize))
}
allspans := mheap.Field("allspans")
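// Accumulators for the span breakdown: in-use spans split into
// allocated objects, free slots, and per-span rounding waste;
// manually-managed spans (stacks, etc.) split into allocated and free;
// free spans split by whether their pages were released back to the OS.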
var freeSpanSize int64
var releasedSpanSize int64
var manualSpanSize int64
var inUseSpanSize int64
var allocSize int64
var freeSize int64
var spanRoundSize int64
var manualAllocSize int64
var manualFreeSize int64
n := allspans.SliceLen()
for i := int64(0); i < n; i++ {
s := allspans.SliceIndex(i).Deref()
min := core.Address(s.Field("startAddr").Uintptr())
elemSize := int64(s.Field("elemsize").Uintptr())
nPages := int64(s.Field("npages").Uintptr())
spanSize := nPages * pageSize
max := min.Add(spanSize)
for a := min; a != max; a = a.Add(pageSize) {
if !p.proc.Readable(a) {
// Pages that are allocated but not yet touched, or that have
// been MADV_DONTNEEDed, are sometimes not written to the core
// file. Don't count such pages toward space usage (otherwise
// the heap can look larger than the total memory used).
spanSize -= pageSize
}
}
st := s.Field("state")
if st.IsStruct() && st.HasField("s") { // go1.14+
st = st.Field("s")
}
switch st.Uint8() {
case spanInUse:
inUseSpanSize += spanSize
n := int64(s.Field("nelems").Uintptr())
// An object is allocated if it is marked as
// allocated or it is below freeindex.
x := s.Field("allocBits").Address()
alloc := make([]bool, n)
for i := int64(0); i < n; i++ {
alloc[i] = p.proc.ReadUint8(x.Add(i/8))>>uint(i%8)&1 != 0
}
k := int64(s.Field("freeindex").Uintptr())
for i := int64(0); i < k; i++ {
alloc[i] = true
}
for i := int64(0); i < n; i++ {
if alloc[i] {
allocSize += elemSize
} else {
freeSize += elemSize
}
}
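// Whatever is left after packing nelems objects of elemsize
// into the span is rounding waste.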
spanRoundSize += spanSize - n*elemSize
// Initialize heap info records for all in-use spans.
for a := min; a < max; a += heapInfoSize {
h := p.allocHeapInfo(a)
h.base = min
h.size = elemSize
}
// Process special records.
for sp := s.Field("specials"); sp.Address() != 0; sp = sp.Field("next") {
sp = sp.Deref() // *special to special
if sp.Field("kind").Uint8() != uint8(p.rtConstants["_KindSpecialFinalizer"]) {
// All other specials (just profile records) can't point into the heap.
continue
}
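// The special's offset field is the object's offset from the
// start of the span.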
obj := min.Add(int64(sp.Field("offset").Uint16()))
p.globals = append(p.globals,
&Root{
Name: fmt.Sprintf("finalizer for %x", obj),
Addr: sp.a,
Type: p.findType("runtime.specialfinalizer"),
Frame: nil,
})
// TODO: these aren't really "globals", as they
// are kept alive by the object they reference being alive.
// But we have no way of adding edges from an object to
// the corresponding finalizer data, so we punt on that thorny
// issue for now.
}
case spanFree:
freeSpanSize += spanSize
if s.HasField("npreleased") { // go 1.11 and earlier
nReleased := int64(s.Field("npreleased").Uintptr())
releasedSpanSize += nReleased * pageSize
} else { // go 1.12 and beyond
if s.Field("scavenged").Bool() {
releasedSpanSize += spanSize
}
}
case spanDead:
// These are just deallocated span descriptors. They use no heap.
case spanManual:
manualSpanSize += spanSize
manualAllocSize += spanSize
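// Walk the span's manual free list; every entry on it is a free
// object, so move its bytes from the allocated to the free total.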
for x := core.Address(s.Field("manualFreeList").Cast("uintptr").Uintptr()); x != 0; x = p.proc.ReadPtr(x) {
manualAllocSize -= elemSize
manualFreeSize += elemSize
}
}
}
if mheap.HasField("pages") { // go1.14+
// There are no longer "free" mspans to represent unused pages.
// Instead, there are just holes in the pagemap into which we can allocate.
// Look through the page allocator and count the total free space.
// Also keep track of how much has been scavenged.
pages := mheap.Field("pages")
chunks := pages.Field("chunks")
arenaBaseOffset := p.getArenaBaseOffset()
pallocChunkBytes := p.rtConstants["pallocChunkBytes"]
pallocChunksL1Bits := p.rtConstants["pallocChunksL1Bits"]
pallocChunksL2Bits := p.rtConstants["pallocChunksL2Bits"]
inuse := pages.Field("inUse")
ranges := inuse.Field("ranges")
for i := int64(0); i < ranges.SliceLen(); i++ {
r := ranges.SliceIndex(i)
baseField := r.Field("base")
if baseField.IsStruct() { // go 1.15+
baseField = baseField.Field("a")
}
base := core.Address(baseField.Uintptr())
limitField := r.Field("limit")
if limitField.IsStruct() { // go 1.15+
limitField = limitField.Field("a")
}
limit := core.Address(limitField.Uintptr())
chunkBase := (int64(base) + arenaBaseOffset) / pallocChunkBytes
chunkLimit := (int64(limit) + arenaBaseOffset) / pallocChunkBytes
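// chunks is indexed by chunk number, split into an L1/L2 radix
// (a single level when pallocChunksL1Bits is 0). Each chunk's
// bitmaps cover pallocChunkBytes of address space; adding
// arenaBaseOffset above makes the chunk indexes zero-based.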
for chunkIdx := chunkBase; chunkIdx < chunkLimit; chunkIdx++ {
var l1, l2 int64
if pallocChunksL1Bits == 0 {
l2 = chunkIdx
} else {
l1 = chunkIdx >> uint(pallocChunksL2Bits)
l2 = chunkIdx & (1<<uint(pallocChunksL2Bits) - 1)
}
chunk := chunks.ArrayIndex(l1).Deref().ArrayIndex(l2)
// Count the free bits in this chunk.
alloc := chunk.Field("pallocBits")
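// A set bit in pallocBits means the page is allocated, so the
// free pages are the zero bits (hence the complement).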
for i := int64(0); i < pallocChunkBytes/pageSize/64; i++ {
freeSpanSize += int64(bits.OnesCount64(^alloc.ArrayIndex(i).Uint64())) * pageSize
}
// Count the scavenged bits in this chunk.
scavenged := chunk.Field("scavenged")
for i := int64(0); i < pallocChunkBytes/pageSize/64; i++ {
releasedSpanSize += int64(bits.OnesCount64(scavenged.ArrayIndex(i).Uint64())) * pageSize
}
}
}
// Also count pages in the page cache for each P.
allp := p.rtGlobals["allp"]
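// Each P caches a 64-page block: a set bit in cache marks a free
// page, and a set bit in scav marks a scavenged one.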
for i := int64(0); i < allp.SliceLen(); i++ {
pcache := allp.SliceIndex(i).Deref().Field("pcache")
freeSpanSize += int64(bits.OnesCount64(pcache.Field("cache").Uint64())) * pageSize
releasedSpanSize += int64(bits.OnesCount64(pcache.Field("scav").Uint64())) * pageSize
}
}
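// Assemble the memory usage tree. Every parent's size must equal
// the sum of its children's sizes; check below verifies that.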
p.stats = &Stats{"all", all, []*Stats{
&Stats{"text", text, nil},
&Stats{"readonly", readOnly, nil},
&Stats{"data", data, nil},
&Stats{"bss", bss, nil},
&Stats{"heap", heap, []*Stats{
&Stats{"in use spans", inUseSpanSize, []*Stats{
&Stats{"alloc", allocSize, nil},
&Stats{"free", freeSize, nil},
&Stats{"round", spanRoundSize, nil},
}},
&Stats{"manual spans", manualSpanSize, []*Stats{
&Stats{"alloc", manualAllocSize, nil},
&Stats{"free", manualFreeSize, nil},
}},
&Stats{"free spans", freeSpanSize, []*Stats{
&Stats{"retained", freeSpanSize - releasedSpanSize, nil},
&Stats{"released", releasedSpanSize, nil},
}},
}},
&Stats{"ptr bitmap", bitmap, nil},
&Stats{"span table", spanTable, nil},
}}
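// check panics if any node's size doesn't match the sum of its
// children's sizes.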
var check func(*Stats)
check = func(s *Stats) {
if len(s.Children) == 0 {
return
}
var sum int64
for _, c := range s.Children {
sum += c.Size
}
if sum != s.Size {
panic(fmt.Sprintf("check failed for %s: %d vs %d", s.Name, s.Size, sum))
}
for _, c := range s.Children {
check(c)
}
}
check(p.stats)
}