// The bootstrap sequence is: // // call osinit // call schedinit // make & queue new G // call runtime·mstart // // The new G calls runtime·main. funcschedinit() { // raceinit must be the first call to race detector. // In particular, it must be done before mallocinit below calls racemapshadow. //... gcinit() //... }
funcgcinit() { if unsafe.Sizeof(workbuf{}) != _WorkbufSize { throw("size of Workbuf is suboptimal") }
// No sweep on the first cycle. mheap_.sweepdone = 1
// Set a reasonable initial GC trigger. 核心关注与这个!就是触发GC回收的阈值,默认是0.875 memstats.triggerRatio = 7 / 8.0
// Fake a heap_marked value so it looks like a trigger at // heapminimum is the appropriate growth from heap_marked. // This will go into computing the initial GC goal. memstats.heap_marked = uint64(float64(heapminimum) / (1 + memstats.triggerRatio))
// Set gcpercent from the environment. This will also compute // and set the GC trigger and goal. _ = setGCPercent(readgogc())
work.startSema = 1 work.markDoneSema = 1 }
1、triggerRatio
Set a reasonable initial GC trigger. 核心关注这个!就是触发 GC 回收的阈值,默认是 7/8 = 0.875,含义是:当堆中新增长的内存超过上一次 GC 后存活堆的 7/8(即堆增长到上次存活堆的 1+7/8 倍)时就触发回收
假如上次 GC 完成后存活的堆内存是 100M,现在是 200M,此时 (200M-100M)/100M = 100% > 7/8 = 87.5%,所以需要进行回收!
func(t gcTrigger)test()bool { if !memstats.enablegc || panicking != 0 || gcphase != _GCoff { returnfalse } switch t.kind { case gcTriggerHeap: // Non-atomic access to heap_live for performance. If // we are going to trigger on this, this thread just // atomically wrote heap_live anyway and we'll see our // own write. return memstats.heap_live >= memstats.gc_trigger // heap中存活的对象大于gc需要触发的阈值(这个阈值时上一次gc设置的) case gcTriggerTime: if gcpercent < 0 { returnfalse } lastgc := int64(atomic.Load64(&memstats.last_gc_nanotime)) return lastgc != 0 && t.now-lastgc > forcegcperiod // 当前时间与上次gc时间相差2分钟 case gcTriggerCycle: // t.n > work.cycles, but accounting for wraparound. returnint32(t.n-work.cycles) > 0 } returntrue }
1、周期性GC
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17
funcforcegchelper() { forcegc.g = getg() for { lock(&forcegc.lock) if forcegc.idle != 0 { throw("forcegc: phase error") } atomic.Store(&forcegc.idle, 1) goparkunlock(&forcegc.lock, waitReasonForceGGIdle, traceEvGoBlock, 1) // this goroutine is explicitly resumed by sysmon if debug.gctrace > 0 { println("GC forced") } // Time-triggered, fully concurrent. gcStart(gcTrigger{kind: gcTriggerTime, now: nanotime()}) } }
// forcegcperiod is the maximum time in nanoseconds between garbage
// collections. If we go this long without a garbage collection, one
// is forced to run.
//
// This is a variable for testing purposes. It normally doesn't change.
var forcegcperiod int64 = 120 * 1e9 // 2 minutes, in nanoseconds
// Always runs without a P, so write barriers are not allowed. // //go:nowritebarrierrec funcsysmon() { // 。。。。。。。。 // check if we need to force a GC // 要求第一符合gc周期,第二 forcegc.idle 不为0 if t := (gcTrigger{kind: gcTriggerTime, now: now}); t.test() && atomic.Load(&forcegc.idle) != 0 { lock(&forcegc.lock) forcegc.idle = 0 var list gList list.push(forcegc.g) injectglist(&list) unlock(&forcegc.lock) } ////。。。。。。。。 }
// Allocate an object of size bytes. // Small objects are allocated from the per-P cache's free lists. // Large objects (> 32 kB) are allocated straight from the heap. funcmallocgc(size uintptr, typ *_type, needzero bool)unsafe.Pointer { // ........ // 是否需要gc shouldhelpgc := false if size <= maxSmallSize { // 小于32k if noscan && size < maxTinySize { //小于16字节
} else { } else { var s *mspan shouldhelpgc = true systemstack(func() { s = largeAlloc(size, needzero, noscan) }) s.freeindex = 1 s.allocCount = 1 x = unsafe.Pointer(s.base()) size = s.elemsize } // 。。。。。。。 if shouldhelpgc { if t := (gcTrigger{kind: gcTriggerHeap}); t.test() { gcStart(t) } }
// GC runs a garbage collection and blocks the caller until the // garbage collection is complete. It may also block the entire // program. funcGC() { // We consider a cycle to be: sweep termination, mark, mark // termination, and sweep. This function shouldn't return // until a full cycle has been completed, from beginning to // end. Hence, we always want to finish up the current cycle // and start a new one. That means: // // 1. In sweep termination, mark, or mark termination of cycle // N, wait until mark termination N completes and transitions // to sweep N. // // 2. In sweep N, help with sweep N. // // At this point we can begin a full cycle N+1. // // 3. Trigger cycle N+1 by starting sweep termination N+1. // // 4. Wait for mark termination N+1 to complete. // // 5. Help with sweep N+1 until it's done. // // This all has to be written to deal with the fact that the // GC may move ahead on its own. For example, when we block // until mark termination N, we may wake up in cycle N+2.
// Wait until the current sweep termination, mark, and mark // termination complete. n := atomic.Load(&work.cycles) gcWaitOnMark(n)
// We're now in sweep N or later. Trigger GC cycle N+1, which // will first finish sweep N if necessary and then enter sweep // termination N+1. gcStart(gcTrigger{kind: gcTriggerCycle, n: n + 1})
// Wait for mark termination N+1 to complete. gcWaitOnMark(n + 1)
gctrace: setting gctrace=1 causes the garbage collector to emit a single line to standard error at each collection, summarizing the amount of memory collected and the length of the pause. The format of this line is subject to change. Currently, it is: gc # @#s #%: #+#+# ms clock, #+#/#/#+# ms cpu, #->#-># MB, # MB goal, # P where the fields are as follows: gc # the GC number, incremented at each GC @#s time in seconds since program start,距离程序的启动时间,单位s #% percentage of time spent in GC since program start,花费时间的百分比 #+...+# wall-clock/CPU times for the phases of the GC, cpu花费的时间 #->#-># MB heap size at GC start, at GC end, and live heap,gc开始-gc结束-存活的对象 (堆内存) # MB goal goal heap size (全局堆内存大小) # P number of processors used p的数量
# HELP go_gc_duration_seconds A summary of the pause duration of garbage collection cycles. # TYPE go_gc_duration_seconds summary go_gc_duration_seconds{quantile="0"} 5.6827e-05 go_gc_duration_seconds{quantile="0.25"} 8.1842e-05 go_gc_duration_seconds{quantile="0.5"} 9.8818e-05 go_gc_duration_seconds{quantile="0.75"} 0.000125499 go_gc_duration_seconds{quantile="1"} 0.000555719 go_gc_duration_seconds_sum 0.247680951 go_gc_duration_seconds_count 2366 # HELP go_goroutines Number of goroutines that currently exist. # TYPE go_goroutines gauge go_goroutines 50 # HELP go_info Information about the Go environment. # TYPE go_info gauge go_info{version="go1.13.5"} 1 # HELP go_memstats_alloc_bytes Number of bytes allocated and still in use. # TYPE go_memstats_alloc_bytes gauge go_memstats_alloc_bytes 8.338104e+06 # HELP go_memstats_alloc_bytes_total Total number of bytes allocated, even if freed. # TYPE go_memstats_alloc_bytes_total counter go_memstats_alloc_bytes_total 1.3874634688e+10 # HELP go_memstats_buck_hash_sys_bytes Number of bytes used by the profiling bucket hash table. # TYPE go_memstats_buck_hash_sys_bytes gauge go_memstats_buck_hash_sys_bytes 1.922436e+06 # HELP go_memstats_frees_total Total number of frees. # TYPE go_memstats_frees_total counter go_memstats_frees_total 8.9915565e+07 # HELP go_memstats_gc_cpu_fraction The fraction of this program's available CPU time used by the GC since the program started. # TYPE go_memstats_gc_cpu_fraction gauge go_memstats_gc_cpu_fraction 5.2633836319412915e-06 # HELP go_memstats_gc_sys_bytes Number of bytes used for garbage collection system metadata. # TYPE go_memstats_gc_sys_bytes gauge go_memstats_gc_sys_bytes 2.398208e+06 # HELP go_memstats_heap_alloc_bytes Number of heap bytes allocated and still in use. # TYPE go_memstats_heap_alloc_bytes gauge go_memstats_heap_alloc_bytes 8.338104e+06 # HELP go_memstats_heap_idle_bytes Number of heap bytes waiting to be used. 
# TYPE go_memstats_heap_idle_bytes gauge go_memstats_heap_idle_bytes 5.1625984e+07 # HELP go_memstats_heap_inuse_bytes Number of heap bytes that are in use. # TYPE go_memstats_heap_inuse_bytes gauge go_memstats_heap_inuse_bytes 1.0829824e+07 # HELP go_memstats_heap_objects Number of allocated objects. # TYPE go_memstats_heap_objects gauge go_memstats_heap_objects 42405 # HELP go_memstats_heap_released_bytes Number of heap bytes released to OS. # TYPE go_memstats_heap_released_bytes gauge go_memstats_heap_released_bytes 4.9709056e+07 # HELP go_memstats_heap_sys_bytes Number of heap bytes obtained from system. # TYPE go_memstats_heap_sys_bytes gauge go_memstats_heap_sys_bytes 6.2455808e+07 # HELP go_memstats_last_gc_time_seconds Number of seconds since 1970 of last garbage collection. # TYPE go_memstats_last_gc_time_seconds gauge go_memstats_last_gc_time_seconds 1.6172457774344466e+09 # HELP go_memstats_lookups_total Total number of pointer lookups. # TYPE go_memstats_lookups_total counter go_memstats_lookups_total 0 # HELP go_memstats_mallocs_total Total number of mallocs. # TYPE go_memstats_mallocs_total counter go_memstats_mallocs_total 8.995797e+07 # HELP go_memstats_mcache_inuse_bytes Number of bytes in use by mcache structures. # TYPE go_memstats_mcache_inuse_bytes gauge go_memstats_mcache_inuse_bytes 83328 # HELP go_memstats_mcache_sys_bytes Number of bytes used for mcache structures obtained from system. # TYPE go_memstats_mcache_sys_bytes gauge go_memstats_mcache_sys_bytes 98304 # HELP go_memstats_mspan_inuse_bytes Number of bytes in use by mspan structures. # TYPE go_memstats_mspan_inuse_bytes gauge go_memstats_mspan_inuse_bytes 142528 # HELP go_memstats_mspan_sys_bytes Number of bytes used for mspan structures obtained from system. # TYPE go_memstats_mspan_sys_bytes gauge go_memstats_mspan_sys_bytes 196608 # HELP go_memstats_next_gc_bytes Number of heap bytes when next garbage collection will take place. 
# TYPE go_memstats_next_gc_bytes gauge go_memstats_next_gc_bytes 1.0362992e+07 # HELP go_memstats_other_sys_bytes Number of bytes used for other system allocations. # TYPE go_memstats_other_sys_bytes gauge go_memstats_other_sys_bytes 5.542772e+06 # HELP go_memstats_stack_inuse_bytes Number of bytes in use by the stack allocator. # TYPE go_memstats_stack_inuse_bytes gauge go_memstats_stack_inuse_bytes 4.653056e+06 # HELP go_memstats_stack_sys_bytes Number of bytes obtained from system for stack allocator. # TYPE go_memstats_stack_sys_bytes gauge go_memstats_stack_sys_bytes 4.653056e+06 # HELP go_memstats_sys_bytes Number of bytes obtained from system. # TYPE go_memstats_sys_bytes gauge go_memstats_sys_bytes 7.7267192e+07 # HELP go_threads Number of OS threads created. # TYPE go_threads gauge go_threads 48 # HELP process_cpu_seconds_total Total user and system CPU time spent in seconds. # TYPE process_cpu_seconds_total counter process_cpu_seconds_total 3875.24 # HELP process_max_fds Maximum number of open file descriptors. # TYPE process_max_fds gauge process_max_fds 1.048576e+06 # HELP process_open_fds Number of open file descriptors. # TYPE process_open_fds gauge process_open_fds 29 # HELP process_resident_memory_bytes Resident memory size in bytes. # TYPE process_resident_memory_bytes gauge process_resident_memory_bytes 7.5575296e+07 # HELP process_start_time_seconds Start time of the process since unix epoch in seconds. # TYPE process_start_time_seconds gauge process_start_time_seconds 1.61709350436e+09 # HELP process_virtual_memory_bytes Virtual memory size in bytes. # TYPE process_virtual_memory_bytes gauge process_virtual_memory_bytes 2.018103296e+09 # HELP process_virtual_memory_max_bytes Maximum amount of virtual memory available in bytes. # TYPE process_virtual_memory_max_bytes gauge process_virtual_memory_max_bytes -1 # HELP promhttp_metric_handler_requests_in_flight Current number of scrapes being served. 
# TYPE promhttp_metric_handler_requests_in_flight gauge promhttp_metric_handler_requests_in_flight 1 # HELP promhttp_metric_handler_requests_total Total number of scrapes by HTTP status code. # TYPE promhttp_metric_handler_requests_total counter promhttp_metric_handler_requests_total{code="200"} 25373 promhttp_metric_handler_requests_total{code="500"} 0 promhttp_metric_handler_requests_total{code="503"} 0
➜ ~ curl http://localhost:8080/debug/pprof/goroutine\?debug\=1 -v goroutine profile: total 6
➜ ~ curl http://localhost:8080/debug/pprof/threadcreate\?debug\=1 -v threadcreate profile: total 14
3、配合go tool pprof 命令
主要需要加参数 seconds,默认收集30s,下面例子是10s
1 2 3 4
➜ ~ go tool pprof -http ":8888" http://localhost:62316/debug/pprof/allocs\?seconds\=10 Fetching profile over HTTP from http://localhost:62316/debug/pprof/allocs?seconds=10 Saved profile in /Users/fanhaodong/pprof/pprof.alloc_objects.alloc_space.inuse_objects.inuse_space.011.pb.gz Serving web UI on http://localhost:8888
可以看到采集10s最后效果
1、比如查找goroutine 使用率较高!
我们首先要查询 个数
1 2
➜ ~ curl http://localhost:62316/debug/pprof/goroutine\?debug\=1 goroutine profile: total 100
为啥这么多呢?
1 2 3 4
➜ ~ go tool pprof -http ":8888" http://localhost:62316/debug/pprof/goroutine\?seconds\=10 Fetching profile over HTTP from http://localhost:62316/debug/pprof/goroutine?seconds=10 Saved profile in /Users/fanhaodong/pprof/pprof.goroutine.001.pb.gz Serving web UI on http://localhost:8888
可以查看 svg 调用图,前提是你是在本地;如果在线上服务器,显然是不能直接打开浏览器的!线上需要执行
1
➜ ~ go tool pprof http://localhost:62316/debug/pprof/goroutine\?seconds\=10
var ( buffer = makeArr(240 << 20) // 240m ,意思就是堆2*240M是不会触发gc的 ) var ( appender = make([][]byte, 0, 100) ) // GODEBUG=gctrace=1 funcmain() { count := 0 alloc := 4 << 20 for x := 0; x < 100; x++ { appender = append(appender, makeArr(alloc)) count++ time.Sleep(time.Millisecond * 10)
// 到50就回收历史的数据,那么内存达到 480m的时候就会触发gc,所以这个程序结束后内存使用一般是在480m左右 if x == 50 { for index := range appender { appender[index] = nil } println(len(appender)) } } }