Source file: src/runtime/proc.go (package runtime)
1 // Copyright 2014 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file.
4
5 package runtime
6
7 import (
8 "internal/abi"
9 "internal/cpu"
10 "internal/goexperiment"
11 "runtime/internal/atomic"
12 "runtime/internal/sys"
13 "unsafe"
14 )
15
16 // set using cmd/go/internal/modload.ModInfoProg
17 var modinfo string
18
19 // Goroutine scheduler
20 // The scheduler's job is to distribute ready-to-run goroutines over worker threads.
21 //
22 // The main concepts are:
23 // G - goroutine.
24 // M - worker thread, or machine.
25 // P - processor, a resource that is required to execute Go code.
26 //     M must have an associated P to execute Go code, however it can be
27 //     blocked or in a syscall w/o an associated P.
28 //
29 // Design doc at https://golang.org/s/go11sched.
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113 var (
114 m0 m
115 g0 g
116 mcache0 *mcache
117 raceprocctx0 uintptr
118 )
119
120 //go:linkname runtime_inittask runtime..inittask
121 var runtime_inittask initTask
122
123 //go:linkname main_inittask main..inittask
124 var main_inittask initTask
125
126 // main_init_done is a signal used by cgocallbackg that initialization
127 // has been completed. It is made before _cgo_notify_runtime_init_done,
128 // so all cgo calls can rely on it existing. When main_init is complete,
129 // it is closed, meaning cgocallbackg can reliably receive from it.
130 var main_init_done chan bool
131
132 //go:linkname main_main main.main
133 func main_main()
134
135 // mainStarted indicates that the main M has started.
136 var mainStarted bool
137
138 // runtimeInitTime is the nanotime() at which the runtime started.
139 var runtimeInitTime int64
140
141 // Value to use for signal mask for newly created M's.
142 var initSigmask sigset
143
144 // The main goroutine.
145 func main() {
146 g := getg()
147
148
149
150 g.m.g0.racectx = 0
151
152 // Max stack size is 1 GB on 64-bit, 250 MB on 32-bit.
153 // Using decimal instead of binary GB and MB because
154 // they look nicer in the stack overflow failure message.
155 if sys.PtrSize == 8 {
156 maxstacksize = 1000000000
157 } else {
158 maxstacksize = 250000000
159 }
160
161
162
163
164 maxstackceiling = 2 * maxstacksize
165
166
167 mainStarted = true
168
169 if GOARCH != "wasm" {
170
171
172
173 atomic.Store(&sched.sysmonStarting, 1)
174 systemstack(func() {
175 newm(sysmon, nil, -1)
176 })
177 }
178
179 // Lock the main goroutine onto this, the main OS thread,
180 // during initialization. Most programs won't care, but a few
181 // do require certain calls to be made by the main thread.
182 // Those can arrange for main.main to run in the main thread
183 // by calling runtime.LockOSThread during initialization
184 // to preserve the lock.
185 lockOSThread()
186
187 if g.m != &m0 {
188 throw("runtime.main not on m0")
189 }
190 m0.doesPark = true
191
192 // Record when the world started.
193 // Must be before doInit for tracing init.
194 runtimeInitTime = nanotime()
195 if runtimeInitTime == 0 {
196 throw("nanotime returning zero")
197 }
198
199 if debug.inittrace != 0 {
200 inittrace.id = getg().goid
201 inittrace.active = true
202 }
203
204 doInit(&runtime_inittask)
205
206
207 needUnlock := true
208 defer func() {
209 if needUnlock {
210 unlockOSThread()
211 }
212 }()
213
214 gcenable()
215
216 main_init_done = make(chan bool)
217 if iscgo {
218 if _cgo_thread_start == nil {
219 throw("_cgo_thread_start missing")
220 }
221 if GOOS != "windows" {
222 if _cgo_setenv == nil {
223 throw("_cgo_setenv missing")
224 }
225 if _cgo_unsetenv == nil {
226 throw("_cgo_unsetenv missing")
227 }
228 }
229 if _cgo_notify_runtime_init_done == nil {
230 throw("_cgo_notify_runtime_init_done missing")
231 }
232
233
234 startTemplateThread()
235 cgocall(_cgo_notify_runtime_init_done, nil)
236 }
237
238 doInit(&main_inittask)
239
240
241
242 inittrace.active = false
243
244 close(main_init_done)
245
246 needUnlock = false
247 unlockOSThread()
248
249 if isarchive || islibrary {
250
251
252 return
253 }
254 fn := main_main
255 fn()
256 if raceenabled {
257 racefini()
258 }
259
260
261
262
263
264 if atomic.Load(&runningPanicDefers) != 0 {
265
266 for c := 0; c < 1000; c++ {
267 if atomic.Load(&runningPanicDefers) == 0 {
268 break
269 }
270 Gosched()
271 }
272 }
273 if atomic.Load(&panicking) != 0 {
274 gopark(nil, nil, waitReasonPanicWait, traceEvGoStop, 1)
275 }
276
277 exit(0)
278 for {
279 var x *int32
280 *x = 0
281 }
282 }
283
284
285
286 func os_beforeExit() {
287 if raceenabled {
288 racefini()
289 }
290 }
291
292
293 func init() {
294 go forcegchelper()
295 }
296
297 func forcegchelper() {
298 forcegc.g = getg()
299 lockInit(&forcegc.lock, lockRankForcegc)
300 for {
301 lock(&forcegc.lock)
302 if forcegc.idle != 0 {
303 throw("forcegc: phase error")
304 }
305 atomic.Store(&forcegc.idle, 1)
306 goparkunlock(&forcegc.lock, waitReasonForceGCIdle, traceEvGoBlock, 1)
307
308 if debug.gctrace > 0 {
309 println("GC forced")
310 }
311
312 gcStart(gcTrigger{kind: gcTriggerTime, now: nanotime()})
313 }
314 }
315
316
317
318 // Gosched yields the processor, allowing other goroutines to run. It does not
319 // suspend the current goroutine, so execution resumes automatically.
320 func Gosched() {
321 checkTimeouts()
322 mcall(gosched_m)
323 }
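
Gosched is public API. A minimal, self-contained sketch of cooperative yielding, assuming only the standard library; the identifiers below are illustrative, not from proc.go:

	package main

	import (
		"fmt"
		"runtime"
		"sync"
	)

	func main() {
		var wg sync.WaitGroup
		for id := 0; id < 2; id++ {
			wg.Add(1)
			go func(id int) {
				defer wg.Done()
				for step := 0; step < 3; step++ {
					fmt.Println("goroutine", id, "step", step)
					runtime.Gosched() // yield; this goroutine resumes automatically
				}
			}(id)
		}
		wg.Wait()
	}
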
324
325
326 // goschedguarded yields the processor like gosched, but also checks
327 // for forbidden states and opts out of the yield in those cases.
328 func goschedguarded() {
329 mcall(goschedguarded_m)
330 }
331
332
333
334 // Puts the current goroutine into a waiting state and calls unlockf on the
335 // system stack.
336 //
337 // If unlockf returns false, the goroutine is resumed.
338 //
339 // unlockf must not access this G's stack, as it may be moved between
340 // the call to gopark and the call to unlockf.
341 //
342 // Note that because unlockf is called after putting the G into a waiting
343 // state, the G may have already been readied by the time unlockf is called,
344 // unless there is a race to ready the G.
345 //
346 // Reason explains why the goroutine has been parked. It is displayed in stack
347 // traces and heap dumps. Reasons should be unique and descriptive. Do not
348 // re-use reasons, and add new ones.
349 func gopark(unlockf func(*g, unsafe.Pointer) bool, lock unsafe.Pointer, reason waitReason, traceEv byte, traceskip int) {
350 if reason != waitReasonSleep {
351 checkTimeouts()
352 }
353 mp := acquirem()
354 gp := mp.curg
355 status := readgstatus(gp)
356 if status != _Grunning && status != _Gscanrunning {
357 throw("gopark: bad g status")
358 }
359 mp.waitlock = lock
360 mp.waitunlockf = unlockf
361 gp.waitreason = reason
362 mp.waittraceev = traceEv
363 mp.waittraceskip = traceskip
364 releasem(mp)
365
366 mcall(park_m)
367 }
368
369 // Puts the current goroutine into a waiting state and unlocks the lock.
370 // The goroutine can be made runnable again by calling goready(gp).
371 func goparkunlock(lock *mutex, reason waitReason, traceEv byte, traceskip int) {
372 gopark(parkunlock_c, unsafe.Pointer(lock), reason, traceEv, traceskip)
373 }
374
375 func goready(gp *g, traceskip int) {
376 systemstack(func() {
377 ready(gp, traceskip, true)
378 })
379 }
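
gopark and goready are runtime-internal, but the pattern they implement, atomically releasing a lock, blocking, and later being marked runnable by another thread, is what user code gets from sync.Cond. A hedged user-level analogue (illustrative names, not the runtime mechanism itself):

	package main

	import "sync"

	// gate mirrors the park/ready contract at user level: park blocks,
	// releasing the lock while waiting, and readyUp makes the waiter runnable.
	type gate struct {
		mu    sync.Mutex
		cond  *sync.Cond
		ready bool
	}

	func newGate() *gate {
		g := &gate{}
		g.cond = sync.NewCond(&g.mu)
		return g
	}

	func (g *gate) park() {
		g.mu.Lock()
		for !g.ready {
			g.cond.Wait() // drops mu while blocked, like gopark's unlockf contract
		}
		g.mu.Unlock()
	}

	func (g *gate) readyUp() {
		g.mu.Lock()
		g.ready = true
		g.mu.Unlock()
		g.cond.Signal() // analogous to goready marking the g runnable
	}

	func main() {
		g := newGate()
		done := make(chan struct{})
		go func() { g.park(); close(done) }()
		g.readyUp()
		<-done
	}
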
380
381
382 func acquireSudog() *sudog {
383 // Delicate dance: the semaphore implementation calls
384 // acquireSudog, acquireSudog calls new(sudog),
385 // new calls malloc, malloc can call the garbage collector,
386 // and the garbage collector calls the semaphore implementation
387 // in semacquire.
388 // Break the cycle by doing acquirem/releasem around new(sudog).
389 // The acquirem/releasem increments m.locks during new(sudog),
390 // preventing the garbage collector from being invoked.
391 mp := acquirem()
392 pp := mp.p.ptr()
393 if len(pp.sudogcache) == 0 {
394 lock(&sched.sudoglock)
395
396 for len(pp.sudogcache) < cap(pp.sudogcache)/2 && sched.sudogcache != nil {
397 s := sched.sudogcache
398 sched.sudogcache = s.next
399 s.next = nil
400 pp.sudogcache = append(pp.sudogcache, s)
401 }
402 unlock(&sched.sudoglock)
403
404 if len(pp.sudogcache) == 0 {
405 pp.sudogcache = append(pp.sudogcache, new(sudog))
406 }
407 }
408 n := len(pp.sudogcache)
409 s := pp.sudogcache[n-1]
410 pp.sudogcache[n-1] = nil
411 pp.sudogcache = pp.sudogcache[:n-1]
412 if s.elem != nil {
413 throw("acquireSudog: found s.elem != nil in cache")
414 }
415 releasem(mp)
416 return s
417 }
418
419
420 func releaseSudog(s *sudog) {
421 if s.elem != nil {
422 throw("runtime: sudog with non-nil elem")
423 }
424 if s.isSelect {
425 throw("runtime: sudog with non-false isSelect")
426 }
427 if s.next != nil {
428 throw("runtime: sudog with non-nil next")
429 }
430 if s.prev != nil {
431 throw("runtime: sudog with non-nil prev")
432 }
433 if s.waitlink != nil {
434 throw("runtime: sudog with non-nil waitlink")
435 }
436 if s.c != nil {
437 throw("runtime: sudog with non-nil c")
438 }
439 gp := getg()
440 if gp.param != nil {
441 throw("runtime: releaseSudog with non-nil gp.param")
442 }
443 mp := acquirem()
444 pp := mp.p.ptr()
445 if len(pp.sudogcache) == cap(pp.sudogcache) {
446
447 var first, last *sudog
448 for len(pp.sudogcache) > cap(pp.sudogcache)/2 {
449 n := len(pp.sudogcache)
450 p := pp.sudogcache[n-1]
451 pp.sudogcache[n-1] = nil
452 pp.sudogcache = pp.sudogcache[:n-1]
453 if first == nil {
454 first = p
455 } else {
456 last.next = p
457 }
458 last = p
459 }
460 lock(&sched.sudoglock)
461 last.next = sched.sudogcache
462 sched.sudogcache = first
463 unlock(&sched.sudoglock)
464 }
465 pp.sudogcache = append(pp.sudogcache, s)
466 releasem(mp)
467 }
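
acquireSudog and releaseSudog maintain a per-P cache that refills from, and spills half its contents to, a global linked list. A generic sketch of that two-level free-list shape, with illustrative types standing in for sudog, p, and sched:

	package main

	import "sync"

	type node struct{ next *node }

	var (
		globalMu   sync.Mutex
		globalFree *node // global overflow list, like sched.sudogcache
	)

	// localCache plays the role of the per-P slice cache.
	// Its capacity must be > 0 for the spill logic below to be safe.
	type localCache struct{ items []*node }

	func (c *localCache) get() *node {
		if len(c.items) == 0 {
			// Refill to half capacity from the global list, as acquireSudog does.
			globalMu.Lock()
			for len(c.items) < cap(c.items)/2 && globalFree != nil {
				n := globalFree
				globalFree = n.next
				n.next = nil
				c.items = append(c.items, n)
			}
			globalMu.Unlock()
			if len(c.items) == 0 {
				c.items = append(c.items, new(node))
			}
		}
		n := c.items[len(c.items)-1]
		c.items = c.items[:len(c.items)-1]
		return n
	}

	func (c *localCache) put(n *node) {
		if len(c.items) == cap(c.items) {
			// Spill half of the cache to the global list, as releaseSudog does.
			var first, last *node
			for len(c.items) > cap(c.items)/2 {
				p := c.items[len(c.items)-1]
				c.items = c.items[:len(c.items)-1]
				if first == nil {
					first = p
				} else {
					last.next = p
				}
				last = p
			}
			globalMu.Lock()
			last.next = globalFree
			globalFree = first
			globalMu.Unlock()
		}
		c.items = append(c.items, n)
	}

	func main() {
		c := &localCache{items: make([]*node, 0, 8)}
		n := c.get()
		c.put(n)
	}
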
468
469 // funcPC returns the entry PC of the function f.
470 // It assumes that f is a func value. Otherwise the behavior is undefined.
471 // CAREFUL: In programs with plugins, funcPC can return different values
472 // for the same function (because there are actually multiple copies of
473 // the same function in the address space). To be safe, don't use the
474 // results of this function in any == expression. It is only safe to
475 // use the result as an address at which to start executing code.
476 //go:nosplit
477 func funcPC(f interface{}) uintptr {
478 return *(*uintptr)(efaceOf(&f).data)
479 }
480
481
482 func badmcall(fn func(*g)) {
483 throw("runtime: mcall called on m->g0 stack")
484 }
485
486 func badmcall2(fn func(*g)) {
487 throw("runtime: mcall function returned")
488 }
489
490 func badreflectcall() {
491 panic(plainError("arg size to reflect.call more than 1GB"))
492 }
493
494 var badmorestackg0Msg = "fatal: morestack on g0\n"
495
496
497
498 func badmorestackg0() {
499 sp := stringStructOf(&badmorestackg0Msg)
500 write(2, sp.str, int32(sp.len))
501 }
502
503 var badmorestackgsignalMsg = "fatal: morestack on gsignal\n"
504
505
506
507 func badmorestackgsignal() {
508 sp := stringStructOf(&badmorestackgsignalMsg)
509 write(2, sp.str, int32(sp.len))
510 }
511
512
513 func badctxt() {
514 throw("ctxt != 0")
515 }
516
517 func lockedOSThread() bool {
518 gp := getg()
519 return gp.lockedm != 0 && gp.m.lockedg != 0
520 }
521
522 var (
523 // allgs contains all Gs ever created (including dead Gs), and thus
524 // never shrinks.
525 //
526 // Access via the slice is protected by allglock or stop-the-world.
527 // Readers that cannot take the lock may (carefully!) use the atomic
528 // variables below.
529 allglock mutex
530 allgs []*g
531
532 // allglen and allgptr are atomic variables that contain len(allgs) and
533 // &allgs[0] respectively. Proper ordering depends on totally-ordered
534 // loads and stores. Writes are protected by allglock.
535 //
536 // allgptr is updated before allglen. Readers should read allglen
537 // before allgptr to ensure that allglen is always <= len(allgptr). New
538 // Gs appended during the race can be missed. For a consistent view of
539 // all Gs, allglock must be held.
540 //
541 // allgptr copies should always be stored as a concrete type or
542 // unsafe.Pointer, not uintptr, to ensure that GC can still reach it
543 // even if it points to a stale array.
544 allglen uintptr
545 allgptr **g
546 )
547
548 func allgadd(gp *g) {
549 if readgstatus(gp) == _Gidle {
550 throw("allgadd: bad status Gidle")
551 }
552
553 lock(&allglock)
554 allgs = append(allgs, gp)
555 if &allgs[0] != allgptr {
556 atomicstorep(unsafe.Pointer(&allgptr), unsafe.Pointer(&allgs[0]))
557 }
558 atomic.Storeuintptr(&allglen, uintptr(len(allgs)))
559 unlock(&allglock)
560 }
561
562 // atomicAllG returns &allgs[0] and len(allgs) for use with atomicAllGIndex.
563 func atomicAllG() (**g, uintptr) {
564 length := atomic.Loaduintptr(&allglen)
565 ptr := (**g)(atomic.Loadp(unsafe.Pointer(&allgptr)))
566 return ptr, length
567 }
568
569 // atomicAllGIndex returns ptr[i] with the allgptr returned from atomicAllG.
570 func atomicAllGIndex(ptr **g, i uintptr) *g {
571 return *(**g)(add(unsafe.Pointer(ptr), i*sys.PtrSize))
572 }
573
574 // forEachG calls fn on every G from allgs.
575 //
576 // forEachG takes a lock to exclude concurrent addition of new Gs.
577 func forEachG(fn func(gp *g)) {
578 lock(&allglock)
579 for _, gp := range allgs {
580 fn(gp)
581 }
582 unlock(&allglock)
583 }
584
585 // forEachGRace calls fn on every G from allgs.
586 //
587 // forEachGRace avoids locking, but does not exclude addition of new Gs during
588 // execution, which may be missed.
589 func forEachGRace(fn func(gp *g)) {
590 ptr, length := atomicAllG()
591 for i := uintptr(0); i < length; i++ {
592 gp := atomicAllGIndex(ptr, i)
593 fn(gp)
594 }
595 return
596 }
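
allgadd publishes allgptr before allglen so that lock-free readers (forEachGRace) see a consistent, possibly slightly stale, prefix of the list. A simplified analogue that publishes a whole immutable snapshot through sync/atomic; the runtime instead publishes a pointer/length pair, so this sketch only shows the shape of the idea:

	package main

	import (
		"fmt"
		"sync"
		"sync/atomic"
	)

	// snapshotList publishes an append-only slice so readers can iterate
	// without the lock, in the spirit of allgs/allglen/allgptr.
	type snapshotList struct {
		mu   sync.Mutex
		view atomic.Value // holds []int; replaced wholesale on append
	}

	func (l *snapshotList) add(v int) {
		l.mu.Lock()
		old, _ := l.view.Load().([]int)
		next := make([]int, len(old)+1)
		copy(next, old)
		next[len(old)] = v
		l.view.Store(next) // readers see either the old or the new snapshot
		l.mu.Unlock()
	}

	func (l *snapshotList) forEachRace(fn func(int)) {
		view, _ := l.view.Load().([]int)
		for _, v := range view {
			fn(v)
		}
	}

	func main() {
		var l snapshotList
		l.add(1)
		l.add(2)
		l.forEachRace(func(v int) { fmt.Println(v) })
	}
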
597
598 const (
599
600
601 _GoidCacheBatch = 16
602 )
603
604 // cpuinit extracts the environment variable GODEBUG from the environment on
605 // Unix-like operating systems and calls internal/cpu.Initialize.
606 func cpuinit() {
607 const prefix = "GODEBUG="
608 var env string
609
610 switch GOOS {
611 case "aix", "darwin", "ios", "dragonfly", "freebsd", "netbsd", "openbsd", "illumos", "solaris", "linux":
612 cpu.DebugOptions = true
613
614
615
616
617 n := int32(0)
618 for argv_index(argv, argc+1+n) != nil {
619 n++
620 }
621
622 for i := int32(0); i < n; i++ {
623 p := argv_index(argv, argc+1+i)
624 s := *(*string)(unsafe.Pointer(&stringStruct{unsafe.Pointer(p), findnull(p)}))
625
626 if hasPrefix(s, prefix) {
627 env = gostring(p)[len(prefix):]
628 break
629 }
630 }
631 }
632
633 cpu.Initialize(env)
634
635
636
637 x86HasPOPCNT = cpu.X86.HasPOPCNT
638 x86HasSSE41 = cpu.X86.HasSSE41
639 x86HasFMA = cpu.X86.HasFMA
640
641 armHasVFPv4 = cpu.ARM.HasVFPv4
642
643 arm64HasATOMICS = cpu.ARM64.HasATOMICS
644 }
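
cpuinit walks the raw environ block by hand because it runs before goenvs has populated the usual environment accessors. A user-level equivalent of the same prefix scan, using os.Environ; the helper name is illustrative:

	package main

	import (
		"fmt"
		"os"
		"strings"
	)

	// findGODEBUG scans environment entries for the GODEBUG= prefix,
	// the same scan cpuinit performs over the raw environ block.
	func findGODEBUG(environ []string) string {
		const prefix = "GODEBUG="
		for _, s := range environ {
			if strings.HasPrefix(s, prefix) {
				return s[len(prefix):]
			}
		}
		return ""
	}

	func main() {
		fmt.Println(findGODEBUG(os.Environ()))
	}
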
645
646 // The bootstrap sequence is:
647 //
648 //	call osinit
649 //	call schedinit
650 //	make & queue new G
651 //	call runtime·mstart
652 //
653 // The new G calls runtime·main.
654 func schedinit() {
655 lockInit(&sched.lock, lockRankSched)
656 lockInit(&sched.sysmonlock, lockRankSysmon)
657 lockInit(&sched.deferlock, lockRankDefer)
658 lockInit(&sched.sudoglock, lockRankSudog)
659 lockInit(&deadlock, lockRankDeadlock)
660 lockInit(&paniclk, lockRankPanic)
661 lockInit(&allglock, lockRankAllg)
662 lockInit(&allpLock, lockRankAllp)
663 lockInit(&reflectOffs.lock, lockRankReflectOffs)
664 lockInit(&finlock, lockRankFin)
665 lockInit(&trace.bufLock, lockRankTraceBuf)
666 lockInit(&trace.stringsLock, lockRankTraceStrings)
667 lockInit(&trace.lock, lockRankTrace)
668 lockInit(&cpuprof.lock, lockRankCpuprof)
669 lockInit(&trace.stackTab.lock, lockRankTraceStackTab)
670
671
672
673 lockInit(&memstats.heapStats.noPLock, lockRankLeafRank)
674
675
676
677 _g_ := getg()
678 if raceenabled {
679 _g_.racectx, raceprocctx0 = raceinit()
680 }
681
682 sched.maxmcount = 10000
683
684
685 worldStopped()
686
687 moduledataverify()
688 stackinit()
689 mallocinit()
690 fastrandinit()
691 mcommoninit(_g_.m, -1)
692 cpuinit()
693 alginit()
694 modulesinit()
695 typelinksinit()
696 itabsinit()
697
698 sigsave(&_g_.m.sigmask)
699 initSigmask = _g_.m.sigmask
700
701 if offset := unsafe.Offsetof(sched.timeToRun); offset%8 != 0 {
702 println(offset)
703 throw("sched.timeToRun not aligned to 8 bytes")
704 }
705
706 goargs()
707 goenvs()
708 parsedebugvars()
709 gcinit()
710
711 lock(&sched.lock)
712 sched.lastpoll = uint64(nanotime())
713 procs := ncpu
714 if n, ok := atoi32(gogetenv("GOMAXPROCS")); ok && n > 0 {
715 procs = n
716 }
717 if procresize(procs) != nil {
718 throw("unknown runnable goroutine during bootstrap")
719 }
720 unlock(&sched.lock)
721
722
723 worldStarted()
724
725
726
727
728 if debug.cgocheck > 1 {
729 writeBarrier.cgo = true
730 writeBarrier.enabled = true
731 for _, p := range allp {
732 p.wbBuf.reset()
733 }
734 }
735
736 if buildVersion == "" {
737
738
739 buildVersion = "unknown"
740 }
741 if len(modinfo) == 1 {
742
743
744 modinfo = ""
745 }
746 }
747
748 func dumpgstatus(gp *g) {
749 _g_ := getg()
750 print("runtime: gp: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", readgstatus(gp), "\n")
751 print("runtime: g: g=", _g_, ", goid=", _g_.goid, ", g->atomicstatus=", readgstatus(_g_), "\n")
752 }
753
754 // sched.lock must be held.
755 func checkmcount() {
756 assertLockHeld(&sched.lock)
757
758 if mcount() > sched.maxmcount {
759 print("runtime: program exceeds ", sched.maxmcount, "-thread limit\n")
760 throw("thread exhaustion")
761 }
762 }
763
764 // mReserveID returns the next ID to use for a new m. This new m is immediately
765 // considered 'running' by checkdead.
766 //
767 // sched.lock must be held.
768 func mReserveID() int64 {
769 assertLockHeld(&sched.lock)
770
771 if sched.mnext+1 < sched.mnext {
772 throw("runtime: thread ID overflow")
773 }
774 id := sched.mnext
775 sched.mnext++
776 checkmcount()
777 return id
778 }
779
780
781 func mcommoninit(mp *m, id int64) {
782 _g_ := getg()
783
784
785 if _g_ != _g_.m.g0 {
786 callers(1, mp.createstack[:])
787 }
788
789 lock(&sched.lock)
790
791 if id >= 0 {
792 mp.id = id
793 } else {
794 mp.id = mReserveID()
795 }
796
797 mp.fastrand[0] = uint32(int64Hash(uint64(mp.id), fastrandseed))
798 mp.fastrand[1] = uint32(int64Hash(uint64(cputicks()), ^fastrandseed))
799 if mp.fastrand[0]|mp.fastrand[1] == 0 {
800 mp.fastrand[1] = 1
801 }
802
803 mpreinit(mp)
804 if mp.gsignal != nil {
805 mp.gsignal.stackguard1 = mp.gsignal.stack.lo + _StackGuard
806 }
807
808
809
810 mp.alllink = allm
811
812
813
814 atomicstorep(unsafe.Pointer(&allm), unsafe.Pointer(mp))
815 unlock(&sched.lock)
816
817
818 if iscgo || GOOS == "solaris" || GOOS == "illumos" || GOOS == "windows" {
819 mp.cgoCallers = new(cgoCallers)
820 }
821 }
822
823 var fastrandseed uintptr
824
825 func fastrandinit() {
826 s := (*[unsafe.Sizeof(fastrandseed)]byte)(unsafe.Pointer(&fastrandseed))[:]
827 getRandomData(s)
828 }
829
830 // Mark gp ready to run.
831 func ready(gp *g, traceskip int, next bool) {
832 if trace.enabled {
833 traceGoUnpark(gp, traceskip)
834 }
835
836 status := readgstatus(gp)
837
838
839 _g_ := getg()
840 mp := acquirem()
841 if status&^_Gscan != _Gwaiting {
842 dumpgstatus(gp)
843 throw("bad g->status in ready")
844 }
845
846
847 casgstatus(gp, _Gwaiting, _Grunnable)
848 runqput(_g_.m.p.ptr(), gp, next)
849 wakep()
850 releasem(mp)
851 }
852
853
854
855 const freezeStopWait = 0x7fffffff
856
857
858
859 var freezing uint32
860
861
862
863
864 func freezetheworld() {
865 atomic.Store(&freezing, 1)
866
867
868
869 for i := 0; i < 5; i++ {
870
871 sched.stopwait = freezeStopWait
872 atomic.Store(&sched.gcwaiting, 1)
873
874 if !preemptall() {
875 break
876 }
877 usleep(1000)
878 }
879
880 usleep(1000)
881 preemptall()
882 usleep(1000)
883 }
884
885
886
887
888 func readgstatus(gp *g) uint32 {
889 return atomic.Load(&gp.atomicstatus)
890 }
891
892
893
894
895
896 func casfrom_Gscanstatus(gp *g, oldval, newval uint32) {
897 success := false
898
899
900 switch oldval {
901 default:
902 print("runtime: casfrom_Gscanstatus bad oldval gp=", gp, ", oldval=", hex(oldval), ", newval=", hex(newval), "\n")
903 dumpgstatus(gp)
904 throw("casfrom_Gscanstatus:top gp->status is not in scan state")
905 case _Gscanrunnable,
906 _Gscanwaiting,
907 _Gscanrunning,
908 _Gscansyscall,
909 _Gscanpreempted:
910 if newval == oldval&^_Gscan {
911 success = atomic.Cas(&gp.atomicstatus, oldval, newval)
912 }
913 }
914 if !success {
915 print("runtime: casfrom_Gscanstatus failed gp=", gp, ", oldval=", hex(oldval), ", newval=", hex(newval), "\n")
916 dumpgstatus(gp)
917 throw("casfrom_Gscanstatus: gp->status is not in scan state")
918 }
919 releaseLockRank(lockRankGscan)
920 }
921
922
923
924 func castogscanstatus(gp *g, oldval, newval uint32) bool {
925 switch oldval {
926 case _Grunnable,
927 _Grunning,
928 _Gwaiting,
929 _Gsyscall:
930 if newval == oldval|_Gscan {
931 r := atomic.Cas(&gp.atomicstatus, oldval, newval)
932 if r {
933 acquireLockRank(lockRankGscan)
934 }
935 return r
936
937 }
938 }
939 print("runtime: castogscanstatus oldval=", hex(oldval), " newval=", hex(newval), "\n")
940 throw("castogscanstatus")
941 panic("not reached")
942 }
943
944
945
946
947
948
949 func casgstatus(gp *g, oldval, newval uint32) {
950 if (oldval&_Gscan != 0) || (newval&_Gscan != 0) || oldval == newval {
951 systemstack(func() {
952 print("runtime: casgstatus: oldval=", hex(oldval), " newval=", hex(newval), "\n")
953 throw("casgstatus: bad incoming values")
954 })
955 }
956
957 acquireLockRank(lockRankGscan)
958 releaseLockRank(lockRankGscan)
959
960
961 const yieldDelay = 5 * 1000
962 var nextYield int64
963
964
965
966 for i := 0; !atomic.Cas(&gp.atomicstatus, oldval, newval); i++ {
967 if oldval == _Gwaiting && gp.atomicstatus == _Grunnable {
968 throw("casgstatus: waiting for Gwaiting but is Grunnable")
969 }
970 if i == 0 {
971 nextYield = nanotime() + yieldDelay
972 }
973 if nanotime() < nextYield {
974 for x := 0; x < 10 && gp.atomicstatus != oldval; x++ {
975 procyield(1)
976 }
977 } else {
978 osyield()
979 nextYield = nanotime() + yieldDelay/2
980 }
981 }
982
983
984 if oldval == _Grunning {
985
986 if gp.trackingSeq%gTrackingPeriod == 0 {
987 gp.tracking = true
988 }
989 gp.trackingSeq++
990 }
991 if gp.tracking {
992 now := nanotime()
993 if oldval == _Grunnable {
994
995
996
997 gp.runnableTime += now - gp.runnableStamp
998 gp.runnableStamp = 0
999 }
1000 if newval == _Grunnable {
1001
1002
1003 gp.runnableStamp = now
1004 } else if newval == _Grunning {
1005
1006
1007
1008 gp.tracking = false
1009 sched.timeToRun.record(gp.runnableTime)
1010 gp.runnableTime = 0
1011 }
1012 }
1013 }
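
The loop in casgstatus spins briefly with procyield, then falls back to osyield with a halved delay. A hedged user-space rendering of that backoff shape; runtime.Gosched stands in for osyield, and the constants are illustrative:

	package main

	import (
		"runtime"
		"sync/atomic"
		"time"
	)

	// casWithBackoff spins briefly on failure, then yields, echoing the
	// loop shape in casgstatus (procyield a few times, then osyield).
	func casWithBackoff(addr *uint32, old, new uint32) {
		const yieldDelay = 5 * time.Microsecond
		var nextYield time.Time
		for i := 0; !atomic.CompareAndSwapUint32(addr, old, new); i++ {
			if i == 0 {
				nextYield = time.Now().Add(yieldDelay)
			}
			if time.Now().Before(nextYield) {
				for x := 0; x < 10 && atomic.LoadUint32(addr) != old; x++ {
					// busy-spin: in the runtime this is procyield(1)
				}
			} else {
				runtime.Gosched() // user-level stand-in for osyield
				nextYield = time.Now().Add(yieldDelay / 2)
			}
		}
	}

	func main() {
		v := uint32(1)
		casWithBackoff(&v, 1, 2)
	}
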
1014
1015
1016
1017
1018
1019
1020
1021 func casgcopystack(gp *g) uint32 {
1022 for {
1023 oldstatus := readgstatus(gp) &^ _Gscan
1024 if oldstatus != _Gwaiting && oldstatus != _Grunnable {
1025 throw("copystack: bad status, not Gwaiting or Grunnable")
1026 }
1027 if atomic.Cas(&gp.atomicstatus, oldstatus, _Gcopystack) {
1028 return oldstatus
1029 }
1030 }
1031 }
1032
1033
1034
1035
1036
1037 func casGToPreemptScan(gp *g, old, new uint32) {
1038 if old != _Grunning || new != _Gscan|_Gpreempted {
1039 throw("bad g transition")
1040 }
1041 acquireLockRank(lockRankGscan)
1042 for !atomic.Cas(&gp.atomicstatus, _Grunning, _Gscan|_Gpreempted) {
1043 }
1044 }
1045
1046
1047
1048
1049 func casGFromPreempted(gp *g, old, new uint32) bool {
1050 if old != _Gpreempted || new != _Gwaiting {
1051 throw("bad g transition")
1052 }
1053 return atomic.Cas(&gp.atomicstatus, _Gpreempted, _Gwaiting)
1054 }
1055
1056 // stopTheWorld stops all P's from executing goroutines, interrupting
1057 // all goroutines at GC safe points and records reason as the reason for
1058 // the stop. On return, only the current goroutine's P is running.
1059 // stopTheWorld must not be called from a system stack and the caller
1060 // must not hold worldsema. The caller must call startTheWorld when
1061 // other P's should resume executing.
1062 //
1063 // stopTheWorld is safe for multiple goroutines to call at
1064 // once. Each will execute its own stop, and the stops will
1065 // be serialized.
1066 //
1067 // This is also used by routines that do stack dumps. If the system is
1068 // in panic or being exited, this may not reliably stop all
1069 // goroutines.
1070 func stopTheWorld(reason string) {
1071 semacquire(&worldsema)
1072 gp := getg()
1073 gp.m.preemptoff = reason
1074 systemstack(func() {
1075
1076
1077
1078
1079
1080
1081
1082
1083
1084
1085 casgstatus(gp, _Grunning, _Gwaiting)
1086 stopTheWorldWithSema()
1087 casgstatus(gp, _Gwaiting, _Grunning)
1088 })
1089 }
1090
1091
1092 func startTheWorld() {
1093 systemstack(func() { startTheWorldWithSema(false) })
1094
1095
1096
1097
1098
1099
1100
1101
1102
1103
1104
1105
1106
1107
1108
1109
1110 mp := acquirem()
1111 mp.preemptoff = ""
1112 semrelease1(&worldsema, true, 0)
1113 releasem(mp)
1114 }
1115
1116
1117
1118
1119 func stopTheWorldGC(reason string) {
1120 semacquire(&gcsema)
1121 stopTheWorld(reason)
1122 }
1123
1124
1125 func startTheWorldGC() {
1126 startTheWorld()
1127 semrelease(&gcsema)
1128 }
1129
1130
1131 var worldsema uint32 = 1
1132
1133
1134
1135
1136
1137
1138
1139 var gcsema uint32 = 1
1140
1141
1142
1143
1144
1145
1146
1147
1148
1149
1150
1151
1152
1153
1154
1155
1156
1157
1158
1159
1160
1161
1162
1163 func stopTheWorldWithSema() {
1164 _g_ := getg()
1165
1166
1167
1168 if _g_.m.locks > 0 {
1169 throw("stopTheWorld: holding locks")
1170 }
1171
1172 lock(&sched.lock)
1173 sched.stopwait = gomaxprocs
1174 atomic.Store(&sched.gcwaiting, 1)
1175 preemptall()
1176
1177 _g_.m.p.ptr().status = _Pgcstop
1178 sched.stopwait--
1179
1180 for _, p := range allp {
1181 s := p.status
1182 if s == _Psyscall && atomic.Cas(&p.status, s, _Pgcstop) {
1183 if trace.enabled {
1184 traceGoSysBlock(p)
1185 traceProcStop(p)
1186 }
1187 p.syscalltick++
1188 sched.stopwait--
1189 }
1190 }
1191
1192 for {
1193 p := pidleget()
1194 if p == nil {
1195 break
1196 }
1197 p.status = _Pgcstop
1198 sched.stopwait--
1199 }
1200 wait := sched.stopwait > 0
1201 unlock(&sched.lock)
1202
1203
1204 if wait {
1205 for {
1206
1207 if notetsleep(&sched.stopnote, 100*1000) {
1208 noteclear(&sched.stopnote)
1209 break
1210 }
1211 preemptall()
1212 }
1213 }
1214
1215
1216 bad := ""
1217 if sched.stopwait != 0 {
1218 bad = "stopTheWorld: not stopped (stopwait != 0)"
1219 } else {
1220 for _, p := range allp {
1221 if p.status != _Pgcstop {
1222 bad = "stopTheWorld: not stopped (status != _Pgcstop)"
1223 }
1224 }
1225 }
1226 if atomic.Load(&freezing) != 0 {
1227
1228
1229
1230
1231 lock(&deadlock)
1232 lock(&deadlock)
1233 }
1234 if bad != "" {
1235 throw(bad)
1236 }
1237
1238 worldStopped()
1239 }
1240
1241 func startTheWorldWithSema(emitTraceEvent bool) int64 {
1242 assertWorldStopped()
1243
1244 mp := acquirem()
1245 if netpollinited() {
1246 list := netpoll(0)
1247 injectglist(&list)
1248 }
1249 lock(&sched.lock)
1250
1251 procs := gomaxprocs
1252 if newprocs != 0 {
1253 procs = newprocs
1254 newprocs = 0
1255 }
1256 p1 := procresize(procs)
1257 sched.gcwaiting = 0
1258 if sched.sysmonwait != 0 {
1259 sched.sysmonwait = 0
1260 notewakeup(&sched.sysmonnote)
1261 }
1262 unlock(&sched.lock)
1263
1264 worldStarted()
1265
1266 for p1 != nil {
1267 p := p1
1268 p1 = p1.link.ptr()
1269 if p.m != 0 {
1270 mp := p.m.ptr()
1271 p.m = 0
1272 if mp.nextp != 0 {
1273 throw("startTheWorld: inconsistent mp->nextp")
1274 }
1275 mp.nextp.set(p)
1276 notewakeup(&mp.park)
1277 } else {
1278
1279 newm(nil, p, -1)
1280 }
1281 }
1282
1283
1284 startTime := nanotime()
1285 if emitTraceEvent {
1286 traceGCSTWDone()
1287 }
1288
1289
1290
1291
1292 wakep()
1293
1294 releasem(mp)
1295
1296 return startTime
1297 }
1298
1299
1300
1301 func usesLibcall() bool {
1302 switch GOOS {
1303 case "aix", "darwin", "illumos", "ios", "solaris", "windows":
1304 return true
1305 case "openbsd":
1306 return GOARCH == "386" || GOARCH == "amd64" || GOARCH == "arm" || GOARCH == "arm64"
1307 }
1308 return false
1309 }
1310
1311
1312
1313 func mStackIsSystemAllocated() bool {
1314 switch GOOS {
1315 case "aix", "darwin", "plan9", "illumos", "ios", "solaris", "windows":
1316 return true
1317 case "openbsd":
1318 switch GOARCH {
1319 case "386", "amd64", "arm", "arm64":
1320 return true
1321 }
1322 }
1323 return false
1324 }
1325
1326
1327
1328 func mstart()
1329
1330
1331
1332
1333
1334
1335
1336
1337
1338
1339 func mstart0() {
1340 _g_ := getg()
1341
1342 osStack := _g_.stack.lo == 0
1343 if osStack {
1344
1345
1346
1347
1348
1349
1350
1351
1352 size := _g_.stack.hi
1353 if size == 0 {
1354 size = 8192 * sys.StackGuardMultiplier
1355 }
1356 _g_.stack.hi = uintptr(noescape(unsafe.Pointer(&size)))
1357 _g_.stack.lo = _g_.stack.hi - size + 1024
1358 }
1359
1360
1361 _g_.stackguard0 = _g_.stack.lo + _StackGuard
1362
1363
1364 _g_.stackguard1 = _g_.stackguard0
1365 mstart1()
1366
1367
1368 if mStackIsSystemAllocated() {
1369
1370
1371
1372 osStack = true
1373 }
1374 mexit(osStack)
1375 }
1376
1377
1378
1379
1380 func mstart1() {
1381 _g_ := getg()
1382
1383 if _g_ != _g_.m.g0 {
1384 throw("bad runtime·mstart")
1385 }
1386
1387
1388
1389
1390
1391
1392
1393 _g_.sched.g = guintptr(unsafe.Pointer(_g_))
1394 _g_.sched.pc = getcallerpc()
1395 _g_.sched.sp = getcallersp()
1396
1397 asminit()
1398 minit()
1399
1400
1401
1402 if _g_.m == &m0 {
1403 mstartm0()
1404 }
1405
1406 if fn := _g_.m.mstartfn; fn != nil {
1407 fn()
1408 }
1409
1410 if _g_.m != &m0 {
1411 acquirep(_g_.m.nextp.ptr())
1412 _g_.m.nextp = 0
1413 }
1414 schedule()
1415 }
1416
1417
1418
1419
1420
1421
1422
1423 func mstartm0() {
1424
1425
1426
1427 if (iscgo || GOOS == "windows") && !cgoHasExtraM {
1428 cgoHasExtraM = true
1429 newextram()
1430 }
1431 initsig(false)
1432 }
1433
1434
1435
1436
1437
1438 func mPark() {
1439 g := getg()
1440 for {
1441 notesleep(&g.m.park)
1442
1443
1444
1445 noteclear(&g.m.park)
1446 if !mDoFixup() {
1447 return
1448 }
1449 }
1450 }
1451
1452
1453
1454
1455
1456
1457
1458
1459
1460
1461
1462 func mexit(osStack bool) {
1463 g := getg()
1464 m := g.m
1465
1466 if m == &m0 {
1467
1468
1469
1470
1471
1472
1473
1474
1475
1476
1477
1478 handoffp(releasep())
1479 lock(&sched.lock)
1480 sched.nmfreed++
1481 checkdead()
1482 unlock(&sched.lock)
1483 mPark()
1484 throw("locked m0 woke up")
1485 }
1486
1487 sigblock(true)
1488 unminit()
1489
1490
1491 if m.gsignal != nil {
1492 stackfree(m.gsignal.stack)
1493
1494
1495
1496
1497 m.gsignal = nil
1498 }
1499
1500
1501 lock(&sched.lock)
1502 for pprev := &allm; *pprev != nil; pprev = &(*pprev).alllink {
1503 if *pprev == m {
1504 *pprev = m.alllink
1505 goto found
1506 }
1507 }
1508 throw("m not found in allm")
1509 found:
1510 if !osStack {
1511
1512
1513
1514
1515 atomic.Store(&m.freeWait, 1)
1516
1517
1518
1519
1520 m.freelink = sched.freem
1521 sched.freem = m
1522 }
1523 unlock(&sched.lock)
1524
1525 atomic.Xadd64(&ncgocall, int64(m.ncgocall))
1526
1527
1528 handoffp(releasep())
1529
1530
1531
1532
1533
1534 lock(&sched.lock)
1535 sched.nmfreed++
1536 checkdead()
1537 unlock(&sched.lock)
1538
1539 if GOOS == "darwin" || GOOS == "ios" {
1540
1541
1542 if atomic.Load(&m.signalPending) != 0 {
1543 atomic.Xadd(&pendingPreemptSignals, -1)
1544 }
1545 }
1546
1547
1548
1549 mdestroy(m)
1550
1551 if osStack {
1552
1553
1554 return
1555 }
1556
1557
1558
1559
1560
1561 exitThread(&m.freeWait)
1562 }
1563
1564
1565
1566
1567
1568
1569
1570
1571
1572
1573
1574
1575 func forEachP(fn func(*p)) {
1576 mp := acquirem()
1577 _p_ := getg().m.p.ptr()
1578
1579 lock(&sched.lock)
1580 if sched.safePointWait != 0 {
1581 throw("forEachP: sched.safePointWait != 0")
1582 }
1583 sched.safePointWait = gomaxprocs - 1
1584 sched.safePointFn = fn
1585
1586
1587 for _, p := range allp {
1588 if p != _p_ {
1589 atomic.Store(&p.runSafePointFn, 1)
1590 }
1591 }
1592 preemptall()
1593
1594
1595
1596
1597
1598
1599
1600 for p := sched.pidle.ptr(); p != nil; p = p.link.ptr() {
1601 if atomic.Cas(&p.runSafePointFn, 1, 0) {
1602 fn(p)
1603 sched.safePointWait--
1604 }
1605 }
1606
1607 wait := sched.safePointWait > 0
1608 unlock(&sched.lock)
1609
1610
1611 fn(_p_)
1612
1613
1614
1615 for _, p := range allp {
1616 s := p.status
1617 if s == _Psyscall && p.runSafePointFn == 1 && atomic.Cas(&p.status, s, _Pidle) {
1618 if trace.enabled {
1619 traceGoSysBlock(p)
1620 traceProcStop(p)
1621 }
1622 p.syscalltick++
1623 handoffp(p)
1624 }
1625 }
1626
1627
1628 if wait {
1629 for {
1630
1631
1632
1633
1634 if notetsleep(&sched.safePointNote, 100*1000) {
1635 noteclear(&sched.safePointNote)
1636 break
1637 }
1638 preemptall()
1639 }
1640 }
1641 if sched.safePointWait != 0 {
1642 throw("forEachP: not done")
1643 }
1644 for _, p := range allp {
1645 if p.runSafePointFn != 0 {
1646 throw("forEachP: P did not run fn")
1647 }
1648 }
1649
1650 lock(&sched.lock)
1651 sched.safePointFn = nil
1652 unlock(&sched.lock)
1653 releasem(mp)
1654 }
1655
1656
1657
1658
1659
1660
1661
1662
1663
1664
1665
1666
1667
1668
1669
1670 func syscall_runtime_doAllThreadsSyscall(fn func(bool) bool) {
1671 if iscgo {
1672 panic("doAllThreadsSyscall not supported with cgo enabled")
1673 }
1674 if fn == nil {
1675 return
1676 }
1677 for atomic.Load(&sched.sysmonStarting) != 0 {
1678 osyield()
1679 }
1680
1681
1682
1683
1684
1685
1686
1687
1688
1689
1690
1691 lockOSThread()
1692 var sigmask sigset
1693 sigsave(&sigmask)
1694 sigblock(false)
1695
1696 stopTheWorldGC("doAllThreadsSyscall")
1697 if atomic.Load(&newmHandoff.haveTemplateThread) != 0 {
1698
1699
1700 lock(&newmHandoff.lock)
1701 for !newmHandoff.waiting {
1702 unlock(&newmHandoff.lock)
1703 osyield()
1704 lock(&newmHandoff.lock)
1705 }
1706 unlock(&newmHandoff.lock)
1707 }
1708 if netpollinited() {
1709 netpollBreak()
1710 }
1711 sigRecvPrepareForFixup()
1712 _g_ := getg()
1713 if raceenabled {
1714
1715
1716 lock(&mFixupRace.lock)
1717 mFixupRace.ctx = _g_.racectx
1718 unlock(&mFixupRace.lock)
1719 }
1720 if ok := fn(true); ok {
1721 tid := _g_.m.procid
1722 for mp := allm; mp != nil; mp = mp.alllink {
1723 if mp.procid == tid {
1724
1725
1726 continue
1727 }
1728
1729
1730
1731
1732
1733
1734
1735 if mp.procid == 0 && !mp.doesPark {
1736
1737
1738
1739
1740 throw("unsupported runtime environment")
1741 }
1742
1743
1744
1745 lock(&mp.mFixup.lock)
1746 mp.mFixup.fn = fn
1747 atomic.Store(&mp.mFixup.used, 1)
1748 if mp.doesPark {
1749
1750
1751
1752
1753
1754 notewakeup(&mp.park)
1755 }
1756 unlock(&mp.mFixup.lock)
1757 }
1758 for {
1759 done := true
1760 for mp := allm; done && mp != nil; mp = mp.alllink {
1761 if mp.procid == tid {
1762 continue
1763 }
1764 done = atomic.Load(&mp.mFixup.used) == 0
1765 }
1766 if done {
1767 break
1768 }
1769
1770 lock(&sched.lock)
1771 if atomic.Load(&sched.sysmonwait) != 0 {
1772 atomic.Store(&sched.sysmonwait, 0)
1773 notewakeup(&sched.sysmonnote)
1774 }
1775 unlock(&sched.lock)
1776 lock(&newmHandoff.lock)
1777 if newmHandoff.waiting {
1778 newmHandoff.waiting = false
1779 notewakeup(&newmHandoff.wake)
1780 }
1781 unlock(&newmHandoff.lock)
1782 osyield()
1783 }
1784 }
1785 if raceenabled {
1786 lock(&mFixupRace.lock)
1787 mFixupRace.ctx = 0
1788 unlock(&mFixupRace.lock)
1789 }
1790 startTheWorldGC()
1791 msigrestore(sigmask)
1792 unlockOSThread()
1793 }
1794
1795
1796
1797
1798
1799
1800
1801
1802
1803
1804
1805
1806 func runSafePointFn() {
1807 p := getg().m.p.ptr()
1808
1809
1810
1811 if !atomic.Cas(&p.runSafePointFn, 1, 0) {
1812 return
1813 }
1814 sched.safePointFn(p)
1815 lock(&sched.lock)
1816 sched.safePointWait--
1817 if sched.safePointWait == 0 {
1818 notewakeup(&sched.safePointNote)
1819 }
1820 unlock(&sched.lock)
1821 }
1822
1823
1824
1825
1826 var cgoThreadStart unsafe.Pointer
1827
1828 type cgothreadstart struct {
1829 g guintptr
1830 tls *uint64
1831 fn unsafe.Pointer
1832 }
1833
1834
1835
1836
1837
1838
1839
1840
1841
1842
1843 func allocm(_p_ *p, fn func(), id int64) *m {
1844 _g_ := getg()
1845 acquirem()
1846 if _g_.m.p == 0 {
1847 acquirep(_p_)
1848 }
1849
1850
1851
1852 if sched.freem != nil {
1853 lock(&sched.lock)
1854 var newList *m
1855 for freem := sched.freem; freem != nil; {
1856 if freem.freeWait != 0 {
1857 next := freem.freelink
1858 freem.freelink = newList
1859 newList = freem
1860 freem = next
1861 continue
1862 }
1863
1864
1865
1866 systemstack(func() {
1867 stackfree(freem.g0.stack)
1868 })
1869 freem = freem.freelink
1870 }
1871 sched.freem = newList
1872 unlock(&sched.lock)
1873 }
1874
1875 mp := new(m)
1876 mp.mstartfn = fn
1877 mcommoninit(mp, id)
1878
1879
1880
1881 if iscgo || mStackIsSystemAllocated() {
1882 mp.g0 = malg(-1)
1883 } else {
1884 mp.g0 = malg(8192 * sys.StackGuardMultiplier)
1885 }
1886 mp.g0.m = mp
1887
1888 if _p_ == _g_.m.p.ptr() {
1889 releasep()
1890 }
1891 releasem(_g_.m)
1892
1893 return mp
1894 }
1895
1896
1897
1898
1899
1900
1901
1902
1903
1904
1905
1906
1907
1908
1909
1910
1911
1912
1913
1914
1915
1916
1917
1918
1919
1920
1921
1922
1923
1924
1925
1926
1927
1928
1929
1930 func needm() {
1931 if (iscgo || GOOS == "windows") && !cgoHasExtraM {
1932
1933
1934
1935
1936
1937
1938 write(2, unsafe.Pointer(&earlycgocallback[0]), int32(len(earlycgocallback)))
1939 exit(1)
1940 }
1941
1942
1943
1944
1945
1946
1947
1948
1949
1950 var sigmask sigset
1951 sigsave(&sigmask)
1952 sigblock(false)
1953
1954
1955
1956
1957
1958 mp := lockextra(false)
1959
1960
1961
1962
1963
1964
1965
1966
1967 mp.needextram = mp.schedlink == 0
1968 extraMCount--
1969 unlockextra(mp.schedlink.ptr())
1970
1971
1972 mp.sigmask = sigmask
1973
1974
1975
1976 osSetupTLS(mp)
1977
1978
1979
1980
1981
1982
1983 setg(mp.g0)
1984 _g_ := getg()
1985 _g_.stack.hi = getcallersp() + 1024
1986 _g_.stack.lo = getcallersp() - 32*1024
1987 _g_.stackguard0 = _g_.stack.lo + _StackGuard
1988
1989
1990 asminit()
1991 minit()
1992
1993
1994 casgstatus(mp.curg, _Gdead, _Gsyscall)
1995 atomic.Xadd(&sched.ngsys, -1)
1996 }
1997
1998 var earlycgocallback = []byte("fatal error: cgo callback before cgo call\n")
1999
2000
2001
2002
2003 func newextram() {
2004 c := atomic.Xchg(&extraMWaiters, 0)
2005 if c > 0 {
2006 for i := uint32(0); i < c; i++ {
2007 oneNewExtraM()
2008 }
2009 } else {
2010
2011 mp := lockextra(true)
2012 unlockextra(mp)
2013 if mp == nil {
2014 oneNewExtraM()
2015 }
2016 }
2017 }
2018
2019
2020 func oneNewExtraM() {
2021
2022
2023
2024
2025
2026 mp := allocm(nil, nil, -1)
2027 gp := malg(4096)
2028 gp.sched.pc = abi.FuncPCABI0(goexit) + sys.PCQuantum
2029 gp.sched.sp = gp.stack.hi
2030 gp.sched.sp -= 4 * sys.PtrSize
2031 gp.sched.lr = 0
2032 gp.sched.g = guintptr(unsafe.Pointer(gp))
2033 gp.syscallpc = gp.sched.pc
2034 gp.syscallsp = gp.sched.sp
2035 gp.stktopsp = gp.sched.sp
2036
2037
2038
2039
2040 casgstatus(gp, _Gidle, _Gdead)
2041 gp.m = mp
2042 mp.curg = gp
2043 mp.lockedInt++
2044 mp.lockedg.set(gp)
2045 gp.lockedm.set(mp)
2046 gp.goid = int64(atomic.Xadd64(&sched.goidgen, 1))
2047 if raceenabled {
2048 gp.racectx = racegostart(funcPC(newextram) + sys.PCQuantum)
2049 }
2050
2051 allgadd(gp)
2052
2053
2054
2055
2056
2057 atomic.Xadd(&sched.ngsys, +1)
2058
2059
2060 mnext := lockextra(true)
2061 mp.schedlink.set(mnext)
2062 extraMCount++
2063 unlockextra(mp)
2064 }
2065
2066
2067
2068
2069
2070
2071
2072
2073
2074
2075
2076
2077
2078
2079
2080
2081
2082
2083
2084
2085
2086
2087
2088
2089 func dropm() {
2090
2091
2092
2093 mp := getg().m
2094
2095
2096 casgstatus(mp.curg, _Gsyscall, _Gdead)
2097 mp.curg.preemptStop = false
2098 atomic.Xadd(&sched.ngsys, +1)
2099
2100
2101
2102
2103
2104 sigmask := mp.sigmask
2105 sigblock(false)
2106 unminit()
2107
2108 mnext := lockextra(true)
2109 extraMCount++
2110 mp.schedlink.set(mnext)
2111
2112 setg(nil)
2113
2114
2115 unlockextra(mp)
2116
2117 msigrestore(sigmask)
2118 }
2119
2120
2121 func getm() uintptr {
2122 return uintptr(unsafe.Pointer(getg().m))
2123 }
2124
2125 var extram uintptr
2126 var extraMCount uint32
2127 var extraMWaiters uint32
2128
2129
2130
2131
2132
2133
2134
2135 func lockextra(nilokay bool) *m {
2136 const locked = 1
2137
2138 incr := false
2139 for {
2140 old := atomic.Loaduintptr(&extram)
2141 if old == locked {
2142 osyield_no_g()
2143 continue
2144 }
2145 if old == 0 && !nilokay {
2146 if !incr {
2147
2148
2149
2150 atomic.Xadd(&extraMWaiters, 1)
2151 incr = true
2152 }
2153 usleep_no_g(1)
2154 continue
2155 }
2156 if atomic.Casuintptr(&extram, old, locked) {
2157 return (*m)(unsafe.Pointer(old))
2158 }
2159 osyield_no_g()
2160 continue
2161 }
2162 }
2163
2164
2165 func unlockextra(mp *m) {
2166 atomic.Storeuintptr(&extram, uintptr(unsafe.Pointer(mp)))
2167 }
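
lockextra treats the extram list head word as its own lock: the sentinel value 1 means locked, any other value is the list head. A minimal sketch of that pointer-word spinlock with an illustrative item type; note that real code must be careful with uintptr/unsafe.Pointer round-trips, which the runtime can get away with here because extra Ms are never moved by the GC:

	package main

	import (
		"runtime"
		"sync/atomic"
		"unsafe"
	)

	type item struct{ next *item }

	// head doubles as a lock: the sentinel value 1 means "locked",
	// mirroring how extram is guarded in lockextra/unlockextra.
	var head uintptr

	const locked = 1

	func lockList() *item {
		for {
			old := atomic.LoadUintptr(&head)
			if old == locked {
				runtime.Gosched() // stand-in for osyield_no_g
				continue
			}
			if atomic.CompareAndSwapUintptr(&head, old, locked) {
				return (*item)(unsafe.Pointer(old))
			}
		}
	}

	func unlockList(it *item) {
		atomic.StoreUintptr(&head, uintptr(unsafe.Pointer(it)))
	}

	func main() {
		it := lockList() // head starts at 0, i.e. an empty (nil) list
		unlockList(it)
	}
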
2168
2169
2170
2171 var execLock rwmutex
2172
2173
2174
2175
2176 var newmHandoff struct {
2177 lock mutex
2178
2179
2180
2181 newm muintptr
2182
2183
2184
2185 waiting bool
2186 wake note
2187
2188
2189
2190
2191 haveTemplateThread uint32
2192 }
2193
2194
2195
2196
2197
2198
2199
2200 func newm(fn func(), _p_ *p, id int64) {
2201 mp := allocm(_p_, fn, id)
2202 mp.doesPark = (_p_ != nil)
2203 mp.nextp.set(_p_)
2204 mp.sigmask = initSigmask
2205 if gp := getg(); gp != nil && gp.m != nil && (gp.m.lockedExt != 0 || gp.m.incgo) && GOOS != "plan9" {
2206
2207
2208
2209
2210
2211
2212
2213
2214
2215
2216
2217 lock(&newmHandoff.lock)
2218 if newmHandoff.haveTemplateThread == 0 {
2219 throw("on a locked thread with no template thread")
2220 }
2221 mp.schedlink = newmHandoff.newm
2222 newmHandoff.newm.set(mp)
2223 if newmHandoff.waiting {
2224 newmHandoff.waiting = false
2225 notewakeup(&newmHandoff.wake)
2226 }
2227 unlock(&newmHandoff.lock)
2228 return
2229 }
2230 newm1(mp)
2231 }
2232
2233 func newm1(mp *m) {
2234 if iscgo {
2235 var ts cgothreadstart
2236 if _cgo_thread_start == nil {
2237 throw("_cgo_thread_start missing")
2238 }
2239 ts.g.set(mp.g0)
2240 ts.tls = (*uint64)(unsafe.Pointer(&mp.tls[0]))
2241 ts.fn = unsafe.Pointer(funcPC(mstart))
2242 if msanenabled {
2243 msanwrite(unsafe.Pointer(&ts), unsafe.Sizeof(ts))
2244 }
2245 execLock.rlock()
2246 asmcgocall(_cgo_thread_start, unsafe.Pointer(&ts))
2247 execLock.runlock()
2248 return
2249 }
2250 execLock.rlock()
2251 newosproc(mp)
2252 execLock.runlock()
2253 }
2254
2255
2256
2257
2258
2259 func startTemplateThread() {
2260 if GOARCH == "wasm" {
2261 return
2262 }
2263
2264
2265
2266 mp := acquirem()
2267 if !atomic.Cas(&newmHandoff.haveTemplateThread, 0, 1) {
2268 releasem(mp)
2269 return
2270 }
2271 newm(templateThread, nil, -1)
2272 releasem(mp)
2273 }
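
startTemplateThread gates thread creation on a single CAS of haveTemplateThread from 0 to 1, so only the first caller pays the cost. The same run-once CAS gate in isolation (illustrative names):

	package main

	import (
		"fmt"
		"sync/atomic"
	)

	var started uint32

	// startOnce runs fn on the first call only, using the same CAS-gate
	// idiom startTemplateThread applies to haveTemplateThread.
	func startOnce(fn func()) {
		if !atomic.CompareAndSwapUint32(&started, 0, 1) {
			return
		}
		fn()
	}

	func main() {
		startOnce(func() { fmt.Println("template thread started") })
		startOnce(func() { fmt.Println("never printed") })
	}
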
2274
2275
2276
2277
2278
2279 var mFixupRace struct {
2280 lock mutex
2281 ctx uintptr
2282 }
2283
2284
2285
2286
2287
2288
2289
2290
2291
2292 func mDoFixup() bool {
2293 _g_ := getg()
2294 if used := atomic.Load(&_g_.m.mFixup.used); used == 0 {
2295 return false
2296 }
2297
2298
2299 var sigmask sigset
2300 sigsave(&sigmask)
2301 sigblock(false)
2302 lock(&_g_.m.mFixup.lock)
2303 fn := _g_.m.mFixup.fn
2304 if fn != nil {
2305 if gcphase != _GCoff {
2306
2307
2308
2309
2310
2311
2312
2313
2314
2315
2316 throw("GC must be disabled to protect validity of fn value")
2317 }
2318 if _g_.racectx != 0 || !raceenabled {
2319 fn(false)
2320 } else {
2321
2322
2323
2324
2325
2326 lock(&mFixupRace.lock)
2327 _g_.racectx = mFixupRace.ctx
2328 fn(false)
2329 _g_.racectx = 0
2330 unlock(&mFixupRace.lock)
2331 }
2332 *(*uintptr)(unsafe.Pointer(&_g_.m.mFixup.fn)) = 0
2333 atomic.Store(&_g_.m.mFixup.used, 0)
2334 }
2335 unlock(&_g_.m.mFixup.lock)
2336 msigrestore(sigmask)
2337 return fn != nil
2338 }
2339
2340
2341
2342
2343
2344
2345 func mDoFixupAndOSYield() {
2346 mDoFixup()
2347 osyield()
2348 }
2349
2350
2351
2352
2353
2354
2355
2356
2357
2358
2359
2360
2361
2362 func templateThread() {
2363 lock(&sched.lock)
2364 sched.nmsys++
2365 checkdead()
2366 unlock(&sched.lock)
2367
2368 for {
2369 lock(&newmHandoff.lock)
2370 for newmHandoff.newm != 0 {
2371 newm := newmHandoff.newm.ptr()
2372 newmHandoff.newm = 0
2373 unlock(&newmHandoff.lock)
2374 for newm != nil {
2375 next := newm.schedlink.ptr()
2376 newm.schedlink = 0
2377 newm1(newm)
2378 newm = next
2379 }
2380 lock(&newmHandoff.lock)
2381 }
2382 newmHandoff.waiting = true
2383 noteclear(&newmHandoff.wake)
2384 unlock(&newmHandoff.lock)
2385 notesleep(&newmHandoff.wake)
2386 mDoFixup()
2387 }
2388 }
2389
2390
2391
2392 func stopm() {
2393 _g_ := getg()
2394
2395 if _g_.m.locks != 0 {
2396 throw("stopm holding locks")
2397 }
2398 if _g_.m.p != 0 {
2399 throw("stopm holding p")
2400 }
2401 if _g_.m.spinning {
2402 throw("stopm spinning")
2403 }
2404
2405 lock(&sched.lock)
2406 mput(_g_.m)
2407 unlock(&sched.lock)
2408 mPark()
2409 acquirep(_g_.m.nextp.ptr())
2410 _g_.m.nextp = 0
2411 }
2412
2413 func mspinning() {
2414
2415 getg().m.spinning = true
2416 }
2417
2418
2419
2420
2421
2422
2423
2424
2425
2426
2427
2428
2429 func startm(_p_ *p, spinning bool) {
2430
2431
2432
2433
2434
2435
2436
2437
2438
2439
2440
2441
2442
2443
2444
2445
2446 mp := acquirem()
2447 lock(&sched.lock)
2448 if _p_ == nil {
2449 _p_ = pidleget()
2450 if _p_ == nil {
2451 unlock(&sched.lock)
2452 if spinning {
2453
2454
2455 if int32(atomic.Xadd(&sched.nmspinning, -1)) < 0 {
2456 throw("startm: negative nmspinning")
2457 }
2458 }
2459 releasem(mp)
2460 return
2461 }
2462 }
2463 nmp := mget()
2464 if nmp == nil {
2465
2466
2467
2468
2469
2470
2471
2472
2473
2474
2475
2476
2477 id := mReserveID()
2478 unlock(&sched.lock)
2479
2480 var fn func()
2481 if spinning {
2482
2483 fn = mspinning
2484 }
2485 newm(fn, _p_, id)
2486
2487
2488 releasem(mp)
2489 return
2490 }
2491 unlock(&sched.lock)
2492 if nmp.spinning {
2493 throw("startm: m is spinning")
2494 }
2495 if nmp.nextp != 0 {
2496 throw("startm: m has p")
2497 }
2498 if spinning && !runqempty(_p_) {
2499 throw("startm: p has runnable gs")
2500 }
2501
2502 nmp.spinning = spinning
2503 nmp.nextp.set(_p_)
2504 notewakeup(&nmp.park)
2505
2506
2507 releasem(mp)
2508 }
2509
2510
2511
2512
2513 func handoffp(_p_ *p) {
2514
2515
2516
2517
2518 if !runqempty(_p_) || sched.runqsize != 0 {
2519 startm(_p_, false)
2520 return
2521 }
2522
2523 if gcBlackenEnabled != 0 && gcMarkWorkAvailable(_p_) {
2524 startm(_p_, false)
2525 return
2526 }
2527
2528
2529 if atomic.Load(&sched.nmspinning)+atomic.Load(&sched.npidle) == 0 && atomic.Cas(&sched.nmspinning, 0, 1) {
2530 startm(_p_, true)
2531 return
2532 }
2533 lock(&sched.lock)
2534 if sched.gcwaiting != 0 {
2535 _p_.status = _Pgcstop
2536 sched.stopwait--
2537 if sched.stopwait == 0 {
2538 notewakeup(&sched.stopnote)
2539 }
2540 unlock(&sched.lock)
2541 return
2542 }
2543 if _p_.runSafePointFn != 0 && atomic.Cas(&_p_.runSafePointFn, 1, 0) {
2544 sched.safePointFn(_p_)
2545 sched.safePointWait--
2546 if sched.safePointWait == 0 {
2547 notewakeup(&sched.safePointNote)
2548 }
2549 }
2550 if sched.runqsize != 0 {
2551 unlock(&sched.lock)
2552 startm(_p_, false)
2553 return
2554 }
2555
2556
2557 if sched.npidle == uint32(gomaxprocs-1) && atomic.Load64(&sched.lastpoll) != 0 {
2558 unlock(&sched.lock)
2559 startm(_p_, false)
2560 return
2561 }
2562
2563
2564
2565 when := nobarrierWakeTime(_p_)
2566 pidleput(_p_)
2567 unlock(&sched.lock)
2568
2569 if when != 0 {
2570 wakeNetPoller(when)
2571 }
2572 }
2573
2574
2575
2576 func wakep() {
2577 if atomic.Load(&sched.npidle) == 0 {
2578 return
2579 }
2580
2581 if atomic.Load(&sched.nmspinning) != 0 || !atomic.Cas(&sched.nmspinning, 0, 1) {
2582 return
2583 }
2584 startm(nil, true)
2585 }
2586
2587
2588
2589 func stoplockedm() {
2590 _g_ := getg()
2591
2592 if _g_.m.lockedg == 0 || _g_.m.lockedg.ptr().lockedm.ptr() != _g_.m {
2593 throw("stoplockedm: inconsistent locking")
2594 }
2595 if _g_.m.p != 0 {
2596
2597 _p_ := releasep()
2598 handoffp(_p_)
2599 }
2600 incidlelocked(1)
2601
2602 mPark()
2603 status := readgstatus(_g_.m.lockedg.ptr())
2604 if status&^_Gscan != _Grunnable {
2605 print("runtime:stoplockedm: lockedg (atomicstatus=", status, ") is not Grunnable or Gscanrunnable\n")
2606 dumpgstatus(_g_.m.lockedg.ptr())
2607 throw("stoplockedm: not runnable")
2608 }
2609 acquirep(_g_.m.nextp.ptr())
2610 _g_.m.nextp = 0
2611 }
2612
2613
2614
2615
2616 func startlockedm(gp *g) {
2617 _g_ := getg()
2618
2619 mp := gp.lockedm.ptr()
2620 if mp == _g_.m {
2621 throw("startlockedm: locked to me")
2622 }
2623 if mp.nextp != 0 {
2624 throw("startlockedm: m has p")
2625 }
2626
2627 incidlelocked(-1)
2628 _p_ := releasep()
2629 mp.nextp.set(_p_)
2630 notewakeup(&mp.park)
2631 stopm()
2632 }
2633
2634
2635
2636 func gcstopm() {
2637 _g_ := getg()
2638
2639 if sched.gcwaiting == 0 {
2640 throw("gcstopm: not waiting for gc")
2641 }
2642 if _g_.m.spinning {
2643 _g_.m.spinning = false
2644
2645
2646 if int32(atomic.Xadd(&sched.nmspinning, -1)) < 0 {
2647 throw("gcstopm: negative nmspinning")
2648 }
2649 }
2650 _p_ := releasep()
2651 lock(&sched.lock)
2652 _p_.status = _Pgcstop
2653 sched.stopwait--
2654 if sched.stopwait == 0 {
2655 notewakeup(&sched.stopnote)
2656 }
2657 unlock(&sched.lock)
2658 stopm()
2659 }
2660
2661
2662
2663
2664
2665
2666
2667
2668
2669
2670 func execute(gp *g, inheritTime bool) {
2671 _g_ := getg()
2672
2673
2674
2675 _g_.m.curg = gp
2676 gp.m = _g_.m
2677 casgstatus(gp, _Grunnable, _Grunning)
2678 gp.waitsince = 0
2679 gp.preempt = false
2680 gp.stackguard0 = gp.stack.lo + _StackGuard
2681 if !inheritTime {
2682 _g_.m.p.ptr().schedtick++
2683 }
2684
2685
2686 hz := sched.profilehz
2687 if _g_.m.profilehz != hz {
2688 setThreadCPUProfiler(hz)
2689 }
2690
2691 if trace.enabled {
2692
2693
2694 if gp.syscallsp != 0 && gp.sysblocktraced {
2695 traceGoSysExit(gp.sysexitticks)
2696 }
2697 traceGoStart()
2698 }
2699
2700 gogo(&gp.sched)
2701 }
2702
2703
2704
2705 func findrunnable() (gp *g, inheritTime bool) {
2706 _g_ := getg()
2707
2708
2709
2710
2711
2712 top:
2713 _p_ := _g_.m.p.ptr()
2714 if sched.gcwaiting != 0 {
2715 gcstopm()
2716 goto top
2717 }
2718 if _p_.runSafePointFn != 0 {
2719 runSafePointFn()
2720 }
2721
2722 now, pollUntil, _ := checkTimers(_p_, 0)
2723
2724 if fingwait && fingwake {
2725 if gp := wakefing(); gp != nil {
2726 ready(gp, 0, true)
2727 }
2728 }
2729 if *cgo_yield != nil {
2730 asmcgocall(*cgo_yield, nil)
2731 }
2732
2733
2734 if gp, inheritTime := runqget(_p_); gp != nil {
2735 return gp, inheritTime
2736 }
2737
2738
2739 if sched.runqsize != 0 {
2740 lock(&sched.lock)
2741 gp := globrunqget(_p_, 0)
2742 unlock(&sched.lock)
2743 if gp != nil {
2744 return gp, false
2745 }
2746 }
2747
2748
2749
2750
2751
2752
2753
2754
2755 if netpollinited() && atomic.Load(&netpollWaiters) > 0 && atomic.Load64(&sched.lastpoll) != 0 {
2756 if list := netpoll(0); !list.empty() {
2757 gp := list.pop()
2758 injectglist(&list)
2759 casgstatus(gp, _Gwaiting, _Grunnable)
2760 if trace.enabled {
2761 traceGoUnpark(gp, 0)
2762 }
2763 return gp, false
2764 }
2765 }
2766
2767
2768
2769
2770
2771
2772 procs := uint32(gomaxprocs)
2773 if _g_.m.spinning || 2*atomic.Load(&sched.nmspinning) < procs-atomic.Load(&sched.npidle) {
2774 if !_g_.m.spinning {
2775 _g_.m.spinning = true
2776 atomic.Xadd(&sched.nmspinning, 1)
2777 }
2778
2779 gp, inheritTime, tnow, w, newWork := stealWork(now)
2780 now = tnow
2781 if gp != nil {
2782
2783 return gp, inheritTime
2784 }
2785 if newWork {
2786
2787
2788 goto top
2789 }
2790 if w != 0 && (pollUntil == 0 || w < pollUntil) {
2791
2792 pollUntil = w
2793 }
2794 }
2795
2796
2797
2798
2799
2800
2801 if gcBlackenEnabled != 0 && gcMarkWorkAvailable(_p_) {
2802 node := (*gcBgMarkWorkerNode)(gcBgMarkWorkerPool.pop())
2803 if node != nil {
2804 _p_.gcMarkWorkerMode = gcMarkWorkerIdleMode
2805 gp := node.gp.ptr()
2806 casgstatus(gp, _Gwaiting, _Grunnable)
2807 if trace.enabled {
2808 traceGoUnpark(gp, 0)
2809 }
2810 return gp, false
2811 }
2812 }
2813
2814
2815
2816
2817
2818 gp, otherReady := beforeIdle(now, pollUntil)
2819 if gp != nil {
2820 casgstatus(gp, _Gwaiting, _Grunnable)
2821 if trace.enabled {
2822 traceGoUnpark(gp, 0)
2823 }
2824 return gp, false
2825 }
2826 if otherReady {
2827 goto top
2828 }
2829
2830
2831
2832
2833
2834 allpSnapshot := allp
2835
2836
2837 idlepMaskSnapshot := idlepMask
2838 timerpMaskSnapshot := timerpMask
2839
2840
2841 lock(&sched.lock)
2842 if sched.gcwaiting != 0 || _p_.runSafePointFn != 0 {
2843 unlock(&sched.lock)
2844 goto top
2845 }
2846 if sched.runqsize != 0 {
2847 gp := globrunqget(_p_, 0)
2848 unlock(&sched.lock)
2849 return gp, false
2850 }
2851 if releasep() != _p_ {
2852 throw("findrunnable: wrong p")
2853 }
2854 pidleput(_p_)
2855 unlock(&sched.lock)
2856
2857
2858
2859
2860
2861
2862
2863
2864
2865
2866
2867
2868
2869
2870
2871
2872
2873
2874
2875
2876
2877 wasSpinning := _g_.m.spinning
2878 if _g_.m.spinning {
2879 _g_.m.spinning = false
2880 if int32(atomic.Xadd(&sched.nmspinning, -1)) < 0 {
2881 throw("findrunnable: negative nmspinning")
2882 }
2883
2884
2885
2886
2887
2888
2889
2890
2891 _p_ = checkRunqsNoP(allpSnapshot, idlepMaskSnapshot)
2892 if _p_ != nil {
2893 acquirep(_p_)
2894 _g_.m.spinning = true
2895 atomic.Xadd(&sched.nmspinning, 1)
2896 goto top
2897 }
2898
2899
2900 _p_, gp = checkIdleGCNoP()
2901 if _p_ != nil {
2902 acquirep(_p_)
2903 _g_.m.spinning = true
2904 atomic.Xadd(&sched.nmspinning, 1)
2905
2906
2907 _p_.gcMarkWorkerMode = gcMarkWorkerIdleMode
2908 casgstatus(gp, _Gwaiting, _Grunnable)
2909 if trace.enabled {
2910 traceGoUnpark(gp, 0)
2911 }
2912 return gp, false
2913 }
2914
2915
2916
2917
2918
2919
2920
2921 pollUntil = checkTimersNoP(allpSnapshot, timerpMaskSnapshot, pollUntil)
2922 }
2923
2924
2925 if netpollinited() && (atomic.Load(&netpollWaiters) > 0 || pollUntil != 0) && atomic.Xchg64(&sched.lastpoll, 0) != 0 {
2926 atomic.Store64(&sched.pollUntil, uint64(pollUntil))
2927 if _g_.m.p != 0 {
2928 throw("findrunnable: netpoll with p")
2929 }
2930 if _g_.m.spinning {
2931 throw("findrunnable: netpoll with spinning")
2932 }
2933 delay := int64(-1)
2934 if pollUntil != 0 {
2935 if now == 0 {
2936 now = nanotime()
2937 }
2938 delay = pollUntil - now
2939 if delay < 0 {
2940 delay = 0
2941 }
2942 }
2943 if faketime != 0 {
2944
2945 delay = 0
2946 }
2947 list := netpoll(delay)
2948 atomic.Store64(&sched.pollUntil, 0)
2949 atomic.Store64(&sched.lastpoll, uint64(nanotime()))
2950 if faketime != 0 && list.empty() {
2951
2952
2953 stopm()
2954 goto top
2955 }
2956 lock(&sched.lock)
2957 _p_ = pidleget()
2958 unlock(&sched.lock)
2959 if _p_ == nil {
2960 injectglist(&list)
2961 } else {
2962 acquirep(_p_)
2963 if !list.empty() {
2964 gp := list.pop()
2965 injectglist(&list)
2966 casgstatus(gp, _Gwaiting, _Grunnable)
2967 if trace.enabled {
2968 traceGoUnpark(gp, 0)
2969 }
2970 return gp, false
2971 }
2972 if wasSpinning {
2973 _g_.m.spinning = true
2974 atomic.Xadd(&sched.nmspinning, 1)
2975 }
2976 goto top
2977 }
2978 } else if pollUntil != 0 && netpollinited() {
2979 pollerPollUntil := int64(atomic.Load64(&sched.pollUntil))
2980 if pollerPollUntil == 0 || pollerPollUntil > pollUntil {
2981 netpollBreak()
2982 }
2983 }
2984 stopm()
2985 goto top
2986 }
2987
2988
2989
2990
2991
2992 func pollWork() bool {
2993 if sched.runqsize != 0 {
2994 return true
2995 }
2996 p := getg().m.p.ptr()
2997 if !runqempty(p) {
2998 return true
2999 }
3000 if netpollinited() && atomic.Load(&netpollWaiters) > 0 && sched.lastpoll != 0 {
3001 if list := netpoll(0); !list.empty() {
3002 injectglist(&list)
3003 return true
3004 }
3005 }
3006 return false
3007 }
3008
3009
3010
3011
3012
3013
3014
3015 func stealWork(now int64) (gp *g, inheritTime bool, rnow, pollUntil int64, newWork bool) {
3016 pp := getg().m.p.ptr()
3017
3018 ranTimer := false
3019
3020 const stealTries = 4
3021 for i := 0; i < stealTries; i++ {
3022 stealTimersOrRunNextG := i == stealTries-1
3023
3024 for enum := stealOrder.start(fastrand()); !enum.done(); enum.next() {
3025 if sched.gcwaiting != 0 {
3026
3027 return nil, false, now, pollUntil, true
3028 }
3029 p2 := allp[enum.position()]
3030 if pp == p2 {
3031 continue
3032 }
3033
3034
3035
3036
3037
3038
3039
3040
3041
3042
3043
3044
3045
3046
3047 if stealTimersOrRunNextG && timerpMask.read(enum.position()) {
3048 tnow, w, ran := checkTimers(p2, now)
3049 now = tnow
3050 if w != 0 && (pollUntil == 0 || w < pollUntil) {
3051 pollUntil = w
3052 }
3053 if ran {
3054
3055
3056
3057
3058
3059
3060
3061
3062 if gp, inheritTime := runqget(pp); gp != nil {
3063 return gp, inheritTime, now, pollUntil, ranTimer
3064 }
3065 ranTimer = true
3066 }
3067 }
3068
3069
3070 if !idlepMask.read(enum.position()) {
3071 if gp := runqsteal(pp, p2, stealTimersOrRunNextG); gp != nil {
3072 return gp, false, now, pollUntil, ranTimer
3073 }
3074 }
3075 }
3076 }
3077
3078
3079
3080
3081 return nil, false, now, pollUntil, ranTimer
3082 }
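
stealWork probes every other P starting from a random position (stealOrder.start(fastrand())) so no victim is systematically preferred. A simplified sketch of randomized probing; the real stealOrder also steps by a stride coprime to the P count, which this version omits:

	package main

	import (
		"fmt"
		"math/rand"
	)

	// randomOrder visits all indices [0,n) starting at a random position,
	// in the spirit of stealOrder: every P gets probed, but not in a fixed
	// global order that would bias stealing toward low-numbered Ps.
	func randomOrder(n int, visit func(i int)) {
		if n == 0 {
			return
		}
		start := rand.Intn(n)
		for k := 0; k < n; k++ {
			visit((start + k) % n)
		}
	}

	func main() {
		randomOrder(4, func(i int) { fmt.Println("probe P", i) })
	}
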
3083
3084
3085
3086
3087
3088
3089 func checkRunqsNoP(allpSnapshot []*p, idlepMaskSnapshot pMask) *p {
3090 for id, p2 := range allpSnapshot {
3091 if !idlepMaskSnapshot.read(uint32(id)) && !runqempty(p2) {
3092 lock(&sched.lock)
3093 pp := pidleget()
3094 unlock(&sched.lock)
3095 if pp != nil {
3096 return pp
3097 }
3098
3099
3100 break
3101 }
3102 }
3103
3104 return nil
3105 }
3106
3107
3108
3109
3110 func checkTimersNoP(allpSnapshot []*p, timerpMaskSnapshot pMask, pollUntil int64) int64 {
3111 for id, p2 := range allpSnapshot {
3112 if timerpMaskSnapshot.read(uint32(id)) {
3113 w := nobarrierWakeTime(p2)
3114 if w != 0 && (pollUntil == 0 || w < pollUntil) {
3115 pollUntil = w
3116 }
3117 }
3118 }
3119
3120 return pollUntil
3121 }
3122
3123
3124
3125
3126
3127 func checkIdleGCNoP() (*p, *g) {
3128
3129
3130 if atomic.Load(&gcBlackenEnabled) == 0 {
3131 return nil, nil
3132 }
3133 if !gcMarkWorkAvailable(nil) {
3134 return nil, nil
3135 }
3136
3137
3138
3139
3140
3141
3142
3143
3144
3145
3146
3147
3148
3149
3150
3151
3152
3153
3154 lock(&sched.lock)
3155 pp := pidleget()
3156 if pp == nil {
3157 unlock(&sched.lock)
3158 return nil, nil
3159 }
3160
3161
3162
3163 if gcBlackenEnabled == 0 {
3164 pidleput(pp)
3165 unlock(&sched.lock)
3166 return nil, nil
3167 }
3168
3169 node := (*gcBgMarkWorkerNode)(gcBgMarkWorkerPool.pop())
3170 if node == nil {
3171 pidleput(pp)
3172 unlock(&sched.lock)
3173 return nil, nil
3174 }
3175
3176 unlock(&sched.lock)
3177
3178 return pp, node.gp.ptr()
3179 }
3180
3181
3182
3183
3184 func wakeNetPoller(when int64) {
3185 if atomic.Load64(&sched.lastpoll) == 0 {
3186
3187
3188
3189
3190 pollerPollUntil := int64(atomic.Load64(&sched.pollUntil))
3191 if pollerPollUntil == 0 || pollerPollUntil > when {
3192 netpollBreak()
3193 }
3194 } else {
3195
3196
3197 if GOOS != "plan9" {
3198 wakep()
3199 }
3200 }
3201 }
3202
3203 func resetspinning() {
3204 _g_ := getg()
3205 if !_g_.m.spinning {
3206 throw("resetspinning: not a spinning m")
3207 }
3208 _g_.m.spinning = false
3209 nmspinning := atomic.Xadd(&sched.nmspinning, -1)
3210 if int32(nmspinning) < 0 {
3211 throw("findrunnable: negative nmspinning")
3212 }
3213
3214
3215
3216 wakep()
3217 }
3218
3219
3220
3221
3222
3223
3224
3225
3226
3227 func injectglist(glist *gList) {
3228 if glist.empty() {
3229 return
3230 }
3231 if trace.enabled {
3232 for gp := glist.head.ptr(); gp != nil; gp = gp.schedlink.ptr() {
3233 traceGoUnpark(gp, 0)
3234 }
3235 }
3236
3237
3238
3239 head := glist.head.ptr()
3240 var tail *g
3241 qsize := 0
3242 for gp := head; gp != nil; gp = gp.schedlink.ptr() {
3243 tail = gp
3244 qsize++
3245 casgstatus(gp, _Gwaiting, _Grunnable)
3246 }
3247
3248
3249 var q gQueue
3250 q.head.set(head)
3251 q.tail.set(tail)
3252 *glist = gList{}
3253
3254 startIdle := func(n int) {
3255 for ; n != 0 && sched.npidle != 0; n-- {
3256 startm(nil, false)
3257 }
3258 }
3259
3260 pp := getg().m.p.ptr()
3261 if pp == nil {
3262 lock(&sched.lock)
3263 globrunqputbatch(&q, int32(qsize))
3264 unlock(&sched.lock)
3265 startIdle(qsize)
3266 return
3267 }
3268
3269 npidle := int(atomic.Load(&sched.npidle))
3270 var globq gQueue
3271 var n int
3272 for n = 0; n < npidle && !q.empty(); n++ {
3273 g := q.pop()
3274 globq.pushBack(g)
3275 }
3276 if n > 0 {
3277 lock(&sched.lock)
3278 globrunqputbatch(&globq, int32(n))
3279 unlock(&sched.lock)
3280 startIdle(n)
3281 qsize -= n
3282 }
3283
3284 if !q.empty() {
3285 runqputbatch(pp, &q, qsize)
3286 }
3287 }
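
injectglist hands at most one goroutine per idle P to the global queue (waking an M for each) and batches the remainder onto the local run queue. The splitting policy in isolation, with illustrative types:

	package main

	import "fmt"

	// splitBatch gives one unit of work per idle worker to the global
	// queue and keeps the rest local, echoing injectglist's policy.
	func splitBatch(work []int, npidle int) (global, local []int) {
		n := npidle
		if n > len(work) {
			n = len(work)
		}
		return work[:n], work[n:]
	}

	func main() {
		g, l := splitBatch([]int{1, 2, 3, 4, 5}, 2)
		fmt.Println("global:", g, "local:", l)
	}
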
3288
3289
3290
3291 func schedule() {
3292 _g_ := getg()
3293
3294 if _g_.m.locks != 0 {
3295 throw("schedule: holding locks")
3296 }
3297
3298 if _g_.m.lockedg != 0 {
3299 stoplockedm()
3300 execute(_g_.m.lockedg.ptr(), false)
3301 }
3302
3303
3304
3305 if _g_.m.incgo {
3306 throw("schedule: in cgo")
3307 }
3308
3309 top:
3310 pp := _g_.m.p.ptr()
3311 pp.preempt = false
3312
3313 if sched.gcwaiting != 0 {
3314 gcstopm()
3315 goto top
3316 }
3317 if pp.runSafePointFn != 0 {
3318 runSafePointFn()
3319 }
3320
3321
3322
3323
3324 if _g_.m.spinning && (pp.runnext != 0 || pp.runqhead != pp.runqtail) {
3325 throw("schedule: spinning with local work")
3326 }
3327
3328 checkTimers(pp, 0)
3329
3330 var gp *g
3331 var inheritTime bool
3332
3333
3334
3335
3336 tryWakeP := false
3337 if trace.enabled || trace.shutdown {
3338 gp = traceReader()
3339 if gp != nil {
3340 casgstatus(gp, _Gwaiting, _Grunnable)
3341 traceGoUnpark(gp, 0)
3342 tryWakeP = true
3343 }
3344 }
3345 if gp == nil && gcBlackenEnabled != 0 {
3346 gp = gcController.findRunnableGCWorker(_g_.m.p.ptr())
3347 if gp != nil {
3348 tryWakeP = true
3349 }
3350 }
3351 if gp == nil {
3352
3353
3354
3355 if _g_.m.p.ptr().schedtick%61 == 0 && sched.runqsize > 0 {
3356 lock(&sched.lock)
3357 gp = globrunqget(_g_.m.p.ptr(), 1)
3358 unlock(&sched.lock)
3359 }
3360 }
3361 if gp == nil {
3362 gp, inheritTime = runqget(_g_.m.p.ptr())
3363
3364
3365 }
3366 if gp == nil {
3367 gp, inheritTime = findrunnable()
3368 }
3369
3370
3371
3372
3373 if _g_.m.spinning {
3374 resetspinning()
3375 }
3376
3377 if sched.disable.user && !schedEnabled(gp) {
3378
3379
3380
3381 lock(&sched.lock)
3382 if schedEnabled(gp) {
3383
3384
3385 unlock(&sched.lock)
3386 } else {
3387 sched.disable.runnable.pushBack(gp)
3388 sched.disable.n++
3389 unlock(&sched.lock)
3390 goto top
3391 }
3392 }
3393
3394
3395
3396 if tryWakeP {
3397 wakep()
3398 }
3399 if gp.lockedm != 0 {
3400
3401
3402 startlockedm(gp)
3403 goto top
3404 }
3405
3406 execute(gp, inheritTime)
3407 }
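
schedule consults work sources in a fixed priority order, checking the global queue every 61st schedtick so local run queues cannot starve it. The selection skeleton in isolation; all names here are illustrative stand-ins:

	package main

	import "fmt"

	// pickWork tries sources in the same priority order schedule uses:
	// occasionally the global queue (for fairness), then the local run
	// queue, then a blocking search (findrunnable's role).
	func pickWork(tick int, global, local func() (int, bool), block func() int) int {
		if tick%61 == 0 {
			if g, ok := global(); ok {
				return g
			}
		}
		if g, ok := local(); ok {
			return g
		}
		return block()
	}

	func main() {
		got := pickWork(1,
			func() (int, bool) { return 0, false },
			func() (int, bool) { return 42, true },
			func() int { return -1 })
		fmt.Println(got)
	}
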
3408
3409
3410
3411
3412
3413
3414
3415
3416 func dropg() {
3417 _g_ := getg()
3418
3419 setMNoWB(&_g_.m.curg.m, nil)
3420 setGNoWB(&_g_.m.curg, nil)
3421 }
3422
3423
3424
3425
3426
3427
3428
3429
3430
3431
3432 func checkTimers(pp *p, now int64) (rnow, pollUntil int64, ran bool) {
3433
3434
3435 next := int64(atomic.Load64(&pp.timer0When))
3436 nextAdj := int64(atomic.Load64(&pp.timerModifiedEarliest))
3437 if next == 0 || (nextAdj != 0 && nextAdj < next) {
3438 next = nextAdj
3439 }
3440
3441 if next == 0 {
3442
3443 return now, 0, false
3444 }
3445
3446 if now == 0 {
3447 now = nanotime()
3448 }
3449 if now < next {
3450
3451
3452
3453
3454 if pp != getg().m.p.ptr() || int(atomic.Load(&pp.deletedTimers)) <= int(atomic.Load(&pp.numTimers)/4) {
3455 return now, next, false
3456 }
3457 }
3458
3459 lock(&pp.timersLock)
3460
3461 if len(pp.timers) > 0 {
3462 adjusttimers(pp, now)
3463 for len(pp.timers) > 0 {
3464
3465
3466 if tw := runtimer(pp, now); tw != 0 {
3467 if tw > 0 {
3468 pollUntil = tw
3469 }
3470 break
3471 }
3472 ran = true
3473 }
3474 }
3475
3476
3477
3478
3479 if pp == getg().m.p.ptr() && int(atomic.Load(&pp.deletedTimers)) > len(pp.timers)/4 {
3480 clearDeletedTimers(pp)
3481 }
3482
3483 unlock(&pp.timersLock)
3484
3485 return now, pollUntil, ran
3486 }
3487
3488 func parkunlock_c(gp *g, lock unsafe.Pointer) bool {
3489 unlock((*mutex)(lock))
3490 return true
3491 }
3492
3493 // park continuation on g0.
3494 func park_m(gp *g) {
3495 _g_ := getg()
3496
3497 if trace.enabled {
3498 traceGoPark(_g_.m.waittraceev, _g_.m.waittraceskip)
3499 }
3500
3501 casgstatus(gp, _Grunning, _Gwaiting)
3502 dropg()
3503
3504 if fn := _g_.m.waitunlockf; fn != nil {
3505 ok := fn(gp, _g_.m.waitlock)
3506 _g_.m.waitunlockf = nil
3507 _g_.m.waitlock = nil
3508 if !ok {
3509 if trace.enabled {
3510 traceGoUnpark(gp, 2)
3511 }
3512 casgstatus(gp, _Gwaiting, _Grunnable)
3513 execute(gp, true)
3514 }
3515 }
3516 schedule()
3517 }
3518
3519 func goschedImpl(gp *g) {
3520 status := readgstatus(gp)
3521 if status&^_Gscan != _Grunning {
3522 dumpgstatus(gp)
3523 throw("bad g status")
3524 }
3525 casgstatus(gp, _Grunning, _Grunnable)
3526 dropg()
3527 lock(&sched.lock)
3528 globrunqput(gp)
3529 unlock(&sched.lock)
3530
3531 schedule()
3532 }
3533
3534 // Gosched continuation on g0.
3535 func gosched_m(gp *g) {
3536 if trace.enabled {
3537 traceGoSched()
3538 }
3539 goschedImpl(gp)
3540 }
3541
3542
3543 func goschedguarded_m(gp *g) {
3544
3545 if !canPreemptM(gp.m) {
3546 gogo(&gp.sched)
3547 }
3548
3549 if trace.enabled {
3550 traceGoSched()
3551 }
3552 goschedImpl(gp)
3553 }
3554
3555 func gopreempt_m(gp *g) {
3556 if trace.enabled {
3557 traceGoPreempt()
3558 }
3559 goschedImpl(gp)
3560 }
3561
3562 // preemptPark parks gp and puts it in _Gpreempted.
3563 //
3564 //go:systemstack
3565 func preemptPark(gp *g) {
3566 if trace.enabled {
3567 traceGoPark(traceEvGoBlock, 0)
3568 }
3569 status := readgstatus(gp)
3570 if status&^_Gscan != _Grunning {
3571 dumpgstatus(gp)
3572 throw("bad g status")
3573 }
3574 gp.waitreason = waitReasonPreempted
3575
3576 if gp.asyncSafePoint {
3577
3578
3579
3580 f := findfunc(gp.sched.pc)
3581 if !f.valid() {
3582 throw("preempt at unknown pc")
3583 }
3584 if f.flag&funcFlag_SPWRITE != 0 {
3585 println("runtime: unexpected SPWRITE function", funcname(f), "in async preempt")
3586 throw("preempt SPWRITE")
3587 }
3588 }
3589
3590
3591
3592
3593
3594
3595
3596 casGToPreemptScan(gp, _Grunning, _Gscan|_Gpreempted)
3597 dropg()
3598 casfrom_Gscanstatus(gp, _Gscan|_Gpreempted, _Gpreempted)
3599 schedule()
3600 }
3601
3602 // goyield is like Gosched, but it:
3603 // - emits a GoPreempt trace event instead of a GoSched trace event
3604 // - puts the current G on the runq of the current P instead of the globrunq
3605 func goyield() {
3606 checkTimeouts()
3607 mcall(goyield_m)
3608 }
3609
3610 func goyield_m(gp *g) {
3611 if trace.enabled {
3612 traceGoPreempt()
3613 }
3614 pp := gp.m.p.ptr()
3615 casgstatus(gp, _Grunning, _Grunnable)
3616 dropg()
3617 runqput(pp, gp, false)
3618 schedule()
3619 }
3620
3621 // Finishes execution of the current goroutine.
3622 func goexit1() {
3623 if raceenabled {
3624 racegoend()
3625 }
3626 if trace.enabled {
3627 traceGoEnd()
3628 }
3629 mcall(goexit0)
3630 }
3631
3632 // goexit continuation on g0.
3633 func goexit0(gp *g) {
3634 _g_ := getg()
3635
3636 casgstatus(gp, _Grunning, _Gdead)
3637 if isSystemGoroutine(gp, false) {
3638 atomic.Xadd(&sched.ngsys, -1)
3639 }
3640 gp.m = nil
3641 locked := gp.lockedm != 0
3642 gp.lockedm = 0
3643 _g_.m.lockedg = 0
3644 gp.preemptStop = false
3645 gp.paniconfault = false
3646 gp._defer = nil
3647 gp._panic = nil
3648 gp.writebuf = nil
3649 gp.waitreason = 0
3650 gp.param = nil
3651 gp.labels = nil
3652 gp.timer = nil
3653
3654 if gcBlackenEnabled != 0 && gp.gcAssistBytes > 0 {
3655
3656
3657
3658 assistWorkPerByte := float64frombits(atomic.Load64(&gcController.assistWorkPerByte))
3659 scanCredit := int64(assistWorkPerByte * float64(gp.gcAssistBytes))
3660 atomic.Xaddint64(&gcController.bgScanCredit, scanCredit)
3661 gp.gcAssistBytes = 0
3662 }
3663
3664 dropg()
3665
3666 if GOARCH == "wasm" {
3667 gfput(_g_.m.p.ptr(), gp)
3668 schedule()
3669 }
3670
3671 if _g_.m.lockedInt != 0 {
3672 print("invalid m->lockedInt = ", _g_.m.lockedInt, "\n")
3673 throw("internal lockOSThread error")
3674 }
3675 gfput(_g_.m.p.ptr(), gp)
3676 if locked {
3677
3678
3679
3680
3681
3682
3683 if GOOS != "plan9" {
3684 gogo(&_g_.m.g0.sched)
3685 } else {
3686
3687
3688 _g_.m.lockedExt = 0
3689 }
3690 }
3691 schedule()
3692 }
3693
3694 // save updates getg().sched to refer to pc and sp so that a following
3695 // gogo will restore pc/sp.
3696 //
3697 // save must not have write barriers because invoking a write barrier
3698 // can trash getg().sched.
3699 //
3700 //go:nosplit
3701 //go:nowritebarrierrec
3702 func save(pc, sp uintptr) {
3703 _g_ := getg()
3704
3705 if _g_ == _g_.m.g0 || _g_ == _g_.m.gsignal {
3706
3707
3708
3709
3710
3711 throw("save on system g not allowed")
3712 }
3713
3714 _g_.sched.pc = pc
3715 _g_.sched.sp = sp
3716 _g_.sched.lr = 0
3717 _g_.sched.ret = 0
3718
3719
3720
3721 if _g_.sched.ctxt != nil {
3722 badctxt()
3723 }
3724 }
3725
3726
3727
3728
3729
3730
3731
3732
3733
3734
3735
3736
3737
3738
3739
3740
3741
3742
3743
3744
3745
3746
3747
3748
3749
3750
3751
3752
3753
3754
3755
3756
3757
3758
3759
3760
3761
3762
3763 func reentersyscall(pc, sp uintptr) {
3764 _g_ := getg()
3765
3766
3767
3768 _g_.m.locks++
3769
3770
3771
3772
3773
3774 _g_.stackguard0 = stackPreempt
3775 _g_.throwsplit = true
3776
3777
3778 save(pc, sp)
3779 _g_.syscallsp = sp
3780 _g_.syscallpc = pc
3781 casgstatus(_g_, _Grunning, _Gsyscall)
3782 if _g_.syscallsp < _g_.stack.lo || _g_.stack.hi < _g_.syscallsp {
3783 systemstack(func() {
3784 print("entersyscall inconsistent ", hex(_g_.syscallsp), " [", hex(_g_.stack.lo), ",", hex(_g_.stack.hi), "]\n")
3785 throw("entersyscall")
3786 })
3787 }
3788
3789 if trace.enabled {
3790 systemstack(traceGoSysCall)
3791
3792
3793
3794 save(pc, sp)
3795 }
3796
3797 if atomic.Load(&sched.sysmonwait) != 0 {
3798 systemstack(entersyscall_sysmon)
3799 save(pc, sp)
3800 }
3801
3802 if _g_.m.p.ptr().runSafePointFn != 0 {
3803
3804 systemstack(runSafePointFn)
3805 save(pc, sp)
3806 }
3807
3808 _g_.m.syscalltick = _g_.m.p.ptr().syscalltick
3809 _g_.sysblocktraced = true
3810 pp := _g_.m.p.ptr()
3811 pp.m = 0
3812 _g_.m.oldp.set(pp)
3813 _g_.m.p = 0
3814 atomic.Store(&pp.status, _Psyscall)
3815 if sched.gcwaiting != 0 {
3816 systemstack(entersyscall_gcwait)
3817 save(pc, sp)
3818 }
3819
3820 _g_.m.locks--
3821 }
3822
3823 // Standard syscall entry used by the go syscall library and normal cgo calls.
3824 //
3825 // This is exported via linkname to assembly in the syscall package.
3826 //
3827 //go:nosplit
3828 //go:linkname entersyscall
3829 func entersyscall() {
3830 reentersyscall(getcallerpc(), getcallersp())
3831 }
3832
3833 func entersyscall_sysmon() {
3834 lock(&sched.lock)
3835 if atomic.Load(&sched.sysmonwait) != 0 {
3836 atomic.Store(&sched.sysmonwait, 0)
3837 notewakeup(&sched.sysmonnote)
3838 }
3839 unlock(&sched.lock)
3840 }
3841
3842 func entersyscall_gcwait() {
3843 _g_ := getg()
3844 _p_ := _g_.m.oldp.ptr()
3845
3846 lock(&sched.lock)
3847 if sched.stopwait > 0 && atomic.Cas(&_p_.status, _Psyscall, _Pgcstop) {
3848 if trace.enabled {
3849 traceGoSysBlock(_p_)
3850 traceProcStop(_p_)
3851 }
3852 _p_.syscalltick++
3853 if sched.stopwait--; sched.stopwait == 0 {
3854 notewakeup(&sched.stopnote)
3855 }
3856 }
3857 unlock(&sched.lock)
3858 }
3859
3860 // The same as entersyscall(), but with a hint that the syscall is blocking.
3861 //go:nosplit
3862 func entersyscallblock() {
3863 _g_ := getg()
3864
3865 _g_.m.locks++
3866 _g_.throwsplit = true
3867 _g_.stackguard0 = stackPreempt
3868 _g_.m.syscalltick = _g_.m.p.ptr().syscalltick
3869 _g_.sysblocktraced = true
3870 _g_.m.p.ptr().syscalltick++
3871
3872
3873 pc := getcallerpc()
3874 sp := getcallersp()
3875 save(pc, sp)
3876 _g_.syscallsp = _g_.sched.sp
3877 _g_.syscallpc = _g_.sched.pc
3878 if _g_.syscallsp < _g_.stack.lo || _g_.stack.hi < _g_.syscallsp {
3879 sp1 := sp
3880 sp2 := _g_.sched.sp
3881 sp3 := _g_.syscallsp
3882 systemstack(func() {
3883 print("entersyscallblock inconsistent ", hex(sp1), " ", hex(sp2), " ", hex(sp3), " [", hex(_g_.stack.lo), ",", hex(_g_.stack.hi), "]\n")
3884 throw("entersyscallblock")
3885 })
3886 }
3887 casgstatus(_g_, _Grunning, _Gsyscall)
3888 if _g_.syscallsp < _g_.stack.lo || _g_.stack.hi < _g_.syscallsp {
3889 systemstack(func() {
3890 print("entersyscallblock inconsistent ", hex(sp), " ", hex(_g_.sched.sp), " ", hex(_g_.syscallsp), " [", hex(_g_.stack.lo), ",", hex(_g_.stack.hi), "]\n")
3891 throw("entersyscallblock")
3892 })
3893 }
3894
3895 systemstack(entersyscallblock_handoff)
3896
3897
3898 save(getcallerpc(), getcallersp())
3899
3900 _g_.m.locks--
3901 }
3902
3903 func entersyscallblock_handoff() {
3904 if trace.enabled {
3905 traceGoSysCall()
3906 traceGoSysBlock(getg().m.p.ptr())
3907 }
3908 handoffp(releasep())
3909 }
3910
3911 // The goroutine g exited its system call.
3912 // Arrange for it to run on a cpu again.
3913 // This is called only from the go syscall library, not
3914 // from the low-level system calls used by the runtime.
3915 //
3916 // Write barriers are not allowed because our P may have been stolen.
3917 //
3918 // This is exported via linkname to assembly in the syscall package.
3919 //
3920 //go:nosplit
3921 //go:nowritebarrierrec
3922 //go:linkname exitsyscall
3923 func exitsyscall() {
3924 _g_ := getg()
3925
3926 _g_.m.locks++
3927 if getcallersp() > _g_.syscallsp {
3928 throw("exitsyscall: syscall frame is no longer valid")
3929 }
3930
3931 _g_.waitsince = 0
3932 oldp := _g_.m.oldp.ptr()
3933 _g_.m.oldp = 0
3934 if exitsyscallfast(oldp) {
3935 if trace.enabled {
3936 if oldp != _g_.m.p.ptr() || _g_.m.syscalltick != _g_.m.p.ptr().syscalltick {
3937 systemstack(traceGoStart)
3938 }
3939 }
3940
3941 _g_.m.p.ptr().syscalltick++
3942
3943 casgstatus(_g_, _Gsyscall, _Grunning)
3944
3945
3946
3947 _g_.syscallsp = 0
3948 _g_.m.locks--
3949 if _g_.preempt {
3950
3951 _g_.stackguard0 = stackPreempt
3952 } else {
3953
3954 _g_.stackguard0 = _g_.stack.lo + _StackGuard
3955 }
3956 _g_.throwsplit = false
3957
3958 if sched.disable.user && !schedEnabled(_g_) {
3959
3960 Gosched()
3961 }
3962
3963 return
3964 }
3965
3966 _g_.sysexitticks = 0
3967 if trace.enabled {
3968
3969
3970 for oldp != nil && oldp.syscalltick == _g_.m.syscalltick {
3971 osyield()
3972 }
3973
3974
3975
3976
3977 _g_.sysexitticks = cputicks()
3978 }
3979
3980 _g_.m.locks--
3981
3982
3983 mcall(exitsyscall0)
3984
3985
3986
3987
3988
3989
3990
3991 _g_.syscallsp = 0
3992 _g_.m.p.ptr().syscalltick++
3993 _g_.throwsplit = false
3994 }
3995
3996
3997 func exitsyscallfast(oldp *p) bool {
3998 _g_ := getg()
3999
4000
4001 if sched.stopwait == freezeStopWait {
4002 return false
4003 }
4004
4005
4006 if oldp != nil && oldp.status == _Psyscall && atomic.Cas(&oldp.status, _Psyscall, _Pidle) {
4007
4008 wirep(oldp)
4009 exitsyscallfast_reacquired()
4010 return true
4011 }
4012
4013
4014 if sched.pidle != 0 {
4015 var ok bool
4016 systemstack(func() {
4017 ok = exitsyscallfast_pidle()
4018 if ok && trace.enabled {
4019 if oldp != nil {
4020
4021
4022 for oldp.syscalltick == _g_.m.syscalltick {
4023 osyield()
4024 }
4025 }
4026 traceGoSysExit(0)
4027 }
4028 })
4029 if ok {
4030 return true
4031 }
4032 }
4033 return false
4034 }
4035
4036
4037
4038
4039
4040
4041 func exitsyscallfast_reacquired() {
4042 _g_ := getg()
4043 if _g_.m.syscalltick != _g_.m.p.ptr().syscalltick {
4044 if trace.enabled {
4045
4046
4047
4048 systemstack(func() {
4049
4050 traceGoSysBlock(_g_.m.p.ptr())
4051
4052 traceGoSysExit(0)
4053 })
4054 }
4055 _g_.m.p.ptr().syscalltick++
4056 }
4057 }
4058
4059 func exitsyscallfast_pidle() bool {
4060 lock(&sched.lock)
4061 _p_ := pidleget()
4062 if _p_ != nil && atomic.Load(&sched.sysmonwait) != 0 {
4063 atomic.Store(&sched.sysmonwait, 0)
4064 notewakeup(&sched.sysmonnote)
4065 }
4066 unlock(&sched.lock)
4067 if _p_ != nil {
4068 acquirep(_p_)
4069 return true
4070 }
4071 return false
4072 }
4073
4074
4075
4076
4077
4078
4079
4080 func exitsyscall0(gp *g) {
4081 casgstatus(gp, _Gsyscall, _Grunnable)
4082 dropg()
4083 lock(&sched.lock)
4084 var _p_ *p
4085 if schedEnabled(gp) {
4086 _p_ = pidleget()
4087 }
4088 var locked bool
4089 if _p_ == nil {
4090 globrunqput(gp)
4091
4092
4093
4094
4095
4096
4097 locked = gp.lockedm != 0
4098 } else if atomic.Load(&sched.sysmonwait) != 0 {
4099 atomic.Store(&sched.sysmonwait, 0)
4100 notewakeup(&sched.sysmonnote)
4101 }
4102 unlock(&sched.lock)
4103 if _p_ != nil {
4104 acquirep(_p_)
4105 execute(gp, false)
4106 }
4107 if locked {
4108
4109
4110
4111
4112 stoplockedm()
4113 execute(gp, false)
4114 }
4115 stopm()
4116 schedule()
4117 }
4118
4119 func beforefork() {
4120 gp := getg().m.curg
4121
4122
4123
4124
4125 gp.m.locks++
4126 sigsave(&gp.m.sigmask)
4127 sigblock(false)
4128
4129
4130
4131
4132
4133 gp.stackguard0 = stackFork
4134 }
4135
4136
4137
4138
4139 func syscall_runtime_BeforeFork() {
4140 systemstack(beforefork)
4141 }
4142
4143 func afterfork() {
4144 gp := getg().m.curg
4145
4146
4147 gp.stackguard0 = gp.stack.lo + _StackGuard
4148
4149 msigrestore(gp.m.sigmask)
4150
4151 gp.m.locks--
4152 }
4153
4154
4155
4156
4157 func syscall_runtime_AfterFork() {
4158 systemstack(afterfork)
4159 }
4160
4161
4162
4163 var inForkedChild bool
4164
4165
4166
4167
4168
4169
4170
4171
4172
4173
4174
4175
4176 func syscall_runtime_AfterForkInChild() {
4177
4178
4179
4180
4181 inForkedChild = true
4182
4183 clearSignalHandlers()
4184
4185
4186
4187 msigrestore(getg().m.sigmask)
4188
4189 inForkedChild = false
4190 }
4191
4192
4193
4194
4195 var pendingPreemptSignals uint32
4196
4197
4198
4199 func syscall_runtime_BeforeExec() {
4200
4201 execLock.lock()
4202
4203
4204
4205 if GOOS == "darwin" || GOOS == "ios" {
4206 for int32(atomic.Load(&pendingPreemptSignals)) > 0 {
4207 osyield()
4208 }
4209 }
4210 }
4211
4212
4213
4214 func syscall_runtime_AfterExec() {
4215 execLock.unlock()
4216 }
4217
4218
4219 func malg(stacksize int32) *g {
4220 newg := new(g)
4221 if stacksize >= 0 {
4222 stacksize = round2(_StackSystem + stacksize)
4223 systemstack(func() {
4224 newg.stack = stackalloc(uint32(stacksize))
4225 })
4226 newg.stackguard0 = newg.stack.lo + _StackGuard
4227 newg.stackguard1 = ^uintptr(0)
4228
4229
4230 *(*uintptr)(unsafe.Pointer(newg.stack.lo)) = 0
4231 }
4232 return newg
4233 }
4234
4235
4236
4237
4238
4239
4240
4241
4242
4243
4244
4245
4246
4247
4248
4249
4250 func newproc(siz int32, fn *funcval) {
4251 argp := add(unsafe.Pointer(&fn), sys.PtrSize)
4252 gp := getg()
4253 pc := getcallerpc()
4254 systemstack(func() {
4255 newg := newproc1(fn, argp, siz, gp, pc)
4256
4257 _p_ := getg().m.p.ptr()
4258 runqput(_p_, newg, true)
4259
4260 if mainStarted {
4261 wakep()
4262 }
4263 })
4264 }
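
A minimal sketch (not part of proc.go) of the user-facing side: the compiler lowers each go statement into a call to newproc with the function value and its argument frame, which is all it takes to start the goroutine below.

package main

import (
	"fmt"
	"sync"
)

func main() {
	var wg sync.WaitGroup
	wg.Add(1)
	// The go statement becomes runtime.newproc(siz, fn): newproc1
	// allocates (or reuses) a g, copies the argument into its stack,
	// and runqput makes it runnable on the current P.
	go func(msg string) {
		defer wg.Done()
		fmt.Println(msg)
	}("hello from a new goroutine")
	wg.Wait()
}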
4265
4266
4267
4268
4269
4270
4271
4272
4273
4274
4275 func newproc1(fn *funcval, argp unsafe.Pointer, narg int32, callergp *g, callerpc uintptr) *g {
4276 if goexperiment.RegabiDefer && narg != 0 {
4277
4278
4279
4280
4281 throw("go with non-empty frame")
4282 }
4283
4284 _g_ := getg()
4285
4286 if fn == nil {
4287 _g_.m.throwing = -1
4288 throw("go of nil func value")
4289 }
4290 acquirem()
4291 siz := narg
4292 siz = (siz + 7) &^ 7
4293
4294
4295
4296
4297
4298 if siz >= _StackMin-4*sys.PtrSize-sys.PtrSize {
4299 throw("newproc: function arguments too large for new goroutine")
4300 }
4301
4302 _p_ := _g_.m.p.ptr()
4303 newg := gfget(_p_)
4304 if newg == nil {
4305 newg = malg(_StackMin)
4306 casgstatus(newg, _Gidle, _Gdead)
4307 allgadd(newg)
4308 }
4309 if newg.stack.hi == 0 {
4310 throw("newproc1: newg missing stack")
4311 }
4312
4313 if readgstatus(newg) != _Gdead {
4314 throw("newproc1: new g is not Gdead")
4315 }
4316
4317 totalSize := 4*sys.PtrSize + uintptr(siz) + sys.MinFrameSize
4318 totalSize += -totalSize & (sys.StackAlign - 1)
4319 sp := newg.stack.hi - totalSize
4320 spArg := sp
4321 if usesLR {
4322
4323 *(*uintptr)(unsafe.Pointer(sp)) = 0
4324 prepGoExitFrame(sp)
4325 spArg += sys.MinFrameSize
4326 }
4327 if narg > 0 {
4328 memmove(unsafe.Pointer(spArg), argp, uintptr(narg))
4329
4330
4331
4332
4333
4334
4335 if writeBarrier.needed && !_g_.m.curg.gcscandone {
4336 f := findfunc(fn.fn)
4337 stkmap := (*stackmap)(funcdata(f, _FUNCDATA_ArgsPointerMaps))
4338 if stkmap.nbit > 0 {
4339
4340 bv := stackmapdata(stkmap, 0)
4341 bulkBarrierBitmap(spArg, spArg, uintptr(bv.n)*sys.PtrSize, 0, bv.bytedata)
4342 }
4343 }
4344 }
4345
4346 memclrNoHeapPointers(unsafe.Pointer(&newg.sched), unsafe.Sizeof(newg.sched))
4347 newg.sched.sp = sp
4348 newg.stktopsp = sp
4349 newg.sched.pc = abi.FuncPCABI0(goexit) + sys.PCQuantum
4350 newg.sched.g = guintptr(unsafe.Pointer(newg))
4351 gostartcallfn(&newg.sched, fn)
4352 newg.gopc = callerpc
4353 newg.ancestors = saveAncestors(callergp)
4354 newg.startpc = fn.fn
4355 if _g_.m.curg != nil {
4356 newg.labels = _g_.m.curg.labels
4357 }
4358 if isSystemGoroutine(newg, false) {
4359 atomic.Xadd(&sched.ngsys, +1)
4360 }
4361
4362 newg.trackingSeq = uint8(fastrand())
4363 if newg.trackingSeq%gTrackingPeriod == 0 {
4364 newg.tracking = true
4365 }
4366 casgstatus(newg, _Gdead, _Grunnable)
4367
4368 if _p_.goidcache == _p_.goidcacheend {
4369
4370
4371
4372 _p_.goidcache = atomic.Xadd64(&sched.goidgen, _GoidCacheBatch)
4373 _p_.goidcache -= _GoidCacheBatch - 1
4374 _p_.goidcacheend = _p_.goidcache + _GoidCacheBatch
4375 }
4376 newg.goid = int64(_p_.goidcache)
4377 _p_.goidcache++
4378 if raceenabled {
4379 newg.racectx = racegostart(callerpc)
4380 }
4381 if trace.enabled {
4382 traceGoCreate(newg, newg.startpc)
4383 }
4384 releasem(_g_.m)
4385
4386 return newg
4387 }
4388
4389
4390
4391
4392 func saveAncestors(callergp *g) *[]ancestorInfo {
4393
4394 if debug.tracebackancestors <= 0 || callergp.goid == 0 {
4395 return nil
4396 }
4397 var callerAncestors []ancestorInfo
4398 if callergp.ancestors != nil {
4399 callerAncestors = *callergp.ancestors
4400 }
4401 n := int32(len(callerAncestors)) + 1
4402 if n > debug.tracebackancestors {
4403 n = debug.tracebackancestors
4404 }
4405 ancestors := make([]ancestorInfo, n)
4406 copy(ancestors[1:], callerAncestors)
4407
4408 var pcs [_TracebackMaxFrames]uintptr
4409 npcs := gcallers(callergp, 0, pcs[:])
4410 ipcs := make([]uintptr, npcs)
4411 copy(ipcs, pcs[:])
4412 ancestors[0] = ancestorInfo{
4413 pcs: ipcs,
4414 goid: callergp.goid,
4415 gopc: callergp.gopc,
4416 }
4417
4418 ancestorsp := new([]ancestorInfo)
4419 *ancestorsp = ancestors
4420 return ancestorsp
4421 }
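
saveAncestors records anything only when the tracebackancestors GODEBUG knob is nonzero. A small sketch (the file name main.go is just an example): run it as GODEBUG=tracebackancestors=5 go run main.go and the panic traceback also prints up to five levels of "created by" ancestor stacks captured here.

package main

func main() {
	done := make(chan struct{})
	go func() {
		// With GODEBUG=tracebackancestors=5, this goroutine's
		// traceback includes the ancestor stack recorded at the
		// go statement above.
		panic("boom")
	}()
	<-done
}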
4422
4423
4424
4425 func gfput(_p_ *p, gp *g) {
4426 if readgstatus(gp) != _Gdead {
4427 throw("gfput: bad status (not Gdead)")
4428 }
4429
4430 stksize := gp.stack.hi - gp.stack.lo
4431
4432 if stksize != _FixedStack {
4433
4434 stackfree(gp.stack)
4435 gp.stack.lo = 0
4436 gp.stack.hi = 0
4437 gp.stackguard0 = 0
4438 }
4439
4440 _p_.gFree.push(gp)
4441 _p_.gFree.n++
4442 if _p_.gFree.n >= 64 {
4443 var (
4444 inc int32
4445 stackQ gQueue
4446 noStackQ gQueue
4447 )
4448 for _p_.gFree.n >= 32 {
4449 gp = _p_.gFree.pop()
4450 _p_.gFree.n--
4451 if gp.stack.lo == 0 {
4452 noStackQ.push(gp)
4453 } else {
4454 stackQ.push(gp)
4455 }
4456 inc++
4457 }
4458 lock(&sched.gFree.lock)
4459 sched.gFree.noStack.pushAll(noStackQ)
4460 sched.gFree.stack.pushAll(stackQ)
4461 sched.gFree.n += inc
4462 unlock(&sched.gFree.lock)
4463 }
4464 }
4465
4466
4467
4468 func gfget(_p_ *p) *g {
4469 retry:
4470 if _p_.gFree.empty() && (!sched.gFree.stack.empty() || !sched.gFree.noStack.empty()) {
4471 lock(&sched.gFree.lock)
4472
4473 for _p_.gFree.n < 32 {
4474
4475 gp := sched.gFree.stack.pop()
4476 if gp == nil {
4477 gp = sched.gFree.noStack.pop()
4478 if gp == nil {
4479 break
4480 }
4481 }
4482 sched.gFree.n--
4483 _p_.gFree.push(gp)
4484 _p_.gFree.n++
4485 }
4486 unlock(&sched.gFree.lock)
4487 goto retry
4488 }
4489 gp := _p_.gFree.pop()
4490 if gp == nil {
4491 return nil
4492 }
4493 _p_.gFree.n--
4494 if gp.stack.lo == 0 {
4495
4496 systemstack(func() {
4497 gp.stack = stackalloc(_FixedStack)
4498 })
4499 gp.stackguard0 = gp.stack.lo + _StackGuard
4500 } else {
4501 if raceenabled {
4502 racemalloc(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
4503 }
4504 if msanenabled {
4505 msanmalloc(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
4506 }
4507 }
4508 return gp
4509 }
4510
4511
4512 func gfpurge(_p_ *p) {
4513 var (
4514 inc int32
4515 stackQ gQueue
4516 noStackQ gQueue
4517 )
4518 for !_p_.gFree.empty() {
4519 gp := _p_.gFree.pop()
4520 _p_.gFree.n--
4521 if gp.stack.lo == 0 {
4522 noStackQ.push(gp)
4523 } else {
4524 stackQ.push(gp)
4525 }
4526 inc++
4527 }
4528 lock(&sched.gFree.lock)
4529 sched.gFree.noStack.pushAll(noStackQ)
4530 sched.gFree.stack.pushAll(stackQ)
4531 sched.gFree.n += inc
4532 unlock(&sched.gFree.lock)
4533 }
4534
4535
4536 func Breakpoint() {
4537 breakpoint()
4538 }
4539
4540
4541
4542
4543
4544 func dolockOSThread() {
4545 if GOARCH == "wasm" {
4546 return
4547 }
4548 _g_ := getg()
4549 _g_.m.lockedg.set(_g_)
4550 _g_.lockedm.set(_g_.m)
4551 }
4552
4553
4554
4555
4556
4557
4558
4559
4560
4561
4562
4563
4564
4565
4566
4567
4568
4569 func LockOSThread() {
4570 if atomic.Load(&newmHandoff.haveTemplateThread) == 0 && GOOS != "plan9" {
4571
4572
4573
4574 startTemplateThread()
4575 }
4576 _g_ := getg()
4577 _g_.m.lockedExt++
4578 if _g_.m.lockedExt == 0 {
4579 _g_.m.lockedExt--
4580 panic("LockOSThread nesting overflow")
4581 }
4582 dolockOSThread()
4583 }
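
A common use, sketched below: code that calls OS or C APIs tied to the process's main thread (GUI event loops, some graphics APIs) locks the main goroutine to the main OS thread from an init function, before main runs.

package main

import "runtime"

func init() {
	// Runs on the main thread before main starts; the main goroutine
	// stays wired to it until a matching runtime.UnlockOSThread.
	runtime.LockOSThread()
}

func main() {
	// Safe to enter a thread-affine event loop here.
}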
4584
4585
4586 func lockOSThread() {
4587 getg().m.lockedInt++
4588 dolockOSThread()
4589 }
4590
4591
4592
4593
4594
4595 func dounlockOSThread() {
4596 if GOARCH == "wasm" {
4597 return
4598 }
4599 _g_ := getg()
4600 if _g_.m.lockedInt != 0 || _g_.m.lockedExt != 0 {
4601 return
4602 }
4603 _g_.m.lockedg = 0
4604 _g_.lockedm = 0
4605 }
4606
4607
4608
4609
4610
4611
4612
4613
4614
4615
4616
4617
4618
4619
4620
4621 func UnlockOSThread() {
4622 _g_ := getg()
4623 if _g_.m.lockedExt == 0 {
4624 return
4625 }
4626 _g_.m.lockedExt--
4627 dounlockOSThread()
4628 }
4629
4630
4631 func unlockOSThread() {
4632 _g_ := getg()
4633 if _g_.m.lockedInt == 0 {
4634 systemstack(badunlockosthread)
4635 }
4636 _g_.m.lockedInt--
4637 dounlockOSThread()
4638 }
4639
4640 func badunlockosthread() {
4641 throw("runtime: internal error: misuse of lockOSThread/unlockOSThread")
4642 }
4643
4644 func gcount() int32 {
4645 n := int32(atomic.Loaduintptr(&allglen)) - sched.gFree.n - int32(atomic.Load(&sched.ngsys))
4646 for _, _p_ := range allp {
4647 n -= _p_.gFree.n
4648 }
4649
4650
4651
4652 if n < 1 {
4653 n = 1
4654 }
4655 return n
4656 }
4657
4658 func mcount() int32 {
4659 return int32(sched.mnext - sched.nmfreed)
4660 }
4661
4662 var prof struct {
4663 signalLock uint32
4664 hz int32
4665 }
4666
4667 func _System() { _System() }
4668 func _ExternalCode() { _ExternalCode() }
4669 func _LostExternalCode() { _LostExternalCode() }
4670 func _GC() { _GC() }
4671 func _LostSIGPROFDuringAtomic64() { _LostSIGPROFDuringAtomic64() }
4672 func _VDSO() { _VDSO() }
4673
4674
4675
4676
4677 func sigprof(pc, sp, lr uintptr, gp *g, mp *m) {
4678 if prof.hz == 0 {
4679 return
4680 }
4681
4682
4683
4684
4685 if mp != nil && mp.profilehz == 0 {
4686 return
4687 }
4688
4689
4690
4691
4692
4693
4694
4695 if GOARCH == "mips" || GOARCH == "mipsle" || GOARCH == "arm" {
4696 if f := findfunc(pc); f.valid() {
4697 if hasPrefix(funcname(f), "runtime/internal/atomic") {
4698 cpuprof.lostAtomic++
4699 return
4700 }
4701 }
4702 if GOARCH == "arm" && goarm < 7 && GOOS == "linux" && pc&0xffff0000 == 0xffff0000 {
4703
4704
4705
4706 cpuprof.lostAtomic++
4707 return
4708 }
4709 }
4710
4711
4712
4713
4714
4715
4716
4717 getg().m.mallocing++
4718
4719 var stk [maxCPUProfStack]uintptr
4720 n := 0
4721 if mp.ncgo > 0 && mp.curg != nil && mp.curg.syscallpc != 0 && mp.curg.syscallsp != 0 {
4722 cgoOff := 0
4723
4724
4725
4726
4727
4728 if atomic.Load(&mp.cgoCallersUse) == 0 && mp.cgoCallers != nil && mp.cgoCallers[0] != 0 {
4729 for cgoOff < len(mp.cgoCallers) && mp.cgoCallers[cgoOff] != 0 {
4730 cgoOff++
4731 }
4732 copy(stk[:], mp.cgoCallers[:cgoOff])
4733 mp.cgoCallers[0] = 0
4734 }
4735
4736
4737 n = gentraceback(mp.curg.syscallpc, mp.curg.syscallsp, 0, mp.curg, 0, &stk[cgoOff], len(stk)-cgoOff, nil, nil, 0)
4738 if n > 0 {
4739 n += cgoOff
4740 }
4741 } else {
4742 n = gentraceback(pc, sp, lr, gp, 0, &stk[0], len(stk), nil, nil, _TraceTrap|_TraceJumpStack)
4743 }
4744
4745 if n <= 0 {
4746
4747
4748 n = 0
4749 if usesLibcall() && mp.libcallg != 0 && mp.libcallpc != 0 && mp.libcallsp != 0 {
4750
4751
4752 n = gentraceback(mp.libcallpc, mp.libcallsp, 0, mp.libcallg.ptr(), 0, &stk[0], len(stk), nil, nil, 0)
4753 }
4754 if n == 0 && mp != nil && mp.vdsoSP != 0 {
4755 n = gentraceback(mp.vdsoPC, mp.vdsoSP, 0, gp, 0, &stk[0], len(stk), nil, nil, _TraceTrap|_TraceJumpStack)
4756 }
4757 if n == 0 {
4758
4759 n = 2
4760 if inVDSOPage(pc) {
4761 pc = funcPC(_VDSO) + sys.PCQuantum
4762 } else if pc > firstmoduledata.etext {
4763
4764 pc = funcPC(_ExternalCode) + sys.PCQuantum
4765 }
4766 stk[0] = pc
4767 if mp.preemptoff != "" {
4768 stk[1] = funcPC(_GC) + sys.PCQuantum
4769 } else {
4770 stk[1] = funcPC(_System) + sys.PCQuantum
4771 }
4772 }
4773 }
4774
4775 if prof.hz != 0 {
4776 cpuprof.add(gp, stk[:n])
4777 }
4778 getg().m.mallocing--
4779 }
4780
4781
4782
4783
4784 var sigprofCallers cgoCallers
4785 var sigprofCallersUse uint32
4786
4787
4788
4789
4790
4791
4792
4793 func sigprofNonGo() {
4794 if prof.hz != 0 {
4795 n := 0
4796 for n < len(sigprofCallers) && sigprofCallers[n] != 0 {
4797 n++
4798 }
4799 cpuprof.addNonGo(sigprofCallers[:n])
4800 }
4801
4802 atomic.Store(&sigprofCallersUse, 0)
4803 }
4804
4805
4806
4807
4808
4809
4810 func sigprofNonGoPC(pc uintptr) {
4811 if prof.hz != 0 {
4812 stk := []uintptr{
4813 pc,
4814 funcPC(_ExternalCode) + sys.PCQuantum,
4815 }
4816 cpuprof.addNonGo(stk)
4817 }
4818 }
4819
4820
4821
4822 func setcpuprofilerate(hz int32) {
4823
4824 if hz < 0 {
4825 hz = 0
4826 }
4827
4828
4829
4830 _g_ := getg()
4831 _g_.m.locks++
4832
4833
4834
4835
4836 setThreadCPUProfiler(0)
4837
4838 for !atomic.Cas(&prof.signalLock, 0, 1) {
4839 osyield()
4840 }
4841 if prof.hz != hz {
4842 setProcessCPUProfiler(hz)
4843 prof.hz = hz
4844 }
4845 atomic.Store(&prof.signalLock, 0)
4846
4847 lock(&sched.lock)
4848 sched.profilehz = hz
4849 unlock(&sched.lock)
4850
4851 if hz != 0 {
4852 setThreadCPUProfiler(hz)
4853 }
4854
4855 _g_.m.locks--
4856 }
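
setcpuprofilerate is normally reached through runtime/pprof rather than called directly. A minimal sketch of the usual entry point (cpu.out is an arbitrary file name), which turns on the SIGPROF sampler feeding sigprof above:

package main

import (
	"os"
	"runtime/pprof"
)

func main() {
	f, err := os.Create("cpu.out")
	if err != nil {
		panic(err)
	}
	defer f.Close()
	// StartCPUProfile sets a 100 Hz sampling rate, which reaches
	// setcpuprofilerate via runtime.SetCPUProfileRate.
	if err := pprof.StartCPUProfile(f); err != nil {
		panic(err)
	}
	defer pprof.StopCPUProfile()
	// ... workload to profile ...
}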
4857
4858
4859
4860 func (pp *p) init(id int32) {
4861 pp.id = id
4862 pp.status = _Pgcstop
4863 pp.sudogcache = pp.sudogbuf[:0]
4864 for i := range pp.deferpool {
4865 pp.deferpool[i] = pp.deferpoolbuf[i][:0]
4866 }
4867 pp.wbBuf.reset()
4868 if pp.mcache == nil {
4869 if id == 0 {
4870 if mcache0 == nil {
4871 throw("missing mcache?")
4872 }
4873
4874
4875 pp.mcache = mcache0
4876 } else {
4877 pp.mcache = allocmcache()
4878 }
4879 }
4880 if raceenabled && pp.raceprocctx == 0 {
4881 if id == 0 {
4882 pp.raceprocctx = raceprocctx0
4883 raceprocctx0 = 0
4884 } else {
4885 pp.raceprocctx = raceproccreate()
4886 }
4887 }
4888 lockInit(&pp.timersLock, lockRankTimers)
4889
4890
4891
4892 timerpMask.set(id)
4893
4894
4895 idlepMask.clear(id)
4896 }
4897
4898
4899
4900
4901
4902 func (pp *p) destroy() {
4903 assertLockHeld(&sched.lock)
4904 assertWorldStopped()
4905
4906
4907 for pp.runqhead != pp.runqtail {
4908
4909 pp.runqtail--
4910 gp := pp.runq[pp.runqtail%uint32(len(pp.runq))].ptr()
4911
4912 globrunqputhead(gp)
4913 }
4914 if pp.runnext != 0 {
4915 globrunqputhead(pp.runnext.ptr())
4916 pp.runnext = 0
4917 }
4918 if len(pp.timers) > 0 {
4919 plocal := getg().m.p.ptr()
4920
4921
4922
4923
4924 lock(&plocal.timersLock)
4925 lock(&pp.timersLock)
4926 moveTimers(plocal, pp.timers)
4927 pp.timers = nil
4928 pp.numTimers = 0
4929 pp.deletedTimers = 0
4930 atomic.Store64(&pp.timer0When, 0)
4931 unlock(&pp.timersLock)
4932 unlock(&plocal.timersLock)
4933 }
4934
4935 if gcphase != _GCoff {
4936 wbBufFlush1(pp)
4937 pp.gcw.dispose()
4938 }
4939 for i := range pp.sudogbuf {
4940 pp.sudogbuf[i] = nil
4941 }
4942 pp.sudogcache = pp.sudogbuf[:0]
4943 for i := range pp.deferpool {
4944 for j := range pp.deferpoolbuf[i] {
4945 pp.deferpoolbuf[i][j] = nil
4946 }
4947 pp.deferpool[i] = pp.deferpoolbuf[i][:0]
4948 }
4949 systemstack(func() {
4950 for i := 0; i < pp.mspancache.len; i++ {
4951
4952 mheap_.spanalloc.free(unsafe.Pointer(pp.mspancache.buf[i]))
4953 }
4954 pp.mspancache.len = 0
4955 lock(&mheap_.lock)
4956 pp.pcache.flush(&mheap_.pages)
4957 unlock(&mheap_.lock)
4958 })
4959 freemcache(pp.mcache)
4960 pp.mcache = nil
4961 gfpurge(pp)
4962 traceProcFree(pp)
4963 if raceenabled {
4964 if pp.timerRaceCtx != 0 {
4965
4966
4967
4968
4969
4970 mp := getg().m
4971 phold := mp.p.ptr()
4972 mp.p.set(pp)
4973
4974 racectxend(pp.timerRaceCtx)
4975 pp.timerRaceCtx = 0
4976
4977 mp.p.set(phold)
4978 }
4979 raceprocdestroy(pp.raceprocctx)
4980 pp.raceprocctx = 0
4981 }
4982 pp.gcAssistTime = 0
4983 pp.status = _Pdead
4984 }
4985
4986
4987
4988
4989
4990
4991
4992
4993
4994 func procresize(nprocs int32) *p {
4995 assertLockHeld(&sched.lock)
4996 assertWorldStopped()
4997
4998 old := gomaxprocs
4999 if old < 0 || nprocs <= 0 {
5000 throw("procresize: invalid arg")
5001 }
5002 if trace.enabled {
5003 traceGomaxprocs(nprocs)
5004 }
5005
5006
5007 now := nanotime()
5008 if sched.procresizetime != 0 {
5009 sched.totaltime += int64(old) * (now - sched.procresizetime)
5010 }
5011 sched.procresizetime = now
5012
5013 maskWords := (nprocs + 31) / 32
5014
5015
5016 if nprocs > int32(len(allp)) {
5017
5018
5019 lock(&allpLock)
5020 if nprocs <= int32(cap(allp)) {
5021 allp = allp[:nprocs]
5022 } else {
5023 nallp := make([]*p, nprocs)
5024
5025
5026 copy(nallp, allp[:cap(allp)])
5027 allp = nallp
5028 }
5029
5030 if maskWords <= int32(cap(idlepMask)) {
5031 idlepMask = idlepMask[:maskWords]
5032 timerpMask = timerpMask[:maskWords]
5033 } else {
5034 nidlepMask := make([]uint32, maskWords)
5035
5036 copy(nidlepMask, idlepMask)
5037 idlepMask = nidlepMask
5038
5039 ntimerpMask := make([]uint32, maskWords)
5040 copy(ntimerpMask, timerpMask)
5041 timerpMask = ntimerpMask
5042 }
5043 unlock(&allpLock)
5044 }
5045
5046
5047 for i := old; i < nprocs; i++ {
5048 pp := allp[i]
5049 if pp == nil {
5050 pp = new(p)
5051 }
5052 pp.init(i)
5053 atomicstorep(unsafe.Pointer(&allp[i]), unsafe.Pointer(pp))
5054 }
5055
5056 _g_ := getg()
5057 if _g_.m.p != 0 && _g_.m.p.ptr().id < nprocs {
5058
5059 _g_.m.p.ptr().status = _Prunning
5060 _g_.m.p.ptr().mcache.prepareForSweep()
5061 } else {
5062
5063
5064
5065
5066
5067 if _g_.m.p != 0 {
5068 if trace.enabled {
5069
5070
5071
5072 traceGoSched()
5073 traceProcStop(_g_.m.p.ptr())
5074 }
5075 _g_.m.p.ptr().m = 0
5076 }
5077 _g_.m.p = 0
5078 p := allp[0]
5079 p.m = 0
5080 p.status = _Pidle
5081 acquirep(p)
5082 if trace.enabled {
5083 traceGoStart()
5084 }
5085 }
5086
5087
5088 mcache0 = nil
5089
5090
5091 for i := nprocs; i < old; i++ {
5092 p := allp[i]
5093 p.destroy()
5094
5095 }
5096
5097
5098 if int32(len(allp)) != nprocs {
5099 lock(&allpLock)
5100 allp = allp[:nprocs]
5101 idlepMask = idlepMask[:maskWords]
5102 timerpMask = timerpMask[:maskWords]
5103 unlock(&allpLock)
5104 }
5105
5106 var runnablePs *p
5107 for i := nprocs - 1; i >= 0; i-- {
5108 p := allp[i]
5109 if _g_.m.p.ptr() == p {
5110 continue
5111 }
5112 p.status = _Pidle
5113 if runqempty(p) {
5114 pidleput(p)
5115 } else {
5116 p.m.set(mget())
5117 p.link.set(runnablePs)
5118 runnablePs = p
5119 }
5120 }
5121 stealOrder.reset(uint32(nprocs))
5122 var int32p *int32 = &gomaxprocs
5123 atomic.Store((*uint32)(unsafe.Pointer(int32p)), uint32(nprocs))
5124 return runnablePs
5125 }
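
procresize runs with the world stopped; from user code it is reached through runtime.GOMAXPROCS, sketched below.

package main

import (
	"fmt"
	"runtime"
)

func main() {
	// Changing GOMAXPROCS stops the world and calls procresize(4).
	prev := runtime.GOMAXPROCS(4)
	fmt.Println("previous:", prev)
	// An argument of 0 only queries the current value.
	fmt.Println("current:", runtime.GOMAXPROCS(0))
}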
5126
5127
5128
5129
5130
5131
5132
5133 func acquirep(_p_ *p) {
5134
5135 wirep(_p_)
5136
5137
5138
5139
5140
5141 _p_.mcache.prepareForSweep()
5142
5143 if trace.enabled {
5144 traceProcStart()
5145 }
5146 }
5147
5148
5149
5150
5151
5152
5153
5154 func wirep(_p_ *p) {
5155 _g_ := getg()
5156
5157 if _g_.m.p != 0 {
5158 throw("wirep: already in go")
5159 }
5160 if _p_.m != 0 || _p_.status != _Pidle {
5161 id := int64(0)
5162 if _p_.m != 0 {
5163 id = _p_.m.ptr().id
5164 }
5165 print("wirep: p->m=", _p_.m, "(", id, ") p->status=", _p_.status, "\n")
5166 throw("wirep: invalid p state")
5167 }
5168 _g_.m.p.set(_p_)
5169 _p_.m.set(_g_.m)
5170 _p_.status = _Prunning
5171 }
5172
5173
5174 func releasep() *p {
5175 _g_ := getg()
5176
5177 if _g_.m.p == 0 {
5178 throw("releasep: invalid arg")
5179 }
5180 _p_ := _g_.m.p.ptr()
5181 if _p_.m.ptr() != _g_.m || _p_.status != _Prunning {
5182 print("releasep: m=", _g_.m, " m->p=", _g_.m.p.ptr(), " p->m=", hex(_p_.m), " p->status=", _p_.status, "\n")
5183 throw("releasep: invalid p state")
5184 }
5185 if trace.enabled {
5186 traceProcStop(_g_.m.p.ptr())
5187 }
5188 _g_.m.p = 0
5189 _p_.m = 0
5190 _p_.status = _Pidle
5191 return _p_
5192 }
5193
5194 func incidlelocked(v int32) {
5195 lock(&sched.lock)
5196 sched.nmidlelocked += v
5197 if v > 0 {
5198 checkdead()
5199 }
5200 unlock(&sched.lock)
5201 }
5202
5203
5204
5205
5206 func checkdead() {
5207 assertLockHeld(&sched.lock)
5208
5209
5210
5211
5212 if islibrary || isarchive {
5213 return
5214 }
5215
5216
5217
5218
5219
5220 if panicking > 0 {
5221 return
5222 }
5223
5224
5225
5226
5227
5228 var run0 int32
5229 if !iscgo && cgoHasExtraM {
5230 mp := lockextra(true)
5231 haveExtraM := extraMCount > 0
5232 unlockextra(mp)
5233 if haveExtraM {
5234 run0 = 1
5235 }
5236 }
5237
5238 run := mcount() - sched.nmidle - sched.nmidlelocked - sched.nmsys
5239 if run > run0 {
5240 return
5241 }
5242 if run < 0 {
5243 print("runtime: checkdead: nmidle=", sched.nmidle, " nmidlelocked=", sched.nmidlelocked, " mcount=", mcount(), " nmsys=", sched.nmsys, "\n")
5244 throw("checkdead: inconsistent counts")
5245 }
5246
5247 grunning := 0
5248 forEachG(func(gp *g) {
5249 if isSystemGoroutine(gp, false) {
5250 return
5251 }
5252 s := readgstatus(gp)
5253 switch s &^ _Gscan {
5254 case _Gwaiting,
5255 _Gpreempted:
5256 grunning++
5257 case _Grunnable,
5258 _Grunning,
5259 _Gsyscall:
5260 print("runtime: checkdead: find g ", gp.goid, " in status ", s, "\n")
5261 throw("checkdead: runnable g")
5262 }
5263 })
5264 if grunning == 0 {
5265 unlock(&sched.lock)
5266 throw("no goroutines (main called runtime.Goexit) - deadlock!")
5267 }
5268
5269
5270 if faketime != 0 {
5271 when, _p_ := timeSleepUntil()
5272 if _p_ != nil {
5273 faketime = when
5274 for pp := &sched.pidle; *pp != 0; pp = &(*pp).ptr().link {
5275 if (*pp).ptr() == _p_ {
5276 *pp = _p_.link
5277 break
5278 }
5279 }
5280 mp := mget()
5281 if mp == nil {
5282
5283
5284 throw("checkdead: no m for timer")
5285 }
5286 mp.nextp.set(_p_)
5287 notewakeup(&mp.park)
5288 return
5289 }
5290 }
5291
5292
5293 for _, _p_ := range allp {
5294 if len(_p_.timers) > 0 {
5295 return
5296 }
5297 }
5298
5299 getg().m.throwing = -1
5300 unlock(&sched.lock)
5301 throw("all goroutines are asleep - deadlock!")
5302 }
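
The final throw above is the familiar runtime deadlock error. A minimal program that trips it:

package main

func main() {
	ch := make(chan int)
	// No other goroutine exists to send, so the main goroutine blocks
	// forever and checkdead reports:
	//   fatal error: all goroutines are asleep - deadlock!
	<-ch
}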
5303
5304
5305
5306
5307
5308
5309 var forcegcperiod int64 = 2 * 60 * 1e9
5310
5311
5312
5313
5314 func sysmon() {
5315 lock(&sched.lock)
5316 sched.nmsys++
5317 checkdead()
5318 unlock(&sched.lock)
5319
5320
5321
5322 atomic.Store(&sched.sysmonStarting, 0)
5323
5324 lasttrace := int64(0)
5325 idle := 0
5326 delay := uint32(0)
5327
5328 for {
5329 if idle == 0 {
5330 delay = 20
5331 } else if idle > 50 {
5332 delay *= 2
5333 }
5334 if delay > 10*1000 {
5335 delay = 10 * 1000
5336 }
5337 usleep(delay)
5338 mDoFixup()
5339
5340
5341
5342
5343
5344
5345
5346
5347
5348
5349
5350
5351
5352
5353
5354
5355 now := nanotime()
5356 if debug.schedtrace <= 0 && (sched.gcwaiting != 0 || atomic.Load(&sched.npidle) == uint32(gomaxprocs)) {
5357 lock(&sched.lock)
5358 if atomic.Load(&sched.gcwaiting) != 0 || atomic.Load(&sched.npidle) == uint32(gomaxprocs) {
5359 syscallWake := false
5360 next, _ := timeSleepUntil()
5361 if next > now {
5362 atomic.Store(&sched.sysmonwait, 1)
5363 unlock(&sched.lock)
5364
5365
5366 sleep := forcegcperiod / 2
5367 if next-now < sleep {
5368 sleep = next - now
5369 }
5370 shouldRelax := sleep >= osRelaxMinNS
5371 if shouldRelax {
5372 osRelax(true)
5373 }
5374 syscallWake = notetsleep(&sched.sysmonnote, sleep)
5375 mDoFixup()
5376 if shouldRelax {
5377 osRelax(false)
5378 }
5379 lock(&sched.lock)
5380 atomic.Store(&sched.sysmonwait, 0)
5381 noteclear(&sched.sysmonnote)
5382 }
5383 if syscallWake {
5384 idle = 0
5385 delay = 20
5386 }
5387 }
5388 unlock(&sched.lock)
5389 }
5390
5391 lock(&sched.sysmonlock)
5392
5393
5394 now = nanotime()
5395
5396
5397 if *cgo_yield != nil {
5398 asmcgocall(*cgo_yield, nil)
5399 }
5400
5401 lastpoll := int64(atomic.Load64(&sched.lastpoll))
5402 if netpollinited() && lastpoll != 0 && lastpoll+10*1000*1000 < now {
5403 atomic.Cas64(&sched.lastpoll, uint64(lastpoll), uint64(now))
5404 list := netpoll(0)
5405 if !list.empty() {
5406
5407
5408
5409
5410
5411
5412
5413 incidlelocked(-1)
5414 injectglist(&list)
5415 incidlelocked(1)
5416 }
5417 }
5418 mDoFixup()
5419 if GOOS == "netbsd" {
5420
5421
5422
5423
5424
5425
5426
5427
5428
5429
5430
5431
5432
5433
5434
5435 if next, _ := timeSleepUntil(); next < now {
5436 startm(nil, false)
5437 }
5438 }
5439 if atomic.Load(&scavenge.sysmonWake) != 0 {
5440
5441 wakeScavenger()
5442 }
5443
5444
5445 if retake(now) != 0 {
5446 idle = 0
5447 } else {
5448 idle++
5449 }
5450
5451 if t := (gcTrigger{kind: gcTriggerTime, now: now}); t.test() && atomic.Load(&forcegc.idle) != 0 {
5452 lock(&forcegc.lock)
5453 forcegc.idle = 0
5454 var list gList
5455 list.push(forcegc.g)
5456 injectglist(&list)
5457 unlock(&forcegc.lock)
5458 }
5459 if debug.schedtrace > 0 && lasttrace+int64(debug.schedtrace)*1000000 <= now {
5460 lasttrace = now
5461 schedtrace(debug.scheddetail > 0)
5462 }
5463 unlock(&sched.sysmonlock)
5464 }
5465 }
5466
5467 type sysmontick struct {
5468 schedtick uint32
5469 schedwhen int64
5470 syscalltick uint32
5471 syscallwhen int64
5472 }
5473
5474
5475
5476 const forcePreemptNS = 10 * 1000 * 1000
5477
5478 func retake(now int64) uint32 {
5479 n := 0
5480
5481
5482 lock(&allpLock)
5483
5484
5485
5486 for i := 0; i < len(allp); i++ {
5487 _p_ := allp[i]
5488 if _p_ == nil {
5489
5490
5491 continue
5492 }
5493 pd := &_p_.sysmontick
5494 s := _p_.status
5495 sysretake := false
5496 if s == _Prunning || s == _Psyscall {
5497
5498 t := int64(_p_.schedtick)
5499 if int64(pd.schedtick) != t {
5500 pd.schedtick = uint32(t)
5501 pd.schedwhen = now
5502 } else if pd.schedwhen+forcePreemptNS <= now {
5503 preemptone(_p_)
5504
5505
5506 sysretake = true
5507 }
5508 }
5509 if s == _Psyscall {
5510
5511 t := int64(_p_.syscalltick)
5512 if !sysretake && int64(pd.syscalltick) != t {
5513 pd.syscalltick = uint32(t)
5514 pd.syscallwhen = now
5515 continue
5516 }
5517
5518
5519
5520 if runqempty(_p_) && atomic.Load(&sched.nmspinning)+atomic.Load(&sched.npidle) > 0 && pd.syscallwhen+10*1000*1000 > now {
5521 continue
5522 }
5523
5524 unlock(&allpLock)
5525
5526
5527
5528
5529 incidlelocked(-1)
5530 if atomic.Cas(&_p_.status, s, _Pidle) {
5531 if trace.enabled {
5532 traceGoSysBlock(_p_)
5533 traceProcStop(_p_)
5534 }
5535 n++
5536 _p_.syscalltick++
5537 handoffp(_p_)
5538 }
5539 incidlelocked(1)
5540 lock(&allpLock)
5541 }
5542 }
5543 unlock(&allpLock)
5544 return uint32(n)
5545 }
5546
5547
5548
5549
5550
5551
5552 func preemptall() bool {
5553 res := false
5554 for _, _p_ := range allp {
5555 if _p_.status != _Prunning {
5556 continue
5557 }
5558 if preemptone(_p_) {
5559 res = true
5560 }
5561 }
5562 return res
5563 }
5564
5565
5566
5567
5568
5569
5570
5571
5572
5573
5574
5575 func preemptone(_p_ *p) bool {
5576 mp := _p_.m.ptr()
5577 if mp == nil || mp == getg().m {
5578 return false
5579 }
5580 gp := mp.curg
5581 if gp == nil || gp == mp.g0 {
5582 return false
5583 }
5584
5585 gp.preempt = true
5586
5587
5588
5589
5590
5591 gp.stackguard0 = stackPreempt
5592
5593
5594 if preemptMSupported && debug.asyncpreemptoff == 0 {
5595 _p_.preempt = true
5596 preemptM(mp)
5597 }
5598
5599 return true
5600 }
5601
5602 var starttime int64
5603
5604 func schedtrace(detailed bool) {
5605 now := nanotime()
5606 if starttime == 0 {
5607 starttime = now
5608 }
5609
5610 lock(&sched.lock)
5611 print("SCHED ", (now-starttime)/1e6, "ms: gomaxprocs=", gomaxprocs, " idleprocs=", sched.npidle, " threads=", mcount(), " spinningthreads=", sched.nmspinning, " idlethreads=", sched.nmidle, " runqueue=", sched.runqsize)
5612 if detailed {
5613 print(" gcwaiting=", sched.gcwaiting, " nmidlelocked=", sched.nmidlelocked, " stopwait=", sched.stopwait, " sysmonwait=", sched.sysmonwait, "\n")
5614 }
5615
5616
5617
5618 for i, _p_ := range allp {
5619 mp := _p_.m.ptr()
5620 h := atomic.Load(&_p_.runqhead)
5621 t := atomic.Load(&_p_.runqtail)
5622 if detailed {
5623 id := int64(-1)
5624 if mp != nil {
5625 id = mp.id
5626 }
5627 print(" P", i, ": status=", _p_.status, " schedtick=", _p_.schedtick, " syscalltick=", _p_.syscalltick, " m=", id, " runqsize=", t-h, " gfreecnt=", _p_.gFree.n, " timerslen=", len(_p_.timers), "\n")
5628 } else {
5629
5630
5631 print(" ")
5632 if i == 0 {
5633 print("[")
5634 }
5635 print(t - h)
5636 if i == len(allp)-1 {
5637 print("]\n")
5638 }
5639 }
5640 }
5641
5642 if !detailed {
5643 unlock(&sched.lock)
5644 return
5645 }
5646
5647 for mp := allm; mp != nil; mp = mp.alllink {
5648 _p_ := mp.p.ptr()
5649 gp := mp.curg
5650 lockedg := mp.lockedg.ptr()
5651 id1 := int32(-1)
5652 if _p_ != nil {
5653 id1 = _p_.id
5654 }
5655 id2 := int64(-1)
5656 if gp != nil {
5657 id2 = gp.goid
5658 }
5659 id3 := int64(-1)
5660 if lockedg != nil {
5661 id3 = lockedg.goid
5662 }
5663 print(" M", mp.id, ": p=", id1, " curg=", id2, " mallocing=", mp.mallocing, " throwing=", mp.throwing, " preemptoff=", mp.preemptoff, ""+" locks=", mp.locks, " dying=", mp.dying, " spinning=", mp.spinning, " blocked=", mp.blocked, " lockedg=", id3, "\n")
5664 }
5665
5666 forEachG(func(gp *g) {
5667 mp := gp.m
5668 lockedm := gp.lockedm.ptr()
5669 id1 := int64(-1)
5670 if mp != nil {
5671 id1 = mp.id
5672 }
5673 id2 := int64(-1)
5674 if lockedm != nil {
5675 id2 = lockedm.id
5676 }
5677 print(" G", gp.goid, ": status=", readgstatus(gp), "(", gp.waitreason.String(), ") m=", id1, " lockedm=", id2, "\n")
5678 })
5679 unlock(&sched.lock)
5680 }
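
schedtrace has no API; it is driven entirely by the GODEBUG environment variable read in sysmon above, where the flag value is the interval between traces in milliseconds (./prog stands in for any Go binary):

	GODEBUG=schedtrace=1000 ./prog
	GODEBUG=schedtrace=1000,scheddetail=1 ./prog

The first form prints one SCHED summary line per second; the second adds the per-P, per-M, and per-G lines produced by the detailed branch.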
5681
5682
5683
5684
5685
5686
5687 func schedEnableUser(enable bool) {
5688 lock(&sched.lock)
5689 if sched.disable.user == !enable {
5690 unlock(&sched.lock)
5691 return
5692 }
5693 sched.disable.user = !enable
5694 if enable {
5695 n := sched.disable.n
5696 sched.disable.n = 0
5697 globrunqputbatch(&sched.disable.runnable, n)
5698 unlock(&sched.lock)
5699 for ; n != 0 && sched.npidle != 0; n-- {
5700 startm(nil, false)
5701 }
5702 } else {
5703 unlock(&sched.lock)
5704 }
5705 }
5706
5707
5708
5709
5710
5711 func schedEnabled(gp *g) bool {
5712 assertLockHeld(&sched.lock)
5713
5714 if sched.disable.user {
5715 return isSystemGoroutine(gp, true)
5716 }
5717 return true
5718 }
5719
5720
5721
5722
5723
5724 func mput(mp *m) {
5725 assertLockHeld(&sched.lock)
5726
5727 mp.schedlink = sched.midle
5728 sched.midle.set(mp)
5729 sched.nmidle++
5730 checkdead()
5731 }
5732
5733
5734
5735
5736
5737 func mget() *m {
5738 assertLockHeld(&sched.lock)
5739
5740 mp := sched.midle.ptr()
5741 if mp != nil {
5742 sched.midle = mp.schedlink
5743 sched.nmidle--
5744 }
5745 return mp
5746 }
5747
5748
5749
5750
5751
5752 func globrunqput(gp *g) {
5753 assertLockHeld(&sched.lock)
5754
5755 sched.runq.pushBack(gp)
5756 sched.runqsize++
5757 }
5758
5759
5760
5761
5762
5763 func globrunqputhead(gp *g) {
5764 assertLockHeld(&sched.lock)
5765
5766 sched.runq.push(gp)
5767 sched.runqsize++
5768 }
5769
5770
5771
5772
5773
5774
5775 func globrunqputbatch(batch *gQueue, n int32) {
5776 assertLockHeld(&sched.lock)
5777
5778 sched.runq.pushBackAll(*batch)
5779 sched.runqsize += n
5780 *batch = gQueue{}
5781 }
5782
5783
5784
5785 func globrunqget(_p_ *p, max int32) *g {
5786 assertLockHeld(&sched.lock)
5787
5788 if sched.runqsize == 0 {
5789 return nil
5790 }
5791
5792 n := sched.runqsize/gomaxprocs + 1
5793 if n > sched.runqsize {
5794 n = sched.runqsize
5795 }
5796 if max > 0 && n > max {
5797 n = max
5798 }
5799 if n > int32(len(_p_.runq))/2 {
5800 n = int32(len(_p_.runq)) / 2
5801 }
5802
5803 sched.runqsize -= n
5804
5805 gp := sched.runq.pop()
5806 n--
5807 for ; n > 0; n-- {
5808 gp1 := sched.runq.pop()
5809 runqput(_p_, gp1, false)
5810 }
5811 return gp
5812 }
5813
5814
5815 type pMask []uint32
5816
5817
5818 func (p pMask) read(id uint32) bool {
5819 word := id / 32
5820 mask := uint32(1) << (id % 32)
5821 return (atomic.Load(&p[word]) & mask) != 0
5822 }
5823
5824
5825 func (p pMask) set(id int32) {
5826 word := id / 32
5827 mask := uint32(1) << (id % 32)
5828 atomic.Or(&p[word], mask)
5829 }
5830
5831
5832 func (p pMask) clear(id int32) {
5833 word := id / 32
5834 mask := uint32(1) << (id % 32)
5835 atomic.And(&p[word], ^mask)
5836 }
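
A standalone sketch of the indexing shared by all three methods: id/32 selects the word and 1<<(id%32) the bit within it.

package main

import "fmt"

func main() {
	const id = 37
	word := id / 32                // -> 1: second word of the mask
	mask := uint32(1) << (id % 32) // -> 1<<5
	fmt.Printf("P %d: word %d, mask %#08x\n", id, word, mask)
	// prints: P 37: word 1, mask 0x000020
}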
5837
5838
5839
5840
5841
5842
5843
5844
5845
5846
5847
5848
5849
5850
5851
5852
5853
5854
5855
5856
5857
5858
5859
5860
5861
5862
5863 func updateTimerPMask(pp *p) {
5864 if atomic.Load(&pp.numTimers) > 0 {
5865 return
5866 }
5867
5868
5869
5870
5871 lock(&pp.timersLock)
5872 if atomic.Load(&pp.numTimers) == 0 {
5873 timerpMask.clear(pp.id)
5874 }
5875 unlock(&pp.timersLock)
5876 }
5877
5878
5879
5880
5881
5882
5883
5884
5885
5886
5887 func pidleput(_p_ *p) {
5888 assertLockHeld(&sched.lock)
5889
5890 if !runqempty(_p_) {
5891 throw("pidleput: P has non-empty run queue")
5892 }
5893 updateTimerPMask(_p_)
5894 idlepMask.set(_p_.id)
5895 _p_.link = sched.pidle
5896 sched.pidle.set(_p_)
5897 atomic.Xadd(&sched.npidle, 1)
5898 }
5899
5900
5901
5902
5903
5904
5905
5906 func pidleget() *p {
5907 assertLockHeld(&sched.lock)
5908
5909 _p_ := sched.pidle.ptr()
5910 if _p_ != nil {
5911
5912 timerpMask.set(_p_.id)
5913 idlepMask.clear(_p_.id)
5914 sched.pidle = _p_.link
5915 atomic.Xadd(&sched.npidle, -1)
5916 }
5917 return _p_
5918 }
5919
5920
5921
5922 func runqempty(_p_ *p) bool {
5923
5924
5925
5926
5927 for {
5928 head := atomic.Load(&_p_.runqhead)
5929 tail := atomic.Load(&_p_.runqtail)
5930 runnext := atomic.Loaduintptr((*uintptr)(unsafe.Pointer(&_p_.runnext)))
5931 if tail == atomic.Load(&_p_.runqtail) {
5932 return head == tail && runnext == 0
5933 }
5934 }
5935 }
5936
5937
5938
5939
5940
5941
5942
5943
5944
5945
5946 const randomizeScheduler = raceenabled
5947
5948
5949
5950
5951
5952
5953 func runqput(_p_ *p, gp *g, next bool) {
5954 if randomizeScheduler && next && fastrand()%2 == 0 {
5955 next = false
5956 }
5957
5958 if next {
5959 retryNext:
5960 oldnext := _p_.runnext
5961 if !_p_.runnext.cas(oldnext, guintptr(unsafe.Pointer(gp))) {
5962 goto retryNext
5963 }
5964 if oldnext == 0 {
5965 return
5966 }
5967
5968 gp = oldnext.ptr()
5969 }
5970
5971 retry:
5972 h := atomic.LoadAcq(&_p_.runqhead)
5973 t := _p_.runqtail
5974 if t-h < uint32(len(_p_.runq)) {
5975 _p_.runq[t%uint32(len(_p_.runq))].set(gp)
5976 atomic.StoreRel(&_p_.runqtail, t+1)
5977 return
5978 }
5979 if runqputslow(_p_, gp, h, t) {
5980 return
5981 }
5982
5983 goto retry
5984 }
5985
5986
5987
5988 func runqputslow(_p_ *p, gp *g, h, t uint32) bool {
5989 var batch [len(_p_.runq)/2 + 1]*g
5990
5991
5992 n := t - h
5993 n = n / 2
5994 if n != uint32(len(_p_.runq)/2) {
5995 throw("runqputslow: queue is not full")
5996 }
5997 for i := uint32(0); i < n; i++ {
5998 batch[i] = _p_.runq[(h+i)%uint32(len(_p_.runq))].ptr()
5999 }
6000 if !atomic.CasRel(&_p_.runqhead, h, h+n) {
6001 return false
6002 }
6003 batch[n] = gp
6004
6005 if randomizeScheduler {
6006 for i := uint32(1); i <= n; i++ {
6007 j := fastrandn(i + 1)
6008 batch[i], batch[j] = batch[j], batch[i]
6009 }
6010 }
6011
6012
6013 for i := uint32(0); i < n; i++ {
6014 batch[i].schedlink.set(batch[i+1])
6015 }
6016 var q gQueue
6017 q.head.set(batch[0])
6018 q.tail.set(batch[n])
6019
6020
6021 lock(&sched.lock)
6022 globrunqputbatch(&q, int32(n+1))
6023 unlock(&sched.lock)
6024 return true
6025 }
6026
6027
6028
6029
6030
6031 func runqputbatch(pp *p, q *gQueue, qsize int) {
6032 h := atomic.LoadAcq(&pp.runqhead)
6033 t := pp.runqtail
6034 n := uint32(0)
6035 for !q.empty() && t-h < uint32(len(pp.runq)) {
6036 gp := q.pop()
6037 pp.runq[t%uint32(len(pp.runq))].set(gp)
6038 t++
6039 n++
6040 }
6041 qsize -= int(n)
6042
6043 if randomizeScheduler {
6044 off := func(o uint32) uint32 {
6045 return (pp.runqtail + o) % uint32(len(pp.runq))
6046 }
6047 for i := uint32(1); i < n; i++ {
6048 j := fastrandn(i + 1)
6049 pp.runq[off(i)], pp.runq[off(j)] = pp.runq[off(j)], pp.runq[off(i)]
6050 }
6051 }
6052
6053 atomic.StoreRel(&pp.runqtail, t)
6054 if !q.empty() {
6055 lock(&sched.lock)
6056 globrunqputbatch(q, int32(qsize))
6057 unlock(&sched.lock)
6058 }
6059 }
6060
6061
6062
6063
6064
6065 func runqget(_p_ *p) (gp *g, inheritTime bool) {
6066
6067 for {
6068 next := _p_.runnext
6069 if next == 0 {
6070 break
6071 }
6072 if _p_.runnext.cas(next, 0) {
6073 return next.ptr(), true
6074 }
6075 }
6076
6077 for {
6078 h := atomic.LoadAcq(&_p_.runqhead)
6079 t := _p_.runqtail
6080 if t == h {
6081 return nil, false
6082 }
6083 gp := _p_.runq[h%uint32(len(_p_.runq))].ptr()
6084 if atomic.CasRel(&_p_.runqhead, h, h+1) {
6085 return gp, false
6086 }
6087 }
6088 }
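
The local run queue read here is a fixed-size ring addressed by free-running 32-bit head/tail counters: the slot is counter%len(ring), and unsigned wraparound keeps t-h meaningful even when the counters overflow. A standalone sketch of that arithmetic:

package main

import "fmt"

func main() {
	var ring [4]int
	var head, tail uint32
	push := func(v int) bool {
		if tail-head == uint32(len(ring)) {
			return false // full
		}
		ring[tail%uint32(len(ring))] = v
		tail++
		return true
	}
	pop := func() (int, bool) {
		if head == tail {
			return 0, false // empty
		}
		v := ring[head%uint32(len(ring))]
		head++
		return v, true
	}
	for i := 1; i <= 5; i++ {
		if !push(i) {
			v, _ := pop() // make room, FIFO order
			fmt.Println("evicted", v)
			push(i)
		}
	}
	for v, ok := pop(); ok; v, ok = pop() {
		fmt.Println("pop", v)
	}
}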
6089
6090
6091
6092 func runqdrain(_p_ *p) (drainQ gQueue, n uint32) {
6093 oldNext := _p_.runnext
6094 if oldNext != 0 && _p_.runnext.cas(oldNext, 0) {
6095 drainQ.pushBack(oldNext.ptr())
6096 n++
6097 }
6098
6099 retry:
6100 h := atomic.LoadAcq(&_p_.runqhead)
6101 t := _p_.runqtail
6102 qn := t - h
6103 if qn == 0 {
6104 return
6105 }
6106 if qn > uint32(len(_p_.runq)) {
6107 goto retry
6108 }
6109
6110 if !atomic.CasRel(&_p_.runqhead, h, h+qn) {
6111 goto retry
6112 }
6113
6114
6115
6116
6117
6118
6119
6120
6121 for i := uint32(0); i < qn; i++ {
6122 gp := _p_.runq[(h+i)%uint32(len(_p_.runq))].ptr()
6123 drainQ.pushBack(gp)
6124 n++
6125 }
6126 return
6127 }
6128
6129
6130
6131
6132
6133 func runqgrab(_p_ *p, batch *[256]guintptr, batchHead uint32, stealRunNextG bool) uint32 {
6134 for {
6135 h := atomic.LoadAcq(&_p_.runqhead)
6136 t := atomic.LoadAcq(&_p_.runqtail)
6137 n := t - h
6138 n = n - n/2
6139 if n == 0 {
6140 if stealRunNextG {
6141
6142 if next := _p_.runnext; next != 0 {
6143 if _p_.status == _Prunning {
6144
6145
6146
6147
6148
6149
6150
6151
6152
6153
6154 if GOOS != "windows" {
6155 usleep(3)
6156 } else {
6157
6158
6159
6160 osyield()
6161 }
6162 }
6163 if !_p_.runnext.cas(next, 0) {
6164 continue
6165 }
6166 batch[batchHead%uint32(len(batch))] = next
6167 return 1
6168 }
6169 }
6170 return 0
6171 }
6172 if n > uint32(len(_p_.runq)/2) {
6173 continue
6174 }
6175 for i := uint32(0); i < n; i++ {
6176 g := _p_.runq[(h+i)%uint32(len(_p_.runq))]
6177 batch[(batchHead+i)%uint32(len(batch))] = g
6178 }
6179 if atomic.CasRel(&_p_.runqhead, h, h+n) {
6180 return n
6181 }
6182 }
6183 }
6184
6185
6186
6187
6188 func runqsteal(_p_, p2 *p, stealRunNextG bool) *g {
6189 t := _p_.runqtail
6190 n := runqgrab(p2, &_p_.runq, t, stealRunNextG)
6191 if n == 0 {
6192 return nil
6193 }
6194 n--
6195 gp := _p_.runq[(t+n)%uint32(len(_p_.runq))].ptr()
6196 if n == 0 {
6197 return gp
6198 }
6199 h := atomic.LoadAcq(&_p_.runqhead)
6200 if t-h+n >= uint32(len(_p_.runq)) {
6201 throw("runqsteal: runq overflow")
6202 }
6203 atomic.StoreRel(&_p_.runqtail, t+n)
6204 return gp
6205 }
6206
6207
6208
6209 type gQueue struct {
6210 head guintptr
6211 tail guintptr
6212 }
6213
6214
6215 func (q *gQueue) empty() bool {
6216 return q.head == 0
6217 }
6218
6219
6220 func (q *gQueue) push(gp *g) {
6221 gp.schedlink = q.head
6222 q.head.set(gp)
6223 if q.tail == 0 {
6224 q.tail.set(gp)
6225 }
6226 }
6227
6228
6229 func (q *gQueue) pushBack(gp *g) {
6230 gp.schedlink = 0
6231 if q.tail != 0 {
6232 q.tail.ptr().schedlink.set(gp)
6233 } else {
6234 q.head.set(gp)
6235 }
6236 q.tail.set(gp)
6237 }
6238
6239
6240
6241 func (q *gQueue) pushBackAll(q2 gQueue) {
6242 if q2.tail == 0 {
6243 return
6244 }
6245 q2.tail.ptr().schedlink = 0
6246 if q.tail != 0 {
6247 q.tail.ptr().schedlink = q2.head
6248 } else {
6249 q.head = q2.head
6250 }
6251 q.tail = q2.tail
6252 }
6253
6254
6255
6256 func (q *gQueue) pop() *g {
6257 gp := q.head.ptr()
6258 if gp != nil {
6259 q.head = gp.schedlink
6260 if q.head == 0 {
6261 q.tail = 0
6262 }
6263 }
6264 return gp
6265 }
6266
6267
6268 func (q *gQueue) popList() gList {
6269 stack := gList{q.head}
6270 *q = gQueue{}
6271 return stack
6272 }
6273
6274
6275
6276 type gList struct {
6277 head guintptr
6278 }
6279
6280
6281 func (l *gList) empty() bool {
6282 return l.head == 0
6283 }
6284
6285
6286 func (l *gList) push(gp *g) {
6287 gp.schedlink = l.head
6288 l.head.set(gp)
6289 }
6290
6291
6292 func (l *gList) pushAll(q gQueue) {
6293 if !q.empty() {
6294 q.tail.ptr().schedlink = l.head
6295 l.head = q.head
6296 }
6297 }
6298
6299
6300 func (l *gList) pop() *g {
6301 gp := l.head.ptr()
6302 if gp != nil {
6303 l.head = gp.schedlink
6304 }
6305 return gp
6306 }
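
gQueue and gList are intrusive containers: the link field lives in the g itself (schedlink), so pushing and popping never allocate and a g moves between queues by rewriting one pointer. A standalone sketch of the same shape with ordinary pointers:

package main

import "fmt"

type node struct {
	val  int
	next *node // plays the role of g.schedlink
}

type queue struct{ head, tail *node }

func (q *queue) pushBack(n *node) {
	n.next = nil
	if q.tail != nil {
		q.tail.next = n
	} else {
		q.head = n
	}
	q.tail = n
}

func (q *queue) pop() *node {
	n := q.head
	if n != nil {
		q.head = n.next
		if q.head == nil {
			q.tail = nil
		}
	}
	return n
}

func main() {
	var q queue
	for i := 1; i <= 3; i++ {
		q.pushBack(&node{val: i})
	}
	for n := q.pop(); n != nil; n = q.pop() {
		fmt.Print(n.val, " ") // 1 2 3
	}
	fmt.Println()
}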
6307
6308
6309 func setMaxThreads(in int) (out int) {
6310 lock(&sched.lock)
6311 out = int(sched.maxmcount)
6312 if in > 0x7fffffff {
6313 sched.maxmcount = 0x7fffffff
6314 } else {
6315 sched.maxmcount = int32(in)
6316 }
6317 checkmcount()
6318 unlock(&sched.lock)
6319 return
6320 }
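
setMaxThreads backs runtime/debug.SetMaxThreads; the sketch below raises the limit and prints the previous one (10000 by default).

package main

import (
	"fmt"
	"runtime/debug"
)

func main() {
	prev := debug.SetMaxThreads(20000)
	fmt.Println("previous limit:", prev) // 10000 unless already changed
}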
6321
6322
6323 func procPin() int {
6324 _g_ := getg()
6325 mp := _g_.m
6326
6327 mp.locks++
6328 return int(mp.p.ptr().id)
6329 }
6330
6331
6332 func procUnpin() {
6333 _g_ := getg()
6334 _g_.m.locks--
6335 }
6336
6337
6338
6339 func sync_runtime_procPin() int {
6340 return procPin()
6341 }
6342
6343
6344
6345 func sync_runtime_procUnpin() {
6346 procUnpin()
6347 }
6348
6349
6350
6351 func sync_atomic_runtime_procPin() int {
6352 return procPin()
6353 }
6354
6355
6356
6357 func sync_atomic_runtime_procUnpin() {
6358 procUnpin()
6359 }
6360
6361
6362
6363
6364 func sync_runtime_canSpin(i int) bool {
6365
6366
6367
6368
6369
6370 if i >= active_spin || ncpu <= 1 || gomaxprocs <= int32(sched.npidle+sched.nmspinning)+1 {
6371 return false
6372 }
6373 if p := getg().m.p.ptr(); !runqempty(p) {
6374 return false
6375 }
6376 return true
6377 }
6378
6379
6380
6381 func sync_runtime_doSpin() {
6382 procyield(active_spin_cnt)
6383 }
6384
6385 var stealOrder randomOrder
6386
6387
6388
6389
6390
6391 type randomOrder struct {
6392 count uint32
6393 coprimes []uint32
6394 }
6395
6396 type randomEnum struct {
6397 i uint32
6398 count uint32
6399 pos uint32
6400 inc uint32
6401 }
6402
6403 func (ord *randomOrder) reset(count uint32) {
6404 ord.count = count
6405 ord.coprimes = ord.coprimes[:0]
6406 for i := uint32(1); i <= count; i++ {
6407 if gcd(i, count) == 1 {
6408 ord.coprimes = append(ord.coprimes, i)
6409 }
6410 }
6411 }
6412
6413 func (ord *randomOrder) start(i uint32) randomEnum {
6414 return randomEnum{
6415 count: ord.count,
6416 pos: i % ord.count,
6417 inc: ord.coprimes[i%uint32(len(ord.coprimes))],
6418 }
6419 }
6420
6421 func (enum *randomEnum) done() bool {
6422 return enum.i == enum.count
6423 }
6424
6425 func (enum *randomEnum) next() {
6426 enum.i++
6427 enum.pos = (enum.pos + enum.inc) % enum.count
6428 }
6429
6430 func (enum *randomEnum) position() uint32 {
6431 return enum.pos
6432 }
6433
6434 func gcd(a, b uint32) uint32 {
6435 for b != 0 {
6436 a, b = b, a%b
6437 }
6438 return a
6439 }
6440
6441
6442
6443 type initTask struct {
6444
6445 state uintptr
6446 ndeps uintptr
6447 nfns uintptr
6448
6449
6450 }
6451
6452
6453
6454 var inittrace tracestat
6455
6456 type tracestat struct {
6457 active bool
6458 id int64
6459 allocs uint64
6460 bytes uint64
6461 }
6462
6463 func doInit(t *initTask) {
6464 switch t.state {
6465 case 2:
6466 return
6467 case 1:
6468 throw("recursive call during initialization - linker skew")
6469 default:
6470 t.state = 1
6471
6472 for i := uintptr(0); i < t.ndeps; i++ {
6473 p := add(unsafe.Pointer(t), (3+i)*sys.PtrSize)
6474 t2 := *(**initTask)(p)
6475 doInit(t2)
6476 }
6477
6478 if t.nfns == 0 {
6479 t.state = 2
6480 return
6481 }
6482
6483 var (
6484 start int64
6485 before tracestat
6486 )
6487
6488 if inittrace.active {
6489 start = nanotime()
6490
6491 before = inittrace
6492 }
6493
6494 firstFunc := add(unsafe.Pointer(t), (3+t.ndeps)*sys.PtrSize)
6495 for i := uintptr(0); i < t.nfns; i++ {
6496 p := add(firstFunc, i*sys.PtrSize)
6497 f := *(*func())(unsafe.Pointer(&p))
6498 f()
6499 }
6500
6501 if inittrace.active {
6502 end := nanotime()
6503
6504 after := inittrace
6505
6506 pkg := funcpkgpath(findfunc(funcPC(firstFunc)))
6507
6508 var sbuf [24]byte
6509 print("init ", pkg, " @")
6510 print(string(fmtNSAsMS(sbuf[:], uint64(start-runtimeInitTime))), " ms, ")
6511 print(string(fmtNSAsMS(sbuf[:], uint64(end-start))), " ms clock, ")
6512 print(string(itoa(sbuf[:], after.bytes-before.bytes)), " bytes, ")
6513 print(string(itoa(sbuf[:], after.allocs-before.allocs)), " allocs")
6514 print("\n")
6515 }
6516
6517 t.state = 2
6518 }
6519 }
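
The trace printed above is enabled with GODEBUG=inittrace=1 and emits one line per package init. The exact numbers vary from run to run, but the shape follows the print calls in doInit, e.g.:

	init crypto/tls @1.2 ms, 0.05 ms clock, 8192 bytes, 12 allocs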
6520