Black Lives Matter. Support the Equal Justice Initiative.

Source file src/runtime/debugcall.go

Documentation: runtime

     1  // Copyright 2018 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  //go:build amd64
     6  // +build amd64
     7  
     8  package runtime
     9  
    10  import "unsafe"
    11  
// Reason strings returned by debugCallCheck when it is not safe to
// inject a debugger function call at the current PC. An empty string
// from debugCallCheck means injection is allowed.
const (
	debugCallSystemStack = "executing on Go runtime stack"
	debugCallUnknownFunc = "call from unknown function"
	debugCallRuntime     = "call from within the Go runtime"
	debugCallUnsafePoint = "call not at safe point"
)
    18  
// debugCallV2 is the entry point for debugger-injected function calls.
// It has no Go body; it is implemented outside this file
// (NOTE(review): presumably in amd64 assembly, given the build tag —
// confirm against asm_amd64.s).
func debugCallV2()

// debugCallPanicked reports to the debugger that the injected call
// panicked with value val (used by debugCallWrap2). It has no Go body;
// implemented outside this file.
func debugCallPanicked(val interface{})
    21  
// debugCallCheck checks whether it is safe to inject a debugger
// function call with return PC pc. If not, it returns a string
// explaining why (one of the debugCall* constants above); it returns
// the empty string when injection is safe.
//
// It must be nosplit because it runs as part of the debugger call
// injection sequence, when the stack may be in an unusual state.
//
//go:nosplit
func debugCallCheck(pc uintptr) string {
	// No user calls from the system stack.
	if getg() != getg().m.curg {
		return debugCallSystemStack
	}
	// Even with the user g current, the SP may be outside the user
	// stack bounds: reject that case too.
	if sp := getcallersp(); !(getg().stack.lo < sp && sp <= getg().stack.hi) {
		// Fast syscalls (nanotime) and racecall switch to the
		// g0 stack without switching g. We can't safely make
		// a call in this state. (We can't even safely
		// systemstack.)
		return debugCallSystemStack
	}

	// Switch to the system stack to avoid overflowing the user
	// stack.
	var ret string
	systemstack(func() {
		// Resolve the function containing pc; if pc is not in any
		// known Go function, injection is unsafe.
		f := findfunc(pc)
		if !f.valid() {
			ret = debugCallUnknownFunc
			return
		}

		name := funcname(f)

		switch name {
		case "debugCall32",
			"debugCall64",
			"debugCall128",
			"debugCall256",
			"debugCall512",
			"debugCall1024",
			"debugCall2048",
			"debugCall4096",
			"debugCall8192",
			"debugCall16384",
			"debugCall32768",
			"debugCall65536":
			// These functions are allowed so that the debugger can initiate multiple function calls.
			// See: https://golang.org/cl/161137/
			return
		}

		// Disallow calls from the runtime. We could
		// potentially make this condition tighter (e.g., not
		// when locks are held), but there are enough tightly
		// coded sequences (e.g., defer handling) that it's
		// better to play it safe.
		if pfx := "runtime."; len(name) > len(pfx) && name[:len(pfx)] == pfx {
			ret = debugCallRuntime
			return
		}

		// Check that this isn't an unsafe-point.
		if pc != f.entry {
			// pc is a return PC; back up by one so the PCDATA
			// lookup falls within the call instruction itself
			// rather than the instruction after it.
			pc--
		}
		up := pcdatavalue(f, _PCDATA_UnsafePoint, pc, nil)
		if up != _PCDATA_UnsafePointSafe {
			// Not at a safe point.
			ret = debugCallUnsafePoint
		}
	})
	return ret
}
    92  
// debugCallWrap starts a new goroutine to run a debug call and blocks
// the calling goroutine. On the goroutine, it prepares to recover
// panics from the debug call, and then calls the call dispatching
// function at PC dispatch.
//
// This must be deeply nosplit because there are untyped values on the
// stack from debugCallV2.
//
//go:nosplit
func debugCallWrap(dispatch uintptr) {
	// Saved M lock state, restored after the call returns.
	var lockedm bool
	var lockedExt uint32
	callerpc := getcallerpc()
	gp := getg()

	// Create a new goroutine to execute the call on. Run this on
	// the system stack to avoid growing our stack.
	systemstack(func() {
		// TODO(mknyszek): It would be nice to wrap these arguments in an allocated
		// closure and start the goroutine with that closure, but the compiler disallows
		// implicit closure allocation in the runtime.
		fn := debugCallWrap1
		// A func value is represented as a pointer to a funcval,
		// so this double-pointer cast extracts the *funcval that
		// newproc1 expects.
		newg := newproc1(*(**funcval)(unsafe.Pointer(&fn)), nil, 0, gp, callerpc)
		// Smuggle the arguments to debugCallWrap1 through the new
		// goroutine's param field (no closure allowed, see above).
		args := &debugCallWrapArgs{
			dispatch: dispatch,
			callingG: gp,
		}
		newg.param = unsafe.Pointer(args)

		// If the current G is locked, then transfer that
		// locked-ness to the new goroutine.
		if gp.lockedm != 0 {
			// Save lock state to restore later.
			mp := gp.m
			if mp != gp.lockedm.ptr() {
				throw("inconsistent lockedm")
			}

			lockedm = true
			lockedExt = mp.lockedExt

			// Transfer external lock count to internal so
			// it can't be unlocked from the debug call.
			mp.lockedInt++
			mp.lockedExt = 0

			mp.lockedg.set(newg)
			newg.lockedm.set(mp)
			gp.lockedm = 0
		}

		// Mark the calling goroutine as being at an async
		// safe-point, since it has a few conservative frames
		// at the bottom of the stack. This also prevents
		// stack shrinks.
		gp.asyncSafePoint = true

		// Stash newg away so we can execute it below (mcall's
		// closure can't capture anything).
		gp.schedlink.set(newg)
	})

	// Switch to the new goroutine.
	mcall(func(gp *g) {
		// Get newg.
		newg := gp.schedlink.ptr()
		gp.schedlink = 0

		// Park the calling goroutine.
		gp.waitreason = waitReasonDebugCall
		if trace.enabled {
			traceGoPark(traceEvGoBlock, 1)
		}
		casgstatus(gp, _Grunning, _Gwaiting)
		dropg()

		// Directly execute the new goroutine. The debug
		// protocol will continue on the new goroutine, so
		// it's important we not just let the scheduler do
		// this or it may resume a different goroutine.
		execute(newg, true)
	})

	// We'll resume here when the call returns.

	// Restore locked state (the mirror of the transfer above).
	if lockedm {
		mp := gp.m
		mp.lockedExt = lockedExt
		mp.lockedInt--
		mp.lockedg.set(gp)
		gp.lockedm.set(mp)
	}

	gp.asyncSafePoint = false
}
   189  
// debugCallWrapArgs carries the arguments from debugCallWrap to
// debugCallWrap1, passed through the new goroutine's param field
// because the runtime cannot allocate an implicit closure.
type debugCallWrapArgs struct {
	dispatch uintptr // PC of the call dispatching function to invoke
	callingG *g      // goroutine that initiated the debug call; parked until the call returns
}
   194  
// debugCallWrap1 is the continuation of debugCallWrap on the callee
// goroutine. It runs the debug call (trapping panics), then hands the
// M back to the goroutine that initiated the call.
func debugCallWrap1() {
	gp := getg()
	// Recover the arguments stashed in gp.param by debugCallWrap,
	// and clear param so it can't be misinterpreted later.
	args := (*debugCallWrapArgs)(gp.param)
	dispatch, callingG := args.dispatch, args.callingG
	gp.param = nil

	// Dispatch call and trap panics.
	debugCallWrap2(dispatch)

	// Resume the caller goroutine.
	getg().schedlink.set(callingG)
	mcall(func(gp *g) {
		callingG := gp.schedlink.ptr()
		gp.schedlink = 0

		// Unlock this goroutine from the M if necessary. The
		// calling G will relock.
		if gp.lockedm != 0 {
			gp.lockedm = 0
			gp.m.lockedg = 0
		}

		// Switch back to the calling goroutine. At some point
		// the scheduler will schedule us again and we'll
		// finish exiting.
		if trace.enabled {
			traceGoSched()
		}
		casgstatus(gp, _Grunning, _Grunnable)
		dropg()
		// Park this (now finished) goroutine on the global run
		// queue so the scheduler can pick it up to finish exiting.
		lock(&sched.lock)
		globrunqput(gp)
		unlock(&sched.lock)

		if trace.enabled {
			traceGoUnpark(callingG, 0)
		}
		// Run the caller immediately on this M rather than letting
		// the scheduler choose, so the debug protocol continues on
		// the original goroutine.
		casgstatus(callingG, _Gwaiting, _Grunnable)
		execute(callingG, true)
	})
}
   238  
// debugCallWrap2 invokes the call dispatching function at PC dispatch,
// reporting any panic from the call to the debugger via
// debugCallPanicked instead of letting it unwind further.
func debugCallWrap2(dispatch uintptr) {
	// Call the dispatch function and trap panics.
	//
	// There is no direct conversion from a raw PC to a Go func
	// value, so manufacture one: build a funcval holding dispatch
	// and store its address into the func-value header.
	var dispatchF func()
	dispatchFV := funcval{dispatch}
	*(*unsafe.Pointer)(unsafe.Pointer(&dispatchF)) = noescape(unsafe.Pointer(&dispatchFV))

	// ok distinguishes normal return from panic: it is set only
	// after dispatchF returns, so the deferred function recovers
	// exactly when the call panicked.
	var ok bool
	defer func() {
		if !ok {
			err := recover()
			debugCallPanicked(err)
		}
	}()
	dispatchF()
	ok = true
}
   255  

View as plain text