Source file src/runtime/lockrank_on.go

// Copyright 2020 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build goexperiment.staticlockranking
// +build goexperiment.staticlockranking

package runtime

import (
	"runtime/internal/atomic"
	"unsafe"
)

// worldIsStopped is accessed atomically to track world-stops. 1 == world
// stopped.
var worldIsStopped uint32

// lockRankStruct is embedded in mutex
type lockRankStruct struct {
	// static lock ranking of the lock
	rank lockRank
	// pad field to make sure lockRankStruct is a multiple of 8 bytes, even on
	// 32-bit systems.
	pad int
}
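
// For illustration only: with the static lock ranking experiment enabled,
// the runtime's mutex (runtime2.go) embeds lockRankStruct roughly like
// this; the exact definition may differ between Go versions:
//
//	type mutex struct {
//		// Empty struct if lock ranking is disabled, otherwise includes the lock rank
//		lockRankStruct
//		key uintptr
//	}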

func lockInit(l *mutex, rank lockRank) {
	l.rank = rank
}

func getLockRank(l *mutex) lockRank {
	return l.rank
}
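
// For illustration only: statically-ranked locks are registered during
// startup; a representative call (modeled on schedinit in proc.go) is:
//
//	lockInit(&sched.lock, lockRankSched)
//
// A lock that is never passed to lockInit keeps rank 0 and is treated as
// lockRankLeafRank by lockWithRank below.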

// lockWithRank is like lock(l), but allows the caller to specify a lock rank
// when acquiring a non-static lock.
//
// Note that we need to be careful about stack splits:
//
// This function is not nosplit, thus it may split at function entry. This may
// introduce a new edge in the lock order, but it is no different from any
// other (nosplit) call before this call (including the call to lock() itself).
//
// However, we switch to the systemstack to record the lock held to ensure that
// we record an accurate lock ordering. For example, without systemstack, a
// stack split on entry to lock2() would record stack split locks as taken
// after l, even though l is not actually locked yet.
func lockWithRank(l *mutex, rank lockRank) {
	if l == &debuglock || l == &paniclk {
		// debuglock is only used for println/printlock(). Don't do lock
		// rank recording for it, since print/println are used when
		// printing out a lock ordering problem below.
		//
		// paniclk is only used for fatal throw/panic. Don't do lock
		// rank recording for it, since we throw after reporting a
		// lock ordering problem. Additionally, paniclk may be taken
		// after effectively any lock (anywhere we might panic), which
		// the partial order doesn't cover.
		lock2(l)
		return
	}
	if rank == 0 {
		rank = lockRankLeafRank
	}
	gp := getg()
	// Log the new class.
	systemstack(func() {
		i := gp.m.locksHeldLen
		if i >= len(gp.m.locksHeld) {
			throw("too many locks held concurrently for rank checking")
		}
		gp.m.locksHeld[i].rank = rank
		gp.m.locksHeld[i].lockAddr = uintptr(unsafe.Pointer(l))
		gp.m.locksHeldLen++

		// i is the index of the lock being acquired
		if i > 0 {
			checkRanks(gp, gp.m.locksHeld[i-1].rank, rank)
		}
		lock2(l)
	})
}
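
// For illustration only: the ordinary lock() entry point (lock_sema.go and
// lock_futex.go) funnels into lockWithRank using the mutex's static rank,
// roughly:
//
//	func lock(l *mutex) {
//		lockWithRank(l, getLockRank(l))
//	}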

// nosplit to ensure it can be called in as many contexts as possible.
//go:nosplit
func printHeldLocks(gp *g) {
	if gp.m.locksHeldLen == 0 {
		println("<none>")
		return
	}

	for j, held := range gp.m.locksHeld[:gp.m.locksHeldLen] {
		println(j, ":", held.rank.String(), held.rank, unsafe.Pointer(gp.m.locksHeld[j].lockAddr))
	}
}
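
// For illustration only: each line pairs the held lock's index, rank name,
// numeric rank, and address, so the output is shaped roughly like this
// (the ranks and values here are invented):
//
//	0 : sched 10 0x000000000052a9e0
//	1 : allg 12 0x000000000052b2c0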

// acquireLockRank acquires a rank which is not associated with a mutex lock
//
// This function may be called in nosplit context and thus must be nosplit.
//go:nosplit
func acquireLockRank(rank lockRank) {
	gp := getg()
	// Log the new class. See comment on lockWithRank.
	systemstack(func() {
		i := gp.m.locksHeldLen
		if i >= len(gp.m.locksHeld) {
			throw("too many locks held concurrently for rank checking")
		}
		gp.m.locksHeld[i].rank = rank
		gp.m.locksHeld[i].lockAddr = 0
		gp.m.locksHeldLen++

		// i is the index of the lock being acquired
		if i > 0 {
			checkRanks(gp, gp.m.locksHeld[i-1].rank, rank)
		}
	})
}
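
// For illustration only: a mutex-less rank brackets a critical region that
// behaves like a lock for ordering purposes (the names here are
// hypothetical):
//
//	acquireLockRank(lockRankExample) // recorded with lockAddr == 0
//	exampleCriticalWork()
//	releaseLockRank(lockRankExample) // must match the acquire above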

// checkRanks checks if goroutine g, which has most recently acquired a lock
// with rank 'prevRank', can now acquire a lock with rank 'rank'.
//
//go:systemstack
func checkRanks(gp *g, prevRank, rank lockRank) {
	rankOK := false
	if rank < prevRank {
		// If rank < prevRank, then we definitely have a rank error
		rankOK = false
	} else if rank == lockRankLeafRank {
		// If new lock is a leaf lock, then the preceding lock can
		// be anything except another leaf lock.
		rankOK = prevRank < lockRankLeafRank
	} else {
		// We've now verified the total lock ranking, but we
		// additionally enforce the partial ordering specified by
		// lockPartialOrder. Two locks with the same rank can only be
		// acquired at the same time if explicitly listed in the
		// lockPartialOrder table.
		list := lockPartialOrder[rank]
		for _, entry := range list {
			if entry == prevRank {
				rankOK = true
				break
			}
		}
	}
	if !rankOK {
		printlock()
		println(gp.m.procid, " ======")
		printHeldLocks(gp)
		throw("lock ordering problem")
	}
}
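
// For illustration only: lockPartialOrder (lockrank.go) maps each rank to
// the ranks that may legally be held immediately before it is acquired.
// Given a hypothetical fragment such as:
//
//	lockPartialOrder[lockRankB] = []lockRank{lockRankA}
//
// acquiring a rank-B lock while holding a rank-A lock passes checkRanks,
// but acquiring it while already holding another rank-B lock throws,
// because lockRankB does not list itself.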

// See comment on lockWithRank regarding stack splitting.
func unlockWithRank(l *mutex) {
	if l == &debuglock || l == &paniclk {
		// See comment at beginning of lockWithRank.
		unlock2(l)
		return
	}
	gp := getg()
	systemstack(func() {
		found := false
		for i := gp.m.locksHeldLen - 1; i >= 0; i-- {
			if gp.m.locksHeld[i].lockAddr == uintptr(unsafe.Pointer(l)) {
				found = true
				copy(gp.m.locksHeld[i:gp.m.locksHeldLen-1], gp.m.locksHeld[i+1:gp.m.locksHeldLen])
				gp.m.locksHeldLen--
				break
			}
		}
		if !found {
			println(gp.m.procid, ":", l.rank.String(), l.rank, l)
			throw("unlock without matching lock acquire")
		}
		unlock2(l)
	})
}

// releaseLockRank releases a rank which is not associated with a mutex lock
//
// This function may be called in nosplit context and thus must be nosplit.
//go:nosplit
func releaseLockRank(rank lockRank) {
	gp := getg()
	systemstack(func() {
		found := false
		for i := gp.m.locksHeldLen - 1; i >= 0; i-- {
			if gp.m.locksHeld[i].rank == rank && gp.m.locksHeld[i].lockAddr == 0 {
				found = true
				copy(gp.m.locksHeld[i:gp.m.locksHeldLen-1], gp.m.locksHeld[i+1:gp.m.locksHeldLen])
				gp.m.locksHeldLen--
				break
			}
		}
		if !found {
			println(gp.m.procid, ":", rank.String(), rank)
			throw("lockRank release without matching lockRank acquire")
		}
	})
}

// See comment on lockWithRank regarding stack splitting.
func lockWithRankMayAcquire(l *mutex, rank lockRank) {
	gp := getg()
	if gp.m.locksHeldLen == 0 {
		// No possibility of lock ordering problem if no other locks held
		return
	}

	systemstack(func() {
		i := gp.m.locksHeldLen
		if i >= len(gp.m.locksHeld) {
			throw("too many locks held concurrently for rank checking")
		}
		// Temporarily add this lock to the locksHeld list, so
		// checkRanks() will print out the list, including this lock, if
		// there is a lock ordering problem.
		gp.m.locksHeld[i].rank = rank
		gp.m.locksHeld[i].lockAddr = uintptr(unsafe.Pointer(l))
		gp.m.locksHeldLen++
		checkRanks(gp, gp.m.locksHeld[i-1].rank, rank)
		gp.m.locksHeldLen--
	})
}
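
// For illustration only: lockWithRankMayAcquire records a potential lock
// edge on a path that only sometimes takes the lock, so the ordering is
// checked even on runs where no acquisition happens (sketch; exampleCond
// and exampleLock are hypothetical):
//
//	lockWithRankMayAcquire(&exampleLock, lockRankExample)
//	if exampleCond {
//		lock(&exampleLock)
//		exampleCriticalWork()
//		unlock(&exampleLock)
//	}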

// nosplit to ensure it can be called in as many contexts as possible.
//go:nosplit
func checkLockHeld(gp *g, l *mutex) bool {
	for i := gp.m.locksHeldLen - 1; i >= 0; i-- {
		if gp.m.locksHeld[i].lockAddr == uintptr(unsafe.Pointer(l)) {
			return true
		}
	}
	return false
}

// assertLockHeld throws if l is not held by the caller.
//
// nosplit to ensure it can be called in as many contexts as possible.
//go:nosplit
func assertLockHeld(l *mutex) {
	gp := getg()

	held := checkLockHeld(gp, l)
	if held {
		return
	}

	// Crash from system stack to avoid splits that may cause
	// additional issues.
	systemstack(func() {
		printlock()
		print("caller requires lock ", l, " (rank ", l.rank.String(), "), holding:\n")
		printHeldLocks(gp)
		throw("not holding required lock!")
	})
}
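
// For illustration only: assertLockHeld typically backs a documented
// locking requirement at the top of a function (sketch; the type and
// fields are hypothetical):
//
//	// s.lock must be held.
//	func (s *exampleState) bump() {
//		assertLockHeld(&s.lock)
//		s.counter++
//	}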

// assertRankHeld throws if a mutex with rank r is not held by the caller.
//
// This is less precise than assertLockHeld, but can be used in places where a
// pointer to the exact mutex is not available.
//
// nosplit to ensure it can be called in as many contexts as possible.
//go:nosplit
func assertRankHeld(r lockRank) {
	gp := getg()

	for i := gp.m.locksHeldLen - 1; i >= 0; i-- {
		if gp.m.locksHeld[i].rank == r {
			return
		}
	}

	// Crash from system stack to avoid splits that may cause
	// additional issues.
	systemstack(func() {
		printlock()
		print("caller requires lock with rank ", r.String(), ", holding:\n")
		printHeldLocks(gp)
		throw("not holding required lock!")
	})
}

// worldStopped notes that the world is stopped.
//
// Caller must hold worldsema.
//
// nosplit to ensure it can be called in as many contexts as possible.
//go:nosplit
func worldStopped() {
	if stopped := atomic.Xadd(&worldIsStopped, 1); stopped != 1 {
		systemstack(func() {
			print("world stop count=", stopped, "\n")
			throw("recursive world stop")
		})
	}
}

// worldStarted notes that the world is starting.
//
// Caller must hold worldsema.
//
// nosplit to ensure it can be called in as many contexts as possible.
//go:nosplit
func worldStarted() {
	if stopped := atomic.Xadd(&worldIsStopped, -1); stopped != 0 {
		systemstack(func() {
			print("world stop count=", stopped, "\n")
			throw("released non-stopped world stop")
		})
	}
}
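
// For illustration only: the stop-the-world path in proc.go pairs these
// calls roughly as follows (sketch, simplified from stopTheWorld and
// startTheWorld):
//
//	semacquire(&worldsema)
//	systemstack(func() {
//		stopTheWorldWithSema() // ends by calling worldStopped()
//	})
//	// The world is stopped here; worldIsStopped == 1.
//	systemstack(func() {
//		startTheWorldWithSema(false) // calls worldStarted()
//	})
//	semrelease(&worldsema)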

// nosplit to ensure it can be called in as many contexts as possible.
//go:nosplit
func checkWorldStopped() bool {
	stopped := atomic.Load(&worldIsStopped)
	if stopped > 1 {
		systemstack(func() {
			print("inconsistent world stop count=", stopped, "\n")
			throw("inconsistent world stop count")
		})
	}

	return stopped == 1
}

// assertWorldStopped throws if the world is not stopped. It does not check
// which M stopped the world.
//
// nosplit to ensure it can be called in as many contexts as possible.
//go:nosplit
func assertWorldStopped() {
	if checkWorldStopped() {
		return
	}

	throw("world not stopped")
}

// assertWorldStoppedOrLockHeld throws if the world is not stopped and the
// passed lock is not held.
//
// nosplit to ensure it can be called in as many contexts as possible.
//go:nosplit
func assertWorldStoppedOrLockHeld(l *mutex) {
	if checkWorldStopped() {
		return
	}

	gp := getg()
	held := checkLockHeld(gp, l)
	if held {
		return
	}

	// Crash from system stack to avoid splits that may cause
	// additional issues.
	systemstack(func() {
		printlock()
		print("caller requires world stop or lock ", l, " (rank ", l.rank.String(), "), holding:\n")
		println("<no world stop>")
		printHeldLocks(gp)
		throw("no world stop or required lock!")
	})
}
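
// For illustration only: assertWorldStoppedOrLockHeld suits state that may
// be accessed either under a specific lock or while the world is stopped
// (sketch; the names are hypothetical):
//
//	// exampleStats is protected by exampleLock, or by stopping the world.
//	func readExampleStats() uint64 {
//		assertWorldStoppedOrLockHeld(&exampleLock)
//		return exampleStats
//	}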
