Source file src/runtime/race.go

// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build race
// +build race

package runtime

import (
	"unsafe"
)

// Public race detection API, present iff built with -race.

func RaceRead(addr unsafe.Pointer)
func RaceWrite(addr unsafe.Pointer)
func RaceReadRange(addr unsafe.Pointer, len int)
func RaceWriteRange(addr unsafe.Pointer, len int)

func RaceErrors() int {
	var n uint64
	racecall(&__tsan_report_count, uintptr(unsafe.Pointer(&n)), 0, 0, 0)
	return int(n)
}
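
// Illustrative use of RaceErrors (an editor's sketch, not part of this file):
// a test can snapshot the report count around concurrent work. The helper
// doConcurrentWork is hypothetical, and RaceErrors exists only in -race builds.
//
//	func TestNoNewRaces(t *testing.T) {
//		before := runtime.RaceErrors()
//		doConcurrentWork()
//		if n := runtime.RaceErrors() - before; n > 0 {
//			t.Fatalf("race detector reported %d new data race(s)", n)
//		}
//	}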

//go:nosplit

// RaceAcquire/RaceRelease/RaceReleaseMerge establish happens-before relations
// between goroutines. These inform the race detector about actual synchronization
// that it can't see for some reason (e.g. synchronization within RaceDisable/RaceEnable
// sections of code).
// RaceAcquire establishes a happens-before relation with the preceding
// RaceReleaseMerge on addr up to and including the last RaceRelease on addr.
// In terms of the C memory model (C11 §5.1.2.4, §7.17.3),
// RaceAcquire is equivalent to atomic_load(memory_order_acquire).
func RaceAcquire(addr unsafe.Pointer) {
	raceacquire(addr)
}

//go:nosplit

// RaceRelease performs a release operation on addr that
// can synchronize with a later RaceAcquire on addr.
//
// In terms of the C memory model, RaceRelease is equivalent to
// atomic_store(memory_order_release).
func RaceRelease(addr unsafe.Pointer) {
	racerelease(addr)
}

//go:nosplit

// RaceReleaseMerge is like RaceRelease, but also establishes a happens-before
// relation with the preceding RaceRelease or RaceReleaseMerge on addr.
//
// In terms of the C memory model, RaceReleaseMerge is equivalent to
// atomic_exchange(memory_order_release).
func RaceReleaseMerge(addr unsafe.Pointer) {
	racereleasemerge(addr)
}
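
// Illustrative annotation (an editor's sketch, not part of this file): pairing
// RaceRelease with a later RaceAcquire tells the detector about a handoff it
// cannot observe, e.g. through an external queue. The queue and the data type
// are hypothetical.
//
//	// producer
//	data.x = 42
//	runtime.RaceRelease(unsafe.Pointer(data))
//	queue.Put(data) // synchronizes by means invisible to the detector
//
//	// consumer
//	data := queue.Get()
//	runtime.RaceAcquire(unsafe.Pointer(data))
//	_ = data.x // not reported: release/acquire established happens-before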

//go:nosplit

// RaceDisable disables handling of race synchronization events in the current goroutine.
// Handling is re-enabled with RaceEnable. RaceDisable/RaceEnable can be nested.
// Non-synchronization events (memory accesses, function entry/exit) still affect
// the race detector.
func RaceDisable() {
	_g_ := getg()
	if _g_.raceignore == 0 {
		racecall(&__tsan_go_ignore_sync_begin, _g_.racectx, 0, 0, 0)
	}
	_g_.raceignore++
}

//go:nosplit

// RaceEnable re-enables handling of race events in the current goroutine.
func RaceEnable() {
	_g_ := getg()
	_g_.raceignore--
	if _g_.raceignore == 0 {
		racecall(&__tsan_go_ignore_sync_end, _g_.racectx, 0, 0, 0)
	}
}
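
// Illustrative use (an editor's sketch, not part of this file): RaceDisable and
// RaceEnable can bracket code whose internal synchronization would otherwise be
// misreported; because the calls nest, a helper may use them even when its
// caller already has. callWithHiddenSync is a hypothetical name.
//
//	runtime.RaceDisable()
//	callWithHiddenSync() // its sync events are ignored...
//	runtime.RaceEnable() // ...but its memory accesses are still checked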

// Private interface for the runtime.

const raceenabled = true

// For all functions accepting callerpc and pc:
// callerpc is the return PC of the function that calls this function,
// and pc is the start PC of the function that calls this function.
func raceReadObjectPC(t *_type, addr unsafe.Pointer, callerpc, pc uintptr) {
	kind := t.kind & kindMask
	if kind == kindArray || kind == kindStruct {
		// for composite objects we have to read every address
		// because a write might happen to any subobject.
		racereadrangepc(addr, t.size, callerpc, pc)
	} else {
		// for non-composite objects we can read just the start
		// address, as any write must write the first byte.
		racereadpc(addr, callerpc, pc)
	}
}

func raceWriteObjectPC(t *_type, addr unsafe.Pointer, callerpc, pc uintptr) {
	kind := t.kind & kindMask
	if kind == kindArray || kind == kindStruct {
		// for composite objects we have to write every address
		// because a write might happen to any subobject.
		racewriterangepc(addr, t.size, callerpc, pc)
	} else {
		// for non-composite objects we can write just the start
		// address, as any write must write the first byte.
		racewritepc(addr, callerpc, pc)
	}
}
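
// Worked example of the distinction above (an editor's note, not part of this
// file), using a hypothetical type: a write to a struct may touch only one
// field, so a conflicting access must be checked against the whole object's
// range; a scalar write always covers the object's first byte.
//
//	type pair struct{ a, b int64 }
//	p := new(pair)
//	// goroutine 1: p.b = 1  // writes only bytes 8..15 of *p
//	// goroutine 2: reads *p // must be checked over all 16 bytes, or the
//	//                       // conflict on p.b would go unnoticed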

//go:noescape
func racereadpc(addr unsafe.Pointer, callpc, pc uintptr)

//go:noescape
func racewritepc(addr unsafe.Pointer, callpc, pc uintptr)

type symbolizeCodeContext struct {
	pc   uintptr
	fn   *byte
	file *byte
	line uintptr
	off  uintptr
	res  uintptr
}

var qq = [...]byte{'?', '?', 0}
var dash = [...]byte{'-', 0}

const (
	raceGetProcCmd = iota
	raceSymbolizeCodeCmd
	raceSymbolizeDataCmd
)

// Callback from C into Go, runs on g0.
func racecallback(cmd uintptr, ctx unsafe.Pointer) {
	switch cmd {
	case raceGetProcCmd:
		throw("should have been handled by racecallbackthunk")
	case raceSymbolizeCodeCmd:
		raceSymbolizeCode((*symbolizeCodeContext)(ctx))
	case raceSymbolizeDataCmd:
		raceSymbolizeData((*symbolizeDataContext)(ctx))
	default:
		throw("unknown command")
	}
}

// raceSymbolizeCode reads ctx.pc and populates the rest of *ctx with
// information about the code at that pc.
//
// The race detector has already subtracted 1 from pcs, so they point to the last
// byte of call instructions (including calls to runtime.racewrite and friends).
//
// If the incoming pc is part of an inlined function, *ctx is populated
// with information about the inlined function, and on return ctx.pc is set
// to a pc in the logically containing function. (The race detector should call this
// function again with that pc.)
//
// If the incoming pc is not part of an inlined function, the return pc is unchanged.
func raceSymbolizeCode(ctx *symbolizeCodeContext) {
	pc := ctx.pc
	fi := findfunc(pc)
	f := fi._Func()
	if f != nil {
		file, line := f.FileLine(pc)
		if line != 0 {
			if inldata := funcdata(fi, _FUNCDATA_InlTree); inldata != nil {
				inltree := (*[1 << 20]inlinedCall)(inldata)
				for {
					ix := pcdatavalue(fi, _PCDATA_InlTreeIndex, pc, nil)
					if ix >= 0 {
						if inltree[ix].funcID == funcID_wrapper {
							// ignore wrappers
							// Back up to an instruction in the "caller".
							pc = f.Entry() + uintptr(inltree[ix].parentPc)
							continue
						}
						ctx.pc = f.Entry() + uintptr(inltree[ix].parentPc) // "caller" pc
						ctx.fn = cfuncnameFromNameoff(fi, inltree[ix].func_)
						ctx.line = uintptr(line)
						ctx.file = &bytes(file)[0] // assume NUL-terminated
						ctx.off = pc - f.Entry()
						ctx.res = 1
						return
					}
					break
				}
			}
			ctx.fn = cfuncname(fi)
			ctx.line = uintptr(line)
			ctx.file = &bytes(file)[0] // assume NUL-terminated
			ctx.off = pc - f.Entry()
			ctx.res = 1
			return
		}
	}
	ctx.fn = &qq[0]
	ctx.file = &dash[0]
	ctx.line = 0
	ctx.off = ctx.pc
	ctx.res = 1
}
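
// Sketch of a caller-side walk over inlined frames using the protocol described
// above (an editor's illustration, not part of this file; the real race runtime
// drives this loop from the C side, and report is a hypothetical helper):
//
//	for {
//		prev := ctx.pc
//		raceSymbolizeCode(ctx)
//		report(ctx.fn, ctx.file, ctx.line)
//		if ctx.pc == prev {
//			break // pc unchanged: outermost, non-inlined frame reached
//		}
//	}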

type symbolizeDataContext struct {
	addr  uintptr
	heap  uintptr
	start uintptr
	size  uintptr
	name  *byte
	file  *byte
	line  uintptr
	res   uintptr
}

func raceSymbolizeData(ctx *symbolizeDataContext) {
	if base, span, _ := findObject(ctx.addr, 0, 0); base != 0 {
		ctx.heap = 1
		ctx.start = base
		ctx.size = span.elemsize
		ctx.res = 1
	}
}

// Race runtime functions called via runtime·racecall.
//go:linkname __tsan_init __tsan_init
var __tsan_init byte

//go:linkname __tsan_fini __tsan_fini
var __tsan_fini byte

//go:linkname __tsan_proc_create __tsan_proc_create
var __tsan_proc_create byte

//go:linkname __tsan_proc_destroy __tsan_proc_destroy
var __tsan_proc_destroy byte

//go:linkname __tsan_map_shadow __tsan_map_shadow
var __tsan_map_shadow byte

//go:linkname __tsan_finalizer_goroutine __tsan_finalizer_goroutine
var __tsan_finalizer_goroutine byte

//go:linkname __tsan_go_start __tsan_go_start
var __tsan_go_start byte

//go:linkname __tsan_go_end __tsan_go_end
var __tsan_go_end byte

//go:linkname __tsan_malloc __tsan_malloc
var __tsan_malloc byte

//go:linkname __tsan_free __tsan_free
var __tsan_free byte

//go:linkname __tsan_acquire __tsan_acquire
var __tsan_acquire byte

//go:linkname __tsan_release __tsan_release
var __tsan_release byte

//go:linkname __tsan_release_acquire __tsan_release_acquire
var __tsan_release_acquire byte

//go:linkname __tsan_release_merge __tsan_release_merge
var __tsan_release_merge byte

//go:linkname __tsan_go_ignore_sync_begin __tsan_go_ignore_sync_begin
var __tsan_go_ignore_sync_begin byte

//go:linkname __tsan_go_ignore_sync_end __tsan_go_ignore_sync_end
var __tsan_go_ignore_sync_end byte

//go:linkname __tsan_report_count __tsan_report_count
var __tsan_report_count byte

// Mimic what cmd/cgo would do.
//go:cgo_import_static __tsan_init
//go:cgo_import_static __tsan_fini
//go:cgo_import_static __tsan_proc_create
//go:cgo_import_static __tsan_proc_destroy
//go:cgo_import_static __tsan_map_shadow
//go:cgo_import_static __tsan_finalizer_goroutine
//go:cgo_import_static __tsan_go_start
//go:cgo_import_static __tsan_go_end
//go:cgo_import_static __tsan_malloc
//go:cgo_import_static __tsan_free
//go:cgo_import_static __tsan_acquire
//go:cgo_import_static __tsan_release
//go:cgo_import_static __tsan_release_acquire
//go:cgo_import_static __tsan_release_merge
//go:cgo_import_static __tsan_go_ignore_sync_begin
//go:cgo_import_static __tsan_go_ignore_sync_end
//go:cgo_import_static __tsan_report_count

// These are called from race_amd64.s.
//go:cgo_import_static __tsan_read
//go:cgo_import_static __tsan_read_pc
//go:cgo_import_static __tsan_read_range
//go:cgo_import_static __tsan_write
//go:cgo_import_static __tsan_write_pc
//go:cgo_import_static __tsan_write_range
//go:cgo_import_static __tsan_func_enter
//go:cgo_import_static __tsan_func_exit

//go:cgo_import_static __tsan_go_atomic32_load
//go:cgo_import_static __tsan_go_atomic64_load
//go:cgo_import_static __tsan_go_atomic32_store
//go:cgo_import_static __tsan_go_atomic64_store
//go:cgo_import_static __tsan_go_atomic32_exchange
//go:cgo_import_static __tsan_go_atomic64_exchange
//go:cgo_import_static __tsan_go_atomic32_fetch_add
//go:cgo_import_static __tsan_go_atomic64_fetch_add
//go:cgo_import_static __tsan_go_atomic32_compare_exchange
//go:cgo_import_static __tsan_go_atomic64_compare_exchange
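
// Illustrative mapping (an editor's note, not part of this file): in a -race
// build, sync/atomic operations are implemented in race_amd64.s and funnel
// into the __tsan_go_atomic* thunks imported above. For example:
//
//	var flag int32
//	atomic.AddInt32(&flag, 1)   // enters __tsan_go_atomic32_fetch_add
//	_ = atomic.LoadInt32(&flag) // enters __tsan_go_atomic32_load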

// start/end of global data (data+bss).
var racedatastart uintptr
var racedataend uintptr

// start/end of heap for race_amd64.s
var racearenastart uintptr
var racearenaend uintptr

func racefuncenter(callpc uintptr)
func racefuncenterfp(fp uintptr)
func racefuncexit()
func raceread(addr uintptr)
func racewrite(addr uintptr)
func racereadrange(addr, size uintptr)
func racewriterange(addr, size uintptr)
func racereadrangepc1(addr, size, pc uintptr)
func racewriterangepc1(addr, size, pc uintptr)
func racecallbackthunk(uintptr)

// racecall allows calling an arbitrary function fn from C race runtime
// with up to 4 uintptr arguments.
func racecall(fn *byte, arg0, arg1, arg2, arg3 uintptr)

// isvalidaddr reports whether addr has shadow (i.e. heap or data/bss).
//go:nosplit
func isvalidaddr(addr unsafe.Pointer) bool {
	return racearenastart <= uintptr(addr) && uintptr(addr) < racearenaend ||
		racedatastart <= uintptr(addr) && uintptr(addr) < racedataend
}

//go:nosplit
func raceinit() (gctx, pctx uintptr) {
	// cgo is required to initialize libc, which is used by the race runtime.
	if !iscgo {
		throw("raceinit: race build must use cgo")
	}

	racecall(&__tsan_init, uintptr(unsafe.Pointer(&gctx)), uintptr(unsafe.Pointer(&pctx)), funcPC(racecallbackthunk), 0)

	// Round data segment to page boundaries, because it's used in mmap().
	start := ^uintptr(0)
	end := uintptr(0)
	if start > firstmoduledata.noptrdata {
		start = firstmoduledata.noptrdata
	}
	if start > firstmoduledata.data {
		start = firstmoduledata.data
	}
	if start > firstmoduledata.noptrbss {
		start = firstmoduledata.noptrbss
	}
	if start > firstmoduledata.bss {
		start = firstmoduledata.bss
	}
	if end < firstmoduledata.enoptrdata {
		end = firstmoduledata.enoptrdata
	}
	if end < firstmoduledata.edata {
		end = firstmoduledata.edata
	}
	if end < firstmoduledata.enoptrbss {
		end = firstmoduledata.enoptrbss
	}
	if end < firstmoduledata.ebss {
		end = firstmoduledata.ebss
	}
	size := alignUp(end-start, _PageSize)
	racecall(&__tsan_map_shadow, start, size, 0, 0)
	racedatastart = start
	racedataend = start + size

	return
}
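
// Worked example (an editor's note with hypothetical addresses, not part of
// this file): with _PageSize = 0x1000 and a data+bss span of
// [0x401000, 0x403210), raceinit maps shadow for
// alignUp(0x2210, 0x1000) = 0x3000 bytes starting at 0x401000.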

var raceFiniLock mutex

//go:nosplit
func racefini() {
	// racefini() can only be called once to avoid races.
	// This eventually (via __tsan_fini) calls C.exit which has
	// undefined behavior if called more than once. If the lock is
	// already held it's assumed that the first caller exits the program
	// so other calls can hang forever without an issue.
	lock(&raceFiniLock)
	// We're entering external code that may call ExitProcess on
	// Windows.
	osPreemptExtEnter(getg().m)
	racecall(&__tsan_fini, 0, 0, 0, 0)
}

//go:nosplit
func raceproccreate() uintptr {
	var ctx uintptr
	racecall(&__tsan_proc_create, uintptr(unsafe.Pointer(&ctx)), 0, 0, 0)
	return ctx
}

//go:nosplit
func raceprocdestroy(ctx uintptr) {
	racecall(&__tsan_proc_destroy, ctx, 0, 0, 0)
}

//go:nosplit
func racemapshadow(addr unsafe.Pointer, size uintptr) {
	if racearenastart == 0 {
		racearenastart = uintptr(addr)
	}
	if racearenaend < uintptr(addr)+size {
		racearenaend = uintptr(addr) + size
	}
	racecall(&__tsan_map_shadow, uintptr(addr), size, 0, 0)
}

//go:nosplit
func racemalloc(p unsafe.Pointer, sz uintptr) {
	racecall(&__tsan_malloc, 0, 0, uintptr(p), sz)
}

//go:nosplit
func racefree(p unsafe.Pointer, sz uintptr) {
	racecall(&__tsan_free, uintptr(p), sz, 0, 0)
}

//go:nosplit
func racegostart(pc uintptr) uintptr {
	_g_ := getg()
	var spawng *g
	if _g_.m.curg != nil {
		spawng = _g_.m.curg
	} else {
		spawng = _g_
	}

	var racectx uintptr
	racecall(&__tsan_go_start, spawng.racectx, uintptr(unsafe.Pointer(&racectx)), pc, 0)
	return racectx
}

//go:nosplit
func racegoend() {
	racecall(&__tsan_go_end, getg().racectx, 0, 0, 0)
}

//go:nosplit
func racectxend(racectx uintptr) {
	racecall(&__tsan_go_end, racectx, 0, 0, 0)
}

//go:nosplit
func racewriterangepc(addr unsafe.Pointer, sz, callpc, pc uintptr) {
	_g_ := getg()
	if _g_ != _g_.m.curg {
		// The call is coming from manual instrumentation of Go code running on g0/gsignal.
		// Not interesting.
		return
	}
	if callpc != 0 {
		racefuncenter(callpc)
	}
	racewriterangepc1(uintptr(addr), sz, pc)
	if callpc != 0 {
		racefuncexit()
	}
}

//go:nosplit
func racereadrangepc(addr unsafe.Pointer, sz, callpc, pc uintptr) {
	_g_ := getg()
	if _g_ != _g_.m.curg {
		// The call is coming from manual instrumentation of Go code running on g0/gsignal.
		// Not interesting.
		return
	}
	if callpc != 0 {
		racefuncenter(callpc)
	}
	racereadrangepc1(uintptr(addr), sz, pc)
	if callpc != 0 {
		racefuncexit()
	}
}

//go:nosplit
func raceacquire(addr unsafe.Pointer) {
	raceacquireg(getg(), addr)
}

//go:nosplit
func raceacquireg(gp *g, addr unsafe.Pointer) {
	if getg().raceignore != 0 || !isvalidaddr(addr) {
		return
	}
	racecall(&__tsan_acquire, gp.racectx, uintptr(addr), 0, 0)
}

//go:nosplit
func raceacquirectx(racectx uintptr, addr unsafe.Pointer) {
	if !isvalidaddr(addr) {
		return
	}
	racecall(&__tsan_acquire, racectx, uintptr(addr), 0, 0)
}

//go:nosplit
func racerelease(addr unsafe.Pointer) {
	racereleaseg(getg(), addr)
}

//go:nosplit
func racereleaseg(gp *g, addr unsafe.Pointer) {
	if getg().raceignore != 0 || !isvalidaddr(addr) {
		return
	}
	racecall(&__tsan_release, gp.racectx, uintptr(addr), 0, 0)
}

//go:nosplit
func racereleaseacquire(addr unsafe.Pointer) {
	racereleaseacquireg(getg(), addr)
}

//go:nosplit
func racereleaseacquireg(gp *g, addr unsafe.Pointer) {
	if getg().raceignore != 0 || !isvalidaddr(addr) {
		return
	}
	racecall(&__tsan_release_acquire, gp.racectx, uintptr(addr), 0, 0)
}

//go:nosplit
func racereleasemerge(addr unsafe.Pointer) {
	racereleasemergeg(getg(), addr)
}

//go:nosplit
func racereleasemergeg(gp *g, addr unsafe.Pointer) {
	if getg().raceignore != 0 || !isvalidaddr(addr) {
		return
	}
	racecall(&__tsan_release_merge, gp.racectx, uintptr(addr), 0, 0)
}

//go:nosplit
func racefingo() {
	racecall(&__tsan_finalizer_goroutine, getg().racectx, 0, 0, 0)
}

// The declarations below generate ABI wrappers for functions
// implemented in assembly in this package but declared in another
// package.

//go:linkname abigen_sync_atomic_LoadInt32 sync/atomic.LoadInt32
func abigen_sync_atomic_LoadInt32(addr *int32) (val int32)

//go:linkname abigen_sync_atomic_LoadInt64 sync/atomic.LoadInt64
func abigen_sync_atomic_LoadInt64(addr *int64) (val int64)

//go:linkname abigen_sync_atomic_LoadUint32 sync/atomic.LoadUint32
func abigen_sync_atomic_LoadUint32(addr *uint32) (val uint32)

//go:linkname abigen_sync_atomic_LoadUint64 sync/atomic.LoadUint64
func abigen_sync_atomic_LoadUint64(addr *uint64) (val uint64)

//go:linkname abigen_sync_atomic_LoadUintptr sync/atomic.LoadUintptr
func abigen_sync_atomic_LoadUintptr(addr *uintptr) (val uintptr)

//go:linkname abigen_sync_atomic_LoadPointer sync/atomic.LoadPointer
func abigen_sync_atomic_LoadPointer(addr *unsafe.Pointer) (val unsafe.Pointer)

//go:linkname abigen_sync_atomic_StoreInt32 sync/atomic.StoreInt32
func abigen_sync_atomic_StoreInt32(addr *int32, val int32)

//go:linkname abigen_sync_atomic_StoreInt64 sync/atomic.StoreInt64
func abigen_sync_atomic_StoreInt64(addr *int64, val int64)

//go:linkname abigen_sync_atomic_StoreUint32 sync/atomic.StoreUint32
func abigen_sync_atomic_StoreUint32(addr *uint32, val uint32)

//go:linkname abigen_sync_atomic_StoreUint64 sync/atomic.StoreUint64
func abigen_sync_atomic_StoreUint64(addr *uint64, val uint64)

//go:linkname abigen_sync_atomic_SwapInt32 sync/atomic.SwapInt32
func abigen_sync_atomic_SwapInt32(addr *int32, new int32) (old int32)

//go:linkname abigen_sync_atomic_SwapInt64 sync/atomic.SwapInt64
func abigen_sync_atomic_SwapInt64(addr *int64, new int64) (old int64)

//go:linkname abigen_sync_atomic_SwapUint32 sync/atomic.SwapUint32
func abigen_sync_atomic_SwapUint32(addr *uint32, new uint32) (old uint32)

//go:linkname abigen_sync_atomic_SwapUint64 sync/atomic.SwapUint64
func abigen_sync_atomic_SwapUint64(addr *uint64, new uint64) (old uint64)

//go:linkname abigen_sync_atomic_AddInt32 sync/atomic.AddInt32
func abigen_sync_atomic_AddInt32(addr *int32, delta int32) (new int32)

//go:linkname abigen_sync_atomic_AddUint32 sync/atomic.AddUint32
func abigen_sync_atomic_AddUint32(addr *uint32, delta uint32) (new uint32)

//go:linkname abigen_sync_atomic_AddInt64 sync/atomic.AddInt64
func abigen_sync_atomic_AddInt64(addr *int64, delta int64) (new int64)

//go:linkname abigen_sync_atomic_AddUint64 sync/atomic.AddUint64
func abigen_sync_atomic_AddUint64(addr *uint64, delta uint64) (new uint64)

//go:linkname abigen_sync_atomic_AddUintptr sync/atomic.AddUintptr
func abigen_sync_atomic_AddUintptr(addr *uintptr, delta uintptr) (new uintptr)

//go:linkname abigen_sync_atomic_CompareAndSwapInt32 sync/atomic.CompareAndSwapInt32
func abigen_sync_atomic_CompareAndSwapInt32(addr *int32, old, new int32) (swapped bool)

//go:linkname abigen_sync_atomic_CompareAndSwapInt64 sync/atomic.CompareAndSwapInt64
func abigen_sync_atomic_CompareAndSwapInt64(addr *int64, old, new int64) (swapped bool)

//go:linkname abigen_sync_atomic_CompareAndSwapUint32 sync/atomic.CompareAndSwapUint32
func abigen_sync_atomic_CompareAndSwapUint32(addr *uint32, old, new uint32) (swapped bool)

//go:linkname abigen_sync_atomic_CompareAndSwapUint64 sync/atomic.CompareAndSwapUint64
func abigen_sync_atomic_CompareAndSwapUint64(addr *uint64, old, new uint64) (swapped bool)
