
Source file src/runtime/syscall_windows.go


// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"internal/abi"
	"runtime/internal/sys"
	"unsafe"
)

// cbs stores all registered Go callbacks.
var cbs struct {
	lock  mutex
	ctxt  [cb_max]winCallback
	index map[winCallbackKey]int
	n     int
}

// winCallback records information about a registered Go callback.
type winCallback struct {
	fn     *funcval // Go function
	retPop uintptr  // For 386 cdecl, how many bytes to pop on return
	abiMap abiDesc
}

// abiPartKind is the action an abiPart should take.
type abiPartKind int

const (
	abiPartBad   abiPartKind = iota
	abiPartStack             // Move a value from memory to the stack.
	abiPartReg               // Move a value from memory to a register.
)

// abiPart encodes a step in translating between calling ABIs.
type abiPart struct {
	kind           abiPartKind
	srcStackOffset uintptr
	dstStackOffset uintptr // used if kind == abiPartStack
	dstRegister    int     // used if kind == abiPartReg
	len            uintptr
}

func (a *abiPart) tryMerge(b abiPart) bool {
	if a.kind != abiPartStack || b.kind != abiPartStack {
		return false
	}
	if a.srcStackOffset+a.len == b.srcStackOffset && a.dstStackOffset+a.len == b.dstStackOffset {
		a.len += b.len
		return true
	}
	return false
}
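
// Illustrative sketch (not part of the original source): the merge rule above
// lets adjacent stack-to-stack copies collapse into a single memmove when the
// arguments are contiguous in both the C and Go frames. With hypothetical
// 64-bit offsets:
//
//	a := abiPart{kind: abiPartStack, srcStackOffset: 0, dstStackOffset: 0, len: 8}
//	b := abiPart{kind: abiPartStack, srcStackOffset: 8, dstStackOffset: 8, len: 8}
//	a.tryMerge(b) // true; a now covers both words: {abiPartStack, 0, 0, 0, 16}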
    56  
    57  // abiDesc specifies how to translate from a C frame to a Go
    58  // frame. This does not specify how to translate back because
    59  // the result is always a uintptr. If the C ABI is fastcall,
    60  // this assumes the four fastcall registers were first spilled
    61  // to the shadow space.
    62  type abiDesc struct {
    63  	parts []abiPart
    64  
    65  	srcStackSize uintptr // stdcall/fastcall stack space tracking
    66  	dstStackSize uintptr // Go stack space used
    67  	dstSpill     uintptr // Extra stack space for argument spill slots
    68  	dstRegisters int     // Go ABI int argument registers used
    69  
    70  	// retOffset is the offset of the uintptr-sized result in the Go
    71  	// frame.
    72  	retOffset uintptr
    73  }
    74  
    75  func (p *abiDesc) assignArg(t *_type) {
    76  	if t.size > sys.PtrSize {
    77  		// We don't support this right now. In
    78  		// stdcall/cdecl, 64-bit ints and doubles are
    79  		// passed as two words (little endian); and
    80  		// structs are pushed on the stack. In
    81  		// fastcall, arguments larger than the word
    82  		// size are passed by reference. On arm,
    83  		// 8-byte aligned arguments round up to the
    84  		// next even register and can be split across
    85  		// registers and the stack.
    86  		panic("compileCallback: argument size is larger than uintptr")
    87  	}
    88  	if k := t.kind & kindMask; GOARCH != "386" && (k == kindFloat32 || k == kindFloat64) {
    89  		// In fastcall, floating-point arguments in
    90  		// the first four positions are passed in
    91  		// floating-point registers, which we don't
    92  		// currently spill. arm passes floating-point
    93  		// arguments in VFP registers, which we also
    94  		// don't support.
    95  		// So basically we only support 386.
    96  		panic("compileCallback: float arguments not supported")
    97  	}
    98  
    99  	if t.size == 0 {
   100  		// The Go ABI aligns for zero-sized types.
   101  		p.dstStackSize = alignUp(p.dstStackSize, uintptr(t.align))
   102  		return
   103  	}
   104  
   105  	// In the C ABI, we're already on a word boundary.
   106  	// Also, sub-word-sized fastcall register arguments
   107  	// are stored to the least-significant bytes of the
   108  	// argument word and all supported Windows
   109  	// architectures are little endian, so srcStackOffset
   110  	// is already pointing to the right place for smaller
   111  	// arguments. The same is true on arm.
   112  
   113  	oldParts := p.parts
   114  	if p.tryRegAssignArg(t, 0) {
   115  		// Account for spill space.
   116  		//
   117  		// TODO(mknyszek): Remove this when we no longer have
   118  		// caller reserved spill space.
   119  		p.dstSpill = alignUp(p.dstSpill, uintptr(t.align))
   120  		p.dstSpill += t.size
   121  	} else {
   122  		// Register assignment failed.
   123  		// Undo the work and stack assign.
   124  		p.parts = oldParts
   125  
   126  		// The Go ABI aligns arguments.
   127  		p.dstStackSize = alignUp(p.dstStackSize, uintptr(t.align))
   128  
   129  		// Copy just the size of the argument. Note that this
   130  		// could be a small by-value struct, but C and Go
   131  		// struct layouts are compatible, so we can copy these
   132  		// directly, too.
   133  		part := abiPart{
   134  			kind:           abiPartStack,
   135  			srcStackOffset: p.srcStackSize,
   136  			dstStackOffset: p.dstStackSize,
   137  			len:            t.size,
   138  		}
   139  		// Add this step to the adapter.
   140  		if len(p.parts) == 0 || !p.parts[len(p.parts)-1].tryMerge(part) {
   141  			p.parts = append(p.parts, part)
   142  		}
   143  		// The Go ABI packs arguments.
   144  		p.dstStackSize += t.size
   145  	}
   146  
   147  	// cdecl, stdcall, fastcall, and arm pad arguments to word size.
   148  	// TODO(rsc): On arm and arm64 do we need to skip the caller's saved LR?
   149  	p.srcStackSize += sys.PtrSize
   150  }
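
// Worked example (hypothetical, assuming an architecture with integer argument
// registers, such as amd64 with the register ABI): assigning two pointer-sized
// arguments. Both register-assign, so the Go stack stays empty while the C
// stack and the caller-reserved spill space each grow by one word per argument.
// uintptrType below stands in for the *_type of uintptr.
//
//	var p abiDesc
//	p.assignArg(uintptrType)
//	p.assignArg(uintptrType)
//	// Now: len(p.parts) == 2 (both abiPartReg), p.dstRegisters == 2,
//	// p.srcStackSize == 16, p.dstStackSize == 0, p.dstSpill == 16.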

// tryRegAssignArg tries to register-assign a value of type t.
// If this type is nested in an aggregate type, then offset is the
// offset of this type within its parent type.
// Assumes t.size <= sys.PtrSize and t.size != 0.
//
// Returns whether the assignment succeeded.
func (p *abiDesc) tryRegAssignArg(t *_type, offset uintptr) bool {
	switch k := t.kind & kindMask; k {
	case kindBool, kindInt, kindInt8, kindInt16, kindInt32, kindUint, kindUint8, kindUint16, kindUint32, kindUintptr, kindPtr, kindUnsafePointer:
		// Assign a register for all these types.
		return p.assignReg(t.size, offset)
	case kindInt64, kindUint64:
		// Only register-assign if the registers are big enough.
		if sys.PtrSize == 8 {
			return p.assignReg(t.size, offset)
		}
	case kindArray:
		at := (*arraytype)(unsafe.Pointer(t))
		if at.len == 1 {
			return p.tryRegAssignArg(at.elem, offset)
		}
	case kindStruct:
		st := (*structtype)(unsafe.Pointer(t))
		for i := range st.fields {
			f := &st.fields[i]
			if !p.tryRegAssignArg(f.typ, offset+f.offset()) {
				return false
			}
		}
		return true
	}
	// Pointer-sized types such as maps and channels are currently
	// not supported.
	panic("compileCallback: type " + t.string() + " is currently not supported for use in system callbacks")
}
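
// A few illustrative consequences of the rules above (hypothetical argument
// types; a register assignment still falls back to a stack copy in assignArg
// when assignReg runs out of registers):
//
//	uintptr, unsafe.Pointer  // one register each
//	struct{ a, b uint32 }    // register-assigned field by field: two registers
//	[1]uintptr               // register-assigned via its single element
//	map[string]int           // panics: not supported in system callbacks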

// assignReg attempts to assign a single register for an
// argument with the given size, at the given offset into the
// value in the C ABI space.
//
// Returns whether the assignment was successful.
func (p *abiDesc) assignReg(size, offset uintptr) bool {
	if p.dstRegisters >= intArgRegs {
		return false
	}
	p.parts = append(p.parts, abiPart{
		kind:           abiPartReg,
		srcStackOffset: p.srcStackSize + offset,
		dstRegister:    p.dstRegisters,
		len:            size,
	})
	p.dstRegisters++
	return true
}

type winCallbackKey struct {
	fn    *funcval
	cdecl bool
}

func callbackasm()

// callbackasmAddr returns the address of the runtime.callbackasm
// function, adjusted by the callback index i.
// On x86 and amd64, runtime.callbackasm is a series of CALL instructions,
// and we want the callback to arrive at the corresponding CALL instruction
// instead of at the start of runtime.callbackasm.
// On ARM, runtime.callbackasm is a series of mov and branch instructions.
// R12 is loaded with the callback index. Each entry is two instructions,
// hence 8 bytes.
func callbackasmAddr(i int) uintptr {
	var entrySize int
	switch GOARCH {
	default:
		panic("unsupported architecture")
	case "386", "amd64":
		entrySize = 5
	case "arm", "arm64":
		// On ARM and ARM64, each entry is a MOV instruction
		// followed by a branch instruction.
		entrySize = 8
	}
	return funcPC(callbackasm) + uintptr(i*entrySize)
}
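
// Worked example (hypothetical base address): with funcPC(callbackasm) at
// 0x1000, callback index 3 resolves to the fourth entry of the trampoline
// table:
//
//	callbackasmAddr(3) // amd64: 0x1000 + 3*5 = 0x100f (5-byte CALL entries)
//	callbackasmAddr(3) // arm64: 0x1000 + 3*8 = 0x1018 (MOV + branch, 8 bytes)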

const callbackMaxFrame = 64 * sys.PtrSize

// compileCallback converts a Go function fn into a C function pointer
// that can be passed to Windows APIs.
//
// On 386, if cdecl is true, the returned C function will use the
// cdecl calling convention; otherwise, it will use stdcall. On amd64,
// it always uses fastcall. On arm, it always uses the ARM convention.
//
//go:linkname compileCallback syscall.compileCallback
func compileCallback(fn eface, cdecl bool) (code uintptr) {
	if GOARCH != "386" {
		// cdecl is only meaningful on 386.
		cdecl = false
	}

	if fn._type == nil || (fn._type.kind&kindMask) != kindFunc {
		panic("compileCallback: expected function with one uintptr-sized result")
	}
	ft := (*functype)(unsafe.Pointer(fn._type))

	// Check arguments and construct ABI translation.
	var abiMap abiDesc
	for _, t := range ft.in() {
		abiMap.assignArg(t)
	}
	// The Go ABI aligns the result to the word size. src is
	// already aligned.
	abiMap.dstStackSize = alignUp(abiMap.dstStackSize, sys.PtrSize)
	abiMap.retOffset = abiMap.dstStackSize

	if len(ft.out()) != 1 {
		panic("compileCallback: expected function with one uintptr-sized result")
	}
	if ft.out()[0].size != sys.PtrSize {
		panic("compileCallback: expected function with one uintptr-sized result")
	}
	if k := ft.out()[0].kind & kindMask; k == kindFloat32 || k == kindFloat64 {
		// In cdecl and stdcall, float results are returned in
		// ST(0). In fastcall, they're returned in XMM0.
		// Either way, it's not AX.
		panic("compileCallback: float results not supported")
	}
	if intArgRegs == 0 {
		// Make room for the uintptr-sized result.
		// If there are argument registers, the return value will
		// be passed in the first register.
		abiMap.dstStackSize += sys.PtrSize
	}

	// TODO(mknyszek): Remove dstSpill from this calculation when we no longer have
	// caller reserved spill space.
	frameSize := alignUp(abiMap.dstStackSize, sys.PtrSize)
	frameSize += abiMap.dstSpill
	if frameSize > callbackMaxFrame {
		panic("compileCallback: function argument frame too large")
	}

	// For cdecl, the callee is responsible for popping its
	// arguments from the C stack.
	var retPop uintptr
	if cdecl {
		retPop = abiMap.srcStackSize
	}

	key := winCallbackKey{(*funcval)(fn.data), cdecl}

	lock(&cbs.lock) // We don't unlock this in a defer because this is used from the system stack.

	// Check if this callback is already registered.
	if n, ok := cbs.index[key]; ok {
		unlock(&cbs.lock)
		return callbackasmAddr(n)
	}

	// Register the callback.
	if cbs.index == nil {
		cbs.index = make(map[winCallbackKey]int)
	}
	n := cbs.n
	if n >= len(cbs.ctxt) {
		unlock(&cbs.lock)
		throw("too many callback functions")
	}
	c := winCallback{key.fn, retPop, abiMap}
	cbs.ctxt[n] = c
	cbs.index[key] = n
	cbs.n++

	unlock(&cbs.lock)
	return callbackasmAddr(n)
}
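
// Caller-side sketch (user code, not part of this file): compileCallback is
// reached through syscall.NewCallback and syscall.NewCallbackCDecl, which are
// linked to it by the go:linkname directive above. The Go function must take
// only uintptr-sized arguments and return exactly one uintptr-sized result,
// e.g. a window procedure:
//
//	wndProc := syscall.NewCallback(func(hwnd, msg, wparam, lparam uintptr) uintptr {
//		return 0 // handle the message, or hand it back to Windows
//	})
//	// wndProc can now be passed to Windows APIs that expect a function pointer,
//	// such as the lpfnWndProc field of a WNDCLASSEX.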

type callbackArgs struct {
	index uintptr
	// args points to the argument block.
	//
	// For cdecl and stdcall, all arguments are on the stack.
	//
	// For fastcall, the trampoline spills register arguments to
	// the reserved spill slots below the stack arguments,
	// resulting in a layout equivalent to stdcall.
	//
	// For arm, the trampoline stores the register arguments just
	// below the stack arguments, so again we can treat it as one
	// big stack arguments frame.
	args unsafe.Pointer
	// Below are out-args from callbackWrap
	result uintptr
	retPop uintptr // For 386 cdecl, how many bytes to pop on return
}

// callbackWrap is called by callbackasm to invoke a registered Go callback.
func callbackWrap(a *callbackArgs) {
	c := cbs.ctxt[a.index]
	a.retPop = c.retPop

	// Convert from C to Go ABI.
	var regs abi.RegArgs
	var frame [callbackMaxFrame]byte
	goArgs := unsafe.Pointer(&frame)
	for _, part := range c.abiMap.parts {
		switch part.kind {
		case abiPartStack:
			memmove(add(goArgs, part.dstStackOffset), add(a.args, part.srcStackOffset), part.len)
		case abiPartReg:
			goReg := unsafe.Pointer(&regs.Ints[part.dstRegister])
			memmove(goReg, add(a.args, part.srcStackOffset), part.len)
		default:
			panic("bad ABI description")
		}
	}

	// TODO(mknyszek): Remove this when we no longer have
	// caller reserved spill space.
	frameSize := alignUp(c.abiMap.dstStackSize, sys.PtrSize)
	frameSize += c.abiMap.dstSpill

	// Even though this is copying back results, we can pass a nil
	// type because those results must not require write barriers.
	reflectcall(nil, unsafe.Pointer(c.fn), noescape(goArgs), uint32(c.abiMap.dstStackSize), uint32(c.abiMap.retOffset), uint32(frameSize), &regs)

	// Extract the result.
	//
	// There's always exactly one return value, one pointer in size.
	// If it's on the stack, then we will have reserved space for it
	// at the end of the frame, otherwise it was passed in a register.
	if c.abiMap.dstStackSize != c.abiMap.retOffset {
		a.result = *(*uintptr)(unsafe.Pointer(&frame[c.abiMap.retOffset]))
	} else {
		var zero int
		// On architectures with no registers, Ints[0] would be a compile error,
		// so we use a dynamic index. These architectures will never take this
		// branch, so this won't cause a runtime panic.
		a.result = regs.Ints[zero]
	}
}
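
// Data-flow sketch for a hypothetical callback func(a, b uintptr) uintptr on
// amd64 with the register ABI: the assembly trampoline spills RCX and RDX to
// the shadow space, so a.args points at a stdcall-like argument block; the two
// abiPartReg parts copy those words into regs.Ints[0] and regs.Ints[1];
// reflectcall invokes the Go function; and because nothing was stack-assigned,
// dstStackSize == retOffset, so the uintptr result is read back out of
// regs.Ints[0].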

const _LOAD_LIBRARY_SEARCH_SYSTEM32 = 0x00000800

// When available, this function will use LoadLibraryEx with the filename
// parameter and the LOAD_LIBRARY_SEARCH_SYSTEM32 flag. But on systems that
// do not support that flag, absoluteFilepath should contain a fallback:
// the full path of the library inside System32, for use with vanilla LoadLibrary.
//go:linkname syscall_loadsystemlibrary syscall.loadsystemlibrary
//go:nosplit
//go:cgo_unsafe_args
func syscall_loadsystemlibrary(filename *uint16, absoluteFilepath *uint16) (handle, err uintptr) {
	lockOSThread()
	c := &getg().m.syscall

	if useLoadLibraryEx {
		c.fn = getLoadLibraryEx()
		c.n = 3
		args := struct {
			lpFileName *uint16
			hFile      uintptr // always 0
			flags      uint32
		}{filename, 0, _LOAD_LIBRARY_SEARCH_SYSTEM32}
		c.args = uintptr(noescape(unsafe.Pointer(&args)))
	} else {
		c.fn = getLoadLibrary()
		c.n = 1
		c.args = uintptr(noescape(unsafe.Pointer(&absoluteFilepath)))
	}

	cgocall(asmstdcallAddr, unsafe.Pointer(c))
	handle = c.r1
	if handle == 0 {
		err = c.err
	}
	unlockOSThread() // not deferred after the lockOSThread above, to save stack frame size.
	return
}
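
// Sketch of the two paths above: on Windows versions whose LoadLibraryEx
// supports the search flags (useLoadLibraryEx), the call is effectively
//
//	LoadLibraryExW(filename, 0, LOAD_LIBRARY_SEARCH_SYSTEM32)
//
// and on older systems it degrades to
//
//	LoadLibraryW(absoluteFilepath) // e.g. a full C:\Windows\System32\... path
//
// which is why callers must supply both forms of the name.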

//go:linkname syscall_loadlibrary syscall.loadlibrary
//go:nosplit
//go:cgo_unsafe_args
func syscall_loadlibrary(filename *uint16) (handle, err uintptr) {
	lockOSThread()
	defer unlockOSThread()
	c := &getg().m.syscall
	c.fn = getLoadLibrary()
	c.n = 1
	c.args = uintptr(noescape(unsafe.Pointer(&filename)))
	cgocall(asmstdcallAddr, unsafe.Pointer(c))
	handle = c.r1
	if handle == 0 {
		err = c.err
	}
	return
}

//go:linkname syscall_getprocaddress syscall.getprocaddress
//go:nosplit
//go:cgo_unsafe_args
func syscall_getprocaddress(handle uintptr, procname *byte) (outhandle, err uintptr) {
	lockOSThread()
	defer unlockOSThread()
	c := &getg().m.syscall
	c.fn = getGetProcAddress()
	c.n = 2
	c.args = uintptr(noescape(unsafe.Pointer(&handle)))
	cgocall(asmstdcallAddr, unsafe.Pointer(c))
	outhandle = c.r1
	if outhandle == 0 {
		err = c.err
	}
	return
}

//go:linkname syscall_Syscall syscall.Syscall
//go:nosplit
//go:cgo_unsafe_args
func syscall_Syscall(fn, nargs, a1, a2, a3 uintptr) (r1, r2, err uintptr) {
	lockOSThread()
	defer unlockOSThread()
	c := &getg().m.syscall
	c.fn = fn
	c.n = nargs
	c.args = uintptr(noescape(unsafe.Pointer(&a1)))
	cgocall(asmstdcallAddr, unsafe.Pointer(c))
	return c.r1, c.r2, c.err
}
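
// Caller-side sketch (user code): syscall.Syscall and the wider variants below
// take a procedure address, the argument count, and the arguments themselves,
// with unused slots padded with zeros. A hypothetical call of a zero-argument
// kernel32 routine:
//
//	kernel32, _ := syscall.LoadDLL("kernel32.dll")
//	proc, _ := kernel32.FindProc("GetTickCount")
//	r1, _, _ := syscall.Syscall(proc.Addr(), 0, 0, 0, 0)
//	ms := uint32(r1) // milliseconds since the system started
//	_ = ms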

//go:linkname syscall_Syscall6 syscall.Syscall6
//go:nosplit
//go:cgo_unsafe_args
func syscall_Syscall6(fn, nargs, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, err uintptr) {
	lockOSThread()
	defer unlockOSThread()
	c := &getg().m.syscall
	c.fn = fn
	c.n = nargs
	c.args = uintptr(noescape(unsafe.Pointer(&a1)))
	cgocall(asmstdcallAddr, unsafe.Pointer(c))
	return c.r1, c.r2, c.err
}

//go:linkname syscall_Syscall9 syscall.Syscall9
//go:nosplit
//go:cgo_unsafe_args
func syscall_Syscall9(fn, nargs, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2, err uintptr) {
	lockOSThread()
	c := &getg().m.syscall
	c.fn = fn
	c.n = nargs
	c.args = uintptr(noescape(unsafe.Pointer(&a1)))
	cgocall(asmstdcallAddr, unsafe.Pointer(c))
	unlockOSThread()
	return c.r1, c.r2, c.err
}

//go:linkname syscall_Syscall12 syscall.Syscall12
//go:nosplit
//go:cgo_unsafe_args
func syscall_Syscall12(fn, nargs, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12 uintptr) (r1, r2, err uintptr) {
	lockOSThread()
	c := &getg().m.syscall
	c.fn = fn
	c.n = nargs
	c.args = uintptr(noescape(unsafe.Pointer(&a1)))
	cgocall(asmstdcallAddr, unsafe.Pointer(c))
	unlockOSThread()
	return c.r1, c.r2, c.err
}

//go:linkname syscall_Syscall15 syscall.Syscall15
//go:nosplit
//go:cgo_unsafe_args
func syscall_Syscall15(fn, nargs, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15 uintptr) (r1, r2, err uintptr) {
	lockOSThread()
	c := &getg().m.syscall
	c.fn = fn
	c.n = nargs
	c.args = uintptr(noescape(unsafe.Pointer(&a1)))
	cgocall(asmstdcallAddr, unsafe.Pointer(c))
	unlockOSThread()
	return c.r1, c.r2, c.err
}

//go:linkname syscall_Syscall18 syscall.Syscall18
//go:nosplit
//go:cgo_unsafe_args
func syscall_Syscall18(fn, nargs, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15, a16, a17, a18 uintptr) (r1, r2, err uintptr) {
	lockOSThread()
	c := &getg().m.syscall
	c.fn = fn
	c.n = nargs
	c.args = uintptr(noescape(unsafe.Pointer(&a1)))
	cgocall(asmstdcallAddr, unsafe.Pointer(c))
	unlockOSThread()
	return c.r1, c.r2, c.err
}
