
Source file src/runtime/netpoll_solaris.go


// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"runtime/internal/atomic"
	"unsafe"
)

// Solaris runtime-integrated network poller.
//
// Solaris uses event ports for scalable network I/O. Event
// ports are level-triggered, unlike epoll and kqueue which
// can be configured in both level-triggered and edge-triggered
// mode. Level triggering means we have to keep track of a few things
// ourselves. After we receive an event for a file descriptor,
// it's our responsibility to ask again to be notified for future
// events for that descriptor. When doing this we must keep track of
// what kind of events the goroutines are currently interested in,
// for example a fd may be open both for reading and writing.
//
// A description of the high-level operation of this code
// follows. Networking code will get a file descriptor by some means
// and will register it with the netpolling mechanism by a code path
// that eventually calls runtime·netpollopen. runtime·netpollopen
// calls port_associate with an empty event set. That means that we
// will not receive any events at this point. The association needs
// to be done at this early point because we need to process the I/O
// readiness notification at some point in the future. If I/O becomes
// ready while nobody is listening, then when we finally do care
// about it, nobody will tell us.
//
// Besides calling runtime·netpollopen, the networking code paths
// will call runtime·netpollarm each time goroutines are interested
// in doing network I/O. Because at that point we know what kind of
// I/O we are interested in (reading/writing), we can call
// port_associate passing the correct event set (POLLIN/POLLOUT).
// Since the file descriptor is already associated with the port,
// this call to port_associate will unblock the main poller loop
// (in runtime·netpoll) right away if the socket is actually ready
// for I/O.
//
// The main poller loop runs in its own thread waiting for events
// using port_getn. When an event happens, it will tell the scheduler
// about it using runtime·netpollready. Besides doing this, it must
// also re-associate the events that were not part of this current
// notification with the file descriptor. Failing to do this would
// mean that each notification silently disarms the other events, so
// code using the same file descriptor concurrently (for example, one
// goroutine reading and another writing) would no longer be woken up.
//
// The logic dealing with re-associations is encapsulated in
// runtime·netpollupdate. This function takes care to associate the
// descriptor only with the subset of events that were previously
// part of the association, except the one that just happened. We
// can't re-associate with that right away, because event ports
// are level triggered so it would cause a busy loop. Instead, that
// association is effected only by the runtime·netpollarm code path,
// when Go code actually asks for I/O.
//
// The open and arming mechanisms are serialized using the lock
// inside pollDesc. This is required because the netpoll loop runs
// asynchronously with respect to other Go code and by the time we
// get to call port_associate to update the association in the loop,
// the file descriptor might have been closed and reopened already.
// The lock allows runtime·netpollupdate to be called synchronously
// from the loop thread while preventing other threads from operating
// on the same pollDesc, so from the moment we unblock in the main
// loop until we loop again, we know for sure that we are dealing with
// the same file descriptor and can safely access the data we want
// (the event set).
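//
// As a minimal illustration of that lifecycle (the fd and pd values
// below are hypothetical; the calls are the wrappers defined in this
// file), assuming the descriptor is used for reading:
//
//	port_associate(portfd, _PORT_SOURCE_FD, fd, 0, uintptr(unsafe.Pointer(pd)))       // netpollopen: registered, no events yet
//	port_associate(portfd, _PORT_SOURCE_FD, fd, _POLLIN, uintptr(unsafe.Pointer(pd))) // netpollarm: interested in reading
//	// port_getn later returns one _POLLIN event and the association is consumed,
//	// so a further port_associate call is needed before the next notification:
//	port_associate(portfd, _PORT_SOURCE_FD, fd, _POLLIN, uintptr(unsafe.Pointer(pd))) // re-armed by the next netpollarm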

//go:cgo_import_dynamic libc_port_create port_create "libc.so"
//go:cgo_import_dynamic libc_port_associate port_associate "libc.so"
//go:cgo_import_dynamic libc_port_dissociate port_dissociate "libc.so"
//go:cgo_import_dynamic libc_port_getn port_getn "libc.so"
//go:cgo_import_dynamic libc_port_alert port_alert "libc.so"

//go:linkname libc_port_create libc_port_create
//go:linkname libc_port_associate libc_port_associate
//go:linkname libc_port_dissociate libc_port_dissociate
//go:linkname libc_port_getn libc_port_getn
//go:linkname libc_port_alert libc_port_alert

var (
	libc_port_create,
	libc_port_associate,
	libc_port_dissociate,
	libc_port_getn,
	libc_port_alert libcFunc
	netpollWakeSig uint32 // used to avoid duplicate calls of netpollBreak
)

func errno() int32 {
	return *getg().m.perrno
}

func fcntl(fd, cmd, arg int32) int32 {
	return int32(sysvicall3(&libc_fcntl, uintptr(fd), uintptr(cmd), uintptr(arg)))
}

func port_create() int32 {
	return int32(sysvicall0(&libc_port_create))
}

func port_associate(port, source int32, object uintptr, events uint32, user uintptr) int32 {
	return int32(sysvicall5(&libc_port_associate, uintptr(port), uintptr(source), object, uintptr(events), user))
}

func port_dissociate(port, source int32, object uintptr) int32 {
	return int32(sysvicall3(&libc_port_dissociate, uintptr(port), uintptr(source), object))
}

func port_getn(port int32, evs *portevent, max uint32, nget *uint32, timeout *timespec) int32 {
	return int32(sysvicall5(&libc_port_getn, uintptr(port), uintptr(unsafe.Pointer(evs)), uintptr(max), uintptr(unsafe.Pointer(nget)), uintptr(unsafe.Pointer(timeout))))
}

func port_alert(port int32, flags, events uint32, user uintptr) int32 {
	return int32(sysvicall4(&libc_port_alert, uintptr(port), uintptr(flags), uintptr(events), user))
}

var portfd int32 = -1

func netpollinit() {
	portfd = port_create()
	if portfd >= 0 {
		fcntl(portfd, _F_SETFD, _FD_CLOEXEC)
		return
	}

	print("runtime: port_create failed (errno=", errno(), ")\n")
	throw("runtime: netpollinit failed")
}

func netpollIsPollDescriptor(fd uintptr) bool {
	return fd == uintptr(portfd)
}

func netpollopen(fd uintptr, pd *pollDesc) int32 {
	lock(&pd.lock)
	// We don't register for any specific type of events yet; that's
	// netpollarm's job. We merely ensure that port_associate is called
	// before an asynchronous connect/accept completes, so that when we
	// actually want to do any I/O, the call to port_associate (from
	// netpollarm, with the event set we are interested in) will unblock
	// port_getn right away thanks to the I/O readiness notification.
	pd.user = 0
	r := port_associate(portfd, _PORT_SOURCE_FD, fd, 0, uintptr(unsafe.Pointer(pd)))
	unlock(&pd.lock)
	return r
}

func netpollclose(fd uintptr) int32 {
	return port_dissociate(portfd, _PORT_SOURCE_FD, fd)
}

// Updates the association with a new set of events of interest. After
// this call, port_getn will return one and only one event for that
// particular descriptor, so this function needs to be called again.
func netpollupdate(pd *pollDesc, set, clear uint32) {
	if pd.closing {
		return
	}

	old := pd.user
	events := (old & ^clear) | set
	if old == events {
		return
	}

	if events != 0 && port_associate(portfd, _PORT_SOURCE_FD, pd.fd, events, uintptr(unsafe.Pointer(pd))) != 0 {
		print("runtime: port_associate failed (errno=", errno(), ")\n")
		throw("runtime: netpollupdate failed")
	}
	pd.user = events
}
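
// A worked example of the event-set arithmetic above, with hypothetical
// values: suppose pd.user is _POLLIN|_POLLOUT and the poller loop has just
// consumed a _POLLIN notification, so it calls netpollupdate(pd, 0, _POLLIN):
//
//	old    := pd.user              // _POLLIN | _POLLOUT
//	events := (old &^ _POLLIN) | 0 // == _POLLOUT
//
// Only the write interest is re-associated; the read side stays disarmed
// until a goroutine asks to read again and netpollarm re-adds _POLLIN.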

// subscribe the fd to the port such that port_getn will return one event.
func netpollarm(pd *pollDesc, mode int) {
	lock(&pd.lock)
	switch mode {
	case 'r':
		netpollupdate(pd, _POLLIN, 0)
	case 'w':
		netpollupdate(pd, _POLLOUT, 0)
	default:
		throw("runtime: bad mode")
	}
	unlock(&pd.lock)
}
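
// For illustration, the two modes map onto the association like this
// (a sketch of the switch above, not additional runtime code):
//
//	netpollarm(pd, 'r') // netpollupdate(pd, _POLLIN, 0):  pd.user gains _POLLIN
//	netpollarm(pd, 'w') // netpollupdate(pd, _POLLOUT, 0): pd.user gains _POLLOUT
//
// A descriptor in use for both reading and writing therefore ends up
// associated with _POLLIN|_POLLOUT.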

// netpollBreak interrupts a port_getn wait.
func netpollBreak() {
	if atomic.Cas(&netpollWakeSig, 0, 1) {
		// Use port_alert to put portfd into alert mode.
		// This will wake up all threads sleeping in port_getn on portfd,
		// and cause their calls to port_getn to return immediately.
		// Further, until portfd is taken out of alert mode,
		// all calls to port_getn will return immediately.
		if port_alert(portfd, _PORT_ALERT_UPDATE, _POLLHUP, uintptr(unsafe.Pointer(&portfd))) < 0 {
			if e := errno(); e != _EBUSY {
				println("runtime: port_alert failed with", e)
				throw("runtime: netpoll: port_alert failed")
			}
		}
	}
}
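
// The wakeup handshake between netpollBreak and netpoll, sketched end to
// end (an illustration of the code above and below, not additional logic):
//
//	netpollBreak:         atomic.Cas(&netpollWakeSig, 0, 1) succeeds
//	                      port_alert(portfd, _PORT_ALERT_UPDATE, _POLLHUP, ...) // port enters alert mode
//	netpoll (delay != 0): port_getn returns a _PORT_SOURCE_ALERT event at once
//	                      port_alert(portfd, 0, 0, 0)                           // leave alert mode
//	                      atomic.Store(&netpollWakeSig, 0)                      // allow the next netpollBreak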

// netpoll checks for ready network connections.
// Returns list of goroutines that become runnable.
// delay < 0: blocks indefinitely
// delay == 0: does not block, just polls
// delay > 0: block for up to that many nanoseconds
func netpoll(delay int64) gList {
	if portfd == -1 {
		return gList{}
	}

	var wait *timespec
	var ts timespec
	if delay < 0 {
		wait = nil
	} else if delay == 0 {
		wait = &ts
	} else {
		ts.setNsec(delay)
		if ts.tv_sec > 1e6 {
			// An arbitrary cap on how long to wait for a timer.
			// 1e6 s == ~11.5 days.
			ts.tv_sec = 1e6
		}
		wait = &ts
	}
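	// In port_getn terms, the three cases above are (a summary of the
	// code, not additional logic):
	//
	//	delay < 0   ->  wait == nil           block until an event or an alert arrives
	//	delay == 0  ->  wait == &ts (zeroed)  poll once and return immediately
	//	delay > 0   ->  wait == &ts           sleep up to delay ns, tv_sec capped at 1e6 s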

	var events [128]portevent
retry:
	var n uint32 = 1
	r := port_getn(portfd, &events[0], uint32(len(events)), &n, wait)
	e := errno()
	if r < 0 && e == _ETIME && n > 0 {
		// As per port_getn(3C), an ETIME failure does not preclude the
		// delivery of some number of events.  Treat a timeout failure
		// with delivered events as a success.
		r = 0
	}
	if r < 0 {
		if e != _EINTR && e != _ETIME {
			print("runtime: port_getn on fd ", portfd, " failed (errno=", e, ")\n")
			throw("runtime: netpoll failed")
		}
		// If a timed sleep was interrupted and there are no events,
		// just return to recalculate how long we should sleep now.
		if delay > 0 {
			return gList{}
		}
		goto retry
	}

	var toRun gList
	for i := 0; i < int(n); i++ {
		ev := &events[i]

		if ev.portev_source == _PORT_SOURCE_ALERT {
			if ev.portev_events != _POLLHUP || unsafe.Pointer(ev.portev_user) != unsafe.Pointer(&portfd) {
				throw("runtime: netpoll: bad port_alert wakeup")
			}
			if delay != 0 {
				// Now that a blocking call to netpoll
				// has seen the alert, take portfd
				// back out of alert mode.
				// See the comment in netpollBreak.
				if port_alert(portfd, 0, 0, 0) < 0 {
					e := errno()
					println("runtime: port_alert failed with", e)
					throw("runtime: netpoll: port_alert failed")
				}
				atomic.Store(&netpollWakeSig, 0)
			}
			continue
		}

		if ev.portev_events == 0 {
			continue
		}
		pd := (*pollDesc)(unsafe.Pointer(ev.portev_user))

		var mode, clear int32
		if (ev.portev_events & (_POLLIN | _POLLHUP | _POLLERR)) != 0 {
			mode += 'r'
			clear |= _POLLIN
		}
		if (ev.portev_events & (_POLLOUT | _POLLHUP | _POLLERR)) != 0 {
			mode += 'w'
			clear |= _POLLOUT
		}
		// To get edge-triggered behavior, we must re-associate the
		// descriptor with whatever events were not part of this
		// notification. For example, if we are registered for
		// POLLIN|POLLOUT and we get POLLIN, then besides waking the
		// goroutine interested in POLLIN we must not forget about the
		// one interested in POLLOUT.
		if clear != 0 {
			lock(&pd.lock)
			netpollupdate(pd, 0, uint32(clear))
			unlock(&pd.lock)
		}
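
		// The mode/clear combinations above, spelled out (a summary,
		// not additional logic):
		//
		//	_POLLIN only          -> mode == 'r',     clear == _POLLIN
		//	_POLLOUT only         -> mode == 'w',     clear == _POLLOUT
		//	_POLLHUP or _POLLERR  -> mode == 'r'+'w', clear == _POLLIN|_POLLOUT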

		if mode != 0 {
			// TODO(mikio): Consider implementing event
			// scanning error reporting once we are sure
			// about the event port on SmartOS.
			//
			// See golang.org/x/issue/30840.
			netpollready(&toRun, pd, mode)
		}
	}

	return toRun
}
