Source file src/time/sleep_test.go

     1  // Copyright 2009 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  package time_test
     6  
     7  import (
     8  	"errors"
     9  	"fmt"
    10  	"runtime"
    11  	"strings"
    12  	"sync"
    13  	"sync/atomic"
    14  	"testing"
    15  	. "time"
    16  )
    17  
    18  // The Go runtime uses different Windows timers for time.Now and sleeping.
    19  // These can tick at different frequencies and can arrive out of sync.
    20  // The effect can be seen, for example, when time.Sleep(100ms) is actually
    21  // shorter than 100ms when measured as the difference between time.Now
    22  // before and after the time.Sleep call. This was observed on Windows XP
    23  // SP3 (windows/386). windowsInaccuracy exists to ignore such errors.
    24  const windowsInaccuracy = 17 * Millisecond
    25  
    26  func TestSleep(t *testing.T) {
    27  	const delay = 100 * Millisecond
    28  	go func() {
    29  		Sleep(delay / 2)
    30  		Interrupt()
    31  	}()
    32  	start := Now()
    33  	Sleep(delay)
    34  	delayadj := delay
    35  	if runtime.GOOS == "windows" {
    36  		delayadj -= windowsInaccuracy
    37  	}
    38  	duration := Now().Sub(start)
    39  	if duration < delayadj {
    40  		t.Fatalf("Sleep(%s) slept for only %s", delay, duration)
    41  	}
    42  }
    43  
    44  // Test the basic function calling behavior. Correct queueing
    45  // behavior is tested elsewhere, since After and AfterFunc share
    46  // the same code.
    47  func TestAfterFunc(t *testing.T) {
    48  	i := 10
    49  	c := make(chan bool)
    50  	var f func()
    51  	f = func() {
    52  		i--
    53  		if i >= 0 {
    54  			AfterFunc(0, f)
    55  			Sleep(1 * Second)
    56  		} else {
    57  			c <- true
    58  		}
    59  	}
    60  
    61  	AfterFunc(0, f)
    62  	<-c
    63  }
    64  
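As the comment above notes, After and AfterFunc share the same queueing code; the test exercises only the callback path. A minimal standalone sketch of the two entry points (an illustration, not part of this file; all names are arbitrary):

package main

import (
	"fmt"
	"time"
)

func main() {
	// After delivers the fire time on a channel once the duration elapses.
	<-time.After(10 * time.Millisecond)
	fmt.Println("After fired")

	// AfterFunc runs the callback in its own goroutine after the duration elapses.
	done := make(chan struct{})
	time.AfterFunc(10*time.Millisecond, func() { close(done) })
	<-done
	fmt.Println("AfterFunc fired")
}
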
    65  func TestAfterStress(t *testing.T) {
    66  	stop := uint32(0)
    67  	go func() {
    68  		for atomic.LoadUint32(&stop) == 0 {
    69  			runtime.GC()
    70  			// Yield so that the OS can wake up the timer thread,
    71  			// so that it can generate channel sends for the main goroutine,
    72  			// which will eventually set stop = 1 for us.
    73  			Sleep(Nanosecond)
    74  		}
    75  	}()
    76  	ticker := NewTicker(1)
    77  	for i := 0; i < 100; i++ {
    78  		<-ticker.C
    79  	}
    80  	ticker.Stop()
    81  	atomic.StoreUint32(&stop, 1)
    82  }
    83  
    84  func benchmark(b *testing.B, bench func(n int)) {
    85  
    86  	// Create an equal number of garbage timers on each P before starting
    87  	// the benchmark.
    88  	var wg sync.WaitGroup
    89  	garbageAll := make([][]*Timer, runtime.GOMAXPROCS(0))
    90  	for i := range garbageAll {
    91  		wg.Add(1)
    92  		go func(i int) {
    93  			defer wg.Done()
    94  			garbage := make([]*Timer, 1<<15)
    95  			for j := range garbage {
    96  				garbage[j] = AfterFunc(Hour, nil)
    97  			}
    98  			garbageAll[i] = garbage
    99  		}(i)
   100  	}
   101  	wg.Wait()
   102  
   103  	b.ResetTimer()
   104  	b.RunParallel(func(pb *testing.PB) {
   105  		for pb.Next() {
   106  			bench(1000)
   107  		}
   108  	})
   109  	b.StopTimer()
   110  
   111  	for _, garbage := range garbageAll {
   112  		for _, t := range garbage {
   113  			t.Stop()
   114  		}
   115  	}
   116  }
   117  
   118  func BenchmarkAfterFunc(b *testing.B) {
   119  	benchmark(b, func(n int) {
   120  		c := make(chan bool)
   121  		var f func()
   122  		f = func() {
   123  			n--
   124  			if n >= 0 {
   125  				AfterFunc(0, f)
   126  			} else {
   127  				c <- true
   128  			}
   129  		}
   130  
   131  		AfterFunc(0, f)
   132  		<-c
   133  	})
   134  }
   135  
   136  func BenchmarkAfter(b *testing.B) {
   137  	benchmark(b, func(n int) {
   138  		for i := 0; i < n; i++ {
   139  			<-After(1)
   140  		}
   141  	})
   142  }
   143  
   144  func BenchmarkStop(b *testing.B) {
   145  	benchmark(b, func(n int) {
   146  		for i := 0; i < n; i++ {
   147  			NewTimer(1 * Second).Stop()
   148  		}
   149  	})
   150  }
   151  
   152  func BenchmarkSimultaneousAfterFunc(b *testing.B) {
   153  	benchmark(b, func(n int) {
   154  		var wg sync.WaitGroup
   155  		wg.Add(n)
   156  		for i := 0; i < n; i++ {
   157  			AfterFunc(0, wg.Done)
   158  		}
   159  		wg.Wait()
   160  	})
   161  }
   162  
   163  func BenchmarkStartStop(b *testing.B) {
   164  	benchmark(b, func(n int) {
   165  		timers := make([]*Timer, n)
   166  		for i := 0; i < n; i++ {
   167  			timers[i] = AfterFunc(Hour, nil)
   168  		}
   169  
   170  		for i := 0; i < n; i++ {
   171  			timers[i].Stop()
   172  		}
   173  	})
   174  }
   175  
   176  func BenchmarkReset(b *testing.B) {
   177  	benchmark(b, func(n int) {
   178  		t := NewTimer(Hour)
   179  		for i := 0; i < n; i++ {
   180  			t.Reset(Hour)
   181  		}
   182  		t.Stop()
   183  	})
   184  }
   185  
   186  func BenchmarkSleep(b *testing.B) {
   187  	benchmark(b, func(n int) {
   188  		var wg sync.WaitGroup
   189  		wg.Add(n)
   190  		for i := 0; i < n; i++ {
   191  			go func() {
   192  				Sleep(Nanosecond)
   193  				wg.Done()
   194  			}()
   195  		}
   196  		wg.Wait()
   197  	})
   198  }
   199  
   200  func TestAfter(t *testing.T) {
   201  	const delay = 100 * Millisecond
   202  	start := Now()
   203  	end := <-After(delay)
   204  	delayadj := delay
   205  	if runtime.GOOS == "windows" {
   206  		delayadj -= windowsInaccuracy
   207  	}
   208  	if duration := Now().Sub(start); duration < delayadj {
   209  		t.Fatalf("After(%s) slept for only %d ns", delay, duration)
   210  	}
   211  	if min := start.Add(delayadj); end.Before(min) {
   212  		t.Fatalf("After(%s) expect >= %s, got %s", delay, min, end)
   213  	}
   214  }
   215  
   216  func TestAfterTick(t *testing.T) {
   217  	const Count = 10
   218  	Delta := 100 * Millisecond
   219  	if testing.Short() {
   220  		Delta = 10 * Millisecond
   221  	}
   222  	t0 := Now()
   223  	for i := 0; i < Count; i++ {
   224  		<-After(Delta)
   225  	}
   226  	t1 := Now()
   227  	d := t1.Sub(t0)
   228  	target := Delta * Count
   229  	if d < target*9/10 {
   230  		t.Fatalf("%d ticks of %s too fast: took %s, expected %s", Count, Delta, d, target)
   231  	}
   232  	if !testing.Short() && d > target*30/10 {
   233  		t.Fatalf("%d ticks of %s too slow: took %s, expected %s", Count, Delta, d, target)
   234  	}
   235  }
   236  
   237  func TestAfterStop(t *testing.T) {
   238  	// We want to test that we stop a timer before it runs.
   239  	// We also want to test that it didn't run after a longer timer.
   240  	// Since we don't want the test to run for too long, we don't
   241  	// want to use lengthy times. That makes the test inherently flaky.
   242  	// So only report an error if it fails five times in a row.
   243  
   244  	var errs []string
   245  	logErrs := func() {
   246  		for _, e := range errs {
   247  			t.Log(e)
   248  		}
   249  	}
   250  
   251  	for i := 0; i < 5; i++ {
   252  		AfterFunc(100*Millisecond, func() {})
   253  		t0 := NewTimer(50 * Millisecond)
   254  		c1 := make(chan bool, 1)
   255  		t1 := AfterFunc(150*Millisecond, func() { c1 <- true })
   256  		c2 := After(200 * Millisecond)
   257  		if !t0.Stop() {
   258  			errs = append(errs, "failed to stop event 0")
   259  			continue
   260  		}
   261  		if !t1.Stop() {
   262  			errs = append(errs, "failed to stop event 1")
   263  			continue
   264  		}
   265  		<-c2
   266  		select {
   267  		case <-t0.C:
   268  			errs = append(errs, "event 0 was not stopped")
   269  			continue
   270  		case <-c1:
   271  			errs = append(errs, "event 1 was not stopped")
   272  			continue
   273  		default:
   274  		}
   275  		if t1.Stop() {
   276  			errs = append(errs, "Stop returned true twice")
   277  			continue
   278  		}
   279  
   280  		// Test passed, so all done.
   281  		if len(errs) > 0 {
   282  			t.Logf("saw %d errors, ignoring to avoid flakiness", len(errs))
   283  			logErrs()
   284  		}
   285  
   286  		return
   287  	}
   288  
   289  	t.Errorf("saw %d errors", len(errs))
   290  	logErrs()
   291  }
   292  
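TestAfterStop depends on Stop reporting whether the timer was still pending. Outside this file, the usual pattern when Stop returns false is to drain the channel before reusing the timer; a minimal standalone sketch, under the assumption that the timer may already have fired (not taken from this file):

package main

import "time"

func main() {
	t := time.NewTimer(50 * time.Millisecond)
	time.Sleep(100 * time.Millisecond) // let the timer fire

	// Stop reports false if the timer already fired or was stopped;
	// the fired value may still be buffered in t.C.
	if !t.Stop() {
		select {
		case <-t.C: // drain the stale value
		default:
		}
	}

	t.Reset(50 * time.Millisecond) // safe to reuse after draining
	<-t.C
}
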
   293  func TestAfterQueuing(t *testing.T) {
   294  	// This test flakes out on some systems,
   295  	// so we'll try it a few times before declaring it a failure.
   296  	const attempts = 5
   297  	err := errors.New("!=nil")
   298  	for i := 0; i < attempts && err != nil; i++ {
   299  		delta := Duration(20+i*50) * Millisecond
   300  		if err = testAfterQueuing(delta); err != nil {
   301  			t.Logf("attempt %v failed: %v", i, err)
   302  		}
   303  	}
   304  	if err != nil {
   305  		t.Fatal(err)
   306  	}
   307  }
   308  
   309  var slots = []int{5, 3, 6, 6, 6, 1, 1, 2, 7, 9, 4, 8, 0}
   310  
   311  type afterResult struct {
   312  	slot int
   313  	t    Time
   314  }
   315  
   316  func await(slot int, result chan<- afterResult, ac <-chan Time) {
   317  	result <- afterResult{slot, <-ac}
   318  }
   319  
   320  func testAfterQueuing(delta Duration) error {
   321  	// make the result channel buffered because we don't want
   322  	// to depend on channel queueing semantics that might
   323  	// possibly change in the future.
   324  	result := make(chan afterResult, len(slots))
   325  
   326  	t0 := Now()
   327  	for _, slot := range slots {
   328  		go await(slot, result, After(Duration(slot)*delta))
   329  	}
   330  	var order []int
   331  	var times []Time
   332  	for range slots {
   333  		r := <-result
   334  		order = append(order, r.slot)
   335  		times = append(times, r.t)
   336  	}
   337  	for i := range order {
   338  		if i > 0 && order[i] < order[i-1] {
   339  			return fmt.Errorf("After calls returned out of order: %v", order)
   340  		}
   341  	}
   342  	for i, t := range times {
   343  		dt := t.Sub(t0)
   344  		target := Duration(order[i]) * delta
   345  		if dt < target-delta/2 || dt > target+delta*10 {
   346  			return fmt.Errorf("After(%s) arrived at %s, expected [%s,%s]", target, dt, target-delta/2, target+delta*10)
   347  		}
   348  	}
   349  	return nil
   350  }
   351  
   352  func TestTimerStopStress(t *testing.T) {
   353  	if testing.Short() {
   354  		return
   355  	}
   356  	for i := 0; i < 100; i++ {
   357  		go func(i int) {
   358  			timer := AfterFunc(2*Second, func() {
   359  				t.Errorf("timer %d was not stopped", i)
   360  			})
   361  			Sleep(1 * Second)
   362  			timer.Stop()
   363  		}(i)
   364  	}
   365  	Sleep(3 * Second)
   366  }
   367  
   368  func TestSleepZeroDeadlock(t *testing.T) {
   369  	// Sleep(0) used to hang; the sequence of events was as follows.
   370  	// Sleep(0) sets G's status to Gwaiting, but then immediately returns, leaving the status unchanged.
   371  	// Then the goroutine calls e.g. new and falls down into the scheduler due to pending GC.
   372  	// After the GC nobody wakes up the goroutine from Gwaiting status.
   373  	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(4))
   374  	c := make(chan bool)
   375  	go func() {
   376  		for i := 0; i < 100; i++ {
   377  			runtime.GC()
   378  		}
   379  		c <- true
   380  	}()
   381  	for i := 0; i < 100; i++ {
   382  		Sleep(0)
   383  		tmp := make(chan bool, 1)
   384  		tmp <- true
   385  		<-tmp
   386  	}
   387  	<-c
   388  }
   389  
   390  func testReset(d Duration) error {
   391  	t0 := NewTimer(2 * d)
   392  	Sleep(d)
   393  	if !t0.Reset(3 * d) {
   394  		return errors.New("resetting unfired timer returned false")
   395  	}
   396  	Sleep(2 * d)
   397  	select {
   398  	case <-t0.C:
   399  		return errors.New("timer fired early")
   400  	default:
   401  	}
   402  	Sleep(2 * d)
   403  	select {
   404  	case <-t0.C:
   405  	default:
   406  		return errors.New("reset timer did not fire")
   407  	}
   408  
   409  	if t0.Reset(50 * Millisecond) {
   410  		return errors.New("resetting expired timer returned true")
   411  	}
   412  	return nil
   413  }
   414  
   415  func TestReset(t *testing.T) {
   416  	// We try to run this test with increasingly larger multiples
   417  	// until one works, so that slow, loaded hardware isn't as flaky,
   418  	// but without slowing down fast machines unnecessarily.
   419  	const unit = 25 * Millisecond
   420  	tries := []Duration{
   421  		1 * unit,
   422  		3 * unit,
   423  		7 * unit,
   424  		15 * unit,
   425  	}
   426  	var err error
   427  	for _, d := range tries {
   428  		err = testReset(d)
   429  		if err == nil {
   430  			t.Logf("passed using duration %v", d)
   431  			return
   432  		}
   433  	}
   434  	t.Error(err)
   435  }
   436  
   437  // Test that sleeping (via Sleep or Timer) for an interval so large it
   438  // overflows does not result in a short sleep duration. Nor does it interfere
   439  // with execution of other timers. If it does, timers in this or subsequent
   440  // tests may not fire.
   441  func TestOverflowSleep(t *testing.T) {
   442  	const big = Duration(int64(1<<63 - 1))
   443  
   444  	go func() {
   445  		Sleep(big)
   446  		// On failure, this may return after the test has completed, so
   447  		// we need to panic instead.
   448  		panic("big sleep returned")
   449  	}()
   450  
   451  	select {
   452  	case <-After(big):
   453  		t.Fatalf("big timeout fired")
   454  	case <-After(25 * Millisecond):
   455  		// OK
   456  	}
   457  
   458  	const neg = Duration(-1 << 63)
   459  	Sleep(neg) // Returns immediately.
   460  	select {
   461  	case <-After(neg):
   462  		// OK
   463  	case <-After(1 * Second):
   464  		t.Fatalf("negative timeout didn't fire")
   465  	}
   466  }
   467  
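The test above checks both extremes: a duration large enough to overflow must not shorten the sleep, and a negative duration must complete immediately. A trivial standalone check of the non-positive case (illustration only):

package main

import (
	"fmt"
	"time"
)

func main() {
	start := time.Now()
	time.Sleep(-time.Second) // non-positive durations return immediately
	<-time.After(0)          // a zero timeout fires immediately as well
	fmt.Println("elapsed:", time.Since(start))
}
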
   468  // Test that a panic while deleting a timer does not leave
   469  // the timers mutex held, deadlocking a ticker.Stop in a defer.
   470  func TestIssue5745(t *testing.T) {
   471  	ticker := NewTicker(Hour)
   472  	defer func() {
   473  		// This would deadlock here before the fix, due to the
   474  		// lock taken before the segfault.
   475  		ticker.Stop()
   476  
   477  		if r := recover(); r == nil {
   478  			t.Error("Expected panic, but none happened.")
   479  		}
   480  	}()
   481  
   482  	// cause a panic due to a segfault
   483  	var timer *Timer
   484  	timer.Stop()
   485  	t.Error("Should be unreachable.")
   486  }
   487  
   488  func TestOverflowPeriodRuntimeTimer(t *testing.T) {
   489  	// This may hang forever if timers are broken. See comment near
   490  	// the end of CheckRuntimeTimerOverflow in internal_test.go.
   491  	CheckRuntimeTimerPeriodOverflow()
   492  }
   493  
   494  func checkZeroPanicString(t *testing.T) {
   495  	e := recover()
   496  	s, _ := e.(string)
   497  	if want := "called on uninitialized Timer"; !strings.Contains(s, want) {
   498  		t.Errorf("panic = %v; want substring %q", e, want)
   499  	}
   500  }
   501  
   502  func TestZeroTimerResetPanics(t *testing.T) {
   503  	defer checkZeroPanicString(t)
   504  	var tr Timer
   505  	tr.Reset(1)
   506  }
   507  
   508  func TestZeroTimerStopPanics(t *testing.T) {
   509  	defer checkZeroPanicString(t)
   510  	var tr Timer
   511  	tr.Stop()
   512  }
   513  
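The two tests above verify that the zero value of Timer panics with a message containing "called on uninitialized Timer". A usable Timer must come from NewTimer or AfterFunc; a short standalone sketch (illustration only):

package main

import "time"

func main() {
	// A Timer obtained from NewTimer (or AfterFunc) has an initialized runtime timer.
	t := time.NewTimer(10 * time.Millisecond)
	defer t.Stop()
	<-t.C

	// By contrast, calling Reset or Stop on a zero-value Timer panics:
	//   var bad time.Timer
	//   bad.Reset(1) // panic: time: Reset called on uninitialized Timer
}
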
   514  // Test that zero duration timers aren't missed by the scheduler. Regression test for issue 44868.
   515  func TestZeroTimer(t *testing.T) {
   516  	if testing.Short() {
   517  		t.Skip("-short")
   518  	}
   519  
   520  	for i := 0; i < 1000000; i++ {
   521  		s := Now()
   522  		ti := NewTimer(0)
   523  		<-ti.C
   524  		if diff := Since(s); diff > 2*Second {
   525  			t.Errorf("Expected time to get value from Timer channel in less than 2 sec, took %v", diff)
   526  		}
   527  	}
   528  }
   529  
   530  // Test that rapidly moving a timer earlier doesn't cause it to get dropped.
   531  // Issue 47329.
   532  func TestTimerModifiedEarlier(t *testing.T) {
   533  	past := Until(Unix(0, 0))
   534  	count := 1000
   535  	fail := 0
   536  	for i := 0; i < count; i++ {
   537  		timer := NewTimer(Hour)
   538  		for j := 0; j < 10; j++ {
   539  			if !timer.Stop() {
   540  				<-timer.C
   541  			}
   542  			timer.Reset(past)
   543  		}
   544  
   545  		deadline := NewTimer(10 * Second)
   546  		defer deadline.Stop()
   547  		now := Now()
   548  		select {
   549  		case <-timer.C:
   550  			if since := Since(now); since > 8*Second {
   551  				t.Errorf("timer took too long (%v)", since)
   552  				fail++
   553  			}
   554  		case <-deadline.C:
   555  			t.Error("deadline expired")
   556  		}
   557  	}
   558  
   559  	if fail > 0 {
   560  		t.Errorf("%d failures", fail)
   561  	}
   562  }
   563  
   564  // Benchmark timer latency when the thread that creates the timer is busy with
   565  // other work and the timers must be serviced by other threads.
   566  // https://golang.org/issue/38860
   567  func BenchmarkParallelTimerLatency(b *testing.B) {
   568  	gmp := runtime.GOMAXPROCS(0)
   569  	if gmp < 2 || runtime.NumCPU() < gmp {
   570  		b.Skip("skipping with GOMAXPROCS < 2 or NumCPU < GOMAXPROCS")
   571  	}
   572  
   573  	// allocate memory now to avoid GC interference later.
   574  	timerCount := gmp - 1
   575  	stats := make([]struct {
   576  		sum   float64
   577  		max   Duration
   578  		count int64
   579  		_     [5]int64 // cache line padding
   580  	}, timerCount)
   581  
   582  	// Ensure the time to start new threads to service timers will not pollute
   583  	// the results.
   584  	warmupScheduler(gmp)
   585  
   586  	// Note that, other than the AfterFunc calls this benchmark is measuring,
   587  	// it avoids using any other timers. In particular, the main goroutine uses
   588  	// doWork to spin for some durations because, up through Go 1.15, if all
   589  	// threads are idle, sysmon could leave deep sleep when we wake.
   590  
   591  	// Ensure sysmon is in deep sleep.
   592  	doWork(30 * Millisecond)
   593  
   594  	b.ResetTimer()
   595  
   596  	const delay = Millisecond
   597  	var wg sync.WaitGroup
   598  	var count int32
   599  	for i := 0; i < b.N; i++ {
   600  		wg.Add(timerCount)
   601  		atomic.StoreInt32(&count, 0)
   602  		for j := 0; j < timerCount; j++ {
   603  			j := j
   604  			expectedWakeup := Now().Add(delay)
   605  			AfterFunc(delay, func() {
   606  				late := Since(expectedWakeup)
   607  				if late < 0 {
   608  					late = 0
   609  				}
   610  				stats[j].count++
   611  				stats[j].sum += float64(late.Nanoseconds())
   612  				if late > stats[j].max {
   613  					stats[j].max = late
   614  				}
   615  				atomic.AddInt32(&count, 1)
   616  				for atomic.LoadInt32(&count) < int32(timerCount) {
   617  					// spin until all timers fired
   618  				}
   619  				wg.Done()
   620  			})
   621  		}
   622  
   623  		for atomic.LoadInt32(&count) < int32(timerCount) {
   624  			// spin until all timers fired
   625  		}
   626  		wg.Wait()
   627  
   628  		// Spin for a bit to let the other scheduler threads go idle before the
   629  		// next round.
   630  		doWork(Millisecond)
   631  	}
   632  	var total float64
   633  	var samples float64
   634  	max := Duration(0)
   635  	for _, s := range stats {
   636  		if s.max > max {
   637  			max = s.max
   638  		}
   639  		total += s.sum
   640  		samples += float64(s.count)
   641  	}
   642  	b.ReportMetric(0, "ns/op")
   643  	b.ReportMetric(total/samples, "avg-late-ns")
   644  	b.ReportMetric(float64(max.Nanoseconds()), "max-late-ns")
   645  }
   646  
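Instead of the default ns/op, the benchmark above publishes latency statistics through testing.B.ReportMetric. A minimal standalone sketch of the same reporting idea, with hypothetical names and a simplified measurement:

package example

import (
	"testing"
	"time"
)

// BenchmarkWakeLatency (hypothetical) reports how late a 1ms timer fires,
// on average, as a custom metric.
func BenchmarkWakeLatency(b *testing.B) {
	var totalLateNs float64
	for i := 0; i < b.N; i++ {
		expected := time.Now().Add(time.Millisecond)
		<-time.After(time.Millisecond)
		if late := time.Since(expected); late > 0 {
			totalLateNs += float64(late.Nanoseconds())
		}
	}
	b.ReportMetric(0, "ns/op")                              // suppress the default metric
	b.ReportMetric(totalLateNs/float64(b.N), "avg-late-ns") // report average lateness
}
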
   647  // Benchmark timer latency with staggered wakeup times and varying CPU bound
   648  // workloads. https://golang.org/issue/38860
   649  func BenchmarkStaggeredTickerLatency(b *testing.B) {
   650  	gmp := runtime.GOMAXPROCS(0)
   651  	if gmp < 2 || runtime.NumCPU() < gmp {
   652  		b.Skip("skipping with GOMAXPROCS < 2 or NumCPU < GOMAXPROCS")
   653  	}
   654  
   655  	const delay = 3 * Millisecond
   656  
   657  	for _, dur := range []Duration{300 * Microsecond, 2 * Millisecond} {
   658  		b.Run(fmt.Sprintf("work-dur=%s", dur), func(b *testing.B) {
   659  			for tickersPerP := 1; tickersPerP < int(delay/dur)+1; tickersPerP++ {
   660  				tickerCount := gmp * tickersPerP
   661  				b.Run(fmt.Sprintf("tickers-per-P=%d", tickersPerP), func(b *testing.B) {
   662  					// allocate memory now to avoid GC interference later.
   663  					stats := make([]struct {
   664  						sum   float64
   665  						max   Duration
   666  						count int64
   667  						_     [5]int64 // cache line padding
   668  					}, tickerCount)
   669  
   670  					// Ensure the time to start new threads to service timers
   671  					// will not pollute the results.
   672  					warmupScheduler(gmp)
   673  
   674  					b.ResetTimer()
   675  
   676  					var wg sync.WaitGroup
   677  					wg.Add(tickerCount)
   678  					for j := 0; j < tickerCount; j++ {
   679  						j := j
   680  						doWork(delay / Duration(gmp))
   681  						expectedWakeup := Now().Add(delay)
   682  						ticker := NewTicker(delay)
   683  						go func(c int, ticker *Ticker, firstWake Time) {
   684  							defer ticker.Stop()
   685  
   686  							for ; c > 0; c-- {
   687  								<-ticker.C
   688  								late := Since(expectedWakeup)
   689  								if late < 0 {
   690  									late = 0
   691  								}
   692  								stats[j].count++
   693  								stats[j].sum += float64(late.Nanoseconds())
   694  								if late > stats[j].max {
   695  									stats[j].max = late
   696  								}
   697  								expectedWakeup = expectedWakeup.Add(delay)
   698  								doWork(dur)
   699  							}
   700  							wg.Done()
   701  						}(b.N, ticker, expectedWakeup)
   702  					}
   703  					wg.Wait()
   704  
   705  					var total float64
   706  					var samples float64
   707  					max := Duration(0)
   708  					for _, s := range stats {
   709  						if s.max > max {
   710  							max = s.max
   711  						}
   712  						total += s.sum
   713  						samples += float64(s.count)
   714  					}
   715  					b.ReportMetric(0, "ns/op")
   716  					b.ReportMetric(total/samples, "avg-late-ns")
   717  					b.ReportMetric(float64(max.Nanoseconds()), "max-late-ns")
   718  				})
   719  			}
   720  		})
   721  	}
   722  }
   723  
   724  // warmupScheduler ensures the scheduler has at least targetThreadCount threads
   725  // in its thread pool.
   726  func warmupScheduler(targetThreadCount int) {
   727  	var wg sync.WaitGroup
   728  	var count int32
   729  	for i := 0; i < targetThreadCount; i++ {
   730  		wg.Add(1)
   731  		go func() {
   732  			atomic.AddInt32(&count, 1)
   733  			for atomic.LoadInt32(&count) < int32(targetThreadCount) {
   734  				// spin until all threads started
   735  			}
   736  
   737  			// spin a bit more to ensure they are all running on separate CPUs.
   738  			doWork(Millisecond)
   739  			wg.Done()
   740  		}()
   741  	}
   742  	wg.Wait()
   743  }
   744  
   745  func doWork(dur Duration) {
   746  	start := Now()
   747  	for Since(start) < dur {
   748  	}
   749  }
   750  
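warmupScheduler and doWork are the busy-wait helpers the latency benchmarks above lean on: worker threads are created and kept spinning before measurement so that thread startup is not charged to the timers. A sketch of how a benchmark in this file might combine them (hypothetical body; it assumes this file's imports, including the dot-import of time):

// Hypothetical benchmark body illustrating the warm-up pattern used above.
func benchmarkWithWarmup(b *testing.B) {
	warmupScheduler(runtime.GOMAXPROCS(0)) // ensure the worker threads already exist
	doWork(30 * Millisecond)               // let the scheduler settle before timing

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		// ... timer-sensitive work being measured ...
	}
}
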
