Source file src/runtime/rwmutex_test.go

// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// GOMAXPROCS=10 go test

// This is a copy of sync/rwmutex_test.go rewritten to test the
// runtime rwmutex.

package runtime_test

import (
	"fmt"
	. "runtime"
	"runtime/debug"
	"sync/atomic"
	"testing"
)

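// parallelReader takes a read lock, reports it on clocked, spins until
// cunlock is set, then releases the lock and reports completion on cdone.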
func parallelReader(m *RWMutex, clocked chan bool, cunlock *uint32, cdone chan bool) {
	m.RLock()
	clocked <- true
	for atomic.LoadUint32(cunlock) == 0 {
	}
	m.RUnlock()
	cdone <- true
}

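// doTestParallelReaders checks that numReaders goroutines can all hold the
// read lock at the same time before any of them release it.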
func doTestParallelReaders(numReaders int) {
	GOMAXPROCS(numReaders + 1)
	var m RWMutex
	clocked := make(chan bool, numReaders)
	var cunlock uint32
	cdone := make(chan bool)
	for i := 0; i < numReaders; i++ {
		go parallelReader(&m, clocked, &cunlock, cdone)
	}
	// Wait for all parallel RLock()s to succeed.
	for i := 0; i < numReaders; i++ {
		<-clocked
	}
	atomic.StoreUint32(&cunlock, 1)
	// Wait for the goroutines to finish.
	for i := 0; i < numReaders; i++ {
		<-cdone
	}
}

func TestParallelRWMutexReaders(t *testing.T) {
	if GOARCH == "wasm" {
		t.Skip("wasm has no threads yet")
	}
	defer GOMAXPROCS(GOMAXPROCS(-1))
	// If the runtime triggers a forced GC during this test, it will deadlock,
	// since the spinning goroutines can't be stopped/preempted.
	// Disable GC for this test (see issue #10958).
	defer debug.SetGCPercent(debug.SetGCPercent(-1))
	doTestParallelReaders(1)
	doTestParallelReaders(3)
	doTestParallelReaders(4)
}

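// reader repeatedly takes and releases the read lock, bumping *activity by 1
// while it is held and checking that no writer is active at the same time.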
func reader(rwm *RWMutex, num_iterations int, activity *int32, cdone chan bool) {
	for i := 0; i < num_iterations; i++ {
		rwm.RLock()
		n := atomic.AddInt32(activity, 1)
		if n < 1 || n >= 10000 {
			panic(fmt.Sprintf("rlock(%d)\n", n))
		}
		for i := 0; i < 100; i++ {
		}
		atomic.AddInt32(activity, -1)
		rwm.RUnlock()
	}
	cdone <- true
}

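// writer repeatedly takes and releases the write lock, adding 10000 to
// *activity while it is held and checking that it has exclusive access.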
func writer(rwm *RWMutex, num_iterations int, activity *int32, cdone chan bool) {
	for i := 0; i < num_iterations; i++ {
		rwm.Lock()
		n := atomic.AddInt32(activity, 10000)
		if n != 10000 {
			panic(fmt.Sprintf("wlock(%d)\n", n))
		}
		for i := 0; i < 100; i++ {
		}
		atomic.AddInt32(activity, -10000)
		rwm.Unlock()
	}
	cdone <- true
}

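// HammerRWMutex runs two writers and numReaders readers concurrently, each
// for num_iterations iterations, under the given GOMAXPROCS setting.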
func HammerRWMutex(gomaxprocs, numReaders, num_iterations int) {
	GOMAXPROCS(gomaxprocs)
	// Number of active readers + 10000 * number of active writers.
	var activity int32
	var rwm RWMutex
	cdone := make(chan bool)
	go writer(&rwm, num_iterations, &activity, cdone)
	var i int
	for i = 0; i < numReaders/2; i++ {
		go reader(&rwm, num_iterations, &activity, cdone)
	}
	go writer(&rwm, num_iterations, &activity, cdone)
	for ; i < numReaders; i++ {
		go reader(&rwm, num_iterations, &activity, cdone)
	}
	// Wait for the 2 writers and all readers to finish.
	for i := 0; i < 2+numReaders; i++ {
		<-cdone
	}
}

func TestRWMutex(t *testing.T) {
	defer GOMAXPROCS(GOMAXPROCS(-1))
	n := 1000
	if testing.Short() {
		n = 5
	}
	HammerRWMutex(1, 1, n)
	HammerRWMutex(1, 3, n)
	HammerRWMutex(1, 10, n)
	HammerRWMutex(4, 1, n)
	HammerRWMutex(4, 3, n)
	HammerRWMutex(4, 10, n)
	HammerRWMutex(10, 1, n)
	HammerRWMutex(10, 3, n)
	HammerRWMutex(10, 10, n)
	HammerRWMutex(10, 5, n)
}

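// BenchmarkRWMutexUncontended measures lock/unlock cost when each goroutine
// uses its own padded rwmutex, so there is no contention or false sharing.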
func BenchmarkRWMutexUncontended(b *testing.B) {
	type PaddedRWMutex struct {
		RWMutex
		pad [32]uint32
	}
	b.RunParallel(func(pb *testing.PB) {
		var rwm PaddedRWMutex
		for pb.Next() {
			rwm.RLock()
			rwm.RLock()
			rwm.RUnlock()
			rwm.RUnlock()
			rwm.Lock()
			rwm.Unlock()
		}
	})
}

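// benchmarkRWMutex measures a shared rwmutex under contention: roughly one in
// every writeRatio operations takes the write lock; the rest take the read
// lock and do localWork iterations of busy work while holding it.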
func benchmarkRWMutex(b *testing.B, localWork, writeRatio int) {
	var rwm RWMutex
	b.RunParallel(func(pb *testing.PB) {
		foo := 0
		for pb.Next() {
			foo++
			if foo%writeRatio == 0 {
				rwm.Lock()
				rwm.Unlock()
			} else {
				rwm.RLock()
				for i := 0; i != localWork; i += 1 {
					foo *= 2
					foo /= 2
				}
				rwm.RUnlock()
			}
		}
		_ = foo
	})
}

func BenchmarkRWMutexWrite100(b *testing.B) {
	benchmarkRWMutex(b, 0, 100)
}

func BenchmarkRWMutexWrite10(b *testing.B) {
	benchmarkRWMutex(b, 0, 10)
}

func BenchmarkRWMutexWorkWrite100(b *testing.B) {
	benchmarkRWMutex(b, 100, 100)
}

func BenchmarkRWMutexWorkWrite10(b *testing.B) {
	benchmarkRWMutex(b, 100, 10)
}
