Source file src/runtime/malloc_test.go

// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime_test

import (
	"flag"
	"fmt"
	"internal/race"
	"internal/testenv"
	"os"
	"os/exec"
	"reflect"
	"runtime"
	. "runtime"
	"strings"
	"sync/atomic"
	"testing"
	"time"
	"unsafe"
)

var testMemStatsCount int

func TestMemStats(t *testing.T) {
	testMemStatsCount++

	// Make sure there's at least one forced GC.
	GC()

	// Test that MemStats has sane values.
	st := new(MemStats)
	ReadMemStats(st)

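	// nz reports an error if its argument is the zero value of its
	// dynamic type, i.e. the stat was never set.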
	nz := func(x interface{}) error {
		if x != reflect.Zero(reflect.TypeOf(x)).Interface() {
			return nil
		}
		return fmt.Errorf("zero value")
	}
	le := func(thresh float64) func(interface{}) error {
		return func(x interface{}) error {
			// These sanity tests aren't necessarily valid
			// with high -test.count values, so only run
			// them once.
			if testMemStatsCount > 1 {
				return nil
			}

			if reflect.ValueOf(x).Convert(reflect.TypeOf(thresh)).Float() < thresh {
				return nil
			}
			return fmt.Errorf("insanely high value (overflow?); want <= %v", thresh)
		}
	}
	eq := func(x interface{}) func(interface{}) error {
		return func(y interface{}) error {
			if x == y {
				return nil
			}
			return fmt.Errorf("want %v", x)
		}
	}
	// Of the uint fields, HeapReleased, HeapIdle can be 0.
	// PauseTotalNs can be 0 if timer resolution is poor.
	fields := map[string][]func(interface{}) error{
		"Alloc": {nz, le(1e10)}, "TotalAlloc": {nz, le(1e11)}, "Sys": {nz, le(1e10)},
		"Lookups": {eq(uint64(0))}, "Mallocs": {nz, le(1e10)}, "Frees": {nz, le(1e10)},
		"HeapAlloc": {nz, le(1e10)}, "HeapSys": {nz, le(1e10)}, "HeapIdle": {le(1e10)},
		"HeapInuse": {nz, le(1e10)}, "HeapReleased": {le(1e10)}, "HeapObjects": {nz, le(1e10)},
		"StackInuse": {nz, le(1e10)}, "StackSys": {nz, le(1e10)},
		"MSpanInuse": {nz, le(1e10)}, "MSpanSys": {nz, le(1e10)},
		"MCacheInuse": {nz, le(1e10)}, "MCacheSys": {nz, le(1e10)},
		"BuckHashSys": {nz, le(1e10)}, "GCSys": {nz, le(1e10)}, "OtherSys": {nz, le(1e10)},
		"NextGC": {nz, le(1e10)}, "LastGC": {nz},
		"PauseTotalNs": {le(1e11)}, "PauseNs": nil, "PauseEnd": nil,
		"NumGC": {nz, le(1e9)}, "NumForcedGC": {nz, le(1e9)},
		"GCCPUFraction": {le(0.99)}, "EnableGC": {eq(true)}, "DebugGC": {eq(false)},
		"BySize": nil,
	}

	rst := reflect.ValueOf(st).Elem()
	for i := 0; i < rst.Type().NumField(); i++ {
		name, val := rst.Type().Field(i).Name, rst.Field(i).Interface()
		checks, ok := fields[name]
		if !ok {
			t.Errorf("unknown MemStats field %s", name)
			continue
		}
		for _, check := range checks {
			if err := check(val); err != nil {
				t.Errorf("%s = %v: %s", name, val, err)
			}
		}
	}

	if st.Sys != st.HeapSys+st.StackSys+st.MSpanSys+st.MCacheSys+
		st.BuckHashSys+st.GCSys+st.OtherSys {
		t.Fatalf("Bad sys value: %+v", *st)
	}

	if st.HeapIdle+st.HeapInuse != st.HeapSys {
		t.Fatalf("HeapIdle(%d) + HeapInuse(%d) should be equal to HeapSys(%d), but isn't.", st.HeapIdle, st.HeapInuse, st.HeapSys)
	}

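	// PauseEnd is a circular buffer of 256 GC pause end times; the most
	// recent entry is at index (NumGC+255) % 256 and must match LastGC.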
	if lpe := st.PauseEnd[int(st.NumGC+255)%len(st.PauseEnd)]; st.LastGC != lpe {
		t.Fatalf("LastGC(%d) != last PauseEnd(%d)", st.LastGC, lpe)
	}

	var pauseTotal uint64
	for _, pause := range st.PauseNs {
		pauseTotal += pause
	}
	if int(st.NumGC) < len(st.PauseNs) {
		// We have all pauses, so this should be exact.
		if st.PauseTotalNs != pauseTotal {
			t.Fatalf("PauseTotalNs(%d) != sum PauseNs(%d)", st.PauseTotalNs, pauseTotal)
		}
		for i := int(st.NumGC); i < len(st.PauseNs); i++ {
			if st.PauseNs[i] != 0 {
				t.Fatalf("Non-zero PauseNs[%d]: %+v", i, st)
			}
			if st.PauseEnd[i] != 0 {
				t.Fatalf("Non-zero PauseEnd[%d]: %+v", i, st)
			}
		}
	} else {
		if st.PauseTotalNs < pauseTotal {
			t.Fatalf("PauseTotalNs(%d) < sum PauseNs(%d)", st.PauseTotalNs, pauseTotal)
		}
	}

	if st.NumForcedGC > st.NumGC {
		t.Fatalf("NumForcedGC(%d) > NumGC(%d)", st.NumForcedGC, st.NumGC)
	}
}

func TestStringConcatenationAllocs(t *testing.T) {
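	// The byte slice does not escape, so escape analysis should keep it
	// on the stack; the concatenated string is the only heap allocation.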
	n := testing.AllocsPerRun(1e3, func() {
		b := make([]byte, 10)
		for i := 0; i < 10; i++ {
			b[i] = byte(i) + '0'
		}
		s := "foo" + string(b)
		if want := "foo0123456789"; s != want {
			t.Fatalf("want %v, got %v", want, s)
		}
	})
	// Only string concatenation allocates.
	if n != 1 {
		t.Fatalf("want 1 allocation, got %v", n)
	}
}

func TestTinyAlloc(t *testing.T) {
	if runtime.Raceenabled {
		t.Skip("tinyalloc suppressed when running in race mode")
	}
	const N = 16
	var v [N]unsafe.Pointer
	for i := range v {
		v[i] = unsafe.Pointer(new(byte))
	}

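	// Clearing the low three bits of each address groups the pointers by
	// 8-byte chunk. A working tiny allocator packs several of these
	// one-byte objects into the same chunk.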
	chunks := make(map[uintptr]bool, N)
	for _, p := range v {
		chunks[uintptr(p)&^7] = true
	}

	if len(chunks) == N {
		t.Fatal("no bytes allocated within the same 8-byte chunk")
	}
}

var (
	tinyByteSink   *byte
	tinyUint32Sink *uint32
	tinyObj12Sink  *obj12
)

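// obj12 is 12 bytes on common 32-bit platforms, where uint64 fields are
// only 4-byte aligned, which makes it a candidate for the tiny allocator.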
type obj12 struct {
	a uint64
	b uint32
}

func TestTinyAllocIssue37262(t *testing.T) {
	if runtime.Raceenabled {
		t.Skip("tinyalloc suppressed when running in race mode")
	}
	// Try to cause an alignment access fault
	// by atomically accessing the first 64-bit
	// value of a tiny-allocated object.
	// See issue 37262 for details.

	// GC twice, once to reach a stable heap state
	// and again to make sure we finish the sweep phase.
	runtime.GC()
	runtime.GC()

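	// The tiny allocator packs objects into 16-byte blocks. An address
	// whose low nibble is 0xf is the last byte of its block, so the next
	// tiny allocation must start a fresh one.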
	// Make 1-byte allocations until we get a fresh tiny slot.
	aligned := false
	for i := 0; i < 16; i++ {
		tinyByteSink = new(byte)
		if uintptr(unsafe.Pointer(tinyByteSink))&0xf == 0xf {
			aligned = true
			break
		}
	}
	if !aligned {
		t.Fatal("unable to get a fresh tiny slot")
	}

	// Create a 4-byte object so that the current
	// tiny slot is partially filled.
	tinyUint32Sink = new(uint32)

	// Create a 12-byte object, which fits into the
	// tiny slot. If it actually gets placed there,
	// then the field "a" will be improperly aligned
	// for atomic access on 32-bit architectures.
	// This won't be true if issue 36606 gets resolved.
	tinyObj12Sink = new(obj12)

	// Try to atomically access tinyObj12Sink.a.
	atomic.StoreUint64(&tinyObj12Sink.a, 10)

	// Clear the sinks.
	tinyByteSink = nil
	tinyUint32Sink = nil
	tinyObj12Sink = nil
}

func TestPageCacheLeak(t *testing.T) {
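	// GOMAXPROCS returns the previous setting, so this line pins the test
	// to a single P and restores the old value when the test returns.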
	defer GOMAXPROCS(GOMAXPROCS(1))
	leaked := PageCachePagesLeaked()
	if leaked != 0 {
		t.Fatalf("found %d leaked pages in page caches", leaked)
	}
}

func TestPhysicalMemoryUtilization(t *testing.T) {
	got := runTestProg(t, "testprog", "GCPhys")
	want := "OK\n"
	if got != want {
		t.Fatalf("expected %q, but got %q", want, got)
	}
}

func TestScavengedBitsCleared(t *testing.T) {
	var mismatches [128]BitsMismatch
	if n, ok := CheckScavengedBitsCleared(mismatches[:]); !ok {
		t.Errorf("uncleared scavenged bits")
		for _, m := range mismatches[:n] {
			t.Logf("\t@ address 0x%x", m.Base)
			t.Logf("\t|  got: %064b", m.Got)
			t.Logf("\t| want: %064b", m.Want)
		}
		t.FailNow()
	}
}

type acLink struct {
	x [1 << 20]byte
}

var arenaCollisionSink []*acLink

func TestArenaCollision(t *testing.T) {
	testenv.MustHaveExec(t)

	// Test that mheap.sysAlloc handles collisions with other
	// memory mappings.
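	// The test re-execs itself as a child process, gated on the
	// TEST_ARENA_COLLISION environment variable, so that deliberately
	// exhausting the arena hints cannot disturb other tests in this binary.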
	if os.Getenv("TEST_ARENA_COLLISION") != "1" {
		cmd := testenv.CleanCmdEnv(exec.Command(os.Args[0], "-test.run=TestArenaCollision", "-test.v"))
		cmd.Env = append(cmd.Env, "TEST_ARENA_COLLISION=1")
		out, err := cmd.CombinedOutput()
		if race.Enabled {
			// This test runs the runtime out of hint
			// addresses, so it will start mapping the
			// heap wherever it can. The race detector
			// doesn't support this, so look for the
			// expected failure.
			if want := "too many address space collisions"; !strings.Contains(string(out), want) {
				t.Fatalf("want %q, got:\n%s", want, string(out))
			}
		} else if !strings.Contains(string(out), "PASS\n") || err != nil {
			t.Fatalf("%s\n(exit status %v)", string(out), err)
		}
		return
	}
	disallowed := [][2]uintptr{}
	// Drop all but the next 3 hints. 64-bit has a lot of hints,
	// so it would take a lot of memory to go through all of them.
	KeepNArenaHints(3)
	// Consume these 3 hints and force the runtime to find some
	// fallback hints.
	for i := 0; i < 5; i++ {
		// Reserve memory at the next hint so it can't be used
		// for the heap.
		start, end := MapNextArenaHint()
		disallowed = append(disallowed, [2]uintptr{start, end})
		// Allocate until the runtime tries to use the hint we
		// just mapped over.
		hint := GetNextArenaHint()
		for GetNextArenaHint() == hint {
			ac := new(acLink)
			arenaCollisionSink = append(arenaCollisionSink, ac)
			// The allocation must not have fallen into
			// one of the reserved regions.
			p := uintptr(unsafe.Pointer(ac))
			for _, d := range disallowed {
				if d[0] <= p && p < d[1] {
					t.Fatalf("allocation %#x in reserved region [%#x, %#x)", p, d[0], d[1])
				}
			}
		}
	}
}

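// The malloc benchmarks XOR each allocation's address into a package-level
// sink so the compiler cannot prove the allocations are dead and elide them.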
var mallocSink uintptr

func BenchmarkMalloc8(b *testing.B) {
	var x uintptr
	for i := 0; i < b.N; i++ {
		p := new(int64)
		x ^= uintptr(unsafe.Pointer(p))
	}
	mallocSink = x
}

func BenchmarkMalloc16(b *testing.B) {
	var x uintptr
	for i := 0; i < b.N; i++ {
		p := new([2]int64)
		x ^= uintptr(unsafe.Pointer(p))
	}
	mallocSink = x
}

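// The TypeInfo variants allocate objects that contain pointers, so the
// allocator must also record heap bitmap (type) information. Dividing by
// the word size keeps the objects 8 and 16 bytes on both 32- and 64-bit
// platforms.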
func BenchmarkMallocTypeInfo8(b *testing.B) {
	var x uintptr
	for i := 0; i < b.N; i++ {
		p := new(struct {
			p [8 / unsafe.Sizeof(uintptr(0))]*int
		})
		x ^= uintptr(unsafe.Pointer(p))
	}
	mallocSink = x
}

func BenchmarkMallocTypeInfo16(b *testing.B) {
	var x uintptr
	for i := 0; i < b.N; i++ {
		p := new(struct {
			p [16 / unsafe.Sizeof(uintptr(0))]*int
		})
		x ^= uintptr(unsafe.Pointer(p))
	}
	mallocSink = x
}

type LargeStruct struct {
	x [16][]byte
}

func BenchmarkMallocLargeStruct(b *testing.B) {
	var x uintptr
	for i := 0; i < b.N; i++ {
		p := make([]LargeStruct, 2)
		x ^= uintptr(unsafe.Pointer(&p[0]))
	}
	mallocSink = x
}

var n = flag.Int("n", 1000, "number of goroutines")

func BenchmarkGoroutineSelect(b *testing.B) {
	quit := make(chan struct{})
	read := func(ch chan struct{}) {
		for {
			select {
			case _, ok := <-ch:
				if !ok {
					return
				}
			case <-quit:
				return
			}
		}
	}
	benchHelper(b, *n, read)
}

func BenchmarkGoroutineBlocking(b *testing.B) {
	read := func(ch chan struct{}) {
		for {
			if _, ok := <-ch; !ok {
				return
			}
		}
	}
	benchHelper(b, *n, read)
}

func BenchmarkGoroutineForRange(b *testing.B) {
	read := func(ch chan struct{}) {
		for range ch {
		}
	}
	benchHelper(b, *n, read)
}

func benchHelper(b *testing.B, n int, read func(chan struct{})) {
	m := make([]chan struct{}, n)
	for i := range m {
		m[i] = make(chan struct{}, 1)
		go read(m[i])
	}
	b.StopTimer()
	b.ResetTimer()
	GC()

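	// Only the GC itself is timed: the timer runs just around each GC
	// call, so the channel sends and sleeps that wake and park the
	// goroutines are excluded from the measurement.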
	for i := 0; i < b.N; i++ {
		for _, ch := range m {
			if ch != nil {
				ch <- struct{}{}
			}
		}
		time.Sleep(10 * time.Millisecond)
		b.StartTimer()
		GC()
		b.StopTimer()
	}

	for _, ch := range m {
		close(ch)
	}
	time.Sleep(10 * time.Millisecond)
}

func BenchmarkGoroutineIdle(b *testing.B) {
	quit := make(chan struct{})
	fn := func() {
		<-quit
	}
	for i := 0; i < *n; i++ {
		go fn()
	}

	GC()
	b.ResetTimer()

	for i := 0; i < b.N; i++ {
		GC()
	}

	b.StopTimer()
	close(quit)
	time.Sleep(10 * time.Millisecond)
}
