Source file src/runtime/mpagecache_test.go

// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime_test

import (
	"math/rand"
	. "runtime"
	"testing"
)

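// checkPageCache reports an error for each field of got (base address, cache
// bitmap, scavenged bitmap) that does not match the corresponding field of
// want.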
func checkPageCache(t *testing.T, got, want PageCache) {
	if got.Base() != want.Base() {
		t.Errorf("bad pageCache base: got 0x%x, want 0x%x", got.Base(), want.Base())
	}
	if got.Cache() != want.Cache() {
		t.Errorf("bad pageCache bits: got %016x, want %016x", got.Cache(), want.Cache())
	}
	if got.Scav() != want.Scav() {
		t.Errorf("bad pageCache scav: got %016x, want %016x", got.Scav(), want.Scav())
	}
}

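// TestPageCacheAlloc exercises PageCache.Alloc, the test shim around the
// runtime's pageCache allocator. As the cases below illustrate, a cache spans
// 64 pages starting at its base: bit i of the cache word set means the page
// at base+i*PageSize is free, and bit i of the scav word set means that page
// is scavenged. Each hit is one Alloc call: request hit.npages pages and
// expect the returned base address and scavenged byte count to equal hit.base
// and hit.scav, with (0, 0) meaning the request cannot be satisfied. In
// "Lo1", for example, only page 0 is free (and scavenged), so the first
// 1-page request returns (base, PageSize) and every later request fails.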
func TestPageCacheAlloc(t *testing.T) {
	base := PageBase(BaseChunkIdx, 0)
	type hit struct {
		npages uintptr
		base   uintptr
		scav   uintptr
	}
	tests := map[string]struct {
		cache PageCache
		hits  []hit
	}{
		"Empty": {
			cache: NewPageCache(base, 0, 0),
			hits: []hit{
				{1, 0, 0},
				{2, 0, 0},
				{3, 0, 0},
				{4, 0, 0},
				{5, 0, 0},
				{11, 0, 0},
				{12, 0, 0},
				{16, 0, 0},
				{27, 0, 0},
				{32, 0, 0},
				{43, 0, 0},
				{57, 0, 0},
				{64, 0, 0},
				{121, 0, 0},
			},
		},
		"Lo1": {
			cache: NewPageCache(base, 0x1, 0x1),
			hits: []hit{
				{1, base, PageSize},
				{1, 0, 0},
				{10, 0, 0},
			},
		},
		"Hi1": {
			cache: NewPageCache(base, 0x1<<63, 0x1),
			hits: []hit{
				{1, base + 63*PageSize, 0},
				{1, 0, 0},
				{10, 0, 0},
			},
		},
		"Swiss1": {
			cache: NewPageCache(base, 0x20005555, 0x5505),
			hits: []hit{
				{2, 0, 0},
				{1, base, PageSize},
				{1, base + 2*PageSize, PageSize},
				{1, base + 4*PageSize, 0},
				{1, base + 6*PageSize, 0},
				{1, base + 8*PageSize, PageSize},
				{1, base + 10*PageSize, PageSize},
				{1, base + 12*PageSize, PageSize},
				{1, base + 14*PageSize, PageSize},
				{1, base + 29*PageSize, 0},
				{1, 0, 0},
				{10, 0, 0},
			},
		},
		"Lo2": {
			cache: NewPageCache(base, 0x3, 0x2<<62),
			hits: []hit{
				{2, base, 0},
				{2, 0, 0},
				{1, 0, 0},
			},
		},
		"Hi2": {
			cache: NewPageCache(base, 0x3<<62, 0x3<<62),
			hits: []hit{
				{2, base + 62*PageSize, 2 * PageSize},
				{2, 0, 0},
				{1, 0, 0},
			},
		},
		"Swiss2": {
			cache: NewPageCache(base, 0x3333<<31, 0x3030<<31),
			hits: []hit{
				{2, base + 31*PageSize, 0},
				{2, base + 35*PageSize, 2 * PageSize},
				{2, base + 39*PageSize, 0},
				{2, base + 43*PageSize, 2 * PageSize},
				{2, 0, 0},
			},
		},
		"Hi53": {
			cache: NewPageCache(base, ((uint64(1)<<53)-1)<<10, ((uint64(1)<<16)-1)<<10),
			hits: []hit{
				{53, base + 10*PageSize, 16 * PageSize},
				{53, 0, 0},
				{1, 0, 0},
			},
		},
		"Full53": {
			cache: NewPageCache(base, ^uint64(0), ((uint64(1)<<16)-1)<<10),
			hits: []hit{
				{53, base, 16 * PageSize},
				{53, 0, 0},
				{1, base + 53*PageSize, 0},
			},
		},
		"Full64": {
			cache: NewPageCache(base, ^uint64(0), ^uint64(0)),
			hits: []hit{
				{64, base, 64 * PageSize},
				{64, 0, 0},
				{1, 0, 0},
			},
		},
		"FullMixed": {
			cache: NewPageCache(base, ^uint64(0), ^uint64(0)),
			hits: []hit{
				{5, base, 5 * PageSize},
				{7, base + 5*PageSize, 7 * PageSize},
				{1, base + 12*PageSize, 1 * PageSize},
				{23, base + 13*PageSize, 23 * PageSize},
				{63, 0, 0},
				{3, base + 36*PageSize, 3 * PageSize},
				{3, base + 39*PageSize, 3 * PageSize},
				{3, base + 42*PageSize, 3 * PageSize},
				{12, base + 45*PageSize, 12 * PageSize},
				{11, 0, 0},
				{4, base + 57*PageSize, 4 * PageSize},
				{4, 0, 0},
				{6, 0, 0},
				{36, 0, 0},
				{2, base + 61*PageSize, 2 * PageSize},
				{3, 0, 0},
				{1, base + 63*PageSize, 1 * PageSize},
				{4, 0, 0},
				{2, 0, 0},
				{62, 0, 0},
				{1, 0, 0},
			},
		},
	}
	for name, test := range tests {
		test := test
		t.Run(name, func(t *testing.T) {
			c := test.cache
			for i, h := range test.hits {
				b, s := c.Alloc(h.npages)
				if b != h.base {
					t.Fatalf("bad alloc base #%d: got 0x%x, want 0x%x", i, b, h.base)
				}
				if s != h.scav {
					t.Fatalf("bad alloc scav #%d: got %d, want %d", i, s, h.scav)
				}
			}
		})
	}
}

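// TestPageCacheFlush checks PageCache.Flush: flushing a cache back into a
// page allocator should return the cache's free pages (set cache bits) to
// the heap, leave the rest of the 64-page window allocated, mark the cache's
// scav bits as scavenged ranges, and leave the cache itself empty.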
func TestPageCacheFlush(t *testing.T) {
	if GOOS == "openbsd" && testing.Short() {
		t.Skip("skipping because virtual memory is limited; see #36210")
	}
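	// bits64ToBitRanges expands a 64-bit bitmap into the equivalent set of
	// BitRanges, offset by base. For example, bits 0b1011 with base 0 become
	// [{0, 2}, {3, 1}]: a run of two set bits starting at index 0 and a run
	// of one starting at index 3.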
	bits64ToBitRanges := func(bits uint64, base uint) []BitRange {
		var ranges []BitRange
		start, size := uint(0), uint(0)
		for i := 0; i < 64; i++ {
			if bits&(1<<i) != 0 {
				if size == 0 {
					start = uint(i) + base
				}
				size++
			} else {
				if size != 0 {
					ranges = append(ranges, BitRange{start, size})
					size = 0
				}
			}
		}
		if size != 0 {
			ranges = append(ranges, BitRange{start, size})
		}
		return ranges
	}
	runTest := func(t *testing.T, base uint, cache, scav uint64) {
		// Set up the before state.
		beforeAlloc := map[ChunkIdx][]BitRange{
			BaseChunkIdx: {{base, 64}},
		}
		beforeScav := map[ChunkIdx][]BitRange{
			BaseChunkIdx: {},
		}
		b := NewPageAlloc(beforeAlloc, beforeScav)
		defer FreePageAlloc(b)

		// Create and flush the cache.
		c := NewPageCache(PageBase(BaseChunkIdx, base), cache, scav)
		c.Flush(b)
		if !c.Empty() {
			t.Errorf("pageCache flush did not clear cache")
		}

		// Set up the expected after state.
		afterAlloc := map[ChunkIdx][]BitRange{
			BaseChunkIdx: bits64ToBitRanges(^cache, base),
		}
		afterScav := map[ChunkIdx][]BitRange{
			BaseChunkIdx: bits64ToBitRanges(scav, base),
		}
		want := NewPageAlloc(afterAlloc, afterScav)
		defer FreePageAlloc(want)

		// Check to see if it worked.
		checkPageAlloc(t, want, b)
	}

	// Empty.
	runTest(t, 0, 0, 0)

	// Full.
	runTest(t, 0, ^uint64(0), ^uint64(0))

	// Random.
	for i := 0; i < 100; i++ {
		// Generate random valid base within a chunk.
		base := uint(rand.Intn(PallocChunkPages/64)) * 64

		// Generate random cache.
		cache := rand.Uint64()
		scav := rand.Uint64() & cache

		// Run the test.
		runTest(t, base, cache, scav)
	}
}

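// TestPageAllocAllocToCache covers the other direction: carving a pageCache
// out of a page allocator with AllocToCache. Each entry in hits describes the
// PageCache expected from one call (base, free bitmap, scavenged bitmap), and
// once the hits are exhausted the allocator's bitmap is expected to match the
// after map.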
func TestPageAllocAllocToCache(t *testing.T) {
	if GOOS == "openbsd" && testing.Short() {
		t.Skip("skipping because virtual memory is limited; see #36210")
	}
	type test struct {
		before map[ChunkIdx][]BitRange
		scav   map[ChunkIdx][]BitRange
		hits   []PageCache // expected base addresses and patterns
		after  map[ChunkIdx][]BitRange
	}
	tests := map[string]test{
		"AllFree": {
			before: map[ChunkIdx][]BitRange{
				BaseChunkIdx: {},
			},
			scav: map[ChunkIdx][]BitRange{
				BaseChunkIdx: {{1, 1}, {64, 64}},
			},
			hits: []PageCache{
				NewPageCache(PageBase(BaseChunkIdx, 0), ^uint64(0), 0x2),
				NewPageCache(PageBase(BaseChunkIdx, 64), ^uint64(0), ^uint64(0)),
				NewPageCache(PageBase(BaseChunkIdx, 128), ^uint64(0), 0),
				NewPageCache(PageBase(BaseChunkIdx, 192), ^uint64(0), 0),
			},
			after: map[ChunkIdx][]BitRange{
				BaseChunkIdx: {{0, 256}},
			},
		},
		"ManyArena": {
			before: map[ChunkIdx][]BitRange{
				BaseChunkIdx:     {{0, PallocChunkPages}},
				BaseChunkIdx + 1: {{0, PallocChunkPages}},
				BaseChunkIdx + 2: {{0, PallocChunkPages - 64}},
			},
			scav: map[ChunkIdx][]BitRange{
				BaseChunkIdx:     {{0, PallocChunkPages}},
				BaseChunkIdx + 1: {{0, PallocChunkPages}},
				BaseChunkIdx + 2: {},
			},
			hits: []PageCache{
				NewPageCache(PageBase(BaseChunkIdx+2, PallocChunkPages-64), ^uint64(0), 0),
			},
			after: map[ChunkIdx][]BitRange{
				BaseChunkIdx:     {{0, PallocChunkPages}},
				BaseChunkIdx + 1: {{0, PallocChunkPages}},
				BaseChunkIdx + 2: {{0, PallocChunkPages}},
			},
		},
		"NotContiguous": {
			before: map[ChunkIdx][]BitRange{
				BaseChunkIdx:        {{0, PallocChunkPages}},
				BaseChunkIdx + 0xff: {{0, 0}},
			},
			scav: map[ChunkIdx][]BitRange{
				BaseChunkIdx:        {{0, PallocChunkPages}},
				BaseChunkIdx + 0xff: {{31, 67}},
			},
			hits: []PageCache{
				NewPageCache(PageBase(BaseChunkIdx+0xff, 0), ^uint64(0), ((uint64(1)<<33)-1)<<31),
			},
			after: map[ChunkIdx][]BitRange{
				BaseChunkIdx:        {{0, PallocChunkPages}},
				BaseChunkIdx + 0xff: {{0, 64}},
			},
		},
		"First": {
			before: map[ChunkIdx][]BitRange{
				BaseChunkIdx: {{0, 32}, {33, 31}, {96, 32}},
			},
			scav: map[ChunkIdx][]BitRange{
				BaseChunkIdx: {{1, 4}, {31, 5}, {66, 2}},
			},
			hits: []PageCache{
				NewPageCache(PageBase(BaseChunkIdx, 0), 1<<32, 1<<32),
				NewPageCache(PageBase(BaseChunkIdx, 64), (uint64(1)<<32)-1, 0x3<<2),
			},
			after: map[ChunkIdx][]BitRange{
				BaseChunkIdx: {{0, 128}},
			},
		},
		"Fail": {
			before: map[ChunkIdx][]BitRange{
				BaseChunkIdx: {{0, PallocChunkPages}},
			},
			hits: []PageCache{
				NewPageCache(0, 0, 0),
				NewPageCache(0, 0, 0),
				NewPageCache(0, 0, 0),
			},
			after: map[ChunkIdx][]BitRange{
				BaseChunkIdx: {{0, PallocChunkPages}},
			},
		},
	}
	if PageAlloc64Bit != 0 {
		const chunkIdxBigJump = 0x100000 // chunk index offset which translates to O(TiB)

		// This test is similar to the one with the same name for
		// pageAlloc.alloc and serves the same purpose.
		// See mpagealloc_test.go for details.
		sumsPerPhysPage := ChunkIdx(PhysPageSize / PallocSumBytes)
		baseChunkIdx := BaseChunkIdx &^ (sumsPerPhysPage - 1)
		tests["DiscontiguousMappedSumBoundary"] = test{
			before: map[ChunkIdx][]BitRange{
				baseChunkIdx + sumsPerPhysPage - 1: {{0, PallocChunkPages - 1}},
				baseChunkIdx + chunkIdxBigJump:     {{1, PallocChunkPages - 1}},
			},
			scav: map[ChunkIdx][]BitRange{
				baseChunkIdx + sumsPerPhysPage - 1: {},
				baseChunkIdx + chunkIdxBigJump:     {},
			},
			hits: []PageCache{
				NewPageCache(PageBase(baseChunkIdx+sumsPerPhysPage-1, PallocChunkPages-64), 1<<63, 0),
				NewPageCache(PageBase(baseChunkIdx+chunkIdxBigJump, 0), 1, 0),
				NewPageCache(0, 0, 0),
			},
			after: map[ChunkIdx][]BitRange{
				baseChunkIdx + sumsPerPhysPage - 1: {{0, PallocChunkPages}},
				baseChunkIdx + chunkIdxBigJump:     {{0, PallocChunkPages}},
			},
		}
	}
	for name, v := range tests {
		v := v
		t.Run(name, func(t *testing.T) {
			b := NewPageAlloc(v.before, v.scav)
			defer FreePageAlloc(b)

			for _, expect := range v.hits {
				checkPageCache(t, b.AllocToCache(), expect)
				if t.Failed() {
					return
				}
			}
			want := NewPageAlloc(v.after, v.scav)
			defer FreePageAlloc(want)

			checkPageAlloc(t, want, b)
		})
	}
}
