Black Lives Matter. Support the Equal Justice Initiative.

Source file src/cmd/compile/internal/gc/compile.go

Documentation: cmd/compile/internal/gc

     1  // Copyright 2011 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  package gc
     6  
     7  import (
     8  	"internal/race"
     9  	"math/rand"
    10  	"sort"
    11  	"sync"
    12  
    13  	"cmd/compile/internal/base"
    14  	"cmd/compile/internal/ir"
    15  	"cmd/compile/internal/liveness"
    16  	"cmd/compile/internal/objw"
    17  	"cmd/compile/internal/ssagen"
    18  	"cmd/compile/internal/typecheck"
    19  	"cmd/compile/internal/types"
    20  	"cmd/compile/internal/walk"
    21  	"cmd/internal/obj"
    22  )
    23  
    24  // "Portable" code generation.
    25  
    26  var (
    27  	compilequeue []*ir.Func // functions waiting to be compiled
    28  )
    29  
    30  func enqueueFunc(fn *ir.Func) {
    31  	if ir.CurFunc != nil {
    32  		base.FatalfAt(fn.Pos(), "enqueueFunc %v inside %v", fn, ir.CurFunc)
    33  	}
    34  
    35  	if ir.FuncName(fn) == "_" {
    36  		// Skip compiling blank functions.
    37  		// Frontend already reported any spec-mandated errors (#29870).
    38  		return
    39  	}
    40  
    41  	if clo := fn.OClosure; clo != nil && !ir.IsTrivialClosure(clo) {
    42  		return // we'll get this as part of its enclosing function
    43  	}
    44  
    45  	if len(fn.Body) == 0 {
    46  		// Initialize ABI wrappers if necessary.
    47  		ssagen.InitLSym(fn, false)
    48  		types.CalcSize(fn.Type())
    49  		a := ssagen.AbiForBodylessFuncStackMap(fn)
    50  		abiInfo := a.ABIAnalyzeFuncType(fn.Type().FuncType()) // abiInfo has spill/home locations for wrapper
    51  		liveness.WriteFuncMap(fn, abiInfo)
    52  		if fn.ABI == obj.ABI0 {
    53  			x := ssagen.EmitArgInfo(fn, abiInfo)
    54  			objw.Global(x, int32(len(x.P)), obj.RODATA|obj.LOCAL)
    55  		}
    56  		return
    57  	}
    58  
    59  	errorsBefore := base.Errors()
    60  
    61  	todo := []*ir.Func{fn}
    62  	for len(todo) > 0 {
    63  		next := todo[len(todo)-1]
    64  		todo = todo[:len(todo)-1]
    65  
    66  		prepareFunc(next)
    67  		todo = append(todo, next.Closures...)
    68  	}
    69  
    70  	if base.Errors() > errorsBefore {
    71  		return
    72  	}
    73  
    74  	// Enqueue just fn itself. compileFunctions will handle
    75  	// scheduling compilation of its closures after it's done.
    76  	compilequeue = append(compilequeue, fn)
    77  }
    78  
    79  // prepareFunc handles any remaining frontend compilation tasks that
    80  // aren't yet safe to perform concurrently.
// prepareFunc handles any remaining frontend compilation tasks that
// aren't yet safe to perform concurrently — chiefly walking fn's body.
// It must run before fn is handed to the (possibly parallel) backend.
func prepareFunc(fn *ir.Func) {
	// Set up the function's LSym early to avoid data races with the assemblers.
	// Do this before walk, as walk needs the LSym to set attributes/relocations
	// (e.g. in MarkTypeUsedInInterface).
	ssagen.InitLSym(fn, true)

	// Calculate parameter offsets.
	types.CalcSize(fn.Type())

	// Walk reads the typecheck/ir globals below, so set them for the
	// duration of the walk and restore their defaults afterwards.
	typecheck.DeclContext = ir.PAUTO
	ir.CurFunc = fn
	walk.Walk(fn)
	ir.CurFunc = nil // enforce no further uses of CurFunc
	typecheck.DeclContext = ir.PEXTERN
}
    96  
    97  // compileFunctions compiles all functions in compilequeue.
    98  // It fans out nBackendWorkers to do the work
    99  // and waits for them to complete.
   100  func compileFunctions() {
   101  	if len(compilequeue) == 0 {
   102  		return
   103  	}
   104  
   105  	if race.Enabled {
   106  		// Randomize compilation order to try to shake out races.
   107  		tmp := make([]*ir.Func, len(compilequeue))
   108  		perm := rand.Perm(len(compilequeue))
   109  		for i, v := range perm {
   110  			tmp[v] = compilequeue[i]
   111  		}
   112  		copy(compilequeue, tmp)
   113  	} else {
   114  		// Compile the longest functions first,
   115  		// since they're most likely to be the slowest.
   116  		// This helps avoid stragglers.
   117  		sort.Slice(compilequeue, func(i, j int) bool {
   118  			return len(compilequeue[i].Body) > len(compilequeue[j].Body)
   119  		})
   120  	}
   121  
   122  	// By default, we perform work right away on the current goroutine
   123  	// as the solo worker.
   124  	queue := func(work func(int)) {
   125  		work(0)
   126  	}
   127  
   128  	if nWorkers := base.Flag.LowerC; nWorkers > 1 {
   129  		// For concurrent builds, we create a goroutine per task, but
   130  		// require them to hold a unique worker ID while performing work
   131  		// to limit parallelism.
   132  		workerIDs := make(chan int, nWorkers)
   133  		for i := 0; i < nWorkers; i++ {
   134  			workerIDs <- i
   135  		}
   136  
   137  		queue = func(work func(int)) {
   138  			go func() {
   139  				worker := <-workerIDs
   140  				work(worker)
   141  				workerIDs <- worker
   142  			}()
   143  		}
   144  	}
   145  
   146  	var wg sync.WaitGroup
   147  	var compile func([]*ir.Func)
   148  	compile = func(fns []*ir.Func) {
   149  		wg.Add(len(fns))
   150  		for _, fn := range fns {
   151  			fn := fn
   152  			queue(func(worker int) {
   153  				ssagen.Compile(fn, worker)
   154  				compile(fn.Closures)
   155  				wg.Done()
   156  			})
   157  		}
   158  	}
   159  
   160  	types.CalcSizeDisabled = true // not safe to calculate sizes concurrently
   161  	base.Ctxt.InParallel = true
   162  
   163  	compile(compilequeue)
   164  	compilequeue = nil
   165  	wg.Wait()
   166  
   167  	base.Ctxt.InParallel = false
   168  	types.CalcSizeDisabled = false
   169  }
   170  

View as plain text