...

Source file src/cmd/compile/internal/escape/graph.go

Documentation: cmd/compile/internal/escape

     1  // Copyright 2018 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  package escape
     6  
     7  import (
     8  	"cmd/compile/internal/base"
     9  	"cmd/compile/internal/ir"
    10  	"cmd/compile/internal/logopt"
    11  	"cmd/compile/internal/types"
    12  	"fmt"
    13  )
    14  
    15  // Below we implement the methods for walking the AST and recording
    16  // data flow edges. Note that because a sub-expression might have
    17  // side-effects, it's important to always visit the entire AST.
    18  //
    19  // For example, write either:
    20  //
    21  //     if x {
    22  //         e.discard(n.Left)
    23  //     } else {
    24  //         e.value(k, n.Left)
    25  //     }
    26  //
    27  // or
    28  //
    29  //     if x {
    30  //         k = e.discardHole()
    31  //     }
    32  //     e.value(k, n.Left)
    33  //
    34  // Do NOT write:
    35  //
    36  //     // BAD: possibly loses side-effects within n.Left
    37  //     if !x {
    38  //         e.value(k, n.Left)
    39  //     }
    40  
    41  // A location represents an abstract location that stores a Go
    42  // variable.
    43  type location struct {
    44  	n         ir.Node  // represented variable or expression, if any
    45  	curfn     *ir.Func // enclosing function
    46  	edges     []edge   // incoming edges
    47  	loopDepth int      // loopDepth at declaration
    48  
    49  	// resultIndex records the tuple index (starting at 1) for
    50  	// PPARAMOUT variables within their function's result type.
    51  	// For non-PPARAMOUT variables it's 0.
    52  	resultIndex int
    53  
    54  	// derefs and walkgen are used during walkOne to track the
    55  	// minimal dereferences from the walk root.
    56  	derefs  int // >= -1
    57  	walkgen uint32
    58  
    59  	// dst and dstEdgeIdx track the next immediate assignment
    60  	// destination location during walkOne, along with the index
    61  	// of the edge pointing back to this location.
    62  	dst        *location
    63  	dstEdgeIdx int
    64  
    65  	// queued is used by walkAll to track whether this location is
    66  	// in the walk queue.
    67  	queued bool
    68  
    69  	// attrs is a bitset of location attributes.
    70  	attrs locAttr
    71  
    72  	// paramEsc records the represented parameter's leak set.
    73  	paramEsc leaks
    74  
    75  	captured   bool // has a closure captured this variable?
    76  	reassigned bool // has this variable been reassigned?
    77  	addrtaken  bool // has this variable's address been taken?
    78  }
    79  
    80  type locAttr uint8
    81  
    82  const (
    83  	// attrEscapes indicates whether the represented variable's address
    84  	// escapes; that is, whether the variable must be heap allocated.
    85  	attrEscapes locAttr = 1 << iota
    86  
    87  	// attrPersists indicates whether the represented expression's
    88  	// address outlives the statement; that is, whether its storage
    89  	// cannot be immediately reused.
    90  	attrPersists
    91  
    92  	// attrMutates indicates whether pointers that are reachable from
    93  	// this location may have their addressed memory mutated. This is
    94  	// used to detect string->[]byte conversions that can be safely
    95  	// optimized away.
    96  	attrMutates
    97  
    98  	// attrCalls indicates whether closures that are reachable from this
    99  	// location may be called without tracking their results. This is
   100  	// used to better optimize indirect closure calls.
   101  	attrCalls
   102  )
   103  
   104  func (l *location) hasAttr(attr locAttr) bool { return l.attrs&attr != 0 }
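        // For illustration (a sketch, not code from this file): flow below
        // marks a source location by OR-ing attribute bits into attrs, and
        // callers test them with hasAttr:
        //
        //     src.attrs |= attrEscapes | attrPersists
        //     if src.hasAttr(attrEscapes) { /* must be heap allocated */ }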
   105  
   106  // An edge represents an assignment edge between two Go variables.
   107  type edge struct {
   108  	src    *location
   109  	derefs int // >= -1
   110  	notes  *note
   111  }
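        // The derefs count uses the same convention as hole.derefs below: each
        // dereference of the source adds one, and taking its address subtracts
        // one, hence the >= -1 bound. As an illustrative sketch (not code from
        // this file), the assignments
        //
        //     p = &q   // edge from q to p with derefs == -1
        //     p = q    // edge from q to p with derefs == 0
        //     p = *q   // edge from q to p with derefs == 1
        //
        // record incoming edges on p's location.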
   112  
   113  func (l *location) asHole() hole {
   114  	return hole{dst: l}
   115  }
   116  
   117  // leakTo records that parameter l leaks to sink.
   118  func (l *location) leakTo(sink *location, derefs int) {
   119  	// If sink is a result parameter that doesn't escape (#44614)
   120  	// and we can fit return bits into the escape analysis tag,
   121  	// then record as a result leak.
   122  	if !sink.hasAttr(attrEscapes) && sink.isName(ir.PPARAMOUT) && sink.curfn == l.curfn {
   123  		ri := sink.resultIndex - 1
   124  		if ri < numEscResults {
   125  			// Leak to result parameter.
   126  			l.paramEsc.AddResult(ri, derefs)
   127  			return
   128  		}
   129  	}
   130  
   131  	// Otherwise, record as heap leak.
   132  	l.paramEsc.AddHeap(derefs)
   133  }
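        // For illustration (a sketch, not code from this file): in
        //
        //     func id(p *T) *T { return p }
        //
        // the parameter p leaks to result 0 with derefs == 0, so leakTo calls
        // l.paramEsc.AddResult(0, 0); a leak whose sink is not an in-scope
        // result parameter (or whose result index doesn't fit in the tag)
        // falls back to AddHeap.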
   134  
   135  // leakTo records that parameter l leaks to sink.
   136  func (b *batch) leakTo(l, sink *location, derefs int) {
   137  	if (logopt.Enabled() || base.Flag.LowerM >= 2) && !l.hasAttr(attrEscapes) {
   138  		if base.Flag.LowerM >= 2 {
   139  			fmt.Printf("%s: parameter %v leaks to %s with derefs=%d:\n", base.FmtPos(l.n.Pos()), l.n, b.explainLoc(sink), derefs)
   140  		}
   141  		explanation := b.explainPath(sink, l)
   142  		if logopt.Enabled() {
   143  			var e_curfn *ir.Func // TODO(mdempsky): Fix.
   144  			logopt.LogOpt(l.n.Pos(), "leak", "escape", ir.FuncName(e_curfn),
   145  				fmt.Sprintf("parameter %v leaks to %s with derefs=%d", l.n, b.explainLoc(sink), derefs), explanation)
   146  		}
   147  	}
   148  
   149  	// If sink is a result parameter that doesn't escape (#44614)
   150  	// and we can fit return bits into the escape analysis tag,
   151  	// then record as a result leak.
   152  	if !sink.hasAttr(attrEscapes) && sink.isName(ir.PPARAMOUT) && sink.curfn == l.curfn {
   153  		if ri := sink.resultIndex - 1; ri < numEscResults {
   154  			// Leak to result parameter.
   155  			l.paramEsc.AddResult(ri, derefs)
   156  			return
   157  		}
   158  	}
   159  
   160  	// Otherwise, record as heap leak.
   161  	l.paramEsc.AddHeap(derefs)
   162  }
   163  
   164  func (l *location) isName(c ir.Class) bool {
   165  	return l.n != nil && l.n.Op() == ir.ONAME && l.n.(*ir.Name).Class == c
   166  }
   167  
   168  // A hole represents a context for evaluation of a Go
   169  // expression. E.g., when evaluating p in "x = **p", we'd have a hole
   170  // with dst==x and derefs==2.
   171  type hole struct {
   172  	dst    *location
   173  	derefs int // >= -1
   174  	notes  *note
   175  
   176  	// addrtaken indicates whether this context is taking the address of
   177  	// the expression, independent of whether the address will actually
   178  	// be stored into a variable.
   179  	addrtaken bool
   180  }
   181  
   182  type note struct {
   183  	next  *note
   184  	where ir.Node
   185  	why   string
   186  }
   187  
   188  func (k hole) note(where ir.Node, why string) hole {
   189  	if where == nil || why == "" {
   190  		base.Fatalf("note: missing where/why")
   191  	}
   192  	if base.Flag.LowerM >= 2 || logopt.Enabled() {
   193  		k.notes = &note{
   194  			next:  k.notes,
   195  			where: where,
   196  			why:   why,
   197  		}
   198  	}
   199  	return k
   200  }
   201  
   202  func (k hole) shift(delta int) hole {
   203  	k.derefs += delta
   204  	if k.derefs < -1 {
   205  		base.Fatalf("derefs underflow: %v", k.derefs)
   206  	}
   207  	k.addrtaken = delta < 0
   208  	return k
   209  }
   210  
   211  func (k hole) deref(where ir.Node, why string) hole { return k.shift(1).note(where, why) }
   212  func (k hole) addr(where ir.Node, why string) hole  { return k.shift(-1).note(where, why) }
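        // As an illustrative sketch (not code from this file), evaluating the
        // right-hand sides below against a hole k for x gives:
        //
        //     x = *p   // k.deref(...): derefs increases by one
        //     x = &p   // k.addr(...):  derefs decreases by one, addrtaken set
        //
        // The where/why arguments only feed the -m / logopt explanations
        // recorded by note.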
   213  
   214  func (k hole) dotType(t *types.Type, where ir.Node, why string) hole {
   215  	if !t.IsInterface() && !types.IsDirectIface(t) {
   216  		k = k.shift(1)
   217  	}
   218  	return k.note(where, why)
   219  }
   220  
   221  func (b *batch) flow(k hole, src *location) {
   222  	if k.addrtaken {
   223  		src.addrtaken = true
   224  	}
   225  
   226  	dst := k.dst
   227  	if dst == &b.blankLoc {
   228  		return
   229  	}
   230  	if dst == src && k.derefs >= 0 { // dst = dst, dst = *dst, ...
   231  		return
   232  	}
   233  	if dst.hasAttr(attrEscapes) && k.derefs < 0 { // dst = &src
   234  		if base.Flag.LowerM >= 2 || logopt.Enabled() {
   235  			pos := base.FmtPos(src.n.Pos())
   236  			if base.Flag.LowerM >= 2 {
   237  				fmt.Printf("%s: %v escapes to heap:\n", pos, src.n)
   238  			}
   239  			explanation := b.explainFlow(pos, dst, src, k.derefs, k.notes, []*logopt.LoggedOpt{})
   240  			if logopt.Enabled() {
   241  				var e_curfn *ir.Func // TODO(mdempsky): Fix.
   242  				logopt.LogOpt(src.n.Pos(), "escapes", "escape", ir.FuncName(e_curfn), fmt.Sprintf("%v escapes to heap", src.n), explanation)
   243  			}
   244  
   245  		}
   246  		src.attrs |= attrEscapes | attrPersists | attrMutates | attrCalls
   247  		return
   248  	}
   249  
   250  	// TODO(mdempsky): Deduplicate edges?
   251  	dst.edges = append(dst.edges, edge{src: src, derefs: k.derefs, notes: k.notes})
   252  }
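        // For illustration (a sketch, not code from this file): for an
        // assignment "dst = &src" the expression walk ends up calling roughly
        //
        //     b.flow(dst.asHole().addr(n, "address-of"), srcLoc)
        //
        // If dst already has attrEscapes, src is marked as escaping right here
        // (with the "escapes to heap" diagnostics above); otherwise the
        // assignment is just appended to dst.edges for the later graph walk.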
   253  
   254  func (b *batch) heapHole() hole    { return b.heapLoc.asHole() }
   255  func (b *batch) mutatorHole() hole { return b.mutatorLoc.asHole() }
   256  func (b *batch) calleeHole() hole  { return b.calleeLoc.asHole() }
   257  func (b *batch) discardHole() hole { return b.blankLoc.asHole() }
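        // For illustration (a sketch, not code from this file): even when an
        // expression's value is unused, it is still walked, just into the
        // blank location, so that side effects inside it are visited (see the
        // comment at the top of this file):
        //
        //     e.expr(e.discardHole(), n) // evaluate n for effects only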
   258  
   259  func (b *batch) oldLoc(n *ir.Name) *location {
   260  	if n.Canonical().Opt == nil {
   261  		base.FatalfAt(n.Pos(), "%v has no location", n)
   262  	}
   263  	return n.Canonical().Opt.(*location)
   264  }
   265  
   266  func (e *escape) newLoc(n ir.Node, persists bool) *location {
   267  	if e.curfn == nil {
   268  		base.Fatalf("e.curfn isn't set")
   269  	}
   270  	if n != nil && n.Type() != nil && n.Type().NotInHeap() {
   271  		base.ErrorfAt(n.Pos(), 0, "%v is incomplete (or unallocatable); stack allocation disallowed", n.Type())
   272  	}
   273  
   274  	if n != nil && n.Op() == ir.ONAME {
   275  		if canon := n.(*ir.Name).Canonical(); n != canon {
   276  			base.FatalfAt(n.Pos(), "newLoc on non-canonical %v (canonical is %v)", n, canon)
   277  		}
   278  	}
   279  	loc := &location{
   280  		n:         n,
   281  		curfn:     e.curfn,
   282  		loopDepth: e.loopDepth,
   283  	}
   284  	if persists {
   285  		loc.attrs |= attrPersists
   286  	}
   287  	e.allLocs = append(e.allLocs, loc)
   288  	if n != nil {
   289  		if n.Op() == ir.ONAME {
   290  			n := n.(*ir.Name)
   291  			if n.Class == ir.PPARAM && n.Curfn == nil {
   292  				// ok; hidden parameter
   293  			} else if n.Curfn != e.curfn {
   294  				base.FatalfAt(n.Pos(), "curfn mismatch: %v != %v for %v", n.Curfn, e.curfn, n)
   295  			}
   296  
   297  			if n.Opt != nil {
   298  				base.FatalfAt(n.Pos(), "%v already has a location", n)
   299  			}
   300  			n.Opt = loc
   301  		}
   302  	}
   303  	return loc
   304  }
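        // Together, newLoc and oldLoc maintain the ir.Name.Opt link: newLoc
        // stores the new *location in the canonical name's Opt field, and
        // oldLoc fetches it back later. As an illustrative sketch (not code
        // from this file):
        //
        //     loc := e.newLoc(n, false)    // sets n.Opt = loc for an ONAME n
        //     ...
        //     l := b.oldLoc(n)             // later phases recover the same loc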
   305  
   306  // teeHole returns a new hole that flows into each hole of ks,
   307  // similar to the Unix tee(1) command.
   308  func (e *escape) teeHole(ks ...hole) hole {
   309  	if len(ks) == 0 {
   310  		return e.discardHole()
   311  	}
   312  	if len(ks) == 1 {
   313  		return ks[0]
   314  	}
   315  	// TODO(mdempsky): Optimize if there's only one non-discard hole?
   316  
   317  	// Given holes "l1 = _", "l2 = **_", "l3 = *_", ..., create a
   318  	// new temporary location ltmp, wire it into place, and return
   319  	// a hole for "ltmp = _".
   320  	loc := e.newLoc(nil, false)
   321  	for _, k := range ks {
   322  		// N.B., "p = &q" and "p = &tmp; tmp = q" are not
   323  		// semantically equivalent. To combine holes like "l1
   324  		// = _" and "l2 = &_", we'd need to wire them as "l1 =
   325  		// *ltmp" and "l2 = ltmp" and return "ltmp = &_"
   326  		// instead.
   327  		if k.derefs < 0 {
   328  			base.Fatalf("teeHole: negative derefs")
   329  		}
   330  
   331  		e.flow(k, loc)
   332  	}
   333  	return loc.asHole()
   334  }
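        // For illustration (a sketch, not code from this file): when a single
        // expression must flow to two destinations, say a value that is both
        // stored and returned, the walk can evaluate it once against a
        // combined hole:
        //
        //     e.expr(e.teeHole(kStore, kResult), n)
        //
        // where kStore and kResult are hypothetical holes for the two
        // destinations and e.expr is the expression walk defined elsewhere in
        // this package.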
   335  
   336  // later returns a new hole that flows into k, but some time later.
   337  // Its main effect is to prevent immediate reuse of temporary
   338  // variables introduced during Order.
   339  func (e *escape) later(k hole) hole {
   340  	loc := e.newLoc(nil, true)
   341  	e.flow(k, loc)
   342  	return loc.asHole()
   343  }
   344  
   345  // Fmt is called from node printing to print information about escape analysis results.
   346  func Fmt(n ir.Node) string {
   347  	text := ""
   348  	switch n.Esc() {
   349  	case ir.EscUnknown:
   350  		break
   351  
   352  	case ir.EscHeap:
   353  		text = "esc(h)"
   354  
   355  	case ir.EscNone:
   356  		text = "esc(no)"
   357  
   358  	case ir.EscNever:
   359  		text = "esc(N)"
   360  
   361  	default:
   362  		text = fmt.Sprintf("esc(%d)", n.Esc())
   363  	}
   364  
   365  	if n.Op() == ir.ONAME {
   366  		n := n.(*ir.Name)
   367  		if loc, ok := n.Opt.(*location); ok && loc.loopDepth != 0 {
   368  			if text != "" {
   369  				text += " "
   370  			}
   371  			text += fmt.Sprintf("ld(%d)", loc.loopDepth)
   372  		}
   373  	}
   374  
   375  	return text
   376  }
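        // For illustration (a sketch, not output from a real run): a name that
        // escapes to the heap and was declared inside a loop would print as
        //
        //     esc(h) ld(1)
        //
        // while a name with unknown escape state and loop depth 0 prints as an
        // empty string.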
   377  
