mirror of
				https://github.com/optim-enterprises-bv/kubernetes.git
				synced 2025-11-03 19:58:17 +00:00 
			
		
		
		
	Pin new dependency: github.com/google/cel-go v0.9.0
This commit is contained in:
		
							
								
								
									
										26
									
								
								vendor/github.com/antlr/antlr4/runtime/Go/antlr/LICENSE
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										26
									
								
								vendor/github.com/antlr/antlr4/runtime/Go/antlr/LICENSE
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							@@ -0,0 +1,26 @@
 | 
			
		||||
Copyright 2021 The ANTLR Project
 | 
			
		||||
 | 
			
		||||
Redistribution and use in source and binary forms, with or without modification,
 | 
			
		||||
are permitted provided that the following conditions are met:
 | 
			
		||||
 | 
			
		||||
    1. Redistributions of source code must retain the above copyright notice,
 | 
			
		||||
    this list of conditions and the following disclaimer.
 | 
			
		||||
 | 
			
		||||
    2. Redistributions in binary form must reproduce the above copyright notice,
 | 
			
		||||
    this list of conditions and the following disclaimer in the documentation
 | 
			
		||||
    and/or other materials provided with the distribution.
 | 
			
		||||
 | 
			
		||||
    3. Neither the name of the copyright holder nor the names of its
 | 
			
		||||
    contributors may be used to endorse or promote products derived from this
 | 
			
		||||
    software without specific prior written permission.
 | 
			
		||||
 | 
			
		||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
 | 
			
		||||
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 | 
			
		||||
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 | 
			
		||||
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
 | 
			
		||||
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 | 
			
		||||
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 | 
			
		||||
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 | 
			
		||||
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 | 
			
		||||
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 | 
			
		||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 | 
			
		||||
							
								
								
									
										152
									
								
								vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										152
									
								
								vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							@@ -0,0 +1,152 @@
 | 
			
		||||
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
 | 
			
		||||
// Use of this file is governed by the BSD 3-clause license that
 | 
			
		||||
// can be found in the LICENSE.txt file in the project root.
 | 
			
		||||
 | 
			
		||||
package antlr
 | 
			
		||||
 | 
			
		||||
var ATNInvalidAltNumber int
 | 
			
		||||
 | 
			
		||||
type ATN struct {
 | 
			
		||||
	// DecisionToState is the decision points for all rules, subrules, optional
 | 
			
		||||
	// blocks, ()+, ()*, etc. Used to build DFA predictors for them.
 | 
			
		||||
	DecisionToState []DecisionState
 | 
			
		||||
 | 
			
		||||
	// grammarType is the ATN type and is used for deserializing ATNs from strings.
 | 
			
		||||
	grammarType int
 | 
			
		||||
 | 
			
		||||
	// lexerActions is referenced by action transitions in the ATN for lexer ATNs.
 | 
			
		||||
	lexerActions []LexerAction
 | 
			
		||||
 | 
			
		||||
	// maxTokenType is the maximum value for any symbol recognized by a transition in the ATN.
 | 
			
		||||
	maxTokenType int
 | 
			
		||||
 | 
			
		||||
	modeNameToStartState map[string]*TokensStartState
 | 
			
		||||
 | 
			
		||||
	modeToStartState []*TokensStartState
 | 
			
		||||
 | 
			
		||||
	// ruleToStartState maps from rule index to starting state number.
 | 
			
		||||
	ruleToStartState []*RuleStartState
 | 
			
		||||
 | 
			
		||||
	// ruleToStopState maps from rule index to stop state number.
 | 
			
		||||
	ruleToStopState []*RuleStopState
 | 
			
		||||
 | 
			
		||||
	// ruleToTokenType maps the rule index to the resulting token type for lexer
 | 
			
		||||
	// ATNs. For parser ATNs, it maps the rule index to the generated bypass token
 | 
			
		||||
	// type if ATNDeserializationOptions.isGenerateRuleBypassTransitions was
 | 
			
		||||
	// specified, and otherwise is nil.
 | 
			
		||||
	ruleToTokenType []int
 | 
			
		||||
 | 
			
		||||
	states []ATNState
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func NewATN(grammarType int, maxTokenType int) *ATN {
 | 
			
		||||
	return &ATN{
 | 
			
		||||
		grammarType:          grammarType,
 | 
			
		||||
		maxTokenType:         maxTokenType,
 | 
			
		||||
		modeNameToStartState: make(map[string]*TokensStartState),
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// NextTokensInContext computes the set of valid tokens that can occur starting
 | 
			
		||||
// in state s. If ctx is nil, the set of tokens will not include what can follow
 | 
			
		||||
// the rule surrounding s. In other words, the set will be restricted to tokens
 | 
			
		||||
// reachable staying within the rule of s.
 | 
			
		||||
func (a *ATN) NextTokensInContext(s ATNState, ctx RuleContext) *IntervalSet {
 | 
			
		||||
	return NewLL1Analyzer(a).Look(s, nil, ctx)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// NextTokensNoContext computes the set of valid tokens that can occur starting
 | 
			
		||||
// in s and staying in same rule. Token.EPSILON is in set if we reach end of
 | 
			
		||||
// rule.
 | 
			
		||||
func (a *ATN) NextTokensNoContext(s ATNState) *IntervalSet {
 | 
			
		||||
	if s.GetNextTokenWithinRule() != nil {
 | 
			
		||||
		return s.GetNextTokenWithinRule()
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	s.SetNextTokenWithinRule(a.NextTokensInContext(s, nil))
 | 
			
		||||
	s.GetNextTokenWithinRule().readOnly = true
 | 
			
		||||
 | 
			
		||||
	return s.GetNextTokenWithinRule()
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (a *ATN) NextTokens(s ATNState, ctx RuleContext) *IntervalSet {
 | 
			
		||||
	if ctx == nil {
 | 
			
		||||
		return a.NextTokensNoContext(s)
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return a.NextTokensInContext(s, ctx)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (a *ATN) addState(state ATNState) {
 | 
			
		||||
	if state != nil {
 | 
			
		||||
		state.SetATN(a)
 | 
			
		||||
		state.SetStateNumber(len(a.states))
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	a.states = append(a.states, state)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (a *ATN) removeState(state ATNState) {
 | 
			
		||||
	a.states[state.GetStateNumber()] = nil // Just free the memory; don't shift states in the slice
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (a *ATN) defineDecisionState(s DecisionState) int {
 | 
			
		||||
	a.DecisionToState = append(a.DecisionToState, s)
 | 
			
		||||
	s.setDecision(len(a.DecisionToState) - 1)
 | 
			
		||||
 | 
			
		||||
	return s.getDecision()
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (a *ATN) getDecisionState(decision int) DecisionState {
 | 
			
		||||
	if len(a.DecisionToState) == 0 {
 | 
			
		||||
		return nil
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return a.DecisionToState[decision]
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// getExpectedTokens computes the set of input symbols which could follow ATN
 | 
			
		||||
// state number stateNumber in the specified full parse context ctx and returns
 | 
			
		||||
// the set of potentially valid input symbols which could follow the specified
 | 
			
		||||
// state in the specified context. This method considers the complete parser
 | 
			
		||||
// context, but does not evaluate semantic predicates (i.e. all predicates
 | 
			
		||||
// encountered during the calculation are assumed true). If a path in the ATN
 | 
			
		||||
// exists from the starting state to the RuleStopState of the outermost context
 | 
			
		||||
// without Matching any symbols, Token.EOF is added to the returned set.
 | 
			
		||||
//
 | 
			
		||||
// A nil ctx defaults to ParserRuleContext.EMPTY.
 | 
			
		||||
//
 | 
			
		||||
// It panics if the ATN does not contain state stateNumber.
 | 
			
		||||
func (a *ATN) getExpectedTokens(stateNumber int, ctx RuleContext) *IntervalSet {
 | 
			
		||||
	if stateNumber < 0 || stateNumber >= len(a.states) {
 | 
			
		||||
		panic("Invalid state number.")
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	s := a.states[stateNumber]
 | 
			
		||||
	following := a.NextTokens(s, nil)
 | 
			
		||||
 | 
			
		||||
	if !following.contains(TokenEpsilon) {
 | 
			
		||||
		return following
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	expected := NewIntervalSet()
 | 
			
		||||
 | 
			
		||||
	expected.addSet(following)
 | 
			
		||||
	expected.removeOne(TokenEpsilon)
 | 
			
		||||
 | 
			
		||||
	for ctx != nil && ctx.GetInvokingState() >= 0 && following.contains(TokenEpsilon) {
 | 
			
		||||
		invokingState := a.states[ctx.GetInvokingState()]
 | 
			
		||||
		rt := invokingState.GetTransitions()[0]
 | 
			
		||||
 | 
			
		||||
		following = a.NextTokens(rt.(*RuleTransition).followState, nil)
 | 
			
		||||
		expected.addSet(following)
 | 
			
		||||
		expected.removeOne(TokenEpsilon)
 | 
			
		||||
		ctx = ctx.GetParent().(RuleContext)
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	if following.contains(TokenEpsilon) {
 | 
			
		||||
		expected.addOne(TokenEOF)
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return expected
 | 
			
		||||
}
 | 
			
		||||
							
								
								
									
										295
									
								
								vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_config.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										295
									
								
								vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_config.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							@@ -0,0 +1,295 @@
 | 
			
		||||
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
 | 
			
		||||
// Use of this file is governed by the BSD 3-clause license that
 | 
			
		||||
// can be found in the LICENSE.txt file in the project root.
 | 
			
		||||
 | 
			
		||||
package antlr
 | 
			
		||||
 | 
			
		||||
import (
 | 
			
		||||
	"fmt"
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
type comparable interface {
 | 
			
		||||
	equals(other interface{}) bool
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// ATNConfig is a tuple: (ATN state, predicted alt, syntactic, semantic
 | 
			
		||||
// context). The syntactic context is a graph-structured stack node whose
 | 
			
		||||
// path(s) to the root is the rule invocation(s) chain used to arrive at the
 | 
			
		||||
// state. The semantic context is the tree of semantic predicates encountered
 | 
			
		||||
// before reaching an ATN state.
 | 
			
		||||
type ATNConfig interface {
 | 
			
		||||
	comparable
 | 
			
		||||
 | 
			
		||||
	hash() int
 | 
			
		||||
 | 
			
		||||
	GetState() ATNState
 | 
			
		||||
	GetAlt() int
 | 
			
		||||
	GetSemanticContext() SemanticContext
 | 
			
		||||
 | 
			
		||||
	GetContext() PredictionContext
 | 
			
		||||
	SetContext(PredictionContext)
 | 
			
		||||
 | 
			
		||||
	GetReachesIntoOuterContext() int
 | 
			
		||||
	SetReachesIntoOuterContext(int)
 | 
			
		||||
 | 
			
		||||
	String() string
 | 
			
		||||
 | 
			
		||||
	getPrecedenceFilterSuppressed() bool
 | 
			
		||||
	setPrecedenceFilterSuppressed(bool)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
type BaseATNConfig struct {
 | 
			
		||||
	precedenceFilterSuppressed bool
 | 
			
		||||
	state                      ATNState
 | 
			
		||||
	alt                        int
 | 
			
		||||
	context                    PredictionContext
 | 
			
		||||
	semanticContext            SemanticContext
 | 
			
		||||
	reachesIntoOuterContext    int
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func NewBaseATNConfig7(old *BaseATNConfig) *BaseATNConfig { // TODO: Dup
 | 
			
		||||
	return &BaseATNConfig{
 | 
			
		||||
		state:                   old.state,
 | 
			
		||||
		alt:                     old.alt,
 | 
			
		||||
		context:                 old.context,
 | 
			
		||||
		semanticContext:         old.semanticContext,
 | 
			
		||||
		reachesIntoOuterContext: old.reachesIntoOuterContext,
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func NewBaseATNConfig6(state ATNState, alt int, context PredictionContext) *BaseATNConfig {
 | 
			
		||||
	return NewBaseATNConfig5(state, alt, context, SemanticContextNone)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func NewBaseATNConfig5(state ATNState, alt int, context PredictionContext, semanticContext SemanticContext) *BaseATNConfig {
 | 
			
		||||
	if semanticContext == nil {
 | 
			
		||||
		panic("semanticContext cannot be nil") // TODO: Necessary?
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return &BaseATNConfig{state: state, alt: alt, context: context, semanticContext: semanticContext}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func NewBaseATNConfig4(c ATNConfig, state ATNState) *BaseATNConfig {
 | 
			
		||||
	return NewBaseATNConfig(c, state, c.GetContext(), c.GetSemanticContext())
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func NewBaseATNConfig3(c ATNConfig, state ATNState, semanticContext SemanticContext) *BaseATNConfig {
 | 
			
		||||
	return NewBaseATNConfig(c, state, c.GetContext(), semanticContext)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func NewBaseATNConfig2(c ATNConfig, semanticContext SemanticContext) *BaseATNConfig {
 | 
			
		||||
	return NewBaseATNConfig(c, c.GetState(), c.GetContext(), semanticContext)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func NewBaseATNConfig1(c ATNConfig, state ATNState, context PredictionContext) *BaseATNConfig {
 | 
			
		||||
	return NewBaseATNConfig(c, state, context, c.GetSemanticContext())
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func NewBaseATNConfig(c ATNConfig, state ATNState, context PredictionContext, semanticContext SemanticContext) *BaseATNConfig {
 | 
			
		||||
	if semanticContext == nil {
 | 
			
		||||
		panic("semanticContext cannot be nil")
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return &BaseATNConfig{
 | 
			
		||||
		state:                      state,
 | 
			
		||||
		alt:                        c.GetAlt(),
 | 
			
		||||
		context:                    context,
 | 
			
		||||
		semanticContext:            semanticContext,
 | 
			
		||||
		reachesIntoOuterContext:    c.GetReachesIntoOuterContext(),
 | 
			
		||||
		precedenceFilterSuppressed: c.getPrecedenceFilterSuppressed(),
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (b *BaseATNConfig) getPrecedenceFilterSuppressed() bool {
 | 
			
		||||
	return b.precedenceFilterSuppressed
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (b *BaseATNConfig) setPrecedenceFilterSuppressed(v bool) {
 | 
			
		||||
	b.precedenceFilterSuppressed = v
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (b *BaseATNConfig) GetState() ATNState {
 | 
			
		||||
	return b.state
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (b *BaseATNConfig) GetAlt() int {
 | 
			
		||||
	return b.alt
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (b *BaseATNConfig) SetContext(v PredictionContext) {
 | 
			
		||||
	b.context = v
 | 
			
		||||
}
 | 
			
		||||
func (b *BaseATNConfig) GetContext() PredictionContext {
 | 
			
		||||
	return b.context
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (b *BaseATNConfig) GetSemanticContext() SemanticContext {
 | 
			
		||||
	return b.semanticContext
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (b *BaseATNConfig) GetReachesIntoOuterContext() int {
 | 
			
		||||
	return b.reachesIntoOuterContext
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (b *BaseATNConfig) SetReachesIntoOuterContext(v int) {
 | 
			
		||||
	b.reachesIntoOuterContext = v
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// An ATN configuration is equal to another if both have the same state, they
 | 
			
		||||
// predict the same alternative, and syntactic/semantic contexts are the same.
 | 
			
		||||
func (b *BaseATNConfig) equals(o interface{}) bool {
 | 
			
		||||
	if b == o {
 | 
			
		||||
		return true
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	var other, ok = o.(*BaseATNConfig)
 | 
			
		||||
 | 
			
		||||
	if !ok {
 | 
			
		||||
		return false
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	var equal bool
 | 
			
		||||
 | 
			
		||||
	if b.context == nil {
 | 
			
		||||
		equal = other.context == nil
 | 
			
		||||
	} else {
 | 
			
		||||
		equal = b.context.equals(other.context)
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	var (
 | 
			
		||||
		nums = b.state.GetStateNumber() == other.state.GetStateNumber()
 | 
			
		||||
		alts = b.alt == other.alt
 | 
			
		||||
		cons = b.semanticContext.equals(other.semanticContext)
 | 
			
		||||
		sups = b.precedenceFilterSuppressed == other.precedenceFilterSuppressed
 | 
			
		||||
	)
 | 
			
		||||
 | 
			
		||||
	return nums && alts && cons && sups && equal
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (b *BaseATNConfig) hash() int {
 | 
			
		||||
	var c int
 | 
			
		||||
	if b.context != nil {
 | 
			
		||||
		c = b.context.hash()
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	h := murmurInit(7)
 | 
			
		||||
	h = murmurUpdate(h, b.state.GetStateNumber())
 | 
			
		||||
	h = murmurUpdate(h, b.alt)
 | 
			
		||||
	h = murmurUpdate(h, c)
 | 
			
		||||
	h = murmurUpdate(h, b.semanticContext.hash())
 | 
			
		||||
	return murmurFinish(h, 4)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (b *BaseATNConfig) String() string {
 | 
			
		||||
	var s1, s2, s3 string
 | 
			
		||||
 | 
			
		||||
	if b.context != nil {
 | 
			
		||||
		s1 = ",[" + fmt.Sprint(b.context) + "]"
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	if b.semanticContext != SemanticContextNone {
 | 
			
		||||
		s2 = "," + fmt.Sprint(b.semanticContext)
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	if b.reachesIntoOuterContext > 0 {
 | 
			
		||||
		s3 = ",up=" + fmt.Sprint(b.reachesIntoOuterContext)
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return fmt.Sprintf("(%v,%v%v%v%v)", b.state, b.alt, s1, s2, s3)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
type LexerATNConfig struct {
 | 
			
		||||
	*BaseATNConfig
 | 
			
		||||
	lexerActionExecutor            *LexerActionExecutor
 | 
			
		||||
	passedThroughNonGreedyDecision bool
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func NewLexerATNConfig6(state ATNState, alt int, context PredictionContext) *LexerATNConfig {
 | 
			
		||||
	return &LexerATNConfig{BaseATNConfig: NewBaseATNConfig5(state, alt, context, SemanticContextNone)}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func NewLexerATNConfig5(state ATNState, alt int, context PredictionContext, lexerActionExecutor *LexerActionExecutor) *LexerATNConfig {
 | 
			
		||||
	return &LexerATNConfig{
 | 
			
		||||
		BaseATNConfig:       NewBaseATNConfig5(state, alt, context, SemanticContextNone),
 | 
			
		||||
		lexerActionExecutor: lexerActionExecutor,
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func NewLexerATNConfig4(c *LexerATNConfig, state ATNState) *LexerATNConfig {
 | 
			
		||||
	return &LexerATNConfig{
 | 
			
		||||
		BaseATNConfig:                  NewBaseATNConfig(c, state, c.GetContext(), c.GetSemanticContext()),
 | 
			
		||||
		lexerActionExecutor:            c.lexerActionExecutor,
 | 
			
		||||
		passedThroughNonGreedyDecision: checkNonGreedyDecision(c, state),
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func NewLexerATNConfig3(c *LexerATNConfig, state ATNState, lexerActionExecutor *LexerActionExecutor) *LexerATNConfig {
 | 
			
		||||
	return &LexerATNConfig{
 | 
			
		||||
		BaseATNConfig:                  NewBaseATNConfig(c, state, c.GetContext(), c.GetSemanticContext()),
 | 
			
		||||
		lexerActionExecutor:            lexerActionExecutor,
 | 
			
		||||
		passedThroughNonGreedyDecision: checkNonGreedyDecision(c, state),
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func NewLexerATNConfig2(c *LexerATNConfig, state ATNState, context PredictionContext) *LexerATNConfig {
 | 
			
		||||
	return &LexerATNConfig{
 | 
			
		||||
		BaseATNConfig:                  NewBaseATNConfig(c, state, context, c.GetSemanticContext()),
 | 
			
		||||
		lexerActionExecutor:            c.lexerActionExecutor,
 | 
			
		||||
		passedThroughNonGreedyDecision: checkNonGreedyDecision(c, state),
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func NewLexerATNConfig1(state ATNState, alt int, context PredictionContext) *LexerATNConfig {
 | 
			
		||||
	return &LexerATNConfig{BaseATNConfig: NewBaseATNConfig5(state, alt, context, SemanticContextNone)}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (l *LexerATNConfig) hash() int {
 | 
			
		||||
	var f int
 | 
			
		||||
	if l.passedThroughNonGreedyDecision {
 | 
			
		||||
		f = 1
 | 
			
		||||
	} else {
 | 
			
		||||
		f = 0
 | 
			
		||||
	}
 | 
			
		||||
	h := murmurInit(7)
 | 
			
		||||
	h = murmurUpdate(h, l.state.hash())
 | 
			
		||||
	h = murmurUpdate(h, l.alt)
 | 
			
		||||
	h = murmurUpdate(h, l.context.hash())
 | 
			
		||||
	h = murmurUpdate(h, l.semanticContext.hash())
 | 
			
		||||
	h = murmurUpdate(h, f)
 | 
			
		||||
	h = murmurUpdate(h, l.lexerActionExecutor.hash())
 | 
			
		||||
	h = murmurFinish(h, 6)
 | 
			
		||||
	return h
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (l *LexerATNConfig) equals(other interface{}) bool {
 | 
			
		||||
	var othert, ok = other.(*LexerATNConfig)
 | 
			
		||||
 | 
			
		||||
	if l == other {
 | 
			
		||||
		return true
 | 
			
		||||
	} else if !ok {
 | 
			
		||||
		return false
 | 
			
		||||
	} else if l.passedThroughNonGreedyDecision != othert.passedThroughNonGreedyDecision {
 | 
			
		||||
		return false
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	var b bool
 | 
			
		||||
 | 
			
		||||
	if l.lexerActionExecutor != nil {
 | 
			
		||||
		b = !l.lexerActionExecutor.equals(othert.lexerActionExecutor)
 | 
			
		||||
	} else {
 | 
			
		||||
		b = othert.lexerActionExecutor != nil
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	if b {
 | 
			
		||||
		return false
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return l.BaseATNConfig.equals(othert.BaseATNConfig)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
func checkNonGreedyDecision(source *LexerATNConfig, target ATNState) bool {
 | 
			
		||||
	var ds, ok = target.(DecisionState)
 | 
			
		||||
 | 
			
		||||
	return source.passedThroughNonGreedyDecision || (ok && ds.getNonGreedy())
 | 
			
		||||
}
 | 
			
		||||
							
								
								
									
										387
									
								
								vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_config_set.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										387
									
								
								vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_config_set.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							@@ -0,0 +1,387 @@
 | 
			
		||||
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
 | 
			
		||||
// Use of this file is governed by the BSD 3-clause license that
 | 
			
		||||
// can be found in the LICENSE.txt file in the project root.
 | 
			
		||||
 | 
			
		||||
package antlr
 | 
			
		||||
 | 
			
		||||
import "fmt"
 | 
			
		||||
 | 
			
		||||
type ATNConfigSet interface {
 | 
			
		||||
	hash() int
 | 
			
		||||
	Add(ATNConfig, *DoubleDict) bool
 | 
			
		||||
	AddAll([]ATNConfig) bool
 | 
			
		||||
 | 
			
		||||
	GetStates() *Set
 | 
			
		||||
	GetPredicates() []SemanticContext
 | 
			
		||||
	GetItems() []ATNConfig
 | 
			
		||||
 | 
			
		||||
	OptimizeConfigs(interpreter *BaseATNSimulator)
 | 
			
		||||
 | 
			
		||||
	Equals(other interface{}) bool
 | 
			
		||||
 | 
			
		||||
	Length() int
 | 
			
		||||
	IsEmpty() bool
 | 
			
		||||
	Contains(ATNConfig) bool
 | 
			
		||||
	ContainsFast(ATNConfig) bool
 | 
			
		||||
	Clear()
 | 
			
		||||
	String() string
 | 
			
		||||
 | 
			
		||||
	HasSemanticContext() bool
 | 
			
		||||
	SetHasSemanticContext(v bool)
 | 
			
		||||
 | 
			
		||||
	ReadOnly() bool
 | 
			
		||||
	SetReadOnly(bool)
 | 
			
		||||
 | 
			
		||||
	GetConflictingAlts() *BitSet
 | 
			
		||||
	SetConflictingAlts(*BitSet)
 | 
			
		||||
 | 
			
		||||
	FullContext() bool
 | 
			
		||||
 | 
			
		||||
	GetUniqueAlt() int
 | 
			
		||||
	SetUniqueAlt(int)
 | 
			
		||||
 | 
			
		||||
	GetDipsIntoOuterContext() bool
 | 
			
		||||
	SetDipsIntoOuterContext(bool)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// BaseATNConfigSet is a specialized set of ATNConfig that tracks information
 | 
			
		||||
// about its elements and can combine similar configurations using a
 | 
			
		||||
// graph-structured stack.
 | 
			
		||||
type BaseATNConfigSet struct {
 | 
			
		||||
	cachedHash int
 | 
			
		||||
 | 
			
		||||
	// configLookup is used to determine whether two BaseATNConfigSets are equal. We
 | 
			
		||||
	// need all configurations with the same (s, i, _, semctx) to be equal. A key
 | 
			
		||||
	// effectively doubles the number of objects associated with ATNConfigs. All
 | 
			
		||||
	// keys are hashed by (s, i, _, pi), not including the context. Wiped out when
 | 
			
		||||
	// read-only because a set becomes a DFA state.
 | 
			
		||||
	configLookup *Set
 | 
			
		||||
 | 
			
		||||
	// configs is the added elements.
 | 
			
		||||
	configs []ATNConfig
 | 
			
		||||
 | 
			
		||||
	// TODO: These fields make me pretty uncomfortable, but it is nice to pack up
 | 
			
		||||
	// info together because it saves recomputation. Can we track conflicts as they
 | 
			
		||||
	// are added to save scanning configs later?
 | 
			
		||||
	conflictingAlts *BitSet
 | 
			
		||||
 | 
			
		||||
	// dipsIntoOuterContext is used by parsers and lexers. In a lexer, it indicates
 | 
			
		||||
	// we hit a pred while computing a closure operation. Do not make a DFA state
 | 
			
		||||
	// from the BaseATNConfigSet in this case. TODO: How is this used by parsers?
 | 
			
		||||
	dipsIntoOuterContext bool
 | 
			
		||||
 | 
			
		||||
	// fullCtx is whether it is part of a full context LL prediction. Used to
 | 
			
		||||
	// determine how to merge $. It is a wildcard with SLL, but not for an LL
 | 
			
		||||
	// context merge.
 | 
			
		||||
	fullCtx bool
 | 
			
		||||
 | 
			
		||||
	// Used in parser and lexer. In lexer, it indicates we hit a pred
 | 
			
		||||
	// while computing a closure operation. Don't make a DFA state from a.
 | 
			
		||||
	hasSemanticContext bool
 | 
			
		||||
 | 
			
		||||
	// readOnly is whether it is read-only. Do not
 | 
			
		||||
	// allow any code to manipulate the set if true because DFA states will point at
 | 
			
		||||
	// sets and those must not change. It not protect other fields; conflictingAlts
 | 
			
		||||
	// in particular, which is assigned after readOnly.
 | 
			
		||||
	readOnly bool
 | 
			
		||||
 | 
			
		||||
	// TODO: These fields make me pretty uncomfortable, but it is nice to pack up
 | 
			
		||||
	// info together because it saves recomputation. Can we track conflicts as they
 | 
			
		||||
	// are added to save scanning configs later?
 | 
			
		||||
	uniqueAlt int
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func NewBaseATNConfigSet(fullCtx bool) *BaseATNConfigSet {
 | 
			
		||||
	return &BaseATNConfigSet{
 | 
			
		||||
		cachedHash: -1,
 | 
			
		||||
		configLookup:     NewSet(nil, equalATNConfigs),
 | 
			
		||||
		fullCtx:          fullCtx,
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Add merges contexts with existing configs for (s, i, pi, _), where s is the
 | 
			
		||||
// ATNConfig.state, i is the ATNConfig.alt, and pi is the
 | 
			
		||||
// ATNConfig.semanticContext. We use (s,i,pi) as the key. Updates
 | 
			
		||||
// dipsIntoOuterContext and hasSemanticContext when necessary.
 | 
			
		||||
func (b *BaseATNConfigSet) Add(config ATNConfig, mergeCache *DoubleDict) bool {
 | 
			
		||||
	if b.readOnly {
 | 
			
		||||
		panic("set is read-only")
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	if config.GetSemanticContext() != SemanticContextNone {
 | 
			
		||||
		b.hasSemanticContext = true
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	if config.GetReachesIntoOuterContext() > 0 {
 | 
			
		||||
		b.dipsIntoOuterContext = true
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	existing := b.configLookup.add(config).(ATNConfig)
 | 
			
		||||
 | 
			
		||||
	if existing == config {
 | 
			
		||||
		b.cachedHash = -1
 | 
			
		||||
		b.configs = append(b.configs, config) // Track order here
 | 
			
		||||
 | 
			
		||||
		return true
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	// Merge a previous (s, i, pi, _) with it and save the result
 | 
			
		||||
	rootIsWildcard := !b.fullCtx
 | 
			
		||||
	merged := merge(existing.GetContext(), config.GetContext(), rootIsWildcard, mergeCache)
 | 
			
		||||
 | 
			
		||||
	// No need to check for existing.context because config.context is in the cache,
 | 
			
		||||
	// since the only way to create new graphs is the "call rule" and here. We cache
 | 
			
		||||
	// at both places.
 | 
			
		||||
	existing.SetReachesIntoOuterContext(intMax(existing.GetReachesIntoOuterContext(), config.GetReachesIntoOuterContext()))
 | 
			
		||||
 | 
			
		||||
	// Preserve the precedence filter suppression during the merge
 | 
			
		||||
	if config.getPrecedenceFilterSuppressed() {
 | 
			
		||||
		existing.setPrecedenceFilterSuppressed(true)
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	// Replace the context because there is no need to do alt mapping
 | 
			
		||||
	existing.SetContext(merged)
 | 
			
		||||
 | 
			
		||||
	return true
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (b *BaseATNConfigSet) GetStates() *Set {
 | 
			
		||||
	states := NewSet(nil, nil)
 | 
			
		||||
 | 
			
		||||
	for i := 0; i < len(b.configs); i++ {
 | 
			
		||||
		states.add(b.configs[i].GetState())
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return states
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (b *BaseATNConfigSet) HasSemanticContext() bool {
 | 
			
		||||
	return b.hasSemanticContext
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (b *BaseATNConfigSet) SetHasSemanticContext(v bool) {
 | 
			
		||||
	b.hasSemanticContext = v
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (b *BaseATNConfigSet) GetPredicates() []SemanticContext {
 | 
			
		||||
	preds := make([]SemanticContext, 0)
 | 
			
		||||
 | 
			
		||||
	for i := 0; i < len(b.configs); i++ {
 | 
			
		||||
		c := b.configs[i].GetSemanticContext()
 | 
			
		||||
 | 
			
		||||
		if c != SemanticContextNone {
 | 
			
		||||
			preds = append(preds, c)
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return preds
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// GetItems returns the configurations of this set in insertion order.
// The returned slice is the set's backing storage, not a copy.
func (b *BaseATNConfigSet) GetItems() []ATNConfig {
	return b.configs
}
 | 
			
		||||
 | 
			
		||||
func (b *BaseATNConfigSet) OptimizeConfigs(interpreter *BaseATNSimulator) {
 | 
			
		||||
	if b.readOnly {
 | 
			
		||||
		panic("set is read-only")
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	if b.configLookup.length() == 0 {
 | 
			
		||||
		return
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	for i := 0; i < len(b.configs); i++ {
 | 
			
		||||
		config := b.configs[i]
 | 
			
		||||
 | 
			
		||||
		config.SetContext(interpreter.getCachedContext(config.GetContext()))
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// AddAll adds every configuration in coll to this set (without a merge
// cache). It always returns false, mirroring the reference runtime,
// regardless of whether the set changed.
func (b *BaseATNConfigSet) AddAll(coll []ATNConfig) bool {
	for i := 0; i < len(coll); i++ {
		b.Add(coll[i], nil)
	}

	return false
}
 | 
			
		||||
 | 
			
		||||
// Equals reports whether other is a BaseATNConfigSet with the same flag
// values as this set. Note that conflictingAlts is compared by pointer
// identity, and the configuration lists themselves are not compared
// (see the TODO below).
func (b *BaseATNConfigSet) Equals(other interface{}) bool {
	if b == other {
		return true
	} else if _, ok := other.(*BaseATNConfigSet); !ok {
		return false
	}

	other2 := other.(*BaseATNConfigSet)

	return b.configs != nil &&
		// TODO: b.configs.equals(other2.configs) && // TODO: Is b necessary?
		b.fullCtx == other2.fullCtx &&
		b.uniqueAlt == other2.uniqueAlt &&
		b.conflictingAlts == other2.conflictingAlts &&
		b.hasSemanticContext == other2.hasSemanticContext &&
		b.dipsIntoOuterContext == other2.dipsIntoOuterContext
}
 | 
			
		||||
 | 
			
		||||
// hash returns a hash over the configurations. Read-only sets compute
// the value once and cache it (-1 marks the cache invalid); mutable
// sets recompute on every call since their contents may change.
func (b *BaseATNConfigSet) hash() int {
	if b.readOnly {
		if b.cachedHash == -1 {
			b.cachedHash = b.hashCodeConfigs()
		}

		return b.cachedHash
	}

	return b.hashCodeConfigs()
}
 | 
			
		||||
 | 
			
		||||
// hashCodeConfigs folds the hash of each non-nil configuration into a
// MurmurHash accumulator, finishing with the total config count.
func (b *BaseATNConfigSet) hashCodeConfigs() int {
	h := murmurInit(1)
	for _, c := range b.configs {
		if c != nil {
			h = murmurUpdate(h, c.hash())
		}
	}
	return murmurFinish(h, len(b.configs))
}
 | 
			
		||||
 | 
			
		||||
// Length returns the number of configurations in this set.
func (b *BaseATNConfigSet) Length() int {
	return len(b.configs)
}
 | 
			
		||||
 | 
			
		||||
// IsEmpty reports whether this set contains no configurations.
func (b *BaseATNConfigSet) IsEmpty() bool {
	return len(b.configs) == 0
}
 | 
			
		||||
 | 
			
		||||
// Contains reports whether item is in this set, using the set's
// equality comparator. Panics for read-only sets, whose lookup table
// has been released (see SetReadOnly).
func (b *BaseATNConfigSet) Contains(item ATNConfig) bool {
	if b.configLookup == nil {
		panic("not implemented for read-only sets")
	}

	return b.configLookup.contains(item)
}
 | 
			
		||||
 | 
			
		||||
// ContainsFast is intended to be a faster membership test, but this
// port falls back to the same lookup as Contains. Panics for read-only
// sets, whose lookup table has been released.
func (b *BaseATNConfigSet) ContainsFast(item ATNConfig) bool {
	if b.configLookup == nil {
		panic("not implemented for read-only sets")
	}

	return b.configLookup.contains(item) // TODO: containsFast is not implemented for Set
}
 | 
			
		||||
 | 
			
		||||
// Clear removes all configurations and resets the cached hash and the
// lookup table. Panics if the set is read-only.
func (b *BaseATNConfigSet) Clear() {
	if b.readOnly {
		panic("set is read-only")
	}

	b.configs = make([]ATNConfig, 0)
	b.cachedHash = -1
	b.configLookup = NewSet(nil, equalATNConfigs)
}
 | 
			
		||||
 | 
			
		||||
// FullContext reports whether this set was built during a
// full-context (SLL-failed) prediction.
func (b *BaseATNConfigSet) FullContext() bool {
	return b.fullCtx
}
 | 
			
		||||
 | 
			
		||||
// GetDipsIntoOuterContext reports whether any configuration in this set
// reaches into the outer (caller) context.
func (b *BaseATNConfigSet) GetDipsIntoOuterContext() bool {
	return b.dipsIntoOuterContext
}
 | 
			
		||||
 | 
			
		||||
// SetDipsIntoOuterContext records whether configurations in this set
// reach into the outer (caller) context.
func (b *BaseATNConfigSet) SetDipsIntoOuterContext(v bool) {
	b.dipsIntoOuterContext = v
}
 | 
			
		||||
 | 
			
		||||
// GetUniqueAlt returns the single alternative predicted by all
// configurations in this set, if one has been recorded.
func (b *BaseATNConfigSet) GetUniqueAlt() int {
	return b.uniqueAlt
}
 | 
			
		||||
 | 
			
		||||
// SetUniqueAlt records the single alternative predicted by all
// configurations in this set.
func (b *BaseATNConfigSet) SetUniqueAlt(v int) {
	b.uniqueAlt = v
}
 | 
			
		||||
 | 
			
		||||
// GetConflictingAlts returns the set of conflicting alternatives
// recorded for this configuration set (may be nil).
func (b *BaseATNConfigSet) GetConflictingAlts() *BitSet {
	return b.conflictingAlts
}
 | 
			
		||||
 | 
			
		||||
// SetConflictingAlts records the set of conflicting alternatives for
// this configuration set.
func (b *BaseATNConfigSet) SetConflictingAlts(v *BitSet) {
	b.conflictingAlts = v
}
 | 
			
		||||
 | 
			
		||||
// ReadOnly reports whether this set has been frozen against mutation.
func (b *BaseATNConfigSet) ReadOnly() bool {
	return b.readOnly
}
 | 
			
		||||
 | 
			
		||||
// SetReadOnly marks the set read-only. Once frozen, the lookup table is
// dropped because no further additions are possible; mutating methods
// will panic and Contains/ContainsFast become unusable.
func (b *BaseATNConfigSet) SetReadOnly(readOnly bool) {
	b.readOnly = readOnly

	if readOnly {
		b.configLookup = nil // Read only, so no need for the lookup cache
	}
}
 | 
			
		||||
 | 
			
		||||
// String renders the configurations as a bracketed, comma-separated
// list, followed by any non-default flags (semantic context, unique
// alternative, conflicting alternatives, outer-context dips).
func (b *BaseATNConfigSet) String() string {
	s := "["

	for i, c := range b.configs {
		s += c.String()

		if i != len(b.configs)-1 {
			s += ", "
		}
	}

	s += "]"

	if b.hasSemanticContext {
		s += ",hasSemanticContext=" + fmt.Sprint(b.hasSemanticContext)
	}

	if b.uniqueAlt != ATNInvalidAltNumber {
		s += ",uniqueAlt=" + fmt.Sprint(b.uniqueAlt)
	}

	if b.conflictingAlts != nil {
		s += ",conflictingAlts=" + b.conflictingAlts.String()
	}

	if b.dipsIntoOuterContext {
		s += ",dipsIntoOuterContext"
	}

	return s
}
 | 
			
		||||
 | 
			
		||||
// OrderedATNConfigSet is an ATN configuration set whose lookup table
// uses the default Set comparator rather than equalATNConfigs, so
// insertion order and identity-style lookup are preserved.
type OrderedATNConfigSet struct {
	*BaseATNConfigSet
}
 | 
			
		||||
 | 
			
		||||
// NewOrderedATNConfigSet creates an OrderedATNConfigSet backed by a
// non-full-context BaseATNConfigSet with a default lookup set.
func NewOrderedATNConfigSet() *OrderedATNConfigSet {
	b := NewBaseATNConfigSet(false)

	b.configLookup = NewSet(nil, nil)

	return &OrderedATNConfigSet{BaseATNConfigSet: b}
}
 | 
			
		||||
 | 
			
		||||
// equalATNConfigs is the comparator used by configLookup: two ATN
// configurations are considered equal when they share the same state
// number, alternative, and semantic context. Prediction context is
// deliberately ignored so that configs differing only in context get
// merged by Add. Note that two nil arguments compare unequal here.
func equalATNConfigs(a, b interface{}) bool {
	if a == nil || b == nil {
		return false
	}

	if a == b {
		return true
	}

	var ai, ok = a.(ATNConfig)
	var bi, ok1 = b.(ATNConfig)

	if !ok || !ok1 {
		return false
	}

	nums := ai.GetState().GetStateNumber() == bi.GetState().GetStateNumber()
	alts := ai.GetAlt() == bi.GetAlt()
	cons := ai.GetSemanticContext().equals(bi.GetSemanticContext())

	return nums && alts && cons
}
 | 
			
		||||
							
								
								
									
										25
									
								
								vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_deserialization_options.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										25
									
								
								vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_deserialization_options.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							@@ -0,0 +1,25 @@
 | 
			
		||||
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
 | 
			
		||||
// Use of this file is governed by the BSD 3-clause license that
 | 
			
		||||
// can be found in the LICENSE.txt file in the project root.
 | 
			
		||||
 | 
			
		||||
package antlr
 | 
			
		||||
 | 
			
		||||
// ATNDeserializationOptionsdefaultOptions is the shared default option
// set: read-only, no ATN verification, no rule-bypass transitions.
var ATNDeserializationOptionsdefaultOptions = &ATNDeserializationOptions{true, false, false}

// ATNDeserializationOptions controls how an ATN is deserialized.
type ATNDeserializationOptions struct {
	readOnly                      bool // options value is frozen (e.g. the shared default)
	verifyATN                     bool // run structural checks after deserialization
	generateRuleBypassTransitions bool // add rule-bypass alternatives for parser ATNs
}
 | 
			
		||||
 | 
			
		||||
func NewATNDeserializationOptions(CopyFrom *ATNDeserializationOptions) *ATNDeserializationOptions {
 | 
			
		||||
	o := new(ATNDeserializationOptions)
 | 
			
		||||
 | 
			
		||||
	if CopyFrom != nil {
 | 
			
		||||
		o.readOnly = CopyFrom.readOnly
 | 
			
		||||
		o.verifyATN = CopyFrom.verifyATN
 | 
			
		||||
		o.generateRuleBypassTransitions = CopyFrom.generateRuleBypassTransitions
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return o
 | 
			
		||||
}
 | 
			
		||||
							
								
								
									
										828
									
								
								vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_deserializer.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										828
									
								
								vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_deserializer.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							@@ -0,0 +1,828 @@
 | 
			
		||||
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
 | 
			
		||||
// Use of this file is governed by the BSD 3-clause license that
 | 
			
		||||
// can be found in the LICENSE.txt file in the project root.
 | 
			
		||||
 | 
			
		||||
package antlr
 | 
			
		||||
 | 
			
		||||
import (
 | 
			
		||||
	"encoding/hex"
 | 
			
		||||
	"fmt"
 | 
			
		||||
	"strconv"
 | 
			
		||||
	"strings"
 | 
			
		||||
	"unicode/utf16"
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
// This is the earliest supported serialized UUID.
// stick to serialized version for now, we don't need a UUID instance
var BaseSerializedUUID = "AADB8D7E-AEEF-4415-AD2B-8204D6CF042E"

// AddedUnicodeSMP marks serialized ATNs whose interval sets may carry
// 32-bit (supplementary-plane) code point arguments; see
// DeserializeFromUInt16.
var AddedUnicodeSMP = "59627784-3BE5-417A-B9EB-8131A7286089"

// This list contains all of the currently supported UUIDs, ordered by when
// the feature first appeared in this branch.
var SupportedUUIDs = []string{BaseSerializedUUID, AddedUnicodeSMP}

// SerializedVersion is the serialization format version understood by
// this deserializer.
var SerializedVersion = 3

// This is the current serialized UUID.
var SerializedUUID = AddedUnicodeSMP

// LoopEndStateIntPair pairs a LoopEndState with the state number of its
// loop-back state, resolved after all states have been created.
type LoopEndStateIntPair struct {
	item0 *LoopEndState
	item1 int
}

// BlockStartStateIntPair pairs a BlockStartState with the state number
// of its end state, resolved after all states have been created.
type BlockStartStateIntPair struct {
	item0 BlockStartState
	item1 int
}

// ATNDeserializer reconstructs an ATN from its serialized form.
type ATNDeserializer struct {
	deserializationOptions *ATNDeserializationOptions // behavior flags (verification, bypass transitions)
	data                   []rune                     // adjusted serialized payload (see reset)
	pos                    int                        // read cursor into data
	uuid                   string                     // UUID read from the payload header
}
 | 
			
		||||
 | 
			
		||||
// NewATNDeserializer creates a deserializer using the given options, or
// the package-level defaults when options is nil.
func NewATNDeserializer(options *ATNDeserializationOptions) *ATNDeserializer {
	if options == nil {
		options = ATNDeserializationOptionsdefaultOptions
	}

	return &ATNDeserializer{deserializationOptions: options}
}
 | 
			
		||||
 | 
			
		||||
// stringInSlice returns the index of the first element of list equal to
// a, or -1 when a does not occur in list.
func stringInSlice(a string, list []string) int {
	for i := 0; i < len(list); i++ {
		if list[i] == a {
			return i
		}
	}

	return -1
}
 | 
			
		||||
 | 
			
		||||
// isFeatureSupported determines if a particular serialized representation of an
// ATN supports a particular feature, identified by the UUID used for
// serializing the ATN at the time the feature was first introduced. Feature is
// the UUID marking the first time the feature was supported in the serialized
// ATN. ActualUuid is the UUID of the actual serialized ATN which is currently
// being deserialized. It returns true if actualUuid represents a serialized ATN
// at or after the feature identified by feature was introduced, and otherwise
// false. Comparison relies on SupportedUUIDs being ordered chronologically.
func (a *ATNDeserializer) isFeatureSupported(feature, actualUUID string) bool {
	idx1 := stringInSlice(feature, SupportedUUIDs)

	// Unknown feature UUID: cannot be supported.
	if idx1 < 0 {
		return false
	}

	idx2 := stringInSlice(actualUUID, SupportedUUIDs)

	// Note: an unknown actualUUID yields -1 and therefore false.
	return idx2 >= idx1
}
 | 
			
		||||
 | 
			
		||||
func (a *ATNDeserializer) DeserializeFromUInt16(data []uint16) *ATN {
 | 
			
		||||
	a.reset(utf16.Decode(data))
 | 
			
		||||
	a.checkVersion()
 | 
			
		||||
	a.checkUUID()
 | 
			
		||||
 | 
			
		||||
	atn := a.readATN()
 | 
			
		||||
 | 
			
		||||
	a.readStates(atn)
 | 
			
		||||
	a.readRules(atn)
 | 
			
		||||
	a.readModes(atn)
 | 
			
		||||
 | 
			
		||||
	sets := make([]*IntervalSet, 0)
 | 
			
		||||
 | 
			
		||||
	// First, deserialize sets with 16-bit arguments <= U+FFFF.
 | 
			
		||||
	sets = a.readSets(atn, sets, a.readInt)
 | 
			
		||||
	// Next, if the ATN was serialized with the Unicode SMP feature,
 | 
			
		||||
	// deserialize sets with 32-bit arguments <= U+10FFFF.
 | 
			
		||||
	if (a.isFeatureSupported(AddedUnicodeSMP, a.uuid)) {
 | 
			
		||||
		sets = a.readSets(atn, sets, a.readInt32)
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	a.readEdges(atn, sets)
 | 
			
		||||
	a.readDecisions(atn)
 | 
			
		||||
	a.readLexerActions(atn)
 | 
			
		||||
	a.markPrecedenceDecisions(atn)
 | 
			
		||||
	a.verifyATN(atn)
 | 
			
		||||
 | 
			
		||||
	if a.deserializationOptions.generateRuleBypassTransitions && atn.grammarType == ATNTypeParser {
 | 
			
		||||
		a.generateRuleBypassTransitions(atn)
 | 
			
		||||
		// Re-verify after modification
 | 
			
		||||
		a.verifyATN(atn)
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return atn
 | 
			
		||||
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (a *ATNDeserializer) reset(data []rune) {
 | 
			
		||||
	temp := make([]rune, len(data))
 | 
			
		||||
 | 
			
		||||
	for i, c := range data {
 | 
			
		||||
		// Don't adjust the first value since that's the version number
 | 
			
		||||
		if i == 0 {
 | 
			
		||||
			temp[i] = c
 | 
			
		||||
		} else if c > 1 {
 | 
			
		||||
			temp[i] = c - 2
 | 
			
		||||
		} else {
 | 
			
		||||
		    temp[i] = c + 65533
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	a.data = temp
 | 
			
		||||
	a.pos = 0
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// checkVersion reads the serialization version from the payload and
// panics unless it matches SerializedVersion.
func (a *ATNDeserializer) checkVersion() {
	version := a.readInt()

	if version != SerializedVersion {
		panic("Could not deserialize ATN with version " + strconv.Itoa(version) + " (expected " + strconv.Itoa(SerializedVersion) + ").")
	}
}
 | 
			
		||||
 | 
			
		||||
// checkUUID reads the serialization UUID, panics if it is not one of
// SupportedUUIDs, and records it on the deserializer for later feature
// checks (see isFeatureSupported).
func (a *ATNDeserializer) checkUUID() {
	uuid := a.readUUID()

	if stringInSlice(uuid, SupportedUUIDs) < 0 {
		panic("Could not deserialize ATN with UUID: " + uuid + " (expected " + SerializedUUID + " or a legacy UUID).")
	}

	a.uuid = uuid
}
 | 
			
		||||
 | 
			
		||||
// readATN reads the grammar type and maximum token type from the
// payload and constructs an empty ATN from them.
func (a *ATNDeserializer) readATN() *ATN {
	grammarType := a.readInt()
	maxTokenType := a.readInt()

	return NewATN(grammarType, maxTokenType)
}
 | 
			
		||||
 | 
			
		||||
// readStates deserializes all ATN states, then resolves cross-state
// references (loop-back and block-end states) in a second pass, and
// finally applies the non-greedy and precedence-rule markers.
func (a *ATNDeserializer) readStates(atn *ATN) {
	loopBackStateNumbers := make([]LoopEndStateIntPair, 0)
	endStateNumbers := make([]BlockStartStateIntPair, 0)

	nstates := a.readInt()

	for i := 0; i < nstates; i++ {
		stype := a.readInt()

		// Ignore bad types of states
		if stype == ATNStateInvalidType {
			atn.addState(nil)

			continue
		}

		ruleIndex := a.readInt()

		// 0xFFFF is the serialized sentinel for "no rule index".
		if ruleIndex == 0xFFFF {
			ruleIndex = -1
		}

		s := a.stateFactory(stype, ruleIndex)

		if stype == ATNStateLoopEnd {
			loopBackStateNumber := a.readInt()

			// Referenced state may not exist yet; record for pass two.
			loopBackStateNumbers = append(loopBackStateNumbers, LoopEndStateIntPair{s.(*LoopEndState), loopBackStateNumber})
		} else if s2, ok := s.(BlockStartState); ok {
			endStateNumber := a.readInt()

			endStateNumbers = append(endStateNumbers, BlockStartStateIntPair{s2, endStateNumber})
		}

		atn.addState(s)
	}

	// Delay the assignment of loop back and end states until we know all the state
	// instances have been initialized
	for j := 0; j < len(loopBackStateNumbers); j++ {
		pair := loopBackStateNumbers[j]

		pair.item0.loopBackState = atn.states[pair.item1]
	}

	for j := 0; j < len(endStateNumbers); j++ {
		pair := endStateNumbers[j]

		pair.item0.setEndState(atn.states[pair.item1].(*BlockEndState))
	}

	numNonGreedyStates := a.readInt()

	for j := 0; j < numNonGreedyStates; j++ {
		stateNumber := a.readInt()

		atn.states[stateNumber].(DecisionState).setNonGreedy(true)
	}

	numPrecedenceStates := a.readInt()

	for j := 0; j < numPrecedenceStates; j++ {
		stateNumber := a.readInt()

		atn.states[stateNumber].(*RuleStartState).isPrecedenceRule = true
	}
}
 | 
			
		||||
 | 
			
		||||
// readRules deserializes the rule tables: each rule's start state, the
// token type emitted per rule for lexer ATNs, and (derived from the
// states) each rule's stop state.
func (a *ATNDeserializer) readRules(atn *ATN) {
	nrules := a.readInt()

	if atn.grammarType == ATNTypeLexer {
		atn.ruleToTokenType = make([]int, nrules) // TODO: initIntArray(nrules, 0)
	}

	atn.ruleToStartState = make([]*RuleStartState, nrules) // TODO: initIntArray(nrules, 0)

	for i := 0; i < nrules; i++ {
		s := a.readInt()
		startState := atn.states[s].(*RuleStartState)

		atn.ruleToStartState[i] = startState

		if atn.grammarType == ATNTypeLexer {
			tokenType := a.readInt()

			// 0xFFFF is the serialized sentinel for EOF.
			if tokenType == 0xFFFF {
				tokenType = TokenEOF
			}

			atn.ruleToTokenType[i] = tokenType
		}
	}

	atn.ruleToStopState = make([]*RuleStopState, nrules) //initIntArray(nrules, 0)

	// Stop states are not serialized per rule; recover them by scanning
	// all states for RuleStopState instances.
	for i := 0; i < len(atn.states); i++ {
		state := atn.states[i]

		if s2, ok := state.(*RuleStopState); ok {
			atn.ruleToStopState[s2.ruleIndex] = s2
			atn.ruleToStartState[s2.ruleIndex].stopState = s2
		}
	}
}
 | 
			
		||||
 | 
			
		||||
// readModes deserializes the lexer mode table, mapping each mode to its
// TokensStartState.
func (a *ATNDeserializer) readModes(atn *ATN) {
	nmodes := a.readInt()

	for i := 0; i < nmodes; i++ {
		s := a.readInt()

		atn.modeToStartState = append(atn.modeToStartState, atn.states[s].(*TokensStartState))
	}
}
 | 
			
		||||
 | 
			
		||||
// readSets appends the serialized interval sets to sets and returns the
// extended slice. readUnicode supplies the code-point decoder (16-bit
// or 32-bit, chosen by the caller based on the serialized UUID).
func (a *ATNDeserializer) readSets(atn *ATN, sets []*IntervalSet, readUnicode func() int) []*IntervalSet {
	m := a.readInt()

	for i := 0; i < m; i++ {
		iset := NewIntervalSet()

		sets = append(sets, iset)

		n := a.readInt()
		containsEOF := a.readInt()

		// EOF (-1) cannot be encoded as a range endpoint, so it is
		// carried as a separate flag.
		if containsEOF != 0 {
			iset.addOne(-1)
		}

		for j := 0; j < n; j++ {
			i1 := readUnicode()
			i2 := readUnicode()

			iset.addRange(i1, i2)
		}
	}

	return sets
}
 | 
			
		||||
 | 
			
		||||
// readEdges deserializes all transitions, synthesizes the (unserialized)
// epsilon edges out of rule stop states, and wires up block start/end
// and loop-back cross references.
func (a *ATNDeserializer) readEdges(atn *ATN, sets []*IntervalSet) {
	nedges := a.readInt()

	for i := 0; i < nedges; i++ {
		var (
			src      = a.readInt()
			trg      = a.readInt()
			ttype    = a.readInt()
			arg1     = a.readInt()
			arg2     = a.readInt()
			arg3     = a.readInt()
			trans    = a.edgeFactory(atn, ttype, src, trg, arg1, arg2, arg3, sets)
			srcState = atn.states[src]
		)

		srcState.AddTransition(trans, -1)
	}

	// Edges for rule stop states can be derived, so they are not serialized
	for i := 0; i < len(atn.states); i++ {
		state := atn.states[i]

		for j := 0; j < len(state.GetTransitions()); j++ {
			var t, ok = state.GetTransitions()[j].(*RuleTransition)

			if !ok {
				continue
			}

			outermostPrecedenceReturn := -1

			if atn.ruleToStartState[t.getTarget().GetRuleIndex()].isPrecedenceRule {
				if t.precedence == 0 {
					outermostPrecedenceReturn = t.getTarget().GetRuleIndex()
				}
			}

			trans := NewEpsilonTransition(t.followState, outermostPrecedenceReturn)

			atn.ruleToStopState[t.getTarget().GetRuleIndex()].AddTransition(trans, -1)
		}
	}

	for i := 0; i < len(atn.states); i++ {
		state := atn.states[i]

		if s2, ok := state.(*BaseBlockStartState); ok {
			// We need to know the end state to set its start state
			if s2.endState == nil {
				panic("IllegalState")
			}

			// Block end states can only be associated to a single block start state
			if s2.endState.startState != nil {
				panic("IllegalState")
			}

			s2.endState.startState = state
		}

		if s2, ok := state.(*PlusLoopbackState); ok {
			for j := 0; j < len(s2.GetTransitions()); j++ {
				target := s2.GetTransitions()[j].getTarget()

				if t2, ok := target.(*PlusBlockStartState); ok {
					t2.loopBackState = state
				}
			}
		} else if s2, ok := state.(*StarLoopbackState); ok {
			for j := 0; j < len(s2.GetTransitions()); j++ {
				target := s2.GetTransitions()[j].getTarget()

				if t2, ok := target.(*StarLoopEntryState); ok {
					t2.loopBackState = state
				}
			}
		}
	}
}
 | 
			
		||||
 | 
			
		||||
// readDecisions deserializes the decision table, assigning each
// decision state its decision number in serialization order.
func (a *ATNDeserializer) readDecisions(atn *ATN) {
	ndecisions := a.readInt()

	for i := 0; i < ndecisions; i++ {
		s := a.readInt()
		decState := atn.states[s].(DecisionState)

		atn.DecisionToState = append(atn.DecisionToState, decState)
		decState.setDecision(i)
	}
}
 | 
			
		||||
 | 
			
		||||
// readLexerActions deserializes the lexer action table (lexer ATNs
// only). Each action is an (actionType, data1, data2) triple, with
// 0xFFFF decoded back to -1 for the data operands.
func (a *ATNDeserializer) readLexerActions(atn *ATN) {
	if atn.grammarType == ATNTypeLexer {
		count := a.readInt()

		atn.lexerActions = make([]LexerAction, count) // initIntArray(count, nil)

		for i := 0; i < count; i++ {
			actionType := a.readInt()
			data1 := a.readInt()

			if data1 == 0xFFFF {
				data1 = -1
			}

			data2 := a.readInt()

			if data2 == 0xFFFF {
				data2 = -1
			}

			lexerAction := a.lexerActionFactory(actionType, data1, data2)

			atn.lexerActions[i] = lexerAction
		}
	}
}
 | 
			
		||||
 | 
			
		||||
// generateRuleBypassTransitions assigns each rule a synthetic token
// type above maxTokenType and then adds a bypass alternative to every
// rule (see generateRuleBypassTransition).
func (a *ATNDeserializer) generateRuleBypassTransitions(atn *ATN) {
	count := len(atn.ruleToStartState)

	for i := 0; i < count; i++ {
		atn.ruleToTokenType[i] = atn.maxTokenType + i + 1
	}

	for i := 0; i < count; i++ {
		a.generateRuleBypassTransition(atn, i)
	}
}
 | 
			
		||||
 | 
			
		||||
func (a *ATNDeserializer) generateRuleBypassTransition(atn *ATN, idx int) {
 | 
			
		||||
	bypassStart := NewBasicBlockStartState()
 | 
			
		||||
 | 
			
		||||
	bypassStart.ruleIndex = idx
 | 
			
		||||
	atn.addState(bypassStart)
 | 
			
		||||
 | 
			
		||||
	bypassStop := NewBlockEndState()
 | 
			
		||||
 | 
			
		||||
	bypassStop.ruleIndex = idx
 | 
			
		||||
	atn.addState(bypassStop)
 | 
			
		||||
 | 
			
		||||
	bypassStart.endState = bypassStop
 | 
			
		||||
 | 
			
		||||
	atn.defineDecisionState(bypassStart.BaseDecisionState)
 | 
			
		||||
 | 
			
		||||
	bypassStop.startState = bypassStart
 | 
			
		||||
 | 
			
		||||
	var excludeTransition Transition
 | 
			
		||||
	var endState ATNState
 | 
			
		||||
 | 
			
		||||
	if atn.ruleToStartState[idx].isPrecedenceRule {
 | 
			
		||||
		// Wrap from the beginning of the rule to the StarLoopEntryState
 | 
			
		||||
		endState = nil
 | 
			
		||||
 | 
			
		||||
		for i := 0; i < len(atn.states); i++ {
 | 
			
		||||
			state := atn.states[i]
 | 
			
		||||
 | 
			
		||||
			if a.stateIsEndStateFor(state, idx) != nil {
 | 
			
		||||
				endState = state
 | 
			
		||||
				excludeTransition = state.(*StarLoopEntryState).loopBackState.GetTransitions()[0]
 | 
			
		||||
 | 
			
		||||
				break
 | 
			
		||||
			}
 | 
			
		||||
		}
 | 
			
		||||
 | 
			
		||||
		if excludeTransition == nil {
 | 
			
		||||
			panic("Couldn't identify final state of the precedence rule prefix section.")
 | 
			
		||||
		}
 | 
			
		||||
	} else {
 | 
			
		||||
		endState = atn.ruleToStopState[idx]
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	// All non-excluded transitions that currently target end state need to target
 | 
			
		||||
	// blockEnd instead
 | 
			
		||||
	for i := 0; i < len(atn.states); i++ {
 | 
			
		||||
		state := atn.states[i]
 | 
			
		||||
 | 
			
		||||
		for j := 0; j < len(state.GetTransitions()); j++ {
 | 
			
		||||
			transition := state.GetTransitions()[j]
 | 
			
		||||
 | 
			
		||||
			if transition == excludeTransition {
 | 
			
		||||
				continue
 | 
			
		||||
			}
 | 
			
		||||
 | 
			
		||||
			if transition.getTarget() == endState {
 | 
			
		||||
				transition.setTarget(bypassStop)
 | 
			
		||||
			}
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	// All transitions leaving the rule start state need to leave blockStart instead
 | 
			
		||||
	ruleToStartState := atn.ruleToStartState[idx]
 | 
			
		||||
	count := len(ruleToStartState.GetTransitions())
 | 
			
		||||
 | 
			
		||||
	for count > 0 {
 | 
			
		||||
		bypassStart.AddTransition(ruleToStartState.GetTransitions()[count-1], -1)
 | 
			
		||||
		ruleToStartState.SetTransitions([]Transition{ruleToStartState.GetTransitions()[len(ruleToStartState.GetTransitions())-1]})
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	// Link the new states
 | 
			
		||||
	atn.ruleToStartState[idx].AddTransition(NewEpsilonTransition(bypassStart, -1), -1)
 | 
			
		||||
	bypassStop.AddTransition(NewEpsilonTransition(endState, -1), -1)
 | 
			
		||||
 | 
			
		||||
	MatchState := NewBasicState()
 | 
			
		||||
 | 
			
		||||
	atn.addState(MatchState)
 | 
			
		||||
	MatchState.AddTransition(NewAtomTransition(bypassStop, atn.ruleToTokenType[idx]), -1)
 | 
			
		||||
	bypassStart.AddTransition(NewEpsilonTransition(MatchState, -1), -1)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// stateIsEndStateFor returns state when it is the StarLoopEntryState
// that ends the prefix section of precedence rule idx (its last
// transition leads to an epsilon-only LoopEndState whose first
// transition targets the rule's stop state); otherwise it returns nil.
func (a *ATNDeserializer) stateIsEndStateFor(state ATNState, idx int) ATNState {
	if state.GetRuleIndex() != idx {
		return nil
	}

	if _, ok := state.(*StarLoopEntryState); !ok {
		return nil
	}

	maybeLoopEndState := state.GetTransitions()[len(state.GetTransitions())-1].getTarget()

	if _, ok := maybeLoopEndState.(*LoopEndState); !ok {
		return nil
	}

	var _, ok = maybeLoopEndState.GetTransitions()[0].getTarget().(*RuleStopState)

	if maybeLoopEndState.(*LoopEndState).epsilonOnlyTransitions && ok {
		return state
	}

	return nil
}
 | 
			
		||||
 | 
			
		||||
// markPrecedenceDecisions analyzes the StarLoopEntryState states in the
// specified ATN to set the StarLoopEntryState.precedenceRuleDecision field to
// the correct value.
func (a *ATNDeserializer) markPrecedenceDecisions(atn *ATN) {
	for _, state := range atn.states {
		if _, ok := state.(*StarLoopEntryState); !ok {
			continue
		}

		// We analyze the ATN to determine if a ATN decision state is the
		// decision for the closure block that determines whether a
		// precedence rule should continue or complete.
		if atn.ruleToStartState[state.GetRuleIndex()].isPrecedenceRule {
			maybeLoopEndState := state.GetTransitions()[len(state.GetTransitions())-1].getTarget()

			// Same shape test as stateIsEndStateFor: an epsilon-only
			// LoopEndState leading to the rule stop state.
			if s3, ok := maybeLoopEndState.(*LoopEndState); ok {
				var _, ok2 = maybeLoopEndState.GetTransitions()[0].getTarget().(*RuleStopState)

				if s3.epsilonOnlyTransitions && ok2 {
					state.(*StarLoopEntryState).precedenceRuleDecision = true
				}
			}
		}
	}
}
 | 
			
		||||
 | 
			
		||||
func (a *ATNDeserializer) verifyATN(atn *ATN) {
 | 
			
		||||
	if !a.deserializationOptions.verifyATN {
 | 
			
		||||
		return
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	// Verify assumptions
 | 
			
		||||
	for i := 0; i < len(atn.states); i++ {
 | 
			
		||||
		state := atn.states[i]
 | 
			
		||||
 | 
			
		||||
		if state == nil {
 | 
			
		||||
			continue
 | 
			
		||||
		}
 | 
			
		||||
 | 
			
		||||
		a.checkCondition(state.GetEpsilonOnlyTransitions() || len(state.GetTransitions()) <= 1, "")
 | 
			
		||||
 | 
			
		||||
		switch s2 := state.(type) {
 | 
			
		||||
		case *PlusBlockStartState:
 | 
			
		||||
			a.checkCondition(s2.loopBackState != nil, "")
 | 
			
		||||
 | 
			
		||||
		case *StarLoopEntryState:
 | 
			
		||||
			a.checkCondition(s2.loopBackState != nil, "")
 | 
			
		||||
			a.checkCondition(len(s2.GetTransitions()) == 2, "")
 | 
			
		||||
 | 
			
		||||
			switch s2 := state.(type) {
 | 
			
		||||
			case *StarBlockStartState:
 | 
			
		||||
				var _, ok2 = s2.GetTransitions()[1].getTarget().(*LoopEndState)
 | 
			
		||||
 | 
			
		||||
				a.checkCondition(ok2, "")
 | 
			
		||||
				a.checkCondition(!s2.nonGreedy, "")
 | 
			
		||||
 | 
			
		||||
			case *LoopEndState:
 | 
			
		||||
				var s3, ok2 = s2.GetTransitions()[1].getTarget().(*StarBlockStartState)
 | 
			
		||||
 | 
			
		||||
				a.checkCondition(ok2, "")
 | 
			
		||||
				a.checkCondition(s3.nonGreedy, "")
 | 
			
		||||
 | 
			
		||||
			default:
 | 
			
		||||
				panic("IllegalState")
 | 
			
		||||
			}
 | 
			
		||||
 | 
			
		||||
		case *StarLoopbackState:
 | 
			
		||||
			a.checkCondition(len(state.GetTransitions()) == 1, "")
 | 
			
		||||
 | 
			
		||||
			var _, ok2 = state.GetTransitions()[0].getTarget().(*StarLoopEntryState)
 | 
			
		||||
 | 
			
		||||
			a.checkCondition(ok2, "")
 | 
			
		||||
 | 
			
		||||
		case *LoopEndState:
 | 
			
		||||
			a.checkCondition(s2.loopBackState != nil, "")
 | 
			
		||||
 | 
			
		||||
		case *RuleStartState:
 | 
			
		||||
			a.checkCondition(s2.stopState != nil, "")
 | 
			
		||||
 | 
			
		||||
		case *BaseBlockStartState:
 | 
			
		||||
			a.checkCondition(s2.endState != nil, "")
 | 
			
		||||
 | 
			
		||||
		case *BlockEndState:
 | 
			
		||||
			a.checkCondition(s2.startState != nil, "")
 | 
			
		||||
 | 
			
		||||
		case DecisionState:
 | 
			
		||||
			a.checkCondition(len(s2.GetTransitions()) <= 1 || s2.getDecision() >= 0, "")
 | 
			
		||||
 | 
			
		||||
		default:
 | 
			
		||||
			var _, ok = s2.(*RuleStopState)
 | 
			
		||||
 | 
			
		||||
			a.checkCondition(len(s2.GetTransitions()) <= 1 || ok, "")
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (a *ATNDeserializer) checkCondition(condition bool, message string) {
 | 
			
		||||
	if !condition {
 | 
			
		||||
		if message == "" {
 | 
			
		||||
			message = "IllegalState"
 | 
			
		||||
		}
 | 
			
		||||
 | 
			
		||||
		panic(message)
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (a *ATNDeserializer) readInt() int {
 | 
			
		||||
	v := a.data[a.pos]
 | 
			
		||||
 | 
			
		||||
	a.pos++
 | 
			
		||||
 | 
			
		||||
	return int(v)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (a *ATNDeserializer) readInt32() int {
 | 
			
		||||
	var low = a.readInt()
 | 
			
		||||
	var high = a.readInt()
 | 
			
		||||
	return low | (high << 16)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
//TODO
 | 
			
		||||
//func (a *ATNDeserializer) readLong() int64 {
 | 
			
		||||
//    panic("Not implemented")
 | 
			
		||||
//    var low = a.readInt32()
 | 
			
		||||
//    var high = a.readInt32()
 | 
			
		||||
//    return (low & 0x00000000FFFFFFFF) | (high << int32)
 | 
			
		||||
//}
 | 
			
		||||
 | 
			
		||||
// createByteToHex builds a 256-entry lookup table mapping each byte value to
// its two-character uppercase hexadecimal string ("00" through "FF").
func createByteToHex() []string {
	table := make([]string, 256)

	for b := range table {
		table[b] = strings.ToUpper(hex.EncodeToString([]byte{byte(b)}))
	}

	return table
}
 | 
			
		||||
 | 
			
		||||
// byteToHex is the package-level byte-to-uppercase-hex lookup table, built
// once at initialization and used by readUUID.
var byteToHex = createByteToHex()
 | 
			
		||||
 | 
			
		||||
func (a *ATNDeserializer) readUUID() string {
 | 
			
		||||
	bb := make([]int, 16)
 | 
			
		||||
 | 
			
		||||
	for i := 7; i >= 0; i-- {
 | 
			
		||||
		integer := a.readInt()
 | 
			
		||||
 | 
			
		||||
		bb[(2*i)+1] = integer & 0xFF
 | 
			
		||||
		bb[2*i] = (integer >> 8) & 0xFF
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return byteToHex[bb[0]] + byteToHex[bb[1]] +
 | 
			
		||||
		byteToHex[bb[2]] + byteToHex[bb[3]] + "-" +
 | 
			
		||||
		byteToHex[bb[4]] + byteToHex[bb[5]] + "-" +
 | 
			
		||||
		byteToHex[bb[6]] + byteToHex[bb[7]] + "-" +
 | 
			
		||||
		byteToHex[bb[8]] + byteToHex[bb[9]] + "-" +
 | 
			
		||||
		byteToHex[bb[10]] + byteToHex[bb[11]] +
 | 
			
		||||
		byteToHex[bb[12]] + byteToHex[bb[13]] +
 | 
			
		||||
		byteToHex[bb[14]] + byteToHex[bb[15]]
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// edgeFactory constructs the Transition matching a serialized edge record.
// trg indexes the target state; the meaning of arg1..arg3 depends on the
// transition type (see the per-case comments). src is carried in the
// serialized record but is not needed to build the transition itself.
func (a *ATNDeserializer) edgeFactory(atn *ATN, typeIndex, src, trg, arg1, arg2, arg3 int, sets []*IntervalSet) Transition {
	target := atn.states[trg]

	switch typeIndex {
	case TransitionEPSILON:
		return NewEpsilonTransition(target, -1)

	case TransitionRANGE:
		// arg1..arg2 is the label range; a nonzero arg3 marks that the low
		// bound was serialized as EOF.
		if arg3 != 0 {
			return NewRangeTransition(target, TokenEOF, arg2)
		}

		return NewRangeTransition(target, arg1, arg2)

	case TransitionRULE:
		// arg1 indexes the invoked rule's start state; arg2 is the rule
		// index, arg3 the precedence, and target the follow state.
		return NewRuleTransition(atn.states[arg1], arg2, arg3, target)

	case TransitionPREDICATE:
		// arg1 = rule index, arg2 = predicate index, arg3 != 0 marks the
		// predicate as context dependent.
		return NewPredicateTransition(target, arg1, arg2, arg3 != 0)

	case TransitionPRECEDENCE:
		// arg1 is the precedence level.
		return NewPrecedencePredicateTransition(target, arg1)

	case TransitionATOM:
		// arg1 is the token label; a nonzero arg3 means the label is EOF.
		if arg3 != 0 {
			return NewAtomTransition(target, TokenEOF)
		}

		return NewAtomTransition(target, arg1)

	case TransitionACTION:
		// arg1 = rule index, arg2 = action index, arg3 != 0 marks the action
		// as context dependent.
		return NewActionTransition(target, arg1, arg2, arg3 != 0)

	case TransitionSET:
		// arg1 indexes the previously deserialized interval sets.
		return NewSetTransition(target, sets[arg1])

	case TransitionNOTSET:
		return NewNotSetTransition(target, sets[arg1])

	case TransitionWILDCARD:
		return NewWildcardTransition(target)
	}

	panic("The specified transition type is not valid.")
}
 | 
			
		||||
 | 
			
		||||
// stateFactory constructs the concrete ATNState for a serialized state-type
// code (the ATNState* constants) and stamps it with the owning rule's index.
// ATNStateInvalidType yields nil (a placeholder slot); an unknown code panics.
func (a *ATNDeserializer) stateFactory(typeIndex, ruleIndex int) ATNState {
	var s ATNState

	switch typeIndex {
	case ATNStateInvalidType:
		return nil

	case ATNStateBasic:
		s = NewBasicState()

	case ATNStateRuleStart:
		s = NewRuleStartState()

	case ATNStateBlockStart:
		s = NewBasicBlockStartState()

	case ATNStatePlusBlockStart:
		s = NewPlusBlockStartState()

	case ATNStateStarBlockStart:
		s = NewStarBlockStartState()

	case ATNStateTokenStart:
		s = NewTokensStartState()

	case ATNStateRuleStop:
		s = NewRuleStopState()

	case ATNStateBlockEnd:
		s = NewBlockEndState()

	case ATNStateStarLoopBack:
		s = NewStarLoopbackState()

	case ATNStateStarLoopEntry:
		s = NewStarLoopEntryState()

	case ATNStatePlusLoopBack:
		s = NewPlusLoopbackState()

	case ATNStateLoopEnd:
		s = NewLoopEndState()

	default:
		panic(fmt.Sprintf("state type %d is invalid", typeIndex))
	}

	s.SetRuleIndex(ruleIndex)

	return s
}
 | 
			
		||||
 | 
			
		||||
// lexerActionFactory constructs the LexerAction for a serialized action
// record. data1/data2 carry type-specific operands (e.g. channel number, mode
// number, token type, or rule/action indices for custom actions); stateless
// actions reuse shared singleton instances. An unknown code panics.
func (a *ATNDeserializer) lexerActionFactory(typeIndex, data1, data2 int) LexerAction {
	switch typeIndex {
	case LexerActionTypeChannel:
		// data1 = channel to assign.
		return NewLexerChannelAction(data1)

	case LexerActionTypeCustom:
		// data1 = rule index, data2 = action index.
		return NewLexerCustomAction(data1, data2)

	case LexerActionTypeMode:
		// data1 = mode to switch to.
		return NewLexerModeAction(data1)

	case LexerActionTypeMore:
		return LexerMoreActionINSTANCE

	case LexerActionTypePopMode:
		return LexerPopModeActionINSTANCE

	case LexerActionTypePushMode:
		// data1 = mode to push.
		return NewLexerPushModeAction(data1)

	case LexerActionTypeSkip:
		return LexerSkipActionINSTANCE

	case LexerActionTypeType:
		// data1 = token type to assign.
		return NewLexerTypeAction(data1)

	default:
		panic(fmt.Sprintf("lexer action %d is invalid", typeIndex))
	}
}
 | 
			
		||||
							
								
								
									
										50
									
								
								vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_simulator.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										50
									
								
								vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_simulator.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							@@ -0,0 +1,50 @@
 | 
			
		||||
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
 | 
			
		||||
// Use of this file is governed by the BSD 3-clause license that
 | 
			
		||||
// can be found in the LICENSE.txt file in the project root.
 | 
			
		||||
 | 
			
		||||
package antlr
 | 
			
		||||
 | 
			
		||||
// ATNSimulatorError is a shared sentinel DFA state used by the simulators as
// an error marker; its state number is the maximum 32-bit signed value.
var ATNSimulatorError = NewDFAState(0x7FFFFFFF, NewBaseATNConfigSet(false))
 | 
			
		||||
 | 
			
		||||
// IATNSimulator is the interface implemented by ATN-based simulators,
// exposing their ATN, shared prediction-context cache, and per-decision DFAs.
type IATNSimulator interface {
	SharedContextCache() *PredictionContextCache
	ATN() *ATN
	DecisionToDFA() []*DFA
}
 | 
			
		||||
 | 
			
		||||
// BaseATNSimulator provides the state common to lexer and parser simulators.
type BaseATNSimulator struct {
	// atn is the network being simulated.
	atn *ATN
	// sharedContextCache canonicalizes prediction contexts across decisions.
	sharedContextCache *PredictionContextCache
	// decisionToDFA holds one DFA per decision point in the ATN.
	decisionToDFA []*DFA
}
 | 
			
		||||
 | 
			
		||||
func NewBaseATNSimulator(atn *ATN, sharedContextCache *PredictionContextCache) *BaseATNSimulator {
 | 
			
		||||
	b := new(BaseATNSimulator)
 | 
			
		||||
 | 
			
		||||
	b.atn = atn
 | 
			
		||||
	b.sharedContextCache = sharedContextCache
 | 
			
		||||
 | 
			
		||||
	return b
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (b *BaseATNSimulator) getCachedContext(context PredictionContext) PredictionContext {
 | 
			
		||||
	if b.sharedContextCache == nil {
 | 
			
		||||
		return context
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	visited := make(map[PredictionContext]PredictionContext)
 | 
			
		||||
 | 
			
		||||
	return getCachedBasePredictionContext(context, b.sharedContextCache, visited)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// SharedContextCache returns the prediction-context cache shared by this simulator.
func (b *BaseATNSimulator) SharedContextCache() *PredictionContextCache {
	return b.sharedContextCache
}

// ATN returns the network this simulator runs over.
func (b *BaseATNSimulator) ATN() *ATN {
	return b.atn
}

// DecisionToDFA returns the per-decision DFAs maintained by this simulator.
func (b *BaseATNSimulator) DecisionToDFA() []*DFA {
	return b.decisionToDFA
}
 | 
			
		||||
							
								
								
									
										386
									
								
								vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_state.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										386
									
								
								vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_state.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							@@ -0,0 +1,386 @@
 | 
			
		||||
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
 | 
			
		||||
// Use of this file is governed by the BSD 3-clause license that
 | 
			
		||||
// can be found in the LICENSE.txt file in the project root.
 | 
			
		||||
 | 
			
		||||
package antlr
 | 
			
		||||
 | 
			
		||||
import "strconv"
 | 
			
		||||
 | 
			
		||||
// Constants for serialization.
 | 
			
		||||
const (
	// Serialized state-type codes; these values are part of the serialized
	// ATN format and must not change.
	ATNStateInvalidType    = 0
	ATNStateBasic          = 1
	ATNStateRuleStart      = 2
	ATNStateBlockStart     = 3
	ATNStatePlusBlockStart = 4
	ATNStateStarBlockStart = 5
	ATNStateTokenStart     = 6
	ATNStateRuleStop       = 7
	ATNStateBlockEnd       = 8
	ATNStateStarLoopBack   = 9
	ATNStateStarLoopEntry  = 10
	ATNStatePlusLoopBack   = 11
	ATNStateLoopEnd        = 12

	// ATNStateInvalidStateNumber marks a state not yet assigned a number.
	ATNStateInvalidStateNumber = -1
)
 | 
			
		||||
 | 
			
		||||
// ATNStateInitialNumTransitions is the suggested initial capacity for a
// state's transition list.
var ATNStateInitialNumTransitions = 4
 | 
			
		||||
 | 
			
		||||
// ATNState is the interface implemented by every node of an ATN.
type ATNState interface {
	// GetEpsilonOnlyTransitions reports whether all outgoing transitions are epsilon.
	GetEpsilonOnlyTransitions() bool

	// Rule index of the rule this state belongs to.
	GetRuleIndex() int
	SetRuleIndex(int)

	// Cached lookahead set (see BaseATNState.NextTokenWithinRule).
	GetNextTokenWithinRule() *IntervalSet
	SetNextTokenWithinRule(*IntervalSet)

	// Owning ATN.
	GetATN() *ATN
	SetATN(*ATN)

	// GetStateType returns one of the ATNState* serialization constants.
	GetStateType() int

	// Unique state number within the ATN.
	GetStateNumber() int
	SetStateNumber(int)

	// Outgoing transitions.
	GetTransitions() []Transition
	SetTransitions([]Transition)
	AddTransition(Transition, int)

	String() string
	hash() int
}
 | 
			
		||||
 | 
			
		||||
// BaseATNState is the common implementation embedded by every concrete ATN
// state type.
type BaseATNState struct {
	// NextTokenWithinRule caches lookahead during parsing. Not used during construction.
	NextTokenWithinRule *IntervalSet

	// atn is the current ATN.
	atn *ATN

	// epsilonOnlyTransitions is true while every added transition is epsilon.
	epsilonOnlyTransitions bool

	// ruleIndex tracks the Rule index because there are no Rule objects at runtime.
	ruleIndex int

	// stateNumber uniquely identifies this state within its ATN.
	stateNumber int

	// stateType is one of the ATNState* serialization constants.
	stateType int

	// Track the transitions emanating from this ATN state.
	transitions []Transition
}
 | 
			
		||||
 | 
			
		||||
func NewBaseATNState() *BaseATNState {
 | 
			
		||||
	return &BaseATNState{stateNumber: ATNStateInvalidStateNumber, stateType: ATNStateInvalidType}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// GetRuleIndex returns the index of the rule this state belongs to.
func (as *BaseATNState) GetRuleIndex() int {
	return as.ruleIndex
}

// SetRuleIndex records the index of the rule this state belongs to.
func (as *BaseATNState) SetRuleIndex(v int) {
	as.ruleIndex = v
}

// GetEpsilonOnlyTransitions reports whether all outgoing transitions are epsilon.
func (as *BaseATNState) GetEpsilonOnlyTransitions() bool {
	return as.epsilonOnlyTransitions
}

// GetATN returns the ATN this state belongs to.
func (as *BaseATNState) GetATN() *ATN {
	return as.atn
}

// SetATN records the ATN this state belongs to.
func (as *BaseATNState) SetATN(atn *ATN) {
	as.atn = atn
}

// GetTransitions returns the outgoing transitions of this state.
func (as *BaseATNState) GetTransitions() []Transition {
	return as.transitions
}

// SetTransitions replaces the outgoing transitions of this state.
func (as *BaseATNState) SetTransitions(t []Transition) {
	as.transitions = t
}

// GetStateType returns the ATNState* serialization constant for this state.
func (as *BaseATNState) GetStateType() int {
	return as.stateType
}

// GetStateNumber returns this state's unique number within its ATN.
func (as *BaseATNState) GetStateNumber() int {
	return as.stateNumber
}

// SetStateNumber assigns this state's unique number within its ATN.
func (as *BaseATNState) SetStateNumber(stateNumber int) {
	as.stateNumber = stateNumber
}

// GetNextTokenWithinRule returns the cached lookahead set, if any.
func (as *BaseATNState) GetNextTokenWithinRule() *IntervalSet {
	return as.NextTokenWithinRule
}

// SetNextTokenWithinRule caches the lookahead set for this state.
func (as *BaseATNState) SetNextTokenWithinRule(v *IntervalSet) {
	as.NextTokenWithinRule = v
}

// hash uses the state number, which is unique within an ATN.
func (as *BaseATNState) hash() int {
	return as.stateNumber
}

// String renders the state as its state number.
func (as *BaseATNState) String() string {
	return strconv.Itoa(as.stateNumber)
}
 | 
			
		||||
 | 
			
		||||
func (as *BaseATNState) equals(other interface{}) bool {
 | 
			
		||||
	if ot, ok := other.(ATNState); ok {
 | 
			
		||||
		return as.stateNumber == ot.GetStateNumber()
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return false
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// isNonGreedyExitState is false for the base state; concrete state types
// needing different behavior override it.
func (as *BaseATNState) isNonGreedyExitState() bool {
	return false
}
 | 
			
		||||
 | 
			
		||||
// AddTransition appends trans (index == -1) or inserts it at index. It also
// maintains epsilonOnlyTransitions: the flag tracks the first transition's
// epsilon-ness and is cleared as soon as a mismatching transition is added.
func (as *BaseATNState) AddTransition(trans Transition, index int) {
	if len(as.transitions) == 0 {
		as.epsilonOnlyTransitions = trans.getIsEpsilon()
	} else if as.epsilonOnlyTransitions != trans.getIsEpsilon() {
		as.epsilonOnlyTransitions = false
	}

	if index == -1 {
		as.transitions = append(as.transitions, trans)
	} else {
		// Insert at index: the inner append builds a fresh slice, so the
		// outer append cannot clobber the tail it is copying from.
		as.transitions = append(as.transitions[:index], append([]Transition{trans}, as.transitions[index:]...)...)
		// TODO: as.transitions.splice(index, 1, trans)
	}
}
 | 
			
		||||
 | 
			
		||||
// BasicState is a plain ATN state with no decision or block semantics.
type BasicState struct {
	*BaseATNState
}

// NewBasicState returns a BasicState with its state type set.
func NewBasicState() *BasicState {
	b := NewBaseATNState()

	b.stateType = ATNStateBasic

	return &BasicState{BaseATNState: b}
}

// DecisionState is an ATN state at which the parser must choose between
// alternatives; it carries a decision number and a non-greedy flag.
type DecisionState interface {
	ATNState

	getDecision() int
	setDecision(int)

	getNonGreedy() bool
	setNonGreedy(bool)
}
 | 
			
		||||
 | 
			
		||||
// BaseDecisionState is the common implementation embedded by decision states.
type BaseDecisionState struct {
	*BaseATNState
	// decision is this state's decision number, or -1 if unassigned.
	decision int
	// nonGreedy marks a non-greedy subrule decision.
	nonGreedy bool
}

// NewBaseDecisionState returns a BaseDecisionState with no decision assigned.
func NewBaseDecisionState() *BaseDecisionState {
	return &BaseDecisionState{BaseATNState: NewBaseATNState(), decision: -1}
}

// getDecision returns the decision number, or -1 if unassigned.
func (s *BaseDecisionState) getDecision() int {
	return s.decision
}

// setDecision assigns the decision number.
func (s *BaseDecisionState) setDecision(b int) {
	s.decision = b
}

// getNonGreedy reports whether this decision is non-greedy.
func (s *BaseDecisionState) getNonGreedy() bool {
	return s.nonGreedy
}

// setNonGreedy marks this decision as (non-)greedy.
func (s *BaseDecisionState) setNonGreedy(b bool) {
	s.nonGreedy = b
}

// BlockStartState is a decision state that opens a block and knows its
// matching block-end state.
type BlockStartState interface {
	DecisionState

	getEndState() *BlockEndState
	setEndState(*BlockEndState)
}
 | 
			
		||||
 | 
			
		||||
// BaseBlockStartState is the start of a regular (...) block.
type BaseBlockStartState struct {
	*BaseDecisionState
	// endState is the matching block-end state.
	endState *BlockEndState
}

// NewBlockStartState returns a BaseBlockStartState; the state type is set by
// the concrete constructors that build on it.
func NewBlockStartState() *BaseBlockStartState {
	return &BaseBlockStartState{BaseDecisionState: NewBaseDecisionState()}
}

// getEndState returns the matching block-end state.
func (s *BaseBlockStartState) getEndState() *BlockEndState {
	return s.endState
}

// setEndState records the matching block-end state.
func (s *BaseBlockStartState) setEndState(b *BlockEndState) {
	s.endState = b
}

// BasicBlockStartState starts an ordinary block (no +/* loop semantics).
type BasicBlockStartState struct {
	*BaseBlockStartState
}

// NewBasicBlockStartState returns a BasicBlockStartState with its state type set.
func NewBasicBlockStartState() *BasicBlockStartState {
	b := NewBlockStartState()

	b.stateType = ATNStateBlockStart

	return &BasicBlockStartState{BaseBlockStartState: b}
}

// BlockEndState is a terminal node of a simple (a|b|c) block.
type BlockEndState struct {
	*BaseATNState
	// startState is the matching block-start state.
	startState ATNState
}

// NewBlockEndState returns a BlockEndState with its state type set.
func NewBlockEndState() *BlockEndState {
	b := NewBaseATNState()

	b.stateType = ATNStateBlockEnd

	return &BlockEndState{BaseATNState: b}
}
 | 
			
		||||
 | 
			
		||||
// RuleStopState is the last node in the ATN for a rule, unless that rule is the
// start symbol. In that case, there is one transition to EOF. Later, we might
// encode references to all calls to this rule to compute FOLLOW sets for error
// handling.
type RuleStopState struct {
	*BaseATNState
}

// NewRuleStopState returns a RuleStopState with its state type set.
func NewRuleStopState() *RuleStopState {
	b := NewBaseATNState()

	b.stateType = ATNStateRuleStop

	return &RuleStopState{BaseATNState: b}
}

// RuleStartState is the entry node of a rule's ATN fragment.
type RuleStartState struct {
	*BaseATNState
	// stopState is the rule's matching stop state.
	stopState ATNState
	// isPrecedenceRule marks a left-recursive (precedence) rule.
	isPrecedenceRule bool
}

// NewRuleStartState returns a RuleStartState with its state type set.
func NewRuleStartState() *RuleStartState {
	b := NewBaseATNState()

	b.stateType = ATNStateRuleStart

	return &RuleStartState{BaseATNState: b}
}

// PlusLoopbackState is a decision state for A+ and (A|B)+. It has two
// transitions: one to the loop back to start of the block, and one to exit.
type PlusLoopbackState struct {
	*BaseDecisionState
}

// NewPlusLoopbackState returns a PlusLoopbackState with its state type set.
func NewPlusLoopbackState() *PlusLoopbackState {
	b := NewBaseDecisionState()

	b.stateType = ATNStatePlusLoopBack

	return &PlusLoopbackState{BaseDecisionState: b}
}
 | 
			
		||||
 | 
			
		||||
// PlusBlockStartState is the start of a (A|B|...)+ loop. Technically it is a
// decision state; we don't use it for code generation. Somebody might need it,
// it is included for completeness. In reality, PlusLoopbackState is the real
// decision-making node for A+.
type PlusBlockStartState struct {
	*BaseBlockStartState
	// loopBackState is the PlusLoopbackState that closes the loop.
	loopBackState ATNState
}

// NewPlusBlockStartState returns a PlusBlockStartState with its state type set.
func NewPlusBlockStartState() *PlusBlockStartState {
	b := NewBlockStartState()

	b.stateType = ATNStatePlusBlockStart

	return &PlusBlockStartState{BaseBlockStartState: b}
}

// StarBlockStartState is the block that begins a closure loop.
type StarBlockStartState struct {
	*BaseBlockStartState
}

// NewStarBlockStartState returns a StarBlockStartState with its state type set.
func NewStarBlockStartState() *StarBlockStartState {
	b := NewBlockStartState()

	b.stateType = ATNStateStarBlockStart

	return &StarBlockStartState{BaseBlockStartState: b}
}

// StarLoopbackState closes a * loop, transitioning back to its entry state.
type StarLoopbackState struct {
	*BaseATNState
}

// NewStarLoopbackState returns a StarLoopbackState with its state type set.
func NewStarLoopbackState() *StarLoopbackState {
	b := NewBaseATNState()

	b.stateType = ATNStateStarLoopBack

	return &StarLoopbackState{BaseATNState: b}
}
 | 
			
		||||
 | 
			
		||||
// StarLoopEntryState is the decision state entered at the top of a * loop.
type StarLoopEntryState struct {
	*BaseDecisionState
	// loopBackState is the StarLoopbackState that closes the loop.
	loopBackState ATNState
	// precedenceRuleDecision marks the decision that determines whether a
	// precedence rule continues or completes (set by markPrecedenceDecisions).
	precedenceRuleDecision bool
}

// NewStarLoopEntryState returns a StarLoopEntryState with its state type set.
func NewStarLoopEntryState() *StarLoopEntryState {
	b := NewBaseDecisionState()

	b.stateType = ATNStateStarLoopEntry

	// False precedenceRuleDecision indicates whether s state can benefit from a precedence DFA during SLL decision making.
	return &StarLoopEntryState{BaseDecisionState: b}
}

// LoopEndState marks the end of a * or + loop.
type LoopEndState struct {
	*BaseATNState
	// loopBackState is the state that loops back to the start of the block.
	loopBackState ATNState
}

// NewLoopEndState returns a LoopEndState with its state type set.
func NewLoopEndState() *LoopEndState {
	b := NewBaseATNState()

	b.stateType = ATNStateLoopEnd

	return &LoopEndState{BaseATNState: b}
}

// TokensStartState is the Tokens rule start state linking to each lexer rule start state.
type TokensStartState struct {
	*BaseDecisionState
}

// NewTokensStartState returns a TokensStartState with its state type set.
func NewTokensStartState() *TokensStartState {
	b := NewBaseDecisionState()

	b.stateType = ATNStateTokenStart

	return &TokensStartState{BaseDecisionState: b}
}
 | 
			
		||||
							
								
								
									
										11
									
								
								vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_type.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										11
									
								
								vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_type.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							@@ -0,0 +1,11 @@
 | 
			
		||||
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
 | 
			
		||||
// Use of this file is governed by the BSD 3-clause license that
 | 
			
		||||
// can be found in the LICENSE.txt file in the project root.
 | 
			
		||||
 | 
			
		||||
package antlr
 | 
			
		||||
 | 
			
		||||
// Represents the type of recognizer an ATN applies to.
const (
	ATNTypeLexer  = 0
	ATNTypeParser = 1
)
 | 
			
		||||
							
								
								
									
										12
									
								
								vendor/github.com/antlr/antlr4/runtime/Go/antlr/char_stream.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										12
									
								
								vendor/github.com/antlr/antlr4/runtime/Go/antlr/char_stream.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							@@ -0,0 +1,12 @@
 | 
			
		||||
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
 | 
			
		||||
// Use of this file is governed by the BSD 3-clause license that
 | 
			
		||||
// can be found in the LICENSE.txt file in the project root.
 | 
			
		||||
 | 
			
		||||
package antlr
 | 
			
		||||
 | 
			
		||||
// CharStream is an IntStream over characters that can report substrings of
// its input by absolute positions, by token boundaries, or by interval.
type CharStream interface {
	IntStream
	GetText(int, int) string
	GetTextFromTokens(start, end Token) string
	GetTextFromInterval(*Interval) string
}
 | 
			
		||||
							
								
								
									
										56
									
								
								vendor/github.com/antlr/antlr4/runtime/Go/antlr/common_token_factory.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										56
									
								
								vendor/github.com/antlr/antlr4/runtime/Go/antlr/common_token_factory.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							@@ -0,0 +1,56 @@
 | 
			
		||||
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
 | 
			
		||||
// Use of this file is governed by the BSD 3-clause license that
 | 
			
		||||
// can be found in the LICENSE.txt file in the project root.
 | 
			
		||||
 | 
			
		||||
package antlr
 | 
			
		||||
 | 
			
		||||
// TokenFactory creates CommonToken objects.
type TokenFactory interface {
	// Create builds a token for the given source pair, token type, explicit
	// text (may be empty), channel, absolute start/stop offsets, and position.
	Create(source *TokenSourceCharStreamPair, ttype int, text string, channel, start, stop, line, column int) Token
}
 | 
			
		||||
 | 
			
		||||
// CommonTokenFactory is the default TokenFactory implementation.
 | 
			
		||||
// CommonTokenFactory is the default TokenFactory implementation.
type CommonTokenFactory struct {
	// copyText indicates whether CommonToken.setText should be called after
	// constructing tokens to explicitly set the text. This is useful for cases
	// where the input stream might not be able to provide arbitrary substrings of
	// text from the input after the lexer creates a token (e.g. the
	// implementation of CharStream.GetText in UnbufferedCharStream panics an
	// UnsupportedOperationException). Explicitly setting the token text allows
	// Token.GetText to be called at any time regardless of the input stream
	// implementation.
	//
	// The default value is false to avoid the performance and memory overhead of
	// copying text for every token unless explicitly requested.
	copyText bool
}
 | 
			
		||||
 | 
			
		||||
func NewCommonTokenFactory(copyText bool) *CommonTokenFactory {
 | 
			
		||||
	return &CommonTokenFactory{copyText: copyText}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// CommonTokenFactoryDEFAULT is the default CommonTokenFactory. It does not
// explicitly copy token text when constructing tokens.
var CommonTokenFactoryDEFAULT = NewCommonTokenFactory(false)
 | 
			
		||||
 | 
			
		||||
func (c *CommonTokenFactory) Create(source *TokenSourceCharStreamPair, ttype int, text string, channel, start, stop, line, column int) Token {
 | 
			
		||||
	t := NewCommonToken(source, ttype, channel, start, stop)
 | 
			
		||||
 | 
			
		||||
	t.line = line
 | 
			
		||||
	t.column = column
 | 
			
		||||
 | 
			
		||||
	if text != "" {
 | 
			
		||||
		t.SetText(text)
 | 
			
		||||
	} else if c.copyText && source.charStream != nil {
 | 
			
		||||
		t.SetText(source.charStream.GetTextFromInterval(NewInterval(start, stop)))
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return t
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (c *CommonTokenFactory) createThin(ttype int, text string) Token {
 | 
			
		||||
	t := NewCommonToken(nil, ttype, TokenDefaultChannel, -1, -1)
 | 
			
		||||
	t.SetText(text)
 | 
			
		||||
 | 
			
		||||
	return t
 | 
			
		||||
}
 | 
			
		||||
							
								
								
									
										447
									
								
								vendor/github.com/antlr/antlr4/runtime/Go/antlr/common_token_stream.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										447
									
								
								vendor/github.com/antlr/antlr4/runtime/Go/antlr/common_token_stream.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							@@ -0,0 +1,447 @@
 | 
			
		||||
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
 | 
			
		||||
// Use of this file is governed by the BSD 3-clause license that
 | 
			
		||||
// can be found in the LICENSE.txt file in the project root.
 | 
			
		||||
 | 
			
		||||
package antlr
 | 
			
		||||
 | 
			
		||||
import (
 | 
			
		||||
	"strconv"
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
// CommonTokenStream is an implementation of TokenStream that loads tokens from
// a TokenSource on-demand and places the tokens in a buffer to provide access
// to any previous token by index. This token stream ignores the value of
// Token.getChannel. If your parser requires the token stream filter tokens to
// only those on a particular channel, such as Token.DEFAULT_CHANNEL or
// Token.HIDDEN_CHANNEL, use a filtering token stream such as CommonTokenStream.
type CommonTokenStream struct {
	// channel is the channel this stream advances along; off-channel tokens
	// are skipped by adjustSeekIndex / NextTokenOnChannel.
	channel int

	// fetchedEOF indicates whether the Token.EOF token has been fetched from
	// tokenSource and added to tokens. This field improves performance for the
	// following cases:
	//
	// consume: The lookahead check in consume to prevent consuming the EOF symbol is
	// optimized by checking the values of fetchedEOF and p instead of calling LA.
	//
	// fetch: The check to prevent adding multiple EOF symbols into tokens is
	// trivial with this field.
	fetchedEOF bool

	// index indexes into tokens of the current token (next token to consume).
	// tokens[p] should be LT(1). It is set to -1 when the stream is first
	// constructed or when SetTokenSource is called, indicating that the first token
	// has not yet been fetched from the token source. For additional information,
	// see the documentation of IntStream for a description of initializing methods.
	index int

	// tokenSource is the TokenSource from which tokens for this stream are
	// fetched.
	tokenSource TokenSource

	// tokens is all tokens fetched from the token source. The list is considered a
	// complete view of the input once fetchedEOF is set to true.
	tokens []Token
}
 | 
			
		||||
 | 
			
		||||
func NewCommonTokenStream(lexer Lexer, channel int) *CommonTokenStream {
 | 
			
		||||
	return &CommonTokenStream{
 | 
			
		||||
		channel:     channel,
 | 
			
		||||
		index:       -1,
 | 
			
		||||
		tokenSource: lexer,
 | 
			
		||||
		tokens:      make([]Token, 0),
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// GetAllTokens returns the backing slice of all tokens fetched so far.
func (c *CommonTokenStream) GetAllTokens() []Token {
	return c.tokens
}

// Mark is a no-op for a buffered stream: every token is retained, so no
// release bookkeeping is needed.
func (c *CommonTokenStream) Mark() int {
	return 0
}

// Release is a no-op; see Mark.
func (c *CommonTokenStream) Release(marker int) {}

// reset rewinds the stream to the first on-channel token.
func (c *CommonTokenStream) reset() {
	c.Seek(0)
}

// Seek moves the current position to the given token index, adjusted forward
// to the next token on this stream's channel.
func (c *CommonTokenStream) Seek(index int) {
	c.lazyInit()
	c.index = c.adjustSeekIndex(index)
}

// Get returns the token at the given absolute index, triggering the first
// fetch if the stream has not been initialized yet.
func (c *CommonTokenStream) Get(index int) Token {
	c.lazyInit()

	return c.tokens[index]
}
 | 
			
		||||
 | 
			
		||||
func (c *CommonTokenStream) Consume() {
 | 
			
		||||
	SkipEOFCheck := false
 | 
			
		||||
 | 
			
		||||
	if c.index >= 0 {
 | 
			
		||||
		if c.fetchedEOF {
 | 
			
		||||
			// The last token in tokens is EOF. Skip the check if p indexes any fetched.
 | 
			
		||||
			// token except the last.
 | 
			
		||||
			SkipEOFCheck = c.index < len(c.tokens)-1
 | 
			
		||||
		} else {
 | 
			
		||||
			// No EOF token in tokens. Skip the check if p indexes a fetched token.
 | 
			
		||||
			SkipEOFCheck = c.index < len(c.tokens)
 | 
			
		||||
		}
 | 
			
		||||
	} else {
 | 
			
		||||
		// Not yet initialized
 | 
			
		||||
		SkipEOFCheck = false
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	if !SkipEOFCheck && c.LA(1) == TokenEOF {
 | 
			
		||||
		panic("cannot consume EOF")
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	if c.Sync(c.index + 1) {
 | 
			
		||||
		c.index = c.adjustSeekIndex(c.index + 1)
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Sync makes sure index i in tokens has a token and returns true if a token is
 | 
			
		||||
// located at index i and otherwise false.
 | 
			
		||||
func (c *CommonTokenStream) Sync(i int) bool {
 | 
			
		||||
	n := i - len(c.tokens) + 1 // TODO: How many more elements do we need?
 | 
			
		||||
 | 
			
		||||
	if n > 0 {
 | 
			
		||||
		fetched := c.fetch(n)
 | 
			
		||||
		return fetched >= n
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return true
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// fetch adds n elements to buffer and returns the actual number of elements
 | 
			
		||||
// added to the buffer.
 | 
			
		||||
func (c *CommonTokenStream) fetch(n int) int {
 | 
			
		||||
	if c.fetchedEOF {
 | 
			
		||||
		return 0
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	for i := 0; i < n; i++ {
 | 
			
		||||
		t := c.tokenSource.NextToken()
 | 
			
		||||
 | 
			
		||||
		t.SetTokenIndex(len(c.tokens))
 | 
			
		||||
		c.tokens = append(c.tokens, t)
 | 
			
		||||
 | 
			
		||||
		if t.GetTokenType() == TokenEOF {
 | 
			
		||||
			c.fetchedEOF = true
 | 
			
		||||
 | 
			
		||||
			return i + 1
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return n
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// GetTokens gets all tokens from start to stop inclusive.
 | 
			
		||||
func (c *CommonTokenStream) GetTokens(start int, stop int, types *IntervalSet) []Token {
 | 
			
		||||
	if start < 0 || stop < 0 {
 | 
			
		||||
		return nil
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	c.lazyInit()
 | 
			
		||||
 | 
			
		||||
	subset := make([]Token, 0)
 | 
			
		||||
 | 
			
		||||
	if stop >= len(c.tokens) {
 | 
			
		||||
		stop = len(c.tokens) - 1
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	for i := start; i < stop; i++ {
 | 
			
		||||
		t := c.tokens[i]
 | 
			
		||||
 | 
			
		||||
		if t.GetTokenType() == TokenEOF {
 | 
			
		||||
			break
 | 
			
		||||
		}
 | 
			
		||||
 | 
			
		||||
		if types == nil || types.contains(t.GetTokenType()) {
 | 
			
		||||
			subset = append(subset, t)
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return subset
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// LA returns the token type of the token i positions away from the current
// position (negative i looks backward).
func (c *CommonTokenStream) LA(i int) int {
	return c.LT(i).GetTokenType()
}

// lazyInit performs the deferred first fetch: index == -1 means nothing has
// been read from the token source yet.
func (c *CommonTokenStream) lazyInit() {
	if c.index == -1 {
		c.setup()
	}
}

// setup fetches the first token and positions the stream on the first
// on-channel token.
func (c *CommonTokenStream) setup() {
	c.Sync(0)
	c.index = c.adjustSeekIndex(0)
}
 | 
			
		||||
 | 
			
		||||
// GetTokenSource returns the TokenSource this stream pulls tokens from.
func (c *CommonTokenStream) GetTokenSource() TokenSource {
	return c.tokenSource
}

// SetTokenSource resets the c token stream by setting its token source. The
// buffer is emptied and index reset to -1 so the next access refetches.
func (c *CommonTokenStream) SetTokenSource(tokenSource TokenSource) {
	c.tokenSource = tokenSource
	c.tokens = make([]Token, 0)
	c.index = -1
}
 | 
			
		||||
 | 
			
		||||
// NextTokenOnChannel returns the index of the next token on channel given a
 | 
			
		||||
// starting index. Returns i if tokens[i] is on channel. Returns -1 if there are
 | 
			
		||||
// no tokens on channel between i and EOF.
 | 
			
		||||
func (c *CommonTokenStream) NextTokenOnChannel(i, channel int) int {
 | 
			
		||||
	c.Sync(i)
 | 
			
		||||
 | 
			
		||||
	if i >= len(c.tokens) {
 | 
			
		||||
		return -1
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	token := c.tokens[i]
 | 
			
		||||
 | 
			
		||||
	for token.GetChannel() != c.channel {
 | 
			
		||||
		if token.GetTokenType() == TokenEOF {
 | 
			
		||||
			return -1
 | 
			
		||||
		}
 | 
			
		||||
 | 
			
		||||
		i++
 | 
			
		||||
		c.Sync(i)
 | 
			
		||||
		token = c.tokens[i]
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return i
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// previousTokenOnChannel returns the index of the previous token on channel
 | 
			
		||||
// given a starting index. Returns i if tokens[i] is on channel. Returns -1 if
 | 
			
		||||
// there are no tokens on channel between i and 0.
 | 
			
		||||
func (c *CommonTokenStream) previousTokenOnChannel(i, channel int) int {
 | 
			
		||||
	for i >= 0 && c.tokens[i].GetChannel() != channel {
 | 
			
		||||
		i--
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return i
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// GetHiddenTokensToRight collects all tokens on a specified channel to the
 | 
			
		||||
// right of the current token up until we see a token on DEFAULT_TOKEN_CHANNEL
 | 
			
		||||
// or EOF. If channel is -1, it finds any non-default channel token.
 | 
			
		||||
func (c *CommonTokenStream) GetHiddenTokensToRight(tokenIndex, channel int) []Token {
 | 
			
		||||
	c.lazyInit()
 | 
			
		||||
 | 
			
		||||
	if tokenIndex < 0 || tokenIndex >= len(c.tokens) {
 | 
			
		||||
		panic(strconv.Itoa(tokenIndex) + " not in 0.." + strconv.Itoa(len(c.tokens)-1))
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	nextOnChannel := c.NextTokenOnChannel(tokenIndex+1, LexerDefaultTokenChannel)
 | 
			
		||||
	from := tokenIndex + 1
 | 
			
		||||
 | 
			
		||||
	// If no onchannel to the right, then nextOnChannel == -1, so set to to last token
 | 
			
		||||
	var to int
 | 
			
		||||
 | 
			
		||||
	if nextOnChannel == -1 {
 | 
			
		||||
		to = len(c.tokens) - 1
 | 
			
		||||
	} else {
 | 
			
		||||
		to = nextOnChannel
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return c.filterForChannel(from, to, channel)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// GetHiddenTokensToLeft collects all tokens on channel to the left of the
// current token until we see a token on DEFAULT_TOKEN_CHANNEL. If channel is
// -1, it finds any non default channel token. Panics when tokenIndex is
// outside the fetched buffer.
func (c *CommonTokenStream) GetHiddenTokensToLeft(tokenIndex, channel int) []Token {
	c.lazyInit()

	if tokenIndex < 0 || tokenIndex >= len(c.tokens) {
		panic(strconv.Itoa(tokenIndex) + " not in 0.." + strconv.Itoa(len(c.tokens)-1))
	}

	prevOnChannel := c.previousTokenOnChannel(tokenIndex-1, LexerDefaultTokenChannel)

	// The token immediately to the left is already on the default channel,
	// so there are no hidden tokens in between.
	if prevOnChannel == tokenIndex-1 {
		return nil
	}

	// If there are none on channel to the left and prevOnChannel == -1 then from = 0
	from := prevOnChannel + 1
	to := tokenIndex - 1

	return c.filterForChannel(from, to, channel)
}
 | 
			
		||||
 | 
			
		||||
func (c *CommonTokenStream) filterForChannel(left, right, channel int) []Token {
 | 
			
		||||
	hidden := make([]Token, 0)
 | 
			
		||||
 | 
			
		||||
	for i := left; i < right+1; i++ {
 | 
			
		||||
		t := c.tokens[i]
 | 
			
		||||
 | 
			
		||||
		if channel == -1 {
 | 
			
		||||
			if t.GetChannel() != LexerDefaultTokenChannel {
 | 
			
		||||
				hidden = append(hidden, t)
 | 
			
		||||
			}
 | 
			
		||||
		} else if t.GetChannel() == channel {
 | 
			
		||||
			hidden = append(hidden, t)
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	if len(hidden) == 0 {
 | 
			
		||||
		return nil
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return hidden
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// GetSourceName delegates to the underlying token source.
func (c *CommonTokenStream) GetSourceName() string {
	return c.tokenSource.GetSourceName()
}

// Size returns the number of tokens fetched so far (the full input size only
// after Fill has run).
func (c *CommonTokenStream) Size() int {
	return len(c.tokens)
}

// Index returns the index of the current token.
func (c *CommonTokenStream) Index() int {
	return c.index
}

// GetAllText returns the concatenated text of the entire stream.
func (c *CommonTokenStream) GetAllText() string {
	return c.GetTextFromInterval(nil)
}
 | 
			
		||||
 | 
			
		||||
// GetTextFromTokens returns the text spanned by the two tokens (inclusive),
// or "" if either token is nil.
func (c *CommonTokenStream) GetTextFromTokens(start, end Token) string {
	if start == nil || end == nil {
		return ""
	}

	return c.GetTextFromInterval(NewInterval(start.GetTokenIndex(), end.GetTokenIndex()))
}

// GetTextFromRuleContext returns the text covered by the rule context's
// source interval.
func (c *CommonTokenStream) GetTextFromRuleContext(interval RuleContext) string {
	return c.GetTextFromInterval(interval.GetSourceInterval())
}
 | 
			
		||||
 | 
			
		||||
func (c *CommonTokenStream) GetTextFromInterval(interval *Interval) string {
 | 
			
		||||
	c.lazyInit()
 | 
			
		||||
	c.Fill()
 | 
			
		||||
 | 
			
		||||
	if interval == nil {
 | 
			
		||||
		interval = NewInterval(0, len(c.tokens)-1)
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	start := interval.Start
 | 
			
		||||
	stop := interval.Stop
 | 
			
		||||
 | 
			
		||||
	if start < 0 || stop < 0 {
 | 
			
		||||
		return ""
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	if stop >= len(c.tokens) {
 | 
			
		||||
		stop = len(c.tokens) - 1
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	s := ""
 | 
			
		||||
 | 
			
		||||
	for i := start; i < stop+1; i++ {
 | 
			
		||||
		t := c.tokens[i]
 | 
			
		||||
 | 
			
		||||
		if t.GetTokenType() == TokenEOF {
 | 
			
		||||
			break
 | 
			
		||||
		}
 | 
			
		||||
 | 
			
		||||
		s += t.GetText()
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return s
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Fill gets all tokens from the lexer until EOF.
 | 
			
		||||
func (c *CommonTokenStream) Fill() {
 | 
			
		||||
	c.lazyInit()
 | 
			
		||||
 | 
			
		||||
	for c.fetch(1000) == 1000 {
 | 
			
		||||
		continue
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// adjustSeekIndex maps a raw token index to the index of the next token on
// this stream's channel at or after i.
func (c *CommonTokenStream) adjustSeekIndex(i int) int {
	return c.NextTokenOnChannel(i, c.channel)
}
 | 
			
		||||
 | 
			
		||||
func (c *CommonTokenStream) LB(k int) Token {
 | 
			
		||||
	if k == 0 || c.index-k < 0 {
 | 
			
		||||
		return nil
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	i := c.index
 | 
			
		||||
	n := 1
 | 
			
		||||
 | 
			
		||||
	// Find k good tokens looking backward
 | 
			
		||||
	for n <= k {
 | 
			
		||||
		// Skip off-channel tokens
 | 
			
		||||
		i = c.previousTokenOnChannel(i-1, c.channel)
 | 
			
		||||
		n++
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	if i < 0 {
 | 
			
		||||
		return nil
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return c.tokens[i]
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// LT returns the k-th on-channel token ahead of the current position (k > 0),
// the k-th on-channel token behind it via LB (k < 0), or nil for k == 0.
func (c *CommonTokenStream) LT(k int) Token {
	c.lazyInit()

	if k == 0 {
		return nil
	}

	if k < 0 {
		return c.LB(-k)
	}

	i := c.index
	n := 1 // We know tokens[n] is valid

	// Find k good tokens
	for n < k {
		// Skip off-channel tokens, but make sure to not look past EOF
		if c.Sync(i + 1) {
			i = c.NextTokenOnChannel(i+1, c.channel)
		}

		n++
	}

	return c.tokens[i]
}
 | 
			
		||||
 | 
			
		||||
// getNumberOfOnChannelTokens counts EOF once.
 | 
			
		||||
func (c *CommonTokenStream) getNumberOfOnChannelTokens() int {
 | 
			
		||||
	var n int
 | 
			
		||||
 | 
			
		||||
	c.Fill()
 | 
			
		||||
 | 
			
		||||
	for i := 0; i < len(c.tokens); i++ {
 | 
			
		||||
		t := c.tokens[i]
 | 
			
		||||
 | 
			
		||||
		if t.GetChannel() == c.channel {
 | 
			
		||||
			n++
 | 
			
		||||
		}
 | 
			
		||||
 | 
			
		||||
		if t.GetTokenType() == TokenEOF {
 | 
			
		||||
			break
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return n
 | 
			
		||||
}
 | 
			
		||||
							
								
								
									
										183
									
								
								vendor/github.com/antlr/antlr4/runtime/Go/antlr/dfa.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										183
									
								
								vendor/github.com/antlr/antlr4/runtime/Go/antlr/dfa.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							@@ -0,0 +1,183 @@
 | 
			
		||||
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
 | 
			
		||||
// Use of this file is governed by the BSD 3-clause license that
 | 
			
		||||
// can be found in the LICENSE.txt file in the project root.
 | 
			
		||||
 | 
			
		||||
package antlr
 | 
			
		||||
 | 
			
		||||
import (
 | 
			
		||||
	"sort"
 | 
			
		||||
	"sync"
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
// DFA is a deterministic finite automaton built lazily during prediction.
// The per-field mutexes guard the mutable fields for concurrent use.
type DFA struct {
	// atnStartState is the ATN state in which this was created
	atnStartState DecisionState

	// decision is the decision number this DFA belongs to.
	decision int

	// states is all the DFA states. Use Map to get the old state back; Set can only
	// indicate whether it is there. Guarded by statesMu.
	states map[int]*DFAState
	statesMu sync.RWMutex

	// s0 is the start state; guarded by s0Mu.
	s0 *DFAState
	s0Mu sync.RWMutex

	// precedenceDfa is the backing field for isPrecedenceDfa and setPrecedenceDfa.
	// True if the DFA is for a precedence decision and false otherwise.
	// Guarded by precedenceDfaMu.
	precedenceDfa bool
	precedenceDfaMu sync.RWMutex
}
 | 
			
		||||
 | 
			
		||||
func NewDFA(atnStartState DecisionState, decision int) *DFA {
 | 
			
		||||
	return &DFA{
 | 
			
		||||
		atnStartState: atnStartState,
 | 
			
		||||
		decision:      decision,
 | 
			
		||||
		states:        make(map[int]*DFAState),
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// getPrecedenceStartState gets the start state for the current precedence and
 | 
			
		||||
// returns the start state corresponding to the specified precedence if a start
 | 
			
		||||
// state exists for the specified precedence and nil otherwise. d must be a
 | 
			
		||||
// precedence DFA. See also isPrecedenceDfa.
 | 
			
		||||
func (d *DFA) getPrecedenceStartState(precedence int) *DFAState {
 | 
			
		||||
	if !d.getPrecedenceDfa() {
 | 
			
		||||
		panic("only precedence DFAs may contain a precedence start state")
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	// s0.edges is never nil for a precedence DFA
 | 
			
		||||
	if precedence < 0 || precedence >= len(d.getS0().getEdges()) {
 | 
			
		||||
		return nil
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return d.getS0().getIthEdge(precedence)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// setPrecedenceStartState sets the start state for the current precedence. d
// must be a precedence DFA. See also isPrecedenceDfa. Negative precedences
// are silently ignored.
func (d *DFA) setPrecedenceStartState(precedence int, startState *DFAState) {
	if !d.getPrecedenceDfa() {
		panic("only precedence DFAs may contain a precedence start state")
	}

	if precedence < 0 {
		return
	}

	// Synchronization on s0 here is ok. When the DFA is turned into a
	// precedence DFA, s0 will be initialized once and not updated again. s0.edges
	// is never nil for a precedence DFA.
	s0 := d.getS0()
	// Grow the edge table so index `precedence` exists.
	if precedence >= s0.numEdges() {
		edges := append(s0.getEdges(), make([]*DFAState, precedence+1-s0.numEdges())...)
		s0.setEdges(edges)
		d.setS0(s0)
	}

	s0.setIthEdge(precedence, startState)
}
 | 
			
		||||
 | 
			
		||||
// getPrecedenceDfa reports whether d is a precedence DFA, under a read lock.
func (d *DFA) getPrecedenceDfa() bool {
	d.precedenceDfaMu.RLock()
	defer d.precedenceDfaMu.RUnlock()
	return d.precedenceDfa
}
 | 
			
		||||
 | 
			
		||||
// setPrecedenceDfa sets whether d is a precedence DFA. If precedenceDfa differs
// from the current DFA configuration, then d.states is cleared, the initial
// state s0 is set to a new DFAState with an empty outgoing DFAState.edges to
// store the start states for individual precedence values if precedenceDfa is
// true or nil otherwise, and d.precedenceDfa is updated.
func (d *DFA) setPrecedenceDfa(precedenceDfa bool) {
	if d.getPrecedenceDfa() != precedenceDfa {
		// Clear all cached states before switching modes.
		d.setStates(make(map[int]*DFAState))

		if precedenceDfa {
			precedenceState := NewDFAState(-1, NewBaseATNConfigSet(false))

			precedenceState.setEdges(make([]*DFAState, 0))
			precedenceState.isAcceptState = false
			precedenceState.requiresFullContext = false
			d.setS0(precedenceState)
		} else {
			d.setS0(nil)
		}

		d.precedenceDfaMu.Lock()
		defer d.precedenceDfaMu.Unlock()
		d.precedenceDfa = precedenceDfa
	}
}
 | 
			
		||||
 | 
			
		||||
// getS0 returns the start state under a read lock.
func (d *DFA) getS0() *DFAState {
	d.s0Mu.RLock()
	defer d.s0Mu.RUnlock()
	return d.s0
}

// setS0 replaces the start state under a write lock.
func (d *DFA) setS0(s *DFAState) {
	d.s0Mu.Lock()
	defer d.s0Mu.Unlock()
	d.s0 = s
}

// getState looks up a cached state by its hash under a read lock, returning
// the state and whether it was present.
func (d *DFA) getState(hash int) (*DFAState, bool) {
	d.statesMu.RLock()
	defer d.statesMu.RUnlock()
	s, ok := d.states[hash]
	return s, ok
}

// setStates replaces the whole state map under a write lock.
func (d *DFA) setStates(states map[int]*DFAState) {
	d.statesMu.Lock()
	defer d.statesMu.Unlock()
	d.states = states
}

// setState caches a state under its hash under a write lock.
func (d *DFA) setState(hash int, state *DFAState) {
	d.statesMu.Lock()
	defer d.statesMu.Unlock()
	d.states[hash] = state
}

// numStates returns the number of cached states under a read lock.
func (d *DFA) numStates() int {
	d.statesMu.RLock()
	defer d.statesMu.RUnlock()
	return len(d.states)
}
 | 
			
		||||
 | 
			
		||||
// dfaStateList implements sort.Interface, ordering states by state number.
type dfaStateList []*DFAState

func (d dfaStateList) Len() int           { return len(d) }
func (d dfaStateList) Less(i, j int) bool { return d[i].stateNumber < d[j].stateNumber }
func (d dfaStateList) Swap(i, j int)      { d[i], d[j] = d[j], d[i] }
 | 
			
		||||
 | 
			
		||||
// sortedStates returns the states in d sorted by their state number.
 | 
			
		||||
func (d *DFA) sortedStates() []*DFAState {
 | 
			
		||||
	vs := make([]*DFAState, 0, len(d.states))
 | 
			
		||||
 | 
			
		||||
	for _, v := range d.states {
 | 
			
		||||
		vs = append(vs, v)
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	sort.Sort(dfaStateList(vs))
 | 
			
		||||
 | 
			
		||||
	return vs
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// String renders the DFA using the given token name tables for edge labels,
// or "" when the DFA has no start state.
func (d *DFA) String(literalNames []string, symbolicNames []string) string {
	if d.getS0() == nil {
		return ""
	}

	return NewDFASerializer(d, literalNames, symbolicNames).String()
}

// ToLexerString renders the DFA with lexer-style (character) edge labels, or
// "" when the DFA has no start state.
func (d *DFA) ToLexerString() string {
	if d.getS0() == nil {
		return ""
	}

	return NewLexerDFASerializer(d).String()
}
 | 
			
		||||
							
								
								
									
										152
									
								
								vendor/github.com/antlr/antlr4/runtime/Go/antlr/dfa_serializer.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										152
									
								
								vendor/github.com/antlr/antlr4/runtime/Go/antlr/dfa_serializer.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							@@ -0,0 +1,152 @@
 | 
			
		||||
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
 | 
			
		||||
// Use of this file is governed by the BSD 3-clause license that
 | 
			
		||||
// can be found in the LICENSE.txt file in the project root.
 | 
			
		||||
 | 
			
		||||
package antlr
 | 
			
		||||
 | 
			
		||||
import (
 | 
			
		||||
	"fmt"
 | 
			
		||||
	"strconv"
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
// DFASerializer is a DFA walker that knows how to dump them to serialized
// strings.
type DFASerializer struct {
	// dfa is the automaton being rendered.
	dfa           *DFA
	// literalNames and symbolicNames label edges by token type; see
	// getEdgeLabel for the lookup order.
	literalNames  []string
	symbolicNames []string
}
 | 
			
		||||
 | 
			
		||||
func NewDFASerializer(dfa *DFA, literalNames, symbolicNames []string) *DFASerializer {
 | 
			
		||||
	if literalNames == nil {
 | 
			
		||||
		literalNames = make([]string, 0)
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	if symbolicNames == nil {
 | 
			
		||||
		symbolicNames = make([]string, 0)
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return &DFASerializer{
 | 
			
		||||
		dfa:           dfa,
 | 
			
		||||
		literalNames:  literalNames,
 | 
			
		||||
		symbolicNames: symbolicNames,
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (d *DFASerializer) String() string {
 | 
			
		||||
	if d.dfa.getS0() == nil {
 | 
			
		||||
		return ""
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	buf := ""
 | 
			
		||||
	states := d.dfa.sortedStates()
 | 
			
		||||
 | 
			
		||||
	for _, s := range states {
 | 
			
		||||
		if s.edges != nil {
 | 
			
		||||
			n := len(s.edges)
 | 
			
		||||
 | 
			
		||||
			for j := 0; j < n; j++ {
 | 
			
		||||
				t := s.edges[j]
 | 
			
		||||
 | 
			
		||||
				if t != nil && t.stateNumber != 0x7FFFFFFF {
 | 
			
		||||
					buf += d.GetStateString(s)
 | 
			
		||||
					buf += "-"
 | 
			
		||||
					buf += d.getEdgeLabel(j)
 | 
			
		||||
					buf += "->"
 | 
			
		||||
					buf += d.GetStateString(t)
 | 
			
		||||
					buf += "\n"
 | 
			
		||||
				}
 | 
			
		||||
			}
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	if len(buf) == 0 {
 | 
			
		||||
		return ""
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return buf
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// getEdgeLabel maps an edge index to a display label: index 0 is EOF, and
// other indices are shifted down by one before being looked up first in the
// literal names, then the symbolic names, falling back to the number itself.
func (d *DFASerializer) getEdgeLabel(i int) string {
	if i == 0 {
		return "EOF"
	} else if d.literalNames != nil && i-1 < len(d.literalNames) {
		return d.literalNames[i-1]
	} else if d.symbolicNames != nil && i-1 < len(d.symbolicNames) {
		return d.symbolicNames[i-1]
	}

	return strconv.Itoa(i - 1)
}
 | 
			
		||||
 | 
			
		||||
func (d *DFASerializer) GetStateString(s *DFAState) string {
 | 
			
		||||
	var a, b string
 | 
			
		||||
 | 
			
		||||
	if s.isAcceptState {
 | 
			
		||||
		a = ":"
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	if s.requiresFullContext {
 | 
			
		||||
		b = "^"
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	baseStateStr := a + "s" + strconv.Itoa(s.stateNumber) + b
 | 
			
		||||
 | 
			
		||||
	if s.isAcceptState {
 | 
			
		||||
		if s.predicates != nil {
 | 
			
		||||
			return baseStateStr + "=>" + fmt.Sprint(s.predicates)
 | 
			
		||||
		}
 | 
			
		||||
 | 
			
		||||
		return baseStateStr + "=>" + fmt.Sprint(s.prediction)
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return baseStateStr
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// LexerDFASerializer prints lexer DFAs. It differs from the embedded
// DFASerializer only in how edge labels are rendered (as quoted character
// literals rather than token names).
type LexerDFASerializer struct {
	*DFASerializer
}
 | 
			
		||||
 | 
			
		||||
func NewLexerDFASerializer(dfa *DFA) *LexerDFASerializer {
 | 
			
		||||
	return &LexerDFASerializer{DFASerializer: NewDFASerializer(dfa, nil, nil)}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (l *LexerDFASerializer) getEdgeLabel(i int) string {
 | 
			
		||||
	return "'" + string(i) + "'"
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (l *LexerDFASerializer) String() string {
 | 
			
		||||
	if l.dfa.getS0() == nil {
 | 
			
		||||
		return ""
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	buf := ""
 | 
			
		||||
	states := l.dfa.sortedStates()
 | 
			
		||||
 | 
			
		||||
	for i := 0; i < len(states); i++ {
 | 
			
		||||
		s := states[i]
 | 
			
		||||
 | 
			
		||||
		if s.edges != nil {
 | 
			
		||||
			n := len(s.edges)
 | 
			
		||||
 | 
			
		||||
			for j := 0; j < n; j++ {
 | 
			
		||||
				t := s.edges[j]
 | 
			
		||||
 | 
			
		||||
				if t != nil && t.stateNumber != 0x7FFFFFFF {
 | 
			
		||||
					buf += l.GetStateString(s)
 | 
			
		||||
					buf += "-"
 | 
			
		||||
					buf += l.getEdgeLabel(j)
 | 
			
		||||
					buf += "->"
 | 
			
		||||
					buf += l.GetStateString(t)
 | 
			
		||||
					buf += "\n"
 | 
			
		||||
				}
 | 
			
		||||
			}
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	if len(buf) == 0 {
 | 
			
		||||
		return ""
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return buf
 | 
			
		||||
}
 | 
			
		||||
							
								
								
									
										198
									
								
								vendor/github.com/antlr/antlr4/runtime/Go/antlr/dfa_state.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										198
									
								
								vendor/github.com/antlr/antlr4/runtime/Go/antlr/dfa_state.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							@@ -0,0 +1,198 @@
 | 
			
		||||
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
 | 
			
		||||
// Use of this file is governed by the BSD 3-clause license that
 | 
			
		||||
// can be found in the LICENSE.txt file in the project root.
 | 
			
		||||
 | 
			
		||||
package antlr
 | 
			
		||||
 | 
			
		||||
import (
 | 
			
		||||
	"fmt"
 | 
			
		||||
	"sync"
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
// PredPrediction maps a predicate to a predicted alternative.
 | 
			
		||||
type PredPrediction struct {
	alt  int // the alternative predicted when pred evaluates to true
	pred SemanticContext // the semantic predicate guarding alt
}
 | 
			
		||||
 | 
			
		||||
func NewPredPrediction(pred SemanticContext, alt int) *PredPrediction {
 | 
			
		||||
	return &PredPrediction{alt: alt, pred: pred}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (p *PredPrediction) String() string {
 | 
			
		||||
	return "(" + fmt.Sprint(p.pred) + ", " + fmt.Sprint(p.alt) + ")"
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// DFAState represents a set of possible ATN configurations. As Aho, Sethi,
 | 
			
		||||
// Ullman p. 117 says: "The DFA uses its state to keep track of all possible
 | 
			
		||||
// states the ATN can be in after reading each input symbol. That is to say,
 | 
			
		||||
// after reading input a1a2..an, the DFA is in a state that represents the
 | 
			
		||||
// subset T of the states of the ATN that are reachable from the ATN's start
 | 
			
		||||
// state along some path labeled a1a2..an." In conventional NFA-to-DFA
 | 
			
		||||
// conversion, therefore, the subset T would be a bitset representing the set of
 | 
			
		||||
// states the ATN could be in. We need to track the alt predicted by each state
 | 
			
		||||
// as well, however. More importantly, we need to maintain a stack of states,
 | 
			
		||||
// tracking the closure operations as they jump from rule to rule, emulating
 | 
			
		||||
// rule invocations (method calls). I have to add a stack to simulate the proper
 | 
			
		||||
// lookahead sequences for the underlying LL grammar from which the ATN was
 | 
			
		||||
// derived.
 | 
			
		||||
//
 | 
			
		||||
// I use a set of ATNConfig objects, not simple states. An ATNConfig is both a
 | 
			
		||||
// state (ala normal conversion) and a RuleContext describing the chain of rules
 | 
			
		||||
// (if any) followed to arrive at that state.
 | 
			
		||||
//
 | 
			
		||||
// A DFAState may have multiple references to a particular state, but with
 | 
			
		||||
// different ATN contexts (with same or different alts) meaning that state was
 | 
			
		||||
// reached via a different set of rule invocations.
 | 
			
		||||
type DFAState struct {
	stateNumber int
	configs     ATNConfigSet

	// edges elements point to the target of the symbol. Shift up by 1 so (-1)
	// Token.EOF maps to the first element.
	edges []*DFAState
	// edgesMu guards edges; see the getEdges/setEdges accessors below, which
	// take this lock on every access.
	edgesMu	sync.RWMutex

	isAcceptState bool

	// prediction is the ttype we match or alt we predict if the state is accept.
	// Set to ATN.INVALID_ALT_NUMBER when predicates != nil or
	// requiresFullContext.
	prediction int

	lexerActionExecutor *LexerActionExecutor

	// requiresFullContext indicates it was created during an SLL prediction that
	// discovered a conflict between the configurations in the state. Future
	// ParserATNSimulator.execATN invocations immediately jump doing
	// full context prediction if true.
	requiresFullContext bool

	// predicates is the predicates associated with the ATN configurations of the
	// DFA state during SLL parsing. When we have predicates, requiresFullContext
	// is false, since full context prediction evaluates predicates on-the-fly.
	// If predicates is not nil, then prediction is ATN.INVALID_ALT_NUMBER.
	//
	// We only use these for non-requiresFullContext but conflicting states. That
	// means we know from the context (it's $ or we don't dip into outer context)
	// that it's an ambiguity not a conflict.
	//
	// This list is computed by
	// ParserATNSimulator.predicateDFAState.
	predicates []*PredPrediction
}
 | 
			
		||||
 | 
			
		||||
func NewDFAState(stateNumber int, configs ATNConfigSet) *DFAState {
 | 
			
		||||
	if configs == nil {
 | 
			
		||||
		configs = NewBaseATNConfigSet(false)
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return &DFAState{configs: configs, stateNumber: stateNumber}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// GetAltSet gets the set of all alts mentioned by all ATN configurations in d.
 | 
			
		||||
func (d *DFAState) GetAltSet() *Set {
 | 
			
		||||
	alts := NewSet(nil, nil)
 | 
			
		||||
 | 
			
		||||
	if d.configs != nil {
 | 
			
		||||
		for _, c := range d.configs.GetItems() {
 | 
			
		||||
			alts.add(c.GetAlt())
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	if alts.length() == 0 {
 | 
			
		||||
		return nil
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return alts
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// getEdges returns the edge slice under a read lock. Callers receive the
// live slice, not a copy.
func (d *DFAState) getEdges() []*DFAState {
	d.edgesMu.RLock()
	defer d.edgesMu.RUnlock()
	return d.edges
}
 | 
			
		||||
 | 
			
		||||
// numEdges returns the number of outgoing edge slots under a read lock.
func (d *DFAState) numEdges() int {
	d.edgesMu.RLock()
	defer d.edgesMu.RUnlock()
	return len(d.edges)
}
 | 
			
		||||
 | 
			
		||||
// getIthEdge returns the i-th outgoing edge under a read lock. The index is
// not range-checked; out-of-range i panics as with any slice access.
func (d *DFAState) getIthEdge(i int) *DFAState {
	d.edgesMu.RLock()
	defer d.edgesMu.RUnlock()
	return d.edges[i]
}
 | 
			
		||||
 | 
			
		||||
// setEdges replaces the whole edge slice under the write lock.
func (d *DFAState) setEdges(newEdges []*DFAState) {
	d.edgesMu.Lock()
	defer d.edgesMu.Unlock()
	d.edges = newEdges
}
 | 
			
		||||
 | 
			
		||||
// setIthEdge stores edge at index i under the write lock. The slot must
// already exist (see setEdges); out-of-range i panics.
func (d *DFAState) setIthEdge(i int, edge *DFAState) {
	d.edgesMu.Lock()
	defer d.edgesMu.Unlock()
	d.edges[i] = edge
}
 | 
			
		||||
 | 
			
		||||
// setPrediction records the predicted alternative (or matched token type for
// lexer DFAs).
// NOTE(review): unlike the edge accessors above this takes no lock; callers
// appear to rely on external synchronization — confirm against the simulator.
func (d *DFAState) setPrediction(v int) {
	d.prediction = v
}
 | 
			
		||||
 | 
			
		||||
// equals returns whether d equals other. Two DFAStates are equal if their ATN
 | 
			
		||||
// configuration sets are the same. This method is used to see if a state
 | 
			
		||||
// already exists.
 | 
			
		||||
//
 | 
			
		||||
// Because the number of alternatives and number of ATN configurations are
 | 
			
		||||
// finite, there is a finite number of DFA states that can be processed. This is
 | 
			
		||||
// necessary to show that the algorithm terminates.
 | 
			
		||||
//
 | 
			
		||||
// Cannot test the DFA state numbers here because in
 | 
			
		||||
// ParserATNSimulator.addDFAState we need to know if any other state exists that
 | 
			
		||||
// has d exact set of ATN configurations. The stateNumber is irrelevant.
 | 
			
		||||
func (d *DFAState) equals(other interface{}) bool {
 | 
			
		||||
	if d == other {
 | 
			
		||||
		return true
 | 
			
		||||
	} else if _, ok := other.(*DFAState); !ok {
 | 
			
		||||
		return false
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return d.configs.Equals(other.(*DFAState).configs)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (d *DFAState) String() string {
 | 
			
		||||
	var s string
 | 
			
		||||
	if d.isAcceptState {
 | 
			
		||||
		if d.predicates != nil {
 | 
			
		||||
			s = "=>" + fmt.Sprint(d.predicates)
 | 
			
		||||
		} else {
 | 
			
		||||
			s = "=>" + fmt.Sprint(d.prediction)
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return fmt.Sprintf("%d:%s%s", fmt.Sprint(d.configs), s)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// hash computes a MurmurHash over the state's accept payload (predicate/alt
// pairs, or the bare prediction) and its ATN configuration set.
func (d *DFAState) hash() int {
	h := murmurInit(11)

	// c counts the number of words mixed into h; murmurFinish folds it in.
	// It starts at 1 to account for the configs.hash() update below.
	c := 1
	if d.isAcceptState {
		if d.predicates != nil {
			for _, p := range d.predicates {
				h = murmurUpdate(h, p.alt)
				h = murmurUpdate(h, p.pred.hash())
				c += 2
			}
		} else {
			h = murmurUpdate(h, d.prediction)
			c += 1
		}
	}

	h = murmurUpdate(h, d.configs.hash())
	return murmurFinish(h, c)
}
 | 
			
		||||
							
								
								
									
										111
									
								
								vendor/github.com/antlr/antlr4/runtime/Go/antlr/diagnostic_error_listener.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										111
									
								
								vendor/github.com/antlr/antlr4/runtime/Go/antlr/diagnostic_error_listener.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							@@ -0,0 +1,111 @@
 | 
			
		||||
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
 | 
			
		||||
// Use of this file is governed by the BSD 3-clause license that
 | 
			
		||||
// can be found in the LICENSE.txt file in the project root.
 | 
			
		||||
 | 
			
		||||
package antlr
 | 
			
		||||
 | 
			
		||||
import (
 | 
			
		||||
	"strconv"
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
//
 | 
			
		||||
// This implementation of {@link ANTLRErrorListener} can be used to identify
 | 
			
		||||
// certain potential correctness and performance problems in grammars. "reports"
 | 
			
		||||
// are made by calling {@link Parser//NotifyErrorListeners} with the appropriate
 | 
			
		||||
// message.
 | 
			
		||||
//
 | 
			
		||||
// <ul>
 | 
			
		||||
// <li><b>Ambiguities</b>: These are cases where more than one path through the
 | 
			
		||||
// grammar can Match the input.</li>
 | 
			
		||||
// <li><b>Weak context sensitivity</b>: These are cases where full-context
 | 
			
		||||
// prediction resolved an SLL conflict to a unique alternative which equaled the
 | 
			
		||||
// minimum alternative of the SLL conflict.</li>
 | 
			
		||||
// <li><b>Strong (forced) context sensitivity</b>: These are cases where the
 | 
			
		||||
// full-context prediction resolved an SLL conflict to a unique alternative,
 | 
			
		||||
// <em>and</em> the minimum alternative of the SLL conflict was found to not be
 | 
			
		||||
// a truly viable alternative. Two-stage parsing cannot be used for inputs where
 | 
			
		||||
// this situation occurs.</li>
 | 
			
		||||
// </ul>
 | 
			
		||||
 | 
			
		||||
type DiagnosticErrorListener struct {
	*DefaultErrorListener

	// exactOnly, when true, suppresses reports of ambiguities that are not
	// exact (see ReportAmbiguity).
	exactOnly bool
}
 | 
			
		||||
 | 
			
		||||
func NewDiagnosticErrorListener(exactOnly bool) *DiagnosticErrorListener {
 | 
			
		||||
 | 
			
		||||
	n := new(DiagnosticErrorListener)
 | 
			
		||||
 | 
			
		||||
	// whether all ambiguities or only exact ambiguities are Reported.
 | 
			
		||||
	n.exactOnly = exactOnly
 | 
			
		||||
	return n
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (d *DiagnosticErrorListener) ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs ATNConfigSet) {
 | 
			
		||||
	if d.exactOnly && !exact {
 | 
			
		||||
		return
 | 
			
		||||
	}
 | 
			
		||||
	msg := "reportAmbiguity d=" +
 | 
			
		||||
		d.getDecisionDescription(recognizer, dfa) +
 | 
			
		||||
		": ambigAlts=" +
 | 
			
		||||
		d.getConflictingAlts(ambigAlts, configs).String() +
 | 
			
		||||
		", input='" +
 | 
			
		||||
		recognizer.GetTokenStream().GetTextFromInterval(NewInterval(startIndex, stopIndex)) + "'"
 | 
			
		||||
	recognizer.NotifyErrorListeners(msg, nil, nil)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (d *DiagnosticErrorListener) ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs ATNConfigSet) {
 | 
			
		||||
 | 
			
		||||
	msg := "reportAttemptingFullContext d=" +
 | 
			
		||||
		d.getDecisionDescription(recognizer, dfa) +
 | 
			
		||||
		", input='" +
 | 
			
		||||
		recognizer.GetTokenStream().GetTextFromInterval(NewInterval(startIndex, stopIndex)) + "'"
 | 
			
		||||
	recognizer.NotifyErrorListeners(msg, nil, nil)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (d *DiagnosticErrorListener) ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs ATNConfigSet) {
 | 
			
		||||
	msg := "reportContextSensitivity d=" +
 | 
			
		||||
		d.getDecisionDescription(recognizer, dfa) +
 | 
			
		||||
		", input='" +
 | 
			
		||||
		recognizer.GetTokenStream().GetTextFromInterval(NewInterval(startIndex, stopIndex)) + "'"
 | 
			
		||||
	recognizer.NotifyErrorListeners(msg, nil, nil)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (d *DiagnosticErrorListener) getDecisionDescription(recognizer Parser, dfa *DFA) string {
 | 
			
		||||
	decision := dfa.decision
 | 
			
		||||
	ruleIndex := dfa.atnStartState.GetRuleIndex()
 | 
			
		||||
 | 
			
		||||
	ruleNames := recognizer.GetRuleNames()
 | 
			
		||||
	if ruleIndex < 0 || ruleIndex >= len(ruleNames) {
 | 
			
		||||
		return strconv.Itoa(decision)
 | 
			
		||||
	}
 | 
			
		||||
	ruleName := ruleNames[ruleIndex]
 | 
			
		||||
	if ruleName == "" {
 | 
			
		||||
		return strconv.Itoa(decision)
 | 
			
		||||
	}
 | 
			
		||||
	return strconv.Itoa(decision) + " (" + ruleName + ")"
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
//
 | 
			
		||||
// Computes the set of conflicting or ambiguous alternatives from a
 | 
			
		||||
// configuration set, if that information was not already provided by the
 | 
			
		||||
// parser.
 | 
			
		||||
//
 | 
			
		||||
// @param ReportedAlts The set of conflicting or ambiguous alternatives, as
 | 
			
		||||
// Reported by the parser.
 | 
			
		||||
// @param configs The conflicting or ambiguous configuration set.
 | 
			
		||||
// @return Returns {@code ReportedAlts} if it is not {@code nil}, otherwise
 | 
			
		||||
// returns the set of alternatives represented in {@code configs}.
 | 
			
		||||
//
 | 
			
		||||
func (d *DiagnosticErrorListener) getConflictingAlts(ReportedAlts *BitSet, set ATNConfigSet) *BitSet {
 | 
			
		||||
	if ReportedAlts != nil {
 | 
			
		||||
		return ReportedAlts
 | 
			
		||||
	}
 | 
			
		||||
	result := NewBitSet()
 | 
			
		||||
	for _, c := range set.GetItems() {
 | 
			
		||||
		result.add(c.GetAlt())
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return result
 | 
			
		||||
}
 | 
			
		||||
							
								
								
									
										108
									
								
								vendor/github.com/antlr/antlr4/runtime/Go/antlr/error_listener.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										108
									
								
								vendor/github.com/antlr/antlr4/runtime/Go/antlr/error_listener.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							@@ -0,0 +1,108 @@
 | 
			
		||||
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
 | 
			
		||||
// Use of this file is governed by the BSD 3-clause license that
 | 
			
		||||
// can be found in the LICENSE.txt file in the project root.
 | 
			
		||||
 | 
			
		||||
package antlr
 | 
			
		||||
 | 
			
		||||
import (
 | 
			
		||||
	"fmt"
 | 
			
		||||
	"os"
 | 
			
		||||
	"strconv"
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
// ErrorListener receives syntax errors and prediction diagnostics from a
// recognizer. Implementations can embed DefaultErrorListener (below), whose
// methods all do nothing, and override only what they need.
type ErrorListener interface {
	SyntaxError(recognizer Recognizer, offendingSymbol interface{}, line, column int, msg string, e RecognitionException)
	ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs ATNConfigSet)
	ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs ATNConfigSet)
	ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs ATNConfigSet)
}
 | 
			
		||||
 | 
			
		||||
// DefaultErrorListener is an ErrorListener whose methods all do nothing; it
// is meant to be embedded so implementations can override selectively.
type DefaultErrorListener struct {
}
 | 
			
		||||
 | 
			
		||||
func NewDefaultErrorListener() *DefaultErrorListener {
 | 
			
		||||
	return new(DefaultErrorListener)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// SyntaxError is a no-op; embedders override it to handle syntax errors.
func (d *DefaultErrorListener) SyntaxError(recognizer Recognizer, offendingSymbol interface{}, line, column int, msg string, e RecognitionException) {
}
 | 
			
		||||
 | 
			
		||||
// ReportAmbiguity is a no-op; embedders override it to observe ambiguities.
func (d *DefaultErrorListener) ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs ATNConfigSet) {
}
 | 
			
		||||
 | 
			
		||||
// ReportAttemptingFullContext is a no-op; embedders override it to observe
// full-context retries.
func (d *DefaultErrorListener) ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs ATNConfigSet) {
}
 | 
			
		||||
 | 
			
		||||
// ReportContextSensitivity is a no-op; embedders override it to observe
// context-sensitivity events.
func (d *DefaultErrorListener) ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs ATNConfigSet) {
}
 | 
			
		||||
 | 
			
		||||
// ConsoleErrorListener prints syntax errors to stderr and ignores all other
// events (inherited from DefaultErrorListener).
type ConsoleErrorListener struct {
	*DefaultErrorListener
}
 | 
			
		||||
 | 
			
		||||
func NewConsoleErrorListener() *ConsoleErrorListener {
 | 
			
		||||
	return new(ConsoleErrorListener)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// ConsoleErrorListenerINSTANCE is a shared default instance of
// ConsoleErrorListener.
var ConsoleErrorListenerINSTANCE = NewConsoleErrorListener()
 | 
			
		||||
 | 
			
		||||
//
 | 
			
		||||
// {@inheritDoc}
 | 
			
		||||
//
 | 
			
		||||
// <p>
 | 
			
		||||
// This implementation prints messages to {@link System//err} containing the
 | 
			
		||||
// values of {@code line}, {@code charPositionInLine}, and {@code msg} using
 | 
			
		||||
// the following format.</p>
 | 
			
		||||
//
 | 
			
		||||
// <pre>
 | 
			
		||||
// line <em>line</em>:<em>charPositionInLine</em> <em>msg</em>
 | 
			
		||||
// </pre>
 | 
			
		||||
//
 | 
			
		||||
func (c *ConsoleErrorListener) SyntaxError(recognizer Recognizer, offendingSymbol interface{}, line, column int, msg string, e RecognitionException) {
 | 
			
		||||
	fmt.Fprintln(os.Stderr, "line "+strconv.Itoa(line)+":"+strconv.Itoa(column)+" "+msg)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// ProxyErrorListener fans every event out to a fixed list of delegate
// listeners, in order.
type ProxyErrorListener struct {
	*DefaultErrorListener
	delegates []ErrorListener
}
 | 
			
		||||
 | 
			
		||||
func NewProxyErrorListener(delegates []ErrorListener) *ProxyErrorListener {
 | 
			
		||||
	if delegates == nil {
 | 
			
		||||
		panic("delegates is not provided")
 | 
			
		||||
	}
 | 
			
		||||
	l := new(ProxyErrorListener)
 | 
			
		||||
	l.delegates = delegates
 | 
			
		||||
	return l
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (p *ProxyErrorListener) SyntaxError(recognizer Recognizer, offendingSymbol interface{}, line, column int, msg string, e RecognitionException) {
 | 
			
		||||
	for _, d := range p.delegates {
 | 
			
		||||
		d.SyntaxError(recognizer, offendingSymbol, line, column, msg, e)
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (p *ProxyErrorListener) ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs ATNConfigSet) {
 | 
			
		||||
	for _, d := range p.delegates {
 | 
			
		||||
		d.ReportAmbiguity(recognizer, dfa, startIndex, stopIndex, exact, ambigAlts, configs)
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (p *ProxyErrorListener) ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs ATNConfigSet) {
 | 
			
		||||
	for _, d := range p.delegates {
 | 
			
		||||
		d.ReportAttemptingFullContext(recognizer, dfa, startIndex, stopIndex, conflictingAlts, configs)
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (p *ProxyErrorListener) ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs ATNConfigSet) {
 | 
			
		||||
	for _, d := range p.delegates {
 | 
			
		||||
		d.ReportContextSensitivity(recognizer, dfa, startIndex, stopIndex, prediction, configs)
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
							
								
								
									
										758
									
								
								vendor/github.com/antlr/antlr4/runtime/Go/antlr/error_strategy.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										758
									
								
								vendor/github.com/antlr/antlr4/runtime/Go/antlr/error_strategy.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							@@ -0,0 +1,758 @@
 | 
			
		||||
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
 | 
			
		||||
// Use of this file is governed by the BSD 3-clause license that
 | 
			
		||||
// can be found in the LICENSE.txt file in the project root.
 | 
			
		||||
 | 
			
		||||
package antlr
 | 
			
		||||
 | 
			
		||||
import (
 | 
			
		||||
	"fmt"
 | 
			
		||||
	"reflect"
 | 
			
		||||
	"strconv"
 | 
			
		||||
	"strings"
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
// ErrorStrategy dictates how a parser reports syntax errors and how it
// recovers from them; DefaultErrorStrategy below is the standard
// implementation.
type ErrorStrategy interface {
	reset(Parser)
	RecoverInline(Parser) Token
	Recover(Parser, RecognitionException)
	Sync(Parser)
	inErrorRecoveryMode(Parser) bool
	ReportError(Parser, RecognitionException)
	ReportMatch(Parser)
}
 | 
			
		||||
 | 
			
		||||
// DefaultErrorStrategy is the default implementation of ErrorStrategy used
// for error reporting and recovery in ANTLR parsers.
type DefaultErrorStrategy struct {
	// errorRecoveryMode suppresses duplicate error reports while the parser
	// is still recovering from an earlier syntax error.
	errorRecoveryMode bool
	// lastErrorIndex is the input index of the last reported error; together
	// with lastErrorStates it lets Recover detect that no token was consumed
	// since the previous error and force progress.
	lastErrorIndex    int
	lastErrorStates   *IntervalSet
}
 | 
			
		||||
 | 
			
		||||
var _ ErrorStrategy = &DefaultErrorStrategy{}
 | 
			
		||||
 | 
			
		||||
func NewDefaultErrorStrategy() *DefaultErrorStrategy {
 | 
			
		||||
 | 
			
		||||
	d := new(DefaultErrorStrategy)
 | 
			
		||||
 | 
			
		||||
	// Indicates whether the error strategy is currently "recovering from an
 | 
			
		||||
	// error". This is used to suppress Reporting multiple error messages while
 | 
			
		||||
	// attempting to recover from a detected syntax error.
 | 
			
		||||
	//
 | 
			
		||||
	// @see //inErrorRecoveryMode
 | 
			
		||||
	//
 | 
			
		||||
	d.errorRecoveryMode = false
 | 
			
		||||
 | 
			
		||||
	// The index into the input stream where the last error occurred.
 | 
			
		||||
	// This is used to prevent infinite loops where an error is found
 | 
			
		||||
	// but no token is consumed during recovery...another error is found,
 | 
			
		||||
	// ad nauseum. This is a failsafe mechanism to guarantee that at least
 | 
			
		||||
	// one token/tree node is consumed for two errors.
 | 
			
		||||
	//
 | 
			
		||||
	d.lastErrorIndex = -1
 | 
			
		||||
	d.lastErrorStates = nil
 | 
			
		||||
	return d
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// reset simply calls endErrorCondition to ensure the handler is not in
// error recovery mode.
func (d *DefaultErrorStrategy) reset(recognizer Parser) {
	d.endErrorCondition(recognizer)
}
 | 
			
		||||
 | 
			
		||||
// beginErrorCondition enters error recovery mode when a recognition
// exception is reported; further reports are suppressed until
// endErrorCondition is called.
func (d *DefaultErrorStrategy) beginErrorCondition(recognizer Parser) {
	d.errorRecoveryMode = true
}
 | 
			
		||||
 | 
			
		||||
// inErrorRecoveryMode reports whether the strategy is currently recovering
// from a syntax error.
func (d *DefaultErrorStrategy) inErrorRecoveryMode(recognizer Parser) bool {
	return d.errorRecoveryMode
}
 | 
			
		||||
 | 
			
		||||
// endErrorCondition leaves error recovery mode after recovering from a
// recognition exception, clearing the recorded error position and states.
func (d *DefaultErrorStrategy) endErrorCondition(recognizer Parser) {
	d.errorRecoveryMode = false
	d.lastErrorStates = nil
	d.lastErrorIndex = -1
}
 | 
			
		||||
 | 
			
		||||
// ReportMatch simply calls endErrorCondition: a successful match means
// recovery (if any) is complete.
func (d *DefaultErrorStrategy) ReportMatch(recognizer Parser) {
	d.endErrorCondition(recognizer)
}
 | 
			
		||||
 | 
			
		||||
//
 | 
			
		||||
// {@inheritDoc}
 | 
			
		||||
//
 | 
			
		||||
// <p>The default implementation returns immediately if the handler is already
 | 
			
		||||
// in error recovery mode. Otherwise, it calls {@link //beginErrorCondition}
 | 
			
		||||
// and dispatches the Reporting task based on the runtime type of {@code e}
 | 
			
		||||
// according to the following table.</p>
 | 
			
		||||
//
 | 
			
		||||
// <ul>
 | 
			
		||||
// <li>{@link NoViableAltException}: Dispatches the call to
 | 
			
		||||
// {@link //ReportNoViableAlternative}</li>
 | 
			
		||||
// <li>{@link InputMisMatchException}: Dispatches the call to
 | 
			
		||||
// {@link //ReportInputMisMatch}</li>
 | 
			
		||||
// <li>{@link FailedPredicateException}: Dispatches the call to
 | 
			
		||||
// {@link //ReportFailedPredicate}</li>
 | 
			
		||||
// <li>All other types: calls {@link Parser//NotifyErrorListeners} to Report
 | 
			
		||||
// the exception</li>
 | 
			
		||||
// </ul>
 | 
			
		||||
//
 | 
			
		||||
func (d *DefaultErrorStrategy) ReportError(recognizer Parser, e RecognitionException) {
 | 
			
		||||
	// if we've already Reported an error and have not Matched a token
 | 
			
		||||
	// yet successfully, don't Report any errors.
 | 
			
		||||
	if d.inErrorRecoveryMode(recognizer) {
 | 
			
		||||
		return // don't Report spurious errors
 | 
			
		||||
	}
 | 
			
		||||
	d.beginErrorCondition(recognizer)
 | 
			
		||||
 | 
			
		||||
	switch t := e.(type) {
 | 
			
		||||
	default:
 | 
			
		||||
		fmt.Println("unknown recognition error type: " + reflect.TypeOf(e).Name())
 | 
			
		||||
		//            fmt.Println(e.stack)
 | 
			
		||||
		recognizer.NotifyErrorListeners(e.GetMessage(), e.GetOffendingToken(), e)
 | 
			
		||||
	case *NoViableAltException:
 | 
			
		||||
		d.ReportNoViableAlternative(recognizer, t)
 | 
			
		||||
	case *InputMisMatchException:
 | 
			
		||||
		d.ReportInputMisMatch(recognizer, t)
 | 
			
		||||
	case *FailedPredicateException:
 | 
			
		||||
		d.ReportFailedPredicate(recognizer, t)
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// {@inheritDoc}
 | 
			
		||||
//
 | 
			
		||||
// <p>The default implementation reSynchronizes the parser by consuming tokens
 | 
			
		||||
// until we find one in the reSynchronization set--loosely the set of tokens
 | 
			
		||||
// that can follow the current rule.</p>
 | 
			
		||||
//
 | 
			
		||||
func (d *DefaultErrorStrategy) Recover(recognizer Parser, e RecognitionException) {
 | 
			
		||||
 | 
			
		||||
	if d.lastErrorIndex == recognizer.GetInputStream().Index() &&
 | 
			
		||||
		d.lastErrorStates != nil && d.lastErrorStates.contains(recognizer.GetState()) {
 | 
			
		||||
		// uh oh, another error at same token index and previously-Visited
 | 
			
		||||
		// state in ATN must be a case where LT(1) is in the recovery
 | 
			
		||||
		// token set so nothing got consumed. Consume a single token
 | 
			
		||||
		// at least to prevent an infinite loop d is a failsafe.
 | 
			
		||||
		recognizer.Consume()
 | 
			
		||||
	}
 | 
			
		||||
	d.lastErrorIndex = recognizer.GetInputStream().Index()
 | 
			
		||||
	if d.lastErrorStates == nil {
 | 
			
		||||
		d.lastErrorStates = NewIntervalSet()
 | 
			
		||||
	}
 | 
			
		||||
	d.lastErrorStates.addOne(recognizer.GetState())
 | 
			
		||||
	followSet := d.getErrorRecoverySet(recognizer)
 | 
			
		||||
	d.consumeUntil(recognizer, followSet)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Sync makes sure the current lookahead symbol is consistent with what was
// expecting at this point in the ATN. You can call it anytime, but ANTLR only
// generates code to check before subrules/loops and each iteration.
//
// Implements Jim Idle's magic Sync mechanism in closures and optional
// subrules. E.g.,
//
//	a : Sync ( stuff Sync )*
//	Sync : {consume to what can follow Sync}
//
// At the start of a sub rule upon error, Sync performs single token deletion,
// if possible. If it can't do that, it bails on the current rule and uses the
// default error recovery, which consumes until the reSynchronization set of
// the current rule.
//
// If the sub rule is optional ({@code (...)?}, {@code (...)*}, or block with
// an empty alternative), then the expected set includes what follows the
// subrule.
//
// During loop iteration, it consumes until it sees a token that can start a
// sub rule or what follows the loop. Yes, that is pretty aggressive. We opt
// to stay in the loop as long as possible.
//
// ORIGINS
//
// Previous versions of ANTLR did a poor job of their recovery within loops.
// A single mismatched or missing token would force the parser to bail out of
// the entire rules surrounding the loop. So, for rule
//
//	classfunc : 'class' ID '{' member* '}'
//
// input with an extra token between members would force the parser to consume
// until it found the next class definition rather than the next member
// definition of the current class.
//
// This functionality costs a token-set comparison at the start of the loop
// and at each iteration. If speed suffers, turn it off by overriding this
// method as a blank { }.
func (d *DefaultErrorStrategy) Sync(recognizer Parser) {
	// If already recovering, don't try to Sync
	if d.inErrorRecoveryMode(recognizer) {
		return
	}

	s := recognizer.GetInterpreter().atn.states[recognizer.GetState()]
	la := recognizer.GetTokenStream().LA(1)

	// try cheaper subset first might get lucky. seems to shave a wee bit off
	nextTokens := recognizer.GetATN().NextTokens(s, nil)
	if nextTokens.contains(TokenEpsilon) || nextTokens.contains(la) {
		return
	}

	switch s.GetStateType() {
	case ATNStateBlockStart, ATNStateStarBlockStart, ATNStatePlusBlockStart, ATNStateStarLoopEntry:
		// Report error and recover if possible
		if d.SingleTokenDeletion(recognizer) != nil {
			return
		}
		panic(NewInputMisMatchException(recognizer))
	case ATNStatePlusLoopBack, ATNStateStarLoopBack:
		// At a loop-back decision: report the extraneous token, then consume
		// until a token that can follow the loop iteration or the rule.
		d.ReportUnwantedToken(recognizer)
		expecting := NewIntervalSet()
		expecting.addSet(recognizer.GetExpectedTokens())
		whatFollowsLoopIterationOrRule := expecting.addSet(d.getErrorRecoverySet(recognizer))
		d.consumeUntil(recognizer, whatFollowsLoopIterationOrRule)
	default:
		// do nothing if we can't identify the exact kind of ATN state
	}
}
 | 
			
		||||
 | 
			
		||||
// This is called by {@link //ReportError} when the exception is a
 | 
			
		||||
// {@link NoViableAltException}.
 | 
			
		||||
//
 | 
			
		||||
// @see //ReportError
 | 
			
		||||
//
 | 
			
		||||
// @param recognizer the parser instance
 | 
			
		||||
// @param e the recognition exception
 | 
			
		||||
//
 | 
			
		||||
func (d *DefaultErrorStrategy) ReportNoViableAlternative(recognizer Parser, e *NoViableAltException) {
 | 
			
		||||
	tokens := recognizer.GetTokenStream()
 | 
			
		||||
	var input string
 | 
			
		||||
	if tokens != nil {
 | 
			
		||||
		if e.startToken.GetTokenType() == TokenEOF {
 | 
			
		||||
			input = "<EOF>"
 | 
			
		||||
		} else {
 | 
			
		||||
			input = tokens.GetTextFromTokens(e.startToken, e.offendingToken)
 | 
			
		||||
		}
 | 
			
		||||
	} else {
 | 
			
		||||
		input = "<unknown input>"
 | 
			
		||||
	}
 | 
			
		||||
	msg := "no viable alternative at input " + d.escapeWSAndQuote(input)
 | 
			
		||||
	recognizer.NotifyErrorListeners(msg, e.offendingToken, e)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
//
 | 
			
		||||
// This is called by {@link //ReportError} when the exception is an
 | 
			
		||||
// {@link InputMisMatchException}.
 | 
			
		||||
//
 | 
			
		||||
// @see //ReportError
 | 
			
		||||
//
 | 
			
		||||
// @param recognizer the parser instance
 | 
			
		||||
// @param e the recognition exception
 | 
			
		||||
//
 | 
			
		||||
func (this *DefaultErrorStrategy) ReportInputMisMatch(recognizer Parser, e *InputMisMatchException) {
 | 
			
		||||
	msg := "mismatched input " + this.GetTokenErrorDisplay(e.offendingToken) +
 | 
			
		||||
		" expecting " + e.getExpectedTokens().StringVerbose(recognizer.GetLiteralNames(), recognizer.GetSymbolicNames(), false)
 | 
			
		||||
	recognizer.NotifyErrorListeners(msg, e.offendingToken, e)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
//
 | 
			
		||||
// This is called by {@link //ReportError} when the exception is a
 | 
			
		||||
// {@link FailedPredicateException}.
 | 
			
		||||
//
 | 
			
		||||
// @see //ReportError
 | 
			
		||||
//
 | 
			
		||||
// @param recognizer the parser instance
 | 
			
		||||
// @param e the recognition exception
 | 
			
		||||
//
 | 
			
		||||
func (d *DefaultErrorStrategy) ReportFailedPredicate(recognizer Parser, e *FailedPredicateException) {
 | 
			
		||||
	ruleName := recognizer.GetRuleNames()[recognizer.GetParserRuleContext().GetRuleIndex()]
 | 
			
		||||
	msg := "rule " + ruleName + " " + e.message
 | 
			
		||||
	recognizer.NotifyErrorListeners(msg, e.offendingToken, e)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// This method is called to Report a syntax error which requires the removal
 | 
			
		||||
// of a token from the input stream. At the time d method is called, the
 | 
			
		||||
// erroneous symbol is current {@code LT(1)} symbol and has not yet been
 | 
			
		||||
// removed from the input stream. When d method returns,
 | 
			
		||||
// {@code recognizer} is in error recovery mode.
 | 
			
		||||
//
 | 
			
		||||
// <p>This method is called when {@link //singleTokenDeletion} identifies
 | 
			
		||||
// single-token deletion as a viable recovery strategy for a mismatched
 | 
			
		||||
// input error.</p>
 | 
			
		||||
//
 | 
			
		||||
// <p>The default implementation simply returns if the handler is already in
 | 
			
		||||
// error recovery mode. Otherwise, it calls {@link //beginErrorCondition} to
 | 
			
		||||
// enter error recovery mode, followed by calling
 | 
			
		||||
// {@link Parser//NotifyErrorListeners}.</p>
 | 
			
		||||
//
 | 
			
		||||
// @param recognizer the parser instance
 | 
			
		||||
//
 | 
			
		||||
func (d *DefaultErrorStrategy) ReportUnwantedToken(recognizer Parser) {
 | 
			
		||||
	if d.inErrorRecoveryMode(recognizer) {
 | 
			
		||||
		return
 | 
			
		||||
	}
 | 
			
		||||
	d.beginErrorCondition(recognizer)
 | 
			
		||||
	t := recognizer.GetCurrentToken()
 | 
			
		||||
	tokenName := d.GetTokenErrorDisplay(t)
 | 
			
		||||
	expecting := d.GetExpectedTokens(recognizer)
 | 
			
		||||
	msg := "extraneous input " + tokenName + " expecting " +
 | 
			
		||||
		expecting.StringVerbose(recognizer.GetLiteralNames(), recognizer.GetSymbolicNames(), false)
 | 
			
		||||
	recognizer.NotifyErrorListeners(msg, t, nil)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// This method is called to Report a syntax error which requires the
 | 
			
		||||
// insertion of a missing token into the input stream. At the time d
 | 
			
		||||
// method is called, the missing token has not yet been inserted. When d
 | 
			
		||||
// method returns, {@code recognizer} is in error recovery mode.
 | 
			
		||||
//
 | 
			
		||||
// <p>This method is called when {@link //singleTokenInsertion} identifies
 | 
			
		||||
// single-token insertion as a viable recovery strategy for a mismatched
 | 
			
		||||
// input error.</p>
 | 
			
		||||
//
 | 
			
		||||
// <p>The default implementation simply returns if the handler is already in
 | 
			
		||||
// error recovery mode. Otherwise, it calls {@link //beginErrorCondition} to
 | 
			
		||||
// enter error recovery mode, followed by calling
 | 
			
		||||
// {@link Parser//NotifyErrorListeners}.</p>
 | 
			
		||||
//
 | 
			
		||||
// @param recognizer the parser instance
 | 
			
		||||
//
 | 
			
		||||
func (d *DefaultErrorStrategy) ReportMissingToken(recognizer Parser) {
 | 
			
		||||
	if d.inErrorRecoveryMode(recognizer) {
 | 
			
		||||
		return
 | 
			
		||||
	}
 | 
			
		||||
	d.beginErrorCondition(recognizer)
 | 
			
		||||
	t := recognizer.GetCurrentToken()
 | 
			
		||||
	expecting := d.GetExpectedTokens(recognizer)
 | 
			
		||||
	msg := "missing " + expecting.StringVerbose(recognizer.GetLiteralNames(), recognizer.GetSymbolicNames(), false) +
 | 
			
		||||
		" at " + d.GetTokenErrorDisplay(t)
 | 
			
		||||
	recognizer.NotifyErrorListeners(msg, t, nil)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// <p>The default implementation attempts to recover from the mismatched input
 | 
			
		||||
// by using single token insertion and deletion as described below. If the
 | 
			
		||||
// recovery attempt fails, d method panics an
 | 
			
		||||
// {@link InputMisMatchException}.</p>
 | 
			
		||||
//
 | 
			
		||||
// <p><strong>EXTRA TOKEN</strong> (single token deletion)</p>
 | 
			
		||||
//
 | 
			
		||||
// <p>{@code LA(1)} is not what we are looking for. If {@code LA(2)} has the
 | 
			
		||||
// right token, however, then assume {@code LA(1)} is some extra spurious
 | 
			
		||||
// token and delete it. Then consume and return the next token (which was
 | 
			
		||||
// the {@code LA(2)} token) as the successful result of the Match operation.</p>
 | 
			
		||||
//
 | 
			
		||||
// <p>This recovery strategy is implemented by {@link
 | 
			
		||||
// //singleTokenDeletion}.</p>
 | 
			
		||||
//
 | 
			
		||||
// <p><strong>MISSING TOKEN</strong> (single token insertion)</p>
 | 
			
		||||
//
 | 
			
		||||
// <p>If current token (at {@code LA(1)}) is consistent with what could come
 | 
			
		||||
// after the expected {@code LA(1)} token, then assume the token is missing
 | 
			
		||||
// and use the parser's {@link TokenFactory} to create it on the fly. The
 | 
			
		||||
// "insertion" is performed by returning the created token as the successful
 | 
			
		||||
// result of the Match operation.</p>
 | 
			
		||||
//
 | 
			
		||||
// <p>This recovery strategy is implemented by {@link
 | 
			
		||||
// //singleTokenInsertion}.</p>
 | 
			
		||||
//
 | 
			
		||||
// <p><strong>EXAMPLE</strong></p>
 | 
			
		||||
//
 | 
			
		||||
// <p>For example, Input {@code i=(3} is clearly missing the {@code ')'}. When
 | 
			
		||||
// the parser returns from the nested call to {@code expr}, it will have
 | 
			
		||||
// call chain:</p>
 | 
			
		||||
//
 | 
			
		||||
// <pre>
 | 
			
		||||
// stat &rarr expr &rarr atom
 | 
			
		||||
// </pre>
 | 
			
		||||
//
 | 
			
		||||
// and it will be trying to Match the {@code ')'} at d point in the
 | 
			
		||||
// derivation:
 | 
			
		||||
//
 | 
			
		||||
// <pre>
 | 
			
		||||
// => ID '=' '(' INT ')' ('+' atom)* ''
 | 
			
		||||
// ^
 | 
			
		||||
// </pre>
 | 
			
		||||
//
 | 
			
		||||
// The attempt to Match {@code ')'} will fail when it sees {@code ''} and
 | 
			
		||||
// call {@link //recoverInline}. To recover, it sees that {@code LA(1)==''}
 | 
			
		||||
// is in the set of tokens that can follow the {@code ')'} token reference
 | 
			
		||||
// in rule {@code atom}. It can assume that you forgot the {@code ')'}.
 | 
			
		||||
//
 | 
			
		||||
func (d *DefaultErrorStrategy) RecoverInline(recognizer Parser) Token {
 | 
			
		||||
	// SINGLE TOKEN DELETION
 | 
			
		||||
	MatchedSymbol := d.SingleTokenDeletion(recognizer)
 | 
			
		||||
	if MatchedSymbol != nil {
 | 
			
		||||
		// we have deleted the extra token.
 | 
			
		||||
		// now, move past ttype token as if all were ok
 | 
			
		||||
		recognizer.Consume()
 | 
			
		||||
		return MatchedSymbol
 | 
			
		||||
	}
 | 
			
		||||
	// SINGLE TOKEN INSERTION
 | 
			
		||||
	if d.SingleTokenInsertion(recognizer) {
 | 
			
		||||
		return d.GetMissingSymbol(recognizer)
 | 
			
		||||
	}
 | 
			
		||||
	// even that didn't work must panic the exception
 | 
			
		||||
	panic(NewInputMisMatchException(recognizer))
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
//
 | 
			
		||||
// This method implements the single-token insertion inline error recovery
 | 
			
		||||
// strategy. It is called by {@link //recoverInline} if the single-token
 | 
			
		||||
// deletion strategy fails to recover from the mismatched input. If this
 | 
			
		||||
// method returns {@code true}, {@code recognizer} will be in error recovery
 | 
			
		||||
// mode.
 | 
			
		||||
//
 | 
			
		||||
// <p>This method determines whether or not single-token insertion is viable by
 | 
			
		||||
// checking if the {@code LA(1)} input symbol could be successfully Matched
 | 
			
		||||
// if it were instead the {@code LA(2)} symbol. If d method returns
 | 
			
		||||
// {@code true}, the caller is responsible for creating and inserting a
 | 
			
		||||
// token with the correct type to produce d behavior.</p>
 | 
			
		||||
//
 | 
			
		||||
// @param recognizer the parser instance
 | 
			
		||||
// @return {@code true} if single-token insertion is a viable recovery
 | 
			
		||||
// strategy for the current mismatched input, otherwise {@code false}
 | 
			
		||||
//
 | 
			
		||||
func (d *DefaultErrorStrategy) SingleTokenInsertion(recognizer Parser) bool {
 | 
			
		||||
	currentSymbolType := recognizer.GetTokenStream().LA(1)
 | 
			
		||||
	// if current token is consistent with what could come after current
 | 
			
		||||
	// ATN state, then we know we're missing a token error recovery
 | 
			
		||||
	// is free to conjure up and insert the missing token
 | 
			
		||||
	atn := recognizer.GetInterpreter().atn
 | 
			
		||||
	currentState := atn.states[recognizer.GetState()]
 | 
			
		||||
	next := currentState.GetTransitions()[0].getTarget()
 | 
			
		||||
	expectingAtLL2 := atn.NextTokens(next, recognizer.GetParserRuleContext())
 | 
			
		||||
	if expectingAtLL2.contains(currentSymbolType) {
 | 
			
		||||
		d.ReportMissingToken(recognizer)
 | 
			
		||||
		return true
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return false
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// This method implements the single-token deletion inline error recovery
 | 
			
		||||
// strategy. It is called by {@link //recoverInline} to attempt to recover
 | 
			
		||||
// from mismatched input. If this method returns nil, the parser and error
 | 
			
		||||
// handler state will not have changed. If this method returns non-nil,
 | 
			
		||||
// {@code recognizer} will <em>not</em> be in error recovery mode since the
 | 
			
		||||
// returned token was a successful Match.
 | 
			
		||||
//
 | 
			
		||||
// <p>If the single-token deletion is successful, d method calls
 | 
			
		||||
// {@link //ReportUnwantedToken} to Report the error, followed by
 | 
			
		||||
// {@link Parser//consume} to actually "delete" the extraneous token. Then,
 | 
			
		||||
// before returning {@link //ReportMatch} is called to signal a successful
 | 
			
		||||
// Match.</p>
 | 
			
		||||
//
 | 
			
		||||
// @param recognizer the parser instance
 | 
			
		||||
// @return the successfully Matched {@link Token} instance if single-token
 | 
			
		||||
// deletion successfully recovers from the mismatched input, otherwise
 | 
			
		||||
// {@code nil}
 | 
			
		||||
//
 | 
			
		||||
func (d *DefaultErrorStrategy) SingleTokenDeletion(recognizer Parser) Token {
 | 
			
		||||
	NextTokenType := recognizer.GetTokenStream().LA(2)
 | 
			
		||||
	expecting := d.GetExpectedTokens(recognizer)
 | 
			
		||||
	if expecting.contains(NextTokenType) {
 | 
			
		||||
		d.ReportUnwantedToken(recognizer)
 | 
			
		||||
		// print("recoverFromMisMatchedToken deleting " \
 | 
			
		||||
		// + str(recognizer.GetTokenStream().LT(1)) \
 | 
			
		||||
		// + " since " + str(recognizer.GetTokenStream().LT(2)) \
 | 
			
		||||
		// + " is what we want", file=sys.stderr)
 | 
			
		||||
		recognizer.Consume() // simply delete extra token
 | 
			
		||||
		// we want to return the token we're actually Matching
 | 
			
		||||
		MatchedSymbol := recognizer.GetCurrentToken()
 | 
			
		||||
		d.ReportMatch(recognizer) // we know current token is correct
 | 
			
		||||
		return MatchedSymbol
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Conjure up a missing token during error recovery.
 | 
			
		||||
//
 | 
			
		||||
// The recognizer attempts to recover from single missing
 | 
			
		||||
// symbols. But, actions might refer to that missing symbol.
 | 
			
		||||
// For example, x=ID {f($x)}. The action clearly assumes
 | 
			
		||||
// that there has been an identifier Matched previously and that
 | 
			
		||||
// $x points at that token. If that token is missing, but
 | 
			
		||||
// the next token in the stream is what we want we assume that
 | 
			
		||||
// d token is missing and we keep going. Because we
 | 
			
		||||
// have to return some token to replace the missing token,
 | 
			
		||||
// we have to conjure one up. This method gives the user control
 | 
			
		||||
// over the tokens returned for missing tokens. Mostly,
 | 
			
		||||
// you will want to create something special for identifier
 | 
			
		||||
// tokens. For literals such as '{' and ',', the default
 | 
			
		||||
// action in the parser or tree parser works. It simply creates
 | 
			
		||||
// a CommonToken of the appropriate type. The text will be the token.
 | 
			
		||||
// If you change what tokens must be created by the lexer,
 | 
			
		||||
// override d method to create the appropriate tokens.
 | 
			
		||||
//
 | 
			
		||||
func (d *DefaultErrorStrategy) GetMissingSymbol(recognizer Parser) Token {
 | 
			
		||||
	currentSymbol := recognizer.GetCurrentToken()
 | 
			
		||||
	expecting := d.GetExpectedTokens(recognizer)
 | 
			
		||||
	expectedTokenType := expecting.first()
 | 
			
		||||
	var tokenText string
 | 
			
		||||
 | 
			
		||||
	if expectedTokenType == TokenEOF {
 | 
			
		||||
		tokenText = "<missing EOF>"
 | 
			
		||||
	} else {
 | 
			
		||||
		ln := recognizer.GetLiteralNames()
 | 
			
		||||
		if expectedTokenType > 0 && expectedTokenType < len(ln) {
 | 
			
		||||
			tokenText = "<missing " + recognizer.GetLiteralNames()[expectedTokenType] + ">"
 | 
			
		||||
		} else {
 | 
			
		||||
			tokenText = "<missing undefined>" // TODO matches the JS impl
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
	current := currentSymbol
 | 
			
		||||
	lookback := recognizer.GetTokenStream().LT(-1)
 | 
			
		||||
	if current.GetTokenType() == TokenEOF && lookback != nil {
 | 
			
		||||
		current = lookback
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	tf := recognizer.GetTokenFactory()
 | 
			
		||||
 | 
			
		||||
	return tf.Create(current.GetSource(), expectedTokenType, tokenText, TokenDefaultChannel, -1, -1, current.GetLine(), current.GetColumn())
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// GetExpectedTokens returns the set of tokens expected at the current parser
// state, delegating directly to the recognizer.
func (d *DefaultErrorStrategy) GetExpectedTokens(recognizer Parser) *IntervalSet {
	return recognizer.GetExpectedTokens()
}
 | 
			
		||||
 | 
			
		||||
// How should a token be displayed in an error message? The default
 | 
			
		||||
// is to display just the text, but during development you might
 | 
			
		||||
// want to have a lot of information spit out. Override in that case
 | 
			
		||||
// to use t.String() (which, for CommonToken, dumps everything about
 | 
			
		||||
// the token). This is better than forcing you to override a method in
 | 
			
		||||
// your token objects because you don't have to go modify your lexer
 | 
			
		||||
// so that it creates a NewJava type.
 | 
			
		||||
//
 | 
			
		||||
func (d *DefaultErrorStrategy) GetTokenErrorDisplay(t Token) string {
 | 
			
		||||
	if t == nil {
 | 
			
		||||
		return "<no token>"
 | 
			
		||||
	}
 | 
			
		||||
	s := t.GetText()
 | 
			
		||||
	if s == "" {
 | 
			
		||||
		if t.GetTokenType() == TokenEOF {
 | 
			
		||||
			s = "<EOF>"
 | 
			
		||||
		} else {
 | 
			
		||||
			s = "<" + strconv.Itoa(t.GetTokenType()) + ">"
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
	return d.escapeWSAndQuote(s)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (d *DefaultErrorStrategy) escapeWSAndQuote(s string) string {
 | 
			
		||||
	s = strings.Replace(s, "\t", "\\t", -1)
 | 
			
		||||
	s = strings.Replace(s, "\n", "\\n", -1)
 | 
			
		||||
	s = strings.Replace(s, "\r", "\\r", -1)
 | 
			
		||||
	return "'" + s + "'"
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Compute the error recovery set for the current rule. During
 | 
			
		||||
// rule invocation, the parser pushes the set of tokens that can
 | 
			
		||||
// follow that rule reference on the stack d amounts to
 | 
			
		||||
// computing FIRST of what follows the rule reference in the
 | 
			
		||||
// enclosing rule. See LinearApproximator.FIRST().
 | 
			
		||||
// This local follow set only includes tokens
 | 
			
		||||
// from within the rule i.e., the FIRST computation done by
 | 
			
		||||
// ANTLR stops at the end of a rule.
 | 
			
		||||
//
 | 
			
		||||
// EXAMPLE
 | 
			
		||||
//
 | 
			
		||||
// When you find a "no viable alt exception", the input is not
 | 
			
		||||
// consistent with any of the alternatives for rule r. The best
 | 
			
		||||
// thing to do is to consume tokens until you see something that
 | 
			
		||||
// can legally follow a call to r//or* any rule that called r.
 | 
			
		||||
// You don't want the exact set of viable next tokens because the
 | 
			
		||||
// input might just be missing a token--you might consume the
 | 
			
		||||
// rest of the input looking for one of the missing tokens.
 | 
			
		||||
//
 | 
			
		||||
// Consider grammar:
 | 
			
		||||
//
 | 
			
		||||
// a : '[' b ']'
 | 
			
		||||
// | '(' b ')'
 | 
			
		||||
//
 | 
			
		||||
// b : c '^' INT
 | 
			
		||||
// c : ID
 | 
			
		||||
// | INT
 | 
			
		||||
//
 | 
			
		||||
//
 | 
			
		||||
// At each rule invocation, the set of tokens that could follow
 | 
			
		||||
// that rule is pushed on a stack. Here are the various
 | 
			
		||||
// context-sensitive follow sets:
 | 
			
		||||
//
 | 
			
		||||
// FOLLOW(b1_in_a) = FIRST(']') = ']'
 | 
			
		||||
// FOLLOW(b2_in_a) = FIRST(')') = ')'
 | 
			
		||||
// FOLLOW(c_in_b) = FIRST('^') = '^'
 | 
			
		||||
//
 | 
			
		||||
// Upon erroneous input "[]", the call chain is
 | 
			
		||||
//
 | 
			
		||||
// a -> b -> c
 | 
			
		||||
//
 | 
			
		||||
// and, hence, the follow context stack is:
 | 
			
		||||
//
 | 
			
		||||
// depth follow set start of rule execution
 | 
			
		||||
// 0 <EOF> a (from main())
 | 
			
		||||
// 1 ']' b
 | 
			
		||||
// 2 '^' c
 | 
			
		||||
//
 | 
			
		||||
// Notice that ')' is not included, because b would have to have
 | 
			
		||||
// been called from a different context in rule a for ')' to be
 | 
			
		||||
// included.
 | 
			
		||||
//
 | 
			
		||||
// For error recovery, we cannot consider FOLLOW(c)
 | 
			
		||||
// (context-sensitive or otherwise). We need the combined set of
 | 
			
		||||
// all context-sensitive FOLLOW sets--the set of all tokens that
 | 
			
		||||
// could follow any reference in the call chain. We need to
 | 
			
		||||
// reSync to one of those tokens. Note that FOLLOW(c)='^' and if
 | 
			
		||||
// we reSync'd to that token, we'd consume until EOF. We need to
 | 
			
		||||
// Sync to context-sensitive FOLLOWs for a, b, and c: {']','^'}.
 | 
			
		||||
// In this case, for input "[]", LA(1) is ']' and in the set, so we would
 | 
			
		||||
// not consume anything. After printing an error, rule c would
 | 
			
		||||
// return normally. Rule b would not find the required '^' though.
 | 
			
		||||
// At this point, it gets a mismatched token error and panics an
 | 
			
		||||
// exception (since LA(1) is not in the viable following token
 | 
			
		||||
// set). The rule exception handler tries to recover, but finds
 | 
			
		||||
// the same recovery set and doesn't consume anything. Rule b
 | 
			
		||||
// exits normally returning to rule a. Now it finds the ']' (and
 | 
			
		||||
// with the successful Match exits errorRecovery mode).
 | 
			
		||||
//
 | 
			
		||||
// So, you can see that the parser walks up the call chain looking
 | 
			
		||||
// for the token that was a member of the recovery set.
 | 
			
		||||
//
 | 
			
		||||
// Errors are not generated in errorRecovery mode.
 | 
			
		||||
//
 | 
			
		||||
// ANTLR's error recovery mechanism is based upon original ideas:
 | 
			
		||||
//
 | 
			
		||||
// "Algorithms + Data Structures = Programs" by Niklaus Wirth
 | 
			
		||||
//
 | 
			
		||||
// and
 | 
			
		||||
//
 | 
			
		||||
// "A note on error recovery in recursive descent parsers":
 | 
			
		||||
// http://portal.acm.org/citation.cfm?id=947902.947905
 | 
			
		||||
//
 | 
			
		||||
// Later, Josef Grosch had some good ideas:
 | 
			
		||||
//
 | 
			
		||||
// "Efficient and Comfortable Error Recovery in Recursive Descent
 | 
			
		||||
// Parsers":
 | 
			
		||||
// ftp://www.cocolab.com/products/cocktail/doca4.ps/ell.ps.zip
 | 
			
		||||
//
 | 
			
		||||
// Like Grosch I implement context-sensitive FOLLOW sets that are combined
 | 
			
		||||
// at run-time upon error to avoid overhead during parsing.
 | 
			
		||||
//
 | 
			
		||||
func (d *DefaultErrorStrategy) getErrorRecoverySet(recognizer Parser) *IntervalSet {
 | 
			
		||||
	atn := recognizer.GetInterpreter().atn
 | 
			
		||||
	ctx := recognizer.GetParserRuleContext()
 | 
			
		||||
	recoverSet := NewIntervalSet()
 | 
			
		||||
	for ctx != nil && ctx.GetInvokingState() >= 0 {
 | 
			
		||||
		// compute what follows who invoked us
 | 
			
		||||
		invokingState := atn.states[ctx.GetInvokingState()]
 | 
			
		||||
		rt := invokingState.GetTransitions()[0]
 | 
			
		||||
		follow := atn.NextTokens(rt.(*RuleTransition).followState, nil)
 | 
			
		||||
		recoverSet.addSet(follow)
 | 
			
		||||
		ctx = ctx.GetParent().(ParserRuleContext)
 | 
			
		||||
	}
 | 
			
		||||
	recoverSet.removeOne(TokenEpsilon)
 | 
			
		||||
	return recoverSet
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Consume tokens until one Matches the given token set.//
 | 
			
		||||
func (d *DefaultErrorStrategy) consumeUntil(recognizer Parser, set *IntervalSet) {
 | 
			
		||||
	ttype := recognizer.GetTokenStream().LA(1)
 | 
			
		||||
	for ttype != TokenEOF && !set.contains(ttype) {
 | 
			
		||||
		recognizer.Consume()
 | 
			
		||||
		ttype = recognizer.GetTokenStream().LA(1)
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
//
 | 
			
		||||
// This implementation of {@link ANTLRErrorStrategy} responds to syntax errors
 | 
			
		||||
// by immediately canceling the parse operation with a
 | 
			
		||||
// {@link ParseCancellationException}. The implementation ensures that the
 | 
			
		||||
// {@link ParserRuleContext//exception} field is set for all parse tree nodes
 | 
			
		||||
// that were not completed prior to encountering the error.
 | 
			
		||||
//
 | 
			
		||||
// <p>
 | 
			
		||||
// This error strategy is useful in the following scenarios.</p>
 | 
			
		||||
//
 | 
			
		||||
// <ul>
 | 
			
		||||
// <li><strong>Two-stage parsing:</strong> This error strategy allows the first
 | 
			
		||||
// stage of two-stage parsing to immediately terminate if an error is
 | 
			
		||||
// encountered, and immediately fall back to the second stage. In addition to
 | 
			
		||||
// avoiding wasted work by attempting to recover from errors here, the empty
 | 
			
		||||
// implementation of {@link BailErrorStrategy//Sync} improves the performance of
 | 
			
		||||
// the first stage.</li>
 | 
			
		||||
// <li><strong>Silent validation:</strong> When syntax errors are not being
 | 
			
		||||
// Reported or logged, and the parse result is simply ignored if errors occur,
 | 
			
		||||
// the {@link BailErrorStrategy} avoids wasting work on recovering from errors
 | 
			
		||||
// when the result will be ignored either way.</li>
 | 
			
		||||
// </ul>
 | 
			
		||||
//
 | 
			
		||||
// <p>
 | 
			
		||||
// {@code myparser.setErrorHandler(NewBailErrorStrategy())}</p>
 | 
			
		||||
//
 | 
			
		||||
// @see Parser//setErrorHandler(ANTLRErrorStrategy)
 | 
			
		||||
 | 
			
		||||
type BailErrorStrategy struct {
 | 
			
		||||
	*DefaultErrorStrategy
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
var _ ErrorStrategy = &BailErrorStrategy{}
 | 
			
		||||
 | 
			
		||||
func NewBailErrorStrategy() *BailErrorStrategy {
 | 
			
		||||
 | 
			
		||||
	b := new(BailErrorStrategy)
 | 
			
		||||
 | 
			
		||||
	b.DefaultErrorStrategy = NewDefaultErrorStrategy()
 | 
			
		||||
 | 
			
		||||
	return b
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Recover, instead of recovering from exception e, re-panics it wrapped in a
// ParseCancellationException so it is not caught by the rule function
// recovery code. Use the cause to get the original RecognitionException.
func (b *BailErrorStrategy) Recover(recognizer Parser, e RecognitionException) {
	// Attach the exception to every context up the invocation chain so
	// partially-built parse-tree nodes record why they were not completed.
	context := recognizer.GetParserRuleContext()
	for context != nil {
		context.SetException(e)
		// NOTE(review): assumes GetParent() always yields a value assertable
		// to ParserRuleContext (or a typed nil that compares nil) — the type
		// assertion would panic otherwise; confirm against RuleContext impls.
		context = context.GetParent().(ParserRuleContext)
	}
	panic(NewParseCancellationException()) // TODO we don't emit e properly
}
 | 
			
		||||
 | 
			
		||||
// RecoverInline makes sure we don't attempt to recover inline: it delegates
// to b.Recover, which unconditionally panics, so the trailing return is never
// reached. If the parser successfully recovers, it won't panic an exception.
func (b *BailErrorStrategy) RecoverInline(recognizer Parser) Token {
	b.Recover(recognizer, NewInputMisMatchException(recognizer))

	return nil
}
 | 
			
		||||
 | 
			
		||||
// Sync is intentionally a no-op: make sure we don't attempt to recover from
// problems in subrules.
func (b *BailErrorStrategy) Sync(recognizer Parser) {
	// pass
}
 | 
			
		||||
							
								
								
									
										241
									
								
								vendor/github.com/antlr/antlr4/runtime/Go/antlr/errors.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										241
									
								
								vendor/github.com/antlr/antlr4/runtime/Go/antlr/errors.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							@@ -0,0 +1,241 @@
 | 
			
		||||
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
 | 
			
		||||
// Use of this file is governed by the BSD 3-clause license that
 | 
			
		||||
// can be found in the LICENSE.txt file in the project root.
 | 
			
		||||
 | 
			
		||||
package antlr
 | 
			
		||||
 | 
			
		||||
// RecognitionException is the root of the ANTLR exception hierarchy. In
// general, ANTLR tracks just 3 kinds of errors: prediction errors,
// failed predicate errors, and mismatched input errors. In each case,
// the parser knows where it is in the input, where it is in the ATN, the
// rule invocation stack, and what kind of problem occurred.
type RecognitionException interface {
	GetOffendingToken() Token
	GetMessage() string
	GetInputStream() IntStream
}
 | 
			
		||||
 | 
			
		||||
// BaseRecognitionException holds the state shared by every concrete
// recognition exception in this package.
type BaseRecognitionException struct {
	message        string      // human-readable description of the error
	recognizer     Recognizer  // recognizer that raised the error; may be nil
	offendingToken Token       // token at which the error occurred; may be nil
	offendingState int         // ATN state number at the time of the error; -1 if unknown
	ctx            RuleContext // rule context in which the error occurred
	input          IntStream   // input stream the recognizer was reading
}
 | 
			
		||||
 | 
			
		||||
func NewBaseRecognitionException(message string, recognizer Recognizer, input IntStream, ctx RuleContext) *BaseRecognitionException {
 | 
			
		||||
 | 
			
		||||
	// todo
 | 
			
		||||
	//	Error.call(this)
 | 
			
		||||
	//
 | 
			
		||||
	//	if (!!Error.captureStackTrace) {
 | 
			
		||||
	//        Error.captureStackTrace(this, RecognitionException)
 | 
			
		||||
	//	} else {
 | 
			
		||||
	//		stack := NewError().stack
 | 
			
		||||
	//	}
 | 
			
		||||
	// TODO may be able to use - "runtime" func Stack(buf []byte, all bool) int
 | 
			
		||||
 | 
			
		||||
	t := new(BaseRecognitionException)
 | 
			
		||||
 | 
			
		||||
	t.message = message
 | 
			
		||||
	t.recognizer = recognizer
 | 
			
		||||
	t.input = input
 | 
			
		||||
	t.ctx = ctx
 | 
			
		||||
	// The current {@link Token} when an error occurred. Since not all streams
 | 
			
		||||
	// support accessing symbols by index, we have to track the {@link Token}
 | 
			
		||||
	// instance itself.
 | 
			
		||||
	t.offendingToken = nil
 | 
			
		||||
	// Get the ATN state number the parser was in at the time the error
 | 
			
		||||
	// occurred. For {@link NoViableAltException} and
 | 
			
		||||
	// {@link LexerNoViableAltException} exceptions, this is the
 | 
			
		||||
	// {@link DecisionState} number. For others, it is the state whose outgoing
 | 
			
		||||
	// edge we couldn't Match.
 | 
			
		||||
	t.offendingState = -1
 | 
			
		||||
	if t.recognizer != nil {
 | 
			
		||||
		t.offendingState = t.recognizer.GetState()
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return t
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// GetMessage returns the human-readable description of the error.
func (b *BaseRecognitionException) GetMessage() string {
	return b.message
}
 | 
			
		||||
 | 
			
		||||
// GetOffendingToken returns the token at which the error occurred, or
// nil if none has been recorded.
func (b *BaseRecognitionException) GetOffendingToken() Token {
	return b.offendingToken
}
 | 
			
		||||
 | 
			
		||||
// GetInputStream returns the input stream the recognizer was reading
// when the error occurred.
func (b *BaseRecognitionException) GetInputStream() IntStream {
	return b.input
}
 | 
			
		||||
 | 
			
		||||
// getExpectedTokens returns the set of input symbols which could
// potentially follow the previously matched symbol at the time this
// exception was raised.
//
// If the set of expected tokens is not known and could not be computed
// (no recognizer is attached), it returns nil.
func (b *BaseRecognitionException) getExpectedTokens() *IntervalSet {
	if b.recognizer != nil {
		return b.recognizer.GetATN().getExpectedTokens(b.offendingState, b.ctx)
	}

	return nil
}
 | 
			
		||||
 | 
			
		||||
// String implements fmt.Stringer by returning the exception message.
func (b *BaseRecognitionException) String() string {
	return b.message
}
 | 
			
		||||
 | 
			
		||||
// LexerNoViableAltException is raised when the lexer cannot match the
// remaining input against any token definition.
type LexerNoViableAltException struct {
	*BaseRecognitionException

	startIndex     int          // input index where matching of the failed token started
	deadEndConfigs ATNConfigSet // ATN configurations active when no viable alternative remained
}
 | 
			
		||||
 | 
			
		||||
func NewLexerNoViableAltException(lexer Lexer, input CharStream, startIndex int, deadEndConfigs ATNConfigSet) *LexerNoViableAltException {
 | 
			
		||||
 | 
			
		||||
	l := new(LexerNoViableAltException)
 | 
			
		||||
 | 
			
		||||
	l.BaseRecognitionException = NewBaseRecognitionException("", lexer, input, nil)
 | 
			
		||||
 | 
			
		||||
	l.startIndex = startIndex
 | 
			
		||||
	l.deadEndConfigs = deadEndConfigs
 | 
			
		||||
 | 
			
		||||
	return l
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// String renders the exception name followed by the character at the
// start index, when that index still lies inside the input stream.
func (l *LexerNoViableAltException) String() string {
	symbol := ""
	if l.startIndex >= 0 && l.startIndex < l.input.Size() {
		// NOTE(review): NewInterval(start, start) is empty under the
		// half-open Interval convention, but the CharStream's
		// GetTextFromInterval treats Stop inclusively (see
		// InputStream.GetText), so this yields the single character at
		// startIndex — confirm against the CharStream in use.
		symbol = l.input.(CharStream).GetTextFromInterval(NewInterval(l.startIndex, l.startIndex))
	}
	return "LexerNoViableAltException" + symbol
}
 | 
			
		||||
 | 
			
		||||
// NoViableAltException indicates that the parser could not decide which
// of two or more paths to take based upon the remaining input.
type NoViableAltException struct {
	*BaseRecognitionException

	startToken     Token             // first token of the offending input span
	offendingToken Token             // token the parser could not match
	ctx            ParserRuleContext // rule context in which the error occurred
	deadEndConfigs ATNConfigSet      // ATN configurations active at the point of failure
}
 | 
			
		||||
 | 
			
		||||
// Indicates that the parser could not decide which of two or more paths
 | 
			
		||||
// to take based upon the remaining input. It tracks the starting token
 | 
			
		||||
// of the offending input and also knows where the parser was
 | 
			
		||||
// in the various paths when the error. Reported by ReportNoViableAlternative()
 | 
			
		||||
//
 | 
			
		||||
func NewNoViableAltException(recognizer Parser, input TokenStream, startToken Token, offendingToken Token, deadEndConfigs ATNConfigSet, ctx ParserRuleContext) *NoViableAltException {
 | 
			
		||||
 | 
			
		||||
	if ctx == nil {
 | 
			
		||||
		ctx = recognizer.GetParserRuleContext()
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	if offendingToken == nil {
 | 
			
		||||
		offendingToken = recognizer.GetCurrentToken()
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	if startToken == nil {
 | 
			
		||||
		startToken = recognizer.GetCurrentToken()
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	if input == nil {
 | 
			
		||||
		input = recognizer.GetInputStream().(TokenStream)
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	n := new(NoViableAltException)
 | 
			
		||||
	n.BaseRecognitionException = NewBaseRecognitionException("", recognizer, input, ctx)
 | 
			
		||||
 | 
			
		||||
	// Which configurations did we try at input.Index() that couldn't Match
 | 
			
		||||
	// input.LT(1)?//
 | 
			
		||||
	n.deadEndConfigs = deadEndConfigs
 | 
			
		||||
	// The token object at the start index the input stream might
 | 
			
		||||
	// not be buffering tokens so get a reference to it. (At the
 | 
			
		||||
	// time the error occurred, of course the stream needs to keep a
 | 
			
		||||
	// buffer all of the tokens but later we might not have access to those.)
 | 
			
		||||
	n.startToken = startToken
 | 
			
		||||
	n.offendingToken = offendingToken
 | 
			
		||||
 | 
			
		||||
	return n
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// InputMisMatchException signifies any kind of mismatched input error,
// such as when the current input does not match the expected token.
type InputMisMatchException struct {
	*BaseRecognitionException
}
 | 
			
		||||
 | 
			
		||||
// This signifies any kind of mismatched input exceptions such as
 | 
			
		||||
// when the current input does not Match the expected token.
 | 
			
		||||
//
 | 
			
		||||
func NewInputMisMatchException(recognizer Parser) *InputMisMatchException {
 | 
			
		||||
 | 
			
		||||
	i := new(InputMisMatchException)
 | 
			
		||||
	i.BaseRecognitionException = NewBaseRecognitionException("", recognizer, recognizer.GetInputStream(), recognizer.GetParserRuleContext())
 | 
			
		||||
 | 
			
		||||
	i.offendingToken = recognizer.GetCurrentToken()
 | 
			
		||||
 | 
			
		||||
	return i
 | 
			
		||||
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// FailedPredicateException reports that a semantic predicate failed
// during validation. Validation of predicates occurs when normally
// parsing the alternative just like matching a token. Disambiguating
// predicate evaluation occurs when we test a predicate during
// prediction.
type FailedPredicateException struct {
	*BaseRecognitionException

	ruleIndex      int    // rule containing the failed predicate; 0 if unknown
	predicateIndex int    // index of the predicate within the rule; 0 if unknown
	predicate      string // source text of the predicate itself
}
 | 
			
		||||
 | 
			
		||||
// NewFailedPredicateException records a semantic-predicate failure at
// the recognizer's current state. When the current ATN state's first
// outgoing transition is a predicate transition, the rule and predicate
// indexes are copied from it; otherwise both default to 0.
func NewFailedPredicateException(recognizer Parser, predicate string, message string) *FailedPredicateException {

	f := new(FailedPredicateException)

	f.BaseRecognitionException = NewBaseRecognitionException(f.formatMessage(predicate, message), recognizer, recognizer.GetInputStream(), recognizer.GetParserRuleContext())

	// Inspect the ATN state the parser is currently in to locate the
	// transition carrying the predicate metadata.
	s := recognizer.GetInterpreter().atn.states[recognizer.GetState()]
	trans := s.GetTransitions()[0]
	if trans2, ok := trans.(*PredicateTransition); ok {
		f.ruleIndex = trans2.ruleIndex
		f.predicateIndex = trans2.predIndex
	} else {
		f.ruleIndex = 0
		f.predicateIndex = 0
	}
	f.predicate = predicate
	f.offendingToken = recognizer.GetCurrentToken()

	return f
}
 | 
			
		||||
 | 
			
		||||
func (f *FailedPredicateException) formatMessage(predicate, message string) string {
 | 
			
		||||
	if message != "" {
 | 
			
		||||
		return message
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return "failed predicate: {" + predicate + "}?"
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// ParseCancellationException is the panic value used by BailErrorStrategy
// to abort parsing outright rather than attempt error recovery.
type ParseCancellationException struct {
}

// NewParseCancellationException returns a fresh cancellation marker; the
// type carries no state of its own.
func NewParseCancellationException() *ParseCancellationException {
	return &ParseCancellationException{}
}
 | 
			
		||||
							
								
								
									
										49
									
								
								vendor/github.com/antlr/antlr4/runtime/Go/antlr/file_stream.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										49
									
								
								vendor/github.com/antlr/antlr4/runtime/Go/antlr/file_stream.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							@@ -0,0 +1,49 @@
 | 
			
		||||
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
 | 
			
		||||
// Use of this file is governed by the BSD 3-clause license that
 | 
			
		||||
// can be found in the LICENSE.txt file in the project root.
 | 
			
		||||
 | 
			
		||||
package antlr
 | 
			
		||||
 | 
			
		||||
import (
 | 
			
		||||
	"bytes"
 | 
			
		||||
	"io"
 | 
			
		||||
	"os"
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
// FileStream is an InputStream that is loaded from a file all at once
// when you construct the object.
type FileStream struct {
	*InputStream

	filename string // path the stream was loaded from; reported by GetSourceName
}
 | 
			
		||||
 | 
			
		||||
func NewFileStream(fileName string) (*FileStream, error) {
 | 
			
		||||
 | 
			
		||||
	buf := bytes.NewBuffer(nil)
 | 
			
		||||
 | 
			
		||||
	f, err := os.Open(fileName)
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		return nil, err
 | 
			
		||||
	}
 | 
			
		||||
	defer f.Close()
 | 
			
		||||
	_, err = io.Copy(buf, f)
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		return nil, err
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	fs := new(FileStream)
 | 
			
		||||
 | 
			
		||||
	fs.filename = fileName
 | 
			
		||||
	s := string(buf.Bytes())
 | 
			
		||||
 | 
			
		||||
	fs.InputStream = NewInputStream(s)
 | 
			
		||||
 | 
			
		||||
	return fs, nil
 | 
			
		||||
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// GetSourceName returns the path of the file the stream was loaded from.
func (f *FileStream) GetSourceName() string {
	return f.filename
}
 | 
			
		||||
							
								
								
									
										3
									
								
								vendor/github.com/antlr/antlr4/runtime/Go/antlr/go.mod
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										3
									
								
								vendor/github.com/antlr/antlr4/runtime/Go/antlr/go.mod
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							@@ -0,0 +1,3 @@
 | 
			
		||||
module github.com/antlr/antlr4/runtime/Go/antlr
 | 
			
		||||
 | 
			
		||||
go 1.16
 | 
			
		||||
							
								
								
									
										113
									
								
								vendor/github.com/antlr/antlr4/runtime/Go/antlr/input_stream.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										113
									
								
								vendor/github.com/antlr/antlr4/runtime/Go/antlr/input_stream.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							@@ -0,0 +1,113 @@
 | 
			
		||||
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
 | 
			
		||||
// Use of this file is governed by the BSD 3-clause license that
 | 
			
		||||
// can be found in the LICENSE.txt file in the project root.
 | 
			
		||||
 | 
			
		||||
package antlr
 | 
			
		||||
 | 
			
		||||
// InputStream is an in-memory character stream over a string, decoded to
// runes up front so that indexing is per character rather than per byte.
type InputStream struct {
	name  string // fixed to "<empty>" at construction; GetSourceName does not use it
	index int    // current position, in runes
	data  []rune // the full decoded input
	size  int    // len(data), cached
}

// NewInputStream decodes data into runes and returns a stream positioned
// at the start of the input.
func NewInputStream(data string) *InputStream {
	runes := []rune(data)
	return &InputStream{
		name:  "<empty>",
		index: 0,
		data:  runes,
		size:  len(runes), // number of runes, not bytes
	}
}
 | 
			
		||||
 | 
			
		||||
// reset rewinds the stream to the first rune.
func (is *InputStream) reset() {
	is.index = 0
}
 | 
			
		||||
 | 
			
		||||
// Consume advances past the current rune. Consuming past the end of the
// input is a programming error and panics.
func (is *InputStream) Consume() {
	if is.index >= is.size {
		// assert is.LA(1) == TokenEOF
		panic("cannot consume EOF")
	}
	is.index++
}
 | 
			
		||||
 | 
			
		||||
// LA returns the rune at the given lookahead offset without consuming
// anything. Offsets are 1-based: LA(1) is the current rune and LA(-1)
// the previous one. LA(0) is undefined and returns 0; any position
// outside the input yields TokenEOF.
func (is *InputStream) LA(offset int) int {

	if offset == 0 {
		return 0 // nil
	}
	if offset < 0 {
		offset++ // e.g., translate LA(-1) to use offset=0
	}
	pos := is.index + offset - 1

	if pos < 0 || pos >= is.size { // invalid
		return TokenEOF
	}

	return int(is.data[pos])
}
 | 
			
		||||
 | 
			
		||||
// LT is an alias for LA: on a character stream, symbols and tokens
// coincide.
func (is *InputStream) LT(offset int) int {
	return is.LA(offset)
}
 | 
			
		||||
 | 
			
		||||
// Index returns the current position in the stream, in runes.
func (is *InputStream) Index() int {
	return is.index
}
 | 
			
		||||
 | 
			
		||||
// Size returns the total number of runes in the input.
func (is *InputStream) Size() int {
	return is.size
}
 | 
			
		||||
 | 
			
		||||
// Mark is a no-op returning -1: the entire input is already buffered, so
// nothing needs to be pinned.
func (is *InputStream) Mark() int {
	return -1
}
 | 
			
		||||
 | 
			
		||||
// Release is a no-op; see Mark.
func (is *InputStream) Release(marker int) {
}
 | 
			
		||||
 | 
			
		||||
// Seek moves the stream position to index. Seeking backwards (or in
// place) just jumps without updating any other stream state; seeking
// forwards is clamped to the end of the input.
func (is *InputStream) Seek(index int) {
	if index <= is.index {
		is.index = index // just jump don't update stream state (line,...)
		return
	}
	// seek forward
	is.index = intMin(index, is.size)
}
 | 
			
		||||
 | 
			
		||||
func (is *InputStream) GetText(start int, stop int) string {
 | 
			
		||||
	if stop >= is.size {
 | 
			
		||||
		stop = is.size - 1
 | 
			
		||||
	}
 | 
			
		||||
	if start >= is.size {
 | 
			
		||||
		return ""
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return string(is.data[start : stop+1])
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// GetTextFromTokens returns the text spanned by the two tokens,
// inclusive, using their token indexes. If either argument is nil the
// result is the empty string.
func (is *InputStream) GetTextFromTokens(start, stop Token) string {
	if start != nil && stop != nil {
		return is.GetTextFromInterval(NewInterval(start.GetTokenIndex(), stop.GetTokenIndex()))
	}

	return ""
}
 | 
			
		||||
 | 
			
		||||
// GetTextFromInterval returns the text covered by i, treating i.Stop as
// inclusive (GetText semantics).
func (is *InputStream) GetTextFromInterval(i *Interval) string {
	return is.GetText(i.Start, i.Stop)
}
 | 
			
		||||
 | 
			
		||||
// GetSourceName returns a fixed placeholder: plain string streams carry
// no file name.
func (*InputStream) GetSourceName() string {
	return "Obtained from string"
}
 | 
			
		||||
 | 
			
		||||
// String returns the entire input as a string.
func (is *InputStream) String() string {
	return string(is.data)
}
 | 
			
		||||
							
								
								
									
										16
									
								
								vendor/github.com/antlr/antlr4/runtime/Go/antlr/int_stream.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										16
									
								
								vendor/github.com/antlr/antlr4/runtime/Go/antlr/int_stream.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							@@ -0,0 +1,16 @@
 | 
			
		||||
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
 | 
			
		||||
// Use of this file is governed by the BSD 3-clause license that
 | 
			
		||||
// can be found in the LICENSE.txt file in the project root.
 | 
			
		||||
 | 
			
		||||
package antlr
 | 
			
		||||
 | 
			
		||||
// IntStream is the minimal stream interface shared by character and
// token streams: sequential consumption with arbitrary lookahead,
// mark/release bookkeeping, and random-access seeking.
type IntStream interface {
	Consume()
	LA(int) int
	Mark() int
	Release(marker int)
	Index() int
	Seek(index int)
	Size() int
	GetSourceName() string
}
 | 
			
		||||
							
								
								
									
										296
									
								
								vendor/github.com/antlr/antlr4/runtime/Go/antlr/interval_set.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										296
									
								
								vendor/github.com/antlr/antlr4/runtime/Go/antlr/interval_set.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							@@ -0,0 +1,296 @@
 | 
			
		||||
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
 | 
			
		||||
// Use of this file is governed by the BSD 3-clause license that
 | 
			
		||||
// can be found in the LICENSE.txt file in the project root.
 | 
			
		||||
 | 
			
		||||
package antlr
 | 
			
		||||
 | 
			
		||||
import (
 | 
			
		||||
	"strconv"
 | 
			
		||||
	"strings"
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
// Interval represents a half-open range of integers: Start is included,
// Stop is excluded.
type Interval struct {
	Start int
	Stop  int
}

// NewInterval returns an Interval covering [start, stop). Note that stop
// is NOT included in the range.
func NewInterval(start, stop int) *Interval {
	return &Interval{Start: start, Stop: stop}
}
 | 
			
		||||
 | 
			
		||||
func (i *Interval) Contains(item int) bool {
 | 
			
		||||
	return item >= i.Start && item < i.Stop
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// String renders the interval using inclusive bounds: a single number
// for a one-element interval, otherwise "start..stop-1".
func (i *Interval) String() string {
	if i.Start == i.Stop-1 {
		return strconv.Itoa(i.Start)
	}

	return strconv.Itoa(i.Start) + ".." + strconv.Itoa(i.Stop-1)
}
 | 
			
		||||
 | 
			
		||||
// length returns the number of values in the half-open interval.
func (i *Interval) length() int {
	return i.Stop - i.Start
}
 | 
			
		||||
 | 
			
		||||
// IntervalSet is an ordered collection of disjoint Intervals used to
// represent sets of token types or characters.
type IntervalSet struct {
	intervals []*Interval // sorted, non-overlapping; nil until the first add
	readOnly  bool        // NOTE(review): initialized but not consulted anywhere in this chunk — confirm its enforcement elsewhere
}
 | 
			
		||||
 | 
			
		||||
func NewIntervalSet() *IntervalSet {
 | 
			
		||||
 | 
			
		||||
	i := new(IntervalSet)
 | 
			
		||||
 | 
			
		||||
	i.intervals = nil
 | 
			
		||||
	i.readOnly = false
 | 
			
		||||
 | 
			
		||||
	return i
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// first returns the smallest value in the set, or TokenInvalidType when
// the set is empty.
func (i *IntervalSet) first() int {
	if len(i.intervals) == 0 {
		return TokenInvalidType
	}

	return i.intervals[0].Start
}
 | 
			
		||||
 | 
			
		||||
// addOne adds the single value v to the set.
func (i *IntervalSet) addOne(v int) {
	i.addInterval(NewInterval(v, v+1))
}
 | 
			
		||||
 | 
			
		||||
// addRange adds the INCLUSIVE range [l, h] to the set (converted to the
// half-open [l, h+1) internally).
func (i *IntervalSet) addRange(l, h int) {
	i.addInterval(NewInterval(l, h+1))
}
 | 
			
		||||
 | 
			
		||||
// addInterval inserts the half-open interval v into the set, keeping the
// interval list sorted and merging v into adjacent or overlapping
// intervals where possible.
func (i *IntervalSet) addInterval(v *Interval) {
	if i.intervals == nil {
		i.intervals = make([]*Interval, 0)
		i.intervals = append(i.intervals, v)
	} else {
		// find insert pos
		for k, interval := range i.intervals {
			// distinct range -> insert
			if v.Stop < interval.Start {
				// v ends strictly before this interval: splice it in here.
				i.intervals = append(i.intervals[0:k], append([]*Interval{v}, i.intervals[k:]...)...)
				return
			} else if v.Stop == interval.Start {
				// v abuts this interval on the left: extend it backwards.
				i.intervals[k].Start = v.Start
				return
			} else if v.Start <= interval.Stop {
				// v overlaps (or abuts on the right): merge into one interval.
				i.intervals[k] = NewInterval(intMin(interval.Start, v.Start), intMax(interval.Stop, v.Stop))

				// if not applying to end, merge potential overlaps
				if k < len(i.intervals)-1 {
					l := i.intervals[k]
					r := i.intervals[k+1]
					// if r contained in l
					if l.Stop >= r.Stop {
						i.intervals = append(i.intervals[0:k+1], i.intervals[k+2:]...)
					} else if l.Stop >= r.Start { // partial overlap
						i.intervals[k] = NewInterval(l.Start, r.Stop)
						i.intervals = append(i.intervals[0:k+1], i.intervals[k+2:]...)
					}
				}
				return
			}
		}
		// greater than any exiting
		i.intervals = append(i.intervals, v)
	}
}
 | 
			
		||||
 | 
			
		||||
func (i *IntervalSet) addSet(other *IntervalSet) *IntervalSet {
 | 
			
		||||
	if other.intervals != nil {
 | 
			
		||||
		for k := 0; k < len(other.intervals); k++ {
 | 
			
		||||
			i2 := other.intervals[k]
 | 
			
		||||
			i.addInterval(NewInterval(i2.Start, i2.Stop))
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
	return i
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (i *IntervalSet) complement(start int, stop int) *IntervalSet {
 | 
			
		||||
	result := NewIntervalSet()
 | 
			
		||||
	result.addInterval(NewInterval(start, stop+1))
 | 
			
		||||
	for j := 0; j < len(i.intervals); j++ {
 | 
			
		||||
		result.removeRange(i.intervals[j])
 | 
			
		||||
	}
 | 
			
		||||
	return result
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (i *IntervalSet) contains(item int) bool {
 | 
			
		||||
	if i.intervals == nil {
 | 
			
		||||
		return false
 | 
			
		||||
	}
 | 
			
		||||
	for k := 0; k < len(i.intervals); k++ {
 | 
			
		||||
		if i.intervals[k].Contains(item) {
 | 
			
		||||
			return true
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
	return false
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (i *IntervalSet) length() int {
 | 
			
		||||
	len := 0
 | 
			
		||||
 | 
			
		||||
	for _, v := range i.intervals {
 | 
			
		||||
		len += v.length()
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return len
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// removeRange deletes every value of the half-open interval v from the
// set, splitting, trimming or dropping existing intervals as needed.
func (i *IntervalSet) removeRange(v *Interval) {
	if v.Start == v.Stop-1 {
		// Single-value interval: delegate to the scalar path.
		i.removeOne(v.Start)
	} else if i.intervals != nil {
		k := 0
		for n := 0; n < len(i.intervals); n++ {
			ni := i.intervals[k]
			// intervals are ordered
			if v.Stop <= ni.Start {
				return
			} else if v.Start > ni.Start && v.Stop < ni.Stop {
				// v lies strictly inside ni: split ni in two.
				i.intervals[k] = NewInterval(ni.Start, v.Start)
				x := NewInterval(v.Stop, ni.Stop)
				// i.intervals.splice(k, 0, x)
				i.intervals = append(i.intervals[0:k], append([]*Interval{x}, i.intervals[k:]...)...)
				return
			} else if v.Start <= ni.Start && v.Stop >= ni.Stop {
				// ni is fully covered by v: drop it.
				//                i.intervals.splice(k, 1)
				i.intervals = append(i.intervals[0:k], i.intervals[k+1:]...)
				k = k - 1 // need another pass
			} else if v.Start < ni.Stop {
				// v overlaps the tail of ni: keep only ni's head.
				i.intervals[k] = NewInterval(ni.Start, v.Start)
			} else if v.Stop < ni.Stop {
				// v overlaps the head of ni: keep only ni's tail.
				i.intervals[k] = NewInterval(v.Stop, ni.Stop)
			}
			k++
		}
	}
}
 | 
			
		||||
 | 
			
		||||
// removeOne deletes the single value v from the set, shrinking or
// splitting the interval that contains it.
func (i *IntervalSet) removeOne(v int) {
	if i.intervals != nil {
		for k := 0; k < len(i.intervals); k++ {
			ki := i.intervals[k]
			// intervals i ordered
			if v < ki.Start {
				return
			} else if v == ki.Start && v == ki.Stop-1 {
				// v is the interval's only value: drop the whole interval.
				//				i.intervals.splice(k, 1)
				i.intervals = append(i.intervals[0:k], i.intervals[k+1:]...)
				return
			} else if v == ki.Start {
				// v is the first value: shrink from the left.
				i.intervals[k] = NewInterval(ki.Start+1, ki.Stop)
				return
			} else if v == ki.Stop-1 {
				// v is the last value: shrink from the right.
				i.intervals[k] = NewInterval(ki.Start, ki.Stop-1)
				return
			} else if v < ki.Stop-1 {
				// v is interior: split the interval around it.
				x := NewInterval(ki.Start, v)
				ki.Start = v + 1
				//				i.intervals.splice(k, 0, x)
				i.intervals = append(i.intervals[0:k], append([]*Interval{x}, i.intervals[k:]...)...)
				return
			}
		}
	}
}
 | 
			
		||||
 | 
			
		||||
// String renders the set using plain integer element names.
func (i *IntervalSet) String() string {
	return i.StringVerbose(nil, nil, false)
}
 | 
			
		||||
 | 
			
		||||
func (i *IntervalSet) StringVerbose(literalNames []string, symbolicNames []string, elemsAreChar bool) string {
 | 
			
		||||
 | 
			
		||||
	if i.intervals == nil {
 | 
			
		||||
		return "{}"
 | 
			
		||||
	} else if literalNames != nil || symbolicNames != nil {
 | 
			
		||||
		return i.toTokenString(literalNames, symbolicNames)
 | 
			
		||||
	} else if elemsAreChar {
 | 
			
		||||
		return i.toCharString()
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return i.toIndexString()
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (i *IntervalSet) toCharString() string {
 | 
			
		||||
	names := make([]string, len(i.intervals))
 | 
			
		||||
 | 
			
		||||
	for j := 0; j < len(i.intervals); j++ {
 | 
			
		||||
		v := i.intervals[j]
 | 
			
		||||
		if v.Stop == v.Start+1 {
 | 
			
		||||
			if v.Start == TokenEOF {
 | 
			
		||||
				names = append(names, "<EOF>")
 | 
			
		||||
			} else {
 | 
			
		||||
				names = append(names, ("'" + string(v.Start) + "'"))
 | 
			
		||||
			}
 | 
			
		||||
		} else {
 | 
			
		||||
			names = append(names, "'"+string(v.Start)+"'..'"+string(v.Stop-1)+"'")
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
	if len(names) > 1 {
 | 
			
		||||
		return "{" + strings.Join(names, ", ") + "}"
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return names[0]
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (i *IntervalSet) toIndexString() string {
 | 
			
		||||
 | 
			
		||||
	names := make([]string, 0)
 | 
			
		||||
	for j := 0; j < len(i.intervals); j++ {
 | 
			
		||||
		v := i.intervals[j]
 | 
			
		||||
		if v.Stop == v.Start+1 {
 | 
			
		||||
			if v.Start == TokenEOF {
 | 
			
		||||
				names = append(names, "<EOF>")
 | 
			
		||||
			} else {
 | 
			
		||||
				names = append(names, strconv.Itoa(v.Start))
 | 
			
		||||
			}
 | 
			
		||||
		} else {
 | 
			
		||||
			names = append(names, strconv.Itoa(v.Start)+".."+strconv.Itoa(v.Stop-1))
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
	if len(names) > 1 {
 | 
			
		||||
		return "{" + strings.Join(names, ", ") + "}"
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return names[0]
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (i *IntervalSet) toTokenString(literalNames []string, symbolicNames []string) string {
 | 
			
		||||
	names := make([]string, 0)
 | 
			
		||||
	for _, v := range i.intervals {
 | 
			
		||||
		for j := v.Start; j < v.Stop; j++ {
 | 
			
		||||
			names = append(names, i.elementName(literalNames, symbolicNames, j))
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
	if len(names) > 1 {
 | 
			
		||||
		return "{" + strings.Join(names, ", ") + "}"
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return names[0]
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (i *IntervalSet) elementName(literalNames []string, symbolicNames []string, a int) string {
 | 
			
		||||
	if a == TokenEOF {
 | 
			
		||||
		return "<EOF>"
 | 
			
		||||
	} else if a == TokenEpsilon {
 | 
			
		||||
		return "<EPSILON>"
 | 
			
		||||
	} else {
 | 
			
		||||
		if a < len(literalNames) && literalNames[a] != "" {
 | 
			
		||||
			return literalNames[a]
 | 
			
		||||
		}
 | 
			
		||||
 | 
			
		||||
		return symbolicNames[a]
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
							
								
								
									
										418
									
								
								vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										418
									
								
								vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							@@ -0,0 +1,418 @@
 | 
			
		||||
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
 | 
			
		||||
// Use of this file is governed by the BSD 3-clause license that
 | 
			
		||||
// can be found in the LICENSE.txt file in the project root.
 | 
			
		||||
 | 
			
		||||
package antlr
 | 
			
		||||
 | 
			
		||||
import (
 | 
			
		||||
	"fmt"
 | 
			
		||||
	"strconv"
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
// A lexer is a recognizer that draws input symbols from a character stream.
// Lexer grammars result in a subclass of this object. A Lexer object
// uses simplified Match() and error recovery mechanisms in the interest
// of speed.
type Lexer interface {
	TokenSource
	Recognizer

	// Emit creates and records the token for the current rule match.
	Emit() Token

	SetChannel(int) // set the channel for the token in progress
	PushMode(int)   // save the current mode and switch to a new one
	PopMode() int   // restore the most recently pushed mode
	SetType(int)    // set the token type for the token in progress
	SetMode(int)    // switch lexer mode without touching the mode stack
}
 | 
			
		||||
 | 
			
		||||
// BaseLexer holds the runtime state shared by all generated lexers.
type BaseLexer struct {
	*BaseRecognizer

	Interpreter         ILexerATNSimulator // ATN simulator; populated by the generated subclass
	TokenStartCharIndex int                // char-stream index where the current token started
	TokenStartLine      int                // line on which the current token started
	TokenStartColumn    int                // column at which the current token started
	ActionType          int
	Virt                Lexer // The most derived lexer implementation. Allows virtual method calls.

	input                  CharStream
	factory                TokenFactory
	tokenFactorySourcePair *TokenSourceCharStreamPair
	token                  Token    // token produced by the current/last match; nil until emitted
	hitEOF                 bool     // set once EOF is seen; the next token will be EOF
	channel                int      // channel for the token in progress
	thetype                int      // token type for the token in progress
	modeStack              IntStack // modes saved by PushMode
	mode                   int      // current lexer mode
	text                   string   // optional text override for the token in progress
}
 | 
			
		||||
 | 
			
		||||
func NewBaseLexer(input CharStream) *BaseLexer {
 | 
			
		||||
 | 
			
		||||
	lexer := new(BaseLexer)
 | 
			
		||||
 | 
			
		||||
	lexer.BaseRecognizer = NewBaseRecognizer()
 | 
			
		||||
 | 
			
		||||
	lexer.input = input
 | 
			
		||||
	lexer.factory = CommonTokenFactoryDEFAULT
 | 
			
		||||
	lexer.tokenFactorySourcePair = &TokenSourceCharStreamPair{lexer, input}
 | 
			
		||||
 | 
			
		||||
	lexer.Virt = lexer
 | 
			
		||||
 | 
			
		||||
	lexer.Interpreter = nil // child classes must populate it
 | 
			
		||||
 | 
			
		||||
	// The goal of all lexer rules/methods is to create a token object.
 | 
			
		||||
	// l is an instance variable as multiple rules may collaborate to
 | 
			
		||||
	// create a single token. NextToken will return l object after
 | 
			
		||||
	// Matching lexer rule(s). If you subclass to allow multiple token
 | 
			
		||||
	// emissions, then set l to the last token to be Matched or
 | 
			
		||||
	// something nonnil so that the auto token emit mechanism will not
 | 
			
		||||
	// emit another token.
 | 
			
		||||
	lexer.token = nil
 | 
			
		||||
 | 
			
		||||
	// What character index in the stream did the current token start at?
 | 
			
		||||
	// Needed, for example, to get the text for current token. Set at
 | 
			
		||||
	// the start of NextToken.
 | 
			
		||||
	lexer.TokenStartCharIndex = -1
 | 
			
		||||
 | 
			
		||||
	// The line on which the first character of the token resides///
 | 
			
		||||
	lexer.TokenStartLine = -1
 | 
			
		||||
 | 
			
		||||
	// The character position of first character within the line///
 | 
			
		||||
	lexer.TokenStartColumn = -1
 | 
			
		||||
 | 
			
		||||
	// Once we see EOF on char stream, next token will be EOF.
 | 
			
		||||
	// If you have DONE : EOF  then you see DONE EOF.
 | 
			
		||||
	lexer.hitEOF = false
 | 
			
		||||
 | 
			
		||||
	// The channel number for the current token///
 | 
			
		||||
	lexer.channel = TokenDefaultChannel
 | 
			
		||||
 | 
			
		||||
	// The token type for the current token///
 | 
			
		||||
	lexer.thetype = TokenInvalidType
 | 
			
		||||
 | 
			
		||||
	lexer.modeStack = make([]int, 0)
 | 
			
		||||
	lexer.mode = LexerDefaultMode
 | 
			
		||||
 | 
			
		||||
	// You can set the text for the current token to override what is in
 | 
			
		||||
	// the input char buffer. Use setText() or can set l instance var.
 | 
			
		||||
	// /
 | 
			
		||||
	lexer.text = ""
 | 
			
		||||
 | 
			
		||||
	return lexer
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Sentinel values used by lexer commands and returned through the
// token-type channel.
const (
	LexerDefaultMode = 0  // initial lexer mode
	LexerMore        = -2 // keep matching: fold this text into the next rule
	LexerSkip        = -3 // discard the matched text and look for another token
)

const (
	LexerDefaultTokenChannel = TokenDefaultChannel
	LexerHidden              = TokenHiddenChannel
	LexerMinCharValue        = 0x0000
	LexerMaxCharValue        = 0x10FFFF // highest valid Unicode code point
)
 | 
			
		||||
 | 
			
		||||
// reset returns the lexer to its initial state: rewinds the input stream
// (when one is attached) and clears all per-token and mode state.
func (b *BaseLexer) reset() {
	// wack Lexer state variables
	if b.input != nil {
		b.input.Seek(0) // rewind the input
	}
	b.token = nil
	b.thetype = TokenInvalidType
	b.channel = TokenDefaultChannel
	b.TokenStartCharIndex = -1
	b.TokenStartColumn = -1
	b.TokenStartLine = -1
	b.text = ""

	b.hitEOF = false
	b.mode = LexerDefaultMode
	b.modeStack = make([]int, 0)

	// NOTE(review): Interpreter must already be populated here; reset
	// panics on a nil Interpreter.
	b.Interpreter.reset()
}
 | 
			
		||||
 | 
			
		||||
// GetInterpreter returns the lexer's ATN simulator.
func (b *BaseLexer) GetInterpreter() ILexerATNSimulator {
	return b.Interpreter
}

// GetInputStream returns the character stream the lexer reads from.
func (b *BaseLexer) GetInputStream() CharStream {
	return b.input
}

// GetSourceName reports the grammar file name as the source name.
func (b *BaseLexer) GetSourceName() string {
	return b.GrammarFileName
}

// SetChannel sets the channel for the token currently being matched.
func (b *BaseLexer) SetChannel(v int) {
	b.channel = v
}

// GetTokenFactory returns the factory used to create tokens.
func (b *BaseLexer) GetTokenFactory() TokenFactory {
	return b.factory
}

// setTokenFactory replaces the factory used to create tokens.
func (b *BaseLexer) setTokenFactory(f TokenFactory) {
	b.factory = f
}
 | 
			
		||||
 | 
			
		||||
func (b *BaseLexer) safeMatch() (ret int) {
 | 
			
		||||
	defer func() {
 | 
			
		||||
		if e := recover(); e != nil {
 | 
			
		||||
			if re, ok := e.(RecognitionException); ok {
 | 
			
		||||
				b.notifyListeners(re) // Report error
 | 
			
		||||
				b.Recover(re)
 | 
			
		||||
				ret = LexerSkip // default
 | 
			
		||||
			}
 | 
			
		||||
		}
 | 
			
		||||
	}()
 | 
			
		||||
 | 
			
		||||
	return b.Interpreter.Match(b.input, b.mode)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Return a token from l source i.e., Match a token on the char stream.
 | 
			
		||||
func (b *BaseLexer) NextToken() Token {
 | 
			
		||||
	if b.input == nil {
 | 
			
		||||
		panic("NextToken requires a non-nil input stream.")
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	tokenStartMarker := b.input.Mark()
 | 
			
		||||
 | 
			
		||||
	// previously in finally block
 | 
			
		||||
	defer func() {
 | 
			
		||||
		// make sure we release marker after Match or
 | 
			
		||||
		// unbuffered char stream will keep buffering
 | 
			
		||||
		b.input.Release(tokenStartMarker)
 | 
			
		||||
	}()
 | 
			
		||||
 | 
			
		||||
	for {
 | 
			
		||||
		if b.hitEOF {
 | 
			
		||||
			b.EmitEOF()
 | 
			
		||||
			return b.token
 | 
			
		||||
		}
 | 
			
		||||
		b.token = nil
 | 
			
		||||
		b.channel = TokenDefaultChannel
 | 
			
		||||
		b.TokenStartCharIndex = b.input.Index()
 | 
			
		||||
		b.TokenStartColumn = b.Interpreter.GetCharPositionInLine()
 | 
			
		||||
		b.TokenStartLine = b.Interpreter.GetLine()
 | 
			
		||||
		b.text = ""
 | 
			
		||||
		continueOuter := false
 | 
			
		||||
		for {
 | 
			
		||||
			b.thetype = TokenInvalidType
 | 
			
		||||
			ttype := LexerSkip
 | 
			
		||||
 | 
			
		||||
			ttype = b.safeMatch()
 | 
			
		||||
 | 
			
		||||
			if b.input.LA(1) == TokenEOF {
 | 
			
		||||
				b.hitEOF = true
 | 
			
		||||
			}
 | 
			
		||||
			if b.thetype == TokenInvalidType {
 | 
			
		||||
				b.thetype = ttype
 | 
			
		||||
			}
 | 
			
		||||
			if b.thetype == LexerSkip {
 | 
			
		||||
				continueOuter = true
 | 
			
		||||
				break
 | 
			
		||||
			}
 | 
			
		||||
			if b.thetype != LexerMore {
 | 
			
		||||
				break
 | 
			
		||||
			}
 | 
			
		||||
		}
 | 
			
		||||
 | 
			
		||||
		if continueOuter {
 | 
			
		||||
			continue
 | 
			
		||||
		}
 | 
			
		||||
		if b.token == nil {
 | 
			
		||||
			b.Virt.Emit()
 | 
			
		||||
		}
 | 
			
		||||
		return b.token
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Skip instructs the lexer to Skip creating a token for the current lexer
// rule and look for another token. NextToken() knows to keep looking when
// a lexer rule finishes with token set to SKIPTOKEN. Recall that
// if token==nil at end of any token rule, it creates one for you
// and emits it.
func (b *BaseLexer) Skip() {
	b.thetype = LexerSkip
}

// More instructs the lexer to keep matching: the text matched so far is
// carried into the next rule instead of being emitted as its own token.
func (b *BaseLexer) More() {
	b.thetype = LexerMore
}

// SetMode switches the lexer to mode m without touching the mode stack.
func (b *BaseLexer) SetMode(m int) {
	b.mode = m
}
 | 
			
		||||
 | 
			
		||||
// PushMode saves the current mode on the mode stack and switches to m.
func (b *BaseLexer) PushMode(m int) {
	if LexerATNSimulatorDebug {
		fmt.Println("pushMode " + strconv.Itoa(m))
	}
	b.modeStack.Push(b.mode)
	b.mode = m
}
 | 
			
		||||
 | 
			
		||||
// PopMode restores the most recently pushed mode and returns it. It panics
// when the mode stack is empty.
func (b *BaseLexer) PopMode() int {
	if len(b.modeStack) == 0 {
		panic("Empty Stack")
	}
	if LexerATNSimulatorDebug {
		// Print the stack minus its top, i.e. what remains after the pop.
		fmt.Println("popMode back to " + fmt.Sprint(b.modeStack[0:len(b.modeStack)-1]))
	}
	i, _ := b.modeStack.Pop()
	b.mode = i
	return b.mode
}
 | 
			
		||||
 | 
			
		||||
// inputStream returns the underlying character stream.
func (b *BaseLexer) inputStream() CharStream {
	return b.input
}

// SetInputStream resets the lexer input stream and associated lexer state.
func (b *BaseLexer) SetInputStream(input CharStream) {
	// Clear the input first so reset() does not Seek the old stream.
	b.input = nil
	b.tokenFactorySourcePair = &TokenSourceCharStreamPair{b, b.input}
	b.reset()
	b.input = input
	b.tokenFactorySourcePair = &TokenSourceCharStreamPair{b, b.input}
}

// GetTokenSourceCharStreamPair returns the (token source, char stream) pair
// that is handed to the token factory.
func (b *BaseLexer) GetTokenSourceCharStreamPair() *TokenSourceCharStreamPair {
	return b.tokenFactorySourcePair
}
 | 
			
		||||
 | 
			
		||||
// EmitToken records token as the result of the current match.
//
// By default the lexer does not support multiple emits per NextToken
// invocation for efficiency reasons. Subclass and override this method,
// NextToken, and GetToken (to push tokens into a list and pull from that
// list rather than a single variable as this implementation does).
func (b *BaseLexer) EmitToken(token Token) {
	b.token = token
}

// Emit is the standard method called to automatically emit a token at the
// outermost lexical rule. The token object should point into the
// char buffer start..stop. If there is a text override in 'text',
// use that to set the token's text. Override this method to emit
// custom Token objects or provide a new factory.
func (b *BaseLexer) Emit() Token {
	t := b.factory.Create(b.tokenFactorySourcePair, b.thetype, b.text, b.channel, b.TokenStartCharIndex, b.GetCharIndex()-1, b.TokenStartLine, b.TokenStartColumn)
	b.EmitToken(t)
	return t
}
 | 
			
		||||
 | 
			
		||||
// EmitEOF creates and records an EOF token at the current stream position.
func (b *BaseLexer) EmitEOF() Token {
	cpos := b.GetCharPositionInLine()
	lpos := b.GetLine()
	// Zero-length token: start index is the current index, stop is index-1.
	eof := b.factory.Create(b.tokenFactorySourcePair, TokenEOF, "", TokenDefaultChannel, b.input.Index(), b.input.Index()-1, lpos, cpos)
	b.EmitToken(eof)
	return eof
}
 | 
			
		||||
 | 
			
		||||
// GetCharPositionInLine returns the simulator's current column.
func (b *BaseLexer) GetCharPositionInLine() int {
	return b.Interpreter.GetCharPositionInLine()
}

// GetLine returns the simulator's current line number.
func (b *BaseLexer) GetLine() int {
	return b.Interpreter.GetLine()
}

// GetType returns the token type for the token in progress.
func (b *BaseLexer) GetType() int {
	return b.thetype
}

// SetType sets the token type for the token in progress.
func (b *BaseLexer) SetType(t int) {
	b.thetype = t
}

// GetCharIndex answers: what is the index of the current character of lookahead?
func (b *BaseLexer) GetCharIndex() int {
	return b.input.Index()
}
 | 
			
		||||
 | 
			
		||||
// GetText returns the text Matched so far for the current token, or the
// text override when one has been set via SetText.
func (b *BaseLexer) GetText() string {
	if b.text != "" {
		return b.text
	}

	return b.Interpreter.GetText(b.input)
}

// SetText sets the complete text of the token; it wipes any previous
// changes to the text.
func (b *BaseLexer) SetText(text string) {
	b.text = text
}

// GetATN returns the ATN used by the lexer's simulator.
func (b *BaseLexer) GetATN() *ATN {
	return b.Interpreter.ATN()
}
 | 
			
		||||
 | 
			
		||||
// Return a list of all Token objects in input char stream.
 | 
			
		||||
// Forces load of all tokens. Does not include EOF token.
 | 
			
		||||
// /
 | 
			
		||||
func (b *BaseLexer) GetAllTokens() []Token {
 | 
			
		||||
	vl := b.Virt
 | 
			
		||||
	tokens := make([]Token, 0)
 | 
			
		||||
	t := vl.NextToken()
 | 
			
		||||
	for t.GetTokenType() != TokenEOF {
 | 
			
		||||
		tokens = append(tokens, t)
 | 
			
		||||
		t = vl.NextToken()
 | 
			
		||||
	}
 | 
			
		||||
	return tokens
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// notifyListeners reports a token recognition error to the error listeners,
// quoting the text from the start of the current token to the current
// stream position.
func (b *BaseLexer) notifyListeners(e RecognitionException) {
	start := b.TokenStartCharIndex
	stop := b.input.Index()
	text := b.input.GetTextFromInterval(NewInterval(start, stop))
	msg := "token recognition error at: '" + text + "'"
	listener := b.GetErrorListenerDispatch()
	listener.SyntaxError(b, nil, b.TokenStartLine, b.TokenStartColumn, msg, e)
}
 | 
			
		||||
 | 
			
		||||
func (b *BaseLexer) getErrorDisplayForChar(c rune) string {
 | 
			
		||||
	if c == TokenEOF {
 | 
			
		||||
		return "<EOF>"
 | 
			
		||||
	} else if c == '\n' {
 | 
			
		||||
		return "\\n"
 | 
			
		||||
	} else if c == '\t' {
 | 
			
		||||
		return "\\t"
 | 
			
		||||
	} else if c == '\r' {
 | 
			
		||||
		return "\\r"
 | 
			
		||||
	} else {
 | 
			
		||||
		return string(c)
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// getCharErrorDisplay returns the printable form of c wrapped in single quotes.
func (b *BaseLexer) getCharErrorDisplay(c rune) string {
	return "'" + b.getErrorDisplayForChar(c) + "'"
}
 | 
			
		||||
 | 
			
		||||
// Recover handles a recognition error by consuming one character.
//
// Lexers can normally Match any char in their vocabulary after Matching
// a token, so do the easy thing and just kill a character and hope
// it all works out. You can instead use the rule invocation stack
// to do sophisticated error recovery if you are in a fragment rule.
func (b *BaseLexer) Recover(re RecognitionException) {
	if b.input.LA(1) != TokenEOF {
		if _, ok := re.(*LexerNoViableAltException); ok {
			// Skip a char and try again
			b.Interpreter.Consume(b.input)
		} else {
			// TODO: Do we lose character or line position information?
			b.input.Consume()
		}
	}
}
 | 
			
		||||
							
								
								
									
										431
									
								
								vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer_action.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										431
									
								
								vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer_action.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							@@ -0,0 +1,431 @@
 | 
			
		||||
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
 | 
			
		||||
// Use of this file is governed by the BSD 3-clause license that
 | 
			
		||||
// can be found in the LICENSE.txt file in the project root.
 | 
			
		||||
 | 
			
		||||
package antlr
 | 
			
		||||
 | 
			
		||||
import "strconv"
 | 
			
		||||
 | 
			
		||||
// Type codes identifying the concrete lexer-action kinds declared below.
const (
	LexerActionTypeChannel  = 0 //The type of a {@link LexerChannelAction} action.
	LexerActionTypeCustom   = 1 //The type of a {@link LexerCustomAction} action.
	LexerActionTypeMode     = 2 //The type of a {@link LexerModeAction} action.
	LexerActionTypeMore     = 3 //The type of a {@link LexerMoreAction} action.
	LexerActionTypePopMode  = 4 //The type of a {@link LexerPopModeAction} action.
	LexerActionTypePushMode = 5 //The type of a {@link LexerPushModeAction} action.
	LexerActionTypeSkip     = 6 //The type of a {@link LexerSkipAction} action.
	LexerActionTypeType     = 7 //The type of a {@link LexerTypeAction} action.
)
 | 
			
		||||
 | 
			
		||||
// LexerAction is a single lexer command (skip, more, mode change, ...)
// executed against a Lexer when its rule matches.
type LexerAction interface {
	getActionType() int           // one of the LexerActionType* codes
	getIsPositionDependent() bool // true if the action depends on the char index
	execute(lexer Lexer)          // apply the action to the given lexer
	hash() int
	equals(other LexerAction) bool
}
 | 
			
		||||
 | 
			
		||||
// BaseLexerAction carries the state common to all lexer actions.
type BaseLexerAction struct {
	actionType          int  // one of the LexerActionType* codes
	isPositionDependent bool // see LexerAction.getIsPositionDependent
}
 | 
			
		||||
 | 
			
		||||
func NewBaseLexerAction(action int) *BaseLexerAction {
 | 
			
		||||
	la := new(BaseLexerAction)
 | 
			
		||||
 | 
			
		||||
	la.actionType = action
 | 
			
		||||
	la.isPositionDependent = false
 | 
			
		||||
 | 
			
		||||
	return la
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// execute must be overridden by every concrete action; the base panics.
func (b *BaseLexerAction) execute(lexer Lexer) {
	panic("Not implemented")
}

// getActionType returns the action's type code.
func (b *BaseLexerAction) getActionType() int {
	return b.actionType
}

// getIsPositionDependent reports whether the action depends on the char index.
func (b *BaseLexerAction) getIsPositionDependent() bool {
	return b.isPositionDependent
}

// hash uses the action type code alone; parameterized actions override it.
func (b *BaseLexerAction) hash() int {
	return b.actionType
}

// equals is identity comparison; parameterized actions override it.
func (b *BaseLexerAction) equals(other LexerAction) bool {
	return b == other
}
 | 
			
		||||
 | 
			
		||||
//
// Implements the {@code Skip} lexer action by calling {@link Lexer//Skip}.
//
// <p>The {@code Skip} command does not have any parameters, so this action is
// implemented as a singleton instance exposed by {@link //INSTANCE}.</p>
type LexerSkipAction struct {
	*BaseLexerAction
}

// NewLexerSkipAction returns the parameterless skip action.
func NewLexerSkipAction() *LexerSkipAction {
	la := new(LexerSkipAction)
	la.BaseLexerAction = NewBaseLexerAction(LexerActionTypeSkip)
	return la
}

// Provides a singleton instance of this parameterless lexer action.
var LexerSkipActionINSTANCE = NewLexerSkipAction()

// execute applies the action by calling Lexer.Skip.
func (l *LexerSkipAction) execute(lexer Lexer) {
	lexer.Skip()
}

func (l *LexerSkipAction) String() string {
	return "skip"
}
 | 
			
		||||
 | 
			
		||||
//  Implements the {@code type} lexer action by calling {@link Lexer//setType}
 | 
			
		||||
// with the assigned type.
 | 
			
		||||
type LexerTypeAction struct {
 | 
			
		||||
	*BaseLexerAction
 | 
			
		||||
 | 
			
		||||
	thetype int
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func NewLexerTypeAction(thetype int) *LexerTypeAction {
 | 
			
		||||
	l := new(LexerTypeAction)
 | 
			
		||||
	l.BaseLexerAction = NewBaseLexerAction(LexerActionTypeType)
 | 
			
		||||
	l.thetype = thetype
 | 
			
		||||
	return l
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (l *LexerTypeAction) execute(lexer Lexer) {
 | 
			
		||||
	lexer.SetType(l.thetype)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (l *LexerTypeAction) hash() int {
 | 
			
		||||
	h := murmurInit(0)
 | 
			
		||||
	h = murmurUpdate(h, l.actionType)
 | 
			
		||||
	h = murmurUpdate(h, l.thetype)
 | 
			
		||||
	return murmurFinish(h, 2)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (l *LexerTypeAction) equals(other LexerAction) bool {
 | 
			
		||||
	if l == other {
 | 
			
		||||
		return true
 | 
			
		||||
	} else if _, ok := other.(*LexerTypeAction); !ok {
 | 
			
		||||
		return false
 | 
			
		||||
	} else {
 | 
			
		||||
		return l.thetype == other.(*LexerTypeAction).thetype
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (l *LexerTypeAction) String() string {
 | 
			
		||||
	return "actionType(" + strconv.Itoa(l.thetype) + ")"
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Implements the {@code pushMode} lexer action by calling
 | 
			
		||||
// {@link Lexer//pushMode} with the assigned mode.
 | 
			
		||||
type LexerPushModeAction struct {
 | 
			
		||||
	*BaseLexerAction
 | 
			
		||||
 | 
			
		||||
	mode int
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func NewLexerPushModeAction(mode int) *LexerPushModeAction {
 | 
			
		||||
 | 
			
		||||
	l := new(LexerPushModeAction)
 | 
			
		||||
	l.BaseLexerAction = NewBaseLexerAction(LexerActionTypePushMode)
 | 
			
		||||
 | 
			
		||||
	l.mode = mode
 | 
			
		||||
	return l
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// <p>This action is implemented by calling {@link Lexer//pushMode} with the
 | 
			
		||||
// value provided by {@link //getMode}.</p>
 | 
			
		||||
func (l *LexerPushModeAction) execute(lexer Lexer) {
 | 
			
		||||
	lexer.PushMode(l.mode)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (l *LexerPushModeAction) hash() int {
 | 
			
		||||
	h := murmurInit(0)
 | 
			
		||||
	h = murmurUpdate(h, l.actionType)
 | 
			
		||||
	h = murmurUpdate(h, l.mode)
 | 
			
		||||
	return murmurFinish(h, 2)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (l *LexerPushModeAction) equals(other LexerAction) bool {
 | 
			
		||||
	if l == other {
 | 
			
		||||
		return true
 | 
			
		||||
	} else if _, ok := other.(*LexerPushModeAction); !ok {
 | 
			
		||||
		return false
 | 
			
		||||
	} else {
 | 
			
		||||
		return l.mode == other.(*LexerPushModeAction).mode
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (l *LexerPushModeAction) String() string {
 | 
			
		||||
	return "pushMode(" + strconv.Itoa(l.mode) + ")"
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Implements the {@code popMode} lexer action by calling {@link Lexer//popMode}.
//
// <p>The {@code popMode} command does not have any parameters, so this action is
// implemented as a singleton instance exposed by {@link //INSTANCE}.</p>
type LexerPopModeAction struct {
	*BaseLexerAction
}

// NewLexerPopModeAction returns the parameterless popMode action.
func NewLexerPopModeAction() *LexerPopModeAction {
	l := new(LexerPopModeAction)
	l.BaseLexerAction = NewBaseLexerAction(LexerActionTypePopMode)
	return l
}

// Singleton instance of this parameterless lexer action.
var LexerPopModeActionINSTANCE = NewLexerPopModeAction()

// <p>This action is implemented by calling {@link Lexer//popMode}.</p>
func (l *LexerPopModeAction) execute(lexer Lexer) {
	lexer.PopMode()
}

func (l *LexerPopModeAction) String() string {
	return "popMode"
}
 | 
			
		||||
 | 
			
		||||
// Implements the {@code more} lexer action by calling {@link Lexer//more}.
//
// <p>The {@code more} command does not have any parameters, so this action is
// implemented as a singleton instance exposed by {@link //INSTANCE}.</p>

type LexerMoreAction struct {
	*BaseLexerAction
}

// NewLexerMoreAction returns the parameterless more action.
func NewLexerMoreAction() *LexerMoreAction {
	l := new(LexerMoreAction)
	l.BaseLexerAction = NewBaseLexerAction(LexerActionTypeMore)

	return l
}

// Singleton instance of this parameterless lexer action.
var LexerMoreActionINSTANCE = NewLexerMoreAction()

// <p>This action is implemented by calling {@link Lexer//more}.</p>
// (The original comment said popMode, copied from the action above.)
func (l *LexerMoreAction) execute(lexer Lexer) {
	lexer.More()
}

func (l *LexerMoreAction) String() string {
	return "more"
}
 | 
			
		||||
 | 
			
		||||
// Implements the {@code mode} lexer action by calling {@link Lexer//mode} with
 | 
			
		||||
// the assigned mode.
 | 
			
		||||
type LexerModeAction struct {
 | 
			
		||||
	*BaseLexerAction
 | 
			
		||||
 | 
			
		||||
	mode int
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func NewLexerModeAction(mode int) *LexerModeAction {
 | 
			
		||||
	l := new(LexerModeAction)
 | 
			
		||||
	l.BaseLexerAction = NewBaseLexerAction(LexerActionTypeMode)
 | 
			
		||||
	l.mode = mode
 | 
			
		||||
	return l
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// <p>This action is implemented by calling {@link Lexer//mode} with the
 | 
			
		||||
// value provided by {@link //getMode}.</p>
 | 
			
		||||
func (l *LexerModeAction) execute(lexer Lexer) {
 | 
			
		||||
	lexer.SetMode(l.mode)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (l *LexerModeAction) hash() int {
 | 
			
		||||
	h := murmurInit(0)
 | 
			
		||||
	h = murmurUpdate(h, l.actionType)
 | 
			
		||||
	h = murmurUpdate(h, l.mode)
 | 
			
		||||
	return murmurFinish(h, 2)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (l *LexerModeAction) equals(other LexerAction) bool {
 | 
			
		||||
	if l == other {
 | 
			
		||||
		return true
 | 
			
		||||
	} else if _, ok := other.(*LexerModeAction); !ok {
 | 
			
		||||
		return false
 | 
			
		||||
	} else {
 | 
			
		||||
		return l.mode == other.(*LexerModeAction).mode
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (l *LexerModeAction) String() string {
 | 
			
		||||
	return "mode(" + strconv.Itoa(l.mode) + ")"
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Executes a custom lexer action by calling {@link Recognizer//action} with the
 | 
			
		||||
// rule and action indexes assigned to the custom action. The implementation of
 | 
			
		||||
// a custom action is added to the generated code for the lexer in an override
 | 
			
		||||
// of {@link Recognizer//action} when the grammar is compiled.
 | 
			
		||||
//
 | 
			
		||||
// <p>This class may represent embedded actions created with the <code>{...}</code>
 | 
			
		||||
// syntax in ANTLR 4, as well as actions created for lexer commands where the
 | 
			
		||||
// command argument could not be evaluated when the grammar was compiled.</p>
 | 
			
		||||
 | 
			
		||||
// Constructs a custom lexer action with the specified rule and action
 | 
			
		||||
// indexes.
 | 
			
		||||
//
 | 
			
		||||
// @param ruleIndex The rule index to use for calls to
 | 
			
		||||
// {@link Recognizer//action}.
 | 
			
		||||
// @param actionIndex The action index to use for calls to
 | 
			
		||||
// {@link Recognizer//action}.
 | 
			
		||||
 | 
			
		||||
type LexerCustomAction struct {
 | 
			
		||||
	*BaseLexerAction
 | 
			
		||||
	ruleIndex, actionIndex int
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func NewLexerCustomAction(ruleIndex, actionIndex int) *LexerCustomAction {
 | 
			
		||||
	l := new(LexerCustomAction)
 | 
			
		||||
	l.BaseLexerAction = NewBaseLexerAction(LexerActionTypeCustom)
 | 
			
		||||
	l.ruleIndex = ruleIndex
 | 
			
		||||
	l.actionIndex = actionIndex
 | 
			
		||||
	l.isPositionDependent = true
 | 
			
		||||
	return l
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// <p>Custom actions are implemented by calling {@link Lexer//action} with the
 | 
			
		||||
// appropriate rule and action indexes.</p>
 | 
			
		||||
func (l *LexerCustomAction) execute(lexer Lexer) {
 | 
			
		||||
	lexer.Action(nil, l.ruleIndex, l.actionIndex)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (l *LexerCustomAction) hash() int {
 | 
			
		||||
	h := murmurInit(0)
 | 
			
		||||
	h = murmurUpdate(h, l.actionType)
 | 
			
		||||
	h = murmurUpdate(h, l.ruleIndex)
 | 
			
		||||
	h = murmurUpdate(h, l.actionIndex)
 | 
			
		||||
	return murmurFinish(h, 3)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (l *LexerCustomAction) equals(other LexerAction) bool {
 | 
			
		||||
	if l == other {
 | 
			
		||||
		return true
 | 
			
		||||
	} else if _, ok := other.(*LexerCustomAction); !ok {
 | 
			
		||||
		return false
 | 
			
		||||
	} else {
 | 
			
		||||
		return l.ruleIndex == other.(*LexerCustomAction).ruleIndex && l.actionIndex == other.(*LexerCustomAction).actionIndex
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Implements the {@code channel} lexer action by calling
 | 
			
		||||
// {@link Lexer//setChannel} with the assigned channel.
 | 
			
		||||
// Constructs a New{@code channel} action with the specified channel value.
 | 
			
		||||
// @param channel The channel value to pass to {@link Lexer//setChannel}.
 | 
			
		||||
type LexerChannelAction struct {
 | 
			
		||||
	*BaseLexerAction
 | 
			
		||||
 | 
			
		||||
	channel int
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func NewLexerChannelAction(channel int) *LexerChannelAction {
 | 
			
		||||
	l := new(LexerChannelAction)
 | 
			
		||||
	l.BaseLexerAction = NewBaseLexerAction(LexerActionTypeChannel)
 | 
			
		||||
	l.channel = channel
 | 
			
		||||
	return l
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// <p>This action is implemented by calling {@link Lexer//setChannel} with the
 | 
			
		||||
// value provided by {@link //getChannel}.</p>
 | 
			
		||||
func (l *LexerChannelAction) execute(lexer Lexer) {
 | 
			
		||||
	lexer.SetChannel(l.channel)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (l *LexerChannelAction) hash() int {
 | 
			
		||||
	h := murmurInit(0)
 | 
			
		||||
	h = murmurUpdate(h, l.actionType)
 | 
			
		||||
	h = murmurUpdate(h, l.channel)
 | 
			
		||||
	return murmurFinish(h, 2)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (l *LexerChannelAction) equals(other LexerAction) bool {
 | 
			
		||||
	if l == other {
 | 
			
		||||
		return true
 | 
			
		||||
	} else if _, ok := other.(*LexerChannelAction); !ok {
 | 
			
		||||
		return false
 | 
			
		||||
	} else {
 | 
			
		||||
		return l.channel == other.(*LexerChannelAction).channel
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// String returns a human-readable form of the action, e.g. "channel(2)".
func (l *LexerChannelAction) String() string {
	return "channel(" + strconv.Itoa(l.channel) + ")"
}
 | 
			
		||||
 | 
			
		||||
// LexerIndexedCustomAction is an implementation of {@link LexerAction} used
// for tracking input offsets for position-dependent actions within a
// {@link LexerActionExecutor}.
//
// <p>This action is not serialized as part of the ATN, and is only required for
// position-dependent lexer actions which appear at a location other than the
// end of a rule. For more information about DFA optimizations employed for
// lexer actions, see {@link LexerActionExecutor//append} and
// {@link LexerActionExecutor//fixOffsetBeforeMatch}.</p>
//
// Constructed via NewLexerIndexedCustomAction, which associates a character
// offset with a wrapped {@link LexerAction}.
//
// <p>Note: This class is only required for lexer actions for which
// {@link LexerAction//isPositionDependent} returns {@code true}.</p>
type LexerIndexedCustomAction struct {
	*BaseLexerAction

	// offset is the offset into the input {@link CharStream}, relative to the
	// token start index, at which lexerAction should be executed.
	offset              int
	// lexerAction is the wrapped action to run once the stream has been
	// positioned at offset.
	lexerAction         LexerAction
	isPositionDependent bool
}
 | 
			
		||||
 | 
			
		||||
func NewLexerIndexedCustomAction(offset int, lexerAction LexerAction) *LexerIndexedCustomAction {
 | 
			
		||||
 | 
			
		||||
	l := new(LexerIndexedCustomAction)
 | 
			
		||||
	l.BaseLexerAction = NewBaseLexerAction(lexerAction.getActionType())
 | 
			
		||||
 | 
			
		||||
	l.offset = offset
 | 
			
		||||
	l.lexerAction = lexerAction
 | 
			
		||||
	l.isPositionDependent = true
 | 
			
		||||
 | 
			
		||||
	return l
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// execute runs the wrapped action against the provided {@code lexer}.
func (l *LexerIndexedCustomAction) execute(lexer Lexer) {
	// assume the input stream position was properly set by the calling code
	l.lexerAction.execute(lexer)
}
 | 
			
		||||
 | 
			
		||||
// hash folds the action type, the offset, and the wrapped action's own hash
// into a MurmurHash value.
func (l *LexerIndexedCustomAction) hash() int {
	h := murmurInit(0)
	h = murmurUpdate(h, l.actionType)
	h = murmurUpdate(h, l.offset)
	h = murmurUpdate(h, l.lexerAction.hash())
	return murmurFinish(h, 3)
}
 | 
			
		||||
 | 
			
		||||
func (l *LexerIndexedCustomAction) equals(other LexerAction) bool {
 | 
			
		||||
	if l == other {
 | 
			
		||||
		return true
 | 
			
		||||
	} else if _, ok := other.(*LexerIndexedCustomAction); !ok {
 | 
			
		||||
		return false
 | 
			
		||||
	} else {
 | 
			
		||||
		return l.offset == other.(*LexerIndexedCustomAction).offset && l.lexerAction == other.(*LexerIndexedCustomAction).lexerAction
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
							
								
								
									
										170
									
								
								vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer_action_executor.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										170
									
								
								vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer_action_executor.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							@@ -0,0 +1,170 @@
 | 
			
		||||
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
 | 
			
		||||
// Use of this file is governed by the BSD 3-clause license that
 | 
			
		||||
// can be found in the LICENSE.txt file in the project root.
 | 
			
		||||
 | 
			
		||||
package antlr
 | 
			
		||||
 | 
			
		||||
// LexerActionExecutor represents an executor for a sequence of lexer actions
// which traversed during the Matching operation of a lexer rule (token).
//
// <p>The executor tracks position information for position-dependent lexer actions
// efficiently, ensuring that actions appearing only at the end of the rule do
// not cause bloating of the {@link DFA} created for the lexer.</p>
type LexerActionExecutor struct {
	// lexerActions is the ordered sequence of actions to run.
	lexerActions     []LexerAction
	// cachedHash memoizes the hash of lexerActions; it is computed once at
	// construction because hash() sits on the hot LexerATNConfig hash path.
	cachedHash       int
}
 | 
			
		||||
 | 
			
		||||
func NewLexerActionExecutor(lexerActions []LexerAction) *LexerActionExecutor {
 | 
			
		||||
 | 
			
		||||
	if lexerActions == nil {
 | 
			
		||||
		lexerActions = make([]LexerAction, 0)
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	l := new(LexerActionExecutor)
 | 
			
		||||
 | 
			
		||||
	l.lexerActions = lexerActions
 | 
			
		||||
 | 
			
		||||
	// Caches the result of {@link //hashCode} since the hash code is an element
 | 
			
		||||
	// of the performance-critical {@link LexerATNConfig//hashCode} operation.
 | 
			
		||||
	l.cachedHash = murmurInit(57)
 | 
			
		||||
	for _, a := range lexerActions {
 | 
			
		||||
		l.cachedHash = murmurUpdate(l.cachedHash, a.hash())
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return l
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Creates a {@link LexerActionExecutor} which executes the actions for
 | 
			
		||||
// the input {@code lexerActionExecutor} followed by a specified
 | 
			
		||||
// {@code lexerAction}.
 | 
			
		||||
//
 | 
			
		||||
// @param lexerActionExecutor The executor for actions already traversed by
 | 
			
		||||
// the lexer while Matching a token within a particular
 | 
			
		||||
// {@link LexerATNConfig}. If this is {@code nil}, the method behaves as
 | 
			
		||||
// though it were an empty executor.
 | 
			
		||||
// @param lexerAction The lexer action to execute after the actions
 | 
			
		||||
// specified in {@code lexerActionExecutor}.
 | 
			
		||||
//
 | 
			
		||||
// @return A {@link LexerActionExecutor} for executing the combine actions
 | 
			
		||||
// of {@code lexerActionExecutor} and {@code lexerAction}.
 | 
			
		||||
func LexerActionExecutorappend(lexerActionExecutor *LexerActionExecutor, lexerAction LexerAction) *LexerActionExecutor {
 | 
			
		||||
	if lexerActionExecutor == nil {
 | 
			
		||||
		return NewLexerActionExecutor([]LexerAction{lexerAction})
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return NewLexerActionExecutor(append(lexerActionExecutor.lexerActions, lexerAction))
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Creates a {@link LexerActionExecutor} which encodes the current offset
 | 
			
		||||
// for position-dependent lexer actions.
 | 
			
		||||
//
 | 
			
		||||
// <p>Normally, when the executor encounters lexer actions where
 | 
			
		||||
// {@link LexerAction//isPositionDependent} returns {@code true}, it calls
 | 
			
		||||
// {@link IntStream//seek} on the input {@link CharStream} to set the input
 | 
			
		||||
// position to the <em>end</em> of the current token. This behavior provides
 | 
			
		||||
// for efficient DFA representation of lexer actions which appear at the end
 | 
			
		||||
// of a lexer rule, even when the lexer rule Matches a variable number of
 | 
			
		||||
// characters.</p>
 | 
			
		||||
//
 | 
			
		||||
// <p>Prior to traversing a Match transition in the ATN, the current offset
 | 
			
		||||
// from the token start index is assigned to all position-dependent lexer
 | 
			
		||||
// actions which have not already been assigned a fixed offset. By storing
 | 
			
		||||
// the offsets relative to the token start index, the DFA representation of
 | 
			
		||||
// lexer actions which appear in the middle of tokens remains efficient due
 | 
			
		||||
// to sharing among tokens of the same length, regardless of their absolute
 | 
			
		||||
// position in the input stream.</p>
 | 
			
		||||
//
 | 
			
		||||
// <p>If the current executor already has offsets assigned to all
 | 
			
		||||
// position-dependent lexer actions, the method returns {@code this}.</p>
 | 
			
		||||
//
 | 
			
		||||
// @param offset The current offset to assign to all position-dependent
 | 
			
		||||
// lexer actions which do not already have offsets assigned.
 | 
			
		||||
//
 | 
			
		||||
// @return A {@link LexerActionExecutor} which stores input stream offsets
 | 
			
		||||
// for all position-dependent lexer actions.
 | 
			
		||||
// /
 | 
			
		||||
func (l *LexerActionExecutor) fixOffsetBeforeMatch(offset int) *LexerActionExecutor {
 | 
			
		||||
	var updatedLexerActions []LexerAction
 | 
			
		||||
	for i := 0; i < len(l.lexerActions); i++ {
 | 
			
		||||
		_, ok := l.lexerActions[i].(*LexerIndexedCustomAction)
 | 
			
		||||
		if l.lexerActions[i].getIsPositionDependent() && !ok {
 | 
			
		||||
			if updatedLexerActions == nil {
 | 
			
		||||
				updatedLexerActions = make([]LexerAction, 0)
 | 
			
		||||
 | 
			
		||||
				for _, a := range l.lexerActions {
 | 
			
		||||
					updatedLexerActions = append(updatedLexerActions, a)
 | 
			
		||||
				}
 | 
			
		||||
			}
 | 
			
		||||
 | 
			
		||||
			updatedLexerActions[i] = NewLexerIndexedCustomAction(offset, l.lexerActions[i])
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
	if updatedLexerActions == nil {
 | 
			
		||||
		return l
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return NewLexerActionExecutor(updatedLexerActions)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Execute the actions encapsulated by l executor within the context of a
 | 
			
		||||
// particular {@link Lexer}.
 | 
			
		||||
//
 | 
			
		||||
// <p>This method calls {@link IntStream//seek} to set the position of the
 | 
			
		||||
// {@code input} {@link CharStream} prior to calling
 | 
			
		||||
// {@link LexerAction//execute} on a position-dependent action. Before the
 | 
			
		||||
// method returns, the input position will be restored to the same position
 | 
			
		||||
// it was in when the method was invoked.</p>
 | 
			
		||||
//
 | 
			
		||||
// @param lexer The lexer instance.
 | 
			
		||||
// @param input The input stream which is the source for the current token.
 | 
			
		||||
// When l method is called, the current {@link IntStream//index} for
 | 
			
		||||
// {@code input} should be the start of the following token, i.e. 1
 | 
			
		||||
// character past the end of the current token.
 | 
			
		||||
// @param startIndex The token start index. This value may be passed to
 | 
			
		||||
// {@link IntStream//seek} to set the {@code input} position to the beginning
 | 
			
		||||
// of the token.
 | 
			
		||||
// /
 | 
			
		||||
// execute runs the encapsulated actions within the context of a particular
// {@link Lexer}. Indexed actions seek the input to startIndex+offset before
// running their wrapped action; the deferred cleanup restores the stream to
// its original position (stopIndex) if the last action left it elsewhere.
//
// @param lexer The lexer instance.
// @param input The input stream which is the source for the current token.
// @param startIndex The token start index; offsets of indexed actions are
// relative to this position.
func (l *LexerActionExecutor) execute(lexer Lexer, input CharStream, startIndex int) {
	requiresSeek := false
	stopIndex := input.Index()

	// Restore the original stream position on exit (including on panic from
	// an action) if any action moved it.
	defer func() {
		if requiresSeek {
			input.Seek(stopIndex)
		}
	}()

	for i := 0; i < len(l.lexerActions); i++ {
		lexerAction := l.lexerActions[i]
		if la, ok := lexerAction.(*LexerIndexedCustomAction); ok {
			// Position the stream at the action's recorded offset, then run
			// the wrapped (unwrapped) action there.
			offset := la.offset
			input.Seek(startIndex + offset)
			lexerAction = la.lexerAction
			requiresSeek = (startIndex + offset) != stopIndex
		} else if lexerAction.getIsPositionDependent() {
			// Position-dependent but un-indexed actions run at the end of
			// the token, which is where the stream started.
			input.Seek(stopIndex)
			requiresSeek = false
		}
		lexerAction.execute(lexer)
	}
}
 | 
			
		||||
 | 
			
		||||
// hash returns the hash cached at construction time. A nil receiver is
// tolerated (callers may hold a nil executor) and hashes to a fixed value.
func (l *LexerActionExecutor) hash() int {
	if l == nil {
		return 61
	}
	return l.cachedHash
}
 | 
			
		||||
 | 
			
		||||
func (l *LexerActionExecutor) equals(other interface{}) bool {
 | 
			
		||||
	if l == other {
 | 
			
		||||
		return true
 | 
			
		||||
	} else if _, ok := other.(*LexerActionExecutor); !ok {
 | 
			
		||||
		return false
 | 
			
		||||
	} else {
 | 
			
		||||
		return l.cachedHash == other.(*LexerActionExecutor).cachedHash &&
 | 
			
		||||
			&l.lexerActions == &other.(*LexerActionExecutor).lexerActions
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
							
								
								
									
										658
									
								
								vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer_atn_simulator.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										658
									
								
								vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer_atn_simulator.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							@@ -0,0 +1,658 @@
 | 
			
		||||
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
 | 
			
		||||
// Use of this file is governed by the BSD 3-clause license that
 | 
			
		||||
// can be found in the LICENSE.txt file in the project root.
 | 
			
		||||
 | 
			
		||||
package antlr
 | 
			
		||||
 | 
			
		||||
import (
 | 
			
		||||
	"fmt"
 | 
			
		||||
	"strconv"
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
var (
 | 
			
		||||
	LexerATNSimulatorDebug    = false
 | 
			
		||||
	LexerATNSimulatorDFADebug = false
 | 
			
		||||
 | 
			
		||||
	LexerATNSimulatorMinDFAEdge = 0
 | 
			
		||||
	LexerATNSimulatorMaxDFAEdge = 127 // forces unicode to stay in ATN
 | 
			
		||||
 | 
			
		||||
	LexerATNSimulatorMatchCalls = 0
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
// ILexerATNSimulator is the interface implemented by the lexer's ATN
// simulator: token matching plus access to the simulator's line/column
// tracking and text extraction.
type ILexerATNSimulator interface {
	IATNSimulator

	reset()
	// Match returns the predicted token type for the next token in input,
	// starting in the given lexer mode.
	Match(input CharStream, mode int) int
	GetCharPositionInLine() int
	GetLine() int
	GetText(input CharStream) string
	Consume(input CharStream)
}
 | 
			
		||||
 | 
			
		||||
// LexerATNSimulator is the ATN simulator used for lexing: it runs DFA
// simulation backed by on-the-fly ATN simulation to match tokens.
type LexerATNSimulator struct {
	*BaseATNSimulator

	recog              Lexer
	predictionMode     int
	mergeCache         DoubleDict
	// startIndex is the current token's starting index into the character
	// stream; -1 when no match is in progress.
	startIndex         int
	// Line is the current line number, 1..n, within the input.
	Line               int
	// CharPositionInLine is the 0-based index of the current character
	// relative to the beginning of the line.
	CharPositionInLine int
	mode               int
	// prevAccept records the most recent accept configuration encountered
	// during DFA/ATN execution.
	prevAccept         *SimState
	MatchCalls         int
}
 | 
			
		||||
 | 
			
		||||
func NewLexerATNSimulator(recog Lexer, atn *ATN, decisionToDFA []*DFA, sharedContextCache *PredictionContextCache) *LexerATNSimulator {
 | 
			
		||||
	l := new(LexerATNSimulator)
 | 
			
		||||
 | 
			
		||||
	l.BaseATNSimulator = NewBaseATNSimulator(atn, sharedContextCache)
 | 
			
		||||
 | 
			
		||||
	l.decisionToDFA = decisionToDFA
 | 
			
		||||
	l.recog = recog
 | 
			
		||||
	// The current token's starting index into the character stream.
 | 
			
		||||
	// Shared across DFA to ATN simulation in case the ATN fails and the
 | 
			
		||||
	// DFA did not have a previous accept state. In l case, we use the
 | 
			
		||||
	// ATN-generated exception object.
 | 
			
		||||
	l.startIndex = -1
 | 
			
		||||
	// line number 1..n within the input///
 | 
			
		||||
	l.Line = 1
 | 
			
		||||
	// The index of the character relative to the beginning of the line
 | 
			
		||||
	// 0..n-1///
 | 
			
		||||
	l.CharPositionInLine = 0
 | 
			
		||||
	l.mode = LexerDefaultMode
 | 
			
		||||
	// Used during DFA/ATN exec to record the most recent accept configuration
 | 
			
		||||
	// info
 | 
			
		||||
	l.prevAccept = NewSimState()
 | 
			
		||||
	// done
 | 
			
		||||
	return l
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// copyState copies the positional state (column, line, mode, token start)
// from another simulator into l.
func (l *LexerATNSimulator) copyState(simulator *LexerATNSimulator) {
	l.CharPositionInLine = simulator.CharPositionInLine
	l.Line = simulator.Line
	l.mode = simulator.mode
	l.startIndex = simulator.startIndex
}
 | 
			
		||||
 | 
			
		||||
func (l *LexerATNSimulator) Match(input CharStream, mode int) int {
 | 
			
		||||
	l.MatchCalls++
 | 
			
		||||
	l.mode = mode
 | 
			
		||||
	mark := input.Mark()
 | 
			
		||||
 | 
			
		||||
	defer func() {
 | 
			
		||||
		input.Release(mark)
 | 
			
		||||
	}()
 | 
			
		||||
 | 
			
		||||
	l.startIndex = input.Index()
 | 
			
		||||
	l.prevAccept.reset()
 | 
			
		||||
 | 
			
		||||
	dfa := l.decisionToDFA[mode]
 | 
			
		||||
 | 
			
		||||
	if dfa.getS0() == nil {
 | 
			
		||||
		return l.MatchATN(input)
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return l.execATN(input, dfa.getS0())
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// reset returns the simulator to its initial state: no pending accept, no
// token in progress, position at line 1 column 0, default lexer mode.
func (l *LexerATNSimulator) reset() {
	l.prevAccept.reset()
	l.startIndex = -1
	l.Line = 1
	l.CharPositionInLine = 0
	l.mode = LexerDefaultMode
}
 | 
			
		||||
 | 
			
		||||
// MatchATN performs a full ATN simulation for the current mode when the
// mode's DFA has no start state yet, computing the start-state closure and
// then running execATN from it.
func (l *LexerATNSimulator) MatchATN(input CharStream) int {
	startState := l.atn.modeToStartState[l.mode]

	if LexerATNSimulatorDebug {
		fmt.Println("MatchATN mode " + strconv.Itoa(l.mode) + " start: " + startState.String())
	}
	oldMode := l.mode
	s0Closure := l.computeStartState(input, startState)
	// A start closure carrying semantic context must not be cached as the
	// DFA's s0 (the predicates would not be re-evaluated); remember the flag
	// and clear it before the state is added.
	suppressEdge := s0Closure.hasSemanticContext
	s0Closure.hasSemanticContext = false

	next := l.addDFAState(s0Closure)

	if !suppressEdge {
		l.decisionToDFA[l.mode].setS0(next)
	}

	predict := l.execATN(input, next)

	if LexerATNSimulatorDebug {
		fmt.Println("DFA after MatchATN: " + l.decisionToDFA[oldMode].ToLexerString())
	}
	return predict
}
 | 
			
		||||
 | 
			
		||||
// execATN runs the DFA-backed simulation loop starting at state ds0,
// consuming characters and tracking the most recent accept state in
// l.prevAccept, until no further transition is possible (or EOF is reached
// in an accept state). It then delegates to failOrAccept to produce the
// predicted token type or panic with a no-viable-alternative error.
func (l *LexerATNSimulator) execATN(input CharStream, ds0 *DFAState) int {

	if LexerATNSimulatorDebug {
		fmt.Println("start state closure=" + ds0.configs.String())
	}
	if ds0.isAcceptState {
		// allow zero-length tokens
		l.captureSimState(l.prevAccept, input, ds0)
	}
	t := input.LA(1)
	s := ds0 // s is current/from DFA state

	for { // while more work
		if LexerATNSimulatorDebug {
			fmt.Println("execATN loop starting closure: " + s.configs.String())
		}

		// As we move src->trg, src->trg, we keep track of the previous trg to
		// avoid looking up the DFA state again, which is expensive.
		// If the previous target was already part of the DFA, we might
		// be able to avoid doing a reach operation upon t. If s!=nil,
		// it means that semantic predicates didn't prevent us from
		// creating a DFA state. Once we know s!=nil, we check to see if
		// the DFA state has an edge already for t. If so, we can just reuse
		// its configuration set; there's no point in re-computing it.
		// This is kind of like doing DFA simulation within the ATN
		// simulation because DFA simulation is really just a way to avoid
		// computing reach/closure sets. Technically, once we know that
		// we have a previously added DFA state, we could jump over to
		// the DFA simulator. But, that would mean popping back and forth
		// a lot and making things more complicated algorithmically.
		// This optimization makes a lot of sense for loops within DFA.
		// A character will take us back to an existing DFA state
		// that already has lots of edges out of it. e.g., .* in comments.
		target := l.getExistingTargetState(s, t)
		if target == nil {
			target = l.computeTargetState(input, s, t)
			// print("Computed:" + str(target))
		}
		if target == ATNSimulatorError {
			break
		}
		// If this is a consumable input element, make sure to consume before
		// capturing the accept state so the input index, line, and char
		// position accurately reflect the state of the interpreter at the
		// end of the token.
		if t != TokenEOF {
			l.Consume(input)
		}
		if target.isAcceptState {
			l.captureSimState(l.prevAccept, input, target)
			if t == TokenEOF {
				break
			}
		}
		t = input.LA(1)
		s = target // flip; current DFA target becomes new src/from state
	}

	return l.failOrAccept(l.prevAccept, input, s.configs, t)
}
 | 
			
		||||
 | 
			
		||||
// Get an existing target state for an edge in the DFA. If the target state
 | 
			
		||||
// for the edge has not yet been computed or is otherwise not available,
 | 
			
		||||
// l method returns {@code nil}.
 | 
			
		||||
//
 | 
			
		||||
// @param s The current DFA state
 | 
			
		||||
// @param t The next input symbol
 | 
			
		||||
// @return The existing target DFA state for the given input symbol
 | 
			
		||||
// {@code t}, or {@code nil} if the target state for l edge is not
 | 
			
		||||
// already cached
 | 
			
		||||
func (l *LexerATNSimulator) getExistingTargetState(s *DFAState, t int) *DFAState {
 | 
			
		||||
	if s.getEdges() == nil || t < LexerATNSimulatorMinDFAEdge || t > LexerATNSimulatorMaxDFAEdge {
 | 
			
		||||
		return nil
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	target := s.getIthEdge(t-LexerATNSimulatorMinDFAEdge)
 | 
			
		||||
	if LexerATNSimulatorDebug && target != nil {
 | 
			
		||||
		fmt.Println("reuse state " + strconv.Itoa(s.stateNumber) + " edge to " + strconv.Itoa(target.stateNumber))
 | 
			
		||||
	}
 | 
			
		||||
	return target
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Compute a target state for an edge in the DFA, and attempt to add the
// computed state and corresponding edge to the DFA.
//
// @param input The input stream
// @param s The current DFA state
// @param t The next input symbol
//
// @return The computed target DFA state for the given input symbol
// {@code t}. If {@code t} does not lead to a valid DFA state, this method
// returns {@link //ERROR}.
func (l *LexerATNSimulator) computeTargetState(input CharStream, s *DFAState, t int) *DFAState {
	reach := NewOrderedATNConfigSet()

	// if we don't find an existing DFA state
	// Fill reach starting from closure, following t transitions
	l.getReachableConfigSet(input, s.configs, reach.BaseATNConfigSet, t)

	if len(reach.configs) == 0 { // we got nowhere on t from s
		if !reach.hasSemanticContext {
			// Cache the error edge so the DFA does not re-attempt this
			// transition later; edges with semantic context must not be
			// cached (the predicates would not be re-evaluated).
			l.addDFAEdge(s, t, ATNSimulatorError, nil)
		}
		// stop when we can't Match any more char
		return ATNSimulatorError
	}
	// Add an edge from s to target DFA found/created for reach
	return l.addDFAEdge(s, t, nil, reach.BaseATNConfigSet)
}
 | 
			
		||||
 | 
			
		||||
func (l *LexerATNSimulator) failOrAccept(prevAccept *SimState, input CharStream, reach ATNConfigSet, t int) int {
 | 
			
		||||
	if l.prevAccept.dfaState != nil {
 | 
			
		||||
		lexerActionExecutor := prevAccept.dfaState.lexerActionExecutor
 | 
			
		||||
		l.accept(input, lexerActionExecutor, l.startIndex, prevAccept.index, prevAccept.line, prevAccept.column)
 | 
			
		||||
		return prevAccept.dfaState.prediction
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	// if no accept and EOF is first char, return EOF
 | 
			
		||||
	if t == TokenEOF && input.Index() == l.startIndex {
 | 
			
		||||
		return TokenEOF
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	panic(NewLexerNoViableAltException(l.recog, input, l.startIndex, reach))
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Given a starting configuration set, figure out all ATN configurations
// we can reach upon input {@code t}. Parameter {@code reach} is a return
// parameter.
func (l *LexerATNSimulator) getReachableConfigSet(input CharStream, closure ATNConfigSet, reach ATNConfigSet, t int) {
	// SkipAlt is used to Skip processing for configs which have a lower
	// priority than a config that already reached an accept state for the
	// same rule.
	SkipAlt := ATNInvalidAltNumber

	for _, cfg := range closure.GetItems() {
		currentAltReachedAcceptState := (cfg.GetAlt() == SkipAlt)
		if currentAltReachedAcceptState && cfg.(*LexerATNConfig).passedThroughNonGreedyDecision {
			continue
		}

		if LexerATNSimulatorDebug {

			fmt.Printf("testing %s at %s\n", l.GetTokenName(t), cfg.String()) // l.recog, true))
		}

		for _, trans := range cfg.GetState().GetTransitions() {
			target := l.getReachableTarget(trans, t)
			if target != nil {
				lexerActionExecutor := cfg.(*LexerATNConfig).lexerActionExecutor
				if lexerActionExecutor != nil {
					// Pin position-dependent actions to the current offset
					// from the token start before the config enters the DFA.
					lexerActionExecutor = lexerActionExecutor.fixOffsetBeforeMatch(input.Index() - l.startIndex)
				}
				treatEOFAsEpsilon := (t == TokenEOF)
				config := NewLexerATNConfig3(cfg.(*LexerATNConfig), target, lexerActionExecutor)
				if l.closure(input, config, reach,
					currentAltReachedAcceptState, true, treatEOFAsEpsilon) {
					// any remaining configs for this alt have a lower priority
					// than the one that just reached an accept state.
					SkipAlt = cfg.GetAlt()
				}
			}
		}
	}
}
 | 
			
		||||
 | 
			
		||||
// accept finalizes an accepted token: it repositions the stream just past
// the token, restores the recorded line/column, and (when both an executor
// and a recognizer are present) replays the matched rule's lexer actions.
func (l *LexerATNSimulator) accept(input CharStream, lexerActionExecutor *LexerActionExecutor, startIndex, index, line, charPos int) {
	if LexerATNSimulatorDebug {
		fmt.Printf("ACTION %s\n", lexerActionExecutor)
	}
	// seek to after last char in token
	input.Seek(index)
	l.Line = line
	l.CharPositionInLine = charPos
	if lexerActionExecutor != nil && l.recog != nil {
		lexerActionExecutor.execute(l.recog, input, startIndex)
	}
}
 | 
			
		||||
 | 
			
		||||
func (l *LexerATNSimulator) getReachableTarget(trans Transition, t int) ATNState {
 | 
			
		||||
	if trans.Matches(t, 0, LexerMaxCharValue) {
 | 
			
		||||
		return trans.getTarget()
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (l *LexerATNSimulator) computeStartState(input CharStream, p ATNState) *OrderedATNConfigSet {
 | 
			
		||||
	configs := NewOrderedATNConfigSet()
 | 
			
		||||
	for i := 0; i < len(p.GetTransitions()); i++ {
 | 
			
		||||
		target := p.GetTransitions()[i].getTarget()
 | 
			
		||||
		cfg := NewLexerATNConfig6(target, i+1, BasePredictionContextEMPTY)
 | 
			
		||||
		l.closure(input, cfg, configs, false, false, false)
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return configs
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Since the alternatives within any lexer decision are ordered by
// preference, this method stops pursuing the closure as soon as an accept
// state is reached. After the first accept state is reached by depth-first
// search from {@code config}, all other (potentially reachable) states for
// this rule would have a lower priority.
//
// @return {@code true} if an accept state is reached, otherwise
// {@code false}.
func (l *LexerATNSimulator) closure(input CharStream, config *LexerATNConfig, configs ATNConfigSet,
	currentAltReachedAcceptState, speculative, treatEOFAsEpsilon bool) bool {

	if LexerATNSimulatorDebug {
		fmt.Println("closure(" + config.String() + ")") // config.String(l.recog, true) + ")")
	}

	_, ok := config.state.(*RuleStopState)
	if ok {

		if LexerATNSimulatorDebug {
			if l.recog != nil {
				fmt.Printf("closure at %s rule stop %s\n", l.recog.GetRuleNames()[config.state.GetRuleIndex()], config)
			} else {
				fmt.Printf("closure at rule stop %s\n", config)
			}
		}

		// A rule stop with an empty (or emptyable) context is an accept.
		if config.context == nil || config.context.hasEmptyPath() {
			if config.context == nil || config.context.isEmpty() {
				configs.Add(config, nil)
				return true
			}

			configs.Add(NewLexerATNConfig2(config, config.state, BasePredictionContextEMPTY), nil)
			currentAltReachedAcceptState = true
		}
		// Otherwise "pop" each return state off the context and continue the
		// closure from the follow states.
		if config.context != nil && !config.context.isEmpty() {
			for i := 0; i < config.context.length(); i++ {
				if config.context.getReturnState(i) != BasePredictionContextEmptyReturnState {
					newContext := config.context.GetParent(i) // "pop" return state
					returnState := l.atn.states[config.context.getReturnState(i)]
					cfg := NewLexerATNConfig2(config, returnState, newContext)
					currentAltReachedAcceptState = l.closure(input, cfg, configs, currentAltReachedAcceptState, speculative, treatEOFAsEpsilon)
				}
			}
		}
		return currentAltReachedAcceptState
	}
	// optimization: only non-epsilon states need to be stored in configs.
	if !config.state.GetEpsilonOnlyTransitions() {
		if !currentAltReachedAcceptState || !config.passedThroughNonGreedyDecision {
			configs.Add(config, nil)
		}
	}
	// Follow every epsilon transition out of this state.
	for j := 0; j < len(config.state.GetTransitions()); j++ {
		trans := config.state.GetTransitions()[j]
		cfg := l.getEpsilonTarget(input, config, trans, configs, speculative, treatEOFAsEpsilon)
		if cfg != nil {
			currentAltReachedAcceptState = l.closure(input, cfg, configs,
				currentAltReachedAcceptState, speculative, treatEOFAsEpsilon)
		}
	}
	return currentAltReachedAcceptState
}
 | 
			
		||||
 | 
			
		||||
// side-effect: can alter configs.hasSemanticContext
 | 
			
		||||
func (l *LexerATNSimulator) getEpsilonTarget(input CharStream, config *LexerATNConfig, trans Transition,
 | 
			
		||||
	configs ATNConfigSet, speculative, treatEOFAsEpsilon bool) *LexerATNConfig {
 | 
			
		||||
 | 
			
		||||
	var cfg *LexerATNConfig
 | 
			
		||||
 | 
			
		||||
	if trans.getSerializationType() == TransitionRULE {
 | 
			
		||||
 | 
			
		||||
		rt := trans.(*RuleTransition)
 | 
			
		||||
		newContext := SingletonBasePredictionContextCreate(config.context, rt.followState.GetStateNumber())
 | 
			
		||||
		cfg = NewLexerATNConfig2(config, trans.getTarget(), newContext)
 | 
			
		||||
 | 
			
		||||
	} else if trans.getSerializationType() == TransitionPRECEDENCE {
 | 
			
		||||
		panic("Precedence predicates are not supported in lexers.")
 | 
			
		||||
	} else if trans.getSerializationType() == TransitionPREDICATE {
 | 
			
		||||
		// Track traversing semantic predicates. If we traverse,
 | 
			
		||||
		// we cannot add a DFA state for l "reach" computation
 | 
			
		||||
		// because the DFA would not test the predicate again in the
 | 
			
		||||
		// future. Rather than creating collections of semantic predicates
 | 
			
		||||
		// like v3 and testing them on prediction, v4 will test them on the
 | 
			
		||||
		// fly all the time using the ATN not the DFA. This is slower but
 | 
			
		||||
		// semantically it's not used that often. One of the key elements to
 | 
			
		||||
		// l predicate mechanism is not adding DFA states that see
 | 
			
		||||
		// predicates immediately afterwards in the ATN. For example,
 | 
			
		||||
 | 
			
		||||
		// a : ID {p1}? | ID {p2}?
 | 
			
		||||
 | 
			
		||||
		// should create the start state for rule 'a' (to save start state
 | 
			
		||||
		// competition), but should not create target of ID state. The
 | 
			
		||||
		// collection of ATN states the following ID references includes
 | 
			
		||||
		// states reached by traversing predicates. Since l is when we
 | 
			
		||||
		// test them, we cannot cash the DFA state target of ID.
 | 
			
		||||
 | 
			
		||||
		pt := trans.(*PredicateTransition)
 | 
			
		||||
 | 
			
		||||
		if LexerATNSimulatorDebug {
 | 
			
		||||
			fmt.Println("EVAL rule " + strconv.Itoa(trans.(*PredicateTransition).ruleIndex) + ":" + strconv.Itoa(pt.predIndex))
 | 
			
		||||
		}
 | 
			
		||||
		configs.SetHasSemanticContext(true)
 | 
			
		||||
		if l.evaluatePredicate(input, pt.ruleIndex, pt.predIndex, speculative) {
 | 
			
		||||
			cfg = NewLexerATNConfig4(config, trans.getTarget())
 | 
			
		||||
		}
 | 
			
		||||
	} else if trans.getSerializationType() == TransitionACTION {
 | 
			
		||||
		if config.context == nil || config.context.hasEmptyPath() {
 | 
			
		||||
			// execute actions anywhere in the start rule for a token.
 | 
			
		||||
			//
 | 
			
		||||
			// TODO: if the entry rule is invoked recursively, some
 | 
			
		||||
			// actions may be executed during the recursive call. The
 | 
			
		||||
			// problem can appear when hasEmptyPath() is true but
 | 
			
		||||
			// isEmpty() is false. In l case, the config needs to be
 | 
			
		||||
			// split into two contexts - one with just the empty path
 | 
			
		||||
			// and another with everything but the empty path.
 | 
			
		||||
			// Unfortunately, the current algorithm does not allow
 | 
			
		||||
			// getEpsilonTarget to return two configurations, so
 | 
			
		||||
			// additional modifications are needed before we can support
 | 
			
		||||
			// the split operation.
 | 
			
		||||
			lexerActionExecutor := LexerActionExecutorappend(config.lexerActionExecutor, l.atn.lexerActions[trans.(*ActionTransition).actionIndex])
 | 
			
		||||
			cfg = NewLexerATNConfig3(config, trans.getTarget(), lexerActionExecutor)
 | 
			
		||||
		} else {
 | 
			
		||||
			// ignore actions in referenced rules
 | 
			
		||||
			cfg = NewLexerATNConfig4(config, trans.getTarget())
 | 
			
		||||
		}
 | 
			
		||||
	} else if trans.getSerializationType() == TransitionEPSILON {
 | 
			
		||||
		cfg = NewLexerATNConfig4(config, trans.getTarget())
 | 
			
		||||
	} else if trans.getSerializationType() == TransitionATOM ||
 | 
			
		||||
		trans.getSerializationType() == TransitionRANGE ||
 | 
			
		||||
		trans.getSerializationType() == TransitionSET {
 | 
			
		||||
		if treatEOFAsEpsilon {
 | 
			
		||||
			if trans.Matches(TokenEOF, 0, LexerMaxCharValue) {
 | 
			
		||||
				cfg = NewLexerATNConfig4(config, trans.getTarget())
 | 
			
		||||
			}
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
	return cfg
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Evaluate a predicate specified in the lexer.
 | 
			
		||||
//
 | 
			
		||||
// <p>If {@code speculative} is {@code true}, l method was called before
 | 
			
		||||
// {@link //consume} for the Matched character. This method should call
 | 
			
		||||
// {@link //consume} before evaluating the predicate to ensure position
 | 
			
		||||
// sensitive values, including {@link Lexer//GetText}, {@link Lexer//GetLine},
 | 
			
		||||
// and {@link Lexer//getcolumn}, properly reflect the current
 | 
			
		||||
// lexer state. This method should restore {@code input} and the simulator
 | 
			
		||||
// to the original state before returning (i.e. undo the actions made by the
 | 
			
		||||
// call to {@link //consume}.</p>
 | 
			
		||||
//
 | 
			
		||||
// @param input The input stream.
 | 
			
		||||
// @param ruleIndex The rule containing the predicate.
 | 
			
		||||
// @param predIndex The index of the predicate within the rule.
 | 
			
		||||
// @param speculative {@code true} if the current index in {@code input} is
 | 
			
		||||
// one character before the predicate's location.
 | 
			
		||||
//
 | 
			
		||||
// @return {@code true} if the specified predicate evaluates to
 | 
			
		||||
// {@code true}.
 | 
			
		||||
// /
 | 
			
		||||
func (l *LexerATNSimulator) evaluatePredicate(input CharStream, ruleIndex, predIndex int, speculative bool) bool {
 | 
			
		||||
	// assume true if no recognizer was provided
 | 
			
		||||
	if l.recog == nil {
 | 
			
		||||
		return true
 | 
			
		||||
	}
 | 
			
		||||
	if !speculative {
 | 
			
		||||
		return l.recog.Sempred(nil, ruleIndex, predIndex)
 | 
			
		||||
	}
 | 
			
		||||
	savedcolumn := l.CharPositionInLine
 | 
			
		||||
	savedLine := l.Line
 | 
			
		||||
	index := input.Index()
 | 
			
		||||
	marker := input.Mark()
 | 
			
		||||
 | 
			
		||||
	defer func() {
 | 
			
		||||
		l.CharPositionInLine = savedcolumn
 | 
			
		||||
		l.Line = savedLine
 | 
			
		||||
		input.Seek(index)
 | 
			
		||||
		input.Release(marker)
 | 
			
		||||
	}()
 | 
			
		||||
 | 
			
		||||
	l.Consume(input)
 | 
			
		||||
	return l.recog.Sempred(nil, ruleIndex, predIndex)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (l *LexerATNSimulator) captureSimState(settings *SimState, input CharStream, dfaState *DFAState) {
 | 
			
		||||
	settings.index = input.Index()
 | 
			
		||||
	settings.line = l.Line
 | 
			
		||||
	settings.column = l.CharPositionInLine
 | 
			
		||||
	settings.dfaState = dfaState
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (l *LexerATNSimulator) addDFAEdge(from *DFAState, tk int, to *DFAState, cfgs ATNConfigSet) *DFAState {
 | 
			
		||||
	if to == nil && cfgs != nil {
 | 
			
		||||
		// leading to l call, ATNConfigSet.hasSemanticContext is used as a
 | 
			
		||||
		// marker indicating dynamic predicate evaluation makes l edge
 | 
			
		||||
		// dependent on the specific input sequence, so the static edge in the
 | 
			
		||||
		// DFA should be omitted. The target DFAState is still created since
 | 
			
		||||
		// execATN has the ability to reSynchronize with the DFA state cache
 | 
			
		||||
		// following the predicate evaluation step.
 | 
			
		||||
		//
 | 
			
		||||
		// TJP notes: next time through the DFA, we see a pred again and eval.
 | 
			
		||||
		// If that gets us to a previously created (but dangling) DFA
 | 
			
		||||
		// state, we can continue in pure DFA mode from there.
 | 
			
		||||
		// /
 | 
			
		||||
		suppressEdge := cfgs.HasSemanticContext()
 | 
			
		||||
		cfgs.SetHasSemanticContext(false)
 | 
			
		||||
 | 
			
		||||
		to = l.addDFAState(cfgs)
 | 
			
		||||
 | 
			
		||||
		if suppressEdge {
 | 
			
		||||
			return to
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
	// add the edge
 | 
			
		||||
	if tk < LexerATNSimulatorMinDFAEdge || tk > LexerATNSimulatorMaxDFAEdge {
 | 
			
		||||
		// Only track edges within the DFA bounds
 | 
			
		||||
		return to
 | 
			
		||||
	}
 | 
			
		||||
	if LexerATNSimulatorDebug {
 | 
			
		||||
		fmt.Println("EDGE " + from.String() + " -> " + to.String() + " upon " + strconv.Itoa(tk))
 | 
			
		||||
	}
 | 
			
		||||
	if from.getEdges() == nil {
 | 
			
		||||
		// make room for tokens 1..n and -1 masquerading as index 0
 | 
			
		||||
		from.setEdges(make([]*DFAState, LexerATNSimulatorMaxDFAEdge-LexerATNSimulatorMinDFAEdge+1))
 | 
			
		||||
	}
 | 
			
		||||
	from.setIthEdge(tk-LexerATNSimulatorMinDFAEdge, to) // connect
 | 
			
		||||
 | 
			
		||||
	return to
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Add a NewDFA state if there isn't one with l set of
 | 
			
		||||
// configurations already. This method also detects the first
 | 
			
		||||
// configuration containing an ATN rule stop state. Later, when
 | 
			
		||||
// traversing the DFA, we will know which rule to accept.
 | 
			
		||||
func (l *LexerATNSimulator) addDFAState(configs ATNConfigSet) *DFAState {
 | 
			
		||||
 | 
			
		||||
	proposed := NewDFAState(-1, configs)
 | 
			
		||||
	var firstConfigWithRuleStopState ATNConfig
 | 
			
		||||
 | 
			
		||||
	for _, cfg := range configs.GetItems() {
 | 
			
		||||
 | 
			
		||||
		_, ok := cfg.GetState().(*RuleStopState)
 | 
			
		||||
 | 
			
		||||
		if ok {
 | 
			
		||||
			firstConfigWithRuleStopState = cfg
 | 
			
		||||
			break
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
	if firstConfigWithRuleStopState != nil {
 | 
			
		||||
		proposed.isAcceptState = true
 | 
			
		||||
		proposed.lexerActionExecutor = firstConfigWithRuleStopState.(*LexerATNConfig).lexerActionExecutor
 | 
			
		||||
		proposed.setPrediction(l.atn.ruleToTokenType[firstConfigWithRuleStopState.GetState().GetRuleIndex()])
 | 
			
		||||
	}
 | 
			
		||||
	hash := proposed.hash()
 | 
			
		||||
	dfa := l.decisionToDFA[l.mode]
 | 
			
		||||
	existing, ok := dfa.getState(hash)
 | 
			
		||||
	if ok {
 | 
			
		||||
		return existing
 | 
			
		||||
	}
 | 
			
		||||
	newState := proposed
 | 
			
		||||
	newState.stateNumber = dfa.numStates()
 | 
			
		||||
	configs.SetReadOnly(true)
 | 
			
		||||
	newState.configs = configs
 | 
			
		||||
	dfa.setState(hash, newState)
 | 
			
		||||
	return newState
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (l *LexerATNSimulator) getDFA(mode int) *DFA {
 | 
			
		||||
	return l.decisionToDFA[mode]
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Get the text Matched so far for the current token.
 | 
			
		||||
func (l *LexerATNSimulator) GetText(input CharStream) string {
 | 
			
		||||
	// index is first lookahead char, don't include.
 | 
			
		||||
	return input.GetTextFromInterval(NewInterval(l.startIndex, input.Index()-1))
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (l *LexerATNSimulator) Consume(input CharStream) {
 | 
			
		||||
	curChar := input.LA(1)
 | 
			
		||||
	if curChar == int('\n') {
 | 
			
		||||
		l.Line++
 | 
			
		||||
		l.CharPositionInLine = 0
 | 
			
		||||
	} else {
 | 
			
		||||
		l.CharPositionInLine++
 | 
			
		||||
	}
 | 
			
		||||
	input.Consume()
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (l *LexerATNSimulator) GetCharPositionInLine() int {
 | 
			
		||||
	return l.CharPositionInLine
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (l *LexerATNSimulator) GetLine() int {
 | 
			
		||||
	return l.Line
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (l *LexerATNSimulator) GetTokenName(tt int) string {
 | 
			
		||||
	if tt == -1 {
 | 
			
		||||
		return "EOF"
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return "'" + string(tt) + "'"
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func resetSimState(sim *SimState) {
 | 
			
		||||
	sim.index = -1
 | 
			
		||||
	sim.line = 0
 | 
			
		||||
	sim.column = -1
 | 
			
		||||
	sim.dfaState = nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
type SimState struct {
 | 
			
		||||
	index    int
 | 
			
		||||
	line     int
 | 
			
		||||
	column   int
 | 
			
		||||
	dfaState *DFAState
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func NewSimState() *SimState {
 | 
			
		||||
	s := new(SimState)
 | 
			
		||||
	resetSimState(s)
 | 
			
		||||
	return s
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (s *SimState) reset() {
 | 
			
		||||
	resetSimState(s)
 | 
			
		||||
}
 | 
			
		||||
							
								
								
									
										212
									
								
								vendor/github.com/antlr/antlr4/runtime/Go/antlr/ll1_analyzer.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										212
									
								
								vendor/github.com/antlr/antlr4/runtime/Go/antlr/ll1_analyzer.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							@@ -0,0 +1,212 @@
 | 
			
		||||
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
 | 
			
		||||
// Use of this file is governed by the BSD 3-clause license that
 | 
			
		||||
// can be found in the LICENSE.txt file in the project root.
 | 
			
		||||
 | 
			
		||||
package antlr
 | 
			
		||||
 | 
			
		||||
type LL1Analyzer struct {
 | 
			
		||||
	atn *ATN
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func NewLL1Analyzer(atn *ATN) *LL1Analyzer {
 | 
			
		||||
	la := new(LL1Analyzer)
 | 
			
		||||
	la.atn = atn
 | 
			
		||||
	return la
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
//* Special value added to the lookahead sets to indicate that we hit
 | 
			
		||||
//  a predicate during analysis if {@code seeThruPreds==false}.
 | 
			
		||||
///
 | 
			
		||||
const (
 | 
			
		||||
	LL1AnalyzerHitPred = TokenInvalidType
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
//*
 | 
			
		||||
// Calculates the SLL(1) expected lookahead set for each outgoing transition
 | 
			
		||||
// of an {@link ATNState}. The returned array has one element for each
 | 
			
		||||
// outgoing transition in {@code s}. If the closure from transition
 | 
			
		||||
// <em>i</em> leads to a semantic predicate before Matching a symbol, the
 | 
			
		||||
// element at index <em>i</em> of the result will be {@code nil}.
 | 
			
		||||
//
 | 
			
		||||
// @param s the ATN state
 | 
			
		||||
// @return the expected symbols for each outgoing transition of {@code s}.
 | 
			
		||||
func (la *LL1Analyzer) getDecisionLookahead(s ATNState) []*IntervalSet {
 | 
			
		||||
	if s == nil {
 | 
			
		||||
		return nil
 | 
			
		||||
	}
 | 
			
		||||
	count := len(s.GetTransitions())
 | 
			
		||||
	look := make([]*IntervalSet, count)
 | 
			
		||||
	for alt := 0; alt < count; alt++ {
 | 
			
		||||
		look[alt] = NewIntervalSet()
 | 
			
		||||
		lookBusy := NewSet(nil, nil)
 | 
			
		||||
		seeThruPreds := false // fail to get lookahead upon pred
 | 
			
		||||
		la.look1(s.GetTransitions()[alt].getTarget(), nil, BasePredictionContextEMPTY, look[alt], lookBusy, NewBitSet(), seeThruPreds, false)
 | 
			
		||||
		// Wipe out lookahead for la alternative if we found nothing
 | 
			
		||||
		// or we had a predicate when we !seeThruPreds
 | 
			
		||||
		if look[alt].length() == 0 || look[alt].contains(LL1AnalyzerHitPred) {
 | 
			
		||||
			look[alt] = nil
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
	return look
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
//*
 | 
			
		||||
// Compute set of tokens that can follow {@code s} in the ATN in the
 | 
			
		||||
// specified {@code ctx}.
 | 
			
		||||
//
 | 
			
		||||
// <p>If {@code ctx} is {@code nil} and the end of the rule containing
 | 
			
		||||
// {@code s} is reached, {@link Token//EPSILON} is added to the result set.
 | 
			
		||||
// If {@code ctx} is not {@code nil} and the end of the outermost rule is
 | 
			
		||||
// reached, {@link Token//EOF} is added to the result set.</p>
 | 
			
		||||
//
 | 
			
		||||
// @param s the ATN state
 | 
			
		||||
// @param stopState the ATN state to stop at. This can be a
 | 
			
		||||
// {@link BlockEndState} to detect epsilon paths through a closure.
 | 
			
		||||
// @param ctx the complete parser context, or {@code nil} if the context
 | 
			
		||||
// should be ignored
 | 
			
		||||
//
 | 
			
		||||
// @return The set of tokens that can follow {@code s} in the ATN in the
 | 
			
		||||
// specified {@code ctx}.
 | 
			
		||||
///
 | 
			
		||||
func (la *LL1Analyzer) Look(s, stopState ATNState, ctx RuleContext) *IntervalSet {
 | 
			
		||||
	r := NewIntervalSet()
 | 
			
		||||
	seeThruPreds := true // ignore preds get all lookahead
 | 
			
		||||
	var lookContext PredictionContext
 | 
			
		||||
	if ctx != nil {
 | 
			
		||||
		lookContext = predictionContextFromRuleContext(s.GetATN(), ctx)
 | 
			
		||||
	}
 | 
			
		||||
	la.look1(s, stopState, lookContext, r, NewSet(nil, nil), NewBitSet(), seeThruPreds, true)
 | 
			
		||||
	return r
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
//*
 | 
			
		||||
// Compute set of tokens that can follow {@code s} in the ATN in the
 | 
			
		||||
// specified {@code ctx}.
 | 
			
		||||
//
 | 
			
		||||
// <p>If {@code ctx} is {@code nil} and {@code stopState} or the end of the
 | 
			
		||||
// rule containing {@code s} is reached, {@link Token//EPSILON} is added to
 | 
			
		||||
// the result set. If {@code ctx} is not {@code nil} and {@code addEOF} is
 | 
			
		||||
// {@code true} and {@code stopState} or the end of the outermost rule is
 | 
			
		||||
// reached, {@link Token//EOF} is added to the result set.</p>
 | 
			
		||||
//
 | 
			
		||||
// @param s the ATN state.
 | 
			
		||||
// @param stopState the ATN state to stop at. This can be a
 | 
			
		||||
// {@link BlockEndState} to detect epsilon paths through a closure.
 | 
			
		||||
// @param ctx The outer context, or {@code nil} if the outer context should
 | 
			
		||||
// not be used.
 | 
			
		||||
// @param look The result lookahead set.
 | 
			
		||||
// @param lookBusy A set used for preventing epsilon closures in the ATN
 | 
			
		||||
// from causing a stack overflow. Outside code should pass
 | 
			
		||||
// {@code NewSet<ATNConfig>} for la argument.
 | 
			
		||||
// @param calledRuleStack A set used for preventing left recursion in the
 | 
			
		||||
// ATN from causing a stack overflow. Outside code should pass
 | 
			
		||||
// {@code NewBitSet()} for la argument.
 | 
			
		||||
// @param seeThruPreds {@code true} to true semantic predicates as
 | 
			
		||||
// implicitly {@code true} and "see through them", otherwise {@code false}
 | 
			
		||||
// to treat semantic predicates as opaque and add {@link //HitPred} to the
 | 
			
		||||
// result if one is encountered.
 | 
			
		||||
// @param addEOF Add {@link Token//EOF} to the result if the end of the
 | 
			
		||||
// outermost context is reached. This parameter has no effect if {@code ctx}
 | 
			
		||||
// is {@code nil}.
 | 
			
		||||
 | 
			
		||||
func (la *LL1Analyzer) look2(s, stopState ATNState, ctx PredictionContext, look *IntervalSet, lookBusy *Set, calledRuleStack *BitSet, seeThruPreds, addEOF bool, i int) {
 | 
			
		||||
 | 
			
		||||
	returnState := la.atn.states[ctx.getReturnState(i)]
 | 
			
		||||
	la.look1(returnState, stopState, ctx.GetParent(i), look, lookBusy, calledRuleStack, seeThruPreds, addEOF)
 | 
			
		||||
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (la *LL1Analyzer) look1(s, stopState ATNState, ctx PredictionContext, look *IntervalSet, lookBusy *Set, calledRuleStack *BitSet, seeThruPreds, addEOF bool) {
 | 
			
		||||
 | 
			
		||||
	c := NewBaseATNConfig6(s, 0, ctx)
 | 
			
		||||
 | 
			
		||||
	if lookBusy.contains(c) {
 | 
			
		||||
		return
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	lookBusy.add(c)
 | 
			
		||||
 | 
			
		||||
	if s == stopState {
 | 
			
		||||
		if ctx == nil {
 | 
			
		||||
			look.addOne(TokenEpsilon)
 | 
			
		||||
			return
 | 
			
		||||
		} else if ctx.isEmpty() && addEOF {
 | 
			
		||||
			look.addOne(TokenEOF)
 | 
			
		||||
			return
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	_, ok := s.(*RuleStopState)
 | 
			
		||||
 | 
			
		||||
	if ok {
 | 
			
		||||
		if ctx == nil {
 | 
			
		||||
			look.addOne(TokenEpsilon)
 | 
			
		||||
			return
 | 
			
		||||
		} else if ctx.isEmpty() && addEOF {
 | 
			
		||||
			look.addOne(TokenEOF)
 | 
			
		||||
			return
 | 
			
		||||
		}
 | 
			
		||||
 | 
			
		||||
		if ctx != BasePredictionContextEMPTY {
 | 
			
		||||
	        removed := calledRuleStack.contains(s.GetRuleIndex())
 | 
			
		||||
            defer func() {
 | 
			
		||||
                if removed {
 | 
			
		||||
                    calledRuleStack.add(s.GetRuleIndex())
 | 
			
		||||
                }
 | 
			
		||||
            }()
 | 
			
		||||
        	calledRuleStack.remove(s.GetRuleIndex())
 | 
			
		||||
			// run thru all possible stack tops in ctx
 | 
			
		||||
			for i := 0; i < ctx.length(); i++ {
 | 
			
		||||
				returnState := la.atn.states[ctx.getReturnState(i)]
 | 
			
		||||
				la.look2(returnState, stopState, ctx, look, lookBusy, calledRuleStack, seeThruPreds, addEOF, i)
 | 
			
		||||
			}
 | 
			
		||||
			return
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	n := len(s.GetTransitions())
 | 
			
		||||
 | 
			
		||||
	for i := 0; i < n; i++ {
 | 
			
		||||
		t := s.GetTransitions()[i]
 | 
			
		||||
 | 
			
		||||
		if t1, ok := t.(*RuleTransition); ok {
 | 
			
		||||
			if calledRuleStack.contains(t1.getTarget().GetRuleIndex()) {
 | 
			
		||||
				continue
 | 
			
		||||
			}
 | 
			
		||||
 | 
			
		||||
			newContext := SingletonBasePredictionContextCreate(ctx, t1.followState.GetStateNumber())
 | 
			
		||||
			la.look3(stopState, newContext, look, lookBusy, calledRuleStack, seeThruPreds, addEOF, t1)
 | 
			
		||||
		} else if t2, ok := t.(AbstractPredicateTransition); ok {
 | 
			
		||||
			if seeThruPreds {
 | 
			
		||||
				la.look1(t2.getTarget(), stopState, ctx, look, lookBusy, calledRuleStack, seeThruPreds, addEOF)
 | 
			
		||||
			} else {
 | 
			
		||||
				look.addOne(LL1AnalyzerHitPred)
 | 
			
		||||
			}
 | 
			
		||||
		} else if t.getIsEpsilon() {
 | 
			
		||||
			la.look1(t.getTarget(), stopState, ctx, look, lookBusy, calledRuleStack, seeThruPreds, addEOF)
 | 
			
		||||
		} else if _, ok := t.(*WildcardTransition); ok {
 | 
			
		||||
			look.addRange(TokenMinUserTokenType, la.atn.maxTokenType)
 | 
			
		||||
		} else {
 | 
			
		||||
			set := t.getLabel()
 | 
			
		||||
			if set != nil {
 | 
			
		||||
				if _, ok := t.(*NotSetTransition); ok {
 | 
			
		||||
					set = set.complement(TokenMinUserTokenType, la.atn.maxTokenType)
 | 
			
		||||
				}
 | 
			
		||||
				look.addSet(set)
 | 
			
		||||
			}
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (la *LL1Analyzer) look3(stopState ATNState, ctx PredictionContext, look *IntervalSet, lookBusy *Set, calledRuleStack *BitSet, seeThruPreds, addEOF bool, t1 *RuleTransition) {
 | 
			
		||||
 | 
			
		||||
	newContext := SingletonBasePredictionContextCreate(ctx, t1.followState.GetStateNumber())
 | 
			
		||||
 | 
			
		||||
	defer func() {
 | 
			
		||||
		calledRuleStack.remove(t1.getTarget().GetRuleIndex())
 | 
			
		||||
	}()
 | 
			
		||||
 | 
			
		||||
	calledRuleStack.add(t1.getTarget().GetRuleIndex())
 | 
			
		||||
	la.look1(t1.getTarget(), stopState, newContext, look, lookBusy, calledRuleStack, seeThruPreds, addEOF)
 | 
			
		||||
 | 
			
		||||
}
 | 
			
		||||
							
								
								
									
										718
									
								
								vendor/github.com/antlr/antlr4/runtime/Go/antlr/parser.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										718
									
								
								vendor/github.com/antlr/antlr4/runtime/Go/antlr/parser.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							@@ -0,0 +1,718 @@
 | 
			
		||||
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
 | 
			
		||||
// Use of this file is governed by the BSD 3-clause license that
 | 
			
		||||
// can be found in the LICENSE.txt file in the project root.
 | 
			
		||||
 | 
			
		||||
package antlr
 | 
			
		||||
 | 
			
		||||
import (
 | 
			
		||||
	"fmt"
 | 
			
		||||
	"strconv"
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
type Parser interface {
 | 
			
		||||
	Recognizer
 | 
			
		||||
 | 
			
		||||
	GetInterpreter() *ParserATNSimulator
 | 
			
		||||
 | 
			
		||||
	GetTokenStream() TokenStream
 | 
			
		||||
	GetTokenFactory() TokenFactory
 | 
			
		||||
	GetParserRuleContext() ParserRuleContext
 | 
			
		||||
	SetParserRuleContext(ParserRuleContext)
 | 
			
		||||
	Consume() Token
 | 
			
		||||
	GetParseListeners() []ParseTreeListener
 | 
			
		||||
 | 
			
		||||
	GetErrorHandler() ErrorStrategy
 | 
			
		||||
	SetErrorHandler(ErrorStrategy)
 | 
			
		||||
	GetInputStream() IntStream
 | 
			
		||||
	GetCurrentToken() Token
 | 
			
		||||
	GetExpectedTokens() *IntervalSet
 | 
			
		||||
	NotifyErrorListeners(string, Token, RecognitionException)
 | 
			
		||||
	IsExpectedToken(int) bool
 | 
			
		||||
	GetPrecedence() int
 | 
			
		||||
	GetRuleInvocationStack(ParserRuleContext) []string
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
type BaseParser struct {
 | 
			
		||||
	*BaseRecognizer
 | 
			
		||||
 | 
			
		||||
	Interpreter     *ParserATNSimulator
 | 
			
		||||
	BuildParseTrees bool
 | 
			
		||||
 | 
			
		||||
	input           TokenStream
 | 
			
		||||
	errHandler      ErrorStrategy
 | 
			
		||||
	precedenceStack IntStack
 | 
			
		||||
	ctx             ParserRuleContext
 | 
			
		||||
 | 
			
		||||
	tracer         *TraceListener
 | 
			
		||||
	parseListeners []ParseTreeListener
 | 
			
		||||
	_SyntaxErrors  int
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// p.is all the parsing support code essentially most of it is error
 | 
			
		||||
// recovery stuff.//
 | 
			
		||||
func NewBaseParser(input TokenStream) *BaseParser {
 | 
			
		||||
 | 
			
		||||
	p := new(BaseParser)
 | 
			
		||||
 | 
			
		||||
	p.BaseRecognizer = NewBaseRecognizer()
 | 
			
		||||
 | 
			
		||||
	// The input stream.
 | 
			
		||||
	p.input = nil
 | 
			
		||||
	// The error handling strategy for the parser. The default value is a new
 | 
			
		||||
	// instance of {@link DefaultErrorStrategy}.
 | 
			
		||||
	p.errHandler = NewDefaultErrorStrategy()
 | 
			
		||||
	p.precedenceStack = make([]int, 0)
 | 
			
		||||
	p.precedenceStack.Push(0)
 | 
			
		||||
	// The {@link ParserRuleContext} object for the currently executing rule.
 | 
			
		||||
	// p.is always non-nil during the parsing process.
 | 
			
		||||
	p.ctx = nil
 | 
			
		||||
	// Specifies whether or not the parser should construct a parse tree during
 | 
			
		||||
	// the parsing process. The default value is {@code true}.
 | 
			
		||||
	p.BuildParseTrees = true
 | 
			
		||||
	// When {@link //setTrace}{@code (true)} is called, a reference to the
 | 
			
		||||
	// {@link TraceListener} is stored here so it can be easily removed in a
 | 
			
		||||
	// later call to {@link //setTrace}{@code (false)}. The listener itself is
 | 
			
		||||
	// implemented as a parser listener so p.field is not directly used by
 | 
			
		||||
	// other parser methods.
 | 
			
		||||
	p.tracer = nil
 | 
			
		||||
	// The list of {@link ParseTreeListener} listeners registered to receive
 | 
			
		||||
	// events during the parse.
 | 
			
		||||
	p.parseListeners = nil
 | 
			
		||||
	// The number of syntax errors Reported during parsing. p.value is
 | 
			
		||||
	// incremented each time {@link //NotifyErrorListeners} is called.
 | 
			
		||||
	p._SyntaxErrors = 0
 | 
			
		||||
	p.SetInputStream(input)
 | 
			
		||||
 | 
			
		||||
	return p
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// bypassAltsAtnCache maps from the serialized ATN string to the deserialized
// ATN with bypass alternatives.
//
// NOTE(review): the value type here is int, not *ATN, so this cannot actually
// hold deserialized ATNs as the original comment claims — confirm against
// upstream ANTLR whether this cache is used anywhere. Preserved as-is.
//
// See ATNDeserializationOptions.isGenerateRuleBypassTransitions().
var bypassAltsAtnCache = make(map[string]int)
 | 
			
		||||
 | 
			
		||||
// reset the parser's state//
 | 
			
		||||
func (p *BaseParser) reset() {
 | 
			
		||||
	if p.input != nil {
 | 
			
		||||
		p.input.Seek(0)
 | 
			
		||||
	}
 | 
			
		||||
	p.errHandler.reset(p)
 | 
			
		||||
	p.ctx = nil
 | 
			
		||||
	p._SyntaxErrors = 0
 | 
			
		||||
	p.SetTrace(nil)
 | 
			
		||||
	p.precedenceStack = make([]int, 0)
 | 
			
		||||
	p.precedenceStack.Push(0)
 | 
			
		||||
	if p.Interpreter != nil {
 | 
			
		||||
		p.Interpreter.reset()
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (p *BaseParser) GetErrorHandler() ErrorStrategy {
 | 
			
		||||
	return p.errHandler
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (p *BaseParser) SetErrorHandler(e ErrorStrategy) {
 | 
			
		||||
	p.errHandler = e
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Match current input symbol against {@code ttype}. If the symbol type
 | 
			
		||||
// Matches, {@link ANTLRErrorStrategy//ReportMatch} and {@link //consume} are
 | 
			
		||||
// called to complete the Match process.
 | 
			
		||||
//
 | 
			
		||||
// <p>If the symbol type does not Match,
 | 
			
		||||
// {@link ANTLRErrorStrategy//recoverInline} is called on the current error
 | 
			
		||||
// strategy to attempt recovery. If {@link //getBuildParseTree} is
 | 
			
		||||
// {@code true} and the token index of the symbol returned by
 | 
			
		||||
// {@link ANTLRErrorStrategy//recoverInline} is -1, the symbol is added to
 | 
			
		||||
// the parse tree by calling {@link ParserRuleContext//addErrorNode}.</p>
 | 
			
		||||
//
 | 
			
		||||
// @param ttype the token type to Match
 | 
			
		||||
// @return the Matched symbol
 | 
			
		||||
// @panics RecognitionException if the current input symbol did not Match
 | 
			
		||||
// {@code ttype} and the error strategy could not recover from the
 | 
			
		||||
// mismatched symbol
 | 
			
		||||
 | 
			
		||||
func (p *BaseParser) Match(ttype int) Token {
 | 
			
		||||
 | 
			
		||||
	t := p.GetCurrentToken()
 | 
			
		||||
 | 
			
		||||
	if t.GetTokenType() == ttype {
 | 
			
		||||
		p.errHandler.ReportMatch(p)
 | 
			
		||||
		p.Consume()
 | 
			
		||||
	} else {
 | 
			
		||||
		t = p.errHandler.RecoverInline(p)
 | 
			
		||||
		if p.BuildParseTrees && t.GetTokenIndex() == -1 {
 | 
			
		||||
			// we must have conjured up a Newtoken during single token
 | 
			
		||||
			// insertion
 | 
			
		||||
			// if it's not the current symbol
 | 
			
		||||
			p.ctx.AddErrorNode(t)
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return t
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// MatchWildcard consumes and returns the current symbol as a wildcard
// match. If the symbol's token type is valid (greater than 0), ReportMatch
// and Consume complete the match; otherwise the error strategy's
// RecoverInline attempts recovery, and a conjured-up token (index -1) is
// recorded as an error node when parse trees are being built.
func (p *BaseParser) MatchWildcard() Token {
	t := p.GetCurrentToken()
	if t.GetTokenType() > 0 {
		p.errHandler.ReportMatch(p)
		p.Consume()
	} else {
		t = p.errHandler.RecoverInline(p)
		if p.BuildParseTrees && t.GetTokenIndex() == -1 {
			// we must have conjured up a Newtoken during single token
			// insertion if it's not the current symbol
			p.ctx.AddErrorNode(t)
		}
	}
	return t
}
 | 
			
		||||
 | 
			
		||||
// GetParserRuleContext returns the rule context the parser is currently in.
func (p *BaseParser) GetParserRuleContext() ParserRuleContext {
	return p.ctx
}
 | 
			
		||||
 | 
			
		||||
// SetParserRuleContext replaces the parser's current rule context.
func (p *BaseParser) SetParserRuleContext(v ParserRuleContext) {
	p.ctx = v
}
 | 
			
		||||
 | 
			
		||||
func (p *BaseParser) GetParseListeners() []ParseTreeListener {
 | 
			
		||||
	if p.parseListeners == nil {
 | 
			
		||||
		return make([]ParseTreeListener, 0)
 | 
			
		||||
	}
 | 
			
		||||
	return p.parseListeners
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// AddParseListener registers listener to receive events during the parsing
// process.
//
// To support output-preserving grammar transformations (left-recursion
// removal, automated left-factoring, optimized code generation), calls to
// listener methods during the parse may differ substantially from calls made
// by ParseTreeWalker.DEFAULT after the parse is complete: rule entry and
// exit events may occur in a different order, and some rule entry calls may
// be omitted. With those exceptions, listener calls are deterministic for
// identical input — unless the grammar, the ANTLR command-line options, or
// the ANTLR tool version used to generate the parser change.
//
// Panics when listener is nil.
func (p *BaseParser) AddParseListener(listener ParseTreeListener) {
	if listener == nil {
		panic("listener")
	}
	if p.parseListeners == nil {
		p.parseListeners = make([]ParseTreeListener, 0)
	}
	p.parseListeners = append(p.parseListeners, listener)
}
 | 
			
		||||
 | 
			
		||||
// RemoveParseListener removes listener from the list of parse listeners.
// If listener is nil or has not been added as a parse listener, this method
// does nothing. When the last listener is removed, the slice is reset to
// nil so listener-presence checks elsewhere stay cheap.
func (p *BaseParser) RemoveParseListener(listener ParseTreeListener) {
	if p.parseListeners != nil {
		// find the first occurrence of listener
		idx := -1
		for i, v := range p.parseListeners {
			if v == listener {
				idx = i
				break
			}
		}

		if idx == -1 {
			return
		}

		// remove the listener from the slice
		p.parseListeners = append(p.parseListeners[0:idx], p.parseListeners[idx+1:]...)

		if len(p.parseListeners) == 0 {
			p.parseListeners = nil
		}
	}
}
 | 
			
		||||
 | 
			
		||||
// removeParseListeners removes all parse listeners.
func (p *BaseParser) removeParseListeners() {
	p.parseListeners = nil
}
 | 
			
		||||
 | 
			
		||||
// TriggerEnterRuleEvent notifies all parse listeners of an enter-rule event
// for the current context, in registration order.
func (p *BaseParser) TriggerEnterRuleEvent() {
	if p.parseListeners != nil {
		ctx := p.ctx
		for _, listener := range p.parseListeners {
			listener.EnterEveryRule(ctx)
			ctx.EnterRule(listener)
		}
	}
}
 | 
			
		||||
 | 
			
		||||
// TriggerExitRuleEvent notifies all parse listeners of an exit-rule event
// for the current context. Listeners are walked in reverse registration
// order so exits mirror the enter order.
//
// See AddParseListener.
func (p *BaseParser) TriggerExitRuleEvent() {
	if p.parseListeners != nil {
		// reverse order walk of listeners
		ctx := p.ctx
		l := len(p.parseListeners) - 1

		for i := range p.parseListeners {
			listener := p.parseListeners[l-i]
			ctx.ExitRule(listener)
			listener.ExitEveryRule(ctx)
		}
	}
}
 | 
			
		||||
 | 
			
		||||
// GetInterpreter returns the parser's ATN simulator.
func (p *BaseParser) GetInterpreter() *ParserATNSimulator {
	return p.Interpreter
}
 | 
			
		||||
 | 
			
		||||
// GetATN returns the ATN used by the parser's interpreter.
func (p *BaseParser) GetATN() *ATN {
	return p.Interpreter.atn
}
 | 
			
		||||
 | 
			
		||||
// GetTokenFactory returns the token factory of the input's token source.
func (p *BaseParser) GetTokenFactory() TokenFactory {
	return p.input.GetTokenSource().GetTokenFactory()
}
 | 
			
		||||
 | 
			
		||||
// setTokenFactory tells the token source about a new way to create tokens.
func (p *BaseParser) setTokenFactory(factory TokenFactory) {
	p.input.GetTokenSource().setTokenFactory(factory)
}
 | 
			
		||||
 | 
			
		||||
// GetATNWithBypassAlts would lazily create (and cache) the ATN with bypass
// alternatives, which is expensive to build. It is currently unimplemented
// in the Go runtime and always panics; the commented-out code below sketches
// the intended port of the Java implementation.
func (p *BaseParser) GetATNWithBypassAlts() {

	// TODO
	panic("Not implemented!")

	//	serializedAtn := p.getSerializedATN()
	//	if (serializedAtn == nil) {
	//		panic("The current parser does not support an ATN with bypass alternatives.")
	//	}
	//	result := p.bypassAltsAtnCache[serializedAtn]
	//	if (result == nil) {
	//		deserializationOptions := NewATNDeserializationOptions(nil)
	//		deserializationOptions.generateRuleBypassTransitions = true
	//		result = NewATNDeserializer(deserializationOptions).deserialize(serializedAtn)
	//		p.bypassAltsAtnCache[serializedAtn] = result
	//	}
	//	return result
}
 | 
			
		||||
 | 
			
		||||
// compileParseTreePattern would be the preferred method of getting a tree
// pattern, e.g.:
//
//	ParseTree t = parser.expr()
//	ParseTreePattern p = parser.compileParseTreePattern("<ID>+0", MyParser.RULE_expr)
//	ParseTreeMatch m = p.Match(t)
//	String id = m.Get("ID")
//
// It is currently unimplemented in the Go runtime and always panics; the
// commented-out code below sketches the intended port.
func (p *BaseParser) compileParseTreePattern(pattern, patternRuleIndex, lexer Lexer) {

	panic("NewParseTreePatternMatcher not implemented!")
	//
	//	if (lexer == nil) {
	//		if (p.GetTokenStream() != nil) {
	//			tokenSource := p.GetTokenStream().GetTokenSource()
	//			if _, ok := tokenSource.(ILexer); ok {
	//				lexer = tokenSource
	//			}
	//		}
	//	}
	//	if (lexer == nil) {
	//		panic("Parser can't discover a lexer to use")
	//	}

	//	m := NewParseTreePatternMatcher(lexer, p)
	//	return m.compile(pattern, patternRuleIndex)
}
 | 
			
		||||
 | 
			
		||||
// GetInputStream returns the parser's token stream as a generic IntStream.
func (p *BaseParser) GetInputStream() IntStream {
	return p.GetTokenStream()
}
 | 
			
		||||
 | 
			
		||||
// SetInputStream sets the token stream; equivalent to SetTokenStream.
func (p *BaseParser) SetInputStream(input TokenStream) {
	p.SetTokenStream(input)
}
 | 
			
		||||
 | 
			
		||||
// GetTokenStream returns the parser's current token stream.
func (p *BaseParser) GetTokenStream() TokenStream {
	return p.input
}
 | 
			
		||||
 | 
			
		||||
// SetTokenStream sets the token stream and resets the parser. The input is
// cleared before reset so reset does not touch the old stream, then the new
// stream is installed.
func (p *BaseParser) SetTokenStream(input TokenStream) {
	p.input = nil
	p.reset()
	p.input = input
}
 | 
			
		||||
 | 
			
		||||
// GetCurrentToken returns the current (one-token lookahead) input symbol.
// Match needs to return the current input symbol, which gets put into the
// label for the associated token ref, e.g. x=ID.
func (p *BaseParser) GetCurrentToken() Token {
	return p.input.LT(1)
}
 | 
			
		||||
 | 
			
		||||
// NotifyErrorListeners reports a syntax error to the registered error
// listeners, incrementing the parser's syntax-error count. When
// offendingToken is nil, the current token is used for position reporting.
func (p *BaseParser) NotifyErrorListeners(msg string, offendingToken Token, err RecognitionException) {
	if offendingToken == nil {
		offendingToken = p.GetCurrentToken()
	}
	p._SyntaxErrors++
	line := offendingToken.GetLine()
	column := offendingToken.GetColumn()
	listener := p.GetErrorListenerDispatch()
	listener.SyntaxError(p, offendingToken, line, column, msg, err)
}
 | 
			
		||||
 | 
			
		||||
// Consume advances the input stream past the current token (unless it is
// EOF) and returns the consumed token. When building parse trees or when
// parse listeners are attached, the token is also recorded in the current
// context — as an error node if the error handler is in recovery mode,
// otherwise as an ordinary terminal node — and listeners are notified of
// the new node.
func (p *BaseParser) Consume() Token {
	o := p.GetCurrentToken()
	if o.GetTokenType() != TokenEOF {
		p.GetInputStream().Consume()
	}
	hasListener := p.parseListeners != nil && len(p.parseListeners) > 0
	if p.BuildParseTrees || hasListener {
		if p.errHandler.inErrorRecoveryMode(p) {
			node := p.ctx.AddErrorNode(o)
			if p.parseListeners != nil {
				for _, l := range p.parseListeners {
					l.VisitErrorNode(node)
				}
			}

		} else {
			node := p.ctx.AddTokenNode(o)
			if p.parseListeners != nil {
				for _, l := range p.parseListeners {
					l.VisitTerminal(node)
				}
			}
		}
		//        node.invokingState = p.state
	}

	return o
}
 | 
			
		||||
 | 
			
		||||
// addContextToParseTree adds the current context to its parent's children,
// if it has a parent.
func (p *BaseParser) addContextToParseTree() {
	// add current context to parent if we have a parent
	if p.ctx.GetParent() != nil {
		p.ctx.GetParent().(ParserRuleContext).AddChild(p.ctx)
	}
}
 | 
			
		||||
 | 
			
		||||
// EnterRule is called by generated parsers upon entry to a rule: it records
// the ATN state, installs localctx as the current context, marks the rule's
// start token, hooks the context into the parse tree (when tree building is
// on), and fires enter-rule listener events.
func (p *BaseParser) EnterRule(localctx ParserRuleContext, state, ruleIndex int) {
	p.SetState(state)
	p.ctx = localctx
	p.ctx.SetStart(p.input.LT(1))
	if p.BuildParseTrees {
		p.addContextToParseTree()
	}
	if p.parseListeners != nil {
		p.TriggerEnterRuleEvent()
	}
}
 | 
			
		||||
 | 
			
		||||
// ExitRule is called by generated parsers upon exit from a rule: it marks
// the rule's stop token, fires exit-rule listener events (before the context
// reverts to its parent), restores the invoking ATN state, and pops the
// context back to its parent (or nil at the outermost rule).
func (p *BaseParser) ExitRule() {
	p.ctx.SetStop(p.input.LT(-1))
	// trigger event on ctx, before it reverts to parent
	if p.parseListeners != nil {
		p.TriggerExitRuleEvent()
	}
	p.SetState(p.ctx.GetInvokingState())
	if p.ctx.GetParent() != nil {
		p.ctx = p.ctx.GetParent().(ParserRuleContext)
	} else {
		p.ctx = nil
	}
}
 | 
			
		||||
 | 
			
		||||
// EnterOuterAlt is called by generated parsers when entering an outermost
// alternative: it records the alternative number on localctx and, when tree
// building is on and localctx differs from the current context, replaces the
// previously added generic context in the parent's children with localctx.
func (p *BaseParser) EnterOuterAlt(localctx ParserRuleContext, altNum int) {
	localctx.SetAltNumber(altNum)
	// if we have a new localctx, make sure we replace existing ctx
	// that is previous child of parse tree
	if p.BuildParseTrees && p.ctx != localctx {
		if p.ctx.GetParent() != nil {
			p.ctx.GetParent().(ParserRuleContext).RemoveLastChild()
			p.ctx.GetParent().(ParserRuleContext).AddChild(localctx)
		}
	}
	p.ctx = localctx
}
 | 
			
		||||
 | 
			
		||||
// Get the precedence level for the top-most precedence rule.
 | 
			
		||||
//
 | 
			
		||||
// @return The precedence level for the top-most precedence rule, or -1 if
 | 
			
		||||
// the parser context is not nested within a precedence rule.
 | 
			
		||||
 | 
			
		||||
func (p *BaseParser) GetPrecedence() int {
 | 
			
		||||
	if len(p.precedenceStack) == 0 {
 | 
			
		||||
		return -1
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return p.precedenceStack[len(p.precedenceStack)-1]
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// EnterRecursionRule is called by generated parsers upon entry to a
// left-recursive rule: it records the ATN state, pushes the rule's
// precedence, installs localctx with its start token, and simulates rule
// entry for listeners.
func (p *BaseParser) EnterRecursionRule(localctx ParserRuleContext, state, ruleIndex, precedence int) {
	p.SetState(state)
	p.precedenceStack.Push(precedence)
	p.ctx = localctx
	p.ctx.SetStart(p.input.LT(1))
	if p.parseListeners != nil {
		p.TriggerEnterRuleEvent() // simulates rule entry for
		// left-recursive rules
	}
}
 | 
			
		||||
 | 
			
		||||
// PushNewRecursionContext is like EnterRule but for recursive rules: the
// previous context becomes a child of localctx (which inherits its start
// token), and localctx becomes current. Listeners see a simulated rule
// entry.
func (p *BaseParser) PushNewRecursionContext(localctx ParserRuleContext, state, ruleIndex int) {
	previous := p.ctx
	previous.SetParent(localctx)
	previous.SetInvokingState(state)
	previous.SetStop(p.input.LT(-1))

	p.ctx = localctx
	p.ctx.SetStart(previous.GetStart())
	if p.BuildParseTrees {
		p.ctx.AddChild(previous)
	}
	if p.parseListeners != nil {
		p.TriggerEnterRuleEvent() // simulates rule entry for
		// left-recursive rules
	}
}
 | 
			
		||||
 | 
			
		||||
// UnrollRecursionContexts unwinds the context stack built up by a
// left-recursive rule back to parentCtx: it pops the rule's precedence,
// marks the stop token, fires exit events for each unwound context (when
// listeners exist), and re-hooks the resulting context under parentCtx in
// the parse tree.
func (p *BaseParser) UnrollRecursionContexts(parentCtx ParserRuleContext) {
	p.precedenceStack.Pop()
	p.ctx.SetStop(p.input.LT(-1))
	retCtx := p.ctx // save current ctx (return value)
	// unroll so ctx is as it was before call to recursive method
	if p.parseListeners != nil {
		for p.ctx != parentCtx {
			p.TriggerExitRuleEvent()
			p.ctx = p.ctx.GetParent().(ParserRuleContext)
		}
	} else {
		p.ctx = parentCtx
	}
	// hook into tree
	retCtx.SetParent(parentCtx)
	if p.BuildParseTrees && parentCtx != nil {
		// add return ctx into invoking rule's tree
		parentCtx.AddChild(retCtx)
	}
}
 | 
			
		||||
 | 
			
		||||
// GetInvokingContext walks up the context chain and returns the first
// context whose rule index equals ruleIndex, or nil if none is found.
// NOTE(review): if a context's parent is nil the type assertion below would
// panic rather than terminate the loop — presumably callers only invoke this
// while the chain is rooted; confirm against callers.
func (p *BaseParser) GetInvokingContext(ruleIndex int) ParserRuleContext {
	ctx := p.ctx
	for ctx != nil {
		if ctx.GetRuleIndex() == ruleIndex {
			return ctx
		}
		ctx = ctx.GetParent().(ParserRuleContext)
	}
	return nil
}
 | 
			
		||||
 | 
			
		||||
// Precpred implements the precedence predicate used by generated
// left-recursive rules: it reports whether precedence is at least the
// current top of the precedence stack.
func (p *BaseParser) Precpred(localctx RuleContext, precedence int) bool {
	return precedence >= p.precedenceStack[len(p.precedenceStack)-1]
}
 | 
			
		||||
 | 
			
		||||
// inContext is a stub that always reports false.
func (p *BaseParser) inContext(context ParserRuleContext) bool {
	// TODO: useful in parser?
	return false
}
 | 
			
		||||
 | 
			
		||||
// IsExpectedToken checks whether symbol can follow the current state in the
// ATN. The behavior is equivalent to
//
//	return getExpectedTokens().contains(symbol)
//
// but is implemented so the complete context-sensitive follow set does not
// need to be explicitly constructed: the follow set of the current state is
// checked first, then — while the set contains epsilon (meaning the end of a
// rule can be reached) — the follow sets of each invoking rule's follow
// state are checked up the context chain. EOF matches when epsilon survives
// to the outermost context.
func (p *BaseParser) IsExpectedToken(symbol int) bool {
	atn := p.Interpreter.atn
	ctx := p.ctx
	s := atn.states[p.state]
	following := atn.NextTokens(s, nil)
	if following.contains(symbol) {
		return true
	}
	if !following.contains(TokenEpsilon) {
		return false
	}
	for ctx != nil && ctx.GetInvokingState() >= 0 && following.contains(TokenEpsilon) {
		invokingState := atn.states[ctx.GetInvokingState()]
		rt := invokingState.GetTransitions()[0]
		following = atn.NextTokens(rt.(*RuleTransition).followState, nil)
		if following.contains(symbol) {
			return true
		}
		ctx = ctx.GetParent().(ParserRuleContext)
	}
	if following.contains(TokenEpsilon) && symbol == TokenEOF {
		return true
	}

	return false
}
 | 
			
		||||
 | 
			
		||||
// GetExpectedTokens computes the set of input symbols which could follow the
// current parser state and context, as given by GetState and GetContext
// respectively.
func (p *BaseParser) GetExpectedTokens() *IntervalSet {
	return p.Interpreter.atn.getExpectedTokens(p.state, p.ctx)
}
 | 
			
		||||
 | 
			
		||||
// GetExpectedTokensWithinCurrentRule returns the tokens that could follow
// the current ATN state, ignoring the surrounding context.
func (p *BaseParser) GetExpectedTokensWithinCurrentRule() *IntervalSet {
	atn := p.Interpreter.atn
	s := atn.states[p.state]
	return atn.NextTokens(s, nil)
}
 | 
			
		||||
 | 
			
		||||
// Get a rule's index (i.e., {@code RULE_ruleName} field) or -1 if not found.//
 | 
			
		||||
func (p *BaseParser) GetRuleIndex(ruleName string) int {
 | 
			
		||||
	var ruleIndex, ok = p.GetRuleIndexMap()[ruleName]
 | 
			
		||||
	if ok {
 | 
			
		||||
		return ruleIndex
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return -1
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// GetRuleInvocationStack returns the list of rule names leading up to a call
// to the current rule (innermost first), starting from c or, when c is nil,
// from the parser's current context. Contexts without a valid rule index
// contribute "n/a". Very useful for error messages; override for more
// detail such as file/line info of where in the ATN a rule is invoked.
func (p *BaseParser) GetRuleInvocationStack(c ParserRuleContext) []string {
	if c == nil {
		c = p.ctx
	}
	stack := make([]string, 0)
	for c != nil {
		// compute what follows who invoked us
		ruleIndex := c.GetRuleIndex()
		if ruleIndex < 0 {
			stack = append(stack, "n/a")
		} else {
			stack = append(stack, p.GetRuleNames()[ruleIndex])
		}

		vp := c.GetParent()

		if vp == nil {
			break
		}

		c = vp.(ParserRuleContext)
	}
	return stack
}
 | 
			
		||||
 | 
			
		||||
// GetDFAStrings returns a string rendering of all decision DFAs, for
// debugging and other purposes.
func (p *BaseParser) GetDFAStrings() string {
	return fmt.Sprint(p.Interpreter.decisionToDFA)
}
 | 
			
		||||
 | 
			
		||||
// DumpDFA prints every non-empty decision DFA to stdout, for debugging and
// other purposes. Non-empty DFAs after the first are separated by a blank
// line.
func (p *BaseParser) DumpDFA() {
	seenOne := false
	for _, dfa := range p.Interpreter.decisionToDFA {
		if dfa.numStates() > 0 {
			if seenOne {
				fmt.Println()
			}
			fmt.Println("Decision " + strconv.Itoa(dfa.decision) + ":")
			fmt.Print(dfa.String(p.LiteralNames, p.SymbolicNames))
			seenOne = true
		}
	}
}
 | 
			
		||||
 | 
			
		||||
// GetSourceName returns the grammar file name as the input's source name.
func (p *BaseParser) GetSourceName() string {
	return p.GrammarFileName
}
 | 
			
		||||
 | 
			
		||||
// SetTrace enables or disables parse tracing. During a parse it is sometimes
// useful to listen in on rule entry/exit events as well as token matches —
// this is for quick and dirty debugging. Passing nil removes any installed
// tracer; otherwise any existing tracer is replaced with a fresh
// TraceListener registered as a parse listener.
func (p *BaseParser) SetTrace(trace *TraceListener) {
	if trace == nil {
		p.RemoveParseListener(p.tracer)
		p.tracer = nil
	} else {
		if p.tracer != nil {
			p.RemoveParseListener(p.tracer)
		}
		p.tracer = NewTraceListener(p)
		p.AddParseListener(p.tracer)
	}
}
 | 
			
		||||
							
								
								
									
										1473
									
								
								vendor/github.com/antlr/antlr4/runtime/Go/antlr/parser_atn_simulator.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										1473
									
								
								vendor/github.com/antlr/antlr4/runtime/Go/antlr/parser_atn_simulator.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
										
											
												File diff suppressed because it is too large
												Load Diff
											
										
									
								
							
							
								
								
									
										362
									
								
								vendor/github.com/antlr/antlr4/runtime/Go/antlr/parser_rule_context.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										362
									
								
								vendor/github.com/antlr/antlr4/runtime/Go/antlr/parser_rule_context.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							@@ -0,0 +1,362 @@
 | 
			
		||||
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
 | 
			
		||||
// Use of this file is governed by the BSD 3-clause license that
 | 
			
		||||
// can be found in the LICENSE.txt file in the project root.
 | 
			
		||||
 | 
			
		||||
package antlr
 | 
			
		||||
 | 
			
		||||
import (
 | 
			
		||||
	"reflect"
 | 
			
		||||
	"strconv"
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
// ParserRuleContext extends RuleContext with the operations a parser needs
// while building a parse tree: recording the failure exception, adding
// terminal/error child nodes, dispatching listener enter/exit events, and
// tracking the start/stop tokens of the rule's matched region.
type ParserRuleContext interface {
	RuleContext

	// SetException records the RecognitionException that forced this rule
	// to return, if any.
	SetException(RecognitionException)

	// AddTokenNode / AddErrorNode wrap a token as a child node of this
	// context and return the created node.
	AddTokenNode(token Token) *TerminalNodeImpl
	AddErrorNode(badToken Token) *ErrorNodeImpl

	// EnterRule / ExitRule dispatch the rule-specific listener callbacks.
	EnterRule(listener ParseTreeListener)
	ExitRule(listener ParseTreeListener)

	// Start/stop tokens delimit the input matched by this rule.
	SetStart(Token)
	GetStart() Token

	SetStop(Token)
	GetStop() Token

	// Child management used while building the tree.
	AddChild(child RuleContext) RuleContext
	RemoveLastChild()
}
 | 
			
		||||
 | 
			
		||||
// BaseParserRuleContext is the default ParserRuleContext implementation,
// embedding BaseRuleContext and adding the parse-tree bookkeeping fields.
type BaseParserRuleContext struct {
	*BaseRuleContext

	// start and stop delimit the tokens matched by this rule.
	start, stop Token
	// exception is the RecognitionException that forced this rule to
	// return, or nil if the rule completed successfully.
	exception RecognitionException
	// children holds the subtrees (terminal, error, and rule nodes) added
	// while parsing; nil when no tree is being built.
	children []Tree
}
 | 
			
		||||
 | 
			
		||||
// NewBaseParserRuleContext creates a context with the given parent and
// invoking ATN state number, with an as-yet-unknown rule index (-1) and no
// children, tokens, or exception.
func NewBaseParserRuleContext(parent ParserRuleContext, invokingStateNumber int) *BaseParserRuleContext {
	prc := new(BaseParserRuleContext)

	prc.BaseRuleContext = NewBaseRuleContext(parent, invokingStateNumber)

	prc.RuleIndex = -1
	// If we are debugging or building a parse tree for a Visitor, we need
	// to track all of the tokens and rule invocations associated with this
	// rule's context. This stays empty when parsing without tree
	// construction because we then don't need to track the details of how
	// the rule is parsed.
	prc.children = nil
	prc.start = nil
	prc.stop = nil
	// The exception that forced this rule to return. If the rule
	// successfully completed, this is nil.
	prc.exception = nil

	return prc
}
 | 
			
		||||
 | 
			
		||||
// SetException records the exception that forced this rule to return.
func (prc *BaseParserRuleContext) SetException(e RecognitionException) {
	prc.exception = e
}
 | 
			
		||||
 | 
			
		||||
// GetChildren returns this context's child subtrees (may be nil).
func (prc *BaseParserRuleContext) GetChildren() []Tree {
	return prc.children
}
 | 
			
		||||
 | 
			
		||||
// CopyFrom copies the RuleContext fields (parent, invoking state) and the
// start/stop tokens from ctx, deliberately leaving children nil: copies are
// used when re-labeling a context, and the children belong to the original.
func (prc *BaseParserRuleContext) CopyFrom(ctx *BaseParserRuleContext) {
	// from RuleContext
	prc.parentCtx = ctx.parentCtx
	prc.invokingState = ctx.invokingState
	prc.children = nil
	prc.start = ctx.start
	prc.stop = ctx.stop
}
 | 
			
		||||
 | 
			
		||||
// GetText returns the concatenated text of all children, or "" when this
// context has no children.
func (prc *BaseParserRuleContext) GetText() string {
	if prc.GetChildCount() == 0 {
		return ""
	}

	var s string
	for _, child := range prc.children {
		s += child.(ParseTree).GetText()
	}

	return s
}
 | 
			
		||||
 | 
			
		||||
// EnterRule is a double-dispatch hook for listeners; the base implementation
// does nothing and is overridden by generated rule contexts.
func (prc *BaseParserRuleContext) EnterRule(listener ParseTreeListener) {
}
 | 
			
		||||
 | 
			
		||||
// ExitRule is a double-dispatch hook for listeners; the base implementation
// does nothing and is overridden by generated rule contexts.
func (prc *BaseParserRuleContext) ExitRule(listener ParseTreeListener) {
}
 | 
			
		||||
 | 
			
		||||
// addTerminalNodeChild appends a terminal node to the children and returns
// it. It does not set the child's parent link — the exported add methods do
// that. Panics when child is nil.
func (prc *BaseParserRuleContext) addTerminalNodeChild(child TerminalNode) TerminalNode {
	if prc.children == nil {
		prc.children = make([]Tree, 0)
	}
	if child == nil {
		panic("Child may not be null")
	}
	prc.children = append(prc.children, child)
	return child
}
 | 
			
		||||
 | 
			
		||||
// AddChild appends a rule-context child and returns it. Like
// addTerminalNodeChild, it does not set the child's parent link. Panics
// when child is nil.
func (prc *BaseParserRuleContext) AddChild(child RuleContext) RuleContext {
	if prc.children == nil {
		prc.children = make([]Tree, 0)
	}
	if child == nil {
		panic("Child may not be null")
	}
	prc.children = append(prc.children, child)
	return child
}
 | 
			
		||||
 | 
			
		||||
// * Used by EnterOuterAlt to toss out a RuleContext previously added as
 | 
			
		||||
// we entered a rule. If we have // label, we will need to remove
 | 
			
		||||
// generic ruleContext object.
 | 
			
		||||
// /
 | 
			
		||||
func (prc *BaseParserRuleContext) RemoveLastChild() {
 | 
			
		||||
	if prc.children != nil && len(prc.children) > 0 {
 | 
			
		||||
		prc.children = prc.children[0 : len(prc.children)-1]
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// AddTokenNode wraps token in a TerminalNodeImpl, appends it as a child,
// sets its parent link to this context, and returns the node.
func (prc *BaseParserRuleContext) AddTokenNode(token Token) *TerminalNodeImpl {

	node := NewTerminalNodeImpl(token)
	prc.addTerminalNodeChild(node)
	node.parentCtx = prc
	return node

}
 | 
			
		||||
 | 
			
		||||
// AddErrorNode wraps badToken in an ErrorNodeImpl, appends it as a child,
// sets its parent link to this context, and returns the node.
func (prc *BaseParserRuleContext) AddErrorNode(badToken Token) *ErrorNodeImpl {
	node := NewErrorNodeImpl(badToken)
	prc.addTerminalNodeChild(node)
	node.parentCtx = prc
	return node
}
 | 
			
		||||
 | 
			
		||||
func (prc *BaseParserRuleContext) GetChild(i int) Tree {
 | 
			
		||||
	if prc.children != nil && len(prc.children) >= i {
 | 
			
		||||
		return prc.children[i]
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// GetChildOfType returns the i-th child whose dynamic type equals
// childType (0-based among children of that type), or nil if there are
// fewer than i+1 such children. When childType is nil it falls back to
// positional lookup via GetChild.
// NOTE(review): with childType == nil and i out of range, GetChild returns
// nil and the type assertion below would panic — confirm callers always
// pass a valid index in that mode.
func (prc *BaseParserRuleContext) GetChildOfType(i int, childType reflect.Type) RuleContext {
	if childType == nil {
		return prc.GetChild(i).(RuleContext)
	}

	for j := 0; j < len(prc.children); j++ {
		child := prc.children[j]
		if reflect.TypeOf(child) == childType {
			if i == 0 {
				return child.(RuleContext)
			}

			i--
		}
	}

	return nil
}
 | 
			
		||||
 | 
			
		||||
// ToStringTree renders this subtree in LISP-style text form.
func (prc *BaseParserRuleContext) ToStringTree(ruleNames []string, recog Recognizer) string {
	return TreesStringTree(prc, ruleNames, recog)
}
 | 
			
		||||
 | 
			
		||||
// GetRuleContext returns this context itself.
func (prc *BaseParserRuleContext) GetRuleContext() RuleContext {
	return prc
}
 | 
			
		||||
 | 
			
		||||
// Accept implements the visitor pattern by dispatching to VisitChildren.
func (prc *BaseParserRuleContext) Accept(visitor ParseTreeVisitor) interface{} {
	return visitor.VisitChildren(prc)
}
 | 
			
		||||
 | 
			
		||||
// SetStart records the first token matched by this rule.
func (prc *BaseParserRuleContext) SetStart(t Token) {
	prc.start = t
}
 | 
			
		||||
 | 
			
		||||
// GetStart returns the first token matched by this rule invocation.
func (prc *BaseParserRuleContext) GetStart() Token {
	return prc.start
}
 | 
			
		||||
 | 
			
		||||
// SetStop records the last token matched by this rule invocation.
func (prc *BaseParserRuleContext) SetStop(t Token) {
	prc.stop = t
}
 | 
			
		||||
 | 
			
		||||
// GetStop returns the last token matched by this rule invocation.
func (prc *BaseParserRuleContext) GetStop() Token {
	return prc.stop
}
 | 
			
		||||
 | 
			
		||||
func (prc *BaseParserRuleContext) GetToken(ttype int, i int) TerminalNode {
 | 
			
		||||
 | 
			
		||||
	for j := 0; j < len(prc.children); j++ {
 | 
			
		||||
		child := prc.children[j]
 | 
			
		||||
		if c2, ok := child.(TerminalNode); ok {
 | 
			
		||||
			if c2.GetSymbol().GetTokenType() == ttype {
 | 
			
		||||
				if i == 0 {
 | 
			
		||||
					return c2
 | 
			
		||||
				}
 | 
			
		||||
 | 
			
		||||
				i--
 | 
			
		||||
			}
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
	return nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (prc *BaseParserRuleContext) GetTokens(ttype int) []TerminalNode {
 | 
			
		||||
	if prc.children == nil {
 | 
			
		||||
		return make([]TerminalNode, 0)
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	tokens := make([]TerminalNode, 0)
 | 
			
		||||
 | 
			
		||||
	for j := 0; j < len(prc.children); j++ {
 | 
			
		||||
		child := prc.children[j]
 | 
			
		||||
		if tchild, ok := child.(TerminalNode); ok {
 | 
			
		||||
			if tchild.GetSymbol().GetTokenType() == ttype {
 | 
			
		||||
				tokens = append(tokens, tchild)
 | 
			
		||||
			}
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return tokens
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// GetPayload returns this context itself as the node payload.
func (prc *BaseParserRuleContext) GetPayload() interface{} {
	return prc
}
 | 
			
		||||
 | 
			
		||||
func (prc *BaseParserRuleContext) getChild(ctxType reflect.Type, i int) RuleContext {
 | 
			
		||||
	if prc.children == nil || i < 0 || i >= len(prc.children) {
 | 
			
		||||
		return nil
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	j := -1 // what element have we found with ctxType?
 | 
			
		||||
	for _, o := range prc.children {
 | 
			
		||||
 | 
			
		||||
		childType := reflect.TypeOf(o)
 | 
			
		||||
 | 
			
		||||
		if childType.Implements(ctxType) {
 | 
			
		||||
			j++
 | 
			
		||||
			if j == i {
 | 
			
		||||
				return o.(RuleContext)
 | 
			
		||||
			}
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
	return nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// GetTypedRuleContext returns the i-th child implementing ctxType.
// Go (pre-generics) cannot return the concrete child type here, so callers
// receive a RuleContext; convertibility is checked by the helper.
func (prc *BaseParserRuleContext) GetTypedRuleContext(ctxType reflect.Type, i int) RuleContext {
	return prc.getChild(ctxType, i)
}
 | 
			
		||||
 | 
			
		||||
func (prc *BaseParserRuleContext) GetTypedRuleContexts(ctxType reflect.Type) []RuleContext {
 | 
			
		||||
	if prc.children == nil {
 | 
			
		||||
		return make([]RuleContext, 0)
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	contexts := make([]RuleContext, 0)
 | 
			
		||||
 | 
			
		||||
	for _, child := range prc.children {
 | 
			
		||||
		childType := reflect.TypeOf(child)
 | 
			
		||||
 | 
			
		||||
		if childType.ConvertibleTo(ctxType) {
 | 
			
		||||
			contexts = append(contexts, child.(RuleContext))
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
	return contexts
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (prc *BaseParserRuleContext) GetChildCount() int {
 | 
			
		||||
	if prc.children == nil {
 | 
			
		||||
		return 0
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return len(prc.children)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (prc *BaseParserRuleContext) GetSourceInterval() *Interval {
 | 
			
		||||
	if prc.start == nil || prc.stop == nil {
 | 
			
		||||
		return TreeInvalidInterval
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return NewInterval(prc.start.GetTokenIndex(), prc.stop.GetTokenIndex())
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// String renders the invocation-stack path from this context up to (but not
// including) stop, in the form "[a b c]". With ruleNames == nil each frame
// prints its invoking state number; otherwise its rule name (falling back to
// the numeric rule index when out of range).
// Exported here to avoid a circular dependency with the trees code.
func (prc *BaseParserRuleContext) String(ruleNames []string, stop RuleContext) string {

	var p ParserRuleContext = prc
	s := "["
	for p != nil && p != stop {
		if ruleNames == nil {
			// No name table: print the invoking state, skipping the
			// empty (outermost) frame.
			if !p.IsEmpty() {
				s += strconv.Itoa(p.GetInvokingState())
			}
		} else {
			ri := p.GetRuleIndex()
			var ruleName string
			if ri >= 0 && ri < len(ruleNames) {
				ruleName = ruleNames[ri]
			} else {
				ruleName = strconv.Itoa(ri)
			}
			s += ruleName
		}
		// Separate frames with a space, except before a trailing empty frame
		// in state-number mode.
		if p.GetParent() != nil && (ruleNames != nil || !p.GetParent().(ParserRuleContext).IsEmpty()) {
			s += " "
		}
		pi := p.GetParent()
		if pi != nil {
			p = pi.(ParserRuleContext)
		} else {
			p = nil
		}
	}
	s += "]"
	return s
}
 | 
			
		||||
 | 
			
		||||
// RuleContextEmpty is the shared sentinel for an empty invocation stack:
// no parent and invoking state -1.
var RuleContextEmpty = NewBaseParserRuleContext(nil, -1)
 | 
			
		||||
 | 
			
		||||
// InterpreterRuleContext is the rule context used by the interpreted
// parser/lexer; it adds nothing beyond ParserRuleContext.
type InterpreterRuleContext interface {
	ParserRuleContext
}
 | 
			
		||||
 | 
			
		||||
// BaseInterpreterRuleContext is the default InterpreterRuleContext
// implementation, embedding BaseParserRuleContext.
type BaseInterpreterRuleContext struct {
	*BaseParserRuleContext
}
 | 
			
		||||
 | 
			
		||||
// NewBaseInterpreterRuleContext builds an interpreter rule context for the
// given parent, invoking state, and rule index.
// NOTE(review): parent is taken by value here (not as a pointer/interface) —
// presumably matched to what NewBaseParserRuleContext accepts; confirm
// against its declaration earlier in this file.
func NewBaseInterpreterRuleContext(parent BaseInterpreterRuleContext, invokingStateNumber, ruleIndex int) *BaseInterpreterRuleContext {

	prc := new(BaseInterpreterRuleContext)

	prc.BaseParserRuleContext = NewBaseParserRuleContext(parent, invokingStateNumber)

	prc.RuleIndex = ruleIndex

	return prc
}
 | 
			
		||||
							
								
								
									
										756
									
								
								vendor/github.com/antlr/antlr4/runtime/Go/antlr/prediction_context.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										756
									
								
								vendor/github.com/antlr/antlr4/runtime/Go/antlr/prediction_context.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							@@ -0,0 +1,756 @@
 | 
			
		||||
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
 | 
			
		||||
// Use of this file is governed by the BSD 3-clause license that
 | 
			
		||||
// can be found in the LICENSE.txt file in the project root.
 | 
			
		||||
 | 
			
		||||
package antlr
 | 
			
		||||
 | 
			
		||||
import (
 | 
			
		||||
	"strconv"
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
// BasePredictionContextEmptyReturnState represents {@code $} in local context
// prediction, which means wildcard: {@code // + x = //}.
const (
	BasePredictionContextEmptyReturnState = 0x7FFFFFFF
)
 | 
			
		||||
 | 
			
		||||
// Package-level node counters for prediction contexts.
// (In an array in full context mode {@code $} doesn't mean wildcard:
// {@code $ + x = [$,x]}, with {@code $} = {@link //EmptyReturnState}.)
var (
	BasePredictionContextglobalNodeCount = 1
	BasePredictionContextid              = BasePredictionContextglobalNodeCount
)
 | 
			
		||||
 | 
			
		||||
// PredictionContext models the part of the parser call stack relevant to a
// prediction. Implemented in this file by BaseSingletonPredictionContext,
// EmptyPredictionContext, and ArrayPredictionContext.
type PredictionContext interface {
	// hash returns a hash of this context graph node.
	hash() int
	// GetParent returns the parent context at the given slot.
	GetParent(int) PredictionContext
	// getReturnState returns the ATN return state at the given slot.
	getReturnState(int) int
	// equals reports whether this context equals the other.
	equals(PredictionContext) bool
	// length returns the number of (parent, returnState) pairs.
	length() int
	// isEmpty reports whether this context represents $ (no caller).
	isEmpty() bool
	// hasEmptyPath reports whether the last return state is the empty state.
	hasEmptyPath() bool
	String() string
}
 | 
			
		||||
 | 
			
		||||
// BasePredictionContext carries the hash value shared by all prediction
// context implementations.
type BasePredictionContext struct {
	cachedHash int // precomputed hash; see calculateHash / calculateEmptyHash
}
 | 
			
		||||
 | 
			
		||||
func NewBasePredictionContext(cachedHash int) *BasePredictionContext {
 | 
			
		||||
	pc := new(BasePredictionContext)
 | 
			
		||||
	pc.cachedHash = cachedHash
 | 
			
		||||
 | 
			
		||||
	return pc
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// isEmpty reports false; only EmptyPredictionContext overrides this to true.
func (b *BasePredictionContext) isEmpty() bool {
	return false
}
 | 
			
		||||
 | 
			
		||||
// calculateHash murmur-hashes a (parent, returnState) pair.
// parent must be non-nil: its hash() is dereferenced unconditionally.
func calculateHash(parent PredictionContext, returnState int) int {
	h := murmurInit(1)
	h = murmurUpdate(h, parent.hash())
	h = murmurUpdate(h, returnState)
	return murmurFinish(h, 2)
}
 | 
			
		||||
 | 
			
		||||
// calculateEmptyHash murmur-hashes the empty context (zero fields).
func calculateEmptyHash() int {
	h := murmurInit(1)
	return murmurFinish(h, 0)
}
 | 
			
		||||
 | 
			
		||||
// PredictionContextCache caches PredictionContext objects — the shared
// context cache associated with contexts in DFA states. Usable by both
// lexers and parsers.
// NOTE(review): the map is not guarded by a lock here; confirm callers
// serialize access before sharing one cache across goroutines.
type PredictionContextCache struct {
	cache map[PredictionContext]PredictionContext
}
 | 
			
		||||
 | 
			
		||||
func NewPredictionContextCache() *PredictionContextCache {
 | 
			
		||||
	t := new(PredictionContextCache)
 | 
			
		||||
	t.cache = make(map[PredictionContext]PredictionContext)
 | 
			
		||||
	return t
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Add a context to the cache and return it. If the context already exists,
 | 
			
		||||
// return that one instead and do not add a Newcontext to the cache.
 | 
			
		||||
// Protect shared cache from unsafe thread access.
 | 
			
		||||
//
 | 
			
		||||
func (p *PredictionContextCache) add(ctx PredictionContext) PredictionContext {
 | 
			
		||||
	if ctx == BasePredictionContextEMPTY {
 | 
			
		||||
		return BasePredictionContextEMPTY
 | 
			
		||||
	}
 | 
			
		||||
	existing := p.cache[ctx]
 | 
			
		||||
	if existing != nil {
 | 
			
		||||
		return existing
 | 
			
		||||
	}
 | 
			
		||||
	p.cache[ctx] = ctx
 | 
			
		||||
	return ctx
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Get returns the cached entry for ctx, or nil when absent.
func (p *PredictionContextCache) Get(ctx PredictionContext) PredictionContext {
	return p.cache[ctx]
}
 | 
			
		||||
 | 
			
		||||
// length returns the number of cached contexts.
func (p *PredictionContextCache) length() int {
	return len(p.cache)
}
 | 
			
		||||
 | 
			
		||||
// SingletonPredictionContext marks contexts with exactly one
// (parent, returnState) pair; it adds nothing beyond PredictionContext.
type SingletonPredictionContext interface {
	PredictionContext
}
 | 
			
		||||
 | 
			
		||||
// BaseSingletonPredictionContext holds a single (parent, returnState) pair.
type BaseSingletonPredictionContext struct {
	*BasePredictionContext

	parentCtx   PredictionContext // caller's context; nil for the empty context
	returnState int               // ATN state to return to
}
 | 
			
		||||
 | 
			
		||||
func NewBaseSingletonPredictionContext(parent PredictionContext, returnState int) *BaseSingletonPredictionContext {
 | 
			
		||||
 | 
			
		||||
	s := new(BaseSingletonPredictionContext)
 | 
			
		||||
	s.BasePredictionContext = NewBasePredictionContext(37)
 | 
			
		||||
 | 
			
		||||
	if parent != nil {
 | 
			
		||||
		s.cachedHash = calculateHash(parent, returnState)
 | 
			
		||||
	} else {
 | 
			
		||||
		s.cachedHash = calculateEmptyHash()
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	s.parentCtx = parent
 | 
			
		||||
	s.returnState = returnState
 | 
			
		||||
 | 
			
		||||
	return s
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func SingletonBasePredictionContextCreate(parent PredictionContext, returnState int) PredictionContext {
 | 
			
		||||
	if returnState == BasePredictionContextEmptyReturnState && parent == nil {
 | 
			
		||||
		// someone can pass in the bits of an array ctx that mean $
 | 
			
		||||
		return BasePredictionContextEMPTY
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return NewBaseSingletonPredictionContext(parent, returnState)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// length returns 1: a singleton holds exactly one pair.
func (b *BaseSingletonPredictionContext) length() int {
	return 1
}
 | 
			
		||||
 | 
			
		||||
// GetParent returns the single parent context; index is ignored.
func (b *BaseSingletonPredictionContext) GetParent(index int) PredictionContext {
	return b.parentCtx
}
 | 
			
		||||
 | 
			
		||||
// getReturnState returns the single return state; index is ignored.
func (b *BaseSingletonPredictionContext) getReturnState(index int) int {
	return b.returnState
}
 | 
			
		||||
 | 
			
		||||
// hasEmptyPath reports whether the single return state is the empty ($) state.
func (b *BaseSingletonPredictionContext) hasEmptyPath() bool {
	return b.returnState == BasePredictionContextEmptyReturnState
}
 | 
			
		||||
 | 
			
		||||
func (b *BaseSingletonPredictionContext) equals(other PredictionContext) bool {
 | 
			
		||||
	if b == other {
 | 
			
		||||
		return true
 | 
			
		||||
	} else if _, ok := other.(*BaseSingletonPredictionContext); !ok {
 | 
			
		||||
		return false
 | 
			
		||||
	} else if b.hash() != other.hash() {
 | 
			
		||||
		return false // can't be same if hash is different
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	otherP := other.(*BaseSingletonPredictionContext)
 | 
			
		||||
 | 
			
		||||
	if b.returnState != other.getReturnState(0) {
 | 
			
		||||
		return false
 | 
			
		||||
	} else if b.parentCtx == nil {
 | 
			
		||||
		return otherP.parentCtx == nil
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return b.parentCtx.equals(otherP.parentCtx)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// hash murmur-hashes the parent's hash and the return state; a nil parent
// hashes as the empty context (zero fields folded in).
func (b *BaseSingletonPredictionContext) hash() int {
	h := murmurInit(1)

	if b.parentCtx == nil {
		return murmurFinish(h, 0)
	}

	h = murmurUpdate(h, b.parentCtx.hash())
	h = murmurUpdate(h, b.returnState)
	return murmurFinish(h, 2)
}
 | 
			
		||||
 | 
			
		||||
func (b *BaseSingletonPredictionContext) String() string {
 | 
			
		||||
	var up string
 | 
			
		||||
 | 
			
		||||
	if b.parentCtx == nil {
 | 
			
		||||
		up = ""
 | 
			
		||||
	} else {
 | 
			
		||||
		up = b.parentCtx.String()
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	if len(up) == 0 {
 | 
			
		||||
		if b.returnState == BasePredictionContextEmptyReturnState {
 | 
			
		||||
			return "$"
 | 
			
		||||
		}
 | 
			
		||||
 | 
			
		||||
		return strconv.Itoa(b.returnState)
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return strconv.Itoa(b.returnState) + " " + up
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// BasePredictionContextEMPTY is the shared $ (wildcard/empty) context.
var BasePredictionContextEMPTY = NewEmptyPredictionContext()
 | 
			
		||||
 | 
			
		||||
// EmptyPredictionContext is the singleton representing $: no parent, empty
// return state.
type EmptyPredictionContext struct {
	*BaseSingletonPredictionContext
}
 | 
			
		||||
 | 
			
		||||
func NewEmptyPredictionContext() *EmptyPredictionContext {
 | 
			
		||||
 | 
			
		||||
	p := new(EmptyPredictionContext)
 | 
			
		||||
 | 
			
		||||
	p.BaseSingletonPredictionContext = NewBaseSingletonPredictionContext(nil, BasePredictionContextEmptyReturnState)
 | 
			
		||||
 | 
			
		||||
	return p
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// isEmpty reports true: this is the $ context.
func (e *EmptyPredictionContext) isEmpty() bool {
	return true
}
 | 
			
		||||
 | 
			
		||||
// GetParent returns nil: the empty context has no caller.
func (e *EmptyPredictionContext) GetParent(index int) PredictionContext {
	return nil
}
 | 
			
		||||
 | 
			
		||||
// getReturnState returns the empty return state; index is ignored.
func (e *EmptyPredictionContext) getReturnState(index int) int {
	return e.returnState
}
 | 
			
		||||
 | 
			
		||||
// equals uses identity: there is one shared empty context.
func (e *EmptyPredictionContext) equals(other PredictionContext) bool {
	return e == other
}
 | 
			
		||||
 | 
			
		||||
// String renders the empty context as "$".
func (e *EmptyPredictionContext) String() string {
	return "$"
}
 | 
			
		||||
 | 
			
		||||
// ArrayPredictionContext holds several (parent, returnState) pairs in two
// parallel slices. A parents entry may be nil: in full-context mode the $
// element is encoded as nil parent + EmptyReturnState (see mergeRoot).
type ArrayPredictionContext struct {
	*BasePredictionContext

	parents      []PredictionContext // parallel to returnStates; entries may be nil
	returnStates []int               // sorted; EmptyReturnState only in last slot
}
 | 
			
		||||
 | 
			
		||||
func NewArrayPredictionContext(parents []PredictionContext, returnStates []int) *ArrayPredictionContext {
 | 
			
		||||
	// Parent can be nil only if full ctx mode and we make an array
 | 
			
		||||
	// from {@link //EMPTY} and non-empty. We merge {@link //EMPTY} by using
 | 
			
		||||
	// nil parent and
 | 
			
		||||
	// returnState == {@link //EmptyReturnState}.
 | 
			
		||||
 | 
			
		||||
	c := new(ArrayPredictionContext)
 | 
			
		||||
	c.BasePredictionContext = NewBasePredictionContext(37)
 | 
			
		||||
 | 
			
		||||
	for i := range parents {
 | 
			
		||||
		c.cachedHash += calculateHash(parents[i], returnStates[i])
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	c.parents = parents
 | 
			
		||||
	c.returnStates = returnStates
 | 
			
		||||
 | 
			
		||||
	return c
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// GetReturnStates returns the (shared, not copied) return-state slice.
func (a *ArrayPredictionContext) GetReturnStates() []int {
	return a.returnStates
}
 | 
			
		||||
 | 
			
		||||
// hasEmptyPath reports whether the last return state is the empty ($) state;
// $ can only appear in the last slot.
func (a *ArrayPredictionContext) hasEmptyPath() bool {
	return a.getReturnState(a.length()-1) == BasePredictionContextEmptyReturnState
}
 | 
			
		||||
 | 
			
		||||
// isEmpty reports whether this array represents only $.
func (a *ArrayPredictionContext) isEmpty() bool {
	// since EmptyReturnState can only appear in the last position, we
	// don't need to verify that size==1
	return a.returnStates[0] == BasePredictionContextEmptyReturnState
}
 | 
			
		||||
 | 
			
		||||
// length returns the number of (parent, returnState) pairs.
func (a *ArrayPredictionContext) length() int {
	return len(a.returnStates)
}
 | 
			
		||||
 | 
			
		||||
// GetParent returns the parent at the given slot (may be nil for the $ slot).
func (a *ArrayPredictionContext) GetParent(index int) PredictionContext {
	return a.parents[index]
}
 | 
			
		||||
 | 
			
		||||
// getReturnState returns the return state at the given slot.
func (a *ArrayPredictionContext) getReturnState(index int) int {
	return a.returnStates[index]
}
 | 
			
		||||
 | 
			
		||||
func (a *ArrayPredictionContext) equals(other PredictionContext) bool {
 | 
			
		||||
	if _, ok := other.(*ArrayPredictionContext); !ok {
 | 
			
		||||
		return false
 | 
			
		||||
	} else if a.cachedHash != other.hash() {
 | 
			
		||||
		return false // can't be same if hash is different
 | 
			
		||||
	} else {
 | 
			
		||||
		otherP := other.(*ArrayPredictionContext)
 | 
			
		||||
		return &a.returnStates == &otherP.returnStates && &a.parents == &otherP.parents
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (a *ArrayPredictionContext) hash() int {
 | 
			
		||||
	h := murmurInit(1)
 | 
			
		||||
 | 
			
		||||
	for _, p := range a.parents {
 | 
			
		||||
		h = murmurUpdate(h, p.hash())
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	for _, r := range a.returnStates {
 | 
			
		||||
		h = murmurUpdate(h, r)
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return murmurFinish(h, 2 * len(a.parents))
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (a *ArrayPredictionContext) String() string {
 | 
			
		||||
	if a.isEmpty() {
 | 
			
		||||
		return "[]"
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	s := "["
 | 
			
		||||
	for i := 0; i < len(a.returnStates); i++ {
 | 
			
		||||
		if i > 0 {
 | 
			
		||||
			s = s + ", "
 | 
			
		||||
		}
 | 
			
		||||
		if a.returnStates[i] == BasePredictionContextEmptyReturnState {
 | 
			
		||||
			s = s + "$"
 | 
			
		||||
			continue
 | 
			
		||||
		}
 | 
			
		||||
		s = s + strconv.Itoa(a.returnStates[i])
 | 
			
		||||
		if a.parents[i] != nil {
 | 
			
		||||
			s = s + " " + a.parents[i].String()
 | 
			
		||||
		} else {
 | 
			
		||||
			s = s + "nil"
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return s + "]"
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// predictionContextFromRuleContext converts a RuleContext tree to a
// PredictionContext graph, recursively from the root down.
// Returns {@link //EMPTY} when outerContext is empty or nil.
func predictionContextFromRuleContext(a *ATN, outerContext RuleContext) PredictionContext {
	if outerContext == nil {
		outerContext = RuleContextEmpty
	}
	// if we are in RuleContext of start rule, s, then BasePredictionContext
	// is EMPTY. Nobody called us. (if we are empty, return empty)
	if outerContext.GetParent() == nil || outerContext == RuleContextEmpty {
		return BasePredictionContextEMPTY
	}
	// If we have a parent, convert it to a BasePredictionContext graph
	parent := predictionContextFromRuleContext(a, outerContext.GetParent().(RuleContext))
	// The invoking state's first (rule) transition tells us where the caller
	// resumes after this rule returns.
	state := a.states[outerContext.GetInvokingState()]
	transition := state.GetTransitions()[0]

	return SingletonBasePredictionContextCreate(parent, transition.(*RuleTransition).followState.GetStateNumber())
}
 | 
			
		||||
 | 
			
		||||
// merge combines two prediction contexts. It dispatches to mergeSingletons
// when both are singletons, short-circuits on wildcard ($) roots in
// local-context mode, and otherwise normalizes both sides to arrays and
// dispatches to mergeArrays. mergeCache (may be nil) memoizes results.
func merge(a, b PredictionContext, rootIsWildcard bool, mergeCache *DoubleDict) PredictionContext {
	// share same graph if both same
	if a == b {
		return a
	}

	ac, ok1 := a.(*BaseSingletonPredictionContext)
	bc, ok2 := b.(*BaseSingletonPredictionContext)

	if ok1 && ok2 {
		return mergeSingletons(ac, bc, rootIsWildcard, mergeCache)
	}
	// At least one of a or b is array
	// If one is $ and rootIsWildcard, return $ as// wildcard
	if rootIsWildcard {
		if _, ok := a.(*EmptyPredictionContext); ok {
			return a
		}
		if _, ok := b.(*EmptyPredictionContext); ok {
			return b
		}
	}
	// convert singleton so both are arrays to normalize
	if _, ok := a.(*BaseSingletonPredictionContext); ok {
		a = NewArrayPredictionContext([]PredictionContext{a.GetParent(0)}, []int{a.getReturnState(0)})
	}
	if _, ok := b.(*BaseSingletonPredictionContext); ok {
		b = NewArrayPredictionContext([]PredictionContext{b.GetParent(0)}, []int{b.getReturnState(0)})
	}
	return mergeArrays(a.(*ArrayPredictionContext), b.(*ArrayPredictionContext), rootIsWildcard, mergeCache)
}
 | 
			
		||||
 | 
			
		||||
// mergeSingletons merges two singleton contexts. Cases (see the ANTLR
// SingletonMerge_* diagrams):
//   - equal tops, parents merge to one of them: return that side unchanged;
//   - equal tops, parents differ: merge parents, wrap in a new singleton;
//   - different tops, same parent: array node [a,b] sharing the parent;
//   - different tops, different parents: array node [ax, by].
// Return states in a result array are kept sorted ascending. rootIsWildcard
// selects local- vs full-context root handling (mergeRoot); mergeCache (may
// be nil) memoizes results keyed by the pair of hashes, checked both ways.
func mergeSingletons(a, b *BaseSingletonPredictionContext, rootIsWildcard bool, mergeCache *DoubleDict) PredictionContext {
	if mergeCache != nil {
		previous := mergeCache.Get(a.hash(), b.hash())
		if previous != nil {
			return previous.(PredictionContext)
		}
		previous = mergeCache.Get(b.hash(), a.hash())
		if previous != nil {
			return previous.(PredictionContext)
		}
	}

	// Root ($) special cases first.
	rootMerge := mergeRoot(a, b, rootIsWildcard)
	if rootMerge != nil {
		if mergeCache != nil {
			mergeCache.set(a.hash(), b.hash(), rootMerge)
		}
		return rootMerge
	}
	if a.returnState == b.returnState {
		parent := merge(a.parentCtx, b.parentCtx, rootIsWildcard, mergeCache)
		// if parent is same as existing a or b parent or reduced to a parent,
		// return it
		if parent == a.parentCtx {
			return a // ax + bx = ax, if a=b
		}
		if parent == b.parentCtx {
			return b // ax + bx = bx, if a=b
		}
		// else: ax + ay = a'[x,y]
		// merge parents x and y, giving array node with x,y then remainders
		// of those graphs. dup a, a' points at merged array
		// Newjoined parent so create Newsingleton pointing to it, a'
		spc := SingletonBasePredictionContextCreate(parent, a.returnState)
		if mergeCache != nil {
			mergeCache.set(a.hash(), b.hash(), spc)
		}
		return spc
	}
	// a != b payloads differ
	// see if we can collapse parents due to $+x parents if local ctx
	var singleParent PredictionContext
	if a == b || (a.parentCtx != nil && a.parentCtx == b.parentCtx) { // ax +
		// bx =
		// [a,b]x
		singleParent = a.parentCtx
	}
	if singleParent != nil { // parents are same
		// sort payloads and use same parent
		payloads := []int{a.returnState, b.returnState}
		if a.returnState > b.returnState {
			payloads[0] = b.returnState
			payloads[1] = a.returnState
		}
		parents := []PredictionContext{singleParent, singleParent}
		apc := NewArrayPredictionContext(parents, payloads)
		if mergeCache != nil {
			mergeCache.set(a.hash(), b.hash(), apc)
		}
		return apc
	}
	// parents differ and can't merge them. Just pack together
	// into array can't merge.
	// ax + by = [ax,by]
	payloads := []int{a.returnState, b.returnState}
	parents := []PredictionContext{a.parentCtx, b.parentCtx}
	if a.returnState > b.returnState { // sort by payload
		payloads[0] = b.returnState
		payloads[1] = a.returnState
		parents = []PredictionContext{b.parentCtx, a.parentCtx}
	}
	apc := NewArrayPredictionContext(parents, payloads)
	if mergeCache != nil {
		mergeCache.set(a.hash(), b.hash(), apc)
	}
	return apc
}
 | 
			
		||||
 | 
			
		||||
// mergeRoot handles the case where at least one of a or b is {@link //EMPTY}
// ($). Local-context merges (rootIsWildcard true): $ is a wildcard, so
// $ + b = $ and a + $ = $. Full-context merges: $ + $ = $, while
// $ + x = [$,x] — an array whose $ element is encoded as a nil parent plus
// {@link //EmptyReturnState}, with $ always placed last. Returns nil when
// neither side is $, signalling the caller to continue the normal merge.
func mergeRoot(a, b SingletonPredictionContext, rootIsWildcard bool) PredictionContext {
	if rootIsWildcard {
		if a == BasePredictionContextEMPTY {
			return BasePredictionContextEMPTY // // + b =//
		}
		if b == BasePredictionContextEMPTY {
			return BasePredictionContextEMPTY // a +// =//
		}
	} else {
		if a == BasePredictionContextEMPTY && b == BasePredictionContextEMPTY {
			return BasePredictionContextEMPTY // $ + $ = $
		} else if a == BasePredictionContextEMPTY { // $ + x = [$,x]
			payloads := []int{b.getReturnState(-1), BasePredictionContextEmptyReturnState}
			parents := []PredictionContext{b.GetParent(-1), nil}
			return NewArrayPredictionContext(parents, payloads)
		} else if b == BasePredictionContextEMPTY { // x + $ = [$,x] ($ is always first if present)
			payloads := []int{a.getReturnState(-1), BasePredictionContextEmptyReturnState}
			parents := []PredictionContext{a.GetParent(-1), nil}
			return NewArrayPredictionContext(parents, payloads)
		}
	}
	return nil
}
 | 
			
		||||
 | 
			
		||||
//
 | 
			
		||||
// Merge two {@link ArrayBasePredictionContext} instances.
 | 
			
		||||
//
 | 
			
		||||
// <p>Different tops, different parents.<br>
 | 
			
		||||
// <embed src="images/ArrayMerge_DiffTopDiffPar.svg" type="image/svg+xml"/></p>
 | 
			
		||||
//
 | 
			
		||||
// <p>Shared top, same parents.<br>
 | 
			
		||||
// <embed src="images/ArrayMerge_ShareTopSamePar.svg" type="image/svg+xml"/></p>
 | 
			
		||||
//
 | 
			
		||||
// <p>Shared top, different parents.<br>
 | 
			
		||||
// <embed src="images/ArrayMerge_ShareTopDiffPar.svg" type="image/svg+xml"/></p>
 | 
			
		||||
//
 | 
			
		||||
// <p>Shared top, all shared parents.<br>
 | 
			
		||||
// <embed src="images/ArrayMerge_ShareTopSharePar.svg"
 | 
			
		||||
// type="image/svg+xml"/></p>
 | 
			
		||||
//
 | 
			
		||||
// <p>Equal tops, merge parents and reduce top to
 | 
			
		||||
// {@link SingletonBasePredictionContext}.<br>
 | 
			
		||||
// <embed src="images/ArrayMerge_EqualTop.svg" type="image/svg+xml"/></p>
 | 
			
		||||
// /
 | 
			
		||||
// mergeArrays merges two ArrayPredictionContext instances by walking their
// sorted returnStates arrays in parallel (classic sorted-merge), recursively
// merging parents whenever the same return state appears on both sides.
// Results are memoized in mergeCache (when non-nil) keyed by the pair of
// context hashes.
func mergeArrays(a, b *ArrayPredictionContext, rootIsWildcard bool, mergeCache *DoubleDict) PredictionContext {
	// Consult the cache in both (a,b) and (b,a) orders: merging is symmetric.
	if mergeCache != nil {
		previous := mergeCache.Get(a.hash(), b.hash())
		if previous != nil {
			return previous.(PredictionContext)
		}
		previous = mergeCache.Get(b.hash(), a.hash())
		if previous != nil {
			return previous.(PredictionContext)
		}
	}
	// merge sorted payloads a + b => M
	i := 0 // walks a
	j := 0 // walks b
	k := 0 // walks target M array

	// Worst case: no shared return states, so size for the full concatenation.
	mergedReturnStates := make([]int, len(a.returnStates)+len(b.returnStates))
	mergedParents := make([]PredictionContext, len(a.returnStates)+len(b.returnStates))
	// walk and merge to yield mergedParents, mergedReturnStates
	for i < len(a.returnStates) && j < len(b.returnStates) {
		aParent := a.parents[i]
		bParent := b.parents[j]
		if a.returnStates[i] == b.returnStates[j] {
			// same payload (stack tops are equal), must yield merged singleton
			payload := a.returnStates[i]
			// $+$ = $
			bothDollars := payload == BasePredictionContextEmptyReturnState && aParent == nil && bParent == nil
			axAX := (aParent != nil && bParent != nil && aParent == bParent) // ax+ax
			// ->
			// ax
			if bothDollars || axAX {
				mergedParents[k] = aParent // choose left
				mergedReturnStates[k] = payload
			} else { // ax+ay -> a'[x,y]: parents differ, merge them recursively
				mergedParent := merge(aParent, bParent, rootIsWildcard, mergeCache)
				mergedParents[k] = mergedParent
				mergedReturnStates[k] = payload
			}
			i++ // hop over left one as usual
			j++ // but also Skip one in right side since we merge
		} else if a.returnStates[i] < b.returnStates[j] { // copy a[i] to M
			mergedParents[k] = aParent
			mergedReturnStates[k] = a.returnStates[i]
			i++
		} else { // b > a, copy b[j] to M
			mergedParents[k] = bParent
			mergedReturnStates[k] = b.returnStates[j]
			j++
		}
		k++
	}
	// copy over any payloads remaining in either array
	// (only one of the two loops can have work left after the merge loop)
	if i < len(a.returnStates) {
		for p := i; p < len(a.returnStates); p++ {
			mergedParents[k] = a.parents[p]
			mergedReturnStates[k] = a.returnStates[p]
			k++
		}
	} else {
		for p := j; p < len(b.returnStates); p++ {
			mergedParents[k] = b.parents[p]
			mergedReturnStates[k] = b.returnStates[p]
			k++
		}
	}
	// trim merged if we combined a few that had same stack tops
	if k < len(mergedParents) { // write index < last position trim
		if k == 1 { // for just one merged element, return singleton top
			pc := SingletonBasePredictionContextCreate(mergedParents[0], mergedReturnStates[0])
			if mergeCache != nil {
				mergeCache.set(a.hash(), b.hash(), pc)
			}
			return pc
		}
		mergedParents = mergedParents[0:k]
		mergedReturnStates = mergedReturnStates[0:k]
	}

	M := NewArrayPredictionContext(mergedParents, mergedReturnStates)

	// if we created same array as a or b, return that instead
	// TODO: track whether this is possible above during merge sort for speed
	// NOTE(review): these are pointer comparisons; M is freshly allocated by
	// NewArrayPredictionContext, so it can never alias a or b and both
	// branches look unreachable. The upstream Java runtime uses structural
	// equality here — confirm intent before changing.
	if M == a {
		if mergeCache != nil {
			mergeCache.set(a.hash(), b.hash(), a)
		}
		return a
	}
	if M == b {
		if mergeCache != nil {
			mergeCache.set(a.hash(), b.hash(), b)
		}
		return b
	}
	// Deduplicate equal parents so shared sub-graphs use one canonical object.
	combineCommonParents(mergedParents)

	if mergeCache != nil {
		mergeCache.set(a.hash(), b.hash(), M)
	}
	return M
}
 | 
			
		||||
 | 
			
		||||
//
 | 
			
		||||
// Make pass over all <em>M</em> {@code parents} merge any {@code equals()}
 | 
			
		||||
// ones.
 | 
			
		||||
// /
 | 
			
		||||
func combineCommonParents(parents []PredictionContext) {
 | 
			
		||||
	uniqueParents := make(map[PredictionContext]PredictionContext)
 | 
			
		||||
 | 
			
		||||
	for p := 0; p < len(parents); p++ {
 | 
			
		||||
		parent := parents[p]
 | 
			
		||||
		if uniqueParents[parent] == nil {
 | 
			
		||||
			uniqueParents[parent] = parent
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
	for q := 0; q < len(parents); q++ {
 | 
			
		||||
		parents[q] = uniqueParents[parents[q]]
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// getCachedBasePredictionContext returns the canonical, interned form of
// context, adding it (and, recursively, its parents) to contextCache.
// visited memoizes nodes already processed during this traversal so shared
// sub-graphs are rewritten only once per call tree.
func getCachedBasePredictionContext(context PredictionContext, contextCache *PredictionContextCache, visited map[PredictionContext]PredictionContext) PredictionContext {

	// Empty contexts are singletons already; nothing to intern.
	if context.isEmpty() {
		return context
	}
	// Already canonicalized during this traversal?
	existing := visited[context]
	if existing != nil {
		return existing
	}
	// Already interned in the shared cache from an earlier call?
	existing = contextCache.Get(context)
	if existing != nil {
		visited[context] = existing
		return existing
	}
	// Recursively canonicalize each parent. The copy is lazy: parents is only
	// (re)populated with the original parents once the first changed parent
	// is discovered; before that the loop just compares.
	changed := false
	parents := make([]PredictionContext, context.length())
	for i := 0; i < len(parents); i++ {
		parent := getCachedBasePredictionContext(context.GetParent(i), contextCache, visited)
		if changed || parent != context.GetParent(i) {
			if !changed {
				// First difference found: snapshot all parents seen so far
				// (the slice allocated above is discarded and re-made here).
				parents = make([]PredictionContext, context.length())
				for j := 0; j < context.length(); j++ {
					parents[j] = context.GetParent(j)
				}
				changed = true
			}
			parents[i] = parent
		}
	}
	// No parent changed: context itself is canonical; intern and return it.
	if !changed {
		contextCache.add(context)
		visited[context] = context
		return context
	}
	// Rebuild around the canonicalized parents, using the narrowest
	// representation that fits (empty / singleton / array).
	var updated PredictionContext
	if len(parents) == 0 {
		updated = BasePredictionContextEMPTY
	} else if len(parents) == 1 {
		updated = SingletonBasePredictionContextCreate(parents[0], context.getReturnState(0))
	} else {
		updated = NewArrayPredictionContext(parents, context.(*ArrayPredictionContext).GetReturnStates())
	}
	contextCache.add(updated)
	visited[updated] = updated
	// Map the original node to its replacement so later visits reuse it.
	visited[context] = updated

	return updated
}
 | 
			
		||||
							
								
								
									
										553
									
								
								vendor/github.com/antlr/antlr4/runtime/Go/antlr/prediction_mode.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										553
									
								
								vendor/github.com/antlr/antlr4/runtime/Go/antlr/prediction_mode.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							@@ -0,0 +1,553 @@
 | 
			
		||||
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
 | 
			
		||||
// Use of this file is governed by the BSD 3-clause license that
 | 
			
		||||
// can be found in the LICENSE.txt file in the project root.
 | 
			
		||||
 | 
			
		||||
package antlr
 | 
			
		||||
 | 
			
		||||
// This enumeration defines the prediction modes available in ANTLR 4 along with
 | 
			
		||||
// utility methods for analyzing configuration sets for conflicts and/or
 | 
			
		||||
// ambiguities.
 | 
			
		||||
 | 
			
		||||
const (
	// PredictionModeSLL is the SLL(*) prediction mode. It ignores the current
	// parser context when making predictions, making it the fastest mode; it
	// produces correct results for many grammars but may raise syntax errors
	// for grammar/input combinations that are not SLL.
	//
	// With this mode the parser either returns the same parse tree that
	// PredictionModeLL would, or it reports a syntax error. A syntax error
	// under SLL may indicate a genuine error in the input, or that the
	// grammar/input combination needs full LL prediction to succeed.
	//
	// This mode makes no guarantees about prediction behavior for
	// syntactically-incorrect inputs.
	PredictionModeSLL = 0
	// PredictionModeLL is the LL(*) prediction mode. It allows the current
	// parser context to be used when resolving SLL conflicts, making it the
	// fastest mode that guarantees correct parse results for all grammars
	// with syntactically correct inputs.
	//
	// For truly ambiguous grammars this mode may not report a precise answer
	// for exactly which alternatives are ambiguous.
	//
	// This mode makes no guarantees about prediction behavior for
	// syntactically-incorrect inputs.
	PredictionModeLL = 1
	// PredictionModeLLExactAmbigDetection is the LL(*) mode with exact
	// ambiguity detection. In addition to the correctness guarantees of
	// PredictionModeLL, it instructs the prediction algorithm to compute the
	// complete and exact set of ambiguous alternatives for every ambiguous
	// decision encountered while parsing.
	//
	// Useful for diagnosing ambiguities during grammar development; because
	// computing the exact sets is expensive, avoid this mode when exact
	// results are not required.
	//
	// This mode makes no guarantees about prediction behavior for
	// syntactically-incorrect inputs.
	PredictionModeLLExactAmbigDetection = 2
)
 | 
			
		||||
 | 
			
		||||
//
 | 
			
		||||
// Computes the SLL prediction termination condition.
 | 
			
		||||
//
 | 
			
		||||
// <p>
 | 
			
		||||
// This method computes the SLL prediction termination condition for both of
 | 
			
		||||
// the following cases.</p>
 | 
			
		||||
//
 | 
			
		||||
// <ul>
 | 
			
		||||
// <li>The usual SLL+LL fallback upon SLL conflict</li>
 | 
			
		||||
// <li>Pure SLL without LL fallback</li>
 | 
			
		||||
// </ul>
 | 
			
		||||
//
 | 
			
		||||
// <p><strong>COMBINED SLL+LL PARSING</strong></p>
 | 
			
		||||
//
 | 
			
		||||
// <p>When LL-fallback is enabled upon SLL conflict, correct predictions are
 | 
			
		||||
// ensured regardless of how the termination condition is computed by this
 | 
			
		||||
// method. Due to the substantially higher cost of LL prediction, the
 | 
			
		||||
// prediction should only fall back to LL when the additional lookahead
 | 
			
		||||
// cannot lead to a unique SLL prediction.</p>
 | 
			
		||||
//
 | 
			
		||||
// <p>Assuming combined SLL+LL parsing, an SLL configuration set with only
 | 
			
		||||
// conflicting subsets should fall back to full LL, even if the
 | 
			
		||||
// configuration sets don't resolve to the same alternative (e.g.
 | 
			
		||||
// {@code {1,2}} and {@code {3,4}}. If there is at least one non-conflicting
 | 
			
		||||
// configuration, SLL could continue with the hopes that more lookahead will
 | 
			
		||||
// resolve via one of those non-conflicting configurations.</p>
 | 
			
		||||
//
 | 
			
		||||
// <p>Here's the prediction termination rule them: SLL (for SLL+LL parsing)
 | 
			
		||||
// stops when it sees only conflicting configuration subsets. In contrast,
 | 
			
		||||
// full LL keeps going when there is uncertainty.</p>
 | 
			
		||||
//
 | 
			
		||||
// <p><strong>HEURISTIC</strong></p>
 | 
			
		||||
//
 | 
			
		||||
// <p>As a heuristic, we stop prediction when we see any conflicting subset
 | 
			
		||||
// unless we see a state that only has one alternative associated with it.
 | 
			
		||||
// The single-alt-state thing lets prediction continue upon rules like
 | 
			
		||||
// (otherwise, it would admit defeat too soon):</p>
 | 
			
		||||
//
 | 
			
		||||
// <p>{@code [12|1|[], 6|2|[], 12|2|[]]. s : (ID | ID ID?) '' }</p>
 | 
			
		||||
//
 | 
			
		||||
// <p>When the ATN simulation reaches the state before {@code ''}, it has a
 | 
			
		||||
// DFA state that looks like: {@code [12|1|[], 6|2|[], 12|2|[]]}. Naturally
 | 
			
		||||
// {@code 12|1|[]} and {@code 12|2|[]} conflict, but we cannot stop
 | 
			
		||||
// processing this node because alternative to has another way to continue,
 | 
			
		||||
// via {@code [6|2|[]]}.</p>
 | 
			
		||||
//
 | 
			
		||||
// <p>It also let's us continue for this rule:</p>
 | 
			
		||||
//
 | 
			
		||||
// <p>{@code [1|1|[], 1|2|[], 8|3|[]] a : A | A | A B }</p>
 | 
			
		||||
//
 | 
			
		||||
// <p>After Matching input A, we reach the stop state for rule A, state 1.
 | 
			
		||||
// State 8 is the state right before B. Clearly alternatives 1 and 2
 | 
			
		||||
// conflict and no amount of further lookahead will separate the two.
 | 
			
		||||
// However, alternative 3 will be able to continue and so we do not stop
 | 
			
		||||
// working on this state. In the previous example, we're concerned with
 | 
			
		||||
// states associated with the conflicting alternatives. Here alt 3 is not
 | 
			
		||||
// associated with the conflicting configs, but since we can continue
 | 
			
		||||
// looking for input reasonably, don't declare the state done.</p>
 | 
			
		||||
//
 | 
			
		||||
// <p><strong>PURE SLL PARSING</strong></p>
 | 
			
		||||
//
 | 
			
		||||
// <p>To handle pure SLL parsing, all we have to do is make sure that we
 | 
			
		||||
// combine stack contexts for configurations that differ only by semantic
 | 
			
		||||
// predicate. From there, we can do the usual SLL termination heuristic.</p>
 | 
			
		||||
//
 | 
			
		||||
// <p><strong>PREDICATES IN SLL+LL PARSING</strong></p>
 | 
			
		||||
//
 | 
			
		||||
// <p>SLL decisions don't evaluate predicates until after they reach DFA stop
 | 
			
		||||
// states because they need to create the DFA cache that works in all
 | 
			
		||||
// semantic situations. In contrast, full LL evaluates predicates collected
 | 
			
		||||
// during start state computation so it can ignore predicates thereafter.
 | 
			
		||||
// This means that SLL termination detection can totally ignore semantic
 | 
			
		||||
// predicates.</p>
 | 
			
		||||
//
 | 
			
		||||
// <p>Implementation-wise, {@link ATNConfigSet} combines stack contexts but not
 | 
			
		||||
// semantic predicate contexts so we might see two configurations like the
 | 
			
		||||
// following.</p>
 | 
			
		||||
//
 | 
			
		||||
// <p>{@code (s, 1, x, {}), (s, 1, x', {p})}</p>
 | 
			
		||||
//
 | 
			
		||||
// <p>Before testing these configurations against others, we have to merge
 | 
			
		||||
// {@code x} and {@code x'} (without modifying the existing configurations).
 | 
			
		||||
// For example, we test {@code (x+x')==x''} when looking for conflicts in
 | 
			
		||||
// the following configurations.</p>
 | 
			
		||||
//
 | 
			
		||||
// <p>{@code (s, 1, x, {}), (s, 1, x', {p}), (s, 2, x'', {})}</p>
 | 
			
		||||
//
 | 
			
		||||
// <p>If the configuration set has predicates (as indicated by
 | 
			
		||||
// {@link ATNConfigSet//hasSemanticContext}), this algorithm makes a copy of
 | 
			
		||||
// the configurations to strip out all of the predicates so that a standard
 | 
			
		||||
// {@link ATNConfigSet} will merge everything ignoring predicates.</p>
 | 
			
		||||
//
 | 
			
		||||
func PredictionModehasSLLConflictTerminatingPrediction(mode int, configs ATNConfigSet) bool {
 | 
			
		||||
	// Configs in rule stop states indicate reaching the end of the decision
 | 
			
		||||
	// rule (local context) or end of start rule (full context). If all
 | 
			
		||||
	// configs meet this condition, then none of the configurations is able
 | 
			
		||||
	// to Match additional input so we terminate prediction.
 | 
			
		||||
	//
 | 
			
		||||
	if PredictionModeallConfigsInRuleStopStates(configs) {
 | 
			
		||||
		return true
 | 
			
		||||
	}
 | 
			
		||||
	// pure SLL mode parsing
 | 
			
		||||
	if mode == PredictionModeSLL {
 | 
			
		||||
		// Don't bother with combining configs from different semantic
 | 
			
		||||
		// contexts if we can fail over to full LL costs more time
 | 
			
		||||
		// since we'll often fail over anyway.
 | 
			
		||||
		if configs.HasSemanticContext() {
 | 
			
		||||
			// dup configs, tossing out semantic predicates
 | 
			
		||||
			dup := NewBaseATNConfigSet(false)
 | 
			
		||||
			for _, c := range configs.GetItems() {
 | 
			
		||||
 | 
			
		||||
				//				NewBaseATNConfig({semanticContext:}, c)
 | 
			
		||||
				c = NewBaseATNConfig2(c, SemanticContextNone)
 | 
			
		||||
				dup.Add(c, nil)
 | 
			
		||||
			}
 | 
			
		||||
			configs = dup
 | 
			
		||||
		}
 | 
			
		||||
		// now we have combined contexts for configs with dissimilar preds
 | 
			
		||||
	}
 | 
			
		||||
	// pure SLL or combined SLL+LL mode parsing
 | 
			
		||||
	altsets := PredictionModegetConflictingAltSubsets(configs)
 | 
			
		||||
	return PredictionModehasConflictingAltSet(altsets) && !PredictionModehasStateAssociatedWithOneAlt(configs)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Checks if any configuration in {@code configs} is in a
 | 
			
		||||
// {@link RuleStopState}. Configurations meeting this condition have reached
 | 
			
		||||
// the end of the decision rule (local context) or end of start rule (full
 | 
			
		||||
// context).
 | 
			
		||||
//
 | 
			
		||||
// @param configs the configuration set to test
 | 
			
		||||
// @return {@code true} if any configuration in {@code configs} is in a
 | 
			
		||||
// {@link RuleStopState}, otherwise {@code false}
 | 
			
		||||
func PredictionModehasConfigInRuleStopState(configs ATNConfigSet) bool {
 | 
			
		||||
	for _, c := range configs.GetItems() {
 | 
			
		||||
		if _, ok := c.GetState().(*RuleStopState); ok {
 | 
			
		||||
			return true
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
	return false
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Checks if all configurations in {@code configs} are in a
 | 
			
		||||
// {@link RuleStopState}. Configurations meeting this condition have reached
 | 
			
		||||
// the end of the decision rule (local context) or end of start rule (full
 | 
			
		||||
// context).
 | 
			
		||||
//
 | 
			
		||||
// @param configs the configuration set to test
 | 
			
		||||
// @return {@code true} if all configurations in {@code configs} are in a
 | 
			
		||||
// {@link RuleStopState}, otherwise {@code false}
 | 
			
		||||
func PredictionModeallConfigsInRuleStopStates(configs ATNConfigSet) bool {
 | 
			
		||||
 | 
			
		||||
	for _, c := range configs.GetItems() {
 | 
			
		||||
		if _, ok := c.GetState().(*RuleStopState); !ok {
 | 
			
		||||
			return false
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
	return true
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
//
 | 
			
		||||
// Full LL prediction termination.
 | 
			
		||||
//
 | 
			
		||||
// <p>Can we stop looking ahead during ATN simulation or is there some
 | 
			
		||||
// uncertainty as to which alternative we will ultimately pick, after
 | 
			
		||||
// consuming more input? Even if there are partial conflicts, we might know
 | 
			
		||||
// that everything is going to resolve to the same minimum alternative. That
 | 
			
		||||
// means we can stop since no more lookahead will change that fact. On the
 | 
			
		||||
// other hand, there might be multiple conflicts that resolve to different
 | 
			
		||||
// minimums. That means we need more look ahead to decide which of those
 | 
			
		||||
// alternatives we should predict.</p>
 | 
			
		||||
//
 | 
			
		||||
// <p>The basic idea is to split the set of configurations {@code C}, into
 | 
			
		||||
// conflicting subsets {@code (s, _, ctx, _)} and singleton subsets with
 | 
			
		||||
// non-conflicting configurations. Two configurations conflict if they have
 | 
			
		||||
// identical {@link ATNConfig//state} and {@link ATNConfig//context} values
 | 
			
		||||
// but different {@link ATNConfig//alt} value, e.g. {@code (s, i, ctx, _)}
 | 
			
		||||
// and {@code (s, j, ctx, _)} for {@code i!=j}.</p>
 | 
			
		||||
//
 | 
			
		||||
// <p>Reduce these configuration subsets to the set of possible alternatives.
 | 
			
		||||
// You can compute the alternative subsets in one pass as follows:</p>
 | 
			
		||||
//
 | 
			
		||||
// <p>{@code A_s,ctx = {i | (s, i, ctx, _)}} for each configuration in
 | 
			
		||||
// {@code C} holding {@code s} and {@code ctx} fixed.</p>
 | 
			
		||||
//
 | 
			
		||||
// <p>Or in pseudo-code, for each configuration {@code c} in {@code C}:</p>
 | 
			
		||||
//
 | 
			
		||||
// <pre>
 | 
			
		||||
// map[c] U= c.{@link ATNConfig//alt alt} // map hash/equals uses s and x, not
 | 
			
		||||
// alt and not pred
 | 
			
		||||
// </pre>
 | 
			
		||||
//
 | 
			
		||||
// <p>The values in {@code map} are the set of {@code A_s,ctx} sets.</p>
 | 
			
		||||
//
 | 
			
		||||
// <p>If {@code |A_s,ctx|=1} then there is no conflict associated with
 | 
			
		||||
// {@code s} and {@code ctx}.</p>
 | 
			
		||||
//
 | 
			
		||||
// <p>Reduce the subsets to singletons by choosing a minimum of each subset. If
 | 
			
		||||
// the union of these alternative subsets is a singleton, then no amount of
 | 
			
		||||
// more lookahead will help us. We will always pick that alternative. If,
 | 
			
		||||
// however, there is more than one alternative, then we are uncertain which
 | 
			
		||||
// alternative to predict and must continue looking for resolution. We may
 | 
			
		||||
// or may not discover an ambiguity in the future, even if there are no
 | 
			
		||||
// conflicting subsets this round.</p>
 | 
			
		||||
//
 | 
			
		||||
// <p>The biggest sin is to terminate early because it means we've made a
 | 
			
		||||
// decision but were uncertain as to the eventual outcome. We haven't used
 | 
			
		||||
// enough lookahead. On the other hand, announcing a conflict too late is no
 | 
			
		||||
// big deal you will still have the conflict. It's just inefficient. It
 | 
			
		||||
// might even look until the end of file.</p>
 | 
			
		||||
//
 | 
			
		||||
// <p>No special consideration for semantic predicates is required because
 | 
			
		||||
// predicates are evaluated on-the-fly for full LL prediction, ensuring that
 | 
			
		||||
// no configuration contains a semantic context during the termination
 | 
			
		||||
// check.</p>
 | 
			
		||||
//
 | 
			
		||||
// <p><strong>CONFLICTING CONFIGS</strong></p>
 | 
			
		||||
//
 | 
			
		||||
// <p>Two configurations {@code (s, i, x)} and {@code (s, j, x')}, conflict
 | 
			
		||||
// when {@code i!=j} but {@code x=x'}. Because we merge all
 | 
			
		||||
// {@code (s, i, _)} configurations together, that means that there are at
 | 
			
		||||
// most {@code n} configurations associated with state {@code s} for
 | 
			
		||||
// {@code n} possible alternatives in the decision. The merged stacks
 | 
			
		||||
// complicate the comparison of configuration contexts {@code x} and
 | 
			
		||||
// {@code x'}. Sam checks to see if one is a subset of the other by calling
 | 
			
		||||
// merge and checking to see if the merged result is either {@code x} or
 | 
			
		||||
// {@code x'}. If the {@code x} associated with lowest alternative {@code i}
 | 
			
		||||
// is the superset, then {@code i} is the only possible prediction since the
 | 
			
		||||
// others resolve to {@code min(i)} as well. However, if {@code x} is
 | 
			
		||||
// associated with {@code j>i} then at least one stack configuration for
 | 
			
		||||
// {@code j} is not in conflict with alternative {@code i}. The algorithm
 | 
			
		||||
// should keep going, looking for more lookahead due to the uncertainty.</p>
 | 
			
		||||
//
 | 
			
		||||
// <p>For simplicity, I'm doing a equality check between {@code x} and
 | 
			
		||||
// {@code x'} that lets the algorithm continue to consume lookahead longer
 | 
			
		||||
// than necessary. The reason I like the equality is of course the
 | 
			
		||||
// simplicity but also because that is the test you need to detect the
 | 
			
		||||
// alternatives that are actually in conflict.</p>
 | 
			
		||||
//
 | 
			
		||||
// <p><strong>CONTINUE/STOP RULE</strong></p>
 | 
			
		||||
//
 | 
			
		||||
// <p>Continue if union of resolved alternative sets from non-conflicting and
 | 
			
		||||
// conflicting alternative subsets has more than one alternative. We are
 | 
			
		||||
// uncertain about which alternative to predict.</p>
 | 
			
		||||
//
 | 
			
		||||
// <p>The complete set of alternatives, {@code [i for (_,i,_)]}, tells us which
 | 
			
		||||
// alternatives are still in the running for the amount of input we've
 | 
			
		||||
// consumed at this point. The conflicting sets let us to strip away
 | 
			
		||||
// configurations that won't lead to more states because we resolve
 | 
			
		||||
// conflicts to the configuration with a minimum alternate for the
 | 
			
		||||
// conflicting set.</p>
 | 
			
		||||
//
 | 
			
		||||
// <p><strong>CASES</strong></p>
 | 
			
		||||
//
 | 
			
		||||
// <ul>
 | 
			
		||||
//
 | 
			
		||||
// <li>no conflicts and more than 1 alternative in set => continue</li>
 | 
			
		||||
//
 | 
			
		||||
// <li> {@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s, 3, z)},
 | 
			
		||||
// {@code (s', 1, y)}, {@code (s', 2, y)} yields non-conflicting set
 | 
			
		||||
// {@code {3}} U conflicting sets {@code min({1,2})} U {@code min({1,2})} =
 | 
			
		||||
// {@code {1,3}} => continue
 | 
			
		||||
// </li>
 | 
			
		||||
//
 | 
			
		||||
// <li>{@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s', 1, y)},
 | 
			
		||||
// {@code (s', 2, y)}, {@code (s'', 1, z)} yields non-conflicting set
 | 
			
		||||
// {@code {1}} U conflicting sets {@code min({1,2})} U {@code min({1,2})} =
 | 
			
		||||
// {@code {1}} => stop and predict 1</li>
 | 
			
		||||
//
 | 
			
		||||
// <li>{@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s', 1, y)},
 | 
			
		||||
// {@code (s', 2, y)} yields conflicting, reduced sets {@code {1}} U
 | 
			
		||||
// {@code {1}} = {@code {1}} => stop and predict 1, can announce
 | 
			
		||||
// ambiguity {@code {1,2}}</li>
 | 
			
		||||
//
 | 
			
		||||
// <li>{@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s', 2, y)},
 | 
			
		||||
// {@code (s', 3, y)} yields conflicting, reduced sets {@code {1}} U
 | 
			
		||||
// {@code {2}} = {@code {1,2}} => continue</li>
 | 
			
		||||
//
 | 
			
		||||
// <li>{@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s', 3, y)},
 | 
			
		||||
// {@code (s', 4, y)} yields conflicting, reduced sets {@code {1}} U
 | 
			
		||||
// {@code {3}} = {@code {1,3}} => continue</li>
 | 
			
		||||
//
 | 
			
		||||
// </ul>
 | 
			
		||||
//
 | 
			
		||||
// <p><strong>EXACT AMBIGUITY DETECTION</strong></p>
 | 
			
		||||
//
 | 
			
		||||
// <p>If all states Report the same conflicting set of alternatives, then we
 | 
			
		||||
// know we have the exact ambiguity set.</p>
 | 
			
		||||
//
 | 
			
		||||
// <p><code>|A_<em>i</em>|>1</code> and
 | 
			
		||||
// <code>A_<em>i</em> = A_<em>j</em></code> for all <em>i</em>, <em>j</em>.</p>
 | 
			
		||||
//
 | 
			
		||||
// <p>In other words, we continue examining lookahead until all {@code A_i}
 | 
			
		||||
// have more than one alternative and all {@code A_i} are the same. If
 | 
			
		||||
// {@code A={{1,2}, {1,3}}}, then regular LL prediction would terminate
 | 
			
		||||
// because the resolved set is {@code {1}}. To determine what the real
 | 
			
		||||
// ambiguity is, we have to know whether the ambiguity is between one and
 | 
			
		||||
// two or one and three so we keep going. We can only stop prediction when
 | 
			
		||||
// we need exact ambiguity detection when the sets look like
 | 
			
		||||
// {@code A={{1,2}}} or {@code {{1,2},{1,2}}}, etc...</p>
 | 
			
		||||
//
 | 
			
		||||
func PredictionModeresolvesToJustOneViableAlt(altsets []*BitSet) int {
 | 
			
		||||
	return PredictionModegetSingleViableAlt(altsets)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
//
 | 
			
		||||
// Determines if every alternative subset in {@code altsets} contains more
 | 
			
		||||
// than one alternative.
 | 
			
		||||
//
 | 
			
		||||
// @param altsets a collection of alternative subsets
 | 
			
		||||
// @return {@code true} if every {@link BitSet} in {@code altsets} has
 | 
			
		||||
// {@link BitSet//cardinality cardinality} > 1, otherwise {@code false}
 | 
			
		||||
//
 | 
			
		||||
func PredictionModeallSubsetsConflict(altsets []*BitSet) bool {
 | 
			
		||||
	return !PredictionModehasNonConflictingAltSet(altsets)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
//
 | 
			
		||||
// Determines if any single alternative subset in {@code altsets} contains
 | 
			
		||||
// exactly one alternative.
 | 
			
		||||
//
 | 
			
		||||
// @param altsets a collection of alternative subsets
 | 
			
		||||
// @return {@code true} if {@code altsets} contains a {@link BitSet} with
 | 
			
		||||
// {@link BitSet//cardinality cardinality} 1, otherwise {@code false}
 | 
			
		||||
//
 | 
			
		||||
func PredictionModehasNonConflictingAltSet(altsets []*BitSet) bool {
 | 
			
		||||
	for i := 0; i < len(altsets); i++ {
 | 
			
		||||
		alts := altsets[i]
 | 
			
		||||
		if alts.length() == 1 {
 | 
			
		||||
			return true
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
	return false
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
//
 | 
			
		||||
// Determines if any single alternative subset in {@code altsets} contains
 | 
			
		||||
// more than one alternative.
 | 
			
		||||
//
 | 
			
		||||
// @param altsets a collection of alternative subsets
 | 
			
		||||
// @return {@code true} if {@code altsets} contains a {@link BitSet} with
 | 
			
		||||
// {@link BitSet//cardinality cardinality} > 1, otherwise {@code false}
 | 
			
		||||
//
 | 
			
		||||
func PredictionModehasConflictingAltSet(altsets []*BitSet) bool {
 | 
			
		||||
	for i := 0; i < len(altsets); i++ {
 | 
			
		||||
		alts := altsets[i]
 | 
			
		||||
		if alts.length() > 1 {
 | 
			
		||||
			return true
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
	return false
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
//
 | 
			
		||||
// Determines if every alternative subset in {@code altsets} is equivalent.
 | 
			
		||||
//
 | 
			
		||||
// @param altsets a collection of alternative subsets
 | 
			
		||||
// @return {@code true} if every member of {@code altsets} is equal to the
 | 
			
		||||
// others, otherwise {@code false}
 | 
			
		||||
//
 | 
			
		||||
func PredictionModeallSubsetsEqual(altsets []*BitSet) bool {
 | 
			
		||||
	var first *BitSet
 | 
			
		||||
 | 
			
		||||
	for i := 0; i < len(altsets); i++ {
 | 
			
		||||
		alts := altsets[i]
 | 
			
		||||
		if first == nil {
 | 
			
		||||
			first = alts
 | 
			
		||||
		} else if alts != first {
 | 
			
		||||
			return false
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return true
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
//
 | 
			
		||||
// Returns the unique alternative predicted by all alternative subsets in
 | 
			
		||||
// {@code altsets}. If no such alternative exists, this method returns
 | 
			
		||||
// {@link ATN//INVALID_ALT_NUMBER}.
 | 
			
		||||
//
 | 
			
		||||
// @param altsets a collection of alternative subsets
 | 
			
		||||
//
 | 
			
		||||
func PredictionModegetUniqueAlt(altsets []*BitSet) int {
 | 
			
		||||
	all := PredictionModeGetAlts(altsets)
 | 
			
		||||
	if all.length() == 1 {
 | 
			
		||||
		return all.minValue()
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return ATNInvalidAltNumber
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Gets the complete set of represented alternatives for a collection of
 | 
			
		||||
// alternative subsets. This method returns the union of each {@link BitSet}
 | 
			
		||||
// in {@code altsets}.
 | 
			
		||||
//
 | 
			
		||||
// @param altsets a collection of alternative subsets
 | 
			
		||||
// @return the set of represented alternatives in {@code altsets}
 | 
			
		||||
//
 | 
			
		||||
func PredictionModeGetAlts(altsets []*BitSet) *BitSet {
 | 
			
		||||
	all := NewBitSet()
 | 
			
		||||
	for _, alts := range altsets {
 | 
			
		||||
		all.or(alts)
 | 
			
		||||
	}
 | 
			
		||||
	return all
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
//
 | 
			
		||||
// This func gets the conflicting alt subsets from a configuration set.
 | 
			
		||||
// For each configuration {@code c} in {@code configs}:
 | 
			
		||||
//
 | 
			
		||||
// <pre>
 | 
			
		||||
// map[c] U= c.{@link ATNConfig//alt alt} // map hash/equals uses s and x, not
 | 
			
		||||
// alt and not pred
 | 
			
		||||
// </pre>
 | 
			
		||||
//
 | 
			
		||||
func PredictionModegetConflictingAltSubsets(configs ATNConfigSet) []*BitSet {
 | 
			
		||||
	configToAlts := make(map[int]*BitSet)
 | 
			
		||||
 | 
			
		||||
	for _, c := range configs.GetItems() {
 | 
			
		||||
		key := 31 * c.GetState().GetStateNumber() + c.GetContext().hash()
 | 
			
		||||
 | 
			
		||||
		alts, ok := configToAlts[key]
 | 
			
		||||
		if !ok {
 | 
			
		||||
			alts = NewBitSet()
 | 
			
		||||
			configToAlts[key] = alts
 | 
			
		||||
		}
 | 
			
		||||
		alts.add(c.GetAlt())
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	values := make([]*BitSet, 0, 10)
 | 
			
		||||
	for _, v := range configToAlts {
 | 
			
		||||
		values = append(values, v)
 | 
			
		||||
	}
 | 
			
		||||
	return values
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
//
 | 
			
		||||
// Get a map from state to alt subset from a configuration set. For each
 | 
			
		||||
// configuration {@code c} in {@code configs}:
 | 
			
		||||
//
 | 
			
		||||
// <pre>
 | 
			
		||||
// map[c.{@link ATNConfig//state state}] U= c.{@link ATNConfig//alt alt}
 | 
			
		||||
// </pre>
 | 
			
		||||
//
 | 
			
		||||
func PredictionModeGetStateToAltMap(configs ATNConfigSet) *AltDict {
 | 
			
		||||
	m := NewAltDict()
 | 
			
		||||
 | 
			
		||||
	for _, c := range configs.GetItems() {
 | 
			
		||||
		alts := m.Get(c.GetState().String())
 | 
			
		||||
		if alts == nil {
 | 
			
		||||
			alts = NewBitSet()
 | 
			
		||||
			m.put(c.GetState().String(), alts)
 | 
			
		||||
		}
 | 
			
		||||
		alts.(*BitSet).add(c.GetAlt())
 | 
			
		||||
	}
 | 
			
		||||
	return m
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func PredictionModehasStateAssociatedWithOneAlt(configs ATNConfigSet) bool {
 | 
			
		||||
	values := PredictionModeGetStateToAltMap(configs).values()
 | 
			
		||||
	for i := 0; i < len(values); i++ {
 | 
			
		||||
		if values[i].(*BitSet).length() == 1 {
 | 
			
		||||
			return true
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
	return false
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func PredictionModegetSingleViableAlt(altsets []*BitSet) int {
 | 
			
		||||
	result := ATNInvalidAltNumber
 | 
			
		||||
 | 
			
		||||
	for i := 0; i < len(altsets); i++ {
 | 
			
		||||
		alts := altsets[i]
 | 
			
		||||
		minAlt := alts.minValue()
 | 
			
		||||
		if result == ATNInvalidAltNumber {
 | 
			
		||||
			result = minAlt
 | 
			
		||||
		} else if result != minAlt { // more than 1 viable alt
 | 
			
		||||
			return ATNInvalidAltNumber
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
	return result
 | 
			
		||||
}
 | 
			
		||||
							
								
								
									
										217
									
								
								vendor/github.com/antlr/antlr4/runtime/Go/antlr/recognizer.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										217
									
								
								vendor/github.com/antlr/antlr4/runtime/Go/antlr/recognizer.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							@@ -0,0 +1,217 @@
 | 
			
		||||
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
 | 
			
		||||
// Use of this file is governed by the BSD 3-clause license that
 | 
			
		||||
// can be found in the LICENSE.txt file in the project root.
 | 
			
		||||
 | 
			
		||||
package antlr
 | 
			
		||||
 | 
			
		||||
import (
 | 
			
		||||
	"fmt"
 | 
			
		||||
	"strings"
 | 
			
		||||
 | 
			
		||||
	"strconv"
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
type Recognizer interface {
 | 
			
		||||
	GetLiteralNames() []string
 | 
			
		||||
	GetSymbolicNames() []string
 | 
			
		||||
	GetRuleNames() []string
 | 
			
		||||
 | 
			
		||||
	Sempred(RuleContext, int, int) bool
 | 
			
		||||
	Precpred(RuleContext, int) bool
 | 
			
		||||
 | 
			
		||||
	GetState() int
 | 
			
		||||
	SetState(int)
 | 
			
		||||
	Action(RuleContext, int, int)
 | 
			
		||||
	AddErrorListener(ErrorListener)
 | 
			
		||||
	RemoveErrorListeners()
 | 
			
		||||
	GetATN() *ATN
 | 
			
		||||
	GetErrorListenerDispatch() ErrorListener
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
type BaseRecognizer struct {
 | 
			
		||||
	listeners []ErrorListener
 | 
			
		||||
	state     int
 | 
			
		||||
 | 
			
		||||
	RuleNames       []string
 | 
			
		||||
	LiteralNames    []string
 | 
			
		||||
	SymbolicNames   []string
 | 
			
		||||
	GrammarFileName string
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func NewBaseRecognizer() *BaseRecognizer {
 | 
			
		||||
	rec := new(BaseRecognizer)
 | 
			
		||||
	rec.listeners = []ErrorListener{ConsoleErrorListenerINSTANCE}
 | 
			
		||||
	rec.state = -1
 | 
			
		||||
	return rec
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Package-level caches for token-type and rule-index name lookups.
// NOTE(review): currently unreferenced by the active code below (the lookup
// methods are stubbed out) — kept for parity with the other runtimes.
var (
	tokenTypeMapCache = make(map[string]int)
	ruleIndexMapCache = make(map[string]int)
)
 | 
			
		||||
 | 
			
		||||
func (b *BaseRecognizer) checkVersion(toolVersion string) {
 | 
			
		||||
	runtimeVersion := "4.9.2"
 | 
			
		||||
	if runtimeVersion != toolVersion {
 | 
			
		||||
		fmt.Println("ANTLR runtime and generated code versions disagree: " + runtimeVersion + "!=" + toolVersion)
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (b *BaseRecognizer) Action(context RuleContext, ruleIndex, actionIndex int) {
 | 
			
		||||
	panic("action not implemented on Recognizer!")
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (b *BaseRecognizer) AddErrorListener(listener ErrorListener) {
 | 
			
		||||
	b.listeners = append(b.listeners, listener)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (b *BaseRecognizer) RemoveErrorListeners() {
 | 
			
		||||
	b.listeners = make([]ErrorListener, 0)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (b *BaseRecognizer) GetRuleNames() []string {
 | 
			
		||||
	return b.RuleNames
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (b *BaseRecognizer) GetTokenNames() []string {
 | 
			
		||||
	return b.LiteralNames
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (b *BaseRecognizer) GetSymbolicNames() []string {
 | 
			
		||||
	return b.SymbolicNames
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (b *BaseRecognizer) GetLiteralNames() []string {
 | 
			
		||||
	return b.LiteralNames
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (b *BaseRecognizer) GetState() int {
 | 
			
		||||
	return b.state
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (b *BaseRecognizer) SetState(v int) {
 | 
			
		||||
	b.state = v
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
//func (b *Recognizer) GetTokenTypeMap() {
 | 
			
		||||
//    var tokenNames = b.GetTokenNames()
 | 
			
		||||
//    if (tokenNames==nil) {
 | 
			
		||||
//        panic("The current recognizer does not provide a list of token names.")
 | 
			
		||||
//    }
 | 
			
		||||
//    var result = tokenTypeMapCache[tokenNames]
 | 
			
		||||
//    if(result==nil) {
 | 
			
		||||
//        result = tokenNames.reduce(function(o, k, i) { o[k] = i })
 | 
			
		||||
//        result.EOF = TokenEOF
 | 
			
		||||
//        tokenTypeMapCache[tokenNames] = result
 | 
			
		||||
//    }
 | 
			
		||||
//    return result
 | 
			
		||||
//}
 | 
			
		||||
 | 
			
		||||
// Get a map from rule names to rule indexes.
 | 
			
		||||
//
 | 
			
		||||
// <p>Used for XPath and tree pattern compilation.</p>
 | 
			
		||||
//
 | 
			
		||||
func (b *BaseRecognizer) GetRuleIndexMap() map[string]int {
 | 
			
		||||
 | 
			
		||||
	panic("Method not defined!")
 | 
			
		||||
	//    var ruleNames = b.GetRuleNames()
 | 
			
		||||
	//    if (ruleNames==nil) {
 | 
			
		||||
	//        panic("The current recognizer does not provide a list of rule names.")
 | 
			
		||||
	//    }
 | 
			
		||||
	//
 | 
			
		||||
	//    var result = ruleIndexMapCache[ruleNames]
 | 
			
		||||
	//    if(result==nil) {
 | 
			
		||||
	//        result = ruleNames.reduce(function(o, k, i) { o[k] = i })
 | 
			
		||||
	//        ruleIndexMapCache[ruleNames] = result
 | 
			
		||||
	//    }
 | 
			
		||||
	//    return result
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (b *BaseRecognizer) GetTokenType(tokenName string) int {
 | 
			
		||||
	panic("Method not defined!")
 | 
			
		||||
	//    var ttype = b.GetTokenTypeMap()[tokenName]
 | 
			
		||||
	//    if (ttype !=nil) {
 | 
			
		||||
	//        return ttype
 | 
			
		||||
	//    } else {
 | 
			
		||||
	//        return TokenInvalidType
 | 
			
		||||
	//    }
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
//func (b *Recognizer) GetTokenTypeMap() map[string]int {
 | 
			
		||||
//    Vocabulary vocabulary = getVocabulary()
 | 
			
		||||
//
 | 
			
		||||
//    Synchronized (tokenTypeMapCache) {
 | 
			
		||||
//        Map<String, Integer> result = tokenTypeMapCache.Get(vocabulary)
 | 
			
		||||
//        if (result == null) {
 | 
			
		||||
//            result = new HashMap<String, Integer>()
 | 
			
		||||
//            for (int i = 0; i < GetATN().maxTokenType; i++) {
 | 
			
		||||
//                String literalName = vocabulary.getLiteralName(i)
 | 
			
		||||
//                if (literalName != null) {
 | 
			
		||||
//                    result.put(literalName, i)
 | 
			
		||||
//                }
 | 
			
		||||
//
 | 
			
		||||
//                String symbolicName = vocabulary.GetSymbolicName(i)
 | 
			
		||||
//                if (symbolicName != null) {
 | 
			
		||||
//                    result.put(symbolicName, i)
 | 
			
		||||
//                }
 | 
			
		||||
//            }
 | 
			
		||||
//
 | 
			
		||||
//            result.put("EOF", Token.EOF)
 | 
			
		||||
//            result = Collections.unmodifiableMap(result)
 | 
			
		||||
//            tokenTypeMapCache.put(vocabulary, result)
 | 
			
		||||
//        }
 | 
			
		||||
//
 | 
			
		||||
//        return result
 | 
			
		||||
//    }
 | 
			
		||||
//}
 | 
			
		||||
 | 
			
		||||
// What is the error header, normally line/character position information?//
 | 
			
		||||
func (b *BaseRecognizer) GetErrorHeader(e RecognitionException) string {
 | 
			
		||||
	line := e.GetOffendingToken().GetLine()
 | 
			
		||||
	column := e.GetOffendingToken().GetColumn()
 | 
			
		||||
	return "line " + strconv.Itoa(line) + ":" + strconv.Itoa(column)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// How should a token be displayed in an error message? The default
 | 
			
		||||
//  is to display just the text, but during development you might
 | 
			
		||||
//  want to have a lot of information spit out.  Override in that case
 | 
			
		||||
//  to use t.String() (which, for CommonToken, dumps everything about
 | 
			
		||||
//  the token). This is better than forcing you to override a method in
 | 
			
		||||
//  your token objects because you don't have to go modify your lexer
 | 
			
		||||
//  so that it creates a NewJava type.
 | 
			
		||||
//
 | 
			
		||||
// @deprecated This method is not called by the ANTLR 4 Runtime. Specific
 | 
			
		||||
// implementations of {@link ANTLRErrorStrategy} may provide a similar
 | 
			
		||||
// feature when necessary. For example, see
 | 
			
		||||
// {@link DefaultErrorStrategy//GetTokenErrorDisplay}.
 | 
			
		||||
//
 | 
			
		||||
func (b *BaseRecognizer) GetTokenErrorDisplay(t Token) string {
 | 
			
		||||
	if t == nil {
 | 
			
		||||
		return "<no token>"
 | 
			
		||||
	}
 | 
			
		||||
	s := t.GetText()
 | 
			
		||||
	if s == "" {
 | 
			
		||||
		if t.GetTokenType() == TokenEOF {
 | 
			
		||||
			s = "<EOF>"
 | 
			
		||||
		} else {
 | 
			
		||||
			s = "<" + strconv.Itoa(t.GetTokenType()) + ">"
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
	s = strings.Replace(s, "\t", "\\t", -1)
 | 
			
		||||
	s = strings.Replace(s, "\n", "\\n", -1)
 | 
			
		||||
	s = strings.Replace(s, "\r", "\\r", -1)
 | 
			
		||||
 | 
			
		||||
	return "'" + s + "'"
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (b *BaseRecognizer) GetErrorListenerDispatch() ErrorListener {
 | 
			
		||||
	return NewProxyErrorListener(b.listeners)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// subclass needs to override these if there are sempreds or actions
 | 
			
		||||
// that the ATN interp needs to execute
 | 
			
		||||
func (b *BaseRecognizer) Sempred(localctx RuleContext, ruleIndex int, actionIndex int) bool {
 | 
			
		||||
	return true
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (b *BaseRecognizer) Precpred(localctx RuleContext, precedence int) bool {
 | 
			
		||||
	return true
 | 
			
		||||
}
 | 
			
		||||
							
								
								
									
										114
									
								
								vendor/github.com/antlr/antlr4/runtime/Go/antlr/rule_context.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										114
									
								
								vendor/github.com/antlr/antlr4/runtime/Go/antlr/rule_context.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							@@ -0,0 +1,114 @@
 | 
			
		||||
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
 | 
			
		||||
// Use of this file is governed by the BSD 3-clause license that
 | 
			
		||||
// can be found in the LICENSE.txt file in the project root.
 | 
			
		||||
 | 
			
		||||
package antlr
 | 
			
		||||
 | 
			
		||||
//  A rule context is a record of a single rule invocation. It knows
 | 
			
		||||
//  which context invoked it, if any. If there is no parent context, then
 | 
			
		||||
//  naturally the invoking state is not valid.  The parent link
 | 
			
		||||
//  provides a chain upwards from the current rule invocation to the root
 | 
			
		||||
//  of the invocation tree, forming a stack. We actually carry no
 | 
			
		||||
//  information about the rule associated with b context (except
 | 
			
		||||
//  when parsing). We keep only the state number of the invoking state from
 | 
			
		||||
//  the ATN submachine that invoked b. Contrast b with the s
 | 
			
		||||
//  pointer inside ParserRuleContext that tracks the current state
 | 
			
		||||
//  being "executed" for the current rule.
 | 
			
		||||
//
 | 
			
		||||
//  The parent contexts are useful for computing lookahead sets and
 | 
			
		||||
//  getting error information.
 | 
			
		||||
//
 | 
			
		||||
//  These objects are used during parsing and prediction.
 | 
			
		||||
//  For the special case of parsers, we use the subclass
 | 
			
		||||
//  ParserRuleContext.
 | 
			
		||||
//
 | 
			
		||||
//  @see ParserRuleContext
 | 
			
		||||
//
 | 
			
		||||
 | 
			
		||||
type RuleContext interface {
 | 
			
		||||
	RuleNode
 | 
			
		||||
 | 
			
		||||
	GetInvokingState() int
 | 
			
		||||
	SetInvokingState(int)
 | 
			
		||||
 | 
			
		||||
	GetRuleIndex() int
 | 
			
		||||
	IsEmpty() bool
 | 
			
		||||
 | 
			
		||||
	GetAltNumber() int
 | 
			
		||||
	SetAltNumber(altNumber int)
 | 
			
		||||
 | 
			
		||||
	String([]string, RuleContext) string
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
type BaseRuleContext struct {
 | 
			
		||||
	parentCtx     RuleContext
 | 
			
		||||
	invokingState int
 | 
			
		||||
	RuleIndex     int
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func NewBaseRuleContext(parent RuleContext, invokingState int) *BaseRuleContext {
 | 
			
		||||
 | 
			
		||||
	rn := new(BaseRuleContext)
 | 
			
		||||
 | 
			
		||||
	// What context invoked b rule?
 | 
			
		||||
	rn.parentCtx = parent
 | 
			
		||||
 | 
			
		||||
	// What state invoked the rule associated with b context?
 | 
			
		||||
	// The "return address" is the followState of invokingState
 | 
			
		||||
	// If parent is nil, b should be -1.
 | 
			
		||||
	if parent == nil {
 | 
			
		||||
		rn.invokingState = -1
 | 
			
		||||
	} else {
 | 
			
		||||
		rn.invokingState = invokingState
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return rn
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (b *BaseRuleContext) GetBaseRuleContext() *BaseRuleContext {
 | 
			
		||||
	return b
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (b *BaseRuleContext) SetParent(v Tree) {
 | 
			
		||||
	if v == nil {
 | 
			
		||||
		b.parentCtx = nil
 | 
			
		||||
	} else {
 | 
			
		||||
		b.parentCtx = v.(RuleContext)
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (b *BaseRuleContext) GetInvokingState() int {
 | 
			
		||||
	return b.invokingState
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (b *BaseRuleContext) SetInvokingState(t int) {
 | 
			
		||||
	b.invokingState = t
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (b *BaseRuleContext) GetRuleIndex() int {
 | 
			
		||||
	return b.RuleIndex
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (b *BaseRuleContext) GetAltNumber() int {
 | 
			
		||||
	return ATNInvalidAltNumber
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// SetAltNumber is a no-op: the base context does not store an alt number.
func (b *BaseRuleContext) SetAltNumber(altNumber int) {}
 | 
			
		||||
 | 
			
		||||
// A context is empty if there is no invoking state meaning nobody call
 | 
			
		||||
// current context.
 | 
			
		||||
func (b *BaseRuleContext) IsEmpty() bool {
 | 
			
		||||
	return b.invokingState == -1
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Return the combined text of all child nodes. This method only considers
 | 
			
		||||
// tokens which have been added to the parse tree.
 | 
			
		||||
// <p>
 | 
			
		||||
// Since tokens on hidden channels (e.g. whitespace or comments) are not
 | 
			
		||||
// added to the parse trees, they will not appear in the output of b
 | 
			
		||||
// method.
 | 
			
		||||
//
 | 
			
		||||
 | 
			
		||||
func (b *BaseRuleContext) GetParent() Tree {
 | 
			
		||||
	return b.parentCtx
 | 
			
		||||
}
 | 
			
		||||
							
								
								
									
										455
									
								
								vendor/github.com/antlr/antlr4/runtime/Go/antlr/semantic_context.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										455
									
								
								vendor/github.com/antlr/antlr4/runtime/Go/antlr/semantic_context.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							@@ -0,0 +1,455 @@
 | 
			
		||||
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
 | 
			
		||||
// Use of this file is governed by the BSD 3-clause license that
 | 
			
		||||
// can be found in the LICENSE.txt file in the project root.
 | 
			
		||||
 | 
			
		||||
package antlr
 | 
			
		||||
 | 
			
		||||
import (
 | 
			
		||||
	"fmt"
 | 
			
		||||
	"strconv"
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
// A tree structure used to record the semantic context in which
 | 
			
		||||
//  an ATN configuration is valid.  It's either a single predicate,
 | 
			
		||||
//  a conjunction {@code p1&&p2}, or a sum of products {@code p1||p2}.
 | 
			
		||||
//
 | 
			
		||||
//  <p>I have scoped the {@link AND}, {@link OR}, and {@link Predicate} subclasses of
 | 
			
		||||
//  {@link SemanticContext} within the scope of this outer class.</p>
 | 
			
		||||
//
 | 
			
		||||
 | 
			
		||||
type SemanticContext interface {
 | 
			
		||||
	comparable
 | 
			
		||||
 | 
			
		||||
	evaluate(parser Recognizer, outerContext RuleContext) bool
 | 
			
		||||
	evalPrecedence(parser Recognizer, outerContext RuleContext) SemanticContext
 | 
			
		||||
 | 
			
		||||
	hash() int
 | 
			
		||||
	String() string
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func SemanticContextandContext(a, b SemanticContext) SemanticContext {
 | 
			
		||||
	if a == nil || a == SemanticContextNone {
 | 
			
		||||
		return b
 | 
			
		||||
	}
 | 
			
		||||
	if b == nil || b == SemanticContextNone {
 | 
			
		||||
		return a
 | 
			
		||||
	}
 | 
			
		||||
	result := NewAND(a, b)
 | 
			
		||||
	if len(result.opnds) == 1 {
 | 
			
		||||
		return result.opnds[0]
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return result
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func SemanticContextorContext(a, b SemanticContext) SemanticContext {
 | 
			
		||||
	if a == nil {
 | 
			
		||||
		return b
 | 
			
		||||
	}
 | 
			
		||||
	if b == nil {
 | 
			
		||||
		return a
 | 
			
		||||
	}
 | 
			
		||||
	if a == SemanticContextNone || b == SemanticContextNone {
 | 
			
		||||
		return SemanticContextNone
 | 
			
		||||
	}
 | 
			
		||||
	result := NewOR(a, b)
 | 
			
		||||
	if len(result.opnds) == 1 {
 | 
			
		||||
		return result.opnds[0]
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return result
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Predicate identifies one grammar semantic predicate by rule and predicate
// index; isCtxDependent marks predicates that reference rule-local state
// (e.g. $i) and therefore need the outer context at evaluation time.
type Predicate struct {
	ruleIndex      int
	predIndex      int
	isCtxDependent bool
}
 | 
			
		||||
 | 
			
		||||
func NewPredicate(ruleIndex, predIndex int, isCtxDependent bool) *Predicate {
 | 
			
		||||
	p := new(Predicate)
 | 
			
		||||
 | 
			
		||||
	p.ruleIndex = ruleIndex
 | 
			
		||||
	p.predIndex = predIndex
 | 
			
		||||
	p.isCtxDependent = isCtxDependent // e.g., $i ref in pred
 | 
			
		||||
	return p
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
//The default {@link SemanticContext}, which is semantically equivalent to
 | 
			
		||||
//a predicate of the form {@code {true}?}.
 | 
			
		||||
 | 
			
		||||
// SemanticContextNone is the default SemanticContext, semantically the
// always-true predicate {true}?.
var SemanticContextNone SemanticContext = NewPredicate(-1, -1, false)
 | 
			
		||||
 | 
			
		||||
func (p *Predicate) evalPrecedence(parser Recognizer, outerContext RuleContext) SemanticContext {
 | 
			
		||||
	return p
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (p *Predicate) evaluate(parser Recognizer, outerContext RuleContext) bool {
 | 
			
		||||
 | 
			
		||||
	var localctx RuleContext
 | 
			
		||||
 | 
			
		||||
	if p.isCtxDependent {
 | 
			
		||||
		localctx = outerContext
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return parser.Sempred(localctx, p.ruleIndex, p.predIndex)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (p *Predicate) equals(other interface{}) bool {
 | 
			
		||||
	if p == other {
 | 
			
		||||
		return true
 | 
			
		||||
	} else if _, ok := other.(*Predicate); !ok {
 | 
			
		||||
		return false
 | 
			
		||||
	} else {
 | 
			
		||||
		return p.ruleIndex == other.(*Predicate).ruleIndex &&
 | 
			
		||||
			p.predIndex == other.(*Predicate).predIndex &&
 | 
			
		||||
			p.isCtxDependent == other.(*Predicate).isCtxDependent
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (p *Predicate) hash() int {
 | 
			
		||||
	return p.ruleIndex*43 + p.predIndex*47
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (p *Predicate) String() string {
 | 
			
		||||
	return "{" + strconv.Itoa(p.ruleIndex) + ":" + strconv.Itoa(p.predIndex) + "}?"
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// PrecedencePredicate is the implicit {precedence >= N}? predicate used by
// left-recursive rule translation.
type PrecedencePredicate struct {
	precedence int
}
 | 
			
		||||
 | 
			
		||||
func NewPrecedencePredicate(precedence int) *PrecedencePredicate {
 | 
			
		||||
 | 
			
		||||
	p := new(PrecedencePredicate)
 | 
			
		||||
	p.precedence = precedence
 | 
			
		||||
 | 
			
		||||
	return p
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (p *PrecedencePredicate) evaluate(parser Recognizer, outerContext RuleContext) bool {
 | 
			
		||||
	return parser.Precpred(outerContext, p.precedence)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (p *PrecedencePredicate) evalPrecedence(parser Recognizer, outerContext RuleContext) SemanticContext {
 | 
			
		||||
	if parser.Precpred(outerContext, p.precedence) {
 | 
			
		||||
		return SemanticContextNone
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (p *PrecedencePredicate) compareTo(other *PrecedencePredicate) int {
 | 
			
		||||
	return p.precedence - other.precedence
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (p *PrecedencePredicate) equals(other interface{}) bool {
 | 
			
		||||
	if p == other {
 | 
			
		||||
		return true
 | 
			
		||||
	} else if _, ok := other.(*PrecedencePredicate); !ok {
 | 
			
		||||
		return false
 | 
			
		||||
	} else {
 | 
			
		||||
		return p.precedence == other.(*PrecedencePredicate).precedence
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (p *PrecedencePredicate) hash() int {
 | 
			
		||||
	return p.precedence * 51
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (p *PrecedencePredicate) String() string {
 | 
			
		||||
	return "{" + strconv.Itoa(p.precedence) + ">=prec}?"
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func PrecedencePredicatefilterPrecedencePredicates(set *Set) []*PrecedencePredicate {
 | 
			
		||||
	result := make([]*PrecedencePredicate, 0)
 | 
			
		||||
 | 
			
		||||
	for _, v := range set.values() {
 | 
			
		||||
		if c2, ok := v.(*PrecedencePredicate); ok {
 | 
			
		||||
			result = append(result, c2)
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return result
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// A semantic context which is true whenever none of the contained contexts
 | 
			
		||||
// is false.`
 | 
			
		||||
 | 
			
		||||
type AND struct {
 | 
			
		||||
	opnds []SemanticContext
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func NewAND(a, b SemanticContext) *AND {
 | 
			
		||||
 | 
			
		||||
	operands := NewSet(nil, nil)
 | 
			
		||||
	if aa, ok := a.(*AND); ok {
 | 
			
		||||
		for _, o := range aa.opnds {
 | 
			
		||||
			operands.add(o)
 | 
			
		||||
		}
 | 
			
		||||
	} else {
 | 
			
		||||
		operands.add(a)
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	if ba, ok := b.(*AND); ok {
 | 
			
		||||
		for _, o := range ba.opnds {
 | 
			
		||||
			operands.add(o)
 | 
			
		||||
		}
 | 
			
		||||
	} else {
 | 
			
		||||
		operands.add(b)
 | 
			
		||||
	}
 | 
			
		||||
	precedencePredicates := PrecedencePredicatefilterPrecedencePredicates(operands)
 | 
			
		||||
	if len(precedencePredicates) > 0 {
 | 
			
		||||
		// interested in the transition with the lowest precedence
 | 
			
		||||
		var reduced *PrecedencePredicate
 | 
			
		||||
 | 
			
		||||
		for _, p := range precedencePredicates {
 | 
			
		||||
			if reduced == nil || p.precedence < reduced.precedence {
 | 
			
		||||
				reduced = p
 | 
			
		||||
			}
 | 
			
		||||
		}
 | 
			
		||||
 | 
			
		||||
		operands.add(reduced)
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	vs := operands.values()
 | 
			
		||||
	opnds := make([]SemanticContext, len(vs))
 | 
			
		||||
	for i, v := range vs {
 | 
			
		||||
		opnds[i] = v.(SemanticContext)
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	and := new(AND)
 | 
			
		||||
	and.opnds = opnds
 | 
			
		||||
 | 
			
		||||
	return and
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (a *AND) equals(other interface{}) bool {
 | 
			
		||||
	if a == other {
 | 
			
		||||
		return true
 | 
			
		||||
	} else if _, ok := other.(*AND); !ok {
 | 
			
		||||
		return false
 | 
			
		||||
	} else {
 | 
			
		||||
		for i, v := range other.(*AND).opnds {
 | 
			
		||||
			if !a.opnds[i].equals(v) {
 | 
			
		||||
				return false
 | 
			
		||||
			}
 | 
			
		||||
		}
 | 
			
		||||
		return true
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
//
 | 
			
		||||
// {@inheritDoc}
 | 
			
		||||
//
 | 
			
		||||
// <p>
 | 
			
		||||
// The evaluation of predicates by a context is short-circuiting, but
 | 
			
		||||
// unordered.</p>
 | 
			
		||||
//
 | 
			
		||||
func (a *AND) evaluate(parser Recognizer, outerContext RuleContext) bool {
 | 
			
		||||
	for i := 0; i < len(a.opnds); i++ {
 | 
			
		||||
		if !a.opnds[i].evaluate(parser, outerContext) {
 | 
			
		||||
			return false
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
	return true
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (a *AND) evalPrecedence(parser Recognizer, outerContext RuleContext) SemanticContext {
 | 
			
		||||
	differs := false
 | 
			
		||||
	operands := make([]SemanticContext, 0)
 | 
			
		||||
 | 
			
		||||
	for i := 0; i < len(a.opnds); i++ {
 | 
			
		||||
		context := a.opnds[i]
 | 
			
		||||
		evaluated := context.evalPrecedence(parser, outerContext)
 | 
			
		||||
		differs = differs || (evaluated != context)
 | 
			
		||||
		if evaluated == nil {
 | 
			
		||||
			// The AND context is false if any element is false
 | 
			
		||||
			return nil
 | 
			
		||||
		} else if evaluated != SemanticContextNone {
 | 
			
		||||
			// Reduce the result by Skipping true elements
 | 
			
		||||
			operands = append(operands, evaluated)
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
	if !differs {
 | 
			
		||||
		return a
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	if len(operands) == 0 {
 | 
			
		||||
		// all elements were true, so the AND context is true
 | 
			
		||||
		return SemanticContextNone
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	var result SemanticContext
 | 
			
		||||
 | 
			
		||||
	for _, o := range operands {
 | 
			
		||||
		if result == nil {
 | 
			
		||||
			result = o
 | 
			
		||||
		} else {
 | 
			
		||||
			result = SemanticContextandContext(result, o)
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return result
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (a *AND) hash() int {
 | 
			
		||||
	h := murmurInit(37) // Init with a value different from OR
 | 
			
		||||
	for _, op := range a.opnds {
 | 
			
		||||
		h = murmurUpdate(h, op.hash())
 | 
			
		||||
	}
 | 
			
		||||
	return murmurFinish(h, len(a.opnds))
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (a *OR) hash() int {
 | 
			
		||||
	h := murmurInit(41) // Init with a value different from AND
 | 
			
		||||
	for _, op := range a.opnds {
 | 
			
		||||
		h = murmurUpdate(h, op.hash())
 | 
			
		||||
	}
 | 
			
		||||
	return murmurFinish(h, len(a.opnds))
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (a *AND) String() string {
 | 
			
		||||
	s := ""
 | 
			
		||||
 | 
			
		||||
	for _, o := range a.opnds {
 | 
			
		||||
		s += "&& " + fmt.Sprint(o)
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	if len(s) > 3 {
 | 
			
		||||
		return s[0:3]
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return s
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
//
// OR is a semantic context which is true whenever at least one of the
// contained contexts is true.
//

type OR struct {
	// opnds holds the disjunction's operands (flattened/deduplicated by NewOR).
	opnds []SemanticContext
}
 | 
			
		||||
 | 
			
		||||
// NewOR builds the disjunction of a and b: nested ORs are flattened,
// operands are deduplicated via a set, and all precedence predicates are
// collapsed to a single representative before the operand list is frozen.
func NewOR(a, b SemanticContext) *OR {

	operands := NewSet(nil, nil)
	if aa, ok := a.(*OR); ok {
		for _, o := range aa.opnds {
			operands.add(o)
		}
	} else {
		operands.add(a)
	}

	if ba, ok := b.(*OR); ok {
		for _, o := range ba.opnds {
			operands.add(o)
		}
	} else {
		operands.add(b)
	}
	precedencePredicates := PrecedencePredicatefilterPrecedencePredicates(operands)
	if len(precedencePredicates) > 0 {
		// interested in the transition with the highest precedence
		// (NOTE(review): the original comment said "lowest", but the >
		// comparison below keeps the maximum)
		var reduced *PrecedencePredicate

		for _, p := range precedencePredicates {
			if reduced == nil || p.precedence > reduced.precedence {
				reduced = p
			}
		}

		operands.add(reduced)
	}

	vs := operands.values()

	opnds := make([]SemanticContext, len(vs))
	for i, v := range vs {
		opnds[i] = v.(SemanticContext)
	}

	o := new(OR)
	o.opnds = opnds

	return o
}
 | 
			
		||||
 | 
			
		||||
func (o *OR) equals(other interface{}) bool {
 | 
			
		||||
	if o == other {
 | 
			
		||||
		return true
 | 
			
		||||
	} else if _, ok := other.(*OR); !ok {
 | 
			
		||||
		return false
 | 
			
		||||
	} else {
 | 
			
		||||
		for i, v := range other.(*OR).opnds {
 | 
			
		||||
			if !o.opnds[i].equals(v) {
 | 
			
		||||
				return false
 | 
			
		||||
			}
 | 
			
		||||
		}
 | 
			
		||||
		return true
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// <p>
 | 
			
		||||
// The evaluation of predicates by o context is short-circuiting, but
 | 
			
		||||
// unordered.</p>
 | 
			
		||||
//
 | 
			
		||||
func (o *OR) evaluate(parser Recognizer, outerContext RuleContext) bool {
 | 
			
		||||
	for i := 0; i < len(o.opnds); i++ {
 | 
			
		||||
		if o.opnds[i].evaluate(parser, outerContext) {
 | 
			
		||||
			return true
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
	return false
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (o *OR) evalPrecedence(parser Recognizer, outerContext RuleContext) SemanticContext {
 | 
			
		||||
	differs := false
 | 
			
		||||
	operands := make([]SemanticContext, 0)
 | 
			
		||||
	for i := 0; i < len(o.opnds); i++ {
 | 
			
		||||
		context := o.opnds[i]
 | 
			
		||||
		evaluated := context.evalPrecedence(parser, outerContext)
 | 
			
		||||
		differs = differs || (evaluated != context)
 | 
			
		||||
		if evaluated == SemanticContextNone {
 | 
			
		||||
			// The OR context is true if any element is true
 | 
			
		||||
			return SemanticContextNone
 | 
			
		||||
		} else if evaluated != nil {
 | 
			
		||||
			// Reduce the result by Skipping false elements
 | 
			
		||||
			operands = append(operands, evaluated)
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
	if !differs {
 | 
			
		||||
		return o
 | 
			
		||||
	}
 | 
			
		||||
	if len(operands) == 0 {
 | 
			
		||||
		// all elements were false, so the OR context is false
 | 
			
		||||
		return nil
 | 
			
		||||
	}
 | 
			
		||||
	var result SemanticContext
 | 
			
		||||
 | 
			
		||||
	for _, o := range operands {
 | 
			
		||||
		if result == nil {
 | 
			
		||||
			result = o
 | 
			
		||||
		} else {
 | 
			
		||||
			result = SemanticContextorContext(result, o)
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return result
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (o *OR) String() string {
 | 
			
		||||
	s := ""
 | 
			
		||||
 | 
			
		||||
	for _, o := range o.opnds {
 | 
			
		||||
		s += "|| " + fmt.Sprint(o)
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	if len(s) > 3 {
 | 
			
		||||
		return s[0:3]
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return s
 | 
			
		||||
}
 | 
			
		||||
							
								
								
									
										210
									
								
								vendor/github.com/antlr/antlr4/runtime/Go/antlr/token.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										210
									
								
								vendor/github.com/antlr/antlr4/runtime/Go/antlr/token.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							@@ -0,0 +1,210 @@
 | 
			
		||||
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
 | 
			
		||||
// Use of this file is governed by the BSD 3-clause license that
 | 
			
		||||
// can be found in the LICENSE.txt file in the project root.
 | 
			
		||||
 | 
			
		||||
package antlr
 | 
			
		||||
 | 
			
		||||
import (
 | 
			
		||||
	"strconv"
 | 
			
		||||
	"strings"
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
// TokenSourceCharStreamPair groups a token's originating TokenSource with
// the CharStream that source was reading from.
type TokenSourceCharStreamPair struct {
	tokenSource TokenSource
	charStream  CharStream
}
 | 
			
		||||
 | 
			
		||||
// A token has properties: text, type, line, character position in the line
// (so we can ignore tabs), token channel, index, and source from which
// we obtained this token.

type Token interface {
	// Source, classification and position accessors.
	GetSource() *TokenSourceCharStreamPair
	GetTokenType() int
	GetChannel() int
	GetStart() int
	GetStop() int
	GetLine() int
	GetColumn() int

	// Token text; may be set explicitly or derived from the input stream.
	GetText() string
	SetText(s string)

	// Index of the token within its token stream (-1 when not yet assigned).
	GetTokenIndex() int
	SetTokenIndex(v int)

	// Origin of the token.
	GetTokenSource() TokenSource
	GetInputStream() CharStream
}
 | 
			
		||||
 | 
			
		||||
// BaseToken is the shared field set and accessor implementation backing
// concrete Token types such as CommonToken.
type BaseToken struct {
	source     *TokenSourceCharStreamPair
	tokenType  int    // token type of the token
	channel    int    // The parser ignores everything not on DEFAULT_CHANNEL
	start      int    // optional return -1 if not implemented.
	stop       int    // optional return -1 if not implemented.
	tokenIndex int    // from 0..n-1 of the token object in the input stream
	line       int    // line=1..n of the 1st character
	column     int    // beginning of the line at which it occurs, 0..n-1
	text       string // text of the token.
	readOnly   bool   // NOTE(review): not referenced in this chunk — presumably guards mutation; confirm elsewhere
}
 | 
			
		||||
 | 
			
		||||
const (
	// TokenInvalidType marks a token with no valid type.
	TokenInvalidType = 0

	// During lookahead operations, this "token" signifies we hit rule end ATN state
	// and did not follow it despite needing to.
	TokenEpsilon = -2

	// TokenMinUserTokenType is the smallest type value available to
	// user-defined token types.
	TokenMinUserTokenType = 1

	// TokenEOF signals the end of the input stream.
	TokenEOF = -1

	// All tokens go to the parser (unless Skip() is called in that rule)
	// on a particular "channel". The parser tunes to a particular channel
	// so that whitespace etc... can go to the parser on a "hidden" channel.

	TokenDefaultChannel = 0

	// Anything on different channel than DEFAULT_CHANNEL is not parsed
	// by parser.

	TokenHiddenChannel = 1
)
 | 
			
		||||
 | 
			
		||||
// GetChannel returns the channel this token was emitted on.
func (b *BaseToken) GetChannel() int {
	return b.channel
}
 | 
			
		||||
 | 
			
		||||
// GetStart returns the index of the token's first character in the input.
func (b *BaseToken) GetStart() int {
	return b.start
}
 | 
			
		||||
 | 
			
		||||
// GetStop returns the index of the token's last character in the input.
func (b *BaseToken) GetStop() int {
	return b.stop
}
 | 
			
		||||
 | 
			
		||||
// GetLine returns the 1-based line of the token's first character.
func (b *BaseToken) GetLine() int {
	return b.line
}
 | 
			
		||||
 | 
			
		||||
// GetColumn returns the 0-based column of the token's first character.
func (b *BaseToken) GetColumn() int {
	return b.column
}
 | 
			
		||||
 | 
			
		||||
// GetTokenType returns the token's type code.
func (b *BaseToken) GetTokenType() int {
	return b.tokenType
}
 | 
			
		||||
 | 
			
		||||
// GetSource returns the (token source, char stream) pair this token came from.
func (b *BaseToken) GetSource() *TokenSourceCharStreamPair {
	return b.source
}
 | 
			
		||||
 | 
			
		||||
// GetTokenIndex returns the token's position in its token stream.
func (b *BaseToken) GetTokenIndex() int {
	return b.tokenIndex
}
 | 
			
		||||
 | 
			
		||||
// SetTokenIndex records the token's position in its token stream.
func (b *BaseToken) SetTokenIndex(v int) {
	b.tokenIndex = v
}
 | 
			
		||||
 | 
			
		||||
// GetTokenSource returns the TokenSource (e.g. lexer) that produced this token.
func (b *BaseToken) GetTokenSource() TokenSource {
	return b.source.tokenSource
}
 | 
			
		||||
 | 
			
		||||
// GetInputStream returns the CharStream the token's text was read from.
func (b *BaseToken) GetInputStream() CharStream {
	return b.source.charStream
}
 | 
			
		||||
 | 
			
		||||
// CommonToken is the default concrete Token implementation, embedding
// BaseToken for all shared state and accessors.
type CommonToken struct {
	*BaseToken
}
 | 
			
		||||
 | 
			
		||||
func NewCommonToken(source *TokenSourceCharStreamPair, tokenType, channel, start, stop int) *CommonToken {
 | 
			
		||||
 | 
			
		||||
	t := new(CommonToken)
 | 
			
		||||
 | 
			
		||||
	t.BaseToken = new(BaseToken)
 | 
			
		||||
 | 
			
		||||
	t.source = source
 | 
			
		||||
	t.tokenType = tokenType
 | 
			
		||||
	t.channel = channel
 | 
			
		||||
	t.start = start
 | 
			
		||||
	t.stop = stop
 | 
			
		||||
	t.tokenIndex = -1
 | 
			
		||||
	if t.source.tokenSource != nil {
 | 
			
		||||
		t.line = source.tokenSource.GetLine()
 | 
			
		||||
		t.column = source.tokenSource.GetCharPositionInLine()
 | 
			
		||||
	} else {
 | 
			
		||||
		t.column = -1
 | 
			
		||||
	}
 | 
			
		||||
	return t
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// An empty {@link Pair} which is used as the default value of
 | 
			
		||||
// {@link //source} for tokens that do not have a source.
 | 
			
		||||
 | 
			
		||||
//CommonToken.EMPTY_SOURCE = [ nil, nil ]
 | 
			
		||||
 | 
			
		||||
// Constructs a New{@link CommonToken} as a copy of another {@link Token}.
 | 
			
		||||
//
 | 
			
		||||
// <p>
 | 
			
		||||
// If {@code oldToken} is also a {@link CommonToken} instance, the newly
 | 
			
		||||
// constructed token will share a reference to the {@link //text} field and
 | 
			
		||||
// the {@link Pair} stored in {@link //source}. Otherwise, {@link //text} will
 | 
			
		||||
// be assigned the result of calling {@link //GetText}, and {@link //source}
 | 
			
		||||
// will be constructed from the result of {@link Token//GetTokenSource} and
 | 
			
		||||
// {@link Token//GetInputStream}.</p>
 | 
			
		||||
//
 | 
			
		||||
// @param oldToken The token to copy.
 | 
			
		||||
//
 | 
			
		||||
func (c *CommonToken) clone() *CommonToken {
 | 
			
		||||
	t := NewCommonToken(c.source, c.tokenType, c.channel, c.start, c.stop)
 | 
			
		||||
	t.tokenIndex = c.GetTokenIndex()
 | 
			
		||||
	t.line = c.GetLine()
 | 
			
		||||
	t.column = c.GetColumn()
 | 
			
		||||
	t.text = c.GetText()
 | 
			
		||||
	return t
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (c *CommonToken) GetText() string {
 | 
			
		||||
	if c.text != "" {
 | 
			
		||||
		return c.text
 | 
			
		||||
	}
 | 
			
		||||
	input := c.GetInputStream()
 | 
			
		||||
	if input == nil {
 | 
			
		||||
		return ""
 | 
			
		||||
	}
 | 
			
		||||
	n := input.Size()
 | 
			
		||||
	if c.start < n && c.stop < n {
 | 
			
		||||
		return input.GetTextFromInterval(NewInterval(c.start, c.stop))
 | 
			
		||||
	}
 | 
			
		||||
	return "<EOF>"
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// SetText overrides the token's text, bypassing the input-stream lookup.
func (c *CommonToken) SetText(text string) {
	c.text = text
}
 | 
			
		||||
 | 
			
		||||
func (c *CommonToken) String() string {
 | 
			
		||||
	txt := c.GetText()
 | 
			
		||||
	if txt != "" {
 | 
			
		||||
		txt = strings.Replace(txt, "\n", "\\n", -1)
 | 
			
		||||
		txt = strings.Replace(txt, "\r", "\\r", -1)
 | 
			
		||||
		txt = strings.Replace(txt, "\t", "\\t", -1)
 | 
			
		||||
	} else {
 | 
			
		||||
		txt = "<no text>"
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	var ch string
 | 
			
		||||
	if c.channel > 0 {
 | 
			
		||||
		ch = ",channel=" + strconv.Itoa(c.channel)
 | 
			
		||||
	} else {
 | 
			
		||||
		ch = ""
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return "[@" + strconv.Itoa(c.tokenIndex) + "," + strconv.Itoa(c.start) + ":" + strconv.Itoa(c.stop) + "='" +
 | 
			
		||||
		txt + "',<" + strconv.Itoa(c.tokenType) + ">" +
 | 
			
		||||
		ch + "," + strconv.Itoa(c.line) + ":" + strconv.Itoa(c.column) + "]"
 | 
			
		||||
}
 | 
			
		||||
							
								
								
									
										17
									
								
								vendor/github.com/antlr/antlr4/runtime/Go/antlr/token_source.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										17
									
								
								vendor/github.com/antlr/antlr4/runtime/Go/antlr/token_source.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							@@ -0,0 +1,17 @@
 | 
			
		||||
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
 | 
			
		||||
// Use of this file is governed by the BSD 3-clause license that
 | 
			
		||||
// can be found in the LICENSE.txt file in the project root.
 | 
			
		||||
 | 
			
		||||
package antlr
 | 
			
		||||
 | 
			
		||||
// TokenSource produces Tokens on demand (typically a lexer) and exposes
// the position, input-stream and token-factory accessors token streams need.
type TokenSource interface {
	NextToken() Token
	Skip()
	More()
	GetLine() int
	GetCharPositionInLine() int
	GetInputStream() CharStream
	GetSourceName() string
	setTokenFactory(factory TokenFactory)
	GetTokenFactory() TokenFactory
}
 | 
			
		||||
							
								
								
									
										20
									
								
								vendor/github.com/antlr/antlr4/runtime/Go/antlr/token_stream.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										20
									
								
								vendor/github.com/antlr/antlr4/runtime/Go/antlr/token_stream.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							@@ -0,0 +1,20 @@
 | 
			
		||||
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
 | 
			
		||||
// Use of this file is governed by the BSD 3-clause license that
 | 
			
		||||
// can be found in the LICENSE.txt file in the project root.
 | 
			
		||||
 | 
			
		||||
package antlr
 | 
			
		||||
 | 
			
		||||
// TokenStream is an IntStream of Tokens with random access and text
// extraction over token ranges.
type TokenStream interface {
	IntStream

	// LT returns the token k positions ahead of the cursor.
	LT(k int) Token

	Get(index int) Token
	GetTokenSource() TokenSource
	SetTokenSource(TokenSource)

	// Text extraction over the whole stream or a sub-range.
	GetAllText() string
	GetTextFromInterval(*Interval) string
	GetTextFromRuleContext(RuleContext) string
	GetTextFromTokens(Token, Token) string
}
 | 
			
		||||
							
								
								
									
										649
									
								
								vendor/github.com/antlr/antlr4/runtime/Go/antlr/tokenstream_rewriter.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										649
									
								
								vendor/github.com/antlr/antlr4/runtime/Go/antlr/tokenstream_rewriter.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							@@ -0,0 +1,649 @@
 | 
			
		||||
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
 | 
			
		||||
// Use of this file is governed by the BSD 3-clause license that
 | 
			
		||||
// can be found in the LICENSE.txt file in the project root.
 | 
			
		||||
package antlr
 | 
			
		||||
 | 
			
		||||
import (
 | 
			
		||||
"bytes"
 | 
			
		||||
"fmt"
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
// 
 | 
			
		||||
// Useful for rewriting out a buffered input token stream after doing some
 | 
			
		||||
// augmentation or other manipulations on it.
 | 
			
		||||
 | 
			
		||||
// <p>
 | 
			
		||||
// You can insert stuff, replace, and delete chunks. Note that the operations
 | 
			
		||||
// are done lazily--only if you convert the buffer to a {@link String} with
 | 
			
		||||
// {@link TokenStream#getText()}. This is very efficient because you are not
 | 
			
		||||
// moving data around all the time. As the buffer of tokens is converted to
 | 
			
		||||
// strings, the {@link #getText()} method(s) scan the input token stream and
 | 
			
		||||
// check to see if there is an operation at the current index. If so, the
 | 
			
		||||
// operation is done and then normal {@link String} rendering continues on the
 | 
			
		||||
// buffer. This is like having multiple Turing machine instruction streams
 | 
			
		||||
// (programs) operating on a single input tape. :)</p>
 | 
			
		||||
// <p>
 | 
			
		||||
 | 
			
		||||
// This rewriter makes no modifications to the token stream. It does not ask the
 | 
			
		||||
// stream to fill itself up nor does it advance the input cursor. The token
 | 
			
		||||
// stream {@link TokenStream#index()} will return the same value before and
 | 
			
		||||
// after any {@link #getText()} call.</p>
 | 
			
		||||
 | 
			
		||||
// <p>
 | 
			
		||||
// The rewriter only works on tokens that you have in the buffer and ignores the
 | 
			
		||||
// current input cursor. If you are buffering tokens on-demand, calling
 | 
			
		||||
// {@link #getText()} halfway through the input will only do rewrites for those
 | 
			
		||||
// tokens in the first half of the file.</p>
 | 
			
		||||
 | 
			
		||||
// <p>
 | 
			
		||||
// Since the operations are done lazily at {@link #getText}-time, operations do
 | 
			
		||||
// not screw up the token index values. That is, an insert operation at token
 | 
			
		||||
// index {@code i} does not change the index values for tokens
 | 
			
		||||
// {@code i}+1..n-1.</p>
 | 
			
		||||
 | 
			
		||||
// <p>
 | 
			
		||||
// Because operations never actually alter the buffer, you may always get the
 | 
			
		||||
// original token stream back without undoing anything. Since the instructions
 | 
			
		||||
// are queued up, you can easily simulate transactions and roll back any changes
 | 
			
		||||
// if there is an error just by removing instructions. For example,</p>
 | 
			
		||||
 | 
			
		||||
// <pre>
 | 
			
		||||
// CharStream input = new ANTLRFileStream("input");
 | 
			
		||||
// TLexer lex = new TLexer(input);
 | 
			
		||||
// CommonTokenStream tokens = new CommonTokenStream(lex);
 | 
			
		||||
// T parser = new T(tokens);
 | 
			
		||||
// TokenStreamRewriter rewriter = new TokenStreamRewriter(tokens);
 | 
			
		||||
// parser.startRule();
 | 
			
		||||
// </pre>
 | 
			
		||||
 | 
			
		||||
// <p>
 | 
			
		||||
// Then in the rules, you can execute (assuming rewriter is visible):</p>
 | 
			
		||||
 | 
			
		||||
// <pre>
 | 
			
		||||
// Token t,u;
 | 
			
		||||
// ...
 | 
			
		||||
// rewriter.insertAfter(t, "text to put after t");}
 | 
			
		||||
// rewriter.insertAfter(u, "text after u");}
 | 
			
		||||
// System.out.println(rewriter.getText());
 | 
			
		||||
// </pre>
 | 
			
		||||
 | 
			
		||||
// <p>
 | 
			
		||||
// You can also have multiple "instruction streams" and get multiple rewrites
 | 
			
		||||
// from a single pass over the input. Just name the instruction streams and use
 | 
			
		||||
// that name again when printing the buffer. This could be useful for generating
 | 
			
		||||
// a C file and also its header file--all from the same buffer:</p>
 | 
			
		||||
 | 
			
		||||
// <pre>
 | 
			
		||||
// rewriter.insertAfter("pass1", t, "text to put after t");}
 | 
			
		||||
// rewriter.insertAfter("pass2", u, "text after u");}
 | 
			
		||||
// System.out.println(rewriter.getText("pass1"));
 | 
			
		||||
// System.out.println(rewriter.getText("pass2"));
 | 
			
		||||
// </pre>
 | 
			
		||||
 | 
			
		||||
// <p>
 | 
			
		||||
// If you don't use named rewrite streams, a "default" stream is used as the
 | 
			
		||||
// first example shows.</p>
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
const(
	// Default_Program_Name names the implicit rewrite program used when
	// the caller does not supply one.
	Default_Program_Name = "default"
	// Program_Init_Size is the initial capacity of a program's operation list.
	Program_Init_Size = 100
	// Min_Token_Index is the lower bound used when truncating a program.
	Min_Token_Index = 0
)
 | 
			
		||||
 | 
			
		||||
// Define the rewrite operation hierarchy

// RewriteOperation is one queued instruction (insert/replace/delete) in a
// rewrite program.
type RewriteOperation interface {
	// Execute the rewrite operation by possibly adding to the buffer.
	// Return the index of the next token to operate on.
	Execute(buffer *bytes.Buffer)	int
	String()			string
	GetInstructionIndex()		int
	GetIndex()			int
	GetText()			string
	GetOpName()			string
	GetTokens()			TokenStream
	SetInstructionIndex(val int)
	SetIndex(int)
	SetText(string)
	SetOpName(string)
	SetTokens(TokenStream)
}
 | 
			
		||||
 | 
			
		||||
// BaseRewriteOperation carries the state shared by all rewrite operations
// and provides default accessor implementations.
type BaseRewriteOperation struct {
	//Current index of rewrites list
	instruction_index		int
	//Token buffer index
	index				int
	//Substitution text
	text				string
	//Actual operation name
	op_name				string
	//Pointer to token steam
	tokens				TokenStream
}
 | 
			
		||||
 | 
			
		||||
// GetInstructionIndex returns this operation's position in its rewrite program.
func (op *BaseRewriteOperation)GetInstructionIndex() int{
	return op.instruction_index
}
 | 
			
		||||
 | 
			
		||||
// GetIndex returns the token-buffer index this operation applies to.
func (op *BaseRewriteOperation)GetIndex() int{
	return op.index
}
 | 
			
		||||
 | 
			
		||||
// GetText returns the substitution text for this operation.
func (op *BaseRewriteOperation)GetText() string{
	return op.text
}
 | 
			
		||||
 | 
			
		||||
// GetOpName returns the concrete operation's display name.
func (op *BaseRewriteOperation)GetOpName() string{
	return op.op_name
}
 | 
			
		||||
 | 
			
		||||
// GetTokens returns the token stream this operation rewrites.
func (op *BaseRewriteOperation)GetTokens() TokenStream{
	return op.tokens
}
 | 
			
		||||
 | 
			
		||||
// SetInstructionIndex records this operation's position in its program.
func (op *BaseRewriteOperation)SetInstructionIndex(val int){
	op.instruction_index = val
}
 | 
			
		||||
 | 
			
		||||
// SetIndex records the token-buffer index this operation applies to.
func (op *BaseRewriteOperation)SetIndex(val int) {
	op.index = val
}
 | 
			
		||||
 | 
			
		||||
// SetText records the substitution text for this operation.
func (op *BaseRewriteOperation)SetText(val string){
	op.text = val
}
 | 
			
		||||
 | 
			
		||||
// SetOpName records the operation's display name.
func (op *BaseRewriteOperation)SetOpName(val string){
	op.op_name = val
}
 | 
			
		||||
 | 
			
		||||
// SetTokens records the token stream this operation rewrites.
func (op *BaseRewriteOperation)SetTokens(val TokenStream)  {
	op.tokens = val
}
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
// Execute is the default no-op: it writes nothing and returns the same
// token index, leaving the cursor where it was.
func (op *BaseRewriteOperation) Execute(buffer *bytes.Buffer) int{
	return op.index
}
 | 
			
		||||
 | 
			
		||||
func (op *BaseRewriteOperation) String() string  {
 | 
			
		||||
	return fmt.Sprintf("<%s@%d:\"%s\">",
 | 
			
		||||
		op.op_name,
 | 
			
		||||
		op.tokens.Get(op.GetIndex()),
 | 
			
		||||
		op.text,
 | 
			
		||||
	)
 | 
			
		||||
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
// InsertBeforeOp inserts text immediately before the token at index.
type InsertBeforeOp struct {
	BaseRewriteOperation
}
 | 
			
		||||
 | 
			
		||||
func NewInsertBeforeOp(index int, text string, stream TokenStream) *InsertBeforeOp{
 | 
			
		||||
	return &InsertBeforeOp{BaseRewriteOperation:BaseRewriteOperation{
 | 
			
		||||
		index:index,
 | 
			
		||||
		text:text,
 | 
			
		||||
		op_name:"InsertBeforeOp",
 | 
			
		||||
		tokens:stream,
 | 
			
		||||
	}}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (op *InsertBeforeOp) Execute(buffer *bytes.Buffer) int{
 | 
			
		||||
	buffer.WriteString(op.text)
 | 
			
		||||
	if op.tokens.Get(op.index).GetTokenType() != TokenEOF{
 | 
			
		||||
		buffer.WriteString(op.tokens.Get(op.index).GetText())
 | 
			
		||||
	}
 | 
			
		||||
	return op.index+1
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// String delegates to the embedded base operation's rendering.
func (op *InsertBeforeOp) String() string {
	return op.BaseRewriteOperation.String()
}
 | 
			
		||||
 | 
			
		||||
// Distinguish between insert after/before to do the "insert afters"
//  first and then the "insert befores" at same index. Implementation
//  of "insert after" is "insert before index+1".

// InsertAfterOp inserts text after a token; see the note above.
type InsertAfterOp struct {
	BaseRewriteOperation
}
 | 
			
		||||
 | 
			
		||||
func NewInsertAfterOp(index int, text string, stream TokenStream) *InsertAfterOp{
 | 
			
		||||
	return &InsertAfterOp{BaseRewriteOperation:BaseRewriteOperation{
 | 
			
		||||
		index:index+1,
 | 
			
		||||
		text:text,
 | 
			
		||||
		tokens:stream,
 | 
			
		||||
	}}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (op *InsertAfterOp) Execute(buffer *bytes.Buffer) int {
 | 
			
		||||
	buffer.WriteString(op.text)
 | 
			
		||||
	if op.tokens.Get(op.index).GetTokenType() != TokenEOF{
 | 
			
		||||
		buffer.WriteString(op.tokens.Get(op.index).GetText())
 | 
			
		||||
	}
 | 
			
		||||
	return op.index+1
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// String delegates to the embedded base operation's rendering.
func (op *InsertAfterOp) String() string {
	return op.BaseRewriteOperation.String()
}
 | 
			
		||||
 | 
			
		||||
// I'm going to try replacing range from x..y with (y-x)+1 ReplaceOp
// instructions.

// ReplaceOp replaces the token range index..LastIndex (inclusive) with
// its text; empty text makes it a delete.
type ReplaceOp struct{
	BaseRewriteOperation
	LastIndex int
}
 | 
			
		||||
 | 
			
		||||
func NewReplaceOp(from, to int, text string, stream TokenStream)*ReplaceOp {
 | 
			
		||||
	return &ReplaceOp{
 | 
			
		||||
		BaseRewriteOperation:BaseRewriteOperation{
 | 
			
		||||
			index:from,
 | 
			
		||||
			text:text,
 | 
			
		||||
			op_name:"ReplaceOp",
 | 
			
		||||
			tokens:stream,
 | 
			
		||||
		},
 | 
			
		||||
		LastIndex:to,
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (op *ReplaceOp)Execute(buffer *bytes.Buffer) int{
 | 
			
		||||
	if op.text != ""{
 | 
			
		||||
		buffer.WriteString(op.text)
 | 
			
		||||
	}
 | 
			
		||||
	return op.LastIndex +1
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (op *ReplaceOp) String() string {
 | 
			
		||||
	if op.text == "" {
 | 
			
		||||
		return fmt.Sprintf("<DeleteOP@%d..%d>",
 | 
			
		||||
			op.tokens.Get(op.index), op.tokens.Get(op.LastIndex))
 | 
			
		||||
	}
 | 
			
		||||
	return fmt.Sprintf("<ReplaceOp@%d..%d:\"%s\">",
 | 
			
		||||
		op.tokens.Get(op.index), op.tokens.Get(op.LastIndex), op.text)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
// TokenStreamRewriter queues lazy insert/replace/delete instructions over
// a buffered token stream, organized into named programs.
type TokenStreamRewriter struct {
	//Our source stream
	tokens						TokenStream
	// You may have multiple, named streams of rewrite operations.
	//  I'm calling these things "programs."
	//  Maps String (name) → rewrite (List)
	programs					map[string][]RewriteOperation
	last_rewrite_token_indexes	map[string]int
}
 | 
			
		||||
 | 
			
		||||
func NewTokenStreamRewriter(tokens TokenStream) *TokenStreamRewriter{
 | 
			
		||||
	return &TokenStreamRewriter{
 | 
			
		||||
		tokens:						tokens,
 | 
			
		||||
		programs:					map[string][]RewriteOperation{
 | 
			
		||||
			Default_Program_Name:make([]RewriteOperation,0, Program_Init_Size),
 | 
			
		||||
		},
 | 
			
		||||
		last_rewrite_token_indexes:	map[string]int{},
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// GetTokenStream returns the underlying source token stream.
func (tsr *TokenStreamRewriter) GetTokenStream() TokenStream{
	return tsr.tokens
}
 | 
			
		||||
 | 
			
		||||
//  Rollback the instruction stream for a program so that
 | 
			
		||||
//  the indicated instruction (via instructionIndex) is no
 | 
			
		||||
//  longer in the stream. UNTESTED!
 | 
			
		||||
func (tsr *TokenStreamRewriter) Rollback(program_name string, instruction_index int){
 | 
			
		||||
	is, ok  := tsr.programs[program_name]
 | 
			
		||||
	if ok{
 | 
			
		||||
		tsr.programs[program_name] = is[Min_Token_Index:instruction_index]
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// RollbackDefault rolls back the default program (see Rollback).
func (tsr *TokenStreamRewriter) RollbackDefault(instruction_index int){
	tsr.Rollback(Default_Program_Name, instruction_index)
}
 | 
			
		||||
//Reset the program so that no instructions exist
func (tsr *TokenStreamRewriter) DeleteProgram(program_name string){
	tsr.Rollback(program_name, Min_Token_Index) //TODO: double test on that cause lower bound is not included
}
 | 
			
		||||
 | 
			
		||||
// DeleteProgramDefault resets the default program (see DeleteProgram).
func (tsr *TokenStreamRewriter) DeleteProgramDefault(){
	tsr.DeleteProgram(Default_Program_Name)
}
 | 
			
		||||
 | 
			
		||||
func (tsr *TokenStreamRewriter) InsertAfter(program_name string, index int, text string){
 | 
			
		||||
	// to insert after, just insert before next index (even if past end)
 | 
			
		||||
	var op RewriteOperation = NewInsertAfterOp(index, text, tsr.tokens)
 | 
			
		||||
	rewrites := tsr.GetProgram(program_name)
 | 
			
		||||
	op.SetInstructionIndex(len(rewrites))
 | 
			
		||||
	tsr.AddToProgram(program_name, op)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// InsertAfterDefault schedules an insert-after in the default program.
func (tsr *TokenStreamRewriter) InsertAfterDefault(index int, text string){
	tsr.InsertAfter(Default_Program_Name, index, text)
}
 | 
			
		||||
 | 
			
		||||
// InsertAfterToken schedules an insert after the given token's index.
func (tsr *TokenStreamRewriter) InsertAfterToken(program_name string, token Token, text string){
	tsr.InsertAfter(program_name, token.GetTokenIndex(), text)
}
 | 
			
		||||
 | 
			
		||||
func (tsr* TokenStreamRewriter) InsertBefore(program_name string, index int, text string){
 | 
			
		||||
	var op RewriteOperation = NewInsertBeforeOp(index, text, tsr.tokens)
 | 
			
		||||
	rewrites := tsr.GetProgram(program_name)
 | 
			
		||||
	op.SetInstructionIndex(len(rewrites))
 | 
			
		||||
	tsr.AddToProgram(program_name, op)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// InsertBeforeDefault schedules an insert-before in the default program.
func (tsr *TokenStreamRewriter) InsertBeforeDefault(index int, text string){
	tsr.InsertBefore(Default_Program_Name, index, text)
}
 | 
			
		||||
 | 
			
		||||
// InsertBeforeToken schedules an insert before the given token's index.
func (tsr *TokenStreamRewriter) InsertBeforeToken(program_name string,token Token, text string){
	tsr.InsertBefore(program_name, token.GetTokenIndex(), text)
}
 | 
			
		||||
 | 
			
		||||
func (tsr *TokenStreamRewriter) Replace(program_name string, from, to int, text string){
 | 
			
		||||
	if from > to || from < 0 || to < 0 || to >= tsr.tokens.Size(){
 | 
			
		||||
		panic(fmt.Sprintf("replace: range invalid: %d..%d(size=%d)",
 | 
			
		||||
			from, to, tsr.tokens.Size()))
 | 
			
		||||
	}
 | 
			
		||||
	var op RewriteOperation = NewReplaceOp(from, to, text, tsr.tokens)
 | 
			
		||||
	rewrites := tsr.GetProgram(program_name)
 | 
			
		||||
	op.SetInstructionIndex(len(rewrites))
 | 
			
		||||
	tsr.AddToProgram(program_name, op)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// ReplaceDefault schedules a replace in the default program.
func (tsr *TokenStreamRewriter)ReplaceDefault(from, to int, text string)  {
	tsr.Replace(Default_Program_Name, from, to, text)
}
 | 
			
		||||
 | 
			
		||||
// ReplaceDefaultPos replaces the single token at index in the default program.
func (tsr *TokenStreamRewriter)ReplaceDefaultPos(index int, text string){
	tsr.ReplaceDefault(index, index, text)
}
 | 
			
		||||
 | 
			
		||||
// ReplaceToken schedules a replace over the index range of two tokens.
func (tsr *TokenStreamRewriter)ReplaceToken(program_name string, from, to Token, text string){
	tsr.Replace(program_name, from.GetTokenIndex(), to.GetTokenIndex(), text)
}
 | 
			
		||||
 | 
			
		||||
// ReplaceTokenDefault is ReplaceToken on the default program.
func (tsr *TokenStreamRewriter)ReplaceTokenDefault(from, to Token, text string){
	tsr.ReplaceToken(Default_Program_Name, from, to, text)
}
 | 
			
		||||
 | 
			
		||||
// ReplaceTokenDefaultPos replaces the single given token in the default program.
func (tsr *TokenStreamRewriter)ReplaceTokenDefaultPos(index Token, text string){
	tsr.ReplaceTokenDefault(index, index, text)
}
 | 
			
		||||
 | 
			
		||||
// Delete schedules removal of tokens from..to (a replace with empty text).
func (tsr *TokenStreamRewriter)Delete(program_name string, from, to int){
	tsr.Replace(program_name, from, to, "" )
}
 | 
			
		||||
 | 
			
		||||
// DeleteDefault is Delete on the default program.
func (tsr *TokenStreamRewriter)DeleteDefault(from, to int){
	tsr.Delete(Default_Program_Name, from, to)
}
 | 
			
		||||
 | 
			
		||||
// DeleteDefaultPos deletes the single token at index in the default program.
func (tsr *TokenStreamRewriter)DeleteDefaultPos(index int){
	tsr.DeleteDefault(index,index)
}
 | 
			
		||||
 | 
			
		||||
// DeleteToken deletes the index range spanned by two tokens.
func (tsr *TokenStreamRewriter)DeleteToken(program_name string, from, to Token)  {
	tsr.ReplaceToken(program_name, from, to, "")
}
 | 
			
		||||
 | 
			
		||||
// DeleteTokenDefault is DeleteToken on the default program.
func (tsr *TokenStreamRewriter)DeleteTokenDefault(from,to Token){
	tsr.DeleteToken(Default_Program_Name, from, to)
}
 | 
			
		||||
 | 
			
		||||
func (tsr *TokenStreamRewriter)GetLastRewriteTokenIndex(program_name string)int  {
 | 
			
		||||
	i, ok := tsr.last_rewrite_token_indexes[program_name]
 | 
			
		||||
	if !ok{
 | 
			
		||||
		return -1
 | 
			
		||||
	}
 | 
			
		||||
	return i
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// GetLastRewriteTokenIndexDefault queries the default program's last index.
func (tsr *TokenStreamRewriter)GetLastRewriteTokenIndexDefault()int{
	return tsr.GetLastRewriteTokenIndex(Default_Program_Name)
}
 | 
			
		||||
 | 
			
		||||
// SetLastRewriteTokenIndex records the last rewritten token index for the program.
func (tsr *TokenStreamRewriter)SetLastRewriteTokenIndex(program_name string, i int){
	tsr.last_rewrite_token_indexes[program_name] = i
}
 | 
			
		||||
 | 
			
		||||
func (tsr *TokenStreamRewriter)InitializeProgram(name string)[]RewriteOperation{
 | 
			
		||||
	is := make([]RewriteOperation, 0, Program_Init_Size)
 | 
			
		||||
	tsr.programs[name] = is
 | 
			
		||||
	return is
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (tsr *TokenStreamRewriter)AddToProgram(name string, op RewriteOperation){
 | 
			
		||||
	is := tsr.GetProgram(name)
 | 
			
		||||
	is = append(is, op)
 | 
			
		||||
	tsr.programs[name] = is
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (tsr *TokenStreamRewriter)GetProgram(name string) []RewriteOperation  {
 | 
			
		||||
	is, ok := tsr.programs[name]
 | 
			
		||||
	if !ok{
 | 
			
		||||
		is = tsr.InitializeProgram(name)
 | 
			
		||||
	}
 | 
			
		||||
	return is
 | 
			
		||||
}
 | 
			
		||||
//  Return the text from the original tokens altered per the
//  instructions given to this rewriter.
// GetTextDefault renders the whole stream using the default program.
func (tsr *TokenStreamRewriter)GetTextDefault() string{
	return tsr.GetText(
		Default_Program_Name,
		NewInterval(0, tsr.tokens.Size()-1))
}
 | 
			
		||||
//  Return the text from the original tokens altered per the
 | 
			
		||||
//  instructions given to this rewriter.
 | 
			
		||||
func (tsr *TokenStreamRewriter)GetText(program_name string, interval *Interval) string  {
 | 
			
		||||
	rewrites := tsr.programs[program_name]
 | 
			
		||||
	start := interval.Start
 | 
			
		||||
	stop  := interval.Stop
 | 
			
		||||
	// ensure start/end are in range
 | 
			
		||||
	stop = min(stop, tsr.tokens.Size()-1)
 | 
			
		||||
	start = max(start,0)
 | 
			
		||||
	if rewrites == nil || len(rewrites) == 0{
 | 
			
		||||
		return tsr.tokens.GetTextFromInterval(interval) // no instructions to execute
 | 
			
		||||
	}
 | 
			
		||||
	buf := bytes.Buffer{}
 | 
			
		||||
	// First, optimize instruction stream
 | 
			
		||||
	indexToOp := reduceToSingleOperationPerIndex(rewrites)
 | 
			
		||||
	// Walk buffer, executing instructions and emitting tokens
 | 
			
		||||
	for i:=start; i<=stop && i<tsr.tokens.Size();{
 | 
			
		||||
		op := indexToOp[i]
 | 
			
		||||
		delete(indexToOp, i)// remove so any left have index size-1
 | 
			
		||||
		t := tsr.tokens.Get(i)
 | 
			
		||||
		if op == nil{
 | 
			
		||||
			// no operation at that index, just dump token
 | 
			
		||||
			if t.GetTokenType() != TokenEOF {buf.WriteString(t.GetText())}
 | 
			
		||||
			i++ // move to next token
 | 
			
		||||
		}else {
 | 
			
		||||
			i = op.Execute(&buf)// execute operation and skip
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
	// include stuff after end if it's last index in buffer
 | 
			
		||||
	// So, if they did an insertAfter(lastValidIndex, "foo"), include
 | 
			
		||||
	// foo if end==lastValidIndex.
 | 
			
		||||
	if stop == tsr.tokens.Size()-1{
 | 
			
		||||
		// Scan any remaining operations after last token
 | 
			
		||||
		// should be included (they will be inserts).
 | 
			
		||||
		for _, op := range indexToOp{
 | 
			
		||||
			if op.GetIndex() >= tsr.tokens.Size()-1 {buf.WriteString(op.GetText())}
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
	return buf.String()
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
//  We need to combine operations and report invalid operations (like
 | 
			
		||||
//  overlapping replaces that are not completed nested). Inserts to
 | 
			
		||||
//  same index need to be combined etc...  Here are the cases:
 | 
			
		||||
//
 | 
			
		||||
//  I.i.u I.j.v								leave alone, nonoverlapping
 | 
			
		||||
//  I.i.u I.i.v								combine: Iivu
 | 
			
		||||
//
 | 
			
		||||
//  R.i-j.u R.x-y.v	| i-j in x-y			delete first R
 | 
			
		||||
//  R.i-j.u R.i-j.v							delete first R
 | 
			
		||||
//  R.i-j.u R.x-y.v	| x-y in i-j			ERROR
 | 
			
		||||
//  R.i-j.u R.x-y.v	| boundaries overlap	ERROR
 | 
			
		||||
//
 | 
			
		||||
//  Delete special case of replace (text==null):
 | 
			
		||||
//  D.i-j.u D.x-y.v	| boundaries overlap	combine to max(min)..max(right)
 | 
			
		||||
//
 | 
			
		||||
//  I.i.u R.x-y.v | i in (x+1)-y			delete I (since insert before
 | 
			
		||||
//											we're not deleting i)
 | 
			
		||||
//  I.i.u R.x-y.v | i not in (x+1)-y		leave alone, nonoverlapping
 | 
			
		||||
//  R.x-y.v I.i.u | i in x-y				ERROR
 | 
			
		||||
//  R.x-y.v I.x.u 							R.x-y.uv (combine, delete I)
 | 
			
		||||
//  R.x-y.v I.i.u | i not in x-y			leave alone, nonoverlapping
 | 
			
		||||
//
 | 
			
		||||
//  I.i.u = insert u before op @ index i
 | 
			
		||||
//  R.x-y.u = replace x-y indexed tokens with u
 | 
			
		||||
//
 | 
			
		||||
//  First we need to examine replaces. For any replace op:
 | 
			
		||||
//
 | 
			
		||||
// 		1. wipe out any insertions before op within that range.
 | 
			
		||||
//		2. Drop any replace op before that is contained completely within
 | 
			
		||||
//	 	that range.
 | 
			
		||||
//		3. Throw exception upon boundary overlap with any previous replace.
 | 
			
		||||
//
 | 
			
		||||
//  	Then we can deal with inserts:
 | 
			
		||||
//
 | 
			
		||||
// 		1. for any inserts to same index, combine even if not adjacent.
 | 
			
		||||
// 		2. for any prior replace with same left boundary, combine this
 | 
			
		||||
//	 	insert with replace and delete this replace.
 | 
			
		||||
// 		3. throw exception if index in same range as previous replace
 | 
			
		||||
//
 | 
			
		||||
//  Don't actually delete; make op null in list. Easier to walk list.
 | 
			
		||||
//  Later we can throw as we add to index → op map.
 | 
			
		||||
//
 | 
			
		||||
//  Note that I.2 R.2-2 will wipe out I.2 even though, technically, the
 | 
			
		||||
//  inserted stuff would be before the replace range. But, if you
 | 
			
		||||
//  add tokens in front of a method body '{' and then delete the method
 | 
			
		||||
//  body, I think the stuff before the '{' you added should disappear too.
 | 
			
		||||
//
 | 
			
		||||
//  Return a map from token index to operation.
 | 
			
		||||
//
 | 
			
		||||
func reduceToSingleOperationPerIndex(rewrites []RewriteOperation) map[int]RewriteOperation{
 | 
			
		||||
	// WALK REPLACES
 | 
			
		||||
	for i:=0; i < len(rewrites); i++{
 | 
			
		||||
		op := rewrites[i]
 | 
			
		||||
		if op == nil{continue}
 | 
			
		||||
		rop, ok := op.(*ReplaceOp)
 | 
			
		||||
		if !ok{continue}
 | 
			
		||||
		// Wipe prior inserts within range
 | 
			
		||||
		for j:=0; j<i && j < len(rewrites); j++{
 | 
			
		||||
			if iop, ok := rewrites[j].(*InsertBeforeOp);ok{
 | 
			
		||||
				if iop.index == rop.index{
 | 
			
		||||
					// E.g., insert before 2, delete 2..2; update replace
 | 
			
		||||
					// text to include insert before, kill insert
 | 
			
		||||
					rewrites[iop.instruction_index] = nil
 | 
			
		||||
					if rop.text != ""{
 | 
			
		||||
						rop.text = iop.text + rop.text
 | 
			
		||||
					}else{
 | 
			
		||||
						rop.text = iop.text
 | 
			
		||||
					}
 | 
			
		||||
				}else if iop.index > rop.index && iop.index <=rop.LastIndex{
 | 
			
		||||
					// delete insert as it's a no-op.
 | 
			
		||||
					rewrites[iop.instruction_index] = nil
 | 
			
		||||
				}
 | 
			
		||||
			}
 | 
			
		||||
		}
 | 
			
		||||
		// Drop any prior replaces contained within
 | 
			
		||||
		for j:=0; j<i && j < len(rewrites); j++{
 | 
			
		||||
			if prevop, ok := rewrites[j].(*ReplaceOp);ok{
 | 
			
		||||
				if prevop.index>=rop.index && prevop.LastIndex <= rop.LastIndex{
 | 
			
		||||
					// delete replace as it's a no-op.
 | 
			
		||||
					rewrites[prevop.instruction_index] = nil
 | 
			
		||||
					continue
 | 
			
		||||
				}
 | 
			
		||||
				// throw exception unless disjoint or identical
 | 
			
		||||
				disjoint := prevop.LastIndex < rop.index || prevop.index > rop.LastIndex
 | 
			
		||||
				// Delete special case of replace (text==null):
 | 
			
		||||
				// D.i-j.u D.x-y.v	| boundaries overlap	combine to max(min)..max(right)
 | 
			
		||||
				if prevop.text == "" && rop.text == "" && !disjoint{
 | 
			
		||||
					rewrites[prevop.instruction_index] = nil
 | 
			
		||||
					rop.index = min(prevop.index, rop.index)
 | 
			
		||||
					rop.LastIndex = max(prevop.LastIndex, rop.LastIndex)
 | 
			
		||||
					println("new rop" + rop.String()) //TODO: remove console write, taken from Java version
 | 
			
		||||
				}else if !disjoint{
 | 
			
		||||
					panic("replace op boundaries of " + rop.String() + " overlap with previous " + prevop.String())
 | 
			
		||||
				}
 | 
			
		||||
			}
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
	// WALK INSERTS
 | 
			
		||||
	for i:=0; i < len(rewrites); i++ {
 | 
			
		||||
		op := rewrites[i]
 | 
			
		||||
		if op == nil{continue}
 | 
			
		||||
		//hack to replicate inheritance in composition
 | 
			
		||||
		_, iok := rewrites[i].(*InsertBeforeOp)
 | 
			
		||||
		_, aok := rewrites[i].(*InsertAfterOp)
 | 
			
		||||
		if !iok && !aok{continue}
 | 
			
		||||
		iop := rewrites[i]
 | 
			
		||||
		// combine current insert with prior if any at same index
 | 
			
		||||
		// deviating a bit from TokenStreamRewriter.java - hard to incorporate inheritance logic
 | 
			
		||||
		for j:=0; j<i && j < len(rewrites); j++{
 | 
			
		||||
			if nextIop, ok := rewrites[j].(*InsertAfterOp); ok{
 | 
			
		||||
				if nextIop.index == iop.GetIndex(){
 | 
			
		||||
					iop.SetText(nextIop.text + iop.GetText())
 | 
			
		||||
					rewrites[j] = nil
 | 
			
		||||
				}
 | 
			
		||||
			}
 | 
			
		||||
			if prevIop, ok := rewrites[j].(*InsertBeforeOp); ok{
 | 
			
		||||
				if prevIop.index == iop.GetIndex(){
 | 
			
		||||
					iop.SetText(iop.GetText() + prevIop.text)
 | 
			
		||||
					rewrites[prevIop.instruction_index] = nil
 | 
			
		||||
				}
 | 
			
		||||
			}
 | 
			
		||||
		}
 | 
			
		||||
		// look for replaces where iop.index is in range; error
 | 
			
		||||
		for j:=0; j<i && j < len(rewrites); j++{
 | 
			
		||||
			if rop,ok := rewrites[j].(*ReplaceOp); ok{
 | 
			
		||||
				if iop.GetIndex() == rop.index{
 | 
			
		||||
					rop.text = iop.GetText() + rop.text
 | 
			
		||||
					rewrites[i] = nil
 | 
			
		||||
					continue
 | 
			
		||||
				}
 | 
			
		||||
				if iop.GetIndex() >= rop.index && iop.GetIndex() <= rop.LastIndex{
 | 
			
		||||
					panic("insert op "+iop.String()+" within boundaries of previous "+rop.String())
 | 
			
		||||
				}
 | 
			
		||||
			}
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
	m := map[int]RewriteOperation{}
 | 
			
		||||
	for i:=0; i < len(rewrites); i++{
 | 
			
		||||
		op := rewrites[i]
 | 
			
		||||
		if op == nil {continue}
 | 
			
		||||
		if _, ok := m[op.GetIndex()]; ok{
 | 
			
		||||
			panic("should only be one op per index")
 | 
			
		||||
		}
 | 
			
		||||
		m[op.GetIndex()] = op
 | 
			
		||||
	}
 | 
			
		||||
	return m
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
/*
 | 
			
		||||
	Quick fixing Go lack of overloads
 | 
			
		||||
 */
 | 
			
		||||
 | 
			
		||||
// max returns the larger of a and b.
func max(a, b int) int {
	if a < b {
		return b
	}
	return a
}
 | 
			
		||||
// min returns the smaller of a and b.
func min(a, b int) int {
	if a > b {
		return b
	}
	return a
}
 | 
			
		||||
							
								
								
									
										32
									
								
								vendor/github.com/antlr/antlr4/runtime/Go/antlr/trace_listener.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										32
									
								
								vendor/github.com/antlr/antlr4/runtime/Go/antlr/trace_listener.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							@@ -0,0 +1,32 @@
 | 
			
		||||
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
 | 
			
		||||
// Use of this file is governed by the BSD 3-clause license that
 | 
			
		||||
// can be found in the LICENSE.txt file in the project root.
 | 
			
		||||
 | 
			
		||||
package antlr
 | 
			
		||||
 | 
			
		||||
import "fmt"
 | 
			
		||||
 | 
			
		||||
// TraceListener prints enter/exit/consume trace lines for a parser run.
type TraceListener struct {
	parser *BaseParser
}
 | 
			
		||||
 | 
			
		||||
func NewTraceListener(parser *BaseParser) *TraceListener {
 | 
			
		||||
	tl := new(TraceListener)
 | 
			
		||||
	tl.parser = parser
 | 
			
		||||
	return tl
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// VisitErrorNode is a no-op; error nodes are not traced.
func (t *TraceListener) VisitErrorNode(_ ErrorNode) {
}
 | 
			
		||||
 | 
			
		||||
// EnterEveryRule prints the entered rule's name and the text of the next token.
func (t *TraceListener) EnterEveryRule(ctx ParserRuleContext) {
	fmt.Println("enter   " + t.parser.GetRuleNames()[ctx.GetRuleIndex()] + ", LT(1)=" + t.parser.input.LT(1).GetText())
}
 | 
			
		||||
 | 
			
		||||
// VisitTerminal prints the consumed token and the current rule's name.
func (t *TraceListener) VisitTerminal(node TerminalNode) {
	fmt.Println("consume " + fmt.Sprint(node.GetSymbol()) + " rule " + t.parser.GetRuleNames()[t.parser.ctx.GetRuleIndex()])
}
 | 
			
		||||
 | 
			
		||||
// ExitEveryRule prints the exited rule's name and the text of the next token.
func (t *TraceListener) ExitEveryRule(ctx ParserRuleContext) {
	fmt.Println("exit    " + t.parser.GetRuleNames()[ctx.GetRuleIndex()] + ", LT(1)=" + t.parser.input.LT(1).GetText())
}
 | 
			
		||||
							
								
								
									
										421
									
								
								vendor/github.com/antlr/antlr4/runtime/Go/antlr/transition.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										421
									
								
								vendor/github.com/antlr/antlr4/runtime/Go/antlr/transition.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							@@ -0,0 +1,421 @@
 | 
			
		||||
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
 | 
			
		||||
// Use of this file is governed by the BSD 3-clause license that
 | 
			
		||||
// can be found in the LICENSE.txt file in the project root.
 | 
			
		||||
 | 
			
		||||
package antlr
 | 
			
		||||
 | 
			
		||||
import (
 | 
			
		||||
	"fmt"
 | 
			
		||||
	"strconv"
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
//  atom, set, epsilon, action, predicate, rule transitions.
 | 
			
		||||
//
 | 
			
		||||
//  <p>This is a one way link.  It emanates from a state (usually via a list of
 | 
			
		||||
//  transitions) and has a target state.</p>
 | 
			
		||||
//
 | 
			
		||||
//  <p>Since we never have to change the ATN transitions once we construct it,
 | 
			
		||||
//  the states. We'll use the term Edge for the DFA to distinguish them from
 | 
			
		||||
//  ATN transitions.</p>
 | 
			
		||||
 | 
			
		||||
// Transition is a one-way edge in the ATN from one state to a target
// state (see the comment above for the atom/set/epsilon/... kinds).
type Transition interface {
	getTarget() ATNState
	setTarget(ATNState)
	getIsEpsilon() bool
	getLabel() *IntervalSet
	getSerializationType() int
	// Matches reports whether symbol (within the given vocabulary
	// bounds) is matched by this transition.
	Matches(int, int, int) bool
}
 | 
			
		||||
 | 
			
		||||
// BaseTransition carries the state shared by all transition kinds;
// concrete transitions embed it.
type BaseTransition struct {
	target            ATNState
	isEpsilon         bool
	label             int
	intervalSet       *IntervalSet
	serializationType int
}
 | 
			
		||||
 | 
			
		||||
func NewBaseTransition(target ATNState) *BaseTransition {
 | 
			
		||||
 | 
			
		||||
	if target == nil {
 | 
			
		||||
		panic("target cannot be nil.")
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	t := new(BaseTransition)
 | 
			
		||||
 | 
			
		||||
	t.target = target
 | 
			
		||||
	// Are we epsilon, action, sempred?
 | 
			
		||||
	t.isEpsilon = false
 | 
			
		||||
	t.intervalSet = nil
 | 
			
		||||
 | 
			
		||||
	return t
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// getTarget returns the state this transition leads to.
func (t *BaseTransition) getTarget() ATNState {
	return t.target
}
 | 
			
		||||
 | 
			
		||||
// setTarget redirects this transition to state s.
func (t *BaseTransition) setTarget(s ATNState) {
	t.target = s
}
 | 
			
		||||
 | 
			
		||||
// getIsEpsilon reports whether this transition consumes no input.
func (t *BaseTransition) getIsEpsilon() bool {
	return t.isEpsilon
}
 | 
			
		||||
 | 
			
		||||
// getLabel returns the set of symbols this transition matches (may be nil).
func (t *BaseTransition) getLabel() *IntervalSet {
	return t.intervalSet
}
 | 
			
		||||
 | 
			
		||||
// getSerializationType returns the Transition* type code of this transition.
func (t *BaseTransition) getSerializationType() int {
	return t.serializationType
}
 | 
			
		||||
 | 
			
		||||
// Matches panics: BaseTransition is abstract and concrete transitions
// must override it.
func (t *BaseTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
	panic("Not implemented")
}
 | 
			
		||||
 | 
			
		||||
// Transition serialization type codes, indexing into
// TransitionserializationNames below.
const (
	TransitionEPSILON    = 1
	TransitionRANGE      = 2
	TransitionRULE       = 3
	TransitionPREDICATE  = 4 // e.g., {isType(input.LT(1))}?
	TransitionATOM       = 5
	TransitionACTION     = 6
	TransitionSET        = 7 // ~(A|B) or ~atom, wildcard, which convert to next 2
	TransitionNOTSET     = 8
	TransitionWILDCARD   = 9
	TransitionPRECEDENCE = 10
)
 | 
			
		||||
 | 
			
		||||
// TransitionserializationNames maps Transition* type codes to display
// names; index 0 is the invalid code.
var TransitionserializationNames = []string{
	"INVALID",
	"EPSILON",
	"RANGE",
	"RULE",
	"PREDICATE",
	"ATOM",
	"ACTION",
	"SET",
	"NOT_SET",
	"WILDCARD",
	"PRECEDENCE",
}
 | 
			
		||||
 | 
			
		||||
//var TransitionserializationTypes struct {
 | 
			
		||||
//	EpsilonTransition int
 | 
			
		||||
//	RangeTransition int
 | 
			
		||||
//	RuleTransition int
 | 
			
		||||
//	PredicateTransition int
 | 
			
		||||
//	AtomTransition int
 | 
			
		||||
//	ActionTransition int
 | 
			
		||||
//	SetTransition int
 | 
			
		||||
//	NotSetTransition int
 | 
			
		||||
//	WildcardTransition int
 | 
			
		||||
//	PrecedencePredicateTransition int
 | 
			
		||||
//}{
 | 
			
		||||
//	TransitionEPSILON,
 | 
			
		||||
//	TransitionRANGE,
 | 
			
		||||
//	TransitionRULE,
 | 
			
		||||
//	TransitionPREDICATE,
 | 
			
		||||
//	TransitionATOM,
 | 
			
		||||
//	TransitionACTION,
 | 
			
		||||
//	TransitionSET,
 | 
			
		||||
//	TransitionNOTSET,
 | 
			
		||||
//	TransitionWILDCARD,
 | 
			
		||||
//	TransitionPRECEDENCE
 | 
			
		||||
//}
 | 
			
		||||
 | 
			
		||||
// TODO: make all transitions sets? no, should remove set edges
// AtomTransition matches exactly one symbol (token type or code point).
type AtomTransition struct {
	*BaseTransition
}
 | 
			
		||||
 | 
			
		||||
func NewAtomTransition(target ATNState, intervalSet int) *AtomTransition {
 | 
			
		||||
 | 
			
		||||
	t := new(AtomTransition)
 | 
			
		||||
	t.BaseTransition = NewBaseTransition(target)
 | 
			
		||||
 | 
			
		||||
	t.label = intervalSet // The token type or character value or, signifies special intervalSet.
 | 
			
		||||
	t.intervalSet = t.makeLabel()
 | 
			
		||||
	t.serializationType = TransitionATOM
 | 
			
		||||
 | 
			
		||||
	return t
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (t *AtomTransition) makeLabel() *IntervalSet {
 | 
			
		||||
	s := NewIntervalSet()
 | 
			
		||||
	s.addOne(t.label)
 | 
			
		||||
	return s
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Matches reports whether symbol equals this atom's label.
func (t *AtomTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
	return t.label == symbol
}
 | 
			
		||||
 | 
			
		||||
// String renders the matched symbol value as a decimal string.
func (t *AtomTransition) String() string {
	return strconv.Itoa(t.label)
}
 | 
			
		||||
 | 
			
		||||
// RuleTransition is an epsilon edge into another rule's start state; it
// records the state to resume at when the invoked rule returns.
type RuleTransition struct {
	*BaseTransition

	followState           ATNState
	ruleIndex, precedence int
}
 | 
			
		||||
 | 
			
		||||
func NewRuleTransition(ruleStart ATNState, ruleIndex, precedence int, followState ATNState) *RuleTransition {
 | 
			
		||||
 | 
			
		||||
	t := new(RuleTransition)
 | 
			
		||||
	t.BaseTransition = NewBaseTransition(ruleStart)
 | 
			
		||||
 | 
			
		||||
	t.ruleIndex = ruleIndex
 | 
			
		||||
	t.precedence = precedence
 | 
			
		||||
	t.followState = followState
 | 
			
		||||
	t.serializationType = TransitionRULE
 | 
			
		||||
	t.isEpsilon = true
 | 
			
		||||
 | 
			
		||||
	return t
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Matches always returns false: rule transitions are epsilon edges.
func (t *RuleTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
	return false
}
 | 
			
		||||
 | 
			
		||||
// EpsilonTransition consumes no input; outermostPrecedenceReturn marks
// precedence-rule returns.
type EpsilonTransition struct {
	*BaseTransition

	outermostPrecedenceReturn int
}
 | 
			
		||||
 | 
			
		||||
func NewEpsilonTransition(target ATNState, outermostPrecedenceReturn int) *EpsilonTransition {
 | 
			
		||||
 | 
			
		||||
	t := new(EpsilonTransition)
 | 
			
		||||
	t.BaseTransition = NewBaseTransition(target)
 | 
			
		||||
 | 
			
		||||
	t.serializationType = TransitionEPSILON
 | 
			
		||||
	t.isEpsilon = true
 | 
			
		||||
	t.outermostPrecedenceReturn = outermostPrecedenceReturn
 | 
			
		||||
	return t
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Matches always returns false: epsilon transitions match no symbol.
func (t *EpsilonTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
	return false
}
 | 
			
		||||
 | 
			
		||||
// String identifies this transition as epsilon.
func (t *EpsilonTransition) String() string {
	return "epsilon"
}
 | 
			
		||||
 | 
			
		||||
// RangeTransition matches any symbol in the inclusive range start..stop.
type RangeTransition struct {
	*BaseTransition

	start, stop int
}
 | 
			
		||||
 | 
			
		||||
func NewRangeTransition(target ATNState, start, stop int) *RangeTransition {
 | 
			
		||||
 | 
			
		||||
	t := new(RangeTransition)
 | 
			
		||||
	t.BaseTransition = NewBaseTransition(target)
 | 
			
		||||
 | 
			
		||||
	t.serializationType = TransitionRANGE
 | 
			
		||||
	t.start = start
 | 
			
		||||
	t.stop = stop
 | 
			
		||||
	t.intervalSet = t.makeLabel()
 | 
			
		||||
	return t
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (t *RangeTransition) makeLabel() *IntervalSet {
 | 
			
		||||
	s := NewIntervalSet()
 | 
			
		||||
	s.addRange(t.start, t.stop)
 | 
			
		||||
	return s
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Matches reports whether symbol falls within the inclusive range.
func (t *RangeTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
	return symbol >= t.start && symbol <= t.stop
}
 | 
			
		||||
 | 
			
		||||
func (t *RangeTransition) String() string {
 | 
			
		||||
	return "'" + string(t.start) + "'..'" + string(t.stop) + "'"
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// AbstractPredicateTransition marks predicate-style transitions; the
// marker method exists only to replicate the Java class hierarchy.
type AbstractPredicateTransition interface {
	Transition
	IAbstractPredicateTransitionFoo()
}
 | 
			
		||||
 | 
			
		||||
// BaseAbstractPredicateTransition is the shared base embedded by
// predicate-style transitions.
type BaseAbstractPredicateTransition struct {
	*BaseTransition
}
 | 
			
		||||
 | 
			
		||||
func NewBasePredicateTransition(target ATNState) *BaseAbstractPredicateTransition {
 | 
			
		||||
 | 
			
		||||
	t := new(BaseAbstractPredicateTransition)
 | 
			
		||||
	t.BaseTransition = NewBaseTransition(target)
 | 
			
		||||
 | 
			
		||||
	return t
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// IAbstractPredicateTransitionFoo is a marker method satisfying AbstractPredicateTransition.
func (a *BaseAbstractPredicateTransition) IAbstractPredicateTransitionFoo() {}
 | 
			
		||||
 | 
			
		||||
// PredicateTransition is an epsilon edge guarded by a semantic
// predicate identified by rule and predicate index.
type PredicateTransition struct {
	*BaseAbstractPredicateTransition

	isCtxDependent       bool
	ruleIndex, predIndex int
}
 | 
			
		||||
 | 
			
		||||
func NewPredicateTransition(target ATNState, ruleIndex, predIndex int, isCtxDependent bool) *PredicateTransition {
 | 
			
		||||
 | 
			
		||||
	t := new(PredicateTransition)
 | 
			
		||||
	t.BaseAbstractPredicateTransition = NewBasePredicateTransition(target)
 | 
			
		||||
 | 
			
		||||
	t.serializationType = TransitionPREDICATE
 | 
			
		||||
	t.ruleIndex = ruleIndex
 | 
			
		||||
	t.predIndex = predIndex
 | 
			
		||||
	t.isCtxDependent = isCtxDependent // e.g., $i ref in pred
 | 
			
		||||
	t.isEpsilon = true
 | 
			
		||||
	return t
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Matches always returns false: predicate transitions are epsilon edges.
func (t *PredicateTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
	return false
}
 | 
			
		||||
 | 
			
		||||
// getPredicate builds the Predicate value this transition is guarded by.
func (t *PredicateTransition) getPredicate() *Predicate {
	return NewPredicate(t.ruleIndex, t.predIndex, t.isCtxDependent)
}
 | 
			
		||||
 | 
			
		||||
// String renders as "pred_ruleIndex:predIndex".
func (t *PredicateTransition) String() string {
	return "pred_" + strconv.Itoa(t.ruleIndex) + ":" + strconv.Itoa(t.predIndex)
}
 | 
			
		||||
 | 
			
		||||
// ActionTransition is an epsilon edge that triggers a lexer/parser
// action identified by rule and action index.
type ActionTransition struct {
	*BaseTransition

	isCtxDependent                    bool
	ruleIndex, actionIndex, predIndex int
}
 | 
			
		||||
 | 
			
		||||
func NewActionTransition(target ATNState, ruleIndex, actionIndex int, isCtxDependent bool) *ActionTransition {
 | 
			
		||||
 | 
			
		||||
	t := new(ActionTransition)
 | 
			
		||||
	t.BaseTransition = NewBaseTransition(target)
 | 
			
		||||
 | 
			
		||||
	t.serializationType = TransitionACTION
 | 
			
		||||
	t.ruleIndex = ruleIndex
 | 
			
		||||
	t.actionIndex = actionIndex
 | 
			
		||||
	t.isCtxDependent = isCtxDependent // e.g., $i ref in pred
 | 
			
		||||
	t.isEpsilon = true
 | 
			
		||||
	return t
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Matches always returns false: action transitions are epsilon edges.
func (t *ActionTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
	return false
}
 | 
			
		||||
 | 
			
		||||
func (t *ActionTransition) String() string {
 | 
			
		||||
	return "action_" + strconv.Itoa(t.ruleIndex) + ":" + strconv.Itoa(t.actionIndex)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
type SetTransition struct {
 | 
			
		||||
	*BaseTransition
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func NewSetTransition(target ATNState, set *IntervalSet) *SetTransition {
 | 
			
		||||
 | 
			
		||||
	t := new(SetTransition)
 | 
			
		||||
	t.BaseTransition = NewBaseTransition(target)
 | 
			
		||||
 | 
			
		||||
	t.serializationType = TransitionSET
 | 
			
		||||
	if set != nil {
 | 
			
		||||
		t.intervalSet = set
 | 
			
		||||
	} else {
 | 
			
		||||
		t.intervalSet = NewIntervalSet()
 | 
			
		||||
		t.intervalSet.addOne(TokenInvalidType)
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return t
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (t *SetTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
 | 
			
		||||
	return t.intervalSet.contains(symbol)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (t *SetTransition) String() string {
 | 
			
		||||
	return t.intervalSet.String()
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
type NotSetTransition struct {
 | 
			
		||||
	*SetTransition
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func NewNotSetTransition(target ATNState, set *IntervalSet) *NotSetTransition {
 | 
			
		||||
 | 
			
		||||
	t := new(NotSetTransition)
 | 
			
		||||
 | 
			
		||||
	t.SetTransition = NewSetTransition(target, set)
 | 
			
		||||
 | 
			
		||||
	t.serializationType = TransitionNOTSET
 | 
			
		||||
 | 
			
		||||
	return t
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (t *NotSetTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
 | 
			
		||||
	return symbol >= minVocabSymbol && symbol <= maxVocabSymbol && !t.intervalSet.contains(symbol)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (t *NotSetTransition) String() string {
 | 
			
		||||
	return "~" + t.intervalSet.String()
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
type WildcardTransition struct {
 | 
			
		||||
	*BaseTransition
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func NewWildcardTransition(target ATNState) *WildcardTransition {
 | 
			
		||||
 | 
			
		||||
	t := new(WildcardTransition)
 | 
			
		||||
	t.BaseTransition = NewBaseTransition(target)
 | 
			
		||||
 | 
			
		||||
	t.serializationType = TransitionWILDCARD
 | 
			
		||||
	return t
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (t *WildcardTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
 | 
			
		||||
	return symbol >= minVocabSymbol && symbol <= maxVocabSymbol
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (t *WildcardTransition) String() string {
 | 
			
		||||
	return "."
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
type PrecedencePredicateTransition struct {
 | 
			
		||||
	*BaseAbstractPredicateTransition
 | 
			
		||||
 | 
			
		||||
	precedence int
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func NewPrecedencePredicateTransition(target ATNState, precedence int) *PrecedencePredicateTransition {
 | 
			
		||||
 | 
			
		||||
	t := new(PrecedencePredicateTransition)
 | 
			
		||||
	t.BaseAbstractPredicateTransition = NewBasePredicateTransition(target)
 | 
			
		||||
 | 
			
		||||
	t.serializationType = TransitionPRECEDENCE
 | 
			
		||||
	t.precedence = precedence
 | 
			
		||||
	t.isEpsilon = true
 | 
			
		||||
 | 
			
		||||
	return t
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (t *PrecedencePredicateTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
 | 
			
		||||
	return false
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (t *PrecedencePredicateTransition) getPredicate() *PrecedencePredicate {
 | 
			
		||||
	return NewPrecedencePredicate(t.precedence)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (t *PrecedencePredicateTransition) String() string {
 | 
			
		||||
	return fmt.Sprint(t.precedence) + " >= _p"
 | 
			
		||||
}
 | 
			
		||||
							
								
								
									
										256
									
								
								vendor/github.com/antlr/antlr4/runtime/Go/antlr/tree.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										256
									
								
								vendor/github.com/antlr/antlr4/runtime/Go/antlr/tree.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							@@ -0,0 +1,256 @@
 | 
			
		||||
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
 | 
			
		||||
// Use of this file is governed by the BSD 3-clause license that
 | 
			
		||||
// can be found in the LICENSE.txt file in the project root.
 | 
			
		||||
 | 
			
		||||
package antlr
 | 
			
		||||
 | 
			
		||||
// The basic notion of a tree has a parent, a payload, and a list of children.
//  It is the most abstract interface for all the trees used by ANTLR.
///

// TreeInvalidInterval is the sentinel interval used when a node has no
// underlying token (start -1, stop -2: an empty interval).
var TreeInvalidInterval = NewInterval(-1, -2)

// Tree is the most abstract tree node: a parent, a payload and an ordered
// list of children.
type Tree interface {
	GetParent() Tree
	SetParent(Tree)
	GetPayload() interface{}
	GetChild(i int) Tree
	GetChildCount() int
	GetChildren() []Tree
}
 | 
			
		||||
 | 
			
		||||
// SyntaxTree is a Tree that knows the interval of the input stream it covers.
type SyntaxTree interface {
	Tree

	GetSourceInterval() *Interval
}

// ParseTree is a SyntaxTree produced by a parser: it accepts visitors, can
// render its matched text, and can print itself in LISP form.
type ParseTree interface {
	SyntaxTree

	Accept(Visitor ParseTreeVisitor) interface{}
	GetText() string

	ToStringTree([]string, Recognizer) string
}
 | 
			
		||||
 | 
			
		||||
// RuleNode is a ParseTree node produced by matching a grammar rule; it
// exposes the rule context that backs it.
type RuleNode interface {
	ParseTree

	GetRuleContext() RuleContext
	GetBaseRuleContext() *BaseRuleContext
}

// TerminalNode is a ParseTree leaf wrapping a single token.
type TerminalNode interface {
	ParseTree

	GetSymbol() Token
}

// ErrorNode is a TerminalNode inserted during error recovery; errorNode is an
// unexported marker method distinguishing it from ordinary terminals.
type ErrorNode interface {
	TerminalNode

	errorNode()
}
 | 
			
		||||
 | 
			
		||||
// ParseTreeVisitor is the visitor-pattern interface over parse trees.
type ParseTreeVisitor interface {
	Visit(tree ParseTree) interface{}
	VisitChildren(node RuleNode) interface{}
	VisitTerminal(node TerminalNode) interface{}
	VisitErrorNode(node ErrorNode) interface{}
}

// BaseParseTreeVisitor is a no-op visitor intended to be embedded; generated
// visitors override only the hooks they need.
type BaseParseTreeVisitor struct{}

// Compile-time check that the base visitor satisfies ParseTreeVisitor.
var _ ParseTreeVisitor = &BaseParseTreeVisitor{}

// Every base hook returns nil so embedders only override what they care about.
func (v *BaseParseTreeVisitor) Visit(tree ParseTree) interface{}            { return nil }
func (v *BaseParseTreeVisitor) VisitChildren(node RuleNode) interface{}     { return nil }
func (v *BaseParseTreeVisitor) VisitTerminal(node TerminalNode) interface{} { return nil }
func (v *BaseParseTreeVisitor) VisitErrorNode(node ErrorNode) interface{}   { return nil }
 | 
			
		||||
 | 
			
		||||
// TODO
 | 
			
		||||
//func (this ParseTreeVisitor) Visit(ctx) {
 | 
			
		||||
//	if (Utils.isArray(ctx)) {
 | 
			
		||||
//		self := this
 | 
			
		||||
//		return ctx.map(function(child) { return VisitAtom(self, child)})
 | 
			
		||||
//	} else {
 | 
			
		||||
//		return VisitAtom(this, ctx)
 | 
			
		||||
//	}
 | 
			
		||||
//}
 | 
			
		||||
//
 | 
			
		||||
//func VisitAtom(Visitor, ctx) {
 | 
			
		||||
//	if (ctx.parser == nil) { //is terminal
 | 
			
		||||
//		return
 | 
			
		||||
//	}
 | 
			
		||||
//
 | 
			
		||||
//	name := ctx.parser.ruleNames[ctx.ruleIndex]
 | 
			
		||||
//	funcName := "Visit" + Utils.titleCase(name)
 | 
			
		||||
//
 | 
			
		||||
//	return Visitor[funcName](ctx)
 | 
			
		||||
//}
 | 
			
		||||
 | 
			
		||||
// ParseTreeListener receives callbacks while a ParseTreeWalker traverses a
// tree: one call per terminal/error leaf and enter/exit calls per rule node.
type ParseTreeListener interface {
	VisitTerminal(node TerminalNode)
	VisitErrorNode(node ErrorNode)
	EnterEveryRule(ctx ParserRuleContext)
	ExitEveryRule(ctx ParserRuleContext)
}

// BaseParseTreeListener is a no-op listener intended to be embedded.
type BaseParseTreeListener struct{}

// Compile-time check that the base listener satisfies ParseTreeListener.
var _ ParseTreeListener = &BaseParseTreeListener{}

// Every base hook is a no-op so embedders only override what they care about.
func (l *BaseParseTreeListener) VisitTerminal(node TerminalNode)      {}
func (l *BaseParseTreeListener) VisitErrorNode(node ErrorNode)        {}
func (l *BaseParseTreeListener) EnterEveryRule(ctx ParserRuleContext) {}
func (l *BaseParseTreeListener) ExitEveryRule(ctx ParserRuleContext)  {}
 | 
			
		||||
 | 
			
		||||
type TerminalNodeImpl struct {
 | 
			
		||||
	parentCtx RuleContext
 | 
			
		||||
 | 
			
		||||
	symbol Token
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
var _ TerminalNode = &TerminalNodeImpl{}
 | 
			
		||||
 | 
			
		||||
func NewTerminalNodeImpl(symbol Token) *TerminalNodeImpl {
 | 
			
		||||
	tn := new(TerminalNodeImpl)
 | 
			
		||||
 | 
			
		||||
	tn.parentCtx = nil
 | 
			
		||||
	tn.symbol = symbol
 | 
			
		||||
 | 
			
		||||
	return tn
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// GetChild always returns nil: a terminal node has no children.
func (t *TerminalNodeImpl) GetChild(i int) Tree {
	return nil
}

// GetChildren always returns nil: a terminal node has no children.
func (t *TerminalNodeImpl) GetChildren() []Tree {
	return nil
}

// SetChildren panics: children cannot be attached to a terminal node.
func (t *TerminalNodeImpl) SetChildren(tree []Tree) {
	panic("Cannot set children on terminal node")
}

// GetSymbol returns the token this leaf wraps.
func (t *TerminalNodeImpl) GetSymbol() Token {
	return t.symbol
}
 | 
			
		||||
 | 
			
		||||
// GetParent returns the enclosing rule context (nil for a detached node).
func (t *TerminalNodeImpl) GetParent() Tree {
	return t.parentCtx
}

// SetParent records the enclosing context; tree must be a RuleContext or the
// type assertion panics.
func (t *TerminalNodeImpl) SetParent(tree Tree) {
	t.parentCtx = tree.(RuleContext)
}

// GetPayload returns the wrapped token.
func (t *TerminalNodeImpl) GetPayload() interface{} {
	return t.symbol
}

// GetSourceInterval returns the single-token interval covered by this node,
// or TreeInvalidInterval when there is no token.
func (t *TerminalNodeImpl) GetSourceInterval() *Interval {
	if t.symbol == nil {
		return TreeInvalidInterval
	}
	tokenIndex := t.symbol.GetTokenIndex()
	return NewInterval(tokenIndex, tokenIndex)
}

// GetChildCount always returns 0: a terminal node is a leaf.
func (t *TerminalNodeImpl) GetChildCount() int {
	return 0
}
 | 
			
		||||
 | 
			
		||||
// Accept dispatches to the visitor's terminal-node hook.
func (t *TerminalNodeImpl) Accept(v ParseTreeVisitor) interface{} {
	return v.VisitTerminal(t)
}

// GetText returns the wrapped token's text.
func (t *TerminalNodeImpl) GetText() string {
	return t.symbol.GetText()
}

// String renders the token text, or "<EOF>" for the end-of-file token.
func (t *TerminalNodeImpl) String() string {
	if t.symbol.GetTokenType() == TokenEOF {
		return "<EOF>"
	}

	return t.symbol.GetText()
}

// ToStringTree for a leaf is just its String form; both arguments are ignored.
func (t *TerminalNodeImpl) ToStringTree(s []string, r Recognizer) string {
	return t.String()
}
 | 
			
		||||
 | 
			
		||||
// Represents a token that was consumed during reSynchronization
 | 
			
		||||
// rather than during a valid Match operation. For example,
 | 
			
		||||
// we will create this kind of a node during single token insertion
 | 
			
		||||
// and deletion as well as during "consume until error recovery set"
 | 
			
		||||
// upon no viable alternative exceptions.
 | 
			
		||||
 | 
			
		||||
type ErrorNodeImpl struct {
 | 
			
		||||
	*TerminalNodeImpl
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
var _ ErrorNode = &ErrorNodeImpl{}
 | 
			
		||||
 | 
			
		||||
func NewErrorNodeImpl(token Token) *ErrorNodeImpl {
 | 
			
		||||
	en := new(ErrorNodeImpl)
 | 
			
		||||
	en.TerminalNodeImpl = NewTerminalNodeImpl(token)
 | 
			
		||||
	return en
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (e *ErrorNodeImpl) errorNode() {}
 | 
			
		||||
 | 
			
		||||
func (e *ErrorNodeImpl) Accept(v ParseTreeVisitor) interface{} {
 | 
			
		||||
	return v.VisitErrorNode(e)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// ParseTreeWalker performs depth-first walks of parse trees, firing listener
// callbacks on entry to and exit from every node.
type ParseTreeWalker struct{}

// NewParseTreeWalker returns a fresh (stateless) walker.
func NewParseTreeWalker() *ParseTreeWalker {
	return &ParseTreeWalker{}
}
 | 
			
		||||
 | 
			
		||||
// Performs a walk on the given parse tree starting at the root and going down recursively
// with depth-first search. On each node, EnterRule is called before
// recursively walking down into child nodes, then
// ExitRule is called after the recursive call to wind up.
func (p *ParseTreeWalker) Walk(listener ParseTreeListener, t Tree) {
	switch tt := t.(type) {
	// ErrorNode must be matched before TerminalNode: every ErrorNode is also
	// a TerminalNode and would otherwise be reported as a plain terminal.
	case ErrorNode:
		listener.VisitErrorNode(tt)
	case TerminalNode:
		listener.VisitTerminal(tt)
	default:
		// Anything else is assumed to be a rule node; a Tree that is not a
		// RuleNode would panic on the assertion below.
		p.EnterRule(listener, t.(RuleNode))
		for i := 0; i < t.GetChildCount(); i++ {
			child := t.GetChild(i)
			p.Walk(listener, child)
		}
		p.ExitRule(listener, t.(RuleNode))
	}
}
 | 
			
		||||
 | 
			
		||||
//
// Enters a grammar rule by first triggering the generic event {@link ParseTreeListener//EnterEveryRule}
// then by triggering the event specific to the given parse tree node
//
func (p *ParseTreeWalker) EnterRule(listener ParseTreeListener, r RuleNode) {
	ctx := r.GetRuleContext().(ParserRuleContext)
	listener.EnterEveryRule(ctx)
	ctx.EnterRule(listener)
}

// Exits a grammar rule by first triggering the event specific to the given parse tree node
// then by triggering the generic event {@link ParseTreeListener//ExitEveryRule}
//
// Note the mirror-image ordering relative to EnterRule: the rule-specific
// exit fires before the generic ExitEveryRule.
func (p *ParseTreeWalker) ExitRule(listener ParseTreeListener, r RuleNode) {
	ctx := r.GetRuleContext().(ParserRuleContext)
	ctx.ExitRule(listener)
	listener.ExitEveryRule(ctx)
}

// ParseTreeWalkerDefault is a shared stateless walker usable by all callers.
var ParseTreeWalkerDefault = NewParseTreeWalker()
 | 
			
		||||
							
								
								
									
										137
									
								
								vendor/github.com/antlr/antlr4/runtime/Go/antlr/trees.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										137
									
								
								vendor/github.com/antlr/antlr4/runtime/Go/antlr/trees.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							@@ -0,0 +1,137 @@
 | 
			
		||||
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
 | 
			
		||||
// Use of this file is governed by the BSD 3-clause license that
 | 
			
		||||
// can be found in the LICENSE.txt file in the project root.
 | 
			
		||||
 | 
			
		||||
package antlr
 | 
			
		||||
 | 
			
		||||
import "fmt"
 | 
			
		||||
 | 
			
		||||
/** A set of utility routines useful for all kinds of ANTLR trees. */
 | 
			
		||||
 | 
			
		||||
// Print out a whole tree in LISP form. {@link //getNodeText} is used on the
 | 
			
		||||
//  node payloads to get the text for the nodes.  Detect
 | 
			
		||||
//  parse trees and extract data appropriately.
 | 
			
		||||
func TreesStringTree(tree Tree, ruleNames []string, recog Recognizer) string {
 | 
			
		||||
 | 
			
		||||
	if recog != nil {
 | 
			
		||||
		ruleNames = recog.GetRuleNames()
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	s := TreesGetNodeText(tree, ruleNames, nil)
 | 
			
		||||
 | 
			
		||||
	s = EscapeWhitespace(s, false)
 | 
			
		||||
	c := tree.GetChildCount()
 | 
			
		||||
	if c == 0 {
 | 
			
		||||
		return s
 | 
			
		||||
	}
 | 
			
		||||
	res := "(" + s + " "
 | 
			
		||||
	if c > 0 {
 | 
			
		||||
		s = TreesStringTree(tree.GetChild(0), ruleNames, nil)
 | 
			
		||||
		res += s
 | 
			
		||||
	}
 | 
			
		||||
	for i := 1; i < c; i++ {
 | 
			
		||||
		s = TreesStringTree(tree.GetChild(i), ruleNames, nil)
 | 
			
		||||
		res += (" " + s)
 | 
			
		||||
	}
 | 
			
		||||
	res += ")"
 | 
			
		||||
	return res
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// TreesGetNodeText returns display text for a node: the rule name (suffixed
// with the alt number when an explicit alternative was matched) for rule
// nodes, the token text for terminals, and the payload's default formatting
// otherwise. If recog is non-nil its rule names override ruleNames.
func TreesGetNodeText(t Tree, ruleNames []string, recog Parser) string {
	if recog != nil {
		ruleNames = recog.GetRuleNames()
	}

	if ruleNames != nil {
		switch t2 := t.(type) {
		case RuleNode:
			t3 := t2.GetRuleContext()
			altNumber := t3.GetAltNumber()

			if altNumber != ATNInvalidAltNumber {
				return fmt.Sprintf("%s:%d", ruleNames[t3.GetRuleIndex()], altNumber)
			}
			return ruleNames[t3.GetRuleIndex()]
		// ErrorNode must precede TerminalNode: every ErrorNode is also a
		// TerminalNode and would otherwise take the terminal branch.
		case ErrorNode:
			return fmt.Sprint(t2)
		case TerminalNode:
			if t2.GetSymbol() != nil {
				return t2.GetSymbol().GetText()
			}
		}
	}

	// no recog for rule names
	payload := t.GetPayload()
	if p2, ok := payload.(Token); ok {
		return p2.GetText()
	}

	return fmt.Sprint(t.GetPayload())
}
 | 
			
		||||
 | 
			
		||||
// Return ordered list of all children of this node
 | 
			
		||||
func TreesGetChildren(t Tree) []Tree {
 | 
			
		||||
	list := make([]Tree, 0)
 | 
			
		||||
	for i := 0; i < t.GetChildCount(); i++ {
 | 
			
		||||
		list = append(list, t.GetChild(i))
 | 
			
		||||
	}
 | 
			
		||||
	return list
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Return a list of all ancestors of this node.  The first node of
 | 
			
		||||
//  list is the root and the last is the parent of this node.
 | 
			
		||||
//
 | 
			
		||||
func TreesgetAncestors(t Tree) []Tree {
 | 
			
		||||
	ancestors := make([]Tree, 0)
 | 
			
		||||
	t = t.GetParent()
 | 
			
		||||
	for t != nil {
 | 
			
		||||
		f := []Tree{t}
 | 
			
		||||
		ancestors = append(f, ancestors...)
 | 
			
		||||
		t = t.GetParent()
 | 
			
		||||
	}
 | 
			
		||||
	return ancestors
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// TreesFindAllTokenNodes returns every terminal node in t whose token type
// equals ttype.
func TreesFindAllTokenNodes(t ParseTree, ttype int) []ParseTree {
	return TreesfindAllNodes(t, ttype, true)
}

// TreesfindAllRuleNodes returns every rule context in t whose rule index
// equals ruleIndex.
func TreesfindAllRuleNodes(t ParseTree, ruleIndex int) []ParseTree {
	return TreesfindAllNodes(t, ruleIndex, false)
}

// TreesfindAllNodes collects matching nodes: token nodes of type index when
// findTokens is true, otherwise rule contexts whose rule index is index.
func TreesfindAllNodes(t ParseTree, index int, findTokens bool) []ParseTree {
	nodes := make([]ParseTree, 0)
	treesFindAllNodes(t, index, findTokens, &nodes)
	return nodes
}
 | 
			
		||||
 | 
			
		||||
// treesFindAllNodes does the recursive work for TreesfindAllNodes, appending
// matches (preorder) into *nodes.
func treesFindAllNodes(t ParseTree, index int, findTokens bool, nodes *[]ParseTree) {
	// check this node (the root) first

	t2, ok := t.(TerminalNode)
	t3, ok2 := t.(ParserRuleContext)

	if findTokens && ok {
		if t2.GetSymbol().GetTokenType() == index {
			*nodes = append(*nodes, t2)
		}
	} else if !findTokens && ok2 {
		if t3.GetRuleIndex() == index {
			*nodes = append(*nodes, t3)
		}
	}
	// check children
	for i := 0; i < t.GetChildCount(); i++ {
		treesFindAllNodes(t.GetChild(i).(ParseTree), index, findTokens, nodes)
	}
}
 | 
			
		||||
 | 
			
		||||
func TreesDescendants(t ParseTree) []ParseTree {
 | 
			
		||||
	nodes := []ParseTree{t}
 | 
			
		||||
	for i := 0; i < t.GetChildCount(); i++ {
 | 
			
		||||
		nodes = append(nodes, TreesDescendants(t.GetChild(i).(ParseTree))...)
 | 
			
		||||
	}
 | 
			
		||||
	return nodes
 | 
			
		||||
}
 | 
			
		||||
							
								
								
									
										417
									
								
								vendor/github.com/antlr/antlr4/runtime/Go/antlr/utils.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										417
									
								
								vendor/github.com/antlr/antlr4/runtime/Go/antlr/utils.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							@@ -0,0 +1,417 @@
 | 
			
		||||
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
 | 
			
		||||
// Use of this file is governed by the BSD 3-clause license that
 | 
			
		||||
// can be found in the LICENSE.txt file in the project root.
 | 
			
		||||
 | 
			
		||||
package antlr
 | 
			
		||||
 | 
			
		||||
import (
 | 
			
		||||
	"bytes"
 | 
			
		||||
	"errors"
 | 
			
		||||
	"fmt"
 | 
			
		||||
	"sort"
 | 
			
		||||
	"strconv"
 | 
			
		||||
	"strings"
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
// intMin returns the smaller of a and b.
func intMin(a, b int) int {
	if b < a {
		return b
	}
	return a
}

// intMax returns the larger of a and b.
func intMax(a, b int) int {
	if b > a {
		return b
	}
	return a
}
 | 
			
		||||
 | 
			
		||||
// A simple integer stack

// IntStack is a LIFO stack of ints backed by a slice.
type IntStack []int

// ErrEmptyStack is returned by Pop when the stack holds no elements.
var ErrEmptyStack = errors.New("Stack is empty")

// Pop removes and returns the top element; it returns ErrEmptyStack when the
// stack is empty.
func (s *IntStack) Pop() (int, error) {
	n := len(*s)
	if n == 0 {
		return 0, ErrEmptyStack
	}
	top := (*s)[n-1]
	*s = (*s)[:n-1]
	return top, nil
}

// Push places e on top of the stack.
func (s *IntStack) Push(e int) {
	*s = append(*s, e)
}
 | 
			
		||||
 | 
			
		||||
// Set is a hash set over arbitrary values. Collisions are resolved by
// chaining: data maps a hash code to the bucket of values that share it, and
// equalsFunction decides membership within a bucket.
type Set struct {
	data             map[int][]interface{}
	hashcodeFunction func(interface{}) int
	equalsFunction   func(interface{}, interface{}) bool
}
 | 
			
		||||
 | 
			
		||||
func NewSet(
 | 
			
		||||
	hashcodeFunction func(interface{}) int,
 | 
			
		||||
	equalsFunction func(interface{}, interface{}) bool) *Set {
 | 
			
		||||
 | 
			
		||||
	s := new(Set)
 | 
			
		||||
 | 
			
		||||
	s.data = make(map[int][]interface{})
 | 
			
		||||
 | 
			
		||||
	if hashcodeFunction != nil {
 | 
			
		||||
		s.hashcodeFunction = hashcodeFunction
 | 
			
		||||
	} else {
 | 
			
		||||
		s.hashcodeFunction = standardHashFunction
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	if equalsFunction == nil {
 | 
			
		||||
		s.equalsFunction = standardEqualsFunction
 | 
			
		||||
	} else {
 | 
			
		||||
		s.equalsFunction = equalsFunction
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return s
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// standardEqualsFunction compares two values through the package's
// comparable interface; it panics if either value does not implement it.
func standardEqualsFunction(a interface{}, b interface{}) bool {

	ac, oka := a.(comparable)
	bc, okb := b.(comparable)

	if !oka || !okb {
		panic("Not Comparable")
	}

	return ac.equals(bc)
}

// standardHashFunction hashes a value through the hasher interface; it
// panics if the value does not implement it.
func standardHashFunction(a interface{}) int {
	if h, ok := a.(hasher); ok {
		return h.hash()
	}

	panic("Not Hasher")
}

// hasher is implemented by values that can produce their own hash code.
type hasher interface {
	hash() int
}
 | 
			
		||||
 | 
			
		||||
// length reports the number of occupied hash buckets.
// NOTE(review): this is len of the bucket map, not the element count — when
// hash collisions put several values in one bucket it undercounts; confirm
// callers only need a bucket/approximate count.
func (s *Set) length() int {
	return len(s.data)
}
 | 
			
		||||
 | 
			
		||||
func (s *Set) add(value interface{}) interface{} {
 | 
			
		||||
 | 
			
		||||
	key := s.hashcodeFunction(value)
 | 
			
		||||
 | 
			
		||||
	values := s.data[key]
 | 
			
		||||
 | 
			
		||||
	if s.data[key] != nil {
 | 
			
		||||
		for i := 0; i < len(values); i++ {
 | 
			
		||||
			if s.equalsFunction(value, values[i]) {
 | 
			
		||||
				return values[i]
 | 
			
		||||
			}
 | 
			
		||||
		}
 | 
			
		||||
 | 
			
		||||
		s.data[key] = append(s.data[key], value)
 | 
			
		||||
		return value
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	v := make([]interface{}, 1, 10)
 | 
			
		||||
	v[0] = value
 | 
			
		||||
	s.data[key] = v
 | 
			
		||||
 | 
			
		||||
	return value
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (s *Set) contains(value interface{}) bool {
 | 
			
		||||
 | 
			
		||||
	key := s.hashcodeFunction(value)
 | 
			
		||||
 | 
			
		||||
	values := s.data[key]
 | 
			
		||||
 | 
			
		||||
	if s.data[key] != nil {
 | 
			
		||||
		for i := 0; i < len(values); i++ {
 | 
			
		||||
			if s.equalsFunction(value, values[i]) {
 | 
			
		||||
				return true
 | 
			
		||||
			}
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
	return false
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (s *Set) values() []interface{} {
 | 
			
		||||
	var l []interface{}
 | 
			
		||||
 | 
			
		||||
	for _, v := range s.data {
 | 
			
		||||
		l = append(l, v...)
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return l
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (s *Set) String() string {
 | 
			
		||||
	r := ""
 | 
			
		||||
 | 
			
		||||
	for _, av := range s.data {
 | 
			
		||||
		for _, v := range av {
 | 
			
		||||
			r += fmt.Sprint(v)
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return r
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// BitSet is a sparse set of ints backed by a map (membership only; no dense
// bit storage).
type BitSet struct {
	data map[int]bool
}

// NewBitSet returns an empty BitSet.
func NewBitSet() *BitSet {
	return &BitSet{data: make(map[int]bool)}
}

// add inserts value into the set.
func (b *BitSet) add(value int) {
	b.data[value] = true
}

// clear removes index from the set.
func (b *BitSet) clear(index int) {
	delete(b.data, index)
}

// or unions set into b in place.
func (b *BitSet) or(set *BitSet) {
	for k := range set.data {
		b.add(k)
	}
}

// remove deletes value from the set.
func (b *BitSet) remove(value int) {
	delete(b.data, value)
}

// contains reports whether value is in the set.
func (b *BitSet) contains(value int) bool {
	return b.data[value]
}

// values returns the members in ascending order.
func (b *BitSet) values() []int {
	ks := make([]int, 0, len(b.data))
	for k := range b.data {
		ks = append(ks, k)
	}
	sort.Ints(ks)
	return ks
}

// minValue returns the smallest member; for an empty set it returns
// 2147483647 (Java's Integer.MAX_VALUE, kept for ported callers).
func (b *BitSet) minValue() int {
	min := 2147483647
	for k := range b.data {
		if k < min {
			min = k
		}
	}
	return min
}

// equals reports whether other is a *BitSet with exactly the same members.
func (b *BitSet) equals(other interface{}) bool {
	o, ok := other.(*BitSet)
	if !ok || len(b.data) != len(o.data) {
		return false
	}

	for k, v := range b.data {
		if o.data[k] != v {
			return false
		}
	}

	return true
}

// length returns the number of members.
func (b *BitSet) length() int {
	return len(b.data)
}

// String renders the members in ascending order, e.g. "{1, 3, 5}".
func (b *BitSet) String() string {
	vals := b.values()
	parts := make([]string, len(vals))

	for i, v := range vals {
		parts[i] = strconv.Itoa(v)
	}
	return "{" + strings.Join(parts, ", ") + "}"
}
 | 
			
		||||
 | 
			
		||||
// AltDict is a string-keyed map; keys are prefixed with "k-" internally so
// any caller-supplied string is a safe key.
type AltDict struct {
	data map[string]interface{}
}

// NewAltDict returns an empty AltDict.
func NewAltDict() *AltDict {
	return &AltDict{data: make(map[string]interface{})}
}

// Get returns the value stored under key, or nil when absent.
func (a *AltDict) Get(key string) interface{} {
	return a.data["k-"+key]
}

// put stores value under key, replacing any previous value.
func (a *AltDict) put(key string, value interface{}) {
	a.data["k-"+key] = value
}

// values returns all stored values; ordering is unspecified.
func (a *AltDict) values() []interface{} {
	vs := make([]interface{}, 0, len(a.data))
	for _, v := range a.data {
		vs = append(vs, v)
	}
	return vs
}
 | 
			
		||||
 | 
			
		||||
// DoubleDict is a two-level map keyed by a pair of ints.
type DoubleDict struct {
	data map[int]map[int]interface{}
}

// NewDoubleDict returns an empty DoubleDict.
func NewDoubleDict() *DoubleDict {
	return &DoubleDict{data: make(map[int]map[int]interface{})}
}

// Get returns the value stored under (a, b), or nil when either level is
// absent.
func (d *DoubleDict) Get(a, b int) interface{} {
	inner, ok := d.data[a]
	if !ok {
		return nil
	}
	return inner[b]
}

// set stores o under (a, b), creating the inner map on first use of a.
func (d *DoubleDict) set(a, b int, o interface{}) {
	inner, ok := d.data[a]
	if !ok {
		inner = make(map[int]interface{})
		d.data[a] = inner
	}
	inner[b] = o
}
 | 
			
		||||
 | 
			
		||||
// EscapeWhitespace makes tabs, newlines and carriage returns visible as
// "\t", "\n", "\r"; when escapeSpaces is true, spaces become the middle-dot
// character U+00B7.
func EscapeWhitespace(s string, escapeSpaces bool) string {
	out := strings.NewReplacer("\t", "\\t", "\n", "\\n", "\r", "\\r").Replace(s)
	if escapeSpaces {
		out = strings.Replace(out, " ", "\u00B7", -1)
	}
	return out
}
 | 
			
		||||
 | 
			
		||||
func TerminalNodeToStringArray(sa []TerminalNode) []string {
 | 
			
		||||
	st := make([]string, len(sa))
 | 
			
		||||
 | 
			
		||||
	for i, s := range sa {
 | 
			
		||||
		st[i] = fmt.Sprintf("%v", s)
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return st
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// PrintArrayJavaStyle renders a string slice the way Java's Arrays.toString
// does: elements separated by ", " and wrapped in square brackets.
func PrintArrayJavaStyle(sa []string) string {
	return "[" + strings.Join(sa, ", ") + "]"
}
 | 
			
		||||
 | 
			
		||||
// The following routines were lifted from bits.rotate* available in Go 1.9.

// uintSize is the width of uint in bits on the current platform: the
// expression evaluates to 32 on 32-bit targets and 64 on 64-bit targets.
const uintSize = 32 << (^uint(0) >> 32 & 1) // 32 or 64

// rotateLeft returns the value of x rotated left by (k mod UintSize) bits.
// To rotate x right by k bits, call RotateLeft(x, -k).
func rotateLeft(x uint, k int) uint {
	if uintSize == 64 {
		return uint(rotateLeft64(uint64(x), k))
	}
	return uint(rotateLeft32(uint32(x), k))
}

// rotateLeft32 returns the value of x rotated left by (k mod 32) bits.
func rotateLeft32(x uint32, k int) uint32 {
	// uint(k) % 32 is equivalent to the masking form uint(k) & 31 and
	// handles negative k (right rotation) via the unsigned conversion.
	shift := uint(k) % 32
	return x<<shift | x>>(32-shift)
}

// rotateLeft64 returns the value of x rotated left by (k mod 64) bits.
func rotateLeft64(x uint64, k int) uint64 {
	shift := uint(k) % 64
	return x<<shift | x>>(64-shift)
}
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
// murmur hash
 | 
			
		||||
// Constants of the 32-bit MurmurHash3 algorithm, used by the murmur*
// functions below.
const (
	c1_32 uint = 0xCC9E2D51 // first multiplicative mixing constant for each word
	c2_32 uint = 0x1B873593 // second multiplicative mixing constant for each word
	n1_32 uint = 0xE6546B64 // additive constant applied to the hash state per round
)
 | 
			
		||||
 | 
			
		||||
// murmurInit returns the initial MurmurHash3 accumulator state, which is
// simply the seed value itself.
func murmurInit(seed int) int {
	return seed
}
 | 
			
		||||
 | 
			
		||||
func murmurUpdate(h1 int, k1 int) int {
 | 
			
		||||
	var k1u uint
 | 
			
		||||
	k1u = uint(k1) * c1_32
 | 
			
		||||
	k1u = rotateLeft(k1u, 15)
 | 
			
		||||
	k1u *= c2_32
 | 
			
		||||
 | 
			
		||||
	var h1u = uint(h1) ^ k1u
 | 
			
		||||
	k1u = rotateLeft(k1u, 13)
 | 
			
		||||
	h1u = h1u*5 + 0xe6546b64
 | 
			
		||||
	return int(h1u)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// murmurFinish applies the MurmurHash3 finalization (avalanche) mix to the
// running state h1 and returns the final hash. numberOfWords is the count of
// 32-bit words that were mixed in; it contributes the total byte length
// (numberOfWords * 4) to the hash before the avalanche steps.
func murmurFinish(h1 int, numberOfWords int) int {
	hash := uint(h1)
	hash ^= uint(numberOfWords * 4)
	hash ^= hash >> 16
	hash *= 0x85ebca6b
	hash ^= hash >> 13
	hash *= 0xc2b2ae35
	hash ^= hash >> 16
	return int(hash)
}
 | 
			
		||||
							
								
								
									
										202
									
								
								vendor/github.com/google/cel-go/LICENSE
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										202
									
								
								vendor/github.com/google/cel-go/LICENSE
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							@@ -0,0 +1,202 @@
 | 
			
		||||
 | 
			
		||||
                                 Apache License
 | 
			
		||||
                           Version 2.0, January 2004
 | 
			
		||||
                        http://www.apache.org/licenses/
 | 
			
		||||
 | 
			
		||||
   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
 | 
			
		||||
 | 
			
		||||
   1. Definitions.
 | 
			
		||||
 | 
			
		||||
      "License" shall mean the terms and conditions for use, reproduction,
 | 
			
		||||
      and distribution as defined by Sections 1 through 9 of this document.
 | 
			
		||||
 | 
			
		||||
      "Licensor" shall mean the copyright owner or entity authorized by
 | 
			
		||||
      the copyright owner that is granting the License.
 | 
			
		||||
 | 
			
		||||
      "Legal Entity" shall mean the union of the acting entity and all
 | 
			
		||||
      other entities that control, are controlled by, or are under common
 | 
			
		||||
      control with that entity. For the purposes of this definition,
 | 
			
		||||
      "control" means (i) the power, direct or indirect, to cause the
 | 
			
		||||
      direction or management of such entity, whether by contract or
 | 
			
		||||
      otherwise, or (ii) ownership of fifty percent (50%) or more of the
 | 
			
		||||
      outstanding shares, or (iii) beneficial ownership of such entity.
 | 
			
		||||
 | 
			
		||||
      "You" (or "Your") shall mean an individual or Legal Entity
 | 
			
		||||
      exercising permissions granted by this License.
 | 
			
		||||
 | 
			
		||||
      "Source" form shall mean the preferred form for making modifications,
 | 
			
		||||
      including but not limited to software source code, documentation
 | 
			
		||||
      source, and configuration files.
 | 
			
		||||
 | 
			
		||||
      "Object" form shall mean any form resulting from mechanical
 | 
			
		||||
      transformation or translation of a Source form, including but
 | 
			
		||||
      not limited to compiled object code, generated documentation,
 | 
			
		||||
      and conversions to other media types.
 | 
			
		||||
 | 
			
		||||
      "Work" shall mean the work of authorship, whether in Source or
 | 
			
		||||
      Object form, made available under the License, as indicated by a
 | 
			
		||||
      copyright notice that is included in or attached to the work
 | 
			
		||||
      (an example is provided in the Appendix below).
 | 
			
		||||
 | 
			
		||||
      "Derivative Works" shall mean any work, whether in Source or Object
 | 
			
		||||
      form, that is based on (or derived from) the Work and for which the
 | 
			
		||||
      editorial revisions, annotations, elaborations, or other modifications
 | 
			
		||||
      represent, as a whole, an original work of authorship. For the purposes
 | 
			
		||||
      of this License, Derivative Works shall not include works that remain
 | 
			
		||||
      separable from, or merely link (or bind by name) to the interfaces of,
 | 
			
		||||
      the Work and Derivative Works thereof.
 | 
			
		||||
 | 
			
		||||
      "Contribution" shall mean any work of authorship, including
 | 
			
		||||
      the original version of the Work and any modifications or additions
 | 
			
		||||
      to that Work or Derivative Works thereof, that is intentionally
 | 
			
		||||
      submitted to Licensor for inclusion in the Work by the copyright owner
 | 
			
		||||
      or by an individual or Legal Entity authorized to submit on behalf of
 | 
			
		||||
      the copyright owner. For the purposes of this definition, "submitted"
 | 
			
		||||
      means any form of electronic, verbal, or written communication sent
 | 
			
		||||
      to the Licensor or its representatives, including but not limited to
 | 
			
		||||
      communication on electronic mailing lists, source code control systems,
 | 
			
		||||
      and issue tracking systems that are managed by, or on behalf of, the
 | 
			
		||||
      Licensor for the purpose of discussing and improving the Work, but
 | 
			
		||||
      excluding communication that is conspicuously marked or otherwise
 | 
			
		||||
      designated in writing by the copyright owner as "Not a Contribution."
 | 
			
		||||
 | 
			
		||||
      "Contributor" shall mean Licensor and any individual or Legal Entity
 | 
			
		||||
      on behalf of whom a Contribution has been received by Licensor and
 | 
			
		||||
      subsequently incorporated within the Work.
 | 
			
		||||
 | 
			
		||||
   2. Grant of Copyright License. Subject to the terms and conditions of
 | 
			
		||||
      this License, each Contributor hereby grants to You a perpetual,
 | 
			
		||||
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
 | 
			
		||||
      copyright license to reproduce, prepare Derivative Works of,
 | 
			
		||||
      publicly display, publicly perform, sublicense, and distribute the
 | 
			
		||||
      Work and such Derivative Works in Source or Object form.
 | 
			
		||||
 | 
			
		||||
   3. Grant of Patent License. Subject to the terms and conditions of
 | 
			
		||||
      this License, each Contributor hereby grants to You a perpetual,
 | 
			
		||||
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
 | 
			
		||||
      (except as stated in this section) patent license to make, have made,
 | 
			
		||||
      use, offer to sell, sell, import, and otherwise transfer the Work,
 | 
			
		||||
      where such license applies only to those patent claims licensable
 | 
			
		||||
      by such Contributor that are necessarily infringed by their
 | 
			
		||||
      Contribution(s) alone or by combination of their Contribution(s)
 | 
			
		||||
      with the Work to which such Contribution(s) was submitted. If You
 | 
			
		||||
      institute patent litigation against any entity (including a
 | 
			
		||||
      cross-claim or counterclaim in a lawsuit) alleging that the Work
 | 
			
		||||
      or a Contribution incorporated within the Work constitutes direct
 | 
			
		||||
      or contributory patent infringement, then any patent licenses
 | 
			
		||||
      granted to You under this License for that Work shall terminate
 | 
			
		||||
      as of the date such litigation is filed.
 | 
			
		||||
 | 
			
		||||
   4. Redistribution. You may reproduce and distribute copies of the
 | 
			
		||||
      Work or Derivative Works thereof in any medium, with or without
 | 
			
		||||
      modifications, and in Source or Object form, provided that You
 | 
			
		||||
      meet the following conditions:
 | 
			
		||||
 | 
			
		||||
      (a) You must give any other recipients of the Work or
 | 
			
		||||
          Derivative Works a copy of this License; and
 | 
			
		||||
 | 
			
		||||
      (b) You must cause any modified files to carry prominent notices
 | 
			
		||||
          stating that You changed the files; and
 | 
			
		||||
 | 
			
		||||
      (c) You must retain, in the Source form of any Derivative Works
 | 
			
		||||
          that You distribute, all copyright, patent, trademark, and
 | 
			
		||||
          attribution notices from the Source form of the Work,
 | 
			
		||||
          excluding those notices that do not pertain to any part of
 | 
			
		||||
          the Derivative Works; and
 | 
			
		||||
 | 
			
		||||
      (d) If the Work includes a "NOTICE" text file as part of its
 | 
			
		||||
          distribution, then any Derivative Works that You distribute must
 | 
			
		||||
          include a readable copy of the attribution notices contained
 | 
			
		||||
          within such NOTICE file, excluding those notices that do not
 | 
			
		||||
          pertain to any part of the Derivative Works, in at least one
 | 
			
		||||
          of the following places: within a NOTICE text file distributed
 | 
			
		||||
          as part of the Derivative Works; within the Source form or
 | 
			
		||||
          documentation, if provided along with the Derivative Works; or,
 | 
			
		||||
          within a display generated by the Derivative Works, if and
 | 
			
		||||
          wherever such third-party notices normally appear. The contents
 | 
			
		||||
          of the NOTICE file are for informational purposes only and
 | 
			
		||||
          do not modify the License. You may add Your own attribution
 | 
			
		||||
          notices within Derivative Works that You distribute, alongside
 | 
			
		||||
          or as an addendum to the NOTICE text from the Work, provided
 | 
			
		||||
          that such additional attribution notices cannot be construed
 | 
			
		||||
          as modifying the License.
 | 
			
		||||
 | 
			
		||||
      You may add Your own copyright statement to Your modifications and
 | 
			
		||||
      may provide additional or different license terms and conditions
 | 
			
		||||
      for use, reproduction, or distribution of Your modifications, or
 | 
			
		||||
      for any such Derivative Works as a whole, provided Your use,
 | 
			
		||||
      reproduction, and distribution of the Work otherwise complies with
 | 
			
		||||
      the conditions stated in this License.
 | 
			
		||||
 | 
			
		||||
   5. Submission of Contributions. Unless You explicitly state otherwise,
 | 
			
		||||
      any Contribution intentionally submitted for inclusion in the Work
 | 
			
		||||
      by You to the Licensor shall be under the terms and conditions of
 | 
			
		||||
      this License, without any additional terms or conditions.
 | 
			
		||||
      Notwithstanding the above, nothing herein shall supersede or modify
 | 
			
		||||
      the terms of any separate license agreement you may have executed
 | 
			
		||||
      with Licensor regarding such Contributions.
 | 
			
		||||
 | 
			
		||||
   6. Trademarks. This License does not grant permission to use the trade
 | 
			
		||||
      names, trademarks, service marks, or product names of the Licensor,
 | 
			
		||||
      except as required for reasonable and customary use in describing the
 | 
			
		||||
      origin of the Work and reproducing the content of the NOTICE file.
 | 
			
		||||
 | 
			
		||||
   7. Disclaimer of Warranty. Unless required by applicable law or
 | 
			
		||||
      agreed to in writing, Licensor provides the Work (and each
 | 
			
		||||
      Contributor provides its Contributions) on an "AS IS" BASIS,
 | 
			
		||||
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
 | 
			
		||||
      implied, including, without limitation, any warranties or conditions
 | 
			
		||||
      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
 | 
			
		||||
      PARTICULAR PURPOSE. You are solely responsible for determining the
 | 
			
		||||
      appropriateness of using or redistributing the Work and assume any
 | 
			
		||||
      risks associated with Your exercise of permissions under this License.
 | 
			
		||||
 | 
			
		||||
   8. Limitation of Liability. In no event and under no legal theory,
 | 
			
		||||
      whether in tort (including negligence), contract, or otherwise,
 | 
			
		||||
      unless required by applicable law (such as deliberate and grossly
 | 
			
		||||
      negligent acts) or agreed to in writing, shall any Contributor be
 | 
			
		||||
      liable to You for damages, including any direct, indirect, special,
 | 
			
		||||
      incidental, or consequential damages of any character arising as a
 | 
			
		||||
      result of this License or out of the use or inability to use the
 | 
			
		||||
      Work (including but not limited to damages for loss of goodwill,
 | 
			
		||||
      work stoppage, computer failure or malfunction, or any and all
 | 
			
		||||
      other commercial damages or losses), even if such Contributor
 | 
			
		||||
      has been advised of the possibility of such damages.
 | 
			
		||||
 | 
			
		||||
   9. Accepting Warranty or Additional Liability. While redistributing
 | 
			
		||||
      the Work or Derivative Works thereof, You may choose to offer,
 | 
			
		||||
      and charge a fee for, acceptance of support, warranty, indemnity,
 | 
			
		||||
      or other liability obligations and/or rights consistent with this
 | 
			
		||||
      License. However, in accepting such obligations, You may act only
 | 
			
		||||
      on Your own behalf and on Your sole responsibility, not on behalf
 | 
			
		||||
      of any other Contributor, and only if You agree to indemnify,
 | 
			
		||||
      defend, and hold each Contributor harmless for any liability
 | 
			
		||||
      incurred by, or claims asserted against, such Contributor by reason
 | 
			
		||||
      of your accepting any such warranty or additional liability.
 | 
			
		||||
 | 
			
		||||
   END OF TERMS AND CONDITIONS
 | 
			
		||||
 | 
			
		||||
   APPENDIX: How to apply the Apache License to your work.
 | 
			
		||||
 | 
			
		||||
      To apply the Apache License to your work, attach the following
 | 
			
		||||
      boilerplate notice, with the fields enclosed by brackets "[]"
 | 
			
		||||
      replaced with your own identifying information. (Don't include
 | 
			
		||||
      the brackets!)  The text should be enclosed in the appropriate
 | 
			
		||||
      comment syntax for the file format. We also recommend that a
 | 
			
		||||
      file or class name and description of purpose be included on the
 | 
			
		||||
      same "printed page" as the copyright notice for easier
 | 
			
		||||
      identification within third-party archives.
 | 
			
		||||
 | 
			
		||||
   Copyright [yyyy] [name of copyright owner]
 | 
			
		||||
 | 
			
		||||
   Licensed under the Apache License, Version 2.0 (the "License");
 | 
			
		||||
   you may not use this file except in compliance with the License.
 | 
			
		||||
   You may obtain a copy of the License at
 | 
			
		||||
 | 
			
		||||
       http://www.apache.org/licenses/LICENSE-2.0
 | 
			
		||||
 | 
			
		||||
   Unless required by applicable law or agreed to in writing, software
 | 
			
		||||
   distributed under the License is distributed on an "AS IS" BASIS,
 | 
			
		||||
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | 
			
		||||
   See the License for the specific language governing permissions and
 | 
			
		||||
   limitations under the License.
 | 
			
		||||
							
								
								
									
										64
									
								
								vendor/github.com/google/cel-go/cel/BUILD.bazel
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										64
									
								
								vendor/github.com/google/cel-go/cel/BUILD.bazel
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							@@ -0,0 +1,64 @@
 | 
			
		||||
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
 | 
			
		||||
 | 
			
		||||
package(
 | 
			
		||||
    licenses = ["notice"],  # Apache 2.0
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
go_library(
 | 
			
		||||
    name = "go_default_library",
 | 
			
		||||
    srcs = [
 | 
			
		||||
        "cel.go",
 | 
			
		||||
        "env.go",
 | 
			
		||||
        "io.go",
 | 
			
		||||
        "library.go",
 | 
			
		||||
        "options.go",
 | 
			
		||||
        "program.go",
 | 
			
		||||
    ],
 | 
			
		||||
    importpath = "github.com/google/cel-go/cel",
 | 
			
		||||
    visibility = ["//visibility:public"],
 | 
			
		||||
    deps = [
 | 
			
		||||
        "//checker:go_default_library",
 | 
			
		||||
        "//checker/decls:go_default_library",
 | 
			
		||||
        "//common:go_default_library",
 | 
			
		||||
        "//common/containers:go_default_library",
 | 
			
		||||
        "//common/types:go_default_library",
 | 
			
		||||
        "//common/types/pb:go_default_library",
 | 
			
		||||
        "//common/types/ref:go_default_library",
 | 
			
		||||
        "//interpreter:go_default_library",
 | 
			
		||||
        "//interpreter/functions:go_default_library",
 | 
			
		||||
        "//parser:go_default_library",
 | 
			
		||||
        "@org_golang_google_genproto//googleapis/api/expr/v1alpha1:go_default_library",
 | 
			
		||||
        "@org_golang_google_protobuf//proto:go_default_library",
 | 
			
		||||
        "@org_golang_google_protobuf//reflect/protodesc:go_default_library",
 | 
			
		||||
        "@org_golang_google_protobuf//reflect/protoreflect:go_default_library",
 | 
			
		||||
        "@org_golang_google_protobuf//reflect/protoregistry:go_default_library",
 | 
			
		||||
        "@org_golang_google_protobuf//types/descriptorpb:go_default_library",
 | 
			
		||||
        "@org_golang_google_protobuf//types/dynamicpb:go_default_library",
 | 
			
		||||
    ],
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
go_test(
 | 
			
		||||
    name = "go_default_test",
 | 
			
		||||
    srcs = [
 | 
			
		||||
        "cel_test.go",
 | 
			
		||||
    ],
 | 
			
		||||
    data = [
 | 
			
		||||
        "//cel/testdata:gen_test_fds",
 | 
			
		||||
    ],
 | 
			
		||||
    embed = [
 | 
			
		||||
        ":go_default_library",
 | 
			
		||||
    ],
 | 
			
		||||
    deps = [
 | 
			
		||||
        "//checker/decls:go_default_library",
 | 
			
		||||
        "//common/operators:go_default_library",
 | 
			
		||||
        "//common/overloads:go_default_library",
 | 
			
		||||
        "//common/types:go_default_library",
 | 
			
		||||
        "//common/types/ref:go_default_library",
 | 
			
		||||
        "//common/types/traits:go_default_library",
 | 
			
		||||
        "//interpreter/functions:go_default_library",
 | 
			
		||||
        "//test/proto2pb:go_default_library",
 | 
			
		||||
        "//test/proto3pb:go_default_library",
 | 
			
		||||
        "@io_bazel_rules_go//proto/wkt:descriptor_go_proto",
 | 
			
		||||
        "@org_golang_google_genproto//googleapis/api/expr/v1alpha1:go_default_library",
 | 
			
		||||
    ],
 | 
			
		||||
)
 | 
			
		||||
							
								
								
									
										19
									
								
								vendor/github.com/google/cel-go/cel/cel.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										19
									
								
								vendor/github.com/google/cel-go/cel/cel.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							@@ -0,0 +1,19 @@
 | 
			
		||||
// Copyright 2019 Google LLC
 | 
			
		||||
//
 | 
			
		||||
// Licensed under the Apache License, Version 2.0 (the "License");
 | 
			
		||||
// you may not use this file except in compliance with the License.
 | 
			
		||||
// You may obtain a copy of the License at
 | 
			
		||||
//
 | 
			
		||||
//      http://www.apache.org/licenses/LICENSE-2.0
 | 
			
		||||
//
 | 
			
		||||
// Unless required by applicable law or agreed to in writing, software
 | 
			
		||||
// distributed under the License is distributed on an "AS IS" BASIS,
 | 
			
		||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | 
			
		||||
// See the License for the specific language governing permissions and
 | 
			
		||||
// limitations under the License.
 | 
			
		||||
 | 
			
		||||
// Package cel defines the top-level interface for the Common Expression Language (CEL).
 | 
			
		||||
//
 | 
			
		||||
// CEL is a non-Turing complete expression language designed to parse, check, and evaluate
 | 
			
		||||
// expressions against user-defined environments.
 | 
			
		||||
package cel
 | 
			
		||||
							
								
								
									
										466
									
								
								vendor/github.com/google/cel-go/cel/env.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										466
									
								
								vendor/github.com/google/cel-go/cel/env.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							@@ -0,0 +1,466 @@
 | 
			
		||||
// Copyright 2019 Google LLC
 | 
			
		||||
//
 | 
			
		||||
// Licensed under the Apache License, Version 2.0 (the "License");
 | 
			
		||||
// you may not use this file except in compliance with the License.
 | 
			
		||||
// You may obtain a copy of the License at
 | 
			
		||||
//
 | 
			
		||||
//      http://www.apache.org/licenses/LICENSE-2.0
 | 
			
		||||
//
 | 
			
		||||
// Unless required by applicable law or agreed to in writing, software
 | 
			
		||||
// distributed under the License is distributed on an "AS IS" BASIS,
 | 
			
		||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | 
			
		||||
// See the License for the specific language governing permissions and
 | 
			
		||||
// limitations under the License.
 | 
			
		||||
 | 
			
		||||
package cel
 | 
			
		||||
 | 
			
		||||
import (
 | 
			
		||||
	"errors"
 | 
			
		||||
	"sync"
 | 
			
		||||
 | 
			
		||||
	"github.com/google/cel-go/checker"
 | 
			
		||||
	"github.com/google/cel-go/checker/decls"
 | 
			
		||||
	"github.com/google/cel-go/common"
 | 
			
		||||
	"github.com/google/cel-go/common/containers"
 | 
			
		||||
	"github.com/google/cel-go/common/types"
 | 
			
		||||
	"github.com/google/cel-go/common/types/ref"
 | 
			
		||||
	"github.com/google/cel-go/interpreter"
 | 
			
		||||
	"github.com/google/cel-go/parser"
 | 
			
		||||
 | 
			
		||||
	exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
// Source interface representing a user-provided expression.
//
// It simply embeds common.Source, so callers of this package can supply
// sources without importing the common package directly.
type Source interface {
	common.Source
}
 | 
			
		||||
 | 
			
		||||
// Ast representing the checked or unchecked expression, its source, and related metadata such as
// source position information.
type Ast struct {
	expr    *exprpb.Expr       // parsed (and possibly checked) expression tree
	info    *exprpb.SourceInfo // position metadata about expression elements
	source  Source             // input the Ast was created from
	refMap  map[int64]*exprpb.Reference // reference map; populated by Env.Check
	typeMap map[int64]*exprpb.Type      // type map; non-empty only once checked (see IsChecked)
}
 | 
			
		||||
 | 
			
		||||
// Expr returns the proto serializable instance of the parsed/checked expression.
//
// NOTE(review): the internal pointer is returned directly (no copy), so
// mutations by the caller are visible to the Ast.
func (ast *Ast) Expr() *exprpb.Expr {
	return ast.expr
}
 | 
			
		||||
 | 
			
		||||
// IsChecked returns whether the Ast value has been successfully type-checked.
 | 
			
		||||
func (ast *Ast) IsChecked() bool {
 | 
			
		||||
	return ast.typeMap != nil && len(ast.typeMap) > 0
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// SourceInfo returns character offset and newline position information about expression elements.
func (ast *Ast) SourceInfo() *exprpb.SourceInfo {
	return ast.info
}
 | 
			
		||||
 | 
			
		||||
// ResultType returns the output type of the expression if the Ast has been type-checked, else
 | 
			
		||||
// returns decls.Dyn as the parse step cannot infer the type.
 | 
			
		||||
func (ast *Ast) ResultType() *exprpb.Type {
 | 
			
		||||
	if !ast.IsChecked() {
 | 
			
		||||
		return decls.Dyn
 | 
			
		||||
	}
 | 
			
		||||
	return ast.typeMap[ast.expr.Id]
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Source returns a view of the input used to create the Ast. This source may be complete or
// constructed from the SourceInfo.
//
// The value returned is whatever Source the Ast was constructed with; Env.Check
// propagates the original source into the checked Ast it builds.
func (ast *Ast) Source() Source {
	return ast.source
}
 | 
			
		||||
 | 
			
		||||
// FormatType converts a type message into a string representation.
//
// This is a thin wrapper over checker.FormatCheckedType.
func FormatType(t *exprpb.Type) string {
	return checker.FormatCheckedType(t)
}
 | 
			
		||||
 | 
			
		||||
// Env encapsulates the context necessary to perform parsing, type checking, or generation of
// evaluable programs for different expressions.
type Env struct {
	Container    *containers.Container // container used for qualified name resolution
	declarations []*exprpb.Decl        // declarations registered with the checker env
	macros       []parser.Macro        // macros available during parsing
	adapter      ref.TypeAdapter       // defaults to the type registry (see NewCustomEnv)
	provider     ref.TypeProvider      // defaults to the type registry (see NewCustomEnv)
	features     map[int]bool          // feature flags, queried via HasFeature
	// program options tied to the environment.
	progOpts []ProgramOption

	// Internal checker representation, constructed lazily and at most once
	// (guarded by once in Env.Check): chk is set on success, chkErr on failure.
	chk    *checker.Env
	chkErr error
	once   sync.Once
}
 | 
			
		||||
 | 
			
		||||
// NewEnv creates a program environment configured with the standard library of CEL functions and
 | 
			
		||||
// macros. The Env value returned can parse and check any CEL program which builds upon the core
 | 
			
		||||
// features documented in the CEL specification.
 | 
			
		||||
//
 | 
			
		||||
// See the EnvOption helper functions for the options that can be used to configure the
 | 
			
		||||
// environment.
 | 
			
		||||
func NewEnv(opts ...EnvOption) (*Env, error) {
 | 
			
		||||
	stdOpts := append([]EnvOption{StdLib()}, opts...)
 | 
			
		||||
	return NewCustomEnv(stdOpts...)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// NewCustomEnv creates a custom program environment which is not automatically configured with the
 | 
			
		||||
// standard library of functions and macros documented in the CEL spec.
 | 
			
		||||
//
 | 
			
		||||
// The purpose for using a custom environment might be for subsetting the standard library produced
 | 
			
		||||
// by the cel.StdLib() function. Subsetting CEL is a core aspect of its design that allows users to
 | 
			
		||||
// limit the compute and memory impact of a CEL program by controlling the functions and macros
 | 
			
		||||
// that may appear in a given expression.
 | 
			
		||||
//
 | 
			
		||||
// See the EnvOption helper functions for the options that can be used to configure the
 | 
			
		||||
// environment.
 | 
			
		||||
func NewCustomEnv(opts ...EnvOption) (*Env, error) {
 | 
			
		||||
	registry, err := types.NewRegistry()
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		return nil, err
 | 
			
		||||
	}
 | 
			
		||||
	return (&Env{
 | 
			
		||||
		declarations: []*exprpb.Decl{},
 | 
			
		||||
		macros:       []parser.Macro{},
 | 
			
		||||
		Container:    containers.DefaultContainer,
 | 
			
		||||
		adapter:      registry,
 | 
			
		||||
		provider:     registry,
 | 
			
		||||
		features:     map[int]bool{},
 | 
			
		||||
		progOpts:     []ProgramOption{},
 | 
			
		||||
	}).configure(opts)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Check performs type-checking on the input Ast and yields a checked Ast and/or set of Issues.
//
// Checking has failed if the returned Issues value and its Issues.Err() value are non-nil.
// Issues should be inspected if they are non-nil, but may not represent a fatal error.
//
// It is possible to have both non-nil Ast and Issues values returned from this call: however,
// the mere presence of an Ast does not imply that it is valid for use.
func (e *Env) Check(ast *Ast) (*Ast, *Issues) {
	// Note, errors aren't currently possible on the Ast to ParsedExpr conversion.
	pe, _ := AstToParsedExpr(ast)

	// Construct the internal checker env, erroring if there is an issue adding the declarations.
	// The checker env is built at most once per Env (guarded by e.once); whichever of e.chk or
	// e.chkErr gets set is sticky for the lifetime of this Env.
	e.once.Do(func() {
		ce := checker.NewEnv(e.Container, e.provider)
		// Dynamic aggregate literals are enabled by default and turned off only when the
		// corresponding feature flag was set on the environment.
		ce.EnableDynamicAggregateLiterals(true)
		if e.HasFeature(FeatureDisableDynamicAggregateLiterals) {
			ce.EnableDynamicAggregateLiterals(false)
		}
		err := ce.Add(e.declarations...)
		if err != nil {
			e.chkErr = err
		} else {
			e.chk = ce
		}
	})
	// The once call will ensure that this value is set or nil for all invocations.
	if e.chkErr != nil {
		// Surface the env construction failure as an issue attached to the Ast's source.
		errs := common.NewErrors(ast.Source())
		errs.ReportError(common.NoLocation, e.chkErr.Error())
		return nil, NewIssues(errs)
	}

	res, errs := checker.Check(pe, ast.Source(), e.chk)
	if len(errs.GetErrors()) > 0 {
		return nil, NewIssues(errs)
	}
	// Manually create the Ast to ensure that the Ast source information (which may be more
	// detailed than the information provided by Check), is returned to the caller.
	return &Ast{
		source:  ast.Source(),
		expr:    res.GetExpr(),
		info:    res.GetSourceInfo(),
		refMap:  res.GetReferenceMap(),
		typeMap: res.GetTypeMap()}, nil
}
 | 
			
		||||
 | 
			
		||||
// Compile combines the Parse and Check phases CEL program compilation to produce an Ast and
 | 
			
		||||
// associated issues.
 | 
			
		||||
//
 | 
			
		||||
// If an error is encountered during parsing the Compile step will not continue with the Check
 | 
			
		||||
// phase. If non-error issues are encountered during Parse, they may be combined with any issues
 | 
			
		||||
// discovered during Check.
 | 
			
		||||
//
 | 
			
		||||
// Note, for parse-only uses of CEL use Parse.
 | 
			
		||||
func (e *Env) Compile(txt string) (*Ast, *Issues) {
 | 
			
		||||
	return e.CompileSource(common.NewTextSource(txt))
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// CompileSource combines the Parse and Check phases CEL program compilation to produce an Ast and
 | 
			
		||||
// associated issues.
 | 
			
		||||
//
 | 
			
		||||
// If an error is encountered during parsing the CompileSource step will not continue with the
 | 
			
		||||
// Check phase. If non-error issues are encountered during Parse, they may be combined with any
 | 
			
		||||
// issues discovered during Check.
 | 
			
		||||
//
 | 
			
		||||
// Note, for parse-only uses of CEL use Parse.
 | 
			
		||||
func (e *Env) CompileSource(src common.Source) (*Ast, *Issues) {
 | 
			
		||||
	ast, iss := e.ParseSource(src)
 | 
			
		||||
	if iss.Err() != nil {
 | 
			
		||||
		return nil, iss
 | 
			
		||||
	}
 | 
			
		||||
	checked, iss2 := e.Check(ast)
 | 
			
		||||
	iss = iss.Append(iss2)
 | 
			
		||||
	if iss.Err() != nil {
 | 
			
		||||
		return nil, iss
 | 
			
		||||
	}
 | 
			
		||||
	return checked, iss
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Extend the current environment with additional options to produce a new Env.
//
// Note, the extended Env value should not share memory with the original. It is possible, however,
// that a CustomTypeAdapter or CustomTypeProvider options could provide values which are mutable.
// To ensure separation of state between extended environments either make sure the TypeAdapter and
// TypeProvider are immutable, or that their underlying implementations are based on the
// ref.TypeRegistry which provides a Copy method which will be invoked by this method.
func (e *Env) Extend(opts ...EnvOption) (*Env, error) {
	// A broken checker environment cannot be extended; propagate its error.
	if e.chkErr != nil {
		return nil, e.chkErr
	}
	// Copy slices.
	decsCopy := make([]*exprpb.Decl, len(e.declarations))
	macsCopy := make([]parser.Macro, len(e.macros))
	progOptsCopy := make([]ProgramOption, len(e.progOpts))
	copy(decsCopy, e.declarations)
	copy(macsCopy, e.macros)
	copy(progOptsCopy, e.progOpts)

	// Copy the adapter / provider if they appear to be mutable.
	adapter := e.adapter
	provider := e.provider
	adapterReg, isAdapterReg := e.adapter.(ref.TypeRegistry)
	providerReg, isProviderReg := e.provider.(ref.TypeRegistry)
	// In most cases the provider and adapter will be a ref.TypeRegistry;
	// however, in the rare cases where they are not, they are assumed to
	// be immutable. Since it is possible to set the TypeProvider separately
	// from the TypeAdapter, the possible configurations which could use a
	// TypeRegistry as the base implementation are captured below.
	if isAdapterReg && isProviderReg {
		reg := providerReg.Copy()
		provider = reg
		// If the adapter and provider are the same object, set the adapter
		// to the same ref.TypeRegistry as the provider.
		if adapterReg == providerReg {
			adapter = reg
		} else {
			// Otherwise, make a copy of the adapter.
			adapter = adapterReg.Copy()
		}
	} else if isProviderReg {
		provider = providerReg.Copy()
	} else if isAdapterReg {
		adapter = adapterReg.Copy()
	}

	// Copy the feature flags so toggles on the extension do not leak back to the original.
	featuresCopy := make(map[int]bool, len(e.features))
	for k, v := range e.features {
		featuresCopy[k] = v
	}

	// Note: the checker env (e.chk / e.once) is intentionally not carried over; the
	// extended Env rebuilds it lazily on the first Check call.
	ext := &Env{
		Container:    e.Container,
		declarations: decsCopy,
		macros:       macsCopy,
		progOpts:     progOptsCopy,
		adapter:      adapter,
		features:     featuresCopy,
		provider:     provider,
	}
	return ext.configure(opts)
}
 | 
			
		||||
 | 
			
		||||
// HasFeature checks whether the environment enables the given feature
 | 
			
		||||
// flag, as enumerated in options.go.
 | 
			
		||||
func (e *Env) HasFeature(flag int) bool {
 | 
			
		||||
	_, has := e.features[flag]
 | 
			
		||||
	return has
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Parse parses the input expression value `txt` to a Ast and/or a set of Issues.
 | 
			
		||||
//
 | 
			
		||||
// This form of Parse creates a common.Source value for the input `txt` and forwards to the
 | 
			
		||||
// ParseSource method.
 | 
			
		||||
func (e *Env) Parse(txt string) (*Ast, *Issues) {
 | 
			
		||||
	src := common.NewTextSource(txt)
 | 
			
		||||
	return e.ParseSource(src)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// ParseSource parses the input source to an Ast and/or set of Issues.
 | 
			
		||||
//
 | 
			
		||||
// Parsing has failed if the returned Issues value and its Issues.Err() value is non-nil.
 | 
			
		||||
// Issues should be inspected if they are non-nil, but may not represent a fatal error.
 | 
			
		||||
//
 | 
			
		||||
// It is possible to have both non-nil Ast and Issues values returned from this call; however,
 | 
			
		||||
// the mere presence of an Ast does not imply that it is valid for use.
 | 
			
		||||
func (e *Env) ParseSource(src common.Source) (*Ast, *Issues) {
 | 
			
		||||
	res, errs := parser.ParseWithMacros(src, e.macros)
 | 
			
		||||
	if len(errs.GetErrors()) > 0 {
 | 
			
		||||
		return nil, &Issues{errs: errs}
 | 
			
		||||
	}
 | 
			
		||||
	// Manually create the Ast to ensure that the text source information is propagated on
 | 
			
		||||
	// subsequent calls to Check.
 | 
			
		||||
	return &Ast{
 | 
			
		||||
		source: Source(src),
 | 
			
		||||
		expr:   res.GetExpr(),
 | 
			
		||||
		info:   res.GetSourceInfo()}, nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Program generates an evaluable instance of the Ast within the environment (Env).
 | 
			
		||||
func (e *Env) Program(ast *Ast, opts ...ProgramOption) (Program, error) {
 | 
			
		||||
	optSet := e.progOpts
 | 
			
		||||
	if len(opts) != 0 {
 | 
			
		||||
		mergedOpts := []ProgramOption{}
 | 
			
		||||
		mergedOpts = append(mergedOpts, e.progOpts...)
 | 
			
		||||
		mergedOpts = append(mergedOpts, opts...)
 | 
			
		||||
		optSet = mergedOpts
 | 
			
		||||
	}
 | 
			
		||||
	return newProgram(e, ast, optSet)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// SetFeature sets the given feature flag, as enumerated in options.go.
func (e *Env) SetFeature(flag int) {
	// NOTE(review): writes to the shared features map without synchronization —
	// presumably intended for configuration time only; confirm before calling
	// concurrently with other Env methods.
	e.features[flag] = true
}
 | 
			
		||||
 | 
			
		||||
// TypeAdapter returns the `ref.TypeAdapter` configured for the environment.
func (e *Env) TypeAdapter() ref.TypeAdapter {
	// Plain accessor; the adapter is assigned at construction or via CustomTypeAdapter.
	return e.adapter
}
 | 
			
		||||
 | 
			
		||||
// TypeProvider returns the `ref.TypeProvider` configured for the environment.
func (e *Env) TypeProvider() ref.TypeProvider {
	// Plain accessor; the provider is assigned at construction or via CustomTypeProvider.
	return e.provider
}
 | 
			
		||||
 | 
			
		||||
// UnknownVars returns an interpreter.PartialActivation which marks all variables
 | 
			
		||||
// declared in the Env as unknown AttributePattern values.
 | 
			
		||||
//
 | 
			
		||||
// Note, the UnknownVars will behave the same as an interpreter.EmptyActivation
 | 
			
		||||
// unless the PartialAttributes option is provided as a ProgramOption.
 | 
			
		||||
func (e *Env) UnknownVars() interpreter.PartialActivation {
 | 
			
		||||
	var unknownPatterns []*interpreter.AttributePattern
 | 
			
		||||
	for _, d := range e.declarations {
 | 
			
		||||
		switch d.GetDeclKind().(type) {
 | 
			
		||||
		case *exprpb.Decl_Ident:
 | 
			
		||||
			unknownPatterns = append(unknownPatterns,
 | 
			
		||||
				interpreter.NewAttributePattern(d.GetName()))
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
	part, _ := PartialVars(
 | 
			
		||||
		interpreter.EmptyActivation(),
 | 
			
		||||
		unknownPatterns...)
 | 
			
		||||
	return part
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// ResidualAst takes an Ast and its EvalDetails to produce a new Ast which only contains the
// attribute references which are unknown.
//
// Residual expressions are beneficial in a few scenarios:
//
// - Optimizing constant expression evaluations away.
// - Indexing and pruning expressions based on known input arguments.
// - Surfacing additional requirements that are needed in order to complete an evaluation.
// - Sharing the evaluation of an expression across multiple machines/nodes.
//
// For example, if an expression targets a 'resource' and 'request' attribute and the possible
// values for the resource are known, a PartialActivation could mark the 'request' as an unknown
// interpreter.AttributePattern and the resulting ResidualAst would be reduced to only the parts
// of the expression that reference the 'request'.
//
// Note, the expression ids within the residual AST generated through this method have no
// correlation to the expression ids of the original AST.
//
// See the PartialVars helper for how to construct a PartialActivation.
//
// TODO: Consider adding an option to generate a Program.Residual to avoid round-tripping to an
// Ast format and then Program again.
func (e *Env) ResidualAst(a *Ast, details *EvalDetails) (*Ast, error) {
	// Prune the expression down to the nodes the evaluation state left unresolved.
	pruned := interpreter.PruneAst(a.Expr(), details.State())
	// Round-trip through the unparser: render the pruned expression to text, then
	// re-parse so the residual Ast gets a consistent set of fresh expression ids.
	expr, err := AstToString(ParsedExprToAst(&exprpb.ParsedExpr{Expr: pruned}))
	if err != nil {
		return nil, err
	}
	parsed, iss := e.Parse(expr)
	if iss != nil && iss.Err() != nil {
		return nil, iss.Err()
	}
	// Parse-only inputs yield a parse-only residual; checked inputs are re-checked.
	if !a.IsChecked() {
		return parsed, nil
	}
	checked, iss := e.Check(parsed)
	if iss != nil && iss.Err() != nil {
		return nil, iss.Err()
	}
	return checked, nil
}
 | 
			
		||||
 | 
			
		||||
// configure applies a series of EnvOptions to the current environment.
 | 
			
		||||
func (e *Env) configure(opts []EnvOption) (*Env, error) {
 | 
			
		||||
	// Customized the environment using the provided EnvOption values. If an error is
 | 
			
		||||
	// generated at any step this, will be returned as a nil Env with a non-nil error.
 | 
			
		||||
	var err error
 | 
			
		||||
	for _, opt := range opts {
 | 
			
		||||
		e, err = opt(e)
 | 
			
		||||
		if err != nil {
 | 
			
		||||
			return nil, err
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
	return e, nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Issues defines methods for inspecting the error details of parse and check calls.
//
// Note: in the future, non-fatal warnings and notices may be inspectable via the Issues struct.
type Issues struct {
	// errs backs all accessor methods; it holds the parse/check errors for this value.
	errs *common.Errors
}
 | 
			
		||||
 | 
			
		||||
// NewIssues returns an Issues struct from a common.Errors object.
 | 
			
		||||
func NewIssues(errs *common.Errors) *Issues {
 | 
			
		||||
	return &Issues{
 | 
			
		||||
		errs: errs,
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Err returns an error value if the issues list contains one or more errors.
 | 
			
		||||
func (i *Issues) Err() error {
 | 
			
		||||
	if i == nil {
 | 
			
		||||
		return nil
 | 
			
		||||
	}
 | 
			
		||||
	if len(i.Errors()) > 0 {
 | 
			
		||||
		return errors.New(i.String())
 | 
			
		||||
	}
 | 
			
		||||
	return nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Errors returns the collection of errors encountered in more granular detail.
 | 
			
		||||
func (i *Issues) Errors() []common.Error {
 | 
			
		||||
	if i == nil {
 | 
			
		||||
		return []common.Error{}
 | 
			
		||||
	}
 | 
			
		||||
	return i.errs.GetErrors()
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Append collects the issues from another Issues struct into a new Issues object.
 | 
			
		||||
func (i *Issues) Append(other *Issues) *Issues {
 | 
			
		||||
	if i == nil {
 | 
			
		||||
		return other
 | 
			
		||||
	}
 | 
			
		||||
	return NewIssues(i.errs.Append(other.errs.GetErrors()))
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// String converts the issues to a suitable display string.
 | 
			
		||||
func (i *Issues) String() string {
 | 
			
		||||
	if i == nil {
 | 
			
		||||
		return ""
 | 
			
		||||
	}
 | 
			
		||||
	return i.errs.ToDisplayString()
 | 
			
		||||
}
 | 
			
		||||
							
								
								
									
										122
									
								
								vendor/github.com/google/cel-go/cel/io.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										122
									
								
								vendor/github.com/google/cel-go/cel/io.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							@@ -0,0 +1,122 @@
 | 
			
		||||
// Copyright 2019 Google LLC
 | 
			
		||||
//
 | 
			
		||||
// Licensed under the Apache License, Version 2.0 (the "License");
 | 
			
		||||
// you may not use this file except in compliance with the License.
 | 
			
		||||
// You may obtain a copy of the License at
 | 
			
		||||
//
 | 
			
		||||
//      http://www.apache.org/licenses/LICENSE-2.0
 | 
			
		||||
//
 | 
			
		||||
// Unless required by applicable law or agreed to in writing, software
 | 
			
		||||
// distributed under the License is distributed on an "AS IS" BASIS,
 | 
			
		||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | 
			
		||||
// See the License for the specific language governing permissions and
 | 
			
		||||
// limitations under the License.
 | 
			
		||||
 | 
			
		||||
package cel
 | 
			
		||||
 | 
			
		||||
import (
 | 
			
		||||
	"fmt"
 | 
			
		||||
 | 
			
		||||
	"github.com/google/cel-go/common"
 | 
			
		||||
	"github.com/google/cel-go/parser"
 | 
			
		||||
 | 
			
		||||
	exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
// CheckedExprToAst converts a checked expression proto message to an Ast.
func CheckedExprToAst(checkedExpr *exprpb.CheckedExpr) *Ast {
	// A nil source makes the Ast source derive from the proto's own SourceInfo.
	return CheckedExprToAstWithSource(checkedExpr, nil)
}
 | 
			
		||||
 | 
			
		||||
// CheckedExprToAstWithSource converts a checked expression proto message to an Ast,
 | 
			
		||||
// using the provided Source as the textual contents.
 | 
			
		||||
//
 | 
			
		||||
// In general the source is not necessary unless the AST has been modified between the
 | 
			
		||||
// `Parse` and `Check` calls as an `Ast` created from the `Parse` step will carry the source
 | 
			
		||||
// through future calls.
 | 
			
		||||
//
 | 
			
		||||
// Prefer CheckedExprToAst if loading expressions from storage.
 | 
			
		||||
func CheckedExprToAstWithSource(checkedExpr *exprpb.CheckedExpr, src common.Source) *Ast {
 | 
			
		||||
	refMap := checkedExpr.GetReferenceMap()
 | 
			
		||||
	if refMap == nil {
 | 
			
		||||
		refMap = map[int64]*exprpb.Reference{}
 | 
			
		||||
	}
 | 
			
		||||
	typeMap := checkedExpr.GetTypeMap()
 | 
			
		||||
	if typeMap == nil {
 | 
			
		||||
		typeMap = map[int64]*exprpb.Type{}
 | 
			
		||||
	}
 | 
			
		||||
	si := checkedExpr.GetSourceInfo()
 | 
			
		||||
	if si == nil {
 | 
			
		||||
		si = &exprpb.SourceInfo{}
 | 
			
		||||
	}
 | 
			
		||||
	if src == nil {
 | 
			
		||||
		src = common.NewInfoSource(si)
 | 
			
		||||
	}
 | 
			
		||||
	return &Ast{
 | 
			
		||||
		expr:    checkedExpr.GetExpr(),
 | 
			
		||||
		info:    si,
 | 
			
		||||
		source:  src,
 | 
			
		||||
		refMap:  refMap,
 | 
			
		||||
		typeMap: typeMap,
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// AstToCheckedExpr converts an Ast to an protobuf CheckedExpr value.
 | 
			
		||||
//
 | 
			
		||||
// If the Ast.IsChecked() returns false, this conversion method will return an error.
 | 
			
		||||
func AstToCheckedExpr(a *Ast) (*exprpb.CheckedExpr, error) {
 | 
			
		||||
	if !a.IsChecked() {
 | 
			
		||||
		return nil, fmt.Errorf("cannot convert unchecked ast")
 | 
			
		||||
	}
 | 
			
		||||
	return &exprpb.CheckedExpr{
 | 
			
		||||
		Expr:         a.Expr(),
 | 
			
		||||
		SourceInfo:   a.SourceInfo(),
 | 
			
		||||
		ReferenceMap: a.refMap,
 | 
			
		||||
		TypeMap:      a.typeMap,
 | 
			
		||||
	}, nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// ParsedExprToAst converts a parsed expression proto message to an Ast.
func ParsedExprToAst(parsedExpr *exprpb.ParsedExpr) *Ast {
	// A nil source makes the Ast source derive from the proto's own SourceInfo.
	return ParsedExprToAstWithSource(parsedExpr, nil)
}
 | 
			
		||||
 | 
			
		||||
// ParsedExprToAstWithSource converts a parsed expression proto message to an Ast,
 | 
			
		||||
// using the provided Source as the textual contents.
 | 
			
		||||
//
 | 
			
		||||
// In general you only need this if you need to recheck a previously checked
 | 
			
		||||
// expression, or if you need to separately check a subset of an expression.
 | 
			
		||||
//
 | 
			
		||||
// Prefer ParsedExprToAst if loading expressions from storage.
 | 
			
		||||
func ParsedExprToAstWithSource(parsedExpr *exprpb.ParsedExpr, src common.Source) *Ast {
 | 
			
		||||
	si := parsedExpr.GetSourceInfo()
 | 
			
		||||
	if si == nil {
 | 
			
		||||
		si = &exprpb.SourceInfo{}
 | 
			
		||||
	}
 | 
			
		||||
	if src == nil {
 | 
			
		||||
		src = common.NewInfoSource(si)
 | 
			
		||||
	}
 | 
			
		||||
	return &Ast{
 | 
			
		||||
		expr:   parsedExpr.GetExpr(),
 | 
			
		||||
		info:   si,
 | 
			
		||||
		source: src,
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// AstToParsedExpr converts an Ast to an protobuf ParsedExpr value.
 | 
			
		||||
func AstToParsedExpr(a *Ast) (*exprpb.ParsedExpr, error) {
 | 
			
		||||
	return &exprpb.ParsedExpr{
 | 
			
		||||
		Expr:       a.Expr(),
 | 
			
		||||
		SourceInfo: a.SourceInfo(),
 | 
			
		||||
	}, nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// AstToString converts an Ast back to a string if possible.
 | 
			
		||||
//
 | 
			
		||||
// Note, the conversion may not be an exact replica of the original expression, but will produce
 | 
			
		||||
// a string that is semantically equivalent and whose textual representation is stable.
 | 
			
		||||
func AstToString(a *Ast) (string, error) {
 | 
			
		||||
	expr := a.Expr()
 | 
			
		||||
	info := a.SourceInfo()
 | 
			
		||||
	return parser.Unparse(expr, info)
 | 
			
		||||
}
 | 
			
		||||
							
								
								
									
										77
									
								
								vendor/github.com/google/cel-go/cel/library.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										77
									
								
								vendor/github.com/google/cel-go/cel/library.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							@@ -0,0 +1,77 @@
 | 
			
		||||
// Copyright 2020 Google LLC
 | 
			
		||||
//
 | 
			
		||||
// Licensed under the Apache License, Version 2.0 (the "License");
 | 
			
		||||
// you may not use this file except in compliance with the License.
 | 
			
		||||
// You may obtain a copy of the License at
 | 
			
		||||
//
 | 
			
		||||
//      http://www.apache.org/licenses/LICENSE-2.0
 | 
			
		||||
//
 | 
			
		||||
// Unless required by applicable law or agreed to in writing, software
 | 
			
		||||
// distributed under the License is distributed on an "AS IS" BASIS,
 | 
			
		||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | 
			
		||||
// See the License for the specific language governing permissions and
 | 
			
		||||
// limitations under the License.
 | 
			
		||||
 | 
			
		||||
package cel
 | 
			
		||||
 | 
			
		||||
import (
 | 
			
		||||
	"github.com/google/cel-go/checker"
 | 
			
		||||
	"github.com/google/cel-go/interpreter/functions"
 | 
			
		||||
	"github.com/google/cel-go/parser"
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
// Library provides a collection of EnvOption and ProgramOption values used to configure a CEL
// environment for a particular use case or with a related set of functionality.
//
// Note, the ProgramOption values provided by a library are expected to be static and not vary
// between calls to Env.Program(). If there is a need for such dynamic configuration, prefer to
// configure these options outside the Library and within the Env.Program() call directly.
type Library interface {
	// CompileOptions returns a collection of functional options for configuring the Parse / Check
	// environment.
	CompileOptions() []EnvOption

	// ProgramOptions returns a collection of functional options which should be included in every
	// Program generated from the Env.Program() call.
	ProgramOptions() []ProgramOption
}
 | 
			
		||||
 | 
			
		||||
// Lib creates an EnvOption out of a Library, allowing libraries to be provided as functional args,
 | 
			
		||||
// and to be linked to each other.
 | 
			
		||||
func Lib(l Library) EnvOption {
 | 
			
		||||
	return func(e *Env) (*Env, error) {
 | 
			
		||||
		var err error
 | 
			
		||||
		for _, opt := range l.CompileOptions() {
 | 
			
		||||
			e, err = opt(e)
 | 
			
		||||
			if err != nil {
 | 
			
		||||
				return nil, err
 | 
			
		||||
			}
 | 
			
		||||
		}
 | 
			
		||||
		e.progOpts = append(e.progOpts, l.ProgramOptions()...)
 | 
			
		||||
		return e, nil
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// StdLib returns an EnvOption for the standard library of CEL functions and macros.
 | 
			
		||||
func StdLib() EnvOption {
 | 
			
		||||
	return Lib(stdLibrary{})
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// stdLibrary implements the Library interface and provides functional options for the core CEL
// features documented in the specification.
// The type carries no state, so its zero value is used wherever an instance is needed.
type stdLibrary struct{}
 | 
			
		||||
 | 
			
		||||
// CompileOptions returns options for the standard CEL function declarations and macros.
// (The method is named CompileOptions to satisfy the Library interface.)
func (stdLibrary) CompileOptions() []EnvOption {
	return []EnvOption{
		Declarations(checker.StandardDeclarations()...),
		Macros(parser.AllMacros...),
	}
}
 | 
			
		||||
 | 
			
		||||
// ProgramOptions returns function implementations for the standard CEL functions.
 | 
			
		||||
func (stdLibrary) ProgramOptions() []ProgramOption {
 | 
			
		||||
	return []ProgramOption{
 | 
			
		||||
		Functions(functions.StandardOverloads()...),
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
							
								
								
									
										451
									
								
								vendor/github.com/google/cel-go/cel/options.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										451
									
								
								vendor/github.com/google/cel-go/cel/options.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							@@ -0,0 +1,451 @@
 | 
			
		||||
// Copyright 2019 Google LLC
 | 
			
		||||
//
 | 
			
		||||
// Licensed under the Apache License, Version 2.0 (the "License");
 | 
			
		||||
// you may not use this file except in compliance with the License.
 | 
			
		||||
// You may obtain a copy of the License at
 | 
			
		||||
//
 | 
			
		||||
//      http://www.apache.org/licenses/LICENSE-2.0
 | 
			
		||||
//
 | 
			
		||||
// Unless required by applicable law or agreed to in writing, software
 | 
			
		||||
// distributed under the License is distributed on an "AS IS" BASIS,
 | 
			
		||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | 
			
		||||
// See the License for the specific language governing permissions and
 | 
			
		||||
// limitations under the License.
 | 
			
		||||
 | 
			
		||||
package cel
 | 
			
		||||
 | 
			
		||||
import (
 | 
			
		||||
	"fmt"
 | 
			
		||||
 | 
			
		||||
	"github.com/google/cel-go/checker/decls"
 | 
			
		||||
	"github.com/google/cel-go/common/containers"
 | 
			
		||||
	"github.com/google/cel-go/common/types/pb"
 | 
			
		||||
	"github.com/google/cel-go/common/types/ref"
 | 
			
		||||
	"github.com/google/cel-go/interpreter"
 | 
			
		||||
	"github.com/google/cel-go/interpreter/functions"
 | 
			
		||||
	"github.com/google/cel-go/parser"
 | 
			
		||||
	"google.golang.org/protobuf/proto"
 | 
			
		||||
	"google.golang.org/protobuf/reflect/protodesc"
 | 
			
		||||
	"google.golang.org/protobuf/reflect/protoreflect"
 | 
			
		||||
	"google.golang.org/protobuf/reflect/protoregistry"
 | 
			
		||||
	"google.golang.org/protobuf/types/dynamicpb"
 | 
			
		||||
 | 
			
		||||
	exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
 | 
			
		||||
	descpb "google.golang.org/protobuf/types/descriptorpb"
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
// These constants beginning with "Feature" enable optional behavior in
// the library.  See the documentation for each constant to see its
// effects, compatibility restrictions, and standard conformance.
const (
	// Skip the zero value so no feature flag collides with an unset int.
	_ = iota

	// Disallow heterogeneous aggregate (list, map) literals.
	// Note, it is still possible to have heterogeneous aggregates when
	// provided as variables to the expression, as well as via conversion
	// of well-known dynamic types, or with unchecked expressions.
	// Affects checking.  Provides a subset of standard behavior.
	FeatureDisableDynamicAggregateLiterals
)
 | 
			
		||||
 | 
			
		||||
// EnvOption is a functional interface for configuring the environment.
// An option either returns the (possibly modified) Env, or an error which
// aborts environment configuration.
type EnvOption func(e *Env) (*Env, error)
 | 
			
		||||
 | 
			
		||||
// ClearMacros options clears all parser macros.
//
// Clearing macros will ensure CEL expressions can only contain linear evaluation paths, as
// comprehensions such as `all` and `exists` are enabled only via macros.
func ClearMacros() EnvOption {
	return func(e *Env) (*Env, error) {
		// Reset to the parser's canonical empty macro set.
		e.macros = parser.NoMacros
		return e, nil
	}
}
 | 
			
		||||
 | 
			
		||||
// CustomTypeAdapter swaps the default ref.TypeAdapter implementation with a custom one.
//
// Note: This option must be specified before the Types and TypeDescs options when used together.
func CustomTypeAdapter(adapter ref.TypeAdapter) EnvOption {
	return func(e *Env) (*Env, error) {
		// Overwrites any previously configured adapter outright.
		e.adapter = adapter
		return e, nil
	}
}
 | 
			
		||||
 | 
			
		||||
// CustomTypeProvider swaps the default ref.TypeProvider implementation with a custom one.
//
// Note: This option must be specified before the Types and TypeDescs options when used together.
func CustomTypeProvider(provider ref.TypeProvider) EnvOption {
	return func(e *Env) (*Env, error) {
		// Overwrites any previously configured provider outright.
		e.provider = provider
		return e, nil
	}
}
 | 
			
		||||
 | 
			
		||||
// Declarations option extends the declaration set configured in the environment.
//
// Note: Declarations will by default be appended to the pre-existing declaration set configured
// for the environment. The NewEnv call builds on top of the standard CEL declarations. For a
// purely custom set of declarations use NewCustomEnv.
func Declarations(decls ...*exprpb.Decl) EnvOption {
	// TODO: provide an alternative means of specifying declarations that doesn't refer
	// to the underlying proto implementations.
	return func(e *Env) (*Env, error) {
		// Appends rather than replaces, so repeated uses of this option accumulate.
		e.declarations = append(e.declarations, decls...)
		return e, nil
	}
}
 | 
			
		||||
 | 
			
		||||
// Features sets the given feature flags.  See list of Feature constants above.
 | 
			
		||||
func Features(flags ...int) EnvOption {
 | 
			
		||||
	return func(e *Env) (*Env, error) {
 | 
			
		||||
		for _, flag := range flags {
 | 
			
		||||
			e.SetFeature(flag)
 | 
			
		||||
		}
 | 
			
		||||
		return e, nil
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// HomogeneousAggregateLiterals option ensures that list and map literal entry types must agree
// during type-checking.
//
// Note, it is still possible to have heterogeneous aggregates when provided as variables to the
// expression, as well as via conversion of well-known dynamic types, or with unchecked
// expressions.
func HomogeneousAggregateLiterals() EnvOption {
	// Convenience wrapper around Features for this single flag.
	return Features(FeatureDisableDynamicAggregateLiterals)
}
 | 
			
		||||
 | 
			
		||||
// Macros option extends the macro set configured in the environment.
 | 
			
		||||
//
 | 
			
		||||
// Note: This option must be specified after ClearMacros if used together.
 | 
			
		||||
func Macros(macros ...parser.Macro) EnvOption {
 | 
			
		||||
	return func(e *Env) (*Env, error) {
 | 
			
		||||
		e.macros = append(e.macros, macros...)
 | 
			
		||||
		return e, nil
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Container sets the container for resolving variable names. Defaults to an empty container.
 | 
			
		||||
//
 | 
			
		||||
// If all references within an expression are relative to a protocol buffer package, then
 | 
			
		||||
// specifying a container of `google.type` would make it possible to write expressions such as
 | 
			
		||||
// `Expr{expression: 'a < b'}` instead of having to write `google.type.Expr{...}`.
 | 
			
		||||
func Container(name string) EnvOption {
 | 
			
		||||
	return func(e *Env) (*Env, error) {
 | 
			
		||||
		cont, err := e.Container.Extend(containers.Name(name))
 | 
			
		||||
		if err != nil {
 | 
			
		||||
			return nil, err
 | 
			
		||||
		}
 | 
			
		||||
		e.Container = cont
 | 
			
		||||
		return e, nil
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Abbrevs configures a set of simple names as abbreviations for fully-qualified names.
 | 
			
		||||
//
 | 
			
		||||
// An abbreviation (abbrev for short) is a simple name that expands to a fully-qualified name.
 | 
			
		||||
// Abbreviations can be useful when working with variables, functions, and especially types from
 | 
			
		||||
// multiple namespaces:
 | 
			
		||||
//
 | 
			
		||||
//    // CEL object construction
 | 
			
		||||
//    qual.pkg.version.ObjTypeName{
 | 
			
		||||
//       field: alt.container.ver.FieldTypeName{value: ...}
 | 
			
		||||
//    }
 | 
			
		||||
//
 | 
			
		||||
// Only one the qualified names above may be used as the CEL container, so at least one of these
 | 
			
		||||
// references must be a long qualified name within an otherwise short CEL program. Using the
 | 
			
		||||
// following abbreviations, the program becomes much simpler:
 | 
			
		||||
//
 | 
			
		||||
//    // CEL Go option
 | 
			
		||||
//    Abbrevs("qual.pkg.version.ObjTypeName", "alt.container.ver.FieldTypeName")
 | 
			
		||||
//    // Simplified Object construction
 | 
			
		||||
//    ObjTypeName{field: FieldTypeName{value: ...}}
 | 
			
		||||
//
 | 
			
		||||
// There are a few rules for the qualified names and the simple abbreviations generated from them:
 | 
			
		||||
// - Qualified names must be dot-delimited, e.g. `package.subpkg.name`.
 | 
			
		||||
// - The last element in the qualified name is the abbreviation.
 | 
			
		||||
// - Abbreviations must not collide with each other.
 | 
			
		||||
// - The abbreviation must not collide with unqualified names in use.
 | 
			
		||||
//
 | 
			
		||||
// Abbreviations are distinct from container-based references in the following important ways:
 | 
			
		||||
// - Abbreviations must expand to a fully-qualified name.
 | 
			
		||||
// - Expanded abbreviations do not participate in namespace resolution.
 | 
			
		||||
// - Abbreviation expansion is done instead of the container search for a matching identifier.
 | 
			
		||||
// - Containers follow C++ namespace resolution rules with searches from the most qualified name
 | 
			
		||||
//   to the least qualified name.
 | 
			
		||||
// - Container references within the CEL program may be relative, and are resolved to fully
 | 
			
		||||
//   qualified names at either type-check time or program plan time, whichever comes first.
 | 
			
		||||
//
 | 
			
		||||
// If there is ever a case where an identifier could be in both the container and as an
 | 
			
		||||
// abbreviation, the abbreviation wins as this will ensure that the meaning of a program is
 | 
			
		||||
// preserved between compilations even as the container evolves.
 | 
			
		||||
func Abbrevs(qualifiedNames ...string) EnvOption {
 | 
			
		||||
	return func(e *Env) (*Env, error) {
 | 
			
		||||
		cont, err := e.Container.Extend(containers.Abbrevs(qualifiedNames...))
 | 
			
		||||
		if err != nil {
 | 
			
		||||
			return nil, err
 | 
			
		||||
		}
 | 
			
		||||
		e.Container = cont
 | 
			
		||||
		return e, nil
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Types adds one or more type declarations to the environment, allowing for construction of
 | 
			
		||||
// type-literals whose definitions are included in the common expression built-in set.
 | 
			
		||||
//
 | 
			
		||||
// The input types may either be instances of `proto.Message` or `ref.Type`. Any other type
 | 
			
		||||
// provided to this option will result in an error.
 | 
			
		||||
//
 | 
			
		||||
// Well-known protobuf types within the `google.protobuf.*` package are included in the standard
 | 
			
		||||
// environment by default.
 | 
			
		||||
//
 | 
			
		||||
// Note: This option must be specified after the CustomTypeProvider option when used together.
 | 
			
		||||
func Types(addTypes ...interface{}) EnvOption {
 | 
			
		||||
	return func(e *Env) (*Env, error) {
 | 
			
		||||
		reg, isReg := e.provider.(ref.TypeRegistry)
 | 
			
		||||
		if !isReg {
 | 
			
		||||
			return nil, fmt.Errorf("custom types not supported by provider: %T", e.provider)
 | 
			
		||||
		}
 | 
			
		||||
		for _, t := range addTypes {
 | 
			
		||||
			switch v := t.(type) {
 | 
			
		||||
			case proto.Message:
 | 
			
		||||
				fdMap := pb.CollectFileDescriptorSet(v)
 | 
			
		||||
				for _, fd := range fdMap {
 | 
			
		||||
					err := reg.RegisterDescriptor(fd)
 | 
			
		||||
					if err != nil {
 | 
			
		||||
						return nil, err
 | 
			
		||||
					}
 | 
			
		||||
				}
 | 
			
		||||
			case ref.Type:
 | 
			
		||||
				err := reg.RegisterType(v)
 | 
			
		||||
				if err != nil {
 | 
			
		||||
					return nil, err
 | 
			
		||||
				}
 | 
			
		||||
			default:
 | 
			
		||||
				return nil, fmt.Errorf("unsupported type: %T", t)
 | 
			
		||||
			}
 | 
			
		||||
		}
 | 
			
		||||
		return e, nil
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// TypeDescs adds type declarations from any protoreflect.FileDescriptor, protoregistry.Files,
 | 
			
		||||
// google.protobuf.FileDescriptorProto or google.protobuf.FileDescriptorSet provided.
 | 
			
		||||
//
 | 
			
		||||
// Note that messages instantiated from these descriptors will be *dynamicpb.Message values
 | 
			
		||||
// rather than the concrete message type.
 | 
			
		||||
//
 | 
			
		||||
// TypeDescs are hermetic to a single Env object, but may be copied to other Env values via
 | 
			
		||||
// extension or by re-using the same EnvOption with another NewEnv() call.
 | 
			
		||||
func TypeDescs(descs ...interface{}) EnvOption {
 | 
			
		||||
	return func(e *Env) (*Env, error) {
 | 
			
		||||
		reg, isReg := e.provider.(ref.TypeRegistry)
 | 
			
		||||
		if !isReg {
 | 
			
		||||
			return nil, fmt.Errorf("custom types not supported by provider: %T", e.provider)
 | 
			
		||||
		}
 | 
			
		||||
		// Scan the input descriptors for FileDescriptorProto messages and accumulate them into a
 | 
			
		||||
		// synthetic FileDescriptorSet as the FileDescriptorProto messages may refer to each other
 | 
			
		||||
		// and will not resolve properly unless they are part of the same set.
 | 
			
		||||
		var fds *descpb.FileDescriptorSet
 | 
			
		||||
		for _, d := range descs {
 | 
			
		||||
			switch f := d.(type) {
 | 
			
		||||
			case *descpb.FileDescriptorProto:
 | 
			
		||||
				if fds == nil {
 | 
			
		||||
					fds = &descpb.FileDescriptorSet{
 | 
			
		||||
						File: []*descpb.FileDescriptorProto{},
 | 
			
		||||
					}
 | 
			
		||||
				}
 | 
			
		||||
				fds.File = append(fds.File, f)
 | 
			
		||||
			}
 | 
			
		||||
		}
 | 
			
		||||
		if fds != nil {
 | 
			
		||||
			if err := registerFileSet(reg, fds); err != nil {
 | 
			
		||||
				return nil, err
 | 
			
		||||
			}
 | 
			
		||||
		}
 | 
			
		||||
		for _, d := range descs {
 | 
			
		||||
			switch f := d.(type) {
 | 
			
		||||
			case *protoregistry.Files:
 | 
			
		||||
				if err := registerFiles(reg, f); err != nil {
 | 
			
		||||
					return nil, err
 | 
			
		||||
				}
 | 
			
		||||
			case protoreflect.FileDescriptor:
 | 
			
		||||
				if err := reg.RegisterDescriptor(f); err != nil {
 | 
			
		||||
					return nil, err
 | 
			
		||||
				}
 | 
			
		||||
			case *descpb.FileDescriptorSet:
 | 
			
		||||
				if err := registerFileSet(reg, f); err != nil {
 | 
			
		||||
					return nil, err
 | 
			
		||||
				}
 | 
			
		||||
			case *descpb.FileDescriptorProto:
 | 
			
		||||
				// skip, handled as a synthetic file descriptor set.
 | 
			
		||||
			default:
 | 
			
		||||
				return nil, fmt.Errorf("unsupported type descriptor: %T", d)
 | 
			
		||||
			}
 | 
			
		||||
		}
 | 
			
		||||
		return e, nil
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func registerFileSet(reg ref.TypeRegistry, fileSet *descpb.FileDescriptorSet) error {
 | 
			
		||||
	files, err := protodesc.NewFiles(fileSet)
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		return fmt.Errorf("protodesc.NewFiles(%v) failed: %v", fileSet, err)
 | 
			
		||||
	}
 | 
			
		||||
	return registerFiles(reg, files)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func registerFiles(reg ref.TypeRegistry, files *protoregistry.Files) error {
 | 
			
		||||
	var err error
 | 
			
		||||
	files.RangeFiles(func(fd protoreflect.FileDescriptor) bool {
 | 
			
		||||
		err = reg.RegisterDescriptor(fd)
 | 
			
		||||
		return err == nil
 | 
			
		||||
	})
 | 
			
		||||
	return err
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// ProgramOption is a functional interface for configuring evaluation bindings and behaviors.
 | 
			
		||||
type ProgramOption func(p *prog) (*prog, error)
 | 
			
		||||
 | 
			
		||||
// CustomDecorator appends an InterpreterDecorator to the program.
 | 
			
		||||
//
 | 
			
		||||
// InterpretableDecorators can be used to inspect, alter, or replace the Program plan.
 | 
			
		||||
func CustomDecorator(dec interpreter.InterpretableDecorator) ProgramOption {
 | 
			
		||||
	return func(p *prog) (*prog, error) {
 | 
			
		||||
		p.decorators = append(p.decorators, dec)
 | 
			
		||||
		return p, nil
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Functions adds function overloads that extend or override the set of CEL built-ins.
 | 
			
		||||
func Functions(funcs ...*functions.Overload) ProgramOption {
 | 
			
		||||
	return func(p *prog) (*prog, error) {
 | 
			
		||||
		if err := p.dispatcher.Add(funcs...); err != nil {
 | 
			
		||||
			return nil, err
 | 
			
		||||
		}
 | 
			
		||||
		return p, nil
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Globals sets the global variable values for a given program. These values may be shadowed by
 | 
			
		||||
// variables with the same name provided to the Eval() call.
 | 
			
		||||
//
 | 
			
		||||
// The vars value may either be an `interpreter.Activation` instance or a `map[string]interface{}`.
 | 
			
		||||
func Globals(vars interface{}) ProgramOption {
 | 
			
		||||
	return func(p *prog) (*prog, error) {
 | 
			
		||||
		defaultVars, err :=
 | 
			
		||||
			interpreter.NewActivation(vars)
 | 
			
		||||
		if err != nil {
 | 
			
		||||
			return nil, err
 | 
			
		||||
		}
 | 
			
		||||
		p.defaultVars = defaultVars
 | 
			
		||||
		return p, nil
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// EvalOption indicates an evaluation option that may affect the evaluation
// behavior or information in the output result.
type EvalOption int

// Bit-flag evaluation options. The const specs below rely on `iota`, so their
// order (and thus their values) must not change.
const (
	// OptTrackState will cause the runtime to return an immutable EvalState
	// value in the Result.
	OptTrackState EvalOption = 1 << iota

	// OptExhaustiveEval causes the runtime to disable short-circuits and track
	// state.
	OptExhaustiveEval EvalOption = 1<<iota | OptTrackState

	// OptOptimize precomputes functions and operators with constants as
	// arguments at program creation time. This flag is useful when the
	// expression will be evaluated repeatedly against a series of different
	// inputs.
	OptOptimize EvalOption = 1 << iota

	// OptPartialEval enables the evaluation of a partial state where the input
	// data that may be known to be missing, either as top-level variables, or
	// somewhere within a variable's object member graph.
	//
	// By itself, OptPartialEval does not change evaluation behavior unless the
	// input to the Program Eval is an PartialVars.
	OptPartialEval EvalOption = 1 << iota
)
 | 
			
		||||
 | 
			
		||||
// EvalOptions sets one or more evaluation options which may affect the evaluation or Result.
 | 
			
		||||
func EvalOptions(opts ...EvalOption) ProgramOption {
 | 
			
		||||
	return func(p *prog) (*prog, error) {
 | 
			
		||||
		for _, opt := range opts {
 | 
			
		||||
			p.evalOpts |= opt
 | 
			
		||||
		}
 | 
			
		||||
		return p, nil
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func fieldToCELType(field protoreflect.FieldDescriptor) (*exprpb.Type, error) {
 | 
			
		||||
	if field.Kind() == protoreflect.MessageKind {
 | 
			
		||||
		msgName := (string)(field.Message().FullName())
 | 
			
		||||
		wellKnownType, found := pb.CheckedWellKnowns[msgName]
 | 
			
		||||
		if found {
 | 
			
		||||
			return wellKnownType, nil
 | 
			
		||||
		}
 | 
			
		||||
		return decls.NewObjectType(msgName), nil
 | 
			
		||||
	}
 | 
			
		||||
	if primitiveType, found := pb.CheckedPrimitives[field.Kind()]; found {
 | 
			
		||||
		return primitiveType, nil
 | 
			
		||||
	}
 | 
			
		||||
	if field.Kind() == protoreflect.EnumKind {
 | 
			
		||||
		return decls.Int, nil
 | 
			
		||||
	}
 | 
			
		||||
	return nil, fmt.Errorf("field %s type %s not implemented", field.FullName(), field.Kind().String())
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func fieldToDecl(field protoreflect.FieldDescriptor) (*exprpb.Decl, error) {
 | 
			
		||||
	name := string(field.Name())
 | 
			
		||||
	if field.IsMap() {
 | 
			
		||||
		mapKey := field.MapKey()
 | 
			
		||||
		mapValue := field.MapValue()
 | 
			
		||||
		keyType, err := fieldToCELType(mapKey)
 | 
			
		||||
		if err != nil {
 | 
			
		||||
			return nil, err
 | 
			
		||||
		}
 | 
			
		||||
		valueType, err := fieldToCELType(mapValue)
 | 
			
		||||
		if err != nil {
 | 
			
		||||
			return nil, err
 | 
			
		||||
		}
 | 
			
		||||
		return decls.NewVar(name, decls.NewMapType(keyType, valueType)), nil
 | 
			
		||||
	} else if field.IsList() {
 | 
			
		||||
		elemType, err := fieldToCELType(field)
 | 
			
		||||
		if err != nil {
 | 
			
		||||
			return nil, err
 | 
			
		||||
		}
 | 
			
		||||
		return decls.NewVar(name, decls.NewListType(elemType)), nil
 | 
			
		||||
	} else {
 | 
			
		||||
		celType, err := fieldToCELType(field)
 | 
			
		||||
		if err != nil {
 | 
			
		||||
			return nil, err
 | 
			
		||||
		}
 | 
			
		||||
		return decls.NewVar(name, celType), nil
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// DeclareContextProto returns an option to extend CEL environment with declarations from the given context proto.
 | 
			
		||||
// Each field of the proto defines a variable of the same name in the environment.
 | 
			
		||||
// https://github.com/google/cel-spec/blob/master/doc/langdef.md#evaluation-environment
 | 
			
		||||
func DeclareContextProto(descriptor protoreflect.MessageDescriptor) EnvOption {
 | 
			
		||||
	return func(e *Env) (*Env, error) {
 | 
			
		||||
		var decls []*exprpb.Decl
 | 
			
		||||
		fields := descriptor.Fields()
 | 
			
		||||
		for i := 0; i < fields.Len(); i++ {
 | 
			
		||||
			field := fields.Get(i)
 | 
			
		||||
			decl, err := fieldToDecl(field)
 | 
			
		||||
			if err != nil {
 | 
			
		||||
				return nil, err
 | 
			
		||||
			}
 | 
			
		||||
			decls = append(decls, decl)
 | 
			
		||||
		}
 | 
			
		||||
		var err error
 | 
			
		||||
		e, err = Declarations(decls...)(e)
 | 
			
		||||
		if err != nil {
 | 
			
		||||
			return nil, err
 | 
			
		||||
		}
 | 
			
		||||
		return Types(dynamicpb.NewMessage(descriptor))(e)
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
							
								
								
									
										318
									
								
								vendor/github.com/google/cel-go/cel/program.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										318
									
								
								vendor/github.com/google/cel-go/cel/program.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							@@ -0,0 +1,318 @@
 | 
			
		||||
// Copyright 2019 Google LLC
 | 
			
		||||
//
 | 
			
		||||
// Licensed under the Apache License, Version 2.0 (the "License");
 | 
			
		||||
// you may not use this file except in compliance with the License.
 | 
			
		||||
// You may obtain a copy of the License at
 | 
			
		||||
//
 | 
			
		||||
//      http://www.apache.org/licenses/LICENSE-2.0
 | 
			
		||||
//
 | 
			
		||||
// Unless required by applicable law or agreed to in writing, software
 | 
			
		||||
// distributed under the License is distributed on an "AS IS" BASIS,
 | 
			
		||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | 
			
		||||
// See the License for the specific language governing permissions and
 | 
			
		||||
// limitations under the License.
 | 
			
		||||
 | 
			
		||||
package cel
 | 
			
		||||
 | 
			
		||||
import (
 | 
			
		||||
	"fmt"
 | 
			
		||||
	"math"
 | 
			
		||||
 | 
			
		||||
	"github.com/google/cel-go/common/types"
 | 
			
		||||
	"github.com/google/cel-go/common/types/ref"
 | 
			
		||||
	"github.com/google/cel-go/interpreter"
 | 
			
		||||
 | 
			
		||||
	exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
// Program is an evaluable view of an Ast.
 | 
			
		||||
type Program interface {
 | 
			
		||||
	// Eval returns the result of an evaluation of the Ast and environment against the input vars.
 | 
			
		||||
	//
 | 
			
		||||
	// The vars value may either be an `interpreter.Activation` or a `map[string]interface{}`.
 | 
			
		||||
	//
 | 
			
		||||
	// If the `OptTrackState` or `OptExhaustiveEval` flags are used, the `details` response will
 | 
			
		||||
	// be non-nil. Given this caveat on `details`, the return state from evaluation will be:
 | 
			
		||||
	//
 | 
			
		||||
	// *  `val`, `details`, `nil` - Successful evaluation of a non-error result.
 | 
			
		||||
	// *  `val`, `details`, `err` - Successful evaluation to an error result.
 | 
			
		||||
	// *  `nil`, `details`, `err` - Unsuccessful evaluation.
 | 
			
		||||
	//
 | 
			
		||||
	// An unsuccessful evaluation is typically the result of a series of incompatible `EnvOption`
 | 
			
		||||
	// or `ProgramOption` values used in the creation of the evaluation environment or executable
 | 
			
		||||
	// program.
 | 
			
		||||
	Eval(vars interface{}) (ref.Val, *EvalDetails, error)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// NoVars returns an empty Activation.
 | 
			
		||||
func NoVars() interpreter.Activation {
 | 
			
		||||
	return interpreter.EmptyActivation()
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// PartialVars returns a PartialActivation which contains variables and a set of AttributePattern
 | 
			
		||||
// values that indicate variables or parts of variables whose value are not yet known.
 | 
			
		||||
//
 | 
			
		||||
// The `vars` value may either be an interpreter.Activation or any valid input to the
 | 
			
		||||
// interpreter.NewActivation call.
 | 
			
		||||
func PartialVars(vars interface{},
 | 
			
		||||
	unknowns ...*interpreter.AttributePattern) (interpreter.PartialActivation, error) {
 | 
			
		||||
	return interpreter.NewPartialActivation(vars, unknowns...)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// AttributePattern returns an AttributePattern that matches a top-level variable. The pattern is
 | 
			
		||||
// mutable, and its methods support the specification of one or more qualifier patterns.
 | 
			
		||||
//
 | 
			
		||||
// For example, the AttributePattern(`a`).QualString(`b`) represents a variable access `a` with a
 | 
			
		||||
// string field or index qualification `b`. This pattern will match Attributes `a`, and `a.b`,
 | 
			
		||||
// but not `a.c`.
 | 
			
		||||
//
 | 
			
		||||
// When using a CEL expression within a container, e.g. a package or namespace, the variable name
 | 
			
		||||
// in the pattern must match the qualified name produced during the variable namespace resolution.
 | 
			
		||||
// For example, when variable `a` is declared within an expression whose container is `ns.app`, the
 | 
			
		||||
// fully qualified variable name may be `ns.app.a`, `ns.a`, or `a` per the CEL namespace resolution
 | 
			
		||||
// rules. Pick the fully qualified variable name that makes sense within the container as the
 | 
			
		||||
// AttributePattern `varName` argument.
 | 
			
		||||
//
 | 
			
		||||
// See the interpreter.AttributePattern and interpreter.AttributeQualifierPattern for more info
 | 
			
		||||
// about how to create and manipulate AttributePattern values.
 | 
			
		||||
func AttributePattern(varName string) *interpreter.AttributePattern {
 | 
			
		||||
	return interpreter.NewAttributePattern(varName)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// EvalDetails holds additional information observed during the Eval() call.
 | 
			
		||||
type EvalDetails struct {
 | 
			
		||||
	state interpreter.EvalState
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// State of the evaluation, non-nil if the OptTrackState or OptExhaustiveEval is specified
 | 
			
		||||
// within EvalOptions.
 | 
			
		||||
func (ed *EvalDetails) State() interpreter.EvalState {
 | 
			
		||||
	return ed.state
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// prog is the internal implementation of the Program interface.
 | 
			
		||||
type prog struct {
 | 
			
		||||
	*Env
 | 
			
		||||
	evalOpts      EvalOption
 | 
			
		||||
	decorators    []interpreter.InterpretableDecorator
 | 
			
		||||
	defaultVars   interpreter.Activation
 | 
			
		||||
	dispatcher    interpreter.Dispatcher
 | 
			
		||||
	interpreter   interpreter.Interpreter
 | 
			
		||||
	interpretable interpreter.Interpretable
 | 
			
		||||
	attrFactory   interpreter.AttributeFactory
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// progFactory is a helper alias for marking a program creation factory function.
 | 
			
		||||
type progFactory func(interpreter.EvalState) (Program, error)
 | 
			
		||||
 | 
			
		||||
// progGen holds a reference to a progFactory instance and implements the Program interface.
 | 
			
		||||
type progGen struct {
 | 
			
		||||
	factory progFactory
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// newProgram creates a program instance with an environment, an ast, and an optional list of
 | 
			
		||||
// ProgramOption values.
 | 
			
		||||
//
 | 
			
		||||
// If the program cannot be configured the prog will be nil, with a non-nil error response.
 | 
			
		||||
func newProgram(e *Env, ast *Ast, opts []ProgramOption) (Program, error) {
 | 
			
		||||
	// Build the dispatcher, interpreter, and default program value.
 | 
			
		||||
	disp := interpreter.NewDispatcher()
 | 
			
		||||
 | 
			
		||||
	// Ensure the default attribute factory is set after the adapter and provider are
 | 
			
		||||
	// configured.
 | 
			
		||||
	p := &prog{
 | 
			
		||||
		Env:        e,
 | 
			
		||||
		decorators: []interpreter.InterpretableDecorator{},
 | 
			
		||||
		dispatcher: disp,
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	// Configure the program via the ProgramOption values.
 | 
			
		||||
	var err error
 | 
			
		||||
	for _, opt := range opts {
 | 
			
		||||
		if opt == nil {
 | 
			
		||||
			return nil, fmt.Errorf("program options should be non-nil")
 | 
			
		||||
		}
 | 
			
		||||
		p, err = opt(p)
 | 
			
		||||
		if err != nil {
 | 
			
		||||
			return nil, err
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	// Set the attribute factory after the options have been set.
 | 
			
		||||
	if p.evalOpts&OptPartialEval == OptPartialEval {
 | 
			
		||||
		p.attrFactory = interpreter.NewPartialAttributeFactory(e.Container, e.adapter, e.provider)
 | 
			
		||||
	} else {
 | 
			
		||||
		p.attrFactory = interpreter.NewAttributeFactory(e.Container, e.adapter, e.provider)
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	interp := interpreter.NewInterpreter(disp, e.Container, e.provider, e.adapter, p.attrFactory)
 | 
			
		||||
	p.interpreter = interp
 | 
			
		||||
 | 
			
		||||
	// Translate the EvalOption flags into InterpretableDecorator instances.
 | 
			
		||||
	decorators := make([]interpreter.InterpretableDecorator, len(p.decorators))
 | 
			
		||||
	copy(decorators, p.decorators)
 | 
			
		||||
 | 
			
		||||
	// Enable constant folding first.
 | 
			
		||||
	if p.evalOpts&OptOptimize == OptOptimize {
 | 
			
		||||
		decorators = append(decorators, interpreter.Optimize())
 | 
			
		||||
	}
 | 
			
		||||
	// Enable exhaustive eval over state tracking since it offers a superset of features.
 | 
			
		||||
	if p.evalOpts&OptExhaustiveEval == OptExhaustiveEval {
 | 
			
		||||
		// State tracking requires that each Eval() call operate on an isolated EvalState
 | 
			
		||||
		// object; hence, the presence of the factory.
 | 
			
		||||
		factory := func(state interpreter.EvalState) (Program, error) {
 | 
			
		||||
			decs := append(decorators, interpreter.ExhaustiveEval(state))
 | 
			
		||||
			clone := &prog{
 | 
			
		||||
				evalOpts:    p.evalOpts,
 | 
			
		||||
				defaultVars: p.defaultVars,
 | 
			
		||||
				Env:         e,
 | 
			
		||||
				dispatcher:  disp,
 | 
			
		||||
				interpreter: interp}
 | 
			
		||||
			return initInterpretable(clone, ast, decs)
 | 
			
		||||
		}
 | 
			
		||||
		return initProgGen(factory)
 | 
			
		||||
	}
 | 
			
		||||
	// Enable state tracking last since it too requires the factory approach but is less
 | 
			
		||||
	// featured than the ExhaustiveEval decorator.
 | 
			
		||||
	if p.evalOpts&OptTrackState == OptTrackState {
 | 
			
		||||
		factory := func(state interpreter.EvalState) (Program, error) {
 | 
			
		||||
			decs := append(decorators, interpreter.TrackState(state))
 | 
			
		||||
			clone := &prog{
 | 
			
		||||
				evalOpts:    p.evalOpts,
 | 
			
		||||
				defaultVars: p.defaultVars,
 | 
			
		||||
				Env:         e,
 | 
			
		||||
				dispatcher:  disp,
 | 
			
		||||
				interpreter: interp}
 | 
			
		||||
			return initInterpretable(clone, ast, decs)
 | 
			
		||||
		}
 | 
			
		||||
		return initProgGen(factory)
 | 
			
		||||
	}
 | 
			
		||||
	return initInterpretable(p, ast, decorators)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// initProgGen tests the factory object by calling it once and returns a factory-based Program if
 | 
			
		||||
// the test is successful.
 | 
			
		||||
func initProgGen(factory progFactory) (Program, error) {
 | 
			
		||||
	// Test the factory to make sure that configuration errors are spotted at config
 | 
			
		||||
	_, err := factory(interpreter.NewEvalState())
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		return nil, err
 | 
			
		||||
	}
 | 
			
		||||
	return &progGen{factory: factory}, nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// initIterpretable creates a checked or unchecked interpretable depending on whether the Ast
 | 
			
		||||
// has been run through the type-checker.
 | 
			
		||||
func initInterpretable(
 | 
			
		||||
	p *prog,
 | 
			
		||||
	ast *Ast,
 | 
			
		||||
	decorators []interpreter.InterpretableDecorator) (Program, error) {
 | 
			
		||||
	var err error
 | 
			
		||||
	// Unchecked programs do not contain type and reference information and may be
 | 
			
		||||
	// slower to execute than their checked counterparts.
 | 
			
		||||
	if !ast.IsChecked() {
 | 
			
		||||
		p.interpretable, err =
 | 
			
		||||
			p.interpreter.NewUncheckedInterpretable(ast.Expr(), decorators...)
 | 
			
		||||
		if err != nil {
 | 
			
		||||
			return nil, err
 | 
			
		||||
		}
 | 
			
		||||
		return p, nil
 | 
			
		||||
	}
 | 
			
		||||
	// When the AST has been checked it contains metadata that can be used to speed up program
 | 
			
		||||
	// execution.
 | 
			
		||||
	var checked *exprpb.CheckedExpr
 | 
			
		||||
	checked, err = AstToCheckedExpr(ast)
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		return nil, err
 | 
			
		||||
	}
 | 
			
		||||
	p.interpretable, err = p.interpreter.NewInterpretable(checked, decorators...)
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		return nil, err
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return p, nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Eval implements the Program interface method.
 | 
			
		||||
func (p *prog) Eval(input interface{}) (v ref.Val, det *EvalDetails, err error) {
 | 
			
		||||
	// Configure error recovery for unexpected panics during evaluation. Note, the use of named
 | 
			
		||||
	// return values makes it possible to modify the error response during the recovery
 | 
			
		||||
	// function.
 | 
			
		||||
	defer func() {
 | 
			
		||||
		if r := recover(); r != nil {
 | 
			
		||||
			err = fmt.Errorf("internal error: %v", r)
 | 
			
		||||
		}
 | 
			
		||||
	}()
 | 
			
		||||
	// Build a hierarchical activation if there are default vars set.
 | 
			
		||||
	vars, err := interpreter.NewActivation(input)
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		return
 | 
			
		||||
	}
 | 
			
		||||
	if p.defaultVars != nil {
 | 
			
		||||
		vars = interpreter.NewHierarchicalActivation(p.defaultVars, vars)
 | 
			
		||||
	}
 | 
			
		||||
	v = p.interpretable.Eval(vars)
 | 
			
		||||
	// The output of an internal Eval may have a value (`v`) that is a types.Err. This step
 | 
			
		||||
	// translates the CEL value to a Go error response. This interface does not quite match the
 | 
			
		||||
	// RPC signature which allows for multiple errors to be returned, but should be sufficient.
 | 
			
		||||
	if types.IsError(v) {
 | 
			
		||||
		err = v.(*types.Err)
 | 
			
		||||
	}
 | 
			
		||||
	return
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Cost implements the Coster interface method.
 | 
			
		||||
func (p *prog) Cost() (min, max int64) {
 | 
			
		||||
	return estimateCost(p.interpretable)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Eval implements the Program interface method.
 | 
			
		||||
func (gen *progGen) Eval(input interface{}) (ref.Val, *EvalDetails, error) {
 | 
			
		||||
	// The factory based Eval() differs from the standard evaluation model in that it generates a
 | 
			
		||||
	// new EvalState instance for each call to ensure that unique evaluations yield unique stateful
 | 
			
		||||
	// results.
 | 
			
		||||
	state := interpreter.NewEvalState()
 | 
			
		||||
	det := &EvalDetails{state: state}
 | 
			
		||||
 | 
			
		||||
	// Generate a new instance of the interpretable using the factory configured during the call to
 | 
			
		||||
	// newProgram(). It is incredibly unlikely that the factory call will generate an error given
 | 
			
		||||
	// the factory test performed within the Program() call.
 | 
			
		||||
	p, err := gen.factory(state)
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		return nil, det, err
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	// Evaluate the input, returning the result and the 'state' within EvalDetails.
 | 
			
		||||
	v, _, err := p.Eval(input)
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		return v, det, err
 | 
			
		||||
	}
 | 
			
		||||
	return v, det, nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Cost implements the Coster interface method.
 | 
			
		||||
func (gen *progGen) Cost() (min, max int64) {
 | 
			
		||||
	// Use an empty state value since no evaluation is performed.
 | 
			
		||||
	p, err := gen.factory(emptyEvalState)
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		return 0, math.MaxInt64
 | 
			
		||||
	}
 | 
			
		||||
	return estimateCost(p)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
var (
	// emptyEvalState is a shared EvalState used for cost estimation, where no actual
	// evaluation (and hence no per-call state) is performed.
	emptyEvalState = interpreter.NewEvalState()
)
 | 
			
		||||
 | 
			
		||||
// EstimateCost returns the heuristic cost interval for the program.
// Programs that do not implement the interpreter.Coster interface report the widest
// possible interval (see estimateCost).
func EstimateCost(p Program) (min, max int64) {
	return estimateCost(p)
}
 | 
			
		||||
 | 
			
		||||
func estimateCost(i interface{}) (min, max int64) {
 | 
			
		||||
	c, ok := i.(interpreter.Coster)
 | 
			
		||||
	if !ok {
 | 
			
		||||
		return 0, math.MaxInt64
 | 
			
		||||
	}
 | 
			
		||||
	return c.Cost()
 | 
			
		||||
}
 | 
			
		||||
							
								
								
									
										57
									
								
								vendor/github.com/google/cel-go/checker/BUILD.bazel
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										57
									
								
								vendor/github.com/google/cel-go/checker/BUILD.bazel
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							@@ -0,0 +1,57 @@
 | 
			
		||||
# Bazel build definitions for the cel-go checker package (vendored, generated).
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")

package(
    licenses = ["notice"],  # Apache 2.0
)

# Library containing the CEL type-checker implementation.
go_library(
    name = "go_default_library",
    srcs = [
        "checker.go",
        "env.go",
        "errors.go",
        "mapping.go",
        "printer.go",
        "standard.go",
        "types.go",
    ],
    importpath = "github.com/google/cel-go/checker",
    visibility = ["//visibility:public"],
    deps = [
        "//checker/decls:go_default_library",
        "//common:go_default_library",
        "//common/containers:go_default_library",
        "//common/debug:go_default_library",
        "//common/operators:go_default_library",
        "//common/overloads:go_default_library",
        "//common/types:go_default_library",
        "//common/types/pb:go_default_library",
        "//common/types/ref:go_default_library",
        "//parser:go_default_library",
        "@org_golang_google_genproto//googleapis/api/expr/v1alpha1:go_default_library",
        "@org_golang_google_protobuf//proto:go_default_library",
        "@org_golang_google_protobuf//types/known/emptypb:go_default_library",
        "@org_golang_google_protobuf//types/known/structpb:go_default_library",
    ],
)

# Unit tests for the checker, embedded in the library target above.
go_test(
    name = "go_default_test",
    size = "small",
    srcs = [
        "checker_test.go",
        "env_test.go",
    ],
    embed = [
        ":go_default_library",
    ],
    deps = [
        "//common/types:go_default_library",
        "//parser:go_default_library",
        "//test:go_default_library",
        "//test/proto2pb:go_default_library",
        "//test/proto3pb:go_default_library",
        "@com_github_antlr//runtime/Go/antlr:go_default_library",
        "@org_golang_google_protobuf//proto:go_default_library",
    ],
)
 | 
			
		||||
							
								
								
									
										642
									
								
								vendor/github.com/google/cel-go/checker/checker.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										642
									
								
								vendor/github.com/google/cel-go/checker/checker.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							@@ -0,0 +1,642 @@
 | 
			
		||||
// Copyright 2018 Google LLC
 | 
			
		||||
//
 | 
			
		||||
// Licensed under the Apache License, Version 2.0 (the "License");
 | 
			
		||||
// you may not use this file except in compliance with the License.
 | 
			
		||||
// You may obtain a copy of the License at
 | 
			
		||||
//
 | 
			
		||||
//      http://www.apache.org/licenses/LICENSE-2.0
 | 
			
		||||
//
 | 
			
		||||
// Unless required by applicable law or agreed to in writing, software
 | 
			
		||||
// distributed under the License is distributed on an "AS IS" BASIS,
 | 
			
		||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | 
			
		||||
// See the License for the specific language governing permissions and
 | 
			
		||||
// limitations under the License.
 | 
			
		||||
 | 
			
		||||
// Package checker defines functions to type-checked a parsed expression
 | 
			
		||||
// against a set of identifier and function declarations.
 | 
			
		||||
package checker
 | 
			
		||||
 | 
			
		||||
import (
 | 
			
		||||
	"fmt"
 | 
			
		||||
	"reflect"
 | 
			
		||||
 | 
			
		||||
	"github.com/google/cel-go/checker/decls"
 | 
			
		||||
	"github.com/google/cel-go/common"
 | 
			
		||||
	"github.com/google/cel-go/common/containers"
 | 
			
		||||
	"github.com/google/cel-go/common/types/ref"
 | 
			
		||||
 | 
			
		||||
	"google.golang.org/protobuf/proto"
 | 
			
		||||
 | 
			
		||||
	exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
// checker carries the mutable state for a single type-checking pass over one parsed
// expression.
type checker struct {
	env                *Env                        // declaration scopes and type provider
	errors             *typeErrors                 // accumulated type errors
	mappings           *mapping                    // type-parameter substitutions discovered so far
	freeTypeVarCounter int                         // counter used to mint fresh type variables
	sourceInfo         *exprpb.SourceInfo          // positions used for error locations
	types              map[int64]*exprpb.Type      // expr id -> checked type
	references         map[int64]*exprpb.Reference // expr id -> resolved ident/overload reference
}
 | 
			
		||||
 | 
			
		||||
// Check performs type checking, giving a typed AST.
// The input is a ParsedExpr proto and an env which encapsulates
// type binding of variables, declarations of built-in functions,
// descriptions of protocol buffers, and a registry for errors.
// Returns a CheckedExpr proto, which might not be usable if
// there are errors in the error registry.
func Check(parsedExpr *exprpb.ParsedExpr,
	source common.Source,
	env *Env) (*exprpb.CheckedExpr, *common.Errors) {
	c := checker{
		env:                env,
		errors:             &typeErrors{common.NewErrors(source)},
		mappings:           newMapping(),
		freeTypeVarCounter: 0,
		sourceInfo:         parsedExpr.GetSourceInfo(),
		types:              make(map[int64]*exprpb.Type),
		references:         make(map[int64]*exprpb.Reference),
	}
	// Recursively check the expression tree, populating c.types and c.references.
	c.check(parsedExpr.GetExpr())

	// Walk over the final type map substituting any type parameters either by their bound value or
	// by DYN.
	m := make(map[int64]*exprpb.Type)
	for k, v := range c.types {
		m[k] = substitute(c.mappings, v, true)
	}

	// The checked result is returned even when errors were recorded; callers are expected
	// to consult the returned Errors before using the CheckedExpr.
	return &exprpb.CheckedExpr{
		Expr:         parsedExpr.GetExpr(),
		SourceInfo:   parsedExpr.GetSourceInfo(),
		TypeMap:      m,
		ReferenceMap: c.references,
	}, c.errors.Errors
}
 | 
			
		||||
 | 
			
		||||
// check type-checks a single expression node, dispatching on the expression kind.
// Nil expressions are silently ignored.
func (c *checker) check(e *exprpb.Expr) {
	if e == nil {
		return
	}

	switch e.ExprKind.(type) {
	case *exprpb.Expr_ConstExpr:
		// Constants are typed directly from the kind of literal they carry.
		literal := e.GetConstExpr()
		switch literal.ConstantKind.(type) {
		case *exprpb.Constant_BoolValue:
			c.checkBoolLiteral(e)
		case *exprpb.Constant_BytesValue:
			c.checkBytesLiteral(e)
		case *exprpb.Constant_DoubleValue:
			c.checkDoubleLiteral(e)
		case *exprpb.Constant_Int64Value:
			c.checkInt64Literal(e)
		case *exprpb.Constant_NullValue:
			c.checkNullLiteral(e)
		case *exprpb.Constant_StringValue:
			c.checkStringLiteral(e)
		case *exprpb.Constant_Uint64Value:
			c.checkUint64Literal(e)
		}
	case *exprpb.Expr_IdentExpr:
		c.checkIdent(e)
	case *exprpb.Expr_SelectExpr:
		c.checkSelect(e)
	case *exprpb.Expr_CallExpr:
		c.checkCall(e)
	case *exprpb.Expr_ListExpr:
		c.checkCreateList(e)
	case *exprpb.Expr_StructExpr:
		c.checkCreateStruct(e)
	case *exprpb.Expr_ComprehensionExpr:
		c.checkComprehension(e)
	default:
		// An unknown node kind indicates a mismatch between parser and checker versions.
		c.errors.ReportError(
			c.location(e), "Unrecognized ast type: %v", reflect.TypeOf(e))
	}
}
 | 
			
		||||
 | 
			
		||||
// checkInt64Literal assigns the int type to an int64 constant expression.
func (c *checker) checkInt64Literal(e *exprpb.Expr) {
	c.setType(e, decls.Int)
}

// checkUint64Literal assigns the uint type to a uint64 constant expression.
func (c *checker) checkUint64Literal(e *exprpb.Expr) {
	c.setType(e, decls.Uint)
}

// checkStringLiteral assigns the string type to a string constant expression.
func (c *checker) checkStringLiteral(e *exprpb.Expr) {
	c.setType(e, decls.String)
}

// checkBytesLiteral assigns the bytes type to a bytes constant expression.
func (c *checker) checkBytesLiteral(e *exprpb.Expr) {
	c.setType(e, decls.Bytes)
}

// checkDoubleLiteral assigns the double type to a double constant expression.
func (c *checker) checkDoubleLiteral(e *exprpb.Expr) {
	c.setType(e, decls.Double)
}

// checkBoolLiteral assigns the bool type to a boolean constant expression.
func (c *checker) checkBoolLiteral(e *exprpb.Expr) {
	c.setType(e, decls.Bool)
}

// checkNullLiteral assigns the null type to a null constant expression.
func (c *checker) checkNullLiteral(e *exprpb.Expr) {
	c.setType(e, decls.Null)
}
 | 
			
		||||
 | 
			
		||||
func (c *checker) checkIdent(e *exprpb.Expr) {
 | 
			
		||||
	identExpr := e.GetIdentExpr()
 | 
			
		||||
	// Check to see if the identifier is declared.
 | 
			
		||||
	if ident := c.env.LookupIdent(identExpr.GetName()); ident != nil {
 | 
			
		||||
		c.setType(e, ident.GetIdent().Type)
 | 
			
		||||
		c.setReference(e, newIdentReference(ident.GetName(), ident.GetIdent().Value))
 | 
			
		||||
		// Overwrite the identifier with its fully qualified name.
 | 
			
		||||
		identExpr.Name = ident.GetName()
 | 
			
		||||
		return
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	c.setType(e, decls.Error)
 | 
			
		||||
	c.errors.undeclaredReference(
 | 
			
		||||
		c.location(e), c.env.container.Name(), identExpr.GetName())
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// checkSelect type-checks a select expression, which may denote a fully-qualified
// variable name, a presence test, a map lookup, or a proto message field access.
func (c *checker) checkSelect(e *exprpb.Expr) {
	sel := e.GetSelectExpr()
	// Before traversing down the tree, try to interpret as qualified name.
	qname, found := containers.ToQualifiedName(e)
	if found {
		ident := c.env.LookupIdent(qname)
		if ident != nil {
			if sel.TestOnly {
				// A test-only select of a declared identifier does not select a field.
				c.errors.expressionDoesNotSelectField(c.location(e))
				c.setType(e, decls.Bool)
				return
			}
			// Rewrite the node to be a variable reference to the resolved fully-qualified
			// variable name.
			c.setType(e, ident.GetIdent().Type)
			c.setReference(e, newIdentReference(ident.GetName(), ident.GetIdent().Value))
			identName := ident.GetName()
			e.ExprKind = &exprpb.Expr_IdentExpr{
				IdentExpr: &exprpb.Expr_Ident{
					Name: identName,
				},
			}
			return
		}
	}

	// Interpret as field selection, first traversing down the operand.
	c.check(sel.Operand)
	targetType := c.getType(sel.Operand)
	// Assume error type by default as most types do not support field selection.
	resultType := decls.Error
	switch kindOf(targetType) {
	case kindMap:
		// Maps yield their value type as the selection result type.
		mapType := targetType.GetMapType()
		resultType = mapType.ValueType
	case kindObject:
		// Objects yield their field type declaration as the selection result type, but only if
		// the field is defined.
		messageType := targetType
		if fieldType, found := c.lookupFieldType(
			c.location(e),
			messageType.GetMessageType(),
			sel.Field); found {
			resultType = fieldType.Type
		}
	case kindTypeParam:
		// Set the operand type to DYN to prevent assignment to a potentionally incorrect type
		// at a later point in type-checking. The isAssignable call will update the type
		// substitutions for the type param under the covers.
		c.isAssignable(decls.Dyn, targetType)
		// Also, set the result type to DYN.
		resultType = decls.Dyn
	default:
		// Dynamic / error values are treated as DYN type. Errors are handled this way as well
		// in order to allow forward progress on the check.
		if isDynOrError(targetType) {
			resultType = decls.Dyn
		} else {
			c.errors.typeDoesNotSupportFieldSelection(c.location(e), targetType)
		}
	}
	// Presence tests always produce a boolean, regardless of the selected field's type.
	if sel.TestOnly {
		resultType = decls.Bool
	}
	c.setType(e, resultType)
}
 | 
			
		||||
 | 
			
		||||
// checkCall type-checks a function or method invocation, resolving the function name
// against the environment (global, namespaced, or receiver-style) and then attempting
// overload resolution for the argument types.
func (c *checker) checkCall(e *exprpb.Expr) {
	// Note: similar logic exists within the `interpreter/planner.go`. If making changes here
	// please consider the impact on planner.go and consolidate implementations or mirror code
	// as appropriate.
	call := e.GetCallExpr()
	target := call.GetTarget()
	args := call.GetArgs()
	fnName := call.GetFunction()

	// Traverse arguments.
	for _, arg := range args {
		c.check(arg)
	}

	// Regular static call with simple name.
	if target == nil {
		// Check for the existence of the function.
		fn := c.env.LookupFunction(fnName)
		if fn == nil {
			c.errors.undeclaredReference(
				c.location(e), c.env.container.Name(), fnName)
			c.setType(e, decls.Error)
			return
		}
		// Overwrite the function name with its fully qualified resolved name.
		call.Function = fn.GetName()
		// Check to see whether the overload resolves.
		c.resolveOverloadOrError(c.location(e), e, fn, nil, args)
		return
	}

	// If a receiver 'target' is present, it may either be a receiver function, or a namespaced
	// function, but not both. Given a.b.c() either a.b.c is a function or c is a function with
	// target a.b.
	//
	// Check whether the target is a namespaced function name.
	qualifiedPrefix, maybeQualified := containers.ToQualifiedName(target)
	if maybeQualified {
		maybeQualifiedName := qualifiedPrefix + "." + fnName
		fn := c.env.LookupFunction(maybeQualifiedName)
		if fn != nil {
			// The function name is namespaced and so preserving the target operand would
			// be an inaccurate representation of the desired evaluation behavior.
			// Overwrite with fully-qualified resolved function name sans receiver target.
			call.Target = nil
			call.Function = fn.GetName()
			c.resolveOverloadOrError(c.location(e), e, fn, nil, args)
			return
		}
	}

	// Regular instance call.
	c.check(call.Target)
	fn := c.env.LookupFunction(fnName)
	// Function found, attempt overload resolution.
	if fn != nil {
		c.resolveOverloadOrError(c.location(e), e, fn, target, args)
		return
	}
	// Function name not declared, record error.
	c.errors.undeclaredReference(c.location(e), c.env.container.Name(), fnName)
}
 | 
			
		||||
 | 
			
		||||
func (c *checker) resolveOverloadOrError(
 | 
			
		||||
	loc common.Location,
 | 
			
		||||
	e *exprpb.Expr,
 | 
			
		||||
	fn *exprpb.Decl, target *exprpb.Expr, args []*exprpb.Expr) {
 | 
			
		||||
	// Attempt to resolve the overload.
 | 
			
		||||
	resolution := c.resolveOverload(loc, fn, target, args)
 | 
			
		||||
	// No such overload, error noted in the resolveOverload call, type recorded here.
 | 
			
		||||
	if resolution == nil {
 | 
			
		||||
		c.setType(e, decls.Error)
 | 
			
		||||
		return
 | 
			
		||||
	}
 | 
			
		||||
	// Overload found.
 | 
			
		||||
	c.setType(e, resolution.Type)
 | 
			
		||||
	c.setReference(e, resolution.Reference)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (c *checker) resolveOverload(
 | 
			
		||||
	loc common.Location,
 | 
			
		||||
	fn *exprpb.Decl, target *exprpb.Expr, args []*exprpb.Expr) *overloadResolution {
 | 
			
		||||
 | 
			
		||||
	var argTypes []*exprpb.Type
 | 
			
		||||
	if target != nil {
 | 
			
		||||
		argTypes = append(argTypes, c.getType(target))
 | 
			
		||||
	}
 | 
			
		||||
	for _, arg := range args {
 | 
			
		||||
		argTypes = append(argTypes, c.getType(arg))
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	var resultType *exprpb.Type
 | 
			
		||||
	var checkedRef *exprpb.Reference
 | 
			
		||||
	for _, overload := range fn.GetFunction().Overloads {
 | 
			
		||||
		if (target == nil && overload.IsInstanceFunction) ||
 | 
			
		||||
			(target != nil && !overload.IsInstanceFunction) {
 | 
			
		||||
			// not a compatible call style.
 | 
			
		||||
			continue
 | 
			
		||||
		}
 | 
			
		||||
 | 
			
		||||
		overloadType := decls.NewFunctionType(overload.ResultType, overload.Params...)
 | 
			
		||||
		if len(overload.TypeParams) > 0 {
 | 
			
		||||
			// Instantiate overload's type with fresh type variables.
 | 
			
		||||
			substitutions := newMapping()
 | 
			
		||||
			for _, typePar := range overload.TypeParams {
 | 
			
		||||
				substitutions.add(decls.NewTypeParamType(typePar), c.newTypeVar())
 | 
			
		||||
			}
 | 
			
		||||
			overloadType = substitute(substitutions, overloadType, false)
 | 
			
		||||
		}
 | 
			
		||||
 | 
			
		||||
		candidateArgTypes := overloadType.GetFunction().ArgTypes
 | 
			
		||||
		if c.isAssignableList(argTypes, candidateArgTypes) {
 | 
			
		||||
			if checkedRef == nil {
 | 
			
		||||
				checkedRef = newFunctionReference(overload.OverloadId)
 | 
			
		||||
			} else {
 | 
			
		||||
				checkedRef.OverloadId = append(checkedRef.OverloadId, overload.OverloadId)
 | 
			
		||||
			}
 | 
			
		||||
 | 
			
		||||
			// First matching overload, determines result type.
 | 
			
		||||
			fnResultType := substitute(c.mappings,
 | 
			
		||||
				overloadType.GetFunction().ResultType,
 | 
			
		||||
				false)
 | 
			
		||||
			if resultType == nil {
 | 
			
		||||
				resultType = fnResultType
 | 
			
		||||
			} else if !isDyn(resultType) && !proto.Equal(fnResultType, resultType) {
 | 
			
		||||
				resultType = decls.Dyn
 | 
			
		||||
			}
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	if resultType == nil {
 | 
			
		||||
		c.errors.noMatchingOverload(loc, fn.GetName(), argTypes, target != nil)
 | 
			
		||||
		resultType = decls.Error
 | 
			
		||||
		return nil
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return newResolution(checkedRef, resultType)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (c *checker) checkCreateList(e *exprpb.Expr) {
 | 
			
		||||
	create := e.GetListExpr()
 | 
			
		||||
	var elemType *exprpb.Type
 | 
			
		||||
	for _, e := range create.Elements {
 | 
			
		||||
		c.check(e)
 | 
			
		||||
		elemType = c.joinTypes(c.location(e), elemType, c.getType(e))
 | 
			
		||||
	}
 | 
			
		||||
	if elemType == nil {
 | 
			
		||||
		// If the list is empty, assign free type var to elem type.
 | 
			
		||||
		elemType = c.newTypeVar()
 | 
			
		||||
	}
 | 
			
		||||
	c.setType(e, decls.NewListType(elemType))
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (c *checker) checkCreateStruct(e *exprpb.Expr) {
 | 
			
		||||
	str := e.GetStructExpr()
 | 
			
		||||
	if str.MessageName != "" {
 | 
			
		||||
		c.checkCreateMessage(e)
 | 
			
		||||
	} else {
 | 
			
		||||
		c.checkCreateMap(e)
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// checkCreateMap type-checks a map construction by joining all key types and all value
// types across the entries. An empty map receives fresh type variables for both.
func (c *checker) checkCreateMap(e *exprpb.Expr) {
	mapVal := e.GetStructExpr()
	var keyType *exprpb.Type
	var valueType *exprpb.Type
	for _, ent := range mapVal.GetEntries() {
		key := ent.GetMapKey()
		c.check(key)
		keyType = c.joinTypes(c.location(key), keyType, c.getType(key))

		c.check(ent.Value)
		valueType = c.joinTypes(c.location(ent.Value), valueType, c.getType(ent.Value))
	}
	if keyType == nil {
		// If the map is empty, assign free type variables to typeKey and value type.
		keyType = c.newTypeVar()
		valueType = c.newTypeVar()
	}
	c.setType(e, decls.NewMapType(keyType, valueType))
}
 | 
			
		||||
 | 
			
		||||
// checkCreateMessage type-checks a proto message construction: resolves the message type
// name against the environment, rewrites it to its fully-qualified form, and validates
// each field initializer against the declared field type.
func (c *checker) checkCreateMessage(e *exprpb.Expr) {
	msgVal := e.GetStructExpr()
	// Determine the type of the message.
	messageType := decls.Error
	decl := c.env.LookupIdent(msgVal.MessageName)
	if decl == nil {
		c.errors.undeclaredReference(
			c.location(e), c.env.container.Name(), msgVal.MessageName)
		return
	}
	// Ensure the type name is fully qualified in the AST.
	msgVal.MessageName = decl.GetName()
	c.setReference(e, newIdentReference(decl.GetName(), nil))
	ident := decl.GetIdent()
	identKind := kindOf(ident.Type)
	if identKind != kindError {
		if identKind != kindType {
			// The declared name exists but is not a type (e.g. a variable).
			c.errors.notAType(c.location(e), ident.Type)
		} else {
			messageType = ident.Type.GetType()
			if kindOf(messageType) != kindObject {
				// Only object (message) types may be constructed with field initializers.
				c.errors.notAMessageType(c.location(e), messageType)
				messageType = decls.Error
			}
		}
	}
	// Well-known wrapper/struct types check as their unwrapped CEL equivalents.
	if isObjectWellKnownType(messageType) {
		c.setType(e, getObjectWellKnownType(messageType))
	} else {
		c.setType(e, messageType)
	}

	// Check the field initializers.
	for _, ent := range msgVal.GetEntries() {
		field := ent.GetFieldKey()
		value := ent.Value
		c.check(value)

		fieldType := decls.Error
		if t, found := c.lookupFieldType(
			c.locationByID(ent.Id),
			messageType.GetMessageType(),
			field); found {
			fieldType = t.Type
		}
		if !c.isAssignable(fieldType, c.getType(value)) {
			c.errors.fieldTypeMismatch(
				c.locationByID(ent.Id), field, fieldType, c.getType(value))
		}
	}
}
 | 
			
		||||
 | 
			
		||||
// checkComprehension type-checks a comprehension expression. The iteration variable's
// type is derived from the range (list element, map key, or DYN); the loop condition
// must be bool; the loop step must match the accumulator's type; and the overall type
// is the type of the result expression. Scopes are entered/exited around the
// accumulator and iteration variables — the order of operations here is significant.
func (c *checker) checkComprehension(e *exprpb.Expr) {
	comp := e.GetComprehensionExpr()
	c.check(comp.IterRange)
	c.check(comp.AccuInit)
	accuType := c.getType(comp.AccuInit)
	rangeType := c.getType(comp.IterRange)
	var varType *exprpb.Type

	switch kindOf(rangeType) {
	case kindList:
		varType = rangeType.GetListType().ElemType
	case kindMap:
		// Ranges over the keys.
		varType = rangeType.GetMapType().KeyType
	case kindDyn, kindError, kindTypeParam:
		// Set the range type to DYN to prevent assignment to a potentionally incorrect type
		// at a later point in type-checking. The isAssignable call will update the type
		// substitutions for the type param under the covers.
		c.isAssignable(decls.Dyn, rangeType)
		// Set the range iteration variable to type DYN as well.
		varType = decls.Dyn
	default:
		c.errors.notAComprehensionRange(c.location(comp.IterRange), rangeType)
		varType = decls.Error
	}

	// Create a scope for the comprehension since it has a local accumulation variable.
	// This scope will contain the accumulation variable used to compute the result.
	c.env = c.env.enterScope()
	c.env.Add(decls.NewVar(comp.AccuVar, accuType))
	// Create a block scope for the loop.
	c.env = c.env.enterScope()
	c.env.Add(decls.NewVar(comp.IterVar, varType))
	// Check the variable references in the condition and step.
	c.check(comp.LoopCondition)
	c.assertType(comp.LoopCondition, decls.Bool)
	c.check(comp.LoopStep)
	c.assertType(comp.LoopStep, accuType)
	// Exit the loop's block scope before checking the result.
	c.env = c.env.exitScope()
	c.check(comp.Result)
	// Exit the comprehension scope.
	c.env = c.env.exitScope()
	c.setType(e, c.getType(comp.Result))
}
 | 
			
		||||
 | 
			
		||||
// Checks compatibility of joined types, and returns the most general common type.
 | 
			
		||||
func (c *checker) joinTypes(loc common.Location,
 | 
			
		||||
	previous *exprpb.Type,
 | 
			
		||||
	current *exprpb.Type) *exprpb.Type {
 | 
			
		||||
	if previous == nil {
 | 
			
		||||
		return current
 | 
			
		||||
	}
 | 
			
		||||
	if c.isAssignable(previous, current) {
 | 
			
		||||
		return mostGeneral(previous, current)
 | 
			
		||||
	}
 | 
			
		||||
	if c.dynAggregateLiteralElementTypesEnabled() {
 | 
			
		||||
		return decls.Dyn
 | 
			
		||||
	}
 | 
			
		||||
	c.errors.typeMismatch(loc, previous, current)
 | 
			
		||||
	return decls.Error
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// dynAggregateLiteralElementTypesEnabled reports whether mixed-type aggregate literal
// elements should be typed as DYN rather than reported as type mismatches.
func (c *checker) dynAggregateLiteralElementTypesEnabled() bool {
	return c.env.aggLitElemType == dynElementType
}
 | 
			
		||||
 | 
			
		||||
func (c *checker) newTypeVar() *exprpb.Type {
 | 
			
		||||
	id := c.freeTypeVarCounter
 | 
			
		||||
	c.freeTypeVarCounter++
 | 
			
		||||
	return decls.NewTypeParamType(fmt.Sprintf("_var%d", id))
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (c *checker) isAssignable(t1 *exprpb.Type, t2 *exprpb.Type) bool {
 | 
			
		||||
	subs := isAssignable(c.mappings, t1, t2)
 | 
			
		||||
	if subs != nil {
 | 
			
		||||
		c.mappings = subs
 | 
			
		||||
		return true
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return false
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (c *checker) isAssignableList(l1 []*exprpb.Type, l2 []*exprpb.Type) bool {
 | 
			
		||||
	subs := isAssignableList(c.mappings, l1, l2)
 | 
			
		||||
	if subs != nil {
 | 
			
		||||
		c.mappings = subs
 | 
			
		||||
		return true
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return false
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (c *checker) lookupFieldType(l common.Location, messageType string, fieldName string) (*ref.FieldType, bool) {
 | 
			
		||||
	if _, found := c.env.provider.FindType(messageType); !found {
 | 
			
		||||
		// This should not happen, anyway, report an error.
 | 
			
		||||
		c.errors.unexpectedFailedResolution(l, messageType)
 | 
			
		||||
		return nil, false
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	if ft, found := c.env.provider.FindFieldType(messageType, fieldName); found {
 | 
			
		||||
		return ft, found
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	c.errors.undefinedField(l, fieldName)
 | 
			
		||||
	return nil, false
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// setType records the checked type for the expression's id, reporting an error if a
// different (non-equal) type was already recorded for the same id.
func (c *checker) setType(e *exprpb.Expr, t *exprpb.Type) {
	if old, found := c.types[e.Id]; found && !proto.Equal(old, t) {
		c.errors.ReportError(c.location(e),
			"(Incompatible) Type already exists for expression: %v(%d) old:%v, new:%v", e, e.GetId(), old, t)
		return
	}
	c.types[e.Id] = t
}
 | 
			
		||||
 | 
			
		||||
// getType returns the type previously recorded for the expression's id, or nil if none.
func (c *checker) getType(e *exprpb.Expr) *exprpb.Type {
	return c.types[e.Id]
}
 | 
			
		||||
 | 
			
		||||
// setReference records the resolved reference for the expression's id, reporting an
// error if a different (non-equal) reference was already recorded for the same id.
func (c *checker) setReference(e *exprpb.Expr, r *exprpb.Reference) {
	if old, found := c.references[e.Id]; found && !proto.Equal(old, r) {
		c.errors.ReportError(c.location(e),
			"Reference already exists for expression: %v(%d) old:%v, new:%v", e, e.Id, old, r)
		return
	}
	c.references[e.Id] = r
}
 | 
			
		||||
 | 
			
		||||
// assertType records a type-mismatch error when the expression's checked type is not
// assignable to the expected type t.
func (c *checker) assertType(e *exprpb.Expr, t *exprpb.Type) {
	if !c.isAssignable(t, c.getType(e)) {
		c.errors.typeMismatch(c.location(e), t, c.getType(e))
	}
}
 | 
			
		||||
 | 
			
		||||
// overloadResolution is the outcome of resolving a call against a function's overloads:
// the reference carrying the matched overload ids, and the resolved result type.
type overloadResolution struct {
	Reference *exprpb.Reference
	Type      *exprpb.Type
}

// newResolution pairs a checked overload reference with its resolved result type.
func newResolution(checkedRef *exprpb.Reference, t *exprpb.Type) *overloadResolution {
	return &overloadResolution{
		Reference: checkedRef,
		Type:      t,
	}
}
 | 
			
		||||
 | 
			
		||||
// location returns the source location associated with the expression's id.
func (c *checker) location(e *exprpb.Expr) common.Location {
	return c.locationByID(e.Id)
}
 | 
			
		||||
 | 
			
		||||
// locationByID converts the character offset recorded for the given
// expression id into a line/column location, or NoLocation when the id has no
// recorded position.
func (c *checker) locationByID(id int64) common.Location {
	positions := c.sourceInfo.GetPositions()
	var line = 1
	if offset, found := positions[id]; found {
		col := int(offset)
		// Each line-start offset that precedes the expression's offset bumps
		// the line count and rebases the column relative to that line start.
		// Assumes LineOffsets is sorted ascending — hence the early break.
		for _, lineOffset := range c.sourceInfo.LineOffsets {
			if lineOffset < offset {
				line++
				col = int(offset - lineOffset)
			} else {
				break
			}
		}
		return common.NewLocation(line, col)
	}
	// No position recorded for this expression id.
	return common.NoLocation
}
 | 
			
		||||
 | 
			
		||||
// newIdentReference creates an identifier reference with an optional constant
// value (set for enum-like identifiers).
func newIdentReference(name string, value *exprpb.Constant) *exprpb.Reference {
	return &exprpb.Reference{Name: name, Value: value}
}
 | 
			
		||||
 | 
			
		||||
// newFunctionReference creates a reference to one or more candidate overload
// ids for a function call.
func newFunctionReference(overloads ...string) *exprpb.Reference {
	return &exprpb.Reference{OverloadId: overloads}
}
 | 
			
		||||
							
								
								
									
										20
									
								
								vendor/github.com/google/cel-go/checker/decls/BUILD.bazel
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										20
									
								
								vendor/github.com/google/cel-go/checker/decls/BUILD.bazel
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							@@ -0,0 +1,20 @@
 | 
			
		||||
# Bazel build definition for the vendored cel-go checker/decls package.
load("@io_bazel_rules_go//go:def.bzl", "go_library")

package(
    default_visibility = ["//visibility:public"],
    licenses = ["notice"],  # Apache 2.0
)

go_library(
    name = "go_default_library",
    srcs = [
        "decls.go",
        "scopes.go",
    ],
    importpath = "github.com/google/cel-go/checker/decls",
    deps = [
        "@org_golang_google_genproto//googleapis/api/expr/v1alpha1:go_default_library",
        "@org_golang_google_protobuf//types/known/emptypb:go_default_library",
        "@org_golang_google_protobuf//types/known/structpb:go_default_library",
    ],
)
 | 
			
		||||
							
								
								
									
										231
									
								
								vendor/github.com/google/cel-go/checker/decls/decls.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										231
									
								
								vendor/github.com/google/cel-go/checker/decls/decls.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							@@ -0,0 +1,231 @@
 | 
			
		||||
// Copyright 2018 Google LLC
 | 
			
		||||
//
 | 
			
		||||
// Licensed under the Apache License, Version 2.0 (the "License");
 | 
			
		||||
// you may not use this file except in compliance with the License.
 | 
			
		||||
// You may obtain a copy of the License at
 | 
			
		||||
//
 | 
			
		||||
//      http://www.apache.org/licenses/LICENSE-2.0
 | 
			
		||||
//
 | 
			
		||||
// Unless required by applicable law or agreed to in writing, software
 | 
			
		||||
// distributed under the License is distributed on an "AS IS" BASIS,
 | 
			
		||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | 
			
		||||
// See the License for the specific language governing permissions and
 | 
			
		||||
// limitations under the License.
 | 
			
		||||
 | 
			
		||||
// Package decls provides helpers for creating variable and function declarations.
 | 
			
		||||
package decls
 | 
			
		||||
 | 
			
		||||
import (
 | 
			
		||||
	emptypb "google.golang.org/protobuf/types/known/emptypb"
 | 
			
		||||
	structpb "google.golang.org/protobuf/types/known/structpb"
 | 
			
		||||
	exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
var (
	// Error type used to communicate issues during type-checking.
	Error = &exprpb.Type{
		TypeKind: &exprpb.Type_Error{
			Error: &emptypb.Empty{}}}

	// Dyn is a top-type used to represent any value.
	Dyn = &exprpb.Type{
		TypeKind: &exprpb.Type_Dyn{
			Dyn: &emptypb.Empty{}}}
)
 | 
			
		||||
 | 
			
		||||
// Commonly used types.
var (
	Bool   = NewPrimitiveType(exprpb.Type_BOOL)
	Bytes  = NewPrimitiveType(exprpb.Type_BYTES)
	Double = NewPrimitiveType(exprpb.Type_DOUBLE)
	Int    = NewPrimitiveType(exprpb.Type_INT64)
	// Null is modeled on the proto NullValue rather than as a primitive kind.
	Null   = &exprpb.Type{
		TypeKind: &exprpb.Type_Null{
			Null: structpb.NullValue_NULL_VALUE}}
	String = NewPrimitiveType(exprpb.Type_STRING)
	Uint   = NewPrimitiveType(exprpb.Type_UINT64)
)
 | 
			
		||||
 | 
			
		||||
// Well-known types.
// TODO: Replace with an abstract type registry.
var (
	Any       = NewWellKnownType(exprpb.Type_ANY)
	Duration  = NewWellKnownType(exprpb.Type_DURATION)
	Timestamp = NewWellKnownType(exprpb.Type_TIMESTAMP)
)
 | 
			
		||||
 | 
			
		||||
// NewAbstractType creates an abstract type declaration which references a proto
 | 
			
		||||
// message name and may also include type parameters.
 | 
			
		||||
func NewAbstractType(name string, paramTypes ...*exprpb.Type) *exprpb.Type {
 | 
			
		||||
	return &exprpb.Type{
 | 
			
		||||
		TypeKind: &exprpb.Type_AbstractType_{
 | 
			
		||||
			AbstractType: &exprpb.Type_AbstractType{
 | 
			
		||||
				Name:           name,
 | 
			
		||||
				ParameterTypes: paramTypes}}}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// NewFunctionType creates a function invocation contract, typically only used
 | 
			
		||||
// by type-checking steps after overload resolution.
 | 
			
		||||
func NewFunctionType(resultType *exprpb.Type,
 | 
			
		||||
	argTypes ...*exprpb.Type) *exprpb.Type {
 | 
			
		||||
	return &exprpb.Type{
 | 
			
		||||
		TypeKind: &exprpb.Type_Function{
 | 
			
		||||
			Function: &exprpb.Type_FunctionType{
 | 
			
		||||
				ResultType: resultType,
 | 
			
		||||
				ArgTypes:   argTypes}}}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// NewFunction creates a named function declaration with one or more overloads.
 | 
			
		||||
func NewFunction(name string,
 | 
			
		||||
	overloads ...*exprpb.Decl_FunctionDecl_Overload) *exprpb.Decl {
 | 
			
		||||
	return &exprpb.Decl{
 | 
			
		||||
		Name: name,
 | 
			
		||||
		DeclKind: &exprpb.Decl_Function{
 | 
			
		||||
			Function: &exprpb.Decl_FunctionDecl{
 | 
			
		||||
				Overloads: overloads}}}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// NewIdent creates a named identifier declaration with an optional literal
 | 
			
		||||
// value.
 | 
			
		||||
//
 | 
			
		||||
// Literal values are typically only associated with enum identifiers.
 | 
			
		||||
//
 | 
			
		||||
// Deprecated: Use NewVar or NewConst instead.
 | 
			
		||||
func NewIdent(name string, t *exprpb.Type, v *exprpb.Constant) *exprpb.Decl {
 | 
			
		||||
	return &exprpb.Decl{
 | 
			
		||||
		Name: name,
 | 
			
		||||
		DeclKind: &exprpb.Decl_Ident{
 | 
			
		||||
			Ident: &exprpb.Decl_IdentDecl{
 | 
			
		||||
				Type:  t,
 | 
			
		||||
				Value: v}}}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// NewConst creates a constant identifier with a CEL constant literal value.
func NewConst(name string, t *exprpb.Type, v *exprpb.Constant) *exprpb.Decl {
	// Same wire shape as the deprecated NewIdent; kept as a thin alias.
	return NewIdent(name, t, v)
}
 | 
			
		||||
 | 
			
		||||
// NewVar creates a variable identifier.
func NewVar(name string, t *exprpb.Type) *exprpb.Decl {
	// A variable is an ident declaration without a constant value.
	return NewIdent(name, t, nil)
}
 | 
			
		||||
 | 
			
		||||
// NewInstanceOverload creates a instance function overload contract.
// First element of argTypes is instance.
func NewInstanceOverload(id string, argTypes []*exprpb.Type,
	resultType *exprpb.Type) *exprpb.Decl_FunctionDecl_Overload {
	return &exprpb.Decl_FunctionDecl_Overload{
		OverloadId:         id,
		ResultType:         resultType,
		Params:             argTypes,
		IsInstanceFunction: true}
}
 | 
			
		||||
 | 
			
		||||
// NewListType generates a new list with elements of a certain type.
 | 
			
		||||
func NewListType(elem *exprpb.Type) *exprpb.Type {
 | 
			
		||||
	return &exprpb.Type{
 | 
			
		||||
		TypeKind: &exprpb.Type_ListType_{
 | 
			
		||||
			ListType: &exprpb.Type_ListType{
 | 
			
		||||
				ElemType: elem}}}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// NewMapType generates a new map with typed keys and values.
 | 
			
		||||
func NewMapType(key *exprpb.Type, value *exprpb.Type) *exprpb.Type {
 | 
			
		||||
	return &exprpb.Type{
 | 
			
		||||
		TypeKind: &exprpb.Type_MapType_{
 | 
			
		||||
			MapType: &exprpb.Type_MapType{
 | 
			
		||||
				KeyType:   key,
 | 
			
		||||
				ValueType: value}}}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// NewObjectType creates an object type for a qualified type name.
 | 
			
		||||
func NewObjectType(typeName string) *exprpb.Type {
 | 
			
		||||
	return &exprpb.Type{
 | 
			
		||||
		TypeKind: &exprpb.Type_MessageType{
 | 
			
		||||
			MessageType: typeName}}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// NewOverload creates a function overload declaration which contains a unique
// overload id as well as the expected argument and result types. Overloads
// must be aggregated within a Function declaration.
func NewOverload(id string, argTypes []*exprpb.Type,
	resultType *exprpb.Type) *exprpb.Decl_FunctionDecl_Overload {
	return &exprpb.Decl_FunctionDecl_Overload{
		OverloadId:         id,
		ResultType:         resultType,
		Params:             argTypes,
		IsInstanceFunction: false}
}
 | 
			
		||||
 | 
			
		||||
// NewParameterizedInstanceOverload creates a parametric function instance overload type.
// The typeParams name the type variables that may appear in argTypes/resultType.
func NewParameterizedInstanceOverload(id string,
	argTypes []*exprpb.Type,
	resultType *exprpb.Type,
	typeParams []string) *exprpb.Decl_FunctionDecl_Overload {
	return &exprpb.Decl_FunctionDecl_Overload{
		OverloadId:         id,
		ResultType:         resultType,
		Params:             argTypes,
		TypeParams:         typeParams,
		IsInstanceFunction: true}
}
 | 
			
		||||
 | 
			
		||||
// NewParameterizedOverload creates a parametric function overload type.
// The typeParams name the type variables that may appear in argTypes/resultType.
func NewParameterizedOverload(id string,
	argTypes []*exprpb.Type,
	resultType *exprpb.Type,
	typeParams []string) *exprpb.Decl_FunctionDecl_Overload {
	return &exprpb.Decl_FunctionDecl_Overload{
		OverloadId:         id,
		ResultType:         resultType,
		Params:             argTypes,
		TypeParams:         typeParams,
		IsInstanceFunction: false}
}
 | 
			
		||||
 | 
			
		||||
// NewPrimitiveType creates a type for a primitive value. See the var declarations
 | 
			
		||||
// for Int, Uint, etc.
 | 
			
		||||
func NewPrimitiveType(primitive exprpb.Type_PrimitiveType) *exprpb.Type {
 | 
			
		||||
	return &exprpb.Type{
 | 
			
		||||
		TypeKind: &exprpb.Type_Primitive{
 | 
			
		||||
			Primitive: primitive}}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// NewTypeType creates a new type designating a type.
 | 
			
		||||
func NewTypeType(nested *exprpb.Type) *exprpb.Type {
 | 
			
		||||
	if nested == nil {
 | 
			
		||||
		// must set the nested field for a valid oneof option
 | 
			
		||||
		nested = &exprpb.Type{}
 | 
			
		||||
	}
 | 
			
		||||
	return &exprpb.Type{
 | 
			
		||||
		TypeKind: &exprpb.Type_Type{
 | 
			
		||||
			Type: nested}}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// NewTypeParamType creates a type corresponding to a named, contextual parameter.
 | 
			
		||||
func NewTypeParamType(name string) *exprpb.Type {
 | 
			
		||||
	return &exprpb.Type{
 | 
			
		||||
		TypeKind: &exprpb.Type_TypeParam{
 | 
			
		||||
			TypeParam: name}}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// NewWellKnownType creates a type corresponding to a protobuf well-known type
 | 
			
		||||
// value.
 | 
			
		||||
func NewWellKnownType(wellKnown exprpb.Type_WellKnownType) *exprpb.Type {
 | 
			
		||||
	return &exprpb.Type{
 | 
			
		||||
		TypeKind: &exprpb.Type_WellKnown{
 | 
			
		||||
			WellKnown: wellKnown}}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// NewWrapperType creates a wrapped primitive type instance. Wrapped types
 | 
			
		||||
// are roughly equivalent to a nullable, or optionally valued type.
 | 
			
		||||
func NewWrapperType(wrapped *exprpb.Type) *exprpb.Type {
 | 
			
		||||
	primitive := wrapped.GetPrimitive()
 | 
			
		||||
	if primitive == exprpb.Type_PRIMITIVE_TYPE_UNSPECIFIED {
 | 
			
		||||
		// TODO: return an error
 | 
			
		||||
		panic("Wrapped type must be a primitive")
 | 
			
		||||
	}
 | 
			
		||||
	return &exprpb.Type{
 | 
			
		||||
		TypeKind: &exprpb.Type_Wrapper{
 | 
			
		||||
			Wrapper: primitive}}
 | 
			
		||||
}
 | 
			
		||||
							
								
								
									
										115
									
								
								vendor/github.com/google/cel-go/checker/decls/scopes.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										115
									
								
								vendor/github.com/google/cel-go/checker/decls/scopes.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							@@ -0,0 +1,115 @@
 | 
			
		||||
// Copyright 2018 Google LLC
 | 
			
		||||
//
 | 
			
		||||
// Licensed under the Apache License, Version 2.0 (the "License");
 | 
			
		||||
// you may not use this file except in compliance with the License.
 | 
			
		||||
// You may obtain a copy of the License at
 | 
			
		||||
//
 | 
			
		||||
//      http://www.apache.org/licenses/LICENSE-2.0
 | 
			
		||||
//
 | 
			
		||||
// Unless required by applicable law or agreed to in writing, software
 | 
			
		||||
// distributed under the License is distributed on an "AS IS" BASIS,
 | 
			
		||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | 
			
		||||
// See the License for the specific language governing permissions and
 | 
			
		||||
// limitations under the License.
 | 
			
		||||
 | 
			
		||||
package decls
 | 
			
		||||
 | 
			
		||||
import exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
 | 
			
		||||
 | 
			
		||||
// Scopes represents nested Decl sets where the Scopes value contains a Groups containing all
// identifiers in scope and an optional parent representing outer scopes.
// Each Groups value is a mapping of names to Decls in the ident and function namespaces.
// Lookups are performed such that bindings in inner scopes shadow those in outer scopes.
type Scopes struct {
	// parent is the enclosing (outer) scope; nil at the outermost scope.
	parent *Scopes
	// scopes holds the declarations of the innermost (current) scope.
	scopes *Group
}
 | 
			
		||||
 | 
			
		||||
// NewScopes creates a new, empty Scopes.
 | 
			
		||||
// Some operations can't be safely performed until a Group is added with Push.
 | 
			
		||||
func NewScopes() *Scopes {
 | 
			
		||||
	return &Scopes{
 | 
			
		||||
		scopes: newGroup(),
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Push creates a new Scopes value which references the current Scope as its parent.
 | 
			
		||||
func (s *Scopes) Push() *Scopes {
 | 
			
		||||
	return &Scopes{
 | 
			
		||||
		parent: s,
 | 
			
		||||
		scopes: newGroup(),
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Pop returns the parent Scopes value for the current scope, or the current scope if the parent
 | 
			
		||||
// is nil.
 | 
			
		||||
func (s *Scopes) Pop() *Scopes {
 | 
			
		||||
	if s.parent != nil {
 | 
			
		||||
		return s.parent
 | 
			
		||||
	}
 | 
			
		||||
	// TODO: Consider whether this should be an error / panic.
 | 
			
		||||
	return s
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// AddIdent adds the ident Decl in the current scope.
// Note: If the name collides with an existing identifier in the scope, the Decl is overwritten.
func (s *Scopes) AddIdent(decl *exprpb.Decl) {
	s.scopes.idents[decl.Name] = decl
}
 | 
			
		||||
 | 
			
		||||
// FindIdent finds the first ident Decl with a matching name in Scopes, or nil if one cannot be
 | 
			
		||||
// found.
 | 
			
		||||
// Note: The search is performed from innermost to outermost.
 | 
			
		||||
func (s *Scopes) FindIdent(name string) *exprpb.Decl {
 | 
			
		||||
	if ident, found := s.scopes.idents[name]; found {
 | 
			
		||||
		return ident
 | 
			
		||||
	}
 | 
			
		||||
	if s.parent != nil {
 | 
			
		||||
		return s.parent.FindIdent(name)
 | 
			
		||||
	}
 | 
			
		||||
	return nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// FindIdentInScope finds the first ident Decl with a matching name in the current Scopes value, or
 | 
			
		||||
// nil if one does not exist.
 | 
			
		||||
// Note: The search is only performed on the current scope and does not search outer scopes.
 | 
			
		||||
func (s *Scopes) FindIdentInScope(name string) *exprpb.Decl {
 | 
			
		||||
	if ident, found := s.scopes.idents[name]; found {
 | 
			
		||||
		return ident
 | 
			
		||||
	}
 | 
			
		||||
	return nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// AddFunction adds the function Decl to the current scope.
// Note: Any previous entry for a function in the current scope with the same name is overwritten.
func (s *Scopes) AddFunction(fn *exprpb.Decl) {
	s.scopes.functions[fn.Name] = fn
}
 | 
			
		||||
 | 
			
		||||
// FindFunction finds the first function Decl with a matching name in Scopes.
 | 
			
		||||
// The search is performed from innermost to outermost.
 | 
			
		||||
// Returns nil if no such function in Scopes.
 | 
			
		||||
func (s *Scopes) FindFunction(name string) *exprpb.Decl {
 | 
			
		||||
	if fn, found := s.scopes.functions[name]; found {
 | 
			
		||||
		return fn
 | 
			
		||||
	}
 | 
			
		||||
	if s.parent != nil {
 | 
			
		||||
		return s.parent.FindFunction(name)
 | 
			
		||||
	}
 | 
			
		||||
	return nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Group is a set of Decls that is pushed on or popped off a Scopes as a unit.
// Contains separate namespaces for identifier and function Decls.
// (Should be named "Scope" perhaps?)
type Group struct {
	// idents maps identifier names to their declarations.
	idents    map[string]*exprpb.Decl
	// functions maps function names to their declarations.
	functions map[string]*exprpb.Decl
}
 | 
			
		||||
 | 
			
		||||
func newGroup() *Group {
 | 
			
		||||
	return &Group{
 | 
			
		||||
		idents:    make(map[string]*exprpb.Decl),
 | 
			
		||||
		functions: make(map[string]*exprpb.Decl),
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
							
								
								
									
										346
									
								
								vendor/github.com/google/cel-go/checker/env.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										346
									
								
								vendor/github.com/google/cel-go/checker/env.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							@@ -0,0 +1,346 @@
 | 
			
		||||
// Copyright 2018 Google LLC
 | 
			
		||||
//
 | 
			
		||||
// Licensed under the Apache License, Version 2.0 (the "License");
 | 
			
		||||
// you may not use this file except in compliance with the License.
 | 
			
		||||
// You may obtain a copy of the License at
 | 
			
		||||
//
 | 
			
		||||
//      http://www.apache.org/licenses/LICENSE-2.0
 | 
			
		||||
//
 | 
			
		||||
// Unless required by applicable law or agreed to in writing, software
 | 
			
		||||
// distributed under the License is distributed on an "AS IS" BASIS,
 | 
			
		||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | 
			
		||||
// See the License for the specific language governing permissions and
 | 
			
		||||
// limitations under the License.
 | 
			
		||||
 | 
			
		||||
package checker
 | 
			
		||||
 | 
			
		||||
import (
 | 
			
		||||
	"fmt"
 | 
			
		||||
	"strings"
 | 
			
		||||
 | 
			
		||||
	"github.com/google/cel-go/checker/decls"
 | 
			
		||||
	"github.com/google/cel-go/common/containers"
 | 
			
		||||
	"github.com/google/cel-go/common/types"
 | 
			
		||||
	"github.com/google/cel-go/common/types/pb"
 | 
			
		||||
	"github.com/google/cel-go/common/types/ref"
 | 
			
		||||
	"github.com/google/cel-go/parser"
 | 
			
		||||
 | 
			
		||||
	exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
// aggregateLiteralElementType selects the element-typing policy for list and
// map literals during checking.
type aggregateLiteralElementType int

const (
	// dynElementType (0) permits mixed element types within a literal.
	dynElementType        aggregateLiteralElementType = iota
	// homogenousElementType (1 << 1 == 2) requires all elements to agree.
	homogenousElementType aggregateLiteralElementType = 1 << iota
)
 | 
			
		||||
 | 
			
		||||
// Env is the environment for type checking.
//
// The Env is comprised of a container, type provider, declarations, and other related objects
// which can be used to assist with type-checking.
type Env struct {
	// container resolves relative names to fully-qualified candidates.
	container      *containers.Container
	// provider supplies type and enum metadata for message references.
	provider       ref.TypeProvider
	// declarations holds the nested ident/function scopes.
	declarations   *decls.Scopes
	// aggLitElemType is the typing policy for aggregate literals.
	aggLitElemType aggregateLiteralElementType
}
 | 
			
		||||
 | 
			
		||||
// NewEnv returns a new *Env with the given parameters.
func NewEnv(container *containers.Container, provider ref.TypeProvider) *Env {
	declarations := decls.NewScopes()
	// NOTE(review): Scopes.Push returns a new Scopes and does not mutate its
	// receiver, so discarding the result here looks like a no-op — confirm
	// whether `declarations = declarations.Push()` was intended.
	declarations.Push()

	return &Env{
		container:    container,
		provider:     provider,
		declarations: declarations,
	}
}
 | 
			
		||||
 | 
			
		||||
// NewStandardEnv returns a new *Env with the given params plus standard declarations.
 | 
			
		||||
func NewStandardEnv(container *containers.Container, provider ref.TypeProvider) *Env {
 | 
			
		||||
	e := NewEnv(container, provider)
 | 
			
		||||
	if err := e.Add(StandardDeclarations()...); err != nil {
 | 
			
		||||
		// The standard declaration set should never have duplicate declarations.
 | 
			
		||||
		panic(err)
 | 
			
		||||
	}
 | 
			
		||||
	// TODO: isolate standard declarations from the custom set which may be provided layer.
 | 
			
		||||
	return e
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// EnableDynamicAggregateLiterals detmerines whether list and map literals may support mixed
 | 
			
		||||
// element types at check-time. This does not preclude the presence of a dynamic list or map
 | 
			
		||||
// somewhere in the CEL evaluation process.
 | 
			
		||||
func (e *Env) EnableDynamicAggregateLiterals(enabled bool) *Env {
 | 
			
		||||
	e.aggLitElemType = dynElementType
 | 
			
		||||
	if !enabled {
 | 
			
		||||
		e.aggLitElemType = homogenousElementType
 | 
			
		||||
	}
 | 
			
		||||
	return e
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Add adds new Decl protos to the Env.
 | 
			
		||||
// Returns an error for identifier redeclarations.
 | 
			
		||||
func (e *Env) Add(decls ...*exprpb.Decl) error {
 | 
			
		||||
	errMsgs := make([]errorMsg, 0)
 | 
			
		||||
	for _, decl := range decls {
 | 
			
		||||
		switch decl.DeclKind.(type) {
 | 
			
		||||
		case *exprpb.Decl_Ident:
 | 
			
		||||
			errMsgs = append(errMsgs, e.addIdent(sanitizeIdent(decl)))
 | 
			
		||||
		case *exprpb.Decl_Function:
 | 
			
		||||
			errMsgs = append(errMsgs, e.addFunction(sanitizeFunction(decl))...)
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
	return formatError(errMsgs)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// LookupIdent returns a Decl proto for typeName as an identifier in the Env.
// Returns nil if no such identifier is found in the Env.
func (e *Env) LookupIdent(name string) *exprpb.Decl {
	// Candidates are tried from most- to least-qualified per the container.
	for _, candidate := range e.container.ResolveCandidateNames(name) {
		if ident := e.declarations.FindIdent(candidate); ident != nil {
			return ident
		}

		// Next try to import the name as a reference to a message type. If found,
		// the declaration is added to the outermost (global) scope of the
		// environment, so next time we can access it faster.
		if t, found := e.provider.FindType(candidate); found {
			decl := decls.NewVar(candidate, t)
			e.declarations.AddIdent(decl)
			return decl
		}

		// Next try to import this as an enum value by splitting the name in a type prefix and
		// the enum inside. Enum values are cached as int-typed constants.
		if enumValue := e.provider.EnumValue(candidate); enumValue.Type() != types.ErrType {
			decl := decls.NewIdent(candidate,
				decls.Int,
				&exprpb.Constant{
					ConstantKind: &exprpb.Constant_Int64Value{
						Int64Value: int64(enumValue.(types.Int))}})
			e.declarations.AddIdent(decl)
			return decl
		}
	}
	return nil
}
 | 
			
		||||
 | 
			
		||||
// LookupFunction returns a Decl proto for typeName as a function in env.
 | 
			
		||||
// Returns nil if no such function is found in env.
 | 
			
		||||
func (e *Env) LookupFunction(name string) *exprpb.Decl {
 | 
			
		||||
	for _, candidate := range e.container.ResolveCandidateNames(name) {
 | 
			
		||||
		if fn := e.declarations.FindFunction(candidate); fn != nil {
 | 
			
		||||
			return fn
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
	return nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// addOverload adds overload to function declaration f.
// Returns one or more errorMsg values if the overload overlaps with an existing overload or macro.
func (e *Env) addOverload(f *exprpb.Decl, overload *exprpb.Decl_FunctionDecl_Overload) []errorMsg {
	errMsgs := make([]errorMsg, 0)
	function := f.GetFunction()
	emptyMappings := newMapping()
	overloadFunction := decls.NewFunctionType(overload.GetResultType(),
		overload.GetParams()...)
	// Erase type parameters so structurally-equivalent parameterized overloads
	// are detected as overlapping.
	overloadErased := substitute(emptyMappings, overloadFunction, true)
	for _, existing := range function.GetOverloads() {
		existingFunction := decls.NewFunctionType(existing.GetResultType(),
			existing.GetParams()...)
		existingErased := substitute(emptyMappings, existingFunction, true)
		// Overlap is assignability in either direction between erased signatures.
		overlap := isAssignable(emptyMappings, overloadErased, existingErased) != nil ||
			isAssignable(emptyMappings, existingErased, overloadErased) != nil
		if overlap &&
			overload.GetIsInstanceFunction() == existing.GetIsInstanceFunction() {
			errMsgs = append(errMsgs,
				overlappingOverloadError(f.Name,
					overload.GetOverloadId(), overloadFunction,
					existing.GetOverloadId(), existingFunction))
		}
	}

	// An overload may also collide with a parser macro of the same name,
	// receiver style, and arity.
	for _, macro := range parser.AllMacros {
		if macro.Function() == f.Name &&
			macro.IsReceiverStyle() == overload.GetIsInstanceFunction() &&
			macro.ArgCount() == len(overload.GetParams()) {
			errMsgs = append(errMsgs, overlappingMacroError(f.Name, macro.ArgCount()))
		}
	}
	if len(errMsgs) > 0 {
		return errMsgs
	}
	// No conflicts: mutate the function declaration in place.
	function.Overloads = append(function.GetOverloads(), overload)
	return errMsgs
}
 | 
			
		||||
 | 
			
		||||
// addFunction adds the function Decl to the Env.
// Adds a function decl if one doesn't already exist, then adds all overloads from the Decl.
// If an overload overlaps with an existing overload, adds to the errors in the Env instead.
func (e *Env) addFunction(decl *exprpb.Decl) []errorMsg {
	current := e.declarations.FindFunction(decl.Name)
	if current == nil {
		// Add the function declaration without overloads and check the overloads below.
		current = decls.NewFunction(decl.Name)
		e.declarations.AddFunction(current)
	}

	errorMsgs := make([]errorMsg, 0)
	for _, overload := range decl.GetFunction().GetOverloads() {
		// addOverload appends to current on success; on conflict it returns
		// the overlap error messages instead.
		errorMsgs = append(errorMsgs, e.addOverload(current, overload)...)
	}
	return errorMsgs
}
 | 
			
		||||
 | 
			
		||||
// addIdent adds the Decl to the declarations in the Env.
 | 
			
		||||
// Returns a non-empty errorMsg if the identifier is already declared in the scope.
 | 
			
		||||
func (e *Env) addIdent(decl *exprpb.Decl) errorMsg {
 | 
			
		||||
	current := e.declarations.FindIdentInScope(decl.Name)
 | 
			
		||||
	if current != nil {
 | 
			
		||||
		return overlappingIdentifierError(decl.Name)
 | 
			
		||||
	}
 | 
			
		||||
	e.declarations.AddIdent(decl)
 | 
			
		||||
	return ""
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// sanitizeFunction replaces well-known types referenced by message name with their equivalent
 | 
			
		||||
// CEL built-in type instances.
 | 
			
		||||
func sanitizeFunction(decl *exprpb.Decl) *exprpb.Decl {
 | 
			
		||||
	fn := decl.GetFunction()
 | 
			
		||||
	// Determine whether the declaration requires replacements from proto-based message type
 | 
			
		||||
	// references to well-known CEL type references.
 | 
			
		||||
	var needsSanitizing bool
 | 
			
		||||
	for _, o := range fn.GetOverloads() {
 | 
			
		||||
		if isObjectWellKnownType(o.GetResultType()) {
 | 
			
		||||
			needsSanitizing = true
 | 
			
		||||
			break
 | 
			
		||||
		}
 | 
			
		||||
		for _, p := range o.GetParams() {
 | 
			
		||||
			if isObjectWellKnownType(p) {
 | 
			
		||||
				needsSanitizing = true
 | 
			
		||||
				break
 | 
			
		||||
			}
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	// Early return if the declaration requires no modification.
 | 
			
		||||
	if !needsSanitizing {
 | 
			
		||||
		return decl
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	// Sanitize all of the overloads if any overload requires an update to its type references.
 | 
			
		||||
	overloads := make([]*exprpb.Decl_FunctionDecl_Overload, len(fn.GetOverloads()))
 | 
			
		||||
	for i, o := range fn.GetOverloads() {
 | 
			
		||||
		rt := o.GetResultType()
 | 
			
		||||
		if isObjectWellKnownType(rt) {
 | 
			
		||||
			rt = getObjectWellKnownType(rt)
 | 
			
		||||
		}
 | 
			
		||||
		params := make([]*exprpb.Type, len(o.GetParams()))
 | 
			
		||||
		copy(params, o.GetParams())
 | 
			
		||||
		for j, p := range params {
 | 
			
		||||
			if isObjectWellKnownType(p) {
 | 
			
		||||
				params[j] = getObjectWellKnownType(p)
 | 
			
		||||
			}
 | 
			
		||||
		}
 | 
			
		||||
		// If sanitized, replace the overload definition.
 | 
			
		||||
		if o.IsInstanceFunction {
 | 
			
		||||
			overloads[i] =
 | 
			
		||||
				decls.NewInstanceOverload(o.GetOverloadId(), params, rt)
 | 
			
		||||
		} else {
 | 
			
		||||
			overloads[i] =
 | 
			
		||||
				decls.NewOverload(o.GetOverloadId(), params, rt)
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
	return decls.NewFunction(decl.GetName(), overloads...)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// sanitizeIdent replaces the identifier's well-known types referenced by message name with
 | 
			
		||||
// references to CEL built-in type instances.
 | 
			
		||||
func sanitizeIdent(decl *exprpb.Decl) *exprpb.Decl {
 | 
			
		||||
	id := decl.GetIdent()
 | 
			
		||||
	t := id.GetType()
 | 
			
		||||
	if !isObjectWellKnownType(t) {
 | 
			
		||||
		return decl
 | 
			
		||||
	}
 | 
			
		||||
	return decls.NewIdent(decl.GetName(), getObjectWellKnownType(t), id.GetValue())
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// isObjectWellKnownType returns true if the input type is an OBJECT type with a message name
 | 
			
		||||
// that corresponds the message name of a built-in CEL type.
 | 
			
		||||
func isObjectWellKnownType(t *exprpb.Type) bool {
 | 
			
		||||
	if kindOf(t) != kindObject {
 | 
			
		||||
		return false
 | 
			
		||||
	}
 | 
			
		||||
	_, found := pb.CheckedWellKnowns[t.GetMessageType()]
 | 
			
		||||
	return found
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// getObjectWellKnownType returns the built-in CEL type declaration for input type's message name.
 | 
			
		||||
func getObjectWellKnownType(t *exprpb.Type) *exprpb.Type {
 | 
			
		||||
	return pb.CheckedWellKnowns[t.GetMessageType()]
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// enterScope creates a new Env instance with a new innermost declaration scope.
 | 
			
		||||
func (e *Env) enterScope() *Env {
 | 
			
		||||
	childDecls := e.declarations.Push()
 | 
			
		||||
	return &Env{
 | 
			
		||||
		declarations:   childDecls,
 | 
			
		||||
		container:      e.container,
 | 
			
		||||
		provider:       e.provider,
 | 
			
		||||
		aggLitElemType: e.aggLitElemType,
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// exitScope creates a new Env instance with the nearest outer declaration scope.
 | 
			
		||||
func (e *Env) exitScope() *Env {
 | 
			
		||||
	parentDecls := e.declarations.Pop()
 | 
			
		||||
	return &Env{
 | 
			
		||||
		declarations:   parentDecls,
 | 
			
		||||
		container:      e.container,
 | 
			
		||||
		provider:       e.provider,
 | 
			
		||||
		aggLitElemType: e.aggLitElemType,
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// errorMsg is a type alias meant to represent error-based return values which
 | 
			
		||||
// may be accumulated into an error at a later point in execution.
 | 
			
		||||
type errorMsg string
 | 
			
		||||
 | 
			
		||||
func overlappingIdentifierError(name string) errorMsg {
 | 
			
		||||
	return errorMsg(fmt.Sprintf("overlapping identifier for name '%s'", name))
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func overlappingOverloadError(name string,
 | 
			
		||||
	overloadID1 string, f1 *exprpb.Type,
 | 
			
		||||
	overloadID2 string, f2 *exprpb.Type) errorMsg {
 | 
			
		||||
	return errorMsg(fmt.Sprintf(
 | 
			
		||||
		"overlapping overload for name '%s' (type '%s' with overloadId: '%s' "+
 | 
			
		||||
			"cannot be distinguished from '%s' with overloadId: '%s')",
 | 
			
		||||
		name,
 | 
			
		||||
		FormatCheckedType(f1),
 | 
			
		||||
		overloadID1,
 | 
			
		||||
		FormatCheckedType(f2),
 | 
			
		||||
		overloadID2))
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func overlappingMacroError(name string, argCount int) errorMsg {
 | 
			
		||||
	return errorMsg(fmt.Sprintf(
 | 
			
		||||
		"overlapping macro for name '%s' with %d args", name, argCount))
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func formatError(errMsgs []errorMsg) error {
 | 
			
		||||
	errStrs := make([]string, 0)
 | 
			
		||||
	if len(errMsgs) > 0 {
 | 
			
		||||
		for i := 0; i < len(errMsgs); i++ {
 | 
			
		||||
			if errMsgs[i] != "" {
 | 
			
		||||
				errStrs = append(errStrs, string(errMsgs[i]))
 | 
			
		||||
			}
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
	if len(errStrs) > 0 {
 | 
			
		||||
		return fmt.Errorf("%s", strings.Join(errStrs, "\n"))
 | 
			
		||||
	}
 | 
			
		||||
	return nil
 | 
			
		||||
}
 | 
			
		||||
							
								
								
									
										100
									
								
								vendor/github.com/google/cel-go/checker/errors.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										100
									
								
								vendor/github.com/google/cel-go/checker/errors.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							@@ -0,0 +1,100 @@
 | 
			
		||||
// Copyright 2018 Google LLC
 | 
			
		||||
//
 | 
			
		||||
// Licensed under the Apache License, Version 2.0 (the "License");
 | 
			
		||||
// you may not use this file except in compliance with the License.
 | 
			
		||||
// You may obtain a copy of the License at
 | 
			
		||||
//
 | 
			
		||||
//      http://www.apache.org/licenses/LICENSE-2.0
 | 
			
		||||
//
 | 
			
		||||
// Unless required by applicable law or agreed to in writing, software
 | 
			
		||||
// distributed under the License is distributed on an "AS IS" BASIS,
 | 
			
		||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | 
			
		||||
// See the License for the specific language governing permissions and
 | 
			
		||||
// limitations under the License.
 | 
			
		||||
 | 
			
		||||
package checker
 | 
			
		||||
 | 
			
		||||
import (
 | 
			
		||||
	"github.com/google/cel-go/common"
 | 
			
		||||
 | 
			
		||||
	exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
// typeErrors is a specialization of Errors.
 | 
			
		||||
type typeErrors struct {
 | 
			
		||||
	*common.Errors
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (e *typeErrors) undeclaredReference(l common.Location, container string, name string) {
 | 
			
		||||
	e.ReportError(l, "undeclared reference to '%s' (in container '%s')", name, container)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (e *typeErrors) expressionDoesNotSelectField(l common.Location) {
 | 
			
		||||
	e.ReportError(l, "expression does not select a field")
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (e *typeErrors) typeDoesNotSupportFieldSelection(l common.Location, t *exprpb.Type) {
 | 
			
		||||
	e.ReportError(l, "type '%s' does not support field selection", t)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (e *typeErrors) undefinedField(l common.Location, field string) {
 | 
			
		||||
	e.ReportError(l, "undefined field '%s'", field)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (e *typeErrors) noMatchingOverload(l common.Location, name string, args []*exprpb.Type, isInstance bool) {
 | 
			
		||||
	signature := formatFunction(nil, args, isInstance)
 | 
			
		||||
	e.ReportError(l, "found no matching overload for '%s' applied to '%s'", name, signature)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (e *typeErrors) notAType(l common.Location, t *exprpb.Type) {
 | 
			
		||||
	e.ReportError(l, "'%s(%v)' is not a type", FormatCheckedType(t), t)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (e *typeErrors) notAMessageType(l common.Location, t *exprpb.Type) {
 | 
			
		||||
	e.ReportError(l, "'%s' is not a message type", FormatCheckedType(t))
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (e *typeErrors) fieldTypeMismatch(l common.Location, name string, field *exprpb.Type, value *exprpb.Type) {
 | 
			
		||||
	e.ReportError(l, "expected type of field '%s' is '%s' but provided type is '%s'",
 | 
			
		||||
		name, FormatCheckedType(field), FormatCheckedType(value))
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (e *typeErrors) unexpectedFailedResolution(l common.Location, typeName string) {
 | 
			
		||||
	e.ReportError(l, "[internal] unexpected failed resolution of '%s'", typeName)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (e *typeErrors) notAComprehensionRange(l common.Location, t *exprpb.Type) {
 | 
			
		||||
	e.ReportError(l, "expression of type '%s' cannot be range of a comprehension (must be list, map, or dynamic)",
 | 
			
		||||
		FormatCheckedType(t))
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (e *typeErrors) typeMismatch(l common.Location, expected *exprpb.Type, actual *exprpb.Type) {
 | 
			
		||||
	e.ReportError(l, "expected type '%s' but found '%s'",
 | 
			
		||||
		FormatCheckedType(expected), FormatCheckedType(actual))
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func formatFunction(resultType *exprpb.Type, argTypes []*exprpb.Type, isInstance bool) string {
 | 
			
		||||
	result := ""
 | 
			
		||||
	if isInstance {
 | 
			
		||||
		target := argTypes[0]
 | 
			
		||||
		argTypes = argTypes[1:]
 | 
			
		||||
 | 
			
		||||
		result += FormatCheckedType(target)
 | 
			
		||||
		result += "."
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	result += "("
 | 
			
		||||
	for i, arg := range argTypes {
 | 
			
		||||
		if i > 0 {
 | 
			
		||||
			result += ", "
 | 
			
		||||
		}
 | 
			
		||||
		result += FormatCheckedType(arg)
 | 
			
		||||
	}
 | 
			
		||||
	result += ")"
 | 
			
		||||
	if resultType != nil {
 | 
			
		||||
		result += " -> "
 | 
			
		||||
		result += FormatCheckedType(resultType)
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return result
 | 
			
		||||
}
 | 
			
		||||
							
								
								
									
										49
									
								
								vendor/github.com/google/cel-go/checker/mapping.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										49
									
								
								vendor/github.com/google/cel-go/checker/mapping.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							@@ -0,0 +1,49 @@
 | 
			
		||||
// Copyright 2018 Google LLC
 | 
			
		||||
//
 | 
			
		||||
// Licensed under the Apache License, Version 2.0 (the "License");
 | 
			
		||||
// you may not use this file except in compliance with the License.
 | 
			
		||||
// You may obtain a copy of the License at
 | 
			
		||||
//
 | 
			
		||||
//      http://www.apache.org/licenses/LICENSE-2.0
 | 
			
		||||
//
 | 
			
		||||
// Unless required by applicable law or agreed to in writing, software
 | 
			
		||||
// distributed under the License is distributed on an "AS IS" BASIS,
 | 
			
		||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | 
			
		||||
// See the License for the specific language governing permissions and
 | 
			
		||||
// limitations under the License.
 | 
			
		||||
 | 
			
		||||
package checker
 | 
			
		||||
 | 
			
		||||
import (
 | 
			
		||||
	exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
type mapping struct {
 | 
			
		||||
	mapping map[string]*exprpb.Type
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func newMapping() *mapping {
 | 
			
		||||
	return &mapping{
 | 
			
		||||
		mapping: make(map[string]*exprpb.Type),
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (m *mapping) add(from *exprpb.Type, to *exprpb.Type) {
 | 
			
		||||
	m.mapping[typeKey(from)] = to
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (m *mapping) find(from *exprpb.Type) (*exprpb.Type, bool) {
 | 
			
		||||
	if r, found := m.mapping[typeKey(from)]; found {
 | 
			
		||||
		return r, found
 | 
			
		||||
	}
 | 
			
		||||
	return nil, false
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (m *mapping) copy() *mapping {
 | 
			
		||||
	c := newMapping()
 | 
			
		||||
 | 
			
		||||
	for k, v := range m.mapping {
 | 
			
		||||
		c.mapping[k] = v
 | 
			
		||||
	}
 | 
			
		||||
	return c
 | 
			
		||||
}
 | 
			
		||||
							
								
								
									
										71
									
								
								vendor/github.com/google/cel-go/checker/printer.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										71
									
								
								vendor/github.com/google/cel-go/checker/printer.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							@@ -0,0 +1,71 @@
 | 
			
		||||
// Copyright 2018 Google LLC
 | 
			
		||||
//
 | 
			
		||||
// Licensed under the Apache License, Version 2.0 (the "License");
 | 
			
		||||
// you may not use this file except in compliance with the License.
 | 
			
		||||
// You may obtain a copy of the License at
 | 
			
		||||
//
 | 
			
		||||
//      http://www.apache.org/licenses/LICENSE-2.0
 | 
			
		||||
//
 | 
			
		||||
// Unless required by applicable law or agreed to in writing, software
 | 
			
		||||
// distributed under the License is distributed on an "AS IS" BASIS,
 | 
			
		||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | 
			
		||||
// See the License for the specific language governing permissions and
 | 
			
		||||
// limitations under the License.
 | 
			
		||||
 | 
			
		||||
package checker
 | 
			
		||||
 | 
			
		||||
import (
 | 
			
		||||
	"github.com/google/cel-go/common/debug"
 | 
			
		||||
 | 
			
		||||
	exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
type semanticAdorner struct {
 | 
			
		||||
	checks *exprpb.CheckedExpr
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
var _ debug.Adorner = &semanticAdorner{}
 | 
			
		||||
 | 
			
		||||
func (a *semanticAdorner) GetMetadata(elem interface{}) string {
 | 
			
		||||
	result := ""
 | 
			
		||||
	e, isExpr := elem.(*exprpb.Expr)
 | 
			
		||||
	if !isExpr {
 | 
			
		||||
		return result
 | 
			
		||||
	}
 | 
			
		||||
	t := a.checks.TypeMap[e.Id]
 | 
			
		||||
	if t != nil {
 | 
			
		||||
		result += "~"
 | 
			
		||||
		result += FormatCheckedType(t)
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	switch e.ExprKind.(type) {
 | 
			
		||||
	case *exprpb.Expr_IdentExpr,
 | 
			
		||||
		*exprpb.Expr_CallExpr,
 | 
			
		||||
		*exprpb.Expr_StructExpr,
 | 
			
		||||
		*exprpb.Expr_SelectExpr:
 | 
			
		||||
		if ref, found := a.checks.ReferenceMap[e.Id]; found {
 | 
			
		||||
			if len(ref.GetOverloadId()) == 0 {
 | 
			
		||||
				result += "^" + ref.Name
 | 
			
		||||
			} else {
 | 
			
		||||
				for i, overload := range ref.OverloadId {
 | 
			
		||||
					if i == 0 {
 | 
			
		||||
						result += "^"
 | 
			
		||||
					} else {
 | 
			
		||||
						result += "|"
 | 
			
		||||
					}
 | 
			
		||||
					result += overload
 | 
			
		||||
				}
 | 
			
		||||
			}
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return result
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Print returns a string representation of the Expr message,
 | 
			
		||||
// annotated with types from the CheckedExpr.  The Expr must
 | 
			
		||||
// be a sub-expression embedded in the CheckedExpr.
 | 
			
		||||
func Print(e *exprpb.Expr, checks *exprpb.CheckedExpr) string {
 | 
			
		||||
	a := &semanticAdorner{checks: checks}
 | 
			
		||||
	return debug.ToAdornedDebugString(e, a)
 | 
			
		||||
}
 | 
			
		||||
							
								
								
									
										440
									
								
								vendor/github.com/google/cel-go/checker/standard.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										440
									
								
								vendor/github.com/google/cel-go/checker/standard.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							@@ -0,0 +1,440 @@
 | 
			
		||||
// Copyright 2018 Google LLC
 | 
			
		||||
//
 | 
			
		||||
// Licensed under the Apache License, Version 2.0 (the "License");
 | 
			
		||||
// you may not use this file except in compliance with the License.
 | 
			
		||||
// You may obtain a copy of the License at
 | 
			
		||||
//
 | 
			
		||||
//      http://www.apache.org/licenses/LICENSE-2.0
 | 
			
		||||
//
 | 
			
		||||
// Unless required by applicable law or agreed to in writing, software
 | 
			
		||||
// distributed under the License is distributed on an "AS IS" BASIS,
 | 
			
		||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | 
			
		||||
// See the License for the specific language governing permissions and
 | 
			
		||||
// limitations under the License.
 | 
			
		||||
 | 
			
		||||
package checker
 | 
			
		||||
 | 
			
		||||
import (
 | 
			
		||||
	"github.com/google/cel-go/checker/decls"
 | 
			
		||||
	"github.com/google/cel-go/common/operators"
 | 
			
		||||
	"github.com/google/cel-go/common/overloads"
 | 
			
		||||
 | 
			
		||||
	exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
// StandardDeclarations returns the Decls for all functions and constants in the evaluator.
 | 
			
		||||
func StandardDeclarations() []*exprpb.Decl {
 | 
			
		||||
	// Some shortcuts we use when building declarations.
 | 
			
		||||
	paramA := decls.NewTypeParamType("A")
 | 
			
		||||
	typeParamAList := []string{"A"}
 | 
			
		||||
	listOfA := decls.NewListType(paramA)
 | 
			
		||||
	paramB := decls.NewTypeParamType("B")
 | 
			
		||||
	typeParamABList := []string{"A", "B"}
 | 
			
		||||
	mapOfAB := decls.NewMapType(paramA, paramB)
 | 
			
		||||
 | 
			
		||||
	var idents []*exprpb.Decl
 | 
			
		||||
	for _, t := range []*exprpb.Type{
 | 
			
		||||
		decls.Int, decls.Uint, decls.Bool,
 | 
			
		||||
		decls.Double, decls.Bytes, decls.String} {
 | 
			
		||||
		idents = append(idents,
 | 
			
		||||
			decls.NewVar(FormatCheckedType(t), decls.NewTypeType(t)))
 | 
			
		||||
	}
 | 
			
		||||
	idents = append(idents,
 | 
			
		||||
		decls.NewVar("list", decls.NewTypeType(listOfA)),
 | 
			
		||||
		decls.NewVar("map", decls.NewTypeType(mapOfAB)),
 | 
			
		||||
		decls.NewVar("null_type", decls.NewTypeType(decls.Null)),
 | 
			
		||||
		decls.NewVar("type", decls.NewTypeType(decls.NewTypeType(nil))))
 | 
			
		||||
 | 
			
		||||
	// Booleans
 | 
			
		||||
	// TODO: allow the conditional to return a heterogenous type.
 | 
			
		||||
	return append(idents, []*exprpb.Decl{
 | 
			
		||||
		decls.NewFunction(operators.Conditional,
 | 
			
		||||
			decls.NewParameterizedOverload(overloads.Conditional,
 | 
			
		||||
				[]*exprpb.Type{decls.Bool, paramA, paramA}, paramA,
 | 
			
		||||
				typeParamAList)),
 | 
			
		||||
 | 
			
		||||
		decls.NewFunction(operators.LogicalAnd,
 | 
			
		||||
			decls.NewOverload(overloads.LogicalAnd,
 | 
			
		||||
				[]*exprpb.Type{decls.Bool, decls.Bool}, decls.Bool)),
 | 
			
		||||
 | 
			
		||||
		decls.NewFunction(operators.LogicalOr,
 | 
			
		||||
			decls.NewOverload(overloads.LogicalOr,
 | 
			
		||||
				[]*exprpb.Type{decls.Bool, decls.Bool}, decls.Bool)),
 | 
			
		||||
 | 
			
		||||
		decls.NewFunction(operators.LogicalNot,
 | 
			
		||||
			decls.NewOverload(overloads.LogicalNot,
 | 
			
		||||
				[]*exprpb.Type{decls.Bool}, decls.Bool)),
 | 
			
		||||
 | 
			
		||||
		decls.NewFunction(operators.NotStrictlyFalse,
 | 
			
		||||
			decls.NewOverload(overloads.NotStrictlyFalse,
 | 
			
		||||
				[]*exprpb.Type{decls.Bool}, decls.Bool)),
 | 
			
		||||
 | 
			
		||||
		// Relations.
 | 
			
		||||
 | 
			
		||||
		decls.NewFunction(operators.Less,
 | 
			
		||||
			decls.NewOverload(overloads.LessBool,
 | 
			
		||||
				[]*exprpb.Type{decls.Bool, decls.Bool}, decls.Bool),
 | 
			
		||||
			decls.NewOverload(overloads.LessInt64,
 | 
			
		||||
				[]*exprpb.Type{decls.Int, decls.Int}, decls.Bool),
 | 
			
		||||
			decls.NewOverload(overloads.LessUint64,
 | 
			
		||||
				[]*exprpb.Type{decls.Uint, decls.Uint}, decls.Bool),
 | 
			
		||||
			decls.NewOverload(overloads.LessDouble,
 | 
			
		||||
				[]*exprpb.Type{decls.Double, decls.Double}, decls.Bool),
 | 
			
		||||
			decls.NewOverload(overloads.LessString,
 | 
			
		||||
				[]*exprpb.Type{decls.String, decls.String}, decls.Bool),
 | 
			
		||||
			decls.NewOverload(overloads.LessBytes,
 | 
			
		||||
				[]*exprpb.Type{decls.Bytes, decls.Bytes}, decls.Bool),
 | 
			
		||||
			decls.NewOverload(overloads.LessTimestamp,
 | 
			
		||||
				[]*exprpb.Type{decls.Timestamp, decls.Timestamp}, decls.Bool),
 | 
			
		||||
			decls.NewOverload(overloads.LessDuration,
 | 
			
		||||
				[]*exprpb.Type{decls.Duration, decls.Duration}, decls.Bool)),
 | 
			
		||||
 | 
			
		||||
		decls.NewFunction(operators.LessEquals,
 | 
			
		||||
			decls.NewOverload(overloads.LessEqualsBool,
 | 
			
		||||
				[]*exprpb.Type{decls.Bool, decls.Bool}, decls.Bool),
 | 
			
		||||
			decls.NewOverload(overloads.LessEqualsInt64,
 | 
			
		||||
				[]*exprpb.Type{decls.Int, decls.Int}, decls.Bool),
 | 
			
		||||
			decls.NewOverload(overloads.LessEqualsUint64,
 | 
			
		||||
				[]*exprpb.Type{decls.Uint, decls.Uint}, decls.Bool),
 | 
			
		||||
			decls.NewOverload(overloads.LessEqualsDouble,
 | 
			
		||||
				[]*exprpb.Type{decls.Double, decls.Double}, decls.Bool),
 | 
			
		||||
			decls.NewOverload(overloads.LessEqualsString,
 | 
			
		||||
				[]*exprpb.Type{decls.String, decls.String}, decls.Bool),
 | 
			
		||||
			decls.NewOverload(overloads.LessEqualsBytes,
 | 
			
		||||
				[]*exprpb.Type{decls.Bytes, decls.Bytes}, decls.Bool),
 | 
			
		||||
			decls.NewOverload(overloads.LessEqualsTimestamp,
 | 
			
		||||
				[]*exprpb.Type{decls.Timestamp, decls.Timestamp}, decls.Bool),
 | 
			
		||||
			decls.NewOverload(overloads.LessEqualsDuration,
 | 
			
		||||
				[]*exprpb.Type{decls.Duration, decls.Duration}, decls.Bool)),
 | 
			
		||||
 | 
			
		||||
		decls.NewFunction(operators.Greater,
 | 
			
		||||
			decls.NewOverload(overloads.GreaterBool,
 | 
			
		||||
				[]*exprpb.Type{decls.Bool, decls.Bool}, decls.Bool),
 | 
			
		||||
			decls.NewOverload(overloads.GreaterInt64,
 | 
			
		||||
				[]*exprpb.Type{decls.Int, decls.Int}, decls.Bool),
 | 
			
		||||
			decls.NewOverload(overloads.GreaterUint64,
 | 
			
		||||
				[]*exprpb.Type{decls.Uint, decls.Uint}, decls.Bool),
 | 
			
		||||
			decls.NewOverload(overloads.GreaterDouble,
 | 
			
		||||
				[]*exprpb.Type{decls.Double, decls.Double}, decls.Bool),
 | 
			
		||||
			decls.NewOverload(overloads.GreaterString,
 | 
			
		||||
				[]*exprpb.Type{decls.String, decls.String}, decls.Bool),
 | 
			
		||||
			decls.NewOverload(overloads.GreaterBytes,
 | 
			
		||||
				[]*exprpb.Type{decls.Bytes, decls.Bytes}, decls.Bool),
 | 
			
		||||
			decls.NewOverload(overloads.GreaterTimestamp,
 | 
			
		||||
				[]*exprpb.Type{decls.Timestamp, decls.Timestamp}, decls.Bool),
 | 
			
		||||
			decls.NewOverload(overloads.GreaterDuration,
 | 
			
		||||
				[]*exprpb.Type{decls.Duration, decls.Duration}, decls.Bool)),
 | 
			
		||||
 | 
			
		||||
		decls.NewFunction(operators.GreaterEquals,
 | 
			
		||||
			decls.NewOverload(overloads.GreaterEqualsBool,
 | 
			
		||||
				[]*exprpb.Type{decls.Bool, decls.Bool}, decls.Bool),
 | 
			
		||||
			decls.NewOverload(overloads.GreaterEqualsInt64,
 | 
			
		||||
				[]*exprpb.Type{decls.Int, decls.Int}, decls.Bool),
 | 
			
		||||
			decls.NewOverload(overloads.GreaterEqualsUint64,
 | 
			
		||||
				[]*exprpb.Type{decls.Uint, decls.Uint}, decls.Bool),
 | 
			
		||||
			decls.NewOverload(overloads.GreaterEqualsDouble,
 | 
			
		||||
				[]*exprpb.Type{decls.Double, decls.Double}, decls.Bool),
 | 
			
		||||
			decls.NewOverload(overloads.GreaterEqualsString,
 | 
			
		||||
				[]*exprpb.Type{decls.String, decls.String}, decls.Bool),
 | 
			
		||||
			decls.NewOverload(overloads.GreaterEqualsBytes,
 | 
			
		||||
				[]*exprpb.Type{decls.Bytes, decls.Bytes}, decls.Bool),
 | 
			
		||||
			decls.NewOverload(overloads.GreaterEqualsTimestamp,
 | 
			
		||||
				[]*exprpb.Type{decls.Timestamp, decls.Timestamp}, decls.Bool),
 | 
			
		||||
			decls.NewOverload(overloads.GreaterEqualsDuration,
 | 
			
		||||
				[]*exprpb.Type{decls.Duration, decls.Duration}, decls.Bool)),
 | 
			
		||||
 | 
			
		||||
		decls.NewFunction(operators.Equals,
 | 
			
		||||
			decls.NewParameterizedOverload(overloads.Equals,
 | 
			
		||||
				[]*exprpb.Type{paramA, paramA}, decls.Bool,
 | 
			
		||||
				typeParamAList)),
 | 
			
		||||
 | 
			
		||||
		decls.NewFunction(operators.NotEquals,
 | 
			
		||||
			decls.NewParameterizedOverload(overloads.NotEquals,
 | 
			
		||||
				[]*exprpb.Type{paramA, paramA}, decls.Bool,
 | 
			
		||||
				typeParamAList)),
 | 
			
		||||
 | 
			
		||||
		// Algebra.
 | 
			
		||||
 | 
			
		||||
		decls.NewFunction(operators.Subtract,
 | 
			
		||||
			decls.NewOverload(overloads.SubtractInt64,
 | 
			
		||||
				[]*exprpb.Type{decls.Int, decls.Int}, decls.Int),
 | 
			
		||||
			decls.NewOverload(overloads.SubtractUint64,
 | 
			
		||||
				[]*exprpb.Type{decls.Uint, decls.Uint}, decls.Uint),
 | 
			
		||||
			decls.NewOverload(overloads.SubtractDouble,
 | 
			
		||||
				[]*exprpb.Type{decls.Double, decls.Double}, decls.Double),
 | 
			
		||||
			decls.NewOverload(overloads.SubtractTimestampTimestamp,
 | 
			
		||||
				[]*exprpb.Type{decls.Timestamp, decls.Timestamp}, decls.Duration),
 | 
			
		||||
			decls.NewOverload(overloads.SubtractTimestampDuration,
 | 
			
		||||
				[]*exprpb.Type{decls.Timestamp, decls.Duration}, decls.Timestamp),
 | 
			
		||||
			decls.NewOverload(overloads.SubtractDurationDuration,
 | 
			
		||||
				[]*exprpb.Type{decls.Duration, decls.Duration}, decls.Duration)),
 | 
			
		||||
 | 
			
		||||
		decls.NewFunction(operators.Multiply,
 | 
			
		||||
			decls.NewOverload(overloads.MultiplyInt64,
 | 
			
		||||
				[]*exprpb.Type{decls.Int, decls.Int}, decls.Int),
 | 
			
		||||
			decls.NewOverload(overloads.MultiplyUint64,
 | 
			
		||||
				[]*exprpb.Type{decls.Uint, decls.Uint}, decls.Uint),
 | 
			
		||||
			decls.NewOverload(overloads.MultiplyDouble,
 | 
			
		||||
				[]*exprpb.Type{decls.Double, decls.Double}, decls.Double)),
 | 
			
		||||
 | 
			
		||||
		decls.NewFunction(operators.Divide,
 | 
			
		||||
			decls.NewOverload(overloads.DivideInt64,
 | 
			
		||||
				[]*exprpb.Type{decls.Int, decls.Int}, decls.Int),
 | 
			
		||||
			decls.NewOverload(overloads.DivideUint64,
 | 
			
		||||
				[]*exprpb.Type{decls.Uint, decls.Uint}, decls.Uint),
 | 
			
		||||
			decls.NewOverload(overloads.DivideDouble,
 | 
			
		||||
				[]*exprpb.Type{decls.Double, decls.Double}, decls.Double)),
 | 
			
		||||
 | 
			
		||||
		decls.NewFunction(operators.Modulo,
 | 
			
		||||
			decls.NewOverload(overloads.ModuloInt64,
 | 
			
		||||
				[]*exprpb.Type{decls.Int, decls.Int}, decls.Int),
 | 
			
		||||
			decls.NewOverload(overloads.ModuloUint64,
 | 
			
		||||
				[]*exprpb.Type{decls.Uint, decls.Uint}, decls.Uint)),
 | 
			
		||||
 | 
			
		||||
		decls.NewFunction(operators.Add,
 | 
			
		||||
			decls.NewOverload(overloads.AddInt64,
 | 
			
		||||
				[]*exprpb.Type{decls.Int, decls.Int}, decls.Int),
 | 
			
		||||
			decls.NewOverload(overloads.AddUint64,
 | 
			
		||||
				[]*exprpb.Type{decls.Uint, decls.Uint}, decls.Uint),
 | 
			
		||||
			decls.NewOverload(overloads.AddDouble,
 | 
			
		||||
				[]*exprpb.Type{decls.Double, decls.Double}, decls.Double),
 | 
			
		||||
			decls.NewOverload(overloads.AddString,
 | 
			
		||||
				[]*exprpb.Type{decls.String, decls.String}, decls.String),
 | 
			
		||||
			decls.NewOverload(overloads.AddBytes,
 | 
			
		||||
				[]*exprpb.Type{decls.Bytes, decls.Bytes}, decls.Bytes),
 | 
			
		||||
			decls.NewParameterizedOverload(overloads.AddList,
 | 
			
		||||
				[]*exprpb.Type{listOfA, listOfA}, listOfA,
 | 
			
		||||
				typeParamAList),
 | 
			
		||||
			decls.NewOverload(overloads.AddTimestampDuration,
 | 
			
		||||
				[]*exprpb.Type{decls.Timestamp, decls.Duration}, decls.Timestamp),
 | 
			
		||||
			decls.NewOverload(overloads.AddDurationTimestamp,
 | 
			
		||||
				[]*exprpb.Type{decls.Duration, decls.Timestamp}, decls.Timestamp),
 | 
			
		||||
			decls.NewOverload(overloads.AddDurationDuration,
 | 
			
		||||
				[]*exprpb.Type{decls.Duration, decls.Duration}, decls.Duration)),
 | 
			
		||||
 | 
			
		||||
		decls.NewFunction(operators.Negate,
 | 
			
		||||
			decls.NewOverload(overloads.NegateInt64,
 | 
			
		||||
				[]*exprpb.Type{decls.Int}, decls.Int),
 | 
			
		||||
			decls.NewOverload(overloads.NegateDouble,
 | 
			
		||||
				[]*exprpb.Type{decls.Double}, decls.Double)),
 | 
			
		||||
 | 
			
		||||
		// Index.
 | 
			
		||||
 | 
			
		||||
		decls.NewFunction(operators.Index,
 | 
			
		||||
			decls.NewParameterizedOverload(overloads.IndexList,
 | 
			
		||||
				[]*exprpb.Type{listOfA, decls.Int}, paramA,
 | 
			
		||||
				typeParamAList),
 | 
			
		||||
			decls.NewParameterizedOverload(overloads.IndexMap,
 | 
			
		||||
				[]*exprpb.Type{mapOfAB, paramA}, paramB,
 | 
			
		||||
				typeParamABList)),
 | 
			
		||||
		//decls.NewOverload(overloads.IndexMessage,
 | 
			
		||||
		//	[]*expr.Type{decls.Dyn, decls.String}, decls.Dyn)),
 | 
			
		||||
 | 
			
		||||
		// Collections.
 | 
			
		||||
 | 
			
		||||
		decls.NewFunction(overloads.Size,
 | 
			
		||||
			decls.NewInstanceOverload(overloads.SizeStringInst,
 | 
			
		||||
				[]*exprpb.Type{decls.String}, decls.Int),
 | 
			
		||||
			decls.NewInstanceOverload(overloads.SizeBytesInst,
 | 
			
		||||
				[]*exprpb.Type{decls.Bytes}, decls.Int),
 | 
			
		||||
			decls.NewParameterizedInstanceOverload(overloads.SizeListInst,
 | 
			
		||||
				[]*exprpb.Type{listOfA}, decls.Int, typeParamAList),
 | 
			
		||||
			decls.NewParameterizedInstanceOverload(overloads.SizeMapInst,
 | 
			
		||||
				[]*exprpb.Type{mapOfAB}, decls.Int, typeParamABList),
 | 
			
		||||
			decls.NewOverload(overloads.SizeString,
 | 
			
		||||
				[]*exprpb.Type{decls.String}, decls.Int),
 | 
			
		||||
			decls.NewOverload(overloads.SizeBytes,
 | 
			
		||||
				[]*exprpb.Type{decls.Bytes}, decls.Int),
 | 
			
		||||
			decls.NewParameterizedOverload(overloads.SizeList,
 | 
			
		||||
				[]*exprpb.Type{listOfA}, decls.Int, typeParamAList),
 | 
			
		||||
			decls.NewParameterizedOverload(overloads.SizeMap,
 | 
			
		||||
				[]*exprpb.Type{mapOfAB}, decls.Int, typeParamABList)),
 | 
			
		||||
 | 
			
		||||
		decls.NewFunction(operators.In,
 | 
			
		||||
			decls.NewParameterizedOverload(overloads.InList,
 | 
			
		||||
				[]*exprpb.Type{paramA, listOfA}, decls.Bool,
 | 
			
		||||
				typeParamAList),
 | 
			
		||||
			decls.NewParameterizedOverload(overloads.InMap,
 | 
			
		||||
				[]*exprpb.Type{paramA, mapOfAB}, decls.Bool,
 | 
			
		||||
				typeParamABList)),
 | 
			
		||||
 | 
			
		||||
		// Deprecated 'in()' function.
 | 
			
		||||
 | 
			
		||||
		decls.NewFunction(overloads.DeprecatedIn,
 | 
			
		||||
			decls.NewParameterizedOverload(overloads.InList,
 | 
			
		||||
				[]*exprpb.Type{paramA, listOfA}, decls.Bool,
 | 
			
		||||
				typeParamAList),
 | 
			
		||||
			decls.NewParameterizedOverload(overloads.InMap,
 | 
			
		||||
				[]*exprpb.Type{paramA, mapOfAB}, decls.Bool,
 | 
			
		||||
				typeParamABList)),
 | 
			
		||||
		//decls.NewOverload(overloads.InMessage,
 | 
			
		||||
		//	[]*expr.Type{Dyn, decls.String},decls.Bool)),
 | 
			
		||||
 | 
			
		||||
		// Conversions to type.
 | 
			
		||||
 | 
			
		||||
		decls.NewFunction(overloads.TypeConvertType,
 | 
			
		||||
			decls.NewParameterizedOverload(overloads.TypeConvertType,
 | 
			
		||||
				[]*exprpb.Type{paramA}, decls.NewTypeType(paramA), typeParamAList)),
 | 
			
		||||
 | 
			
		||||
		// Conversions to int.
 | 
			
		||||
 | 
			
		||||
		decls.NewFunction(overloads.TypeConvertInt,
 | 
			
		||||
			decls.NewOverload(overloads.IntToInt, []*exprpb.Type{decls.Int}, decls.Int),
 | 
			
		||||
			decls.NewOverload(overloads.UintToInt, []*exprpb.Type{decls.Uint}, decls.Int),
 | 
			
		||||
			decls.NewOverload(overloads.DoubleToInt, []*exprpb.Type{decls.Double}, decls.Int),
 | 
			
		||||
			decls.NewOverload(overloads.StringToInt, []*exprpb.Type{decls.String}, decls.Int),
 | 
			
		||||
			decls.NewOverload(overloads.TimestampToInt, []*exprpb.Type{decls.Timestamp}, decls.Int),
 | 
			
		||||
			decls.NewOverload(overloads.DurationToInt, []*exprpb.Type{decls.Duration}, decls.Int)),
 | 
			
		||||
 | 
			
		||||
		// Conversions to uint.
 | 
			
		||||
 | 
			
		||||
		decls.NewFunction(overloads.TypeConvertUint,
 | 
			
		||||
			decls.NewOverload(overloads.UintToUint, []*exprpb.Type{decls.Uint}, decls.Uint),
 | 
			
		||||
			decls.NewOverload(overloads.IntToUint, []*exprpb.Type{decls.Int}, decls.Uint),
 | 
			
		||||
			decls.NewOverload(overloads.DoubleToUint, []*exprpb.Type{decls.Double}, decls.Uint),
 | 
			
		||||
			decls.NewOverload(overloads.StringToUint, []*exprpb.Type{decls.String}, decls.Uint)),
 | 
			
		||||
 | 
			
		||||
		// Conversions to double.
 | 
			
		||||
 | 
			
		||||
		decls.NewFunction(overloads.TypeConvertDouble,
 | 
			
		||||
			decls.NewOverload(overloads.DoubleToDouble, []*exprpb.Type{decls.Double}, decls.Double),
 | 
			
		||||
			decls.NewOverload(overloads.IntToDouble, []*exprpb.Type{decls.Int}, decls.Double),
 | 
			
		||||
			decls.NewOverload(overloads.UintToDouble, []*exprpb.Type{decls.Uint}, decls.Double),
 | 
			
		||||
			decls.NewOverload(overloads.StringToDouble, []*exprpb.Type{decls.String}, decls.Double)),
 | 
			
		||||
 | 
			
		||||
		// Conversions to bool.
 | 
			
		||||
 | 
			
		||||
		decls.NewFunction(overloads.TypeConvertBool,
 | 
			
		||||
			decls.NewOverload(overloads.BoolToBool, []*exprpb.Type{decls.Bool}, decls.Bool),
 | 
			
		||||
			decls.NewOverload(overloads.StringToBool, []*exprpb.Type{decls.String}, decls.Bool)),
 | 
			
		||||
 | 
			
		||||
		// Conversions to string.
 | 
			
		||||
 | 
			
		||||
		decls.NewFunction(overloads.TypeConvertString,
 | 
			
		||||
			decls.NewOverload(overloads.StringToString, []*exprpb.Type{decls.String}, decls.String),
 | 
			
		||||
			decls.NewOverload(overloads.BoolToString, []*exprpb.Type{decls.Bool}, decls.String),
 | 
			
		||||
			decls.NewOverload(overloads.IntToString, []*exprpb.Type{decls.Int}, decls.String),
 | 
			
		||||
			decls.NewOverload(overloads.UintToString, []*exprpb.Type{decls.Uint}, decls.String),
 | 
			
		||||
			decls.NewOverload(overloads.DoubleToString, []*exprpb.Type{decls.Double}, decls.String),
 | 
			
		||||
			decls.NewOverload(overloads.BytesToString, []*exprpb.Type{decls.Bytes}, decls.String),
 | 
			
		||||
			decls.NewOverload(overloads.TimestampToString, []*exprpb.Type{decls.Timestamp}, decls.String),
 | 
			
		||||
			decls.NewOverload(overloads.DurationToString, []*exprpb.Type{decls.Duration}, decls.String)),
 | 
			
		||||
 | 
			
		||||
		// Conversions to bytes.
 | 
			
		||||
 | 
			
		||||
		decls.NewFunction(overloads.TypeConvertBytes,
 | 
			
		||||
			decls.NewOverload(overloads.BytesToBytes, []*exprpb.Type{decls.Bytes}, decls.Bytes),
 | 
			
		||||
			decls.NewOverload(overloads.StringToBytes, []*exprpb.Type{decls.String}, decls.Bytes)),
 | 
			
		||||
 | 
			
		||||
		// Conversions to timestamps.
 | 
			
		||||
 | 
			
		||||
		decls.NewFunction(overloads.TypeConvertTimestamp,
 | 
			
		||||
			decls.NewOverload(overloads.TimestampToTimestamp,
 | 
			
		||||
				[]*exprpb.Type{decls.Timestamp}, decls.Timestamp),
 | 
			
		||||
			decls.NewOverload(overloads.StringToTimestamp,
 | 
			
		||||
				[]*exprpb.Type{decls.String}, decls.Timestamp),
 | 
			
		||||
			decls.NewOverload(overloads.IntToTimestamp,
 | 
			
		||||
				[]*exprpb.Type{decls.Int}, decls.Timestamp)),
 | 
			
		||||
 | 
			
		||||
		// Conversions to durations.
 | 
			
		||||
 | 
			
		||||
		decls.NewFunction(overloads.TypeConvertDuration,
 | 
			
		||||
			decls.NewOverload(overloads.DurationToDuration,
 | 
			
		||||
				[]*exprpb.Type{decls.Duration}, decls.Duration),
 | 
			
		||||
			decls.NewOverload(overloads.StringToDuration,
 | 
			
		||||
				[]*exprpb.Type{decls.String}, decls.Duration),
 | 
			
		||||
			decls.NewOverload(overloads.IntToDuration,
 | 
			
		||||
				[]*exprpb.Type{decls.Int}, decls.Duration)),
 | 
			
		||||
 | 
			
		||||
		// Conversions to Dyn.
 | 
			
		||||
 | 
			
		||||
		decls.NewFunction(overloads.TypeConvertDyn,
 | 
			
		||||
			decls.NewParameterizedOverload(overloads.ToDyn,
 | 
			
		||||
				[]*exprpb.Type{paramA}, decls.Dyn,
 | 
			
		||||
				typeParamAList)),
 | 
			
		||||
 | 
			
		||||
		// String functions.
 | 
			
		||||
 | 
			
		||||
		decls.NewFunction(overloads.Contains,
 | 
			
		||||
			decls.NewInstanceOverload(overloads.ContainsString,
 | 
			
		||||
				[]*exprpb.Type{decls.String, decls.String}, decls.Bool)),
 | 
			
		||||
		decls.NewFunction(overloads.EndsWith,
 | 
			
		||||
			decls.NewInstanceOverload(overloads.EndsWithString,
 | 
			
		||||
				[]*exprpb.Type{decls.String, decls.String}, decls.Bool)),
 | 
			
		||||
		decls.NewFunction(overloads.Matches,
 | 
			
		||||
			decls.NewInstanceOverload(overloads.MatchesString,
 | 
			
		||||
				[]*exprpb.Type{decls.String, decls.String}, decls.Bool)),
 | 
			
		||||
		decls.NewFunction(overloads.StartsWith,
 | 
			
		||||
			decls.NewInstanceOverload(overloads.StartsWithString,
 | 
			
		||||
				[]*exprpb.Type{decls.String, decls.String}, decls.Bool)),
 | 
			
		||||
 | 
			
		||||
		// Date/time functions.
 | 
			
		||||
 | 
			
		||||
		decls.NewFunction(overloads.TimeGetFullYear,
 | 
			
		||||
			decls.NewInstanceOverload(overloads.TimestampToYear,
 | 
			
		||||
				[]*exprpb.Type{decls.Timestamp}, decls.Int),
 | 
			
		||||
			decls.NewInstanceOverload(overloads.TimestampToYearWithTz,
 | 
			
		||||
				[]*exprpb.Type{decls.Timestamp, decls.String}, decls.Int)),
 | 
			
		||||
 | 
			
		||||
		decls.NewFunction(overloads.TimeGetMonth,
 | 
			
		||||
			decls.NewInstanceOverload(overloads.TimestampToMonth,
 | 
			
		||||
				[]*exprpb.Type{decls.Timestamp}, decls.Int),
 | 
			
		||||
			decls.NewInstanceOverload(overloads.TimestampToMonthWithTz,
 | 
			
		||||
				[]*exprpb.Type{decls.Timestamp, decls.String}, decls.Int)),
 | 
			
		||||
 | 
			
		||||
		decls.NewFunction(overloads.TimeGetDayOfYear,
 | 
			
		||||
			decls.NewInstanceOverload(overloads.TimestampToDayOfYear,
 | 
			
		||||
				[]*exprpb.Type{decls.Timestamp}, decls.Int),
 | 
			
		||||
			decls.NewInstanceOverload(overloads.TimestampToDayOfYearWithTz,
 | 
			
		||||
				[]*exprpb.Type{decls.Timestamp, decls.String}, decls.Int)),
 | 
			
		||||
 | 
			
		||||
		decls.NewFunction(overloads.TimeGetDayOfMonth,
 | 
			
		||||
			decls.NewInstanceOverload(overloads.TimestampToDayOfMonthZeroBased,
 | 
			
		||||
				[]*exprpb.Type{decls.Timestamp}, decls.Int),
 | 
			
		||||
			decls.NewInstanceOverload(overloads.TimestampToDayOfMonthZeroBasedWithTz,
 | 
			
		||||
				[]*exprpb.Type{decls.Timestamp, decls.String}, decls.Int)),
 | 
			
		||||
 | 
			
		||||
		decls.NewFunction(overloads.TimeGetDate,
 | 
			
		||||
			decls.NewInstanceOverload(overloads.TimestampToDayOfMonthOneBased,
 | 
			
		||||
				[]*exprpb.Type{decls.Timestamp}, decls.Int),
 | 
			
		||||
			decls.NewInstanceOverload(overloads.TimestampToDayOfMonthOneBasedWithTz,
 | 
			
		||||
				[]*exprpb.Type{decls.Timestamp, decls.String}, decls.Int)),
 | 
			
		||||
 | 
			
		||||
		decls.NewFunction(overloads.TimeGetDayOfWeek,
 | 
			
		||||
			decls.NewInstanceOverload(overloads.TimestampToDayOfWeek,
 | 
			
		||||
				[]*exprpb.Type{decls.Timestamp}, decls.Int),
 | 
			
		||||
			decls.NewInstanceOverload(overloads.TimestampToDayOfWeekWithTz,
 | 
			
		||||
				[]*exprpb.Type{decls.Timestamp, decls.String}, decls.Int)),
 | 
			
		||||
 | 
			
		||||
		decls.NewFunction(overloads.TimeGetHours,
 | 
			
		||||
			decls.NewInstanceOverload(overloads.TimestampToHours,
 | 
			
		||||
				[]*exprpb.Type{decls.Timestamp}, decls.Int),
 | 
			
		||||
			decls.NewInstanceOverload(overloads.TimestampToHoursWithTz,
 | 
			
		||||
				[]*exprpb.Type{decls.Timestamp, decls.String}, decls.Int),
 | 
			
		||||
			decls.NewInstanceOverload(overloads.DurationToHours,
 | 
			
		||||
				[]*exprpb.Type{decls.Duration}, decls.Int)),
 | 
			
		||||
 | 
			
		||||
		decls.NewFunction(overloads.TimeGetMinutes,
 | 
			
		||||
			decls.NewInstanceOverload(overloads.TimestampToMinutes,
 | 
			
		||||
				[]*exprpb.Type{decls.Timestamp}, decls.Int),
 | 
			
		||||
			decls.NewInstanceOverload(overloads.TimestampToMinutesWithTz,
 | 
			
		||||
				[]*exprpb.Type{decls.Timestamp, decls.String}, decls.Int),
 | 
			
		||||
			decls.NewInstanceOverload(overloads.DurationToMinutes,
 | 
			
		||||
				[]*exprpb.Type{decls.Duration}, decls.Int)),
 | 
			
		||||
 | 
			
		||||
		decls.NewFunction(overloads.TimeGetSeconds,
 | 
			
		||||
			decls.NewInstanceOverload(overloads.TimestampToSeconds,
 | 
			
		||||
				[]*exprpb.Type{decls.Timestamp}, decls.Int),
 | 
			
		||||
			decls.NewInstanceOverload(overloads.TimestampToSecondsWithTz,
 | 
			
		||||
				[]*exprpb.Type{decls.Timestamp, decls.String}, decls.Int),
 | 
			
		||||
			decls.NewInstanceOverload(overloads.DurationToSeconds,
 | 
			
		||||
				[]*exprpb.Type{decls.Duration}, decls.Int)),
 | 
			
		||||
 | 
			
		||||
		decls.NewFunction(overloads.TimeGetMilliseconds,
 | 
			
		||||
			decls.NewInstanceOverload(overloads.TimestampToMilliseconds,
 | 
			
		||||
				[]*exprpb.Type{decls.Timestamp}, decls.Int),
 | 
			
		||||
			decls.NewInstanceOverload(overloads.TimestampToMillisecondsWithTz,
 | 
			
		||||
				[]*exprpb.Type{decls.Timestamp, decls.String}, decls.Int),
 | 
			
		||||
			decls.NewInstanceOverload(overloads.DurationToMilliseconds,
 | 
			
		||||
				[]*exprpb.Type{decls.Duration}, decls.Int))}...)
 | 
			
		||||
}
 | 
			
		||||
							
								
								
									
										505
									
								
								vendor/github.com/google/cel-go/checker/types.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										505
									
								
								vendor/github.com/google/cel-go/checker/types.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							@@ -0,0 +1,505 @@
 | 
			
		||||
// Copyright 2018 Google LLC
 | 
			
		||||
//
 | 
			
		||||
// Licensed under the Apache License, Version 2.0 (the "License");
 | 
			
		||||
// you may not use this file except in compliance with the License.
 | 
			
		||||
// You may obtain a copy of the License at
 | 
			
		||||
//
 | 
			
		||||
//      http://www.apache.org/licenses/LICENSE-2.0
 | 
			
		||||
//
 | 
			
		||||
// Unless required by applicable law or agreed to in writing, software
 | 
			
		||||
// distributed under the License is distributed on an "AS IS" BASIS,
 | 
			
		||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | 
			
		||||
// See the License for the specific language governing permissions and
 | 
			
		||||
// limitations under the License.
 | 
			
		||||
 | 
			
		||||
package checker
 | 
			
		||||
 | 
			
		||||
import (
 | 
			
		||||
	"fmt"
 | 
			
		||||
	"strings"
 | 
			
		||||
 | 
			
		||||
	"github.com/google/cel-go/checker/decls"
 | 
			
		||||
 | 
			
		||||
	"google.golang.org/protobuf/proto"
 | 
			
		||||
 | 
			
		||||
	exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
// Internal type-kind discriminants returned by kindOf. Each value mirrors one
// oneof case of the checked.proto Type message; kindUnknown covers a nil or
// unset type. Starting at iota + 1 keeps the zero value distinct from any
// valid kind.
const (
	kindUnknown = iota + 1
	kindError
	kindFunction
	kindDyn
	kindPrimitive
	kindWellKnown
	kindWrapper
	kindNull
	kindAbstract
	kindType
	kindList
	kindMap
	kindObject
	kindTypeParam
)
 | 
			
		||||
 | 
			
		||||
// FormatCheckedType converts a type message into a string representation.
// Composite types (list, map, type, wrapper, function) are rendered
// recursively; unrecognized kinds fall back to the proto's String() output.
func FormatCheckedType(t *exprpb.Type) string {
	switch kindOf(t) {
	case kindDyn:
		return "dyn"
	case kindFunction:
		return formatFunction(t.GetFunction().GetResultType(),
			t.GetFunction().GetArgTypes(),
			false)
	case kindList:
		return fmt.Sprintf("list(%s)", FormatCheckedType(t.GetListType().ElemType))
	case kindObject:
		// Objects are identified by their fully-qualified message type name.
		return t.GetMessageType()
	case kindMap:
		return fmt.Sprintf("map(%s, %s)",
			FormatCheckedType(t.GetMapType().KeyType),
			FormatCheckedType(t.GetMapType().ValueType))
	case kindNull:
		return "null"
	case kindPrimitive:
		// UINT64 and INT64 have CEL-specific short names; all other primitives
		// use the lowercased proto enum name (e.g. "bool", "string").
		switch t.GetPrimitive() {
		case exprpb.Type_UINT64:
			return "uint"
		case exprpb.Type_INT64:
			return "int"
		}
		return strings.Trim(strings.ToLower(t.GetPrimitive().String()), " ")
	case kindType:
		// A type with no parameter renders as the bare "type" kind.
		if t.GetType() == nil {
			return "type"
		}
		return fmt.Sprintf("type(%s)", FormatCheckedType(t.GetType()))
	case kindWellKnown:
		switch t.GetWellKnown() {
		case exprpb.Type_ANY:
			return "any"
		case exprpb.Type_DURATION:
			return "duration"
		case exprpb.Type_TIMESTAMP:
			return "timestamp"
		}
	case kindWrapper:
		// Wrappers render in terms of the primitive type they wrap.
		return fmt.Sprintf("wrapper(%s)",
			FormatCheckedType(decls.NewPrimitiveType(t.GetWrapper())))
	case kindError:
		return "!error!"
	}
	// kindUnknown, kindTypeParam, unhandled well-known values, etc.
	return t.String()
}
 | 
			
		||||
 | 
			
		||||
// isDyn returns true if the input t is either type DYN or a well-known ANY message.
 | 
			
		||||
func isDyn(t *exprpb.Type) bool {
 | 
			
		||||
	// Note: object type values that are well-known and map to a DYN value in practice
 | 
			
		||||
	// are sanitized prior to being added to the environment.
 | 
			
		||||
	switch kindOf(t) {
 | 
			
		||||
	case kindDyn:
 | 
			
		||||
		return true
 | 
			
		||||
	case kindWellKnown:
 | 
			
		||||
		return t.GetWellKnown() == exprpb.Type_ANY
 | 
			
		||||
	default:
 | 
			
		||||
		return false
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// isDynOrError returns true if the input is either an Error, DYN, or well-known ANY message.
 | 
			
		||||
func isDynOrError(t *exprpb.Type) bool {
 | 
			
		||||
	switch kindOf(t) {
 | 
			
		||||
	case kindError:
 | 
			
		||||
		return true
 | 
			
		||||
	default:
 | 
			
		||||
		return isDyn(t)
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// isEqualOrLessSpecific checks whether one type is equal or less specific than the other one.
// A type is less specific if it matches the other type using the DYN type.
func isEqualOrLessSpecific(t1 *exprpb.Type, t2 *exprpb.Type) bool {
	kind1, kind2 := kindOf(t1), kindOf(t2)
	// The first type is less specific.
	if isDyn(t1) || kind1 == kindTypeParam {
		return true
	}
	// The first type is not less specific.
	if isDyn(t2) || kind2 == kindTypeParam {
		return false
	}
	// Types must be of the same kind to be equal.
	if kind1 != kind2 {
		return false
	}

	// With limited exceptions for ANY and JSON values, the types must agree and be equivalent in
	// order to return true.
	switch kind1 {
	case kindAbstract:
		// Abstract types agree when the names match and every parameter type is
		// pairwise equal-or-less-specific.
		a1 := t1.GetAbstractType()
		a2 := t2.GetAbstractType()
		if a1.GetName() != a2.GetName() ||
			len(a1.GetParameterTypes()) != len(a2.GetParameterTypes()) {
			return false
		}
		for i, p1 := range a1.GetParameterTypes() {
			if !isEqualOrLessSpecific(p1, a2.GetParameterTypes()[i]) {
				return false
			}
		}
		return true
	case kindFunction:
		// Functions agree when arity matches and the result plus each argument
		// type is pairwise equal-or-less-specific.
		fn1 := t1.GetFunction()
		fn2 := t2.GetFunction()
		if len(fn1.ArgTypes) != len(fn2.ArgTypes) {
			return false
		}
		if !isEqualOrLessSpecific(fn1.ResultType, fn2.ResultType) {
			return false
		}
		for i, a1 := range fn1.ArgTypes {
			if !isEqualOrLessSpecific(a1, fn2.ArgTypes[i]) {
				return false
			}
		}
		return true
	case kindList:
		return isEqualOrLessSpecific(t1.GetListType().ElemType, t2.GetListType().ElemType)
	case kindMap:
		m1 := t1.GetMapType()
		m2 := t2.GetMapType()
		return isEqualOrLessSpecific(m1.KeyType, m2.KeyType) &&
			isEqualOrLessSpecific(m1.ValueType, m2.ValueType)
	case kindType:
		return true
	default:
		// Primitive, wrapper, well-known, object, and null types must be
		// structurally identical.
		return proto.Equal(t1, t2)
	}
}
 | 
			
		||||
 | 
			
		||||
// internalIsAssignable returns true if t1 is assignable to t2, recording any
// type-parameter substitutions discovered along the way into m.
func internalIsAssignable(m *mapping, t1 *exprpb.Type, t2 *exprpb.Type) bool {
	// A type is always assignable to itself.
	// Early terminate the call to avoid cases of infinite recursion.
	if proto.Equal(t1, t2) {
		return true
	}
	// Process type parameters.
	kind1, kind2 := kindOf(t1), kindOf(t2)
	if kind2 == kindTypeParam {
		if t2Sub, found := m.find(t2); found {
			// If the types are compatible, pick the more general type and return true
			if !internalIsAssignable(m, t1, t2Sub) {
				return false
			}
			m.add(t2, mostGeneral(t1, t2Sub))
			return true
		}
		// Occurs check: only bind t2 to t1 when t2 does not appear within t1,
		// which would otherwise create a cyclic substitution.
		if notReferencedIn(m, t2, t1) {
			m.add(t2, t1)
			return true
		}
	}
	if kind1 == kindTypeParam {
		// For the lower type bound, we currently do not perform adjustment. The restricted
		// way we use type parameters in lower type bounds, it is not necessary, but may
		// become if we generalize type unification.
		if t1Sub, found := m.find(t1); found {
			// If the types are compatible, pick the more general type and return true
			if !internalIsAssignable(m, t1Sub, t2) {
				return false
			}
			m.add(t1, mostGeneral(t1Sub, t2))
			return true
		}
		if notReferencedIn(m, t1, t2) {
			m.add(t1, t2)
			return true
		}
	}

	// Next check for wildcard types.
	if isDynOrError(t1) || isDynOrError(t2) {
		return true
	}

	// Test for when the types do not need to agree, but are more specific than dyn.
	switch kind1 {
	case kindNull:
		return internalIsAssignableNull(t2)
	case kindPrimitive:
		return internalIsAssignablePrimitive(t1.GetPrimitive(), t2)
	case kindWrapper:
		// A wrapper is assignable wherever its underlying primitive would be.
		return internalIsAssignable(m, decls.NewPrimitiveType(t1.GetWrapper()), t2)
	default:
		if kind1 != kind2 {
			return false
		}
	}

	// Test for when the types must agree.
	switch kind1 {
	// ERROR, TYPE_PARAM, and DYN handled above.
	case kindAbstract:
		return internalIsAssignableAbstractType(m, t1.GetAbstractType(), t2.GetAbstractType())
	case kindFunction:
		return internalIsAssignableFunction(m, t1.GetFunction(), t2.GetFunction())
	case kindList:
		return internalIsAssignable(m, t1.GetListType().GetElemType(), t2.GetListType().GetElemType())
	case kindMap:
		return internalIsAssignableMap(m, t1.GetMapType(), t2.GetMapType())
	case kindObject:
		return t1.GetMessageType() == t2.GetMessageType()
	case kindType:
		// A type is a type is a type, any additional parameterization of the
		// type cannot affect method resolution or assignability.
		return true
	case kindWellKnown:
		return t1.GetWellKnown() == t2.GetWellKnown()
	default:
		return false
	}
}
 | 
			
		||||
 | 
			
		||||
// internalIsAssignableAbstractType returns true if the abstract type names agree and all type
 | 
			
		||||
// parameters are assignable.
 | 
			
		||||
func internalIsAssignableAbstractType(m *mapping,
 | 
			
		||||
	a1 *exprpb.Type_AbstractType,
 | 
			
		||||
	a2 *exprpb.Type_AbstractType) bool {
 | 
			
		||||
	if a1.GetName() != a2.GetName() {
 | 
			
		||||
		return false
 | 
			
		||||
	}
 | 
			
		||||
	if internalIsAssignableList(m, a1.GetParameterTypes(), a2.GetParameterTypes()) {
 | 
			
		||||
		return true
 | 
			
		||||
	}
 | 
			
		||||
	return false
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// internalIsAssignableFunction returns true if the function return type and arg types are
 | 
			
		||||
// assignable.
 | 
			
		||||
func internalIsAssignableFunction(m *mapping,
 | 
			
		||||
	f1 *exprpb.Type_FunctionType,
 | 
			
		||||
	f2 *exprpb.Type_FunctionType) bool {
 | 
			
		||||
	f1ArgTypes := flattenFunctionTypes(f1)
 | 
			
		||||
	f2ArgTypes := flattenFunctionTypes(f2)
 | 
			
		||||
	if internalIsAssignableList(m, f1ArgTypes, f2ArgTypes) {
 | 
			
		||||
		return true
 | 
			
		||||
	}
 | 
			
		||||
	return false
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// internalIsAssignableList returns true if the element types at each index in the list are
 | 
			
		||||
// assignable from l1[i] to l2[i]. The list lengths must also agree for the lists to be
 | 
			
		||||
// assignable.
 | 
			
		||||
func internalIsAssignableList(m *mapping, l1 []*exprpb.Type, l2 []*exprpb.Type) bool {
 | 
			
		||||
	if len(l1) != len(l2) {
 | 
			
		||||
		return false
 | 
			
		||||
	}
 | 
			
		||||
	for i, t1 := range l1 {
 | 
			
		||||
		if !internalIsAssignable(m, t1, l2[i]) {
 | 
			
		||||
			return false
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
	return true
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// internalIsAssignableMap returns true if map m1 may be assigned to map m2.
 | 
			
		||||
func internalIsAssignableMap(m *mapping, m1 *exprpb.Type_MapType, m2 *exprpb.Type_MapType) bool {
 | 
			
		||||
	if internalIsAssignableList(m,
 | 
			
		||||
		[]*exprpb.Type{m1.GetKeyType(), m1.GetValueType()},
 | 
			
		||||
		[]*exprpb.Type{m2.GetKeyType(), m2.GetValueType()}) {
 | 
			
		||||
		return true
 | 
			
		||||
	}
 | 
			
		||||
	return false
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// internalIsAssignableNull returns true if the type is nullable.
 | 
			
		||||
func internalIsAssignableNull(t *exprpb.Type) bool {
 | 
			
		||||
	switch kindOf(t) {
 | 
			
		||||
	case kindAbstract, kindObject, kindNull, kindWellKnown, kindWrapper:
 | 
			
		||||
		return true
 | 
			
		||||
	default:
 | 
			
		||||
		return false
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// internalIsAssignablePrimitive returns true if the target type is the same or if it is a wrapper
 | 
			
		||||
// for the primitive type.
 | 
			
		||||
func internalIsAssignablePrimitive(p exprpb.Type_PrimitiveType, target *exprpb.Type) bool {
 | 
			
		||||
	switch kindOf(target) {
 | 
			
		||||
	case kindPrimitive:
 | 
			
		||||
		return p == target.GetPrimitive()
 | 
			
		||||
	case kindWrapper:
 | 
			
		||||
		return p == target.GetWrapper()
 | 
			
		||||
	default:
 | 
			
		||||
		return false
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// isAssignable returns an updated type substitution mapping if t1 is assignable to t2.
 | 
			
		||||
func isAssignable(m *mapping, t1 *exprpb.Type, t2 *exprpb.Type) *mapping {
 | 
			
		||||
	mCopy := m.copy()
 | 
			
		||||
	if internalIsAssignable(mCopy, t1, t2) {
 | 
			
		||||
		return mCopy
 | 
			
		||||
	}
 | 
			
		||||
	return nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// isAssignableList returns an updated type substitution mapping if l1 is assignable to l2.
 | 
			
		||||
func isAssignableList(m *mapping, l1 []*exprpb.Type, l2 []*exprpb.Type) *mapping {
 | 
			
		||||
	mCopy := m.copy()
 | 
			
		||||
	if internalIsAssignableList(mCopy, l1, l2) {
 | 
			
		||||
		return mCopy
 | 
			
		||||
	}
 | 
			
		||||
	return nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// kindOf returns the kind of the type as defined in the checked.proto.
// A nil type, or a type with no TypeKind set, yields kindUnknown; each oneof
// case maps one-to-one onto an internal kind constant.
func kindOf(t *exprpb.Type) int {
	if t == nil || t.TypeKind == nil {
		return kindUnknown
	}
	switch t.TypeKind.(type) {
	case *exprpb.Type_Error:
		return kindError
	case *exprpb.Type_Function:
		return kindFunction
	case *exprpb.Type_Dyn:
		return kindDyn
	case *exprpb.Type_Primitive:
		return kindPrimitive
	case *exprpb.Type_WellKnown:
		return kindWellKnown
	case *exprpb.Type_Wrapper:
		return kindWrapper
	case *exprpb.Type_Null:
		return kindNull
	case *exprpb.Type_Type:
		return kindType
	case *exprpb.Type_ListType_:
		return kindList
	case *exprpb.Type_MapType_:
		return kindMap
	case *exprpb.Type_MessageType:
		return kindObject
	case *exprpb.Type_TypeParam:
		return kindTypeParam
	case *exprpb.Type_AbstractType_:
		return kindAbstract
	}
	return kindUnknown
}
 | 
			
		||||
 | 
			
		||||
// mostGeneral returns the more general of two types which are known to unify.
 | 
			
		||||
func mostGeneral(t1 *exprpb.Type, t2 *exprpb.Type) *exprpb.Type {
 | 
			
		||||
	if isEqualOrLessSpecific(t1, t2) {
 | 
			
		||||
		return t1
 | 
			
		||||
	}
 | 
			
		||||
	return t2
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// notReferencedIn checks whether the type doesn't appear directly or transitively within the other
// type. This is a standard requirement for type unification, commonly referred to as the "occurs
// check".
func notReferencedIn(m *mapping, t *exprpb.Type, withinType *exprpb.Type) bool {
	// Direct occurrence.
	if proto.Equal(t, withinType) {
		return false
	}
	withinKind := kindOf(withinType)
	switch withinKind {
	case kindTypeParam:
		// Chase the substitution for a bound type parameter; an unbound
		// parameter cannot contain t.
		wtSub, found := m.find(withinType)
		if !found {
			return true
		}
		return notReferencedIn(m, t, wtSub)
	case kindAbstract:
		for _, pt := range withinType.GetAbstractType().GetParameterTypes() {
			if !notReferencedIn(m, t, pt) {
				return false
			}
		}
		return true
	case kindFunction:
		// Check the result type and all argument types (flattened together).
		fn := withinType.GetFunction()
		types := flattenFunctionTypes(fn)
		for _, a := range types {
			if !notReferencedIn(m, t, a) {
				return false
			}
		}
		return true
	case kindList:
		return notReferencedIn(m, t, withinType.GetListType().ElemType)
	case kindMap:
		mt := withinType.GetMapType()
		return notReferencedIn(m, t, mt.KeyType) && notReferencedIn(m, t, mt.ValueType)
	case kindWrapper:
		// A wrapper may reference t through its underlying primitive type.
		return notReferencedIn(m, t, decls.NewPrimitiveType(withinType.GetWrapper()))
	default:
		// Non-composite kinds cannot contain another type.
		return true
	}
}
 | 
			
		||||
 | 
			
		||||
// substitute replaces all direct and indirect occurrences of bound type parameters. Unbound type
// parameters are replaced by DYN if typeParamToDyn is true.
func substitute(m *mapping, t *exprpb.Type, typeParamToDyn bool) *exprpb.Type {
	// Follow the substitution chain for bound type parameters first.
	if tSub, found := m.find(t); found {
		return substitute(m, tSub, typeParamToDyn)
	}
	kind := kindOf(t)
	// An unbound type parameter either becomes DYN or is returned unchanged.
	if typeParamToDyn && kind == kindTypeParam {
		return decls.Dyn
	}
	// Rebuild composite types with substituted components.
	switch kind {
	case kindAbstract:
		// TODO: implement!
		at := t.GetAbstractType()
		params := make([]*exprpb.Type, len(at.GetParameterTypes()))
		for i, p := range at.GetParameterTypes() {
			params[i] = substitute(m, p, typeParamToDyn)
		}
		return decls.NewAbstractType(at.GetName(), params...)
	case kindFunction:
		fn := t.GetFunction()
		rt := substitute(m, fn.ResultType, typeParamToDyn)
		args := make([]*exprpb.Type, len(fn.ArgTypes))
		for i, a := range fn.ArgTypes {
			args[i] = substitute(m, a, typeParamToDyn)
		}
		return decls.NewFunctionType(rt, args...)
	case kindList:
		return decls.NewListType(substitute(m, t.GetListType().ElemType, typeParamToDyn))
	case kindMap:
		mt := t.GetMapType()
		return decls.NewMapType(substitute(m, mt.KeyType, typeParamToDyn),
			substitute(m, mt.ValueType, typeParamToDyn))
	case kindType:
		if t.GetType() != nil {
			return decls.NewTypeType(substitute(m, t.GetType(), typeParamToDyn))
		}
		return t
	default:
		// Non-composite types contain no parameters to substitute.
		return t
	}
}
 | 
			
		||||
 | 
			
		||||
// typeKey returns a string key for the type based on its formatted checked-type
// representation, suitable for use as a map key.
func typeKey(t *exprpb.Type) string {
	return FormatCheckedType(t)
}
 | 
			
		||||
 | 
			
		||||
// flattenFunctionTypes takes a function with arg types T1, T2, ..., TN and result type TR
 | 
			
		||||
// and returns a slice containing {T1, T2, ..., TN, TR}.
 | 
			
		||||
func flattenFunctionTypes(f *exprpb.Type_FunctionType) []*exprpb.Type {
 | 
			
		||||
	argTypes := f.GetArgTypes()
 | 
			
		||||
	if len(argTypes) == 0 {
 | 
			
		||||
		return []*exprpb.Type{f.GetResultType()}
 | 
			
		||||
	}
 | 
			
		||||
	flattend := make([]*exprpb.Type, len(argTypes)+1, len(argTypes)+1)
 | 
			
		||||
	for i, at := range argTypes {
 | 
			
		||||
		flattend[i] = at
 | 
			
		||||
	}
 | 
			
		||||
	flattend[len(argTypes)] = f.GetResultType()
 | 
			
		||||
	return flattend
 | 
			
		||||
}
 | 
			
		||||
							
								
								
									
										34
									
								
								vendor/github.com/google/cel-go/common/BUILD.bazel
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										34
									
								
								vendor/github.com/google/cel-go/common/BUILD.bazel
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							@@ -0,0 +1,34 @@
 | 
			
		||||
# BUILD rules for the vendored github.com/google/cel-go/common package.
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")

package(
    default_visibility = ["//visibility:public"],
    licenses = ["notice"],  # Apache 2.0
)

go_library(
    name = "go_default_library",
    srcs = [
        "error.go",
        "errors.go",
        "location.go",
        "source.go",
    ],
    importpath = "github.com/google/cel-go/common",
    deps = [
        "//common/runes:go_default_library",
        "@org_golang_google_genproto//googleapis/api/expr/v1alpha1:go_default_library",
        "@org_golang_x_text//width:go_default_library",
    ],
)

go_test(
    name = "go_default_test",
    size = "small",
    srcs = [
        "errors_test.go",
        "source_test.go",
    ],
    embed = [
        ":go_default_library",
    ],
)
 | 
			
		||||
							
								
								
									
										31
									
								
								vendor/github.com/google/cel-go/common/containers/BUILD.bazel
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										31
									
								
								vendor/github.com/google/cel-go/common/containers/BUILD.bazel
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							@@ -0,0 +1,31 @@
 | 
			
		||||
# BUILD rules for the vendored github.com/google/cel-go/common/containers package.
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")

package(
    default_visibility = ["//visibility:public"],
    licenses = ["notice"],  # Apache 2.0
)

go_library(
    name = "go_default_library",
    srcs = [
        "container.go",
    ],
    importpath = "github.com/google/cel-go/common/containers",
    deps = [
        "@org_golang_google_genproto//googleapis/api/expr/v1alpha1:go_default_library",
    ],
)

go_test(
    name = "go_default_test",
    size = "small",
    srcs = [
        "container_test.go",
    ],
    embed = [
        ":go_default_library",
    ],
    deps = [
        "@org_golang_google_genproto//googleapis/api/expr/v1alpha1:go_default_library",
    ],
)
 | 
			
		||||
							
								
								
									
										316
									
								
								vendor/github.com/google/cel-go/common/containers/container.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										316
									
								
								vendor/github.com/google/cel-go/common/containers/container.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							@@ -0,0 +1,316 @@
 | 
			
		||||
// Copyright 2018 Google LLC
 | 
			
		||||
//
 | 
			
		||||
// Licensed under the Apache License, Version 2.0 (the "License");
 | 
			
		||||
// you may not use this file except in compliance with the License.
 | 
			
		||||
// You may obtain a copy of the License at
 | 
			
		||||
//
 | 
			
		||||
//      http://www.apache.org/licenses/LICENSE-2.0
 | 
			
		||||
//
 | 
			
		||||
// Unless required by applicable law or agreed to in writing, software
 | 
			
		||||
// distributed under the License is distributed on an "AS IS" BASIS,
 | 
			
		||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | 
			
		||||
// See the License for the specific language governing permissions and
 | 
			
		||||
// limitations under the License.
 | 
			
		||||
 | 
			
		||||
// Package containers defines types and functions for resolving qualified names within a namespace
 | 
			
		||||
// or type provided to CEL.
 | 
			
		||||
package containers
 | 
			
		||||
 | 
			
		||||
import (
 | 
			
		||||
	"fmt"
 | 
			
		||||
	"strings"
 | 
			
		||||
 | 
			
		||||
	exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
var (
 | 
			
		||||
	// DefaultContainer has an empty container name.
 | 
			
		||||
	DefaultContainer *Container = nil
 | 
			
		||||
 | 
			
		||||
	// Empty map to search for aliases when needed.
 | 
			
		||||
	noAliases = make(map[string]string)
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
// NewContainer creates a new Container with the fully-qualified name.
 | 
			
		||||
func NewContainer(opts ...ContainerOption) (*Container, error) {
 | 
			
		||||
	var c *Container
 | 
			
		||||
	var err error
 | 
			
		||||
	for _, opt := range opts {
 | 
			
		||||
		c, err = opt(c)
 | 
			
		||||
		if err != nil {
 | 
			
		||||
			return nil, err
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
	return c, nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Container holds a reference to an optional qualified container name and set of aliases.
//
// The program container can be used to simplify variable, function, and type specification within
// CEL programs and behaves more or less like a C++ namespace. See ResolveCandidateNames for more
// details.
type Container struct {
	// name is the fully-qualified container name; empty for the default container.
	name    string
	// aliases maps a simple alias to its fully-qualified expansion; nil when none are set.
	aliases map[string]string
}
 | 
			
		||||
 | 
			
		||||
// Extend creates a new Container with the existing settings and applies a series of
 | 
			
		||||
// ContainerOptions to further configure the new container.
 | 
			
		||||
func (c *Container) Extend(opts ...ContainerOption) (*Container, error) {
 | 
			
		||||
	if c == nil {
 | 
			
		||||
		return NewContainer(opts...)
 | 
			
		||||
	}
 | 
			
		||||
	// Copy the name and aliases of the existing container.
 | 
			
		||||
	ext := &Container{name: c.Name()}
 | 
			
		||||
	if len(c.aliasSet()) > 0 {
 | 
			
		||||
		aliasSet := make(map[string]string, len(c.aliasSet()))
 | 
			
		||||
		for k, v := range c.aliasSet() {
 | 
			
		||||
			aliasSet[k] = v
 | 
			
		||||
		}
 | 
			
		||||
		ext.aliases = aliasSet
 | 
			
		||||
	}
 | 
			
		||||
	// Apply the new options to the container.
 | 
			
		||||
	var err error
 | 
			
		||||
	for _, opt := range opts {
 | 
			
		||||
		ext, err = opt(ext)
 | 
			
		||||
		if err != nil {
 | 
			
		||||
			return nil, err
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
	return ext, nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Name returns the fully-qualified name of the container.
 | 
			
		||||
//
 | 
			
		||||
// The name may conceptually be a namespace, package, or type.
 | 
			
		||||
func (c *Container) Name() string {
 | 
			
		||||
	if c == nil {
 | 
			
		||||
		return ""
 | 
			
		||||
	}
 | 
			
		||||
	return c.name
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// ResolveCandidateNames returns the candidates name of namespaced identifiers in C++ resolution
 | 
			
		||||
// order.
 | 
			
		||||
//
 | 
			
		||||
// Names which shadow other names are returned first. If a name includes a leading dot ('.'),
 | 
			
		||||
// the name is treated as an absolute identifier which cannot be shadowed.
 | 
			
		||||
//
 | 
			
		||||
// Given a container name a.b.c.M.N and a type name R.s, this will deliver in order:
 | 
			
		||||
//
 | 
			
		||||
//     a.b.c.M.N.R.s
 | 
			
		||||
//     a.b.c.M.R.s
 | 
			
		||||
//     a.b.c.R.s
 | 
			
		||||
//     a.b.R.s
 | 
			
		||||
//     a.R.s
 | 
			
		||||
//     R.s
 | 
			
		||||
//
 | 
			
		||||
// If aliases or abbreviations are configured for the container, then alias names will take
 | 
			
		||||
// precedence over containerized names.
 | 
			
		||||
func (c *Container) ResolveCandidateNames(name string) []string {
 | 
			
		||||
	if strings.HasPrefix(name, ".") {
 | 
			
		||||
		qn := name[1:]
 | 
			
		||||
		alias, isAlias := c.findAlias(qn)
 | 
			
		||||
		if isAlias {
 | 
			
		||||
			return []string{alias}
 | 
			
		||||
		}
 | 
			
		||||
		return []string{qn}
 | 
			
		||||
	}
 | 
			
		||||
	alias, isAlias := c.findAlias(name)
 | 
			
		||||
	if isAlias {
 | 
			
		||||
		return []string{alias}
 | 
			
		||||
	}
 | 
			
		||||
	if c.Name() == "" {
 | 
			
		||||
		return []string{name}
 | 
			
		||||
	}
 | 
			
		||||
	nextCont := c.Name()
 | 
			
		||||
	candidates := []string{nextCont + "." + name}
 | 
			
		||||
	for i := strings.LastIndex(nextCont, "."); i >= 0; i = strings.LastIndex(nextCont, ".") {
 | 
			
		||||
		nextCont = nextCont[:i]
 | 
			
		||||
		candidates = append(candidates, nextCont+"."+name)
 | 
			
		||||
	}
 | 
			
		||||
	return append(candidates, name)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// aliasSet returns the alias to fully-qualified name mapping stored in the container.
 | 
			
		||||
func (c *Container) aliasSet() map[string]string {
 | 
			
		||||
	if c == nil || c.aliases == nil {
 | 
			
		||||
		return noAliases
 | 
			
		||||
	}
 | 
			
		||||
	return c.aliases
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// findAlias takes a name as input and returns an alias expansion if one exists.
 | 
			
		||||
//
 | 
			
		||||
// If the name is qualified, the first component of the qualified name is checked against known
 | 
			
		||||
// aliases. Any alias that is found in a qualified name is expanded in the result:
 | 
			
		||||
//
 | 
			
		||||
//     alias: R -> my.alias.R
 | 
			
		||||
//     name: R.S.T
 | 
			
		||||
//     output: my.alias.R.S.T
 | 
			
		||||
//
 | 
			
		||||
// Note, the name must not have a leading dot.
 | 
			
		||||
func (c *Container) findAlias(name string) (string, bool) {
 | 
			
		||||
	// If an alias exists for the name, ensure it is searched last.
 | 
			
		||||
	simple := name
 | 
			
		||||
	qualifier := ""
 | 
			
		||||
	dot := strings.Index(name, ".")
 | 
			
		||||
	if dot >= 0 {
 | 
			
		||||
		simple = name[0:dot]
 | 
			
		||||
		qualifier = name[dot:]
 | 
			
		||||
	}
 | 
			
		||||
	alias, found := c.aliasSet()[simple]
 | 
			
		||||
	if !found {
 | 
			
		||||
		return "", false
 | 
			
		||||
	}
 | 
			
		||||
	return alias + qualifier, true
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// ContainerOption specifies a functional configuration option for a Container.
 | 
			
		||||
//
 | 
			
		||||
// Note, ContainerOption implementations must be able to handle nil container inputs.
 | 
			
		||||
type ContainerOption func(*Container) (*Container, error)
 | 
			
		||||
 | 
			
		||||
// Abbrevs configures a set of simple names as abbreviations for fully-qualified names.
 | 
			
		||||
//
 | 
			
		||||
// An abbreviation (abbrev for short) is a simple name that expands to a fully-qualified name.
 | 
			
		||||
// Abbreviations can be useful when working with variables, functions, and especially types from
 | 
			
		||||
// multiple namespaces:
 | 
			
		||||
//
 | 
			
		||||
//    // CEL object construction
 | 
			
		||||
//    qual.pkg.version.ObjTypeName{
 | 
			
		||||
//       field: alt.container.ver.FieldTypeName{value: ...}
 | 
			
		||||
//    }
 | 
			
		||||
//
 | 
			
		||||
// Only one the qualified names above may be used as the CEL container, so at least one of these
 | 
			
		||||
// references must be a long qualified name within an otherwise short CEL program. Using the
 | 
			
		||||
// following abbreviations, the program becomes much simpler:
 | 
			
		||||
//
 | 
			
		||||
//    // CEL Go option
 | 
			
		||||
//    Abbrevs("qual.pkg.version.ObjTypeName", "alt.container.ver.FieldTypeName")
 | 
			
		||||
//    // Simplified Object construction
 | 
			
		||||
//    ObjTypeName{field: FieldTypeName{value: ...}}
 | 
			
		||||
//
 | 
			
		||||
// There are a few rules for the qualified names and the simple abbreviations generated from them:
 | 
			
		||||
// - Qualified names must be dot-delimited, e.g. `package.subpkg.name`.
 | 
			
		||||
// - The last element in the qualified name is the abbreviation.
 | 
			
		||||
// - Abbreviations must not collide with each other.
 | 
			
		||||
// - The abbreviation must not collide with unqualified names in use.
 | 
			
		||||
//
 | 
			
		||||
// Abbreviations are distinct from container-based references in the following important ways:
 | 
			
		||||
// - Abbreviations must expand to a fully-qualified name.
 | 
			
		||||
// - Expanded abbreviations do not participate in namespace resolution.
 | 
			
		||||
// - Abbreviation expansion is done instead of the container search for a matching identifier.
 | 
			
		||||
// - Containers follow C++ namespace resolution rules with searches from the most qualified name
 | 
			
		||||
//   to the least qualified name.
 | 
			
		||||
// - Container references within the CEL program may be relative, and are resolved to fully
 | 
			
		||||
//   qualified names at either type-check time or program plan time, whichever comes first.
 | 
			
		||||
//
 | 
			
		||||
// If there is ever a case where an identifier could be in both the container and as an
 | 
			
		||||
// abbreviation, the abbreviation wins as this will ensure that the meaning of a program is
 | 
			
		||||
// preserved between compilations even as the container evolves.
 | 
			
		||||
func Abbrevs(qualifiedNames ...string) ContainerOption {
 | 
			
		||||
	return func(c *Container) (*Container, error) {
 | 
			
		||||
		for _, qn := range qualifiedNames {
 | 
			
		||||
			ind := strings.LastIndex(qn, ".")
 | 
			
		||||
			if ind <= 0 || ind >= len(qn)-1 {
 | 
			
		||||
				return nil, fmt.Errorf(
 | 
			
		||||
					"invalid qualified name: %s, wanted name of the form 'qualified.name'", qn)
 | 
			
		||||
			}
 | 
			
		||||
			alias := qn[ind+1:]
 | 
			
		||||
			var err error
 | 
			
		||||
			c, err = aliasAs("abbreviation", qn, alias)(c)
 | 
			
		||||
			if err != nil {
 | 
			
		||||
				return nil, err
 | 
			
		||||
			}
 | 
			
		||||
		}
 | 
			
		||||
		return c, nil
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Alias associates a fully-qualified name with a user-defined alias.
//
// In general, Abbrevs is preferred to Alias since the names generated from the Abbrevs option
// are more easily traced back to source code. The Alias option is useful for propagating alias
// configuration from one Container instance to another, and may also be useful for remapping
// poorly chosen protobuf message / package names.
//
// Note: all of the rules that apply to Abbrevs also apply to Alias.
func Alias(qualifiedName, alias string) ContainerOption {
	// Delegates to aliasAs; the "alias" kind is used only for error reporting.
	return aliasAs("alias", qualifiedName, alias)
}
 | 
			
		||||
 | 
			
		||||
func aliasAs(kind, qualifiedName, alias string) ContainerOption {
 | 
			
		||||
	return func(c *Container) (*Container, error) {
 | 
			
		||||
		if len(alias) == 0 || strings.Contains(alias, ".") {
 | 
			
		||||
			return nil, fmt.Errorf(
 | 
			
		||||
				"%s must be non-empty and simple (not qualified): %s=%s", kind, kind, alias)
 | 
			
		||||
		}
 | 
			
		||||
 | 
			
		||||
		if qualifiedName[0:1] == "." {
 | 
			
		||||
			return nil, fmt.Errorf("qualified name must not begin with a leading '.': %s",
 | 
			
		||||
				qualifiedName)
 | 
			
		||||
		}
 | 
			
		||||
		ind := strings.LastIndex(qualifiedName, ".")
 | 
			
		||||
		if ind <= 0 || ind == len(qualifiedName)-1 {
 | 
			
		||||
			return nil, fmt.Errorf("%s must refer to a valid qualified name: %s",
 | 
			
		||||
				kind, qualifiedName)
 | 
			
		||||
		}
 | 
			
		||||
		aliasRef, found := c.aliasSet()[alias]
 | 
			
		||||
		if found {
 | 
			
		||||
			return nil, fmt.Errorf(
 | 
			
		||||
				"%s collides with existing reference: name=%s, %s=%s, existing=%s",
 | 
			
		||||
				kind, qualifiedName, kind, alias, aliasRef)
 | 
			
		||||
		}
 | 
			
		||||
		if strings.HasPrefix(c.Name(), alias+".") || c.Name() == alias {
 | 
			
		||||
			return nil, fmt.Errorf(
 | 
			
		||||
				"%s collides with container name: name=%s, %s=%s, container=%s",
 | 
			
		||||
				kind, qualifiedName, kind, alias, c.Name())
 | 
			
		||||
		}
 | 
			
		||||
		if c == nil {
 | 
			
		||||
			c = &Container{}
 | 
			
		||||
		}
 | 
			
		||||
		if c.aliases == nil {
 | 
			
		||||
			c.aliases = make(map[string]string)
 | 
			
		||||
		}
 | 
			
		||||
		c.aliases[alias] = qualifiedName
 | 
			
		||||
		return c, nil
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Name sets the fully-qualified name of the Container.
 | 
			
		||||
func Name(name string) ContainerOption {
 | 
			
		||||
	return func(c *Container) (*Container, error) {
 | 
			
		||||
		if len(name) > 0 && name[0:1] == "." {
 | 
			
		||||
			return nil, fmt.Errorf("container name must not contain a leading '.': %s", name)
 | 
			
		||||
		}
 | 
			
		||||
		if c.Name() == name {
 | 
			
		||||
			return c, nil
 | 
			
		||||
		}
 | 
			
		||||
		if c == nil {
 | 
			
		||||
			return &Container{name: name}, nil
 | 
			
		||||
		}
 | 
			
		||||
		c.name = name
 | 
			
		||||
		return c, nil
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// ToQualifiedName converts an expression AST into a qualified name if possible, with a boolean
 | 
			
		||||
// 'found' value that indicates if the conversion is successful.
 | 
			
		||||
func ToQualifiedName(e *exprpb.Expr) (string, bool) {
 | 
			
		||||
	switch e.ExprKind.(type) {
 | 
			
		||||
	case *exprpb.Expr_IdentExpr:
 | 
			
		||||
		id := e.GetIdentExpr()
 | 
			
		||||
		return id.Name, true
 | 
			
		||||
	case *exprpb.Expr_SelectExpr:
 | 
			
		||||
		sel := e.GetSelectExpr()
 | 
			
		||||
		// Test only expressions are not valid as qualified names.
 | 
			
		||||
		if sel.GetTestOnly() {
 | 
			
		||||
			return "", false
 | 
			
		||||
		}
 | 
			
		||||
		if qual, found := ToQualifiedName(sel.Operand); found {
 | 
			
		||||
			return qual + "." + sel.Field, true
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
	return "", false
 | 
			
		||||
}
 | 
			
		||||
							
								
								
									
										18
									
								
								vendor/github.com/google/cel-go/common/debug/BUILD.bazel
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										18
									
								
								vendor/github.com/google/cel-go/common/debug/BUILD.bazel
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							@@ -0,0 +1,18 @@
 | 
			
		||||
# BUILD rules for the vendored github.com/google/cel-go/common/debug package.
load("@io_bazel_rules_go//go:def.bzl", "go_library")

package(
    default_visibility = ["//visibility:public"],
    licenses = ["notice"],  # Apache 2.0
)

go_library(
    name = "go_default_library",
    srcs = [
        "debug.go",
    ],
    importpath = "github.com/google/cel-go/common/debug",
    deps = [
        "//common:go_default_library",
        "@org_golang_google_genproto//googleapis/api/expr/v1alpha1:go_default_library",
    ],
)
 | 
			
		||||
							
								
								
									
										305
									
								
								vendor/github.com/google/cel-go/common/debug/debug.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										305
									
								
								vendor/github.com/google/cel-go/common/debug/debug.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							@@ -0,0 +1,305 @@
 | 
			
		||||
// Copyright 2018 Google LLC
 | 
			
		||||
//
 | 
			
		||||
// Licensed under the Apache License, Version 2.0 (the "License");
 | 
			
		||||
// you may not use this file except in compliance with the License.
 | 
			
		||||
// You may obtain a copy of the License at
 | 
			
		||||
//
 | 
			
		||||
//      http://www.apache.org/licenses/LICENSE-2.0
 | 
			
		||||
//
 | 
			
		||||
// Unless required by applicable law or agreed to in writing, software
 | 
			
		||||
// distributed under the License is distributed on an "AS IS" BASIS,
 | 
			
		||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | 
			
		||||
// See the License for the specific language governing permissions and
 | 
			
		||||
// limitations under the License.
 | 
			
		||||
 | 
			
		||||
// Package debug provides tools to print a parsed expression graph and
 | 
			
		||||
// adorn each expression element with additional metadata.
 | 
			
		||||
package debug
 | 
			
		||||
 | 
			
		||||
import (
 | 
			
		||||
	"bytes"
 | 
			
		||||
	"fmt"
 | 
			
		||||
	"strconv"
 | 
			
		||||
	"strings"
 | 
			
		||||
 | 
			
		||||
	exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
// Adorner returns debug metadata that will be tacked on to the string
 | 
			
		||||
// representation of an expression.
 | 
			
		||||
type Adorner interface {
 | 
			
		||||
	// GetMetadata for the input context.
 | 
			
		||||
	GetMetadata(ctx interface{}) string
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Writer manages writing expressions to an internal string.
 | 
			
		||||
type Writer interface {
 | 
			
		||||
	fmt.Stringer
 | 
			
		||||
 | 
			
		||||
	// Buffer pushes an expression into an internal queue of expressions to
 | 
			
		||||
	// write to a string.
 | 
			
		||||
	Buffer(e *exprpb.Expr)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// emptyDebugAdorner is the no-op Adorner used when no metadata is requested.
type emptyDebugAdorner struct {
}

// emptyAdorner is the shared no-op adorner instance.
var emptyAdorner Adorner = &emptyDebugAdorner{}

// GetMetadata always returns the empty string, contributing no adornment.
func (a *emptyDebugAdorner) GetMetadata(e interface{}) string {
	return ""
}
 | 
			
		||||
 | 
			
		||||
// ToDebugString gives the unadorned string representation of the Expr.
func ToDebugString(e *exprpb.Expr) string {
	// Delegates to the adorned form with the no-op adorner.
	return ToAdornedDebugString(e, emptyAdorner)
}
 | 
			
		||||
 | 
			
		||||
// ToAdornedDebugString gives the adorned string representation of the Expr.
 | 
			
		||||
func ToAdornedDebugString(e *exprpb.Expr, adorner Adorner) string {
 | 
			
		||||
	w := newDebugWriter(adorner)
 | 
			
		||||
	w.Buffer(e)
 | 
			
		||||
	return w.String()
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// debugWriter is used to print out pretty-printed debug strings.
type debugWriter struct {
	// adorner supplies per-expression metadata appended after each element.
	adorner   Adorner
	// buffer accumulates the formatted output.
	buffer    bytes.Buffer
	// indent is the current indentation depth.
	indent    int
	// lineStart tracks whether the next write begins a fresh line —
	// presumably consumed by append/appendLine (defined later in this file); confirm there.
	lineStart bool
}
 | 
			
		||||
 | 
			
		||||
func newDebugWriter(a Adorner) *debugWriter {
 | 
			
		||||
	return &debugWriter{
 | 
			
		||||
		adorner:   a,
 | 
			
		||||
		indent:    0,
 | 
			
		||||
		lineStart: true,
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Buffer writes the pretty-printed form of the expression into the writer,
// dispatching on the concrete expression kind. Nil expressions are ignored.
func (w *debugWriter) Buffer(e *exprpb.Expr) {
	if e == nil {
		return
	}
	switch e.ExprKind.(type) {
	case *exprpb.Expr_ConstExpr:
		w.append(formatLiteral(e.GetConstExpr()))
	case *exprpb.Expr_IdentExpr:
		w.append(e.GetIdentExpr().Name)
	case *exprpb.Expr_SelectExpr:
		w.appendSelect(e.GetSelectExpr())
	case *exprpb.Expr_CallExpr:
		w.appendCall(e.GetCallExpr())
	case *exprpb.Expr_ListExpr:
		w.appendList(e.GetListExpr())
	case *exprpb.Expr_StructExpr:
		w.appendStruct(e.GetStructExpr())
	case *exprpb.Expr_ComprehensionExpr:
		w.appendComprehension(e.GetComprehensionExpr())
	}
	// Adorner metadata is attached after the expression body.
	w.adorn(e)
}
 | 
			
		||||
 | 
			
		||||
func (w *debugWriter) appendSelect(sel *exprpb.Expr_Select) {
 | 
			
		||||
	w.Buffer(sel.Operand)
 | 
			
		||||
	w.append(".")
 | 
			
		||||
	w.append(sel.Field)
 | 
			
		||||
	if sel.TestOnly {
 | 
			
		||||
		w.append("~test-only~")
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (w *debugWriter) appendCall(call *exprpb.Expr_Call) {
 | 
			
		||||
	if call.Target != nil {
 | 
			
		||||
		w.Buffer(call.Target)
 | 
			
		||||
		w.append(".")
 | 
			
		||||
	}
 | 
			
		||||
	w.append(call.Function)
 | 
			
		||||
	w.append("(")
 | 
			
		||||
	if len(call.GetArgs()) > 0 {
 | 
			
		||||
		w.addIndent()
 | 
			
		||||
		w.appendLine()
 | 
			
		||||
		for i, arg := range call.Args {
 | 
			
		||||
			if i > 0 {
 | 
			
		||||
				w.append(",")
 | 
			
		||||
				w.appendLine()
 | 
			
		||||
			}
 | 
			
		||||
			w.Buffer(arg)
 | 
			
		||||
		}
 | 
			
		||||
		w.removeIndent()
 | 
			
		||||
		w.appendLine()
 | 
			
		||||
	}
 | 
			
		||||
	w.append(")")
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (w *debugWriter) appendList(list *exprpb.Expr_CreateList) {
 | 
			
		||||
	w.append("[")
 | 
			
		||||
	if len(list.GetElements()) > 0 {
 | 
			
		||||
		w.appendLine()
 | 
			
		||||
		w.addIndent()
 | 
			
		||||
		for i, elem := range list.Elements {
 | 
			
		||||
			if i > 0 {
 | 
			
		||||
				w.append(",")
 | 
			
		||||
				w.appendLine()
 | 
			
		||||
			}
 | 
			
		||||
			w.Buffer(elem)
 | 
			
		||||
		}
 | 
			
		||||
		w.removeIndent()
 | 
			
		||||
		w.appendLine()
 | 
			
		||||
	}
 | 
			
		||||
	w.append("]")
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (w *debugWriter) appendStruct(obj *exprpb.Expr_CreateStruct) {
 | 
			
		||||
	if obj.MessageName != "" {
 | 
			
		||||
		w.appendObject(obj)
 | 
			
		||||
	} else {
 | 
			
		||||
		w.appendMap(obj)
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (w *debugWriter) appendObject(obj *exprpb.Expr_CreateStruct) {
 | 
			
		||||
	w.append(obj.MessageName)
 | 
			
		||||
	w.append("{")
 | 
			
		||||
	if len(obj.Entries) > 0 {
 | 
			
		||||
		w.appendLine()
 | 
			
		||||
		w.addIndent()
 | 
			
		||||
		for i, entry := range obj.Entries {
 | 
			
		||||
			if i > 0 {
 | 
			
		||||
				w.append(",")
 | 
			
		||||
				w.appendLine()
 | 
			
		||||
			}
 | 
			
		||||
			w.append(entry.GetFieldKey())
 | 
			
		||||
			w.append(":")
 | 
			
		||||
			w.Buffer(entry.Value)
 | 
			
		||||
			w.adorn(entry)
 | 
			
		||||
		}
 | 
			
		||||
		w.removeIndent()
 | 
			
		||||
		w.appendLine()
 | 
			
		||||
	}
 | 
			
		||||
	w.append("}")
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (w *debugWriter) appendMap(obj *exprpb.Expr_CreateStruct) {
 | 
			
		||||
	w.append("{")
 | 
			
		||||
	if len(obj.Entries) > 0 {
 | 
			
		||||
		w.appendLine()
 | 
			
		||||
		w.addIndent()
 | 
			
		||||
		for i, entry := range obj.Entries {
 | 
			
		||||
			if i > 0 {
 | 
			
		||||
				w.append(",")
 | 
			
		||||
				w.appendLine()
 | 
			
		||||
			}
 | 
			
		||||
			w.Buffer(entry.GetMapKey())
 | 
			
		||||
			w.append(":")
 | 
			
		||||
			w.Buffer(entry.Value)
 | 
			
		||||
			w.adorn(entry)
 | 
			
		||||
		}
 | 
			
		||||
		w.removeIndent()
 | 
			
		||||
		w.appendLine()
 | 
			
		||||
	}
 | 
			
		||||
	w.append("}")
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (w *debugWriter) appendComprehension(comprehension *exprpb.Expr_Comprehension) {
 | 
			
		||||
	w.append("__comprehension__(")
 | 
			
		||||
	w.addIndent()
 | 
			
		||||
	w.appendLine()
 | 
			
		||||
	w.append("// Variable")
 | 
			
		||||
	w.appendLine()
 | 
			
		||||
	w.append(comprehension.IterVar)
 | 
			
		||||
	w.append(",")
 | 
			
		||||
	w.appendLine()
 | 
			
		||||
	w.append("// Target")
 | 
			
		||||
	w.appendLine()
 | 
			
		||||
	w.Buffer(comprehension.IterRange)
 | 
			
		||||
	w.append(",")
 | 
			
		||||
	w.appendLine()
 | 
			
		||||
	w.append("// Accumulator")
 | 
			
		||||
	w.appendLine()
 | 
			
		||||
	w.append(comprehension.AccuVar)
 | 
			
		||||
	w.append(",")
 | 
			
		||||
	w.appendLine()
 | 
			
		||||
	w.append("// Init")
 | 
			
		||||
	w.appendLine()
 | 
			
		||||
	w.Buffer(comprehension.AccuInit)
 | 
			
		||||
	w.append(",")
 | 
			
		||||
	w.appendLine()
 | 
			
		||||
	w.append("// LoopCondition")
 | 
			
		||||
	w.appendLine()
 | 
			
		||||
	w.Buffer(comprehension.LoopCondition)
 | 
			
		||||
	w.append(",")
 | 
			
		||||
	w.appendLine()
 | 
			
		||||
	w.append("// LoopStep")
 | 
			
		||||
	w.appendLine()
 | 
			
		||||
	w.Buffer(comprehension.LoopStep)
 | 
			
		||||
	w.append(",")
 | 
			
		||||
	w.appendLine()
 | 
			
		||||
	w.append("// Result")
 | 
			
		||||
	w.appendLine()
 | 
			
		||||
	w.Buffer(comprehension.Result)
 | 
			
		||||
	w.append(")")
 | 
			
		||||
	w.removeIndent()
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func formatLiteral(c *exprpb.Constant) string {
 | 
			
		||||
	switch c.ConstantKind.(type) {
 | 
			
		||||
	case *exprpb.Constant_BoolValue:
 | 
			
		||||
		return fmt.Sprintf("%t", c.GetBoolValue())
 | 
			
		||||
	case *exprpb.Constant_BytesValue:
 | 
			
		||||
		return fmt.Sprintf("b\"%s\"", string(c.GetBytesValue()))
 | 
			
		||||
	case *exprpb.Constant_DoubleValue:
 | 
			
		||||
		return fmt.Sprintf("%v", c.GetDoubleValue())
 | 
			
		||||
	case *exprpb.Constant_Int64Value:
 | 
			
		||||
		return fmt.Sprintf("%d", c.GetInt64Value())
 | 
			
		||||
	case *exprpb.Constant_StringValue:
 | 
			
		||||
		return strconv.Quote(c.GetStringValue())
 | 
			
		||||
	case *exprpb.Constant_Uint64Value:
 | 
			
		||||
		return fmt.Sprintf("%du", c.GetUint64Value())
 | 
			
		||||
	case *exprpb.Constant_NullValue:
 | 
			
		||||
		return "null"
 | 
			
		||||
	default:
 | 
			
		||||
		panic("Unknown constant type")
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// append writes s to the output buffer, first emitting any pending
// indentation when this is the first write on the current line.
func (w *debugWriter) append(s string) {
	w.doIndent()
	w.buffer.WriteString(s)
}
 | 
			
		||||
 | 
			
		||||
// appendFormat formats args per f (fmt.Sprintf semantics) and appends the
// result to the output buffer.
func (w *debugWriter) appendFormat(f string, args ...interface{}) {
	w.append(fmt.Sprintf(f, args...))
}
 | 
			
		||||
 | 
			
		||||
func (w *debugWriter) doIndent() {
 | 
			
		||||
	if w.lineStart {
 | 
			
		||||
		w.lineStart = false
 | 
			
		||||
		w.buffer.WriteString(strings.Repeat("  ", w.indent))
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// adorn appends the adorner's metadata for e (e.g. type or id annotations)
// immediately after the element just written.
func (w *debugWriter) adorn(e interface{}) {
	w.append(w.adorner.GetMetadata(e))
}
 | 
			
		||||
 | 
			
		||||
// appendLine terminates the current line and marks the writer as being at
// the start of a new line so the next append re-indents.
func (w *debugWriter) appendLine() {
	w.buffer.WriteString("\n")
	w.lineStart = true
}
 | 
			
		||||
 | 
			
		||||
// addIndent increases the indentation level by one.
func (w *debugWriter) addIndent() {
	w.indent++
}
 | 
			
		||||
 | 
			
		||||
// removeIndent decreases the indentation level by one, panicking on an
// unbalanced add/remove pairing (a programming error in the writer's
// callers, not recoverable input).
func (w *debugWriter) removeIndent() {
	w.indent--
	if w.indent < 0 {
		panic("negative indent")
	}
}
 | 
			
		||||
 | 
			
		||||
// String returns the debug output accumulated so far.
func (w *debugWriter) String() string {
	return w.buffer.String()
}
 | 
			
		||||
							
								
								
									
										17
									
								
								vendor/github.com/google/cel-go/common/doc.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										17
									
								
								vendor/github.com/google/cel-go/common/doc.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							@@ -0,0 +1,17 @@
 | 
			
		||||
// Copyright 2018 Google LLC
 | 
			
		||||
//
 | 
			
		||||
// Licensed under the Apache License, Version 2.0 (the "License");
 | 
			
		||||
// you may not use this file except in compliance with the License.
 | 
			
		||||
// You may obtain a copy of the License at
 | 
			
		||||
//
 | 
			
		||||
//      http://www.apache.org/licenses/LICENSE-2.0
 | 
			
		||||
//
 | 
			
		||||
// Unless required by applicable law or agreed to in writing, software
 | 
			
		||||
// distributed under the License is distributed on an "AS IS" BASIS,
 | 
			
		||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | 
			
		||||
// See the License for the specific language governing permissions and
 | 
			
		||||
// limitations under the License.
 | 
			
		||||
 | 
			
		||||
// Package common defines types and utilities common to expression parsing,
 | 
			
		||||
// checking, and interpretation
 | 
			
		||||
package common
 | 
			
		||||
							
								
								
									
										70
									
								
								vendor/github.com/google/cel-go/common/error.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										70
									
								
								vendor/github.com/google/cel-go/common/error.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							@@ -0,0 +1,70 @@
 | 
			
		||||
// Copyright 2018 Google LLC
 | 
			
		||||
//
 | 
			
		||||
// Licensed under the Apache License, Version 2.0 (the "License");
 | 
			
		||||
// you may not use this file except in compliance with the License.
 | 
			
		||||
// You may obtain a copy of the License at
 | 
			
		||||
//
 | 
			
		||||
//      http://www.apache.org/licenses/LICENSE-2.0
 | 
			
		||||
//
 | 
			
		||||
// Unless required by applicable law or agreed to in writing, software
 | 
			
		||||
// distributed under the License is distributed on an "AS IS" BASIS,
 | 
			
		||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | 
			
		||||
// See the License for the specific language governing permissions and
 | 
			
		||||
// limitations under the License.
 | 
			
		||||
 | 
			
		||||
package common
 | 
			
		||||
 | 
			
		||||
import (
 | 
			
		||||
	"fmt"
 | 
			
		||||
	"strings"
 | 
			
		||||
	"unicode/utf8"
 | 
			
		||||
 | 
			
		||||
	"golang.org/x/text/width"
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
// Error type which references a location within source and a message.
type Error struct {
	Location Location // position in the source where the problem occurred
	Message  string   // human-readable description of the problem
}
 | 
			
		||||
 | 
			
		||||
// Characters used to draw the indicator line beneath an offending source
// snippet: dots pad up to the error column and a caret marks it.
const (
	dot = "." // filler under each character preceding the error column
	ind = "^" // indicator under the error column itself
)
 | 
			
		||||
 | 
			
		||||
// Full-width variants of the indicator characters, used under multi-byte
// runes so the indicator line stays visually aligned with the snippet.
var (
	wideDot = width.Widen.String(dot)
	wideInd = width.Widen.String(ind)
)
 | 
			
		||||
 | 
			
		||||
// ToDisplayString decorates the error message with the source location.
 | 
			
		||||
func (e *Error) ToDisplayString(source Source) string {
 | 
			
		||||
	var result = fmt.Sprintf("ERROR: %s:%d:%d: %s",
 | 
			
		||||
		source.Description(),
 | 
			
		||||
		e.Location.Line(),
 | 
			
		||||
		e.Location.Column()+1, // add one to the 0-based column for display
 | 
			
		||||
		e.Message)
 | 
			
		||||
	if snippet, found := source.Snippet(e.Location.Line()); found {
 | 
			
		||||
		snippet := strings.Replace(snippet, "\t", " ", -1)
 | 
			
		||||
		srcLine := "\n | " + snippet
 | 
			
		||||
		var bytes = []byte(snippet)
 | 
			
		||||
		var indLine = "\n | "
 | 
			
		||||
		for i := 0; i < e.Location.Column() && len(bytes) > 0; i++ {
 | 
			
		||||
			_, sz := utf8.DecodeRune(bytes)
 | 
			
		||||
			bytes = bytes[sz:]
 | 
			
		||||
			if sz > 1 {
 | 
			
		||||
				indLine += wideDot
 | 
			
		||||
			} else {
 | 
			
		||||
				indLine += dot
 | 
			
		||||
			}
 | 
			
		||||
		}
 | 
			
		||||
		if _, sz := utf8.DecodeRune(bytes); sz > 1 {
 | 
			
		||||
			indLine += wideInd
 | 
			
		||||
		} else {
 | 
			
		||||
			indLine += ind
 | 
			
		||||
		}
 | 
			
		||||
		result += srcLine + indLine
 | 
			
		||||
	}
 | 
			
		||||
	return result
 | 
			
		||||
}
 | 
			
		||||
							
								
								
									
										74
									
								
								vendor/github.com/google/cel-go/common/errors.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										74
									
								
								vendor/github.com/google/cel-go/common/errors.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							@@ -0,0 +1,74 @@
 | 
			
		||||
// Copyright 2018 Google LLC
 | 
			
		||||
//
 | 
			
		||||
// Licensed under the Apache License, Version 2.0 (the "License");
 | 
			
		||||
// you may not use this file except in compliance with the License.
 | 
			
		||||
// You may obtain a copy of the License at
 | 
			
		||||
//
 | 
			
		||||
//      http://www.apache.org/licenses/LICENSE-2.0
 | 
			
		||||
//
 | 
			
		||||
// Unless required by applicable law or agreed to in writing, software
 | 
			
		||||
// distributed under the License is distributed on an "AS IS" BASIS,
 | 
			
		||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | 
			
		||||
// See the License for the specific language governing permissions and
 | 
			
		||||
// limitations under the License.
 | 
			
		||||
 | 
			
		||||
package common
 | 
			
		||||
 | 
			
		||||
import (
 | 
			
		||||
	"fmt"
 | 
			
		||||
	"sort"
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
// Errors type which contains a list of errors observed during parsing.
type Errors struct {
	errors []Error // accumulated errors, sorted lazily by ToDisplayString
	source Source  // the source the errors refer to, used for display
}
 | 
			
		||||
 | 
			
		||||
// NewErrors creates a new instance of the Errors type.
 | 
			
		||||
func NewErrors(source Source) *Errors {
 | 
			
		||||
	return &Errors{
 | 
			
		||||
		errors: []Error{},
 | 
			
		||||
		source: source}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// ReportError records an error at a source location.
 | 
			
		||||
func (e *Errors) ReportError(l Location, format string, args ...interface{}) {
 | 
			
		||||
	err := Error{
 | 
			
		||||
		Location: l,
 | 
			
		||||
		Message:  fmt.Sprintf(format, args...),
 | 
			
		||||
	}
 | 
			
		||||
	e.errors = append(e.errors, err)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// GetErrors returns the list of observed errors.
// Note: the returned slice is a re-slice of the internal storage and shares
// its backing array with the receiver; callers should treat it as read-only.
func (e *Errors) GetErrors() []Error {
	return e.errors[:]
}
 | 
			
		||||
 | 
			
		||||
// Append takes an Errors object as input creates a new Errors object with the current and input
 | 
			
		||||
// errors.
 | 
			
		||||
func (e *Errors) Append(errs []Error) *Errors {
 | 
			
		||||
	return &Errors{
 | 
			
		||||
		errors: append(e.errors, errs...),
 | 
			
		||||
		source: e.source,
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// ToDisplayString returns the error set to a newline delimited string.
 | 
			
		||||
func (e *Errors) ToDisplayString() string {
 | 
			
		||||
	var result = ""
 | 
			
		||||
	sort.SliceStable(e.errors, func(i, j int) bool {
 | 
			
		||||
		ei := e.errors[i].Location
 | 
			
		||||
		ej := e.errors[j].Location
 | 
			
		||||
		return ei.Line() < ej.Line() ||
 | 
			
		||||
			(ei.Line() == ej.Line() && ei.Column() < ej.Column())
 | 
			
		||||
	})
 | 
			
		||||
	for i, err := range e.errors {
 | 
			
		||||
		if i >= 1 {
 | 
			
		||||
			result += "\n"
 | 
			
		||||
		}
 | 
			
		||||
		result += err.ToDisplayString(e.source)
 | 
			
		||||
	}
 | 
			
		||||
	return result
 | 
			
		||||
}
 | 
			
		||||
							
								
								
									
										51
									
								
								vendor/github.com/google/cel-go/common/location.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										51
									
								
								vendor/github.com/google/cel-go/common/location.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							@@ -0,0 +1,51 @@
 | 
			
		||||
// Copyright 2018 Google LLC
 | 
			
		||||
//
 | 
			
		||||
// Licensed under the Apache License, Version 2.0 (the "License");
 | 
			
		||||
// you may not use this file except in compliance with the License.
 | 
			
		||||
// You may obtain a copy of the License at
 | 
			
		||||
//
 | 
			
		||||
//      http://www.apache.org/licenses/LICENSE-2.0
 | 
			
		||||
//
 | 
			
		||||
// Unless required by applicable law or agreed to in writing, software
 | 
			
		||||
// distributed under the License is distributed on an "AS IS" BASIS,
 | 
			
		||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | 
			
		||||
// See the License for the specific language governing permissions and
 | 
			
		||||
// limitations under the License.
 | 
			
		||||
 | 
			
		||||
package common
 | 
			
		||||
 | 
			
		||||
// Location interface to represent a location within Source.
type Location interface {
	Line() int   // 1-based line number within source.
	Column() int // 0-based column number within source.
}
 | 
			
		||||
 | 
			
		||||
// SourceLocation helper type to manually construct a location.
type SourceLocation struct {
	line   int // 1-based line number
	column int // 0-based column number
}
 | 
			
		||||
 | 
			
		||||
var (
	// Interface guard: SourceLocation implements the Location interface.
	_ Location = &SourceLocation{}
	// NoLocation is a particular illegal location.
	NoLocation = &SourceLocation{-1, -1}
)
 | 
			
		||||
 | 
			
		||||
// NewLocation creates a new location.
 | 
			
		||||
func NewLocation(line, column int) Location {
 | 
			
		||||
	return &SourceLocation{
 | 
			
		||||
		line:   line,
 | 
			
		||||
		column: column}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Line returns the 1-based line of the location.
func (l *SourceLocation) Line() int {
	return l.line
}
 | 
			
		||||
 | 
			
		||||
// Column returns the 0-based column number of the location.
func (l *SourceLocation) Column() int {
	return l.column
}
 | 
			
		||||
							
								
								
									
										14
									
								
								vendor/github.com/google/cel-go/common/operators/BUILD.bazel
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										14
									
								
								vendor/github.com/google/cel-go/common/operators/BUILD.bazel
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							@@ -0,0 +1,14 @@
 | 
			
		||||
# Bazel build definition for the vendored cel-go operators package.
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")

package(
    default_visibility = ["//visibility:public"],
    licenses = ["notice"],  # Apache 2.0
)

go_library(
    name = "go_default_library",
    srcs = [
        "operators.go",
    ],
    importpath = "github.com/google/cel-go/common/operators",
)
 | 
			
		||||
							
								
								
									
										145
									
								
								vendor/github.com/google/cel-go/common/operators/operators.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										145
									
								
								vendor/github.com/google/cel-go/common/operators/operators.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							@@ -0,0 +1,145 @@
 | 
			
		||||
// Copyright 2018 Google LLC
 | 
			
		||||
//
 | 
			
		||||
// Licensed under the Apache License, Version 2.0 (the "License");
 | 
			
		||||
// you may not use this file except in compliance with the License.
 | 
			
		||||
// You may obtain a copy of the License at
 | 
			
		||||
//
 | 
			
		||||
//      http://www.apache.org/licenses/LICENSE-2.0
 | 
			
		||||
//
 | 
			
		||||
// Unless required by applicable law or agreed to in writing, software
 | 
			
		||||
// distributed under the License is distributed on an "AS IS" BASIS,
 | 
			
		||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | 
			
		||||
// See the License for the specific language governing permissions and
 | 
			
		||||
// limitations under the License.
 | 
			
		||||
 | 
			
		||||
// Package operators defines the internal function names of operators.
 | 
			
		||||
//
 | 
			
		||||
// All operators in the expression language are modelled as function calls.
 | 
			
		||||
package operators
 | 
			
		||||
 | 
			
		||||
// String "names" for CEL operators. These values are the internal function
// names the parser emits for each operator; they are part of the checked
// expression format and must not change.
const (
	// Symbolic operators.
	Conditional   = "_?_:_"
	LogicalAnd    = "_&&_"
	LogicalOr     = "_||_"
	LogicalNot    = "!_"
	Equals        = "_==_"
	NotEquals     = "_!=_"
	Less          = "_<_"
	LessEquals    = "_<=_"
	Greater       = "_>_"
	GreaterEquals = "_>=_"
	Add           = "_+_"
	Subtract      = "_-_"
	Multiply      = "_*_"
	Divide        = "_/_"
	Modulo        = "_%_"
	Negate        = "-_"
	Index         = "_[_]"

	// Macros, must have a valid identifier.
	Has       = "has"
	All       = "all"
	Exists    = "exists"
	ExistsOne = "exists_one"
	Map       = "map"
	Filter    = "filter"

	// Named operators, must not be valid identifiers.
	NotStrictlyFalse = "@not_strictly_false"
	In               = "@in"

	// Deprecated: named operators with valid identifiers.
	OldNotStrictlyFalse = "__not_strictly_false__"
	OldIn               = "_in_"
)
 | 
			
		||||
 | 
			
		||||
var (
	// operators maps binary-operator source text to its internal function
	// name; used by Find.
	operators = map[string]string{
		"+":  Add,
		"/":  Divide,
		"==": Equals,
		">":  Greater,
		">=": GreaterEquals,
		"in": In,
		"<":  Less,
		"<=": LessEquals,
		"%":  Modulo,
		"*":  Multiply,
		"!=": NotEquals,
		"-":  Subtract,
	}
	// reverseOperators maps internal function names back to display text;
	// used by FindReverse. Note both In and OldIn unparse to "in", and both
	// Negate and Subtract unparse to "-".
	reverseOperators = map[string]string{
		Add:           "+",
		Divide:        "/",
		Equals:        "==",
		Greater:       ">",
		GreaterEquals: ">=",
		In:            "in",
		Less:          "<",
		LessEquals:    "<=",
		LogicalAnd:    "&&",
		LogicalNot:    "!",
		LogicalOr:     "||",
		Modulo:        "%",
		Multiply:      "*",
		Negate:        "-",
		NotEquals:     "!=",
		OldIn:         "in",
		Subtract:      "-",
	}
	// precedence of the operator, where the higher value means higher.
	precedence = map[string]int{
		Conditional:   8,
		LogicalOr:     7,
		LogicalAnd:    6,
		Equals:        5,
		Greater:       5,
		GreaterEquals: 5,
		In:            5,
		Less:          5,
		LessEquals:    5,
		NotEquals:     5,
		OldIn:         5,
		Add:           4,
		Subtract:      4,
		Divide:        3,
		Modulo:        3,
		Multiply:      3,
		LogicalNot:    2,
		Negate:        2,
		Index:         1,
	}
)
 | 
			
		||||
 | 
			
		||||
// Find the internal function name for an operator, if the input text is one.
 | 
			
		||||
func Find(text string) (string, bool) {
 | 
			
		||||
	op, found := operators[text]
 | 
			
		||||
	return op, found
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// FindReverse returns the unmangled, text representation of the operator.
 | 
			
		||||
func FindReverse(op string) (string, bool) {
 | 
			
		||||
	txt, found := reverseOperators[op]
 | 
			
		||||
	return txt, found
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// FindReverseBinaryOperator returns the unmangled, text representation of a binary operator.
 | 
			
		||||
func FindReverseBinaryOperator(op string) (string, bool) {
 | 
			
		||||
	if op == LogicalNot || op == Negate {
 | 
			
		||||
		return "", false
 | 
			
		||||
	}
 | 
			
		||||
	txt, found := reverseOperators[op]
 | 
			
		||||
	return txt, found
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Precedence returns the operator precedence, where the higher the number indicates
 | 
			
		||||
// higher precedence operations.
 | 
			
		||||
func Precedence(op string) int {
 | 
			
		||||
	p, found := precedence[op]
 | 
			
		||||
	if found {
 | 
			
		||||
		return p
 | 
			
		||||
	}
 | 
			
		||||
	return 0
 | 
			
		||||
}
 | 
			
		||||
							
								
								
									
										14
									
								
								vendor/github.com/google/cel-go/common/overloads/BUILD.bazel
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										14
									
								
								vendor/github.com/google/cel-go/common/overloads/BUILD.bazel
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							@@ -0,0 +1,14 @@
 | 
			
		||||
# Bazel build definition for the vendored cel-go overloads package.
load("@io_bazel_rules_go//go:def.bzl", "go_library")

package(
    default_visibility = ["//visibility:public"],
    licenses = ["notice"],  # Apache 2.0
)

go_library(
    name = "go_default_library",
    srcs = [
        "overloads.go",
    ],
    importpath = "github.com/google/cel-go/common/overloads",
)
 | 
			
		||||
							
								
								
									
										293
									
								
								vendor/github.com/google/cel-go/common/overloads/overloads.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										293
									
								
								vendor/github.com/google/cel-go/common/overloads/overloads.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							@@ -0,0 +1,293 @@
 | 
			
		||||
// Copyright 2018 Google LLC
 | 
			
		||||
//
 | 
			
		||||
// Licensed under the Apache License, Version 2.0 (the "License");
 | 
			
		||||
// you may not use this file except in compliance with the License.
 | 
			
		||||
// You may obtain a copy of the License at
 | 
			
		||||
//
 | 
			
		||||
//      http://www.apache.org/licenses/LICENSE-2.0
 | 
			
		||||
//
 | 
			
		||||
// Unless required by applicable law or agreed to in writing, software
 | 
			
		||||
// distributed under the License is distributed on an "AS IS" BASIS,
 | 
			
		||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | 
			
		||||
// See the License for the specific language governing permissions and
 | 
			
		||||
// limitations under the License.
 | 
			
		||||
 | 
			
		||||
// Package overloads defines the internal overload identifiers for function and
 | 
			
		||||
// operator overloads.
 | 
			
		||||
package overloads
 | 
			
		||||
 | 
			
		||||
// Boolean logic overloads.
// All identifiers in this file are overload IDs referenced by checked
// expressions; the string values are part of the public format and must not
// change.
const (
	Conditional            = "conditional"
	LogicalAnd             = "logical_and"
	LogicalOr              = "logical_or"
	LogicalNot             = "logical_not"
	NotStrictlyFalse       = "not_strictly_false"
	Equals                 = "equals"
	NotEquals              = "not_equals"
	LessBool               = "less_bool"
	LessInt64              = "less_int64"
	LessUint64             = "less_uint64"
	LessDouble             = "less_double"
	LessString             = "less_string"
	LessBytes              = "less_bytes"
	LessTimestamp          = "less_timestamp"
	LessDuration           = "less_duration"
	LessEqualsBool         = "less_equals_bool"
	LessEqualsInt64        = "less_equals_int64"
	LessEqualsUint64       = "less_equals_uint64"
	LessEqualsDouble       = "less_equals_double"
	LessEqualsString       = "less_equals_string"
	LessEqualsBytes        = "less_equals_bytes"
	LessEqualsTimestamp    = "less_equals_timestamp"
	LessEqualsDuration     = "less_equals_duration"
	GreaterBool            = "greater_bool"
	GreaterInt64           = "greater_int64"
	GreaterUint64          = "greater_uint64"
	GreaterDouble          = "greater_double"
	GreaterString          = "greater_string"
	GreaterBytes           = "greater_bytes"
	GreaterTimestamp       = "greater_timestamp"
	GreaterDuration        = "greater_duration"
	GreaterEqualsBool      = "greater_equals_bool"
	GreaterEqualsInt64     = "greater_equals_int64"
	GreaterEqualsUint64    = "greater_equals_uint64"
	GreaterEqualsDouble    = "greater_equals_double"
	GreaterEqualsString    = "greater_equals_string"
	GreaterEqualsBytes     = "greater_equals_bytes"
	GreaterEqualsTimestamp = "greater_equals_timestamp"
	GreaterEqualsDuration  = "greater_equals_duration"
)

// Math overloads
const (
	AddInt64                   = "add_int64"
	AddUint64                  = "add_uint64"
	AddDouble                  = "add_double"
	AddString                  = "add_string"
	AddBytes                   = "add_bytes"
	AddList                    = "add_list"
	AddTimestampDuration       = "add_timestamp_duration"
	AddDurationTimestamp       = "add_duration_timestamp"
	AddDurationDuration        = "add_duration_duration"
	SubtractInt64              = "subtract_int64"
	SubtractUint64             = "subtract_uint64"
	SubtractDouble             = "subtract_double"
	SubtractTimestampTimestamp = "subtract_timestamp_timestamp"
	SubtractTimestampDuration  = "subtract_timestamp_duration"
	SubtractDurationDuration   = "subtract_duration_duration"
	MultiplyInt64              = "multiply_int64"
	MultiplyUint64             = "multiply_uint64"
	MultiplyDouble             = "multiply_double"
	DivideInt64                = "divide_int64"
	DivideUint64               = "divide_uint64"
	DivideDouble               = "divide_double"
	ModuloInt64                = "modulo_int64"
	ModuloUint64               = "modulo_uint64"
	NegateInt64                = "negate_int64"
	NegateDouble               = "negate_double"
)

// Index overloads
const (
	IndexList    = "index_list"
	IndexMap     = "index_map"
	IndexMessage = "index_message" // TODO: introduce concept of types.Message
)

// In operators
const (
	DeprecatedIn = "in"
	InList       = "in_list"
	InMap        = "in_map"
	InMessage    = "in_message" // TODO: introduce concept of types.Message
)

// Size overloads
const (
	Size           = "size"
	SizeString     = "size_string"
	SizeBytes      = "size_bytes"
	SizeList       = "size_list"
	SizeMap        = "size_map"
	SizeStringInst = "string_size"
	SizeBytesInst  = "bytes_size"
	SizeListInst   = "list_size"
	SizeMapInst    = "map_size"
)

// String function names.
const (
	Contains   = "contains"
	EndsWith   = "endsWith"
	Matches    = "matches"
	StartsWith = "startsWith"
)

// String function overload names.
const (
	ContainsString   = "contains_string"
	EndsWithString   = "ends_with_string"
	MatchesString    = "matches_string"
	StartsWithString = "starts_with_string"
)
 | 
			
		||||
 | 
			
		||||
// Time-based functions.
const (
	TimeGetFullYear     = "getFullYear"
	TimeGetMonth        = "getMonth"
	TimeGetDayOfYear    = "getDayOfYear"
	TimeGetDate         = "getDate"
	TimeGetDayOfMonth   = "getDayOfMonth"
	TimeGetDayOfWeek    = "getDayOfWeek"
	TimeGetHours        = "getHours"
	TimeGetMinutes      = "getMinutes"
	TimeGetSeconds      = "getSeconds"
	TimeGetMilliseconds = "getMilliseconds"
)

// Timestamp overloads for time functions without timezones.
const (
	TimestampToYear                = "timestamp_to_year"
	TimestampToMonth               = "timestamp_to_month"
	TimestampToDayOfYear           = "timestamp_to_day_of_year"
	TimestampToDayOfMonthZeroBased = "timestamp_to_day_of_month"
	TimestampToDayOfMonthOneBased  = "timestamp_to_day_of_month_1_based"
	TimestampToDayOfWeek           = "timestamp_to_day_of_week"
	TimestampToHours               = "timestamp_to_hours"
	TimestampToMinutes             = "timestamp_to_minutes"
	TimestampToSeconds             = "timestamp_to_seconds"
	TimestampToMilliseconds        = "timestamp_to_milliseconds"
)

// Timestamp overloads for time functions with timezones.
const (
	TimestampToYearWithTz                = "timestamp_to_year_with_tz"
	TimestampToMonthWithTz               = "timestamp_to_month_with_tz"
	TimestampToDayOfYearWithTz           = "timestamp_to_day_of_year_with_tz"
	TimestampToDayOfMonthZeroBasedWithTz = "timestamp_to_day_of_month_with_tz"
	TimestampToDayOfMonthOneBasedWithTz  = "timestamp_to_day_of_month_1_based_with_tz"
	TimestampToDayOfWeekWithTz           = "timestamp_to_day_of_week_with_tz"
	TimestampToHoursWithTz               = "timestamp_to_hours_with_tz"
	TimestampToMinutesWithTz             = "timestamp_to_minutes_with_tz"
	// NOTE(review): value reads "_tz" rather than "_with_tz" unlike its
	// siblings; the string is a public overload ID, so the inconsistency
	// must be preserved, not "fixed".
	TimestampToSecondsWithTz      = "timestamp_to_seconds_tz"
	TimestampToMillisecondsWithTz = "timestamp_to_milliseconds_with_tz"
)
 | 
			
		||||
 | 
			
		||||
// Duration overloads for time functions.
 | 
			
		||||
const (
 | 
			
		||||
	DurationToHours        = "duration_to_hours"
 | 
			
		||||
	DurationToMinutes      = "duration_to_minutes"
 | 
			
		||||
	DurationToSeconds      = "duration_to_seconds"
 | 
			
		||||
	DurationToMilliseconds = "duration_to_milliseconds"
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
// Type conversion methods and overloads
 | 
			
		||||
const (
 | 
			
		||||
	TypeConvertInt       = "int"
 | 
			
		||||
	TypeConvertUint      = "uint"
 | 
			
		||||
	TypeConvertDouble    = "double"
 | 
			
		||||
	TypeConvertBool      = "bool"
 | 
			
		||||
	TypeConvertString    = "string"
 | 
			
		||||
	TypeConvertBytes     = "bytes"
 | 
			
		||||
	TypeConvertTimestamp = "timestamp"
 | 
			
		||||
	TypeConvertDuration  = "duration"
 | 
			
		||||
	TypeConvertType      = "type"
 | 
			
		||||
	TypeConvertDyn       = "dyn"
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
// Int conversion functions.
 | 
			
		||||
const (
 | 
			
		||||
	IntToInt       = "int64_to_int64"
 | 
			
		||||
	UintToInt      = "uint64_to_int64"
 | 
			
		||||
	DoubleToInt    = "double_to_int64"
 | 
			
		||||
	StringToInt    = "string_to_int64"
 | 
			
		||||
	TimestampToInt = "timestamp_to_int64"
 | 
			
		||||
	DurationToInt  = "duration_to_int64"
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
// Uint conversion functions.
 | 
			
		||||
const (
 | 
			
		||||
	UintToUint   = "uint64_to_uint64"
 | 
			
		||||
	IntToUint    = "int64_to_uint64"
 | 
			
		||||
	DoubleToUint = "double_to_uint64"
 | 
			
		||||
	StringToUint = "string_to_uint64"
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
// Double conversion functions.
 | 
			
		||||
const (
 | 
			
		||||
	DoubleToDouble = "double_to_double"
 | 
			
		||||
	IntToDouble    = "int64_to_double"
 | 
			
		||||
	UintToDouble   = "uint64_to_double"
 | 
			
		||||
	StringToDouble = "string_to_double"
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
// Bool conversion functions.
 | 
			
		||||
const (
 | 
			
		||||
	BoolToBool   = "bool_to_bool"
 | 
			
		||||
	StringToBool = "string_to_bool"
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
// Bytes conversion functions.
 | 
			
		||||
const (
 | 
			
		||||
	BytesToBytes  = "bytes_to_bytes"
 | 
			
		||||
	StringToBytes = "string_to_bytes"
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
// String conversion functions.
 | 
			
		||||
const (
 | 
			
		||||
	StringToString    = "string_to_string"
 | 
			
		||||
	BoolToString      = "bool_to_string"
 | 
			
		||||
	IntToString       = "int64_to_string"
 | 
			
		||||
	UintToString      = "uint64_to_string"
 | 
			
		||||
	DoubleToString    = "double_to_string"
 | 
			
		||||
	BytesToString     = "bytes_to_string"
 | 
			
		||||
	TimestampToString = "timestamp_to_string"
 | 
			
		||||
	DurationToString  = "duration_to_string"
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
// Timestamp conversion functions
 | 
			
		||||
const (
 | 
			
		||||
	TimestampToTimestamp = "timestamp_to_timestamp"
 | 
			
		||||
	StringToTimestamp    = "string_to_timestamp"
 | 
			
		||||
	IntToTimestamp       = "int64_to_timestamp"
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
// Convert duration from string
 | 
			
		||||
const (
 | 
			
		||||
	DurationToDuration = "duration_to_duration"
 | 
			
		||||
	StringToDuration   = "string_to_duration"
 | 
			
		||||
	IntToDuration      = "int64_to_duration"
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
// Convert to dyn
 | 
			
		||||
const (
 | 
			
		||||
	ToDyn = "to_dyn"
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
// Comprehensions helper methods, not directly accessible via a developer.
 | 
			
		||||
const (
 | 
			
		||||
	Iterator = "@iterator"
 | 
			
		||||
	HasNext  = "@hasNext"
 | 
			
		||||
	Next     = "@next"
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
// IsTypeConversionFunction returns whether the input function is a standard library type
 | 
			
		||||
// conversion function.
 | 
			
		||||
func IsTypeConversionFunction(function string) bool {
 | 
			
		||||
	switch function {
 | 
			
		||||
	case TypeConvertBool,
 | 
			
		||||
		TypeConvertBytes,
 | 
			
		||||
		TypeConvertDouble,
 | 
			
		||||
		TypeConvertDuration,
 | 
			
		||||
		TypeConvertDyn,
 | 
			
		||||
		TypeConvertInt,
 | 
			
		||||
		TypeConvertString,
 | 
			
		||||
		TypeConvertTimestamp,
 | 
			
		||||
		TypeConvertType,
 | 
			
		||||
		TypeConvertUint:
 | 
			
		||||
		return true
 | 
			
		||||
	default:
 | 
			
		||||
		return false
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
							
								
								
									
										25
									
								
								vendor/github.com/google/cel-go/common/runes/BUILD.bazel
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										25
									
								
								vendor/github.com/google/cel-go/common/runes/BUILD.bazel
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							@@ -0,0 +1,25 @@
 | 
			
		||||
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
 | 
			
		||||
 | 
			
		||||
package(
 | 
			
		||||
    default_visibility = ["//visibility:public"],
 | 
			
		||||
    licenses = ["notice"],  # Apache 2.0
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
go_library(
 | 
			
		||||
    name = "go_default_library",
 | 
			
		||||
    srcs = [
 | 
			
		||||
        "buffer.go",
 | 
			
		||||
    ],
 | 
			
		||||
    importpath = "github.com/google/cel-go/common/runes",
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
go_test(
 | 
			
		||||
    name = "go_default_test",
 | 
			
		||||
    size = "small",
 | 
			
		||||
    srcs = [
 | 
			
		||||
        "buffer_test.go",
 | 
			
		||||
    ],
 | 
			
		||||
    embed = [
 | 
			
		||||
        ":go_default_library",
 | 
			
		||||
    ],
 | 
			
		||||
)
 | 
			
		||||
							
								
								
									
										194
									
								
								vendor/github.com/google/cel-go/common/runes/buffer.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										194
									
								
								vendor/github.com/google/cel-go/common/runes/buffer.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							@@ -0,0 +1,194 @@
 | 
			
		||||
// Copyright 2021 Google LLC
 | 
			
		||||
//
 | 
			
		||||
// Licensed under the Apache License, Version 2.0 (the "License");
 | 
			
		||||
// you may not use this file except in compliance with the License.
 | 
			
		||||
// You may obtain a copy of the License at
 | 
			
		||||
//
 | 
			
		||||
//      http://www.apache.org/licenses/LICENSE-2.0
 | 
			
		||||
//
 | 
			
		||||
// Unless required by applicable law or agreed to in writing, software
 | 
			
		||||
// distributed under the License is distributed on an "AS IS" BASIS,
 | 
			
		||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | 
			
		||||
// See the License for the specific language governing permissions and
 | 
			
		||||
// limitations under the License.
 | 
			
		||||
 | 
			
		||||
// Package runes provides interfaces and utilities for working with runes.
 | 
			
		||||
package runes
 | 
			
		||||
 | 
			
		||||
import (
 | 
			
		||||
	"strings"
 | 
			
		||||
	"unicode/utf8"
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
// Buffer is an interface for accessing a contiguous array of code points.
 | 
			
		||||
type Buffer interface {
 | 
			
		||||
	Get(i int) rune
 | 
			
		||||
	Slice(i, j int) string
 | 
			
		||||
	Len() int
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
type emptyBuffer struct{}
 | 
			
		||||
 | 
			
		||||
func (e *emptyBuffer) Get(i int) rune {
 | 
			
		||||
	panic("slice index out of bounds")
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (e *emptyBuffer) Slice(i, j int) string {
 | 
			
		||||
	if i != 0 || i != j {
 | 
			
		||||
		panic("slice index out of bounds")
 | 
			
		||||
	}
 | 
			
		||||
	return ""
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (e *emptyBuffer) Len() int {
 | 
			
		||||
	return 0
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
var _ Buffer = &emptyBuffer{}
 | 
			
		||||
 | 
			
		||||
// asciiBuffer is an implementation for an array of code points that contain code points only from
 | 
			
		||||
// the ASCII character set.
 | 
			
		||||
type asciiBuffer struct {
 | 
			
		||||
	arr []byte
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (a *asciiBuffer) Get(i int) rune {
 | 
			
		||||
	return rune(uint32(a.arr[i]))
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (a *asciiBuffer) Slice(i, j int) string {
 | 
			
		||||
	return string(a.arr[i:j])
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (a *asciiBuffer) Len() int {
 | 
			
		||||
	return len(a.arr)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
var _ Buffer = &asciiBuffer{}
 | 
			
		||||
 | 
			
		||||
// basicBuffer is an implementation for an array of code points that contain code points from both
 | 
			
		||||
// the Latin-1 character set and Basic Multilingual Plane.
 | 
			
		||||
type basicBuffer struct {
 | 
			
		||||
	arr []uint16
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (b *basicBuffer) Get(i int) rune {
 | 
			
		||||
	return rune(uint32(b.arr[i]))
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (b *basicBuffer) Slice(i, j int) string {
 | 
			
		||||
	var str strings.Builder
 | 
			
		||||
	str.Grow((j - i) * 3) // Worst case encoding size for 0xffff is 3.
 | 
			
		||||
	for ; i < j; i++ {
 | 
			
		||||
		str.WriteRune(rune(uint32(b.arr[i])))
 | 
			
		||||
	}
 | 
			
		||||
	return str.String()
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (b *basicBuffer) Len() int {
 | 
			
		||||
	return len(b.arr)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
var _ Buffer = &basicBuffer{}
 | 
			
		||||
 | 
			
		||||
// supplementalBuffer is an implementation for an array of code points that contain code points from
 | 
			
		||||
// the Latin-1 character set, Basic Multilingual Plane, or the Supplemental Multilingual Plane.
 | 
			
		||||
type supplementalBuffer struct {
 | 
			
		||||
	arr []rune
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (s *supplementalBuffer) Get(i int) rune {
 | 
			
		||||
	return rune(uint32(s.arr[i]))
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (s *supplementalBuffer) Slice(i, j int) string {
 | 
			
		||||
	return string(s.arr[i:j])
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (s *supplementalBuffer) Len() int {
 | 
			
		||||
	return len(s.arr)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
var _ Buffer = &supplementalBuffer{}
 | 
			
		||||
 | 
			
		||||
var nilBuffer = &emptyBuffer{}
 | 
			
		||||
 | 
			
		||||
// NewBuffer returns an efficient implementation of Buffer for the given text based on the ranges of
 | 
			
		||||
// the encoded code points contained within.
 | 
			
		||||
//
 | 
			
		||||
// Code points are represented as an array of byte, uint16, or rune. This approach ensures that
 | 
			
		||||
// each index represents a code point by itself without needing to use an array of rune. At first
 | 
			
		||||
// we assume all code points are less than or equal to '\u007f'. If this holds true, the
 | 
			
		||||
// underlying storage is a byte array containing only ASCII characters. If we encountered a code
 | 
			
		||||
// point above this range but less than or equal to '\uffff' we allocate a uint16 array, copy the
 | 
			
		||||
// elements of previous byte array to the uint16 array, and continue. If this holds true, the
 | 
			
		||||
// underlying storage is a uint16 array containing only Unicode characters in the Basic Multilingual
 | 
			
		||||
// Plane. If we encounter a code point above '\uffff' we allocate an rune array, copy the previous
 | 
			
		||||
// elements of the byte or uint16 array, and continue. The underlying storage is an rune array
 | 
			
		||||
// containing any Unicode character.
 | 
			
		||||
func NewBuffer(data string) Buffer {
 | 
			
		||||
	if len(data) == 0 {
 | 
			
		||||
		return nilBuffer
 | 
			
		||||
	}
 | 
			
		||||
	var (
 | 
			
		||||
		idx   = 0
 | 
			
		||||
		buf8  = make([]byte, 0, len(data))
 | 
			
		||||
		buf16 []uint16
 | 
			
		||||
		buf32 []rune
 | 
			
		||||
	)
 | 
			
		||||
	for idx < len(data) {
 | 
			
		||||
		r, s := utf8.DecodeRuneInString(data[idx:])
 | 
			
		||||
		idx += s
 | 
			
		||||
		if r < utf8.RuneSelf {
 | 
			
		||||
			buf8 = append(buf8, byte(r))
 | 
			
		||||
			continue
 | 
			
		||||
		}
 | 
			
		||||
		if r <= 0xffff {
 | 
			
		||||
			buf16 = make([]uint16, len(buf8), len(data))
 | 
			
		||||
			for i, v := range buf8 {
 | 
			
		||||
				buf16[i] = uint16(v)
 | 
			
		||||
			}
 | 
			
		||||
			buf8 = nil
 | 
			
		||||
			buf16 = append(buf16, uint16(r))
 | 
			
		||||
			goto copy16
 | 
			
		||||
		}
 | 
			
		||||
		buf32 = make([]rune, len(buf8), len(data))
 | 
			
		||||
		for i, v := range buf8 {
 | 
			
		||||
			buf32[i] = rune(uint32(v))
 | 
			
		||||
		}
 | 
			
		||||
		buf8 = nil
 | 
			
		||||
		buf32 = append(buf32, r)
 | 
			
		||||
		goto copy32
 | 
			
		||||
	}
 | 
			
		||||
	return &asciiBuffer{
 | 
			
		||||
		arr: buf8,
 | 
			
		||||
	}
 | 
			
		||||
copy16:
 | 
			
		||||
	for idx < len(data) {
 | 
			
		||||
		r, s := utf8.DecodeRuneInString(data[idx:])
 | 
			
		||||
		idx += s
 | 
			
		||||
		if r <= 0xffff {
 | 
			
		||||
			buf16 = append(buf16, uint16(r))
 | 
			
		||||
			continue
 | 
			
		||||
		}
 | 
			
		||||
		buf32 = make([]rune, len(buf16), len(data))
 | 
			
		||||
		for i, v := range buf16 {
 | 
			
		||||
			buf32[i] = rune(uint32(v))
 | 
			
		||||
		}
 | 
			
		||||
		buf16 = nil
 | 
			
		||||
		buf32 = append(buf32, r)
 | 
			
		||||
		goto copy32
 | 
			
		||||
	}
 | 
			
		||||
	return &basicBuffer{
 | 
			
		||||
		arr: buf16,
 | 
			
		||||
	}
 | 
			
		||||
copy32:
 | 
			
		||||
	for idx < len(data) {
 | 
			
		||||
		r, s := utf8.DecodeRuneInString(data[idx:])
 | 
			
		||||
		idx += s
 | 
			
		||||
		buf32 = append(buf32, r)
 | 
			
		||||
	}
 | 
			
		||||
	return &supplementalBuffer{
 | 
			
		||||
		arr: buf32,
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
							
								
								
									
										186
									
								
								vendor/github.com/google/cel-go/common/source.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										186
									
								
								vendor/github.com/google/cel-go/common/source.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							@@ -0,0 +1,186 @@
 | 
			
		||||
// Copyright 2018 Google LLC
 | 
			
		||||
//
 | 
			
		||||
// Licensed under the Apache License, Version 2.0 (the "License");
 | 
			
		||||
// you may not use this file except in compliance with the License.
 | 
			
		||||
// You may obtain a copy of the License at
 | 
			
		||||
//
 | 
			
		||||
//      http://www.apache.org/licenses/LICENSE-2.0
 | 
			
		||||
//
 | 
			
		||||
// Unless required by applicable law or agreed to in writing, software
 | 
			
		||||
// distributed under the License is distributed on an "AS IS" BASIS,
 | 
			
		||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | 
			
		||||
// See the License for the specific language governing permissions and
 | 
			
		||||
// limitations under the License.
 | 
			
		||||
 | 
			
		||||
package common
 | 
			
		||||
 | 
			
		||||
import (
 | 
			
		||||
	"strings"
 | 
			
		||||
	"unicode/utf8"
 | 
			
		||||
 | 
			
		||||
	"github.com/google/cel-go/common/runes"
 | 
			
		||||
 | 
			
		||||
	exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
// Source interface for filter source contents.
 | 
			
		||||
type Source interface {
 | 
			
		||||
	// Content returns the source content represented as a string.
 | 
			
		||||
	// Examples contents are the single file contents, textbox field,
 | 
			
		||||
	// or url parameter.
 | 
			
		||||
	Content() string
 | 
			
		||||
 | 
			
		||||
	// Description gives a brief description of the source.
 | 
			
		||||
	// Example descriptions are a file name or ui element.
 | 
			
		||||
	Description() string
 | 
			
		||||
 | 
			
		||||
	// LineOffsets gives the character offsets at which lines occur.
 | 
			
		||||
	// The zero-th entry should refer to the break between the first
 | 
			
		||||
	// and second line, or EOF if there is only one line of source.
 | 
			
		||||
	LineOffsets() []int32
 | 
			
		||||
 | 
			
		||||
	// LocationOffset translates a Location to an offset.
 | 
			
		||||
	// Given the line and column of the Location returns the
 | 
			
		||||
	// Location's character offset in the Source, and a bool
 | 
			
		||||
	// indicating whether the Location was found.
 | 
			
		||||
	LocationOffset(location Location) (int32, bool)
 | 
			
		||||
 | 
			
		||||
	// OffsetLocation translates a character offset to a Location, or
 | 
			
		||||
	// false if the conversion was not feasible.
 | 
			
		||||
	OffsetLocation(offset int32) (Location, bool)
 | 
			
		||||
 | 
			
		||||
	// NewLocation takes an input line and column and produces a Location.
 | 
			
		||||
	// The default behavior is to treat the line and column as absolute,
 | 
			
		||||
	// but concrete derivations may use this method to convert a relative
 | 
			
		||||
	// line and column position into an absolute location.
 | 
			
		||||
	NewLocation(line, col int) Location
 | 
			
		||||
 | 
			
		||||
	// Snippet returns a line of content and whether the line was found.
 | 
			
		||||
	Snippet(line int) (string, bool)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// The sourceImpl type implementation of the Source interface.
 | 
			
		||||
type sourceImpl struct {
 | 
			
		||||
	runes.Buffer
 | 
			
		||||
	description string
 | 
			
		||||
	lineOffsets []int32
 | 
			
		||||
	idOffsets   map[int64]int32
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
var _ runes.Buffer = &sourceImpl{}
 | 
			
		||||
 | 
			
		||||
// TODO(jimlarson) "Character offsets" should index the code points
 | 
			
		||||
// within the UTF-8 encoded string.  It currently indexes bytes.
 | 
			
		||||
// Can be accomplished by using rune[] instead of string for contents.
 | 
			
		||||
 | 
			
		||||
// NewTextSource creates a new Source from the input text string.
 | 
			
		||||
func NewTextSource(text string) Source {
 | 
			
		||||
	return NewStringSource(text, "<input>")
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// NewStringSource creates a new Source from the given contents and description.
 | 
			
		||||
func NewStringSource(contents string, description string) Source {
 | 
			
		||||
	// Compute line offsets up front as they are referred to frequently.
 | 
			
		||||
	lines := strings.Split(contents, "\n")
 | 
			
		||||
	offsets := make([]int32, len(lines))
 | 
			
		||||
	var offset int32
 | 
			
		||||
	for i, line := range lines {
 | 
			
		||||
		offset = offset + int32(utf8.RuneCountInString(line)) + 1
 | 
			
		||||
		offsets[int32(i)] = offset
 | 
			
		||||
	}
 | 
			
		||||
	return &sourceImpl{
 | 
			
		||||
		Buffer:      runes.NewBuffer(contents),
 | 
			
		||||
		description: description,
 | 
			
		||||
		lineOffsets: offsets,
 | 
			
		||||
		idOffsets:   map[int64]int32{},
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// NewInfoSource creates a new Source from a SourceInfo.
 | 
			
		||||
func NewInfoSource(info *exprpb.SourceInfo) Source {
 | 
			
		||||
	return &sourceImpl{
 | 
			
		||||
		Buffer:      runes.NewBuffer(""),
 | 
			
		||||
		description: info.GetLocation(),
 | 
			
		||||
		lineOffsets: info.GetLineOffsets(),
 | 
			
		||||
		idOffsets:   info.GetPositions(),
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Content implements the Source interface method.
 | 
			
		||||
func (s *sourceImpl) Content() string {
 | 
			
		||||
	return s.Slice(0, s.Len())
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Description implements the Source interface method.
 | 
			
		||||
func (s *sourceImpl) Description() string {
 | 
			
		||||
	return s.description
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// LineOffsets implements the Source interface method.
 | 
			
		||||
func (s *sourceImpl) LineOffsets() []int32 {
 | 
			
		||||
	return s.lineOffsets
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// LocationOffset implements the Source interface method.
 | 
			
		||||
func (s *sourceImpl) LocationOffset(location Location) (int32, bool) {
 | 
			
		||||
	if lineOffset, found := s.findLineOffset(location.Line()); found {
 | 
			
		||||
		return lineOffset + int32(location.Column()), true
 | 
			
		||||
	}
 | 
			
		||||
	return -1, false
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// NewLocation implements the Source interface method.
 | 
			
		||||
func (s *sourceImpl) NewLocation(line, col int) Location {
 | 
			
		||||
	return NewLocation(line, col)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// OffsetLocation implements the Source interface method.
 | 
			
		||||
func (s *sourceImpl) OffsetLocation(offset int32) (Location, bool) {
 | 
			
		||||
	line, lineOffset := s.findLine(offset)
 | 
			
		||||
	return NewLocation(int(line), int(offset-lineOffset)), true
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Snippet implements the Source interface method.
 | 
			
		||||
func (s *sourceImpl) Snippet(line int) (string, bool) {
 | 
			
		||||
	charStart, found := s.findLineOffset(line)
 | 
			
		||||
	if !found || s.Len() == 0 {
 | 
			
		||||
		return "", false
 | 
			
		||||
	}
 | 
			
		||||
	charEnd, found := s.findLineOffset(line + 1)
 | 
			
		||||
	if found {
 | 
			
		||||
		return s.Slice(int(charStart), int(charEnd-1)), true
 | 
			
		||||
	}
 | 
			
		||||
	return s.Slice(int(charStart), s.Len()), true
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// findLineOffset returns the offset where the (1-indexed) line begins,
 | 
			
		||||
// or false if line doesn't exist.
 | 
			
		||||
func (s *sourceImpl) findLineOffset(line int) (int32, bool) {
 | 
			
		||||
	if line == 1 {
 | 
			
		||||
		return 0, true
 | 
			
		||||
	}
 | 
			
		||||
	if line > 1 && line <= int(len(s.lineOffsets)) {
 | 
			
		||||
		offset := s.lineOffsets[line-2]
 | 
			
		||||
		return offset, true
 | 
			
		||||
	}
 | 
			
		||||
	return -1, false
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// findLine finds the line that contains the given character offset and
 | 
			
		||||
// returns the line number and offset of the beginning of that line.
 | 
			
		||||
// Note that the last line is treated as if it contains all offsets
 | 
			
		||||
// beyond the end of the actual source.
 | 
			
		||||
func (s *sourceImpl) findLine(characterOffset int32) (int32, int32) {
 | 
			
		||||
	var line int32 = 1
 | 
			
		||||
	for _, lineOffset := range s.lineOffsets {
 | 
			
		||||
		if lineOffset > characterOffset {
 | 
			
		||||
			break
 | 
			
		||||
		} else {
 | 
			
		||||
			line++
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
	if line == 1 {
 | 
			
		||||
		return line, 0
 | 
			
		||||
	}
 | 
			
		||||
	return line, s.lineOffsets[line-2]
 | 
			
		||||
}
 | 
			
		||||
							
								
								
									
										84
									
								
								vendor/github.com/google/cel-go/common/types/BUILD.bazel
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										84
									
								
								vendor/github.com/google/cel-go/common/types/BUILD.bazel
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							@@ -0,0 +1,84 @@
 | 
			
		||||
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
 | 
			
		||||
 | 
			
		||||
package(
 | 
			
		||||
    default_visibility = ["//visibility:public"],
 | 
			
		||||
    licenses = ["notice"],  # Apache 2.0
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
go_library(
 | 
			
		||||
    name = "go_default_library",
 | 
			
		||||
    srcs = [
 | 
			
		||||
        "any_value.go",
 | 
			
		||||
        "bool.go",
 | 
			
		||||
        "bytes.go",
 | 
			
		||||
        "double.go",
 | 
			
		||||
        "duration.go",
 | 
			
		||||
        "err.go",
 | 
			
		||||
        "int.go",
 | 
			
		||||
        "iterator.go",
 | 
			
		||||
        "json_value.go",
 | 
			
		||||
        "list.go",
 | 
			
		||||
        "map.go",
 | 
			
		||||
        "null.go",
 | 
			
		||||
        "object.go",
 | 
			
		||||
        "overflow.go",
 | 
			
		||||
        "provider.go",
 | 
			
		||||
        "string.go",
 | 
			
		||||
        "timestamp.go",
 | 
			
		||||
        "type.go",
 | 
			
		||||
        "uint.go",
 | 
			
		||||
        "unknown.go",
 | 
			
		||||
        "util.go",
 | 
			
		||||
    ],
 | 
			
		||||
    importpath = "github.com/google/cel-go/common/types",
 | 
			
		||||
    deps = [
 | 
			
		||||
        "//common/overloads:go_default_library",
 | 
			
		||||
        "//common/types/pb:go_default_library",
 | 
			
		||||
        "//common/types/ref:go_default_library",
 | 
			
		||||
        "//common/types/traits:go_default_library",
 | 
			
		||||
        "@com_github_stoewer_go_strcase//:go_default_library",
 | 
			
		||||
        "@org_golang_google_genproto//googleapis/api/expr/v1alpha1:go_default_library",
 | 
			
		||||
        "@org_golang_google_protobuf//encoding/protojson:go_default_library",
 | 
			
		||||
        "@org_golang_google_protobuf//proto:go_default_library",
 | 
			
		||||
        "@org_golang_google_protobuf//reflect/protoreflect:go_default_library",
 | 
			
		||||
        "@org_golang_google_protobuf//types/known/anypb:go_default_library",
 | 
			
		||||
        "@org_golang_google_protobuf//types/known/durationpb:go_default_library",
 | 
			
		||||
        "@org_golang_google_protobuf//types/known/structpb:go_default_library",
 | 
			
		||||
        "@org_golang_google_protobuf//types/known/timestamppb:go_default_library",
 | 
			
		||||
        "@org_golang_google_protobuf//types/known/wrapperspb:go_default_library",
 | 
			
		||||
    ],
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
go_test(
 | 
			
		||||
    name = "go_default_test",
 | 
			
		||||
    size = "small",
 | 
			
		||||
    srcs = [
 | 
			
		||||
        "bool_test.go",
 | 
			
		||||
        "bytes_test.go",
 | 
			
		||||
        "double_test.go",
 | 
			
		||||
        "duration_test.go",
 | 
			
		||||
        "int_test.go",
 | 
			
		||||
        "json_list_test.go",
 | 
			
		||||
        "json_struct_test.go",
 | 
			
		||||
        "list_test.go",
 | 
			
		||||
        "map_test.go",
 | 
			
		||||
        "null_test.go",
 | 
			
		||||
        "object_test.go",
 | 
			
		||||
        "provider_test.go",
 | 
			
		||||
        "string_test.go",
 | 
			
		||||
        "timestamp_test.go",
 | 
			
		||||
        "type_test.go",
 | 
			
		||||
        "uint_test.go",
 | 
			
		||||
    ],
 | 
			
		||||
    embed = [":go_default_library"],
 | 
			
		||||
    deps = [
 | 
			
		||||
        "//common/types/ref:go_default_library",
 | 
			
		||||
        "//test:go_default_library",
 | 
			
		||||
        "//test/proto3pb:test_all_types_go_proto",
 | 
			
		||||
        "@org_golang_google_genproto//googleapis/api/expr/v1alpha1:go_default_library",
 | 
			
		||||
        "@org_golang_google_protobuf//encoding/protojson:go_default_library",
 | 
			
		||||
        "@org_golang_google_protobuf//types/known/anypb:go_default_library",
 | 
			
		||||
        "@org_golang_google_protobuf//types/known/durationpb:go_default_library",
 | 
			
		||||
        "@org_golang_google_protobuf//types/known/timestamppb:go_default_library",
 | 
			
		||||
    ],
 | 
			
		||||
)
 | 
			
		||||
							
								
								
									
										24
									
								
								vendor/github.com/google/cel-go/common/types/any_value.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										24
									
								
								vendor/github.com/google/cel-go/common/types/any_value.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							@@ -0,0 +1,24 @@
 | 
			
		||||
// Copyright 2018 Google LLC
 | 
			
		||||
//
 | 
			
		||||
// Licensed under the Apache License, Version 2.0 (the "License");
 | 
			
		||||
// you may not use this file except in compliance with the License.
 | 
			
		||||
// You may obtain a copy of the License at
 | 
			
		||||
//
 | 
			
		||||
//      http://www.apache.org/licenses/LICENSE-2.0
 | 
			
		||||
//
 | 
			
		||||
// Unless required by applicable law or agreed to in writing, software
 | 
			
		||||
// distributed under the License is distributed on an "AS IS" BASIS,
 | 
			
		||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | 
			
		||||
// See the License for the specific language governing permissions and
 | 
			
		||||
// limitations under the License.
 | 
			
		||||
 | 
			
		||||
package types
 | 
			
		||||
 | 
			
		||||
import (
 | 
			
		||||
	"reflect"
 | 
			
		||||
 | 
			
		||||
	anypb "google.golang.org/protobuf/types/known/anypb"
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
// anyValueType constant representing the reflected type of google.protobuf.Any.
 | 
			
		||||
var anyValueType = reflect.TypeOf(&anypb.Any{})
 | 
			
		||||
							
								
								
									
										144
									
								
								vendor/github.com/google/cel-go/common/types/bool.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										144
									
								
								vendor/github.com/google/cel-go/common/types/bool.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							@@ -0,0 +1,144 @@
 | 
			
		||||
// Copyright 2018 Google LLC
 | 
			
		||||
//
 | 
			
		||||
// Licensed under the Apache License, Version 2.0 (the "License");
 | 
			
		||||
// you may not use this file except in compliance with the License.
 | 
			
		||||
// You may obtain a copy of the License at
 | 
			
		||||
//
 | 
			
		||||
//      http://www.apache.org/licenses/LICENSE-2.0
 | 
			
		||||
//
 | 
			
		||||
// Unless required by applicable law or agreed to in writing, software
 | 
			
		||||
// distributed under the License is distributed on an "AS IS" BASIS,
 | 
			
		||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | 
			
		||||
// See the License for the specific language governing permissions and
 | 
			
		||||
// limitations under the License.
 | 
			
		||||
 | 
			
		||||
package types
 | 
			
		||||
 | 
			
		||||
import (
 | 
			
		||||
	"fmt"
 | 
			
		||||
	"reflect"
 | 
			
		||||
	"strconv"
 | 
			
		||||
 | 
			
		||||
	"github.com/google/cel-go/common/types/ref"
 | 
			
		||||
	"github.com/google/cel-go/common/types/traits"
 | 
			
		||||
 | 
			
		||||
	anypb "google.golang.org/protobuf/types/known/anypb"
 | 
			
		||||
	structpb "google.golang.org/protobuf/types/known/structpb"
 | 
			
		||||
	wrapperspb "google.golang.org/protobuf/types/known/wrapperspb"
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
// Bool type that implements ref.Val and supports comparison and negation.
 | 
			
		||||
type Bool bool
 | 
			
		||||
 | 
			
		||||
var (
 | 
			
		||||
	// BoolType singleton.
 | 
			
		||||
	BoolType = NewTypeValue("bool",
 | 
			
		||||
		traits.ComparerType,
 | 
			
		||||
		traits.NegatorType)
 | 
			
		||||
 | 
			
		||||
	// boolWrapperType golang reflected type for protobuf bool wrapper type.
 | 
			
		||||
	boolWrapperType = reflect.TypeOf(&wrapperspb.BoolValue{})
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
// Boolean constants
 | 
			
		||||
const (
 | 
			
		||||
	False = Bool(false)
 | 
			
		||||
	True  = Bool(true)
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
// Compare implements the traits.Comparer interface method.
 | 
			
		||||
func (b Bool) Compare(other ref.Val) ref.Val {
 | 
			
		||||
	otherBool, ok := other.(Bool)
 | 
			
		||||
	if !ok {
 | 
			
		||||
		return ValOrErr(other, "no such overload")
 | 
			
		||||
	}
 | 
			
		||||
	if b == otherBool {
 | 
			
		||||
		return IntZero
 | 
			
		||||
	}
 | 
			
		||||
	if !b && otherBool {
 | 
			
		||||
		return IntNegOne
 | 
			
		||||
	}
 | 
			
		||||
	return IntOne
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// ConvertToNative implements the ref.Val interface method.
 | 
			
		||||
func (b Bool) ConvertToNative(typeDesc reflect.Type) (interface{}, error) {
 | 
			
		||||
	switch typeDesc.Kind() {
 | 
			
		||||
	case reflect.Bool:
 | 
			
		||||
		return reflect.ValueOf(b).Convert(typeDesc).Interface(), nil
 | 
			
		||||
	case reflect.Ptr:
 | 
			
		||||
		switch typeDesc {
 | 
			
		||||
		case anyValueType:
 | 
			
		||||
			// Primitives must be wrapped to a wrapperspb.BoolValue before being packed into an Any.
 | 
			
		||||
			return anypb.New(wrapperspb.Bool(bool(b)))
 | 
			
		||||
		case boolWrapperType:
 | 
			
		||||
			// Convert the bool to a wrapperspb.BoolValue.
 | 
			
		||||
			return wrapperspb.Bool(bool(b)), nil
 | 
			
		||||
		case jsonValueType:
 | 
			
		||||
			// Return the bool as a new structpb.Value.
 | 
			
		||||
			return structpb.NewBoolValue(bool(b)), nil
 | 
			
		||||
		default:
 | 
			
		||||
			if typeDesc.Elem().Kind() == reflect.Bool {
 | 
			
		||||
				p := bool(b)
 | 
			
		||||
				return &p, nil
 | 
			
		||||
			}
 | 
			
		||||
		}
 | 
			
		||||
	case reflect.Interface:
 | 
			
		||||
		bv := b.Value()
 | 
			
		||||
		if reflect.TypeOf(bv).Implements(typeDesc) {
 | 
			
		||||
			return bv, nil
 | 
			
		||||
		}
 | 
			
		||||
		if reflect.TypeOf(b).Implements(typeDesc) {
 | 
			
		||||
			return b, nil
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
	return nil, fmt.Errorf("type conversion error from bool to '%v'", typeDesc)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// ConvertToType implements the ref.Val interface method.
 | 
			
		||||
func (b Bool) ConvertToType(typeVal ref.Type) ref.Val {
 | 
			
		||||
	switch typeVal {
 | 
			
		||||
	case StringType:
 | 
			
		||||
		return String(strconv.FormatBool(bool(b)))
 | 
			
		||||
	case BoolType:
 | 
			
		||||
		return b
 | 
			
		||||
	case TypeType:
 | 
			
		||||
		return BoolType
 | 
			
		||||
	}
 | 
			
		||||
	return NewErr("type conversion error from '%v' to '%v'", BoolType, typeVal)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Equal implements the ref.Val interface method.
 | 
			
		||||
func (b Bool) Equal(other ref.Val) ref.Val {
 | 
			
		||||
	otherBool, ok := other.(Bool)
 | 
			
		||||
	if !ok {
 | 
			
		||||
		return ValOrErr(other, "no such overload")
 | 
			
		||||
	}
 | 
			
		||||
	return Bool(b == otherBool)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Negate implements the traits.Negater interface method.
 | 
			
		||||
func (b Bool) Negate() ref.Val {
 | 
			
		||||
	return !b
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Type implements the ref.Val interface method.
 | 
			
		||||
func (b Bool) Type() ref.Type {
 | 
			
		||||
	return BoolType
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Value implements the ref.Val interface method.
 | 
			
		||||
func (b Bool) Value() interface{} {
 | 
			
		||||
	return bool(b)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// IsBool returns whether the input ref.Val or ref.Type is equal to BoolType.
 | 
			
		||||
func IsBool(elem interface{}) bool {
 | 
			
		||||
	switch elem := elem.(type) {
 | 
			
		||||
	case ref.Type:
 | 
			
		||||
		return elem == BoolType
 | 
			
		||||
	case ref.Val:
 | 
			
		||||
		return IsBool(elem.Type())
 | 
			
		||||
	}
 | 
			
		||||
	return false
 | 
			
		||||
}
 | 
			
		||||
							
								
								
									
										135
									
								
								vendor/github.com/google/cel-go/common/types/bytes.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										135
									
								
								vendor/github.com/google/cel-go/common/types/bytes.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							@@ -0,0 +1,135 @@
 | 
			
		||||
// Copyright 2018 Google LLC
 | 
			
		||||
//
 | 
			
		||||
// Licensed under the Apache License, Version 2.0 (the "License");
 | 
			
		||||
// you may not use this file except in compliance with the License.
 | 
			
		||||
// You may obtain a copy of the License at
 | 
			
		||||
//
 | 
			
		||||
//      http://www.apache.org/licenses/LICENSE-2.0
 | 
			
		||||
//
 | 
			
		||||
// Unless required by applicable law or agreed to in writing, software
 | 
			
		||||
// distributed under the License is distributed on an "AS IS" BASIS,
 | 
			
		||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | 
			
		||||
// See the License for the specific language governing permissions and
 | 
			
		||||
// limitations under the License.
 | 
			
		||||
 | 
			
		||||
package types
 | 
			
		||||
 | 
			
		||||
import (
 | 
			
		||||
	"bytes"
 | 
			
		||||
	"encoding/base64"
 | 
			
		||||
	"fmt"
 | 
			
		||||
	"reflect"
 | 
			
		||||
	"unicode/utf8"
 | 
			
		||||
 | 
			
		||||
	"github.com/google/cel-go/common/types/ref"
 | 
			
		||||
	"github.com/google/cel-go/common/types/traits"
 | 
			
		||||
 | 
			
		||||
	anypb "google.golang.org/protobuf/types/known/anypb"
 | 
			
		||||
	structpb "google.golang.org/protobuf/types/known/structpb"
 | 
			
		||||
	wrapperspb "google.golang.org/protobuf/types/known/wrapperspb"
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
// Bytes type that implements ref.Val and supports add, compare, and size
 | 
			
		||||
// operations.
 | 
			
		||||
type Bytes []byte
 | 
			
		||||
 | 
			
		||||
var (
 | 
			
		||||
	// BytesType singleton.
 | 
			
		||||
	BytesType = NewTypeValue("bytes",
 | 
			
		||||
		traits.AdderType,
 | 
			
		||||
		traits.ComparerType,
 | 
			
		||||
		traits.SizerType)
 | 
			
		||||
 | 
			
		||||
	// byteWrapperType golang reflected type for protobuf bytes wrapper type.
 | 
			
		||||
	byteWrapperType = reflect.TypeOf(&wrapperspb.BytesValue{})
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
// Add implements traits.Adder interface method by concatenating byte sequences.
 | 
			
		||||
func (b Bytes) Add(other ref.Val) ref.Val {
 | 
			
		||||
	otherBytes, ok := other.(Bytes)
 | 
			
		||||
	if !ok {
 | 
			
		||||
		return ValOrErr(other, "no such overload")
 | 
			
		||||
	}
 | 
			
		||||
	return append(b, otherBytes...)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Compare implments traits.Comparer interface method by lexicographic ordering.
 | 
			
		||||
func (b Bytes) Compare(other ref.Val) ref.Val {
 | 
			
		||||
	otherBytes, ok := other.(Bytes)
 | 
			
		||||
	if !ok {
 | 
			
		||||
		return ValOrErr(other, "no such overload")
 | 
			
		||||
	}
 | 
			
		||||
	return Int(bytes.Compare(b, otherBytes))
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// ConvertToNative implements the ref.Val interface method.
 | 
			
		||||
func (b Bytes) ConvertToNative(typeDesc reflect.Type) (interface{}, error) {
 | 
			
		||||
	switch typeDesc.Kind() {
 | 
			
		||||
	case reflect.Array, reflect.Slice:
 | 
			
		||||
		return reflect.ValueOf(b).Convert(typeDesc).Interface(), nil
 | 
			
		||||
	case reflect.Ptr:
 | 
			
		||||
		switch typeDesc {
 | 
			
		||||
		case anyValueType:
 | 
			
		||||
			// Primitives must be wrapped before being set on an Any field.
 | 
			
		||||
			return anypb.New(wrapperspb.Bytes([]byte(b)))
 | 
			
		||||
		case byteWrapperType:
 | 
			
		||||
			// Convert the bytes to a wrapperspb.BytesValue.
 | 
			
		||||
			return wrapperspb.Bytes([]byte(b)), nil
 | 
			
		||||
		case jsonValueType:
 | 
			
		||||
			// CEL follows the proto3 to JSON conversion by encoding bytes to a string via base64.
 | 
			
		||||
			// The encoding below matches the golang 'encoding/json' behavior during marshaling,
 | 
			
		||||
			// which uses base64.StdEncoding.
 | 
			
		||||
			str := base64.StdEncoding.EncodeToString([]byte(b))
 | 
			
		||||
			return structpb.NewStringValue(str), nil
 | 
			
		||||
		}
 | 
			
		||||
	case reflect.Interface:
 | 
			
		||||
		bv := b.Value()
 | 
			
		||||
		if reflect.TypeOf(bv).Implements(typeDesc) {
 | 
			
		||||
			return bv, nil
 | 
			
		||||
		}
 | 
			
		||||
		if reflect.TypeOf(b).Implements(typeDesc) {
 | 
			
		||||
			return b, nil
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
	return nil, fmt.Errorf("type conversion error from Bytes to '%v'", typeDesc)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// ConvertToType implements the ref.Val interface method.
 | 
			
		||||
func (b Bytes) ConvertToType(typeVal ref.Type) ref.Val {
 | 
			
		||||
	switch typeVal {
 | 
			
		||||
	case StringType:
 | 
			
		||||
		if !utf8.Valid(b) {
 | 
			
		||||
			return NewErr("invalid UTF-8 in bytes, cannot convert to string")
 | 
			
		||||
		}
 | 
			
		||||
		return String(b)
 | 
			
		||||
	case BytesType:
 | 
			
		||||
		return b
 | 
			
		||||
	case TypeType:
 | 
			
		||||
		return BytesType
 | 
			
		||||
	}
 | 
			
		||||
	return NewErr("type conversion error from '%s' to '%s'", BytesType, typeVal)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Equal implements the ref.Val interface method.
 | 
			
		||||
func (b Bytes) Equal(other ref.Val) ref.Val {
 | 
			
		||||
	otherBytes, ok := other.(Bytes)
 | 
			
		||||
	if !ok {
 | 
			
		||||
		return ValOrErr(other, "no such overload")
 | 
			
		||||
	}
 | 
			
		||||
	return Bool(bytes.Equal(b, otherBytes))
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Size implements the traits.Sizer interface method.
 | 
			
		||||
func (b Bytes) Size() ref.Val {
 | 
			
		||||
	return Int(len(b))
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Type implements the ref.Val interface method.
 | 
			
		||||
func (b Bytes) Type() ref.Type {
 | 
			
		||||
	return BytesType
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Value implements the ref.Val interface method.
 | 
			
		||||
func (b Bytes) Value() interface{} {
 | 
			
		||||
	return []byte(b)
 | 
			
		||||
}
 | 
			
		||||
							
								
								
									
										17
									
								
								vendor/github.com/google/cel-go/common/types/doc.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										17
									
								
								vendor/github.com/google/cel-go/common/types/doc.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							@@ -0,0 +1,17 @@
 | 
			
		||||
// Copyright 2018 Google LLC
 | 
			
		||||
//
 | 
			
		||||
// Licensed under the Apache License, Version 2.0 (the "License");
 | 
			
		||||
// you may not use this file except in compliance with the License.
 | 
			
		||||
// You may obtain a copy of the License at
 | 
			
		||||
//
 | 
			
		||||
//      http://www.apache.org/licenses/LICENSE-2.0
 | 
			
		||||
//
 | 
			
		||||
// Unless required by applicable law or agreed to in writing, software
 | 
			
		||||
// distributed under the License is distributed on an "AS IS" BASIS,
 | 
			
		||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | 
			
		||||
// See the License for the specific language governing permissions and
 | 
			
		||||
// limitations under the License.
 | 
			
		||||
 | 
			
		||||
// Package types contains the types, traits, and utilities common to all
 | 
			
		||||
// components of expression handling.
 | 
			
		||||
package types
 | 
			
		||||
							
								
								
									
										200
									
								
								vendor/github.com/google/cel-go/common/types/double.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										200
									
								
								vendor/github.com/google/cel-go/common/types/double.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							@@ -0,0 +1,200 @@
 | 
			
		||||
// Copyright 2018 Google LLC
 | 
			
		||||
//
 | 
			
		||||
// Licensed under the Apache License, Version 2.0 (the "License");
 | 
			
		||||
// you may not use this file except in compliance with the License.
 | 
			
		||||
// You may obtain a copy of the License at
 | 
			
		||||
//
 | 
			
		||||
//      http://www.apache.org/licenses/LICENSE-2.0
 | 
			
		||||
//
 | 
			
		||||
// Unless required by applicable law or agreed to in writing, software
 | 
			
		||||
// distributed under the License is distributed on an "AS IS" BASIS,
 | 
			
		||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | 
			
		||||
// See the License for the specific language governing permissions and
 | 
			
		||||
// limitations under the License.
 | 
			
		||||
 | 
			
		||||
package types
 | 
			
		||||
 | 
			
		||||
import (
 | 
			
		||||
	"fmt"
 | 
			
		||||
	"reflect"
 | 
			
		||||
 | 
			
		||||
	"github.com/google/cel-go/common/types/ref"
 | 
			
		||||
	"github.com/google/cel-go/common/types/traits"
 | 
			
		||||
 | 
			
		||||
	anypb "google.golang.org/protobuf/types/known/anypb"
 | 
			
		||||
	structpb "google.golang.org/protobuf/types/known/structpb"
 | 
			
		||||
	wrapperspb "google.golang.org/protobuf/types/known/wrapperspb"
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
// Double type that implements ref.Val, comparison, and mathematical
 | 
			
		||||
// operations.
 | 
			
		||||
type Double float64
 | 
			
		||||
 | 
			
		||||
var (
 | 
			
		||||
	// DoubleType singleton.
 | 
			
		||||
	DoubleType = NewTypeValue("double",
 | 
			
		||||
		traits.AdderType,
 | 
			
		||||
		traits.ComparerType,
 | 
			
		||||
		traits.DividerType,
 | 
			
		||||
		traits.MultiplierType,
 | 
			
		||||
		traits.NegatorType,
 | 
			
		||||
		traits.SubtractorType)
 | 
			
		||||
 | 
			
		||||
	// doubleWrapperType reflected type for protobuf double wrapper type.
 | 
			
		||||
	doubleWrapperType = reflect.TypeOf(&wrapperspb.DoubleValue{})
 | 
			
		||||
 | 
			
		||||
	// floatWrapperType reflected type for protobuf float wrapper type.
 | 
			
		||||
	floatWrapperType = reflect.TypeOf(&wrapperspb.FloatValue{})
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
// Add implements traits.Adder.Add.
 | 
			
		||||
func (d Double) Add(other ref.Val) ref.Val {
 | 
			
		||||
	otherDouble, ok := other.(Double)
 | 
			
		||||
	if !ok {
 | 
			
		||||
		return MaybeNoSuchOverloadErr(other)
 | 
			
		||||
	}
 | 
			
		||||
	return d + otherDouble
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Compare implements traits.Comparer.Compare.
 | 
			
		||||
func (d Double) Compare(other ref.Val) ref.Val {
 | 
			
		||||
	otherDouble, ok := other.(Double)
 | 
			
		||||
	if !ok {
 | 
			
		||||
		return MaybeNoSuchOverloadErr(other)
 | 
			
		||||
	}
 | 
			
		||||
	if d < otherDouble {
 | 
			
		||||
		return IntNegOne
 | 
			
		||||
	}
 | 
			
		||||
	if d > otherDouble {
 | 
			
		||||
		return IntOne
 | 
			
		||||
	}
 | 
			
		||||
	return IntZero
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// ConvertToNative implements ref.Val.ConvertToNative.
 | 
			
		||||
func (d Double) ConvertToNative(typeDesc reflect.Type) (interface{}, error) {
 | 
			
		||||
	switch typeDesc.Kind() {
 | 
			
		||||
	case reflect.Float32:
 | 
			
		||||
		v := float32(d)
 | 
			
		||||
		return reflect.ValueOf(v).Convert(typeDesc).Interface(), nil
 | 
			
		||||
	case reflect.Float64:
 | 
			
		||||
		v := float64(d)
 | 
			
		||||
		return reflect.ValueOf(v).Convert(typeDesc).Interface(), nil
 | 
			
		||||
	case reflect.Ptr:
 | 
			
		||||
		switch typeDesc {
 | 
			
		||||
		case anyValueType:
 | 
			
		||||
			// Primitives must be wrapped before being set on an Any field.
 | 
			
		||||
			return anypb.New(wrapperspb.Double(float64(d)))
 | 
			
		||||
		case doubleWrapperType:
 | 
			
		||||
			// Convert to a wrapperspb.DoubleValue
 | 
			
		||||
			return wrapperspb.Double(float64(d)), nil
 | 
			
		||||
		case floatWrapperType:
 | 
			
		||||
			// Convert to a wrapperspb.FloatValue (with truncation).
 | 
			
		||||
			return wrapperspb.Float(float32(d)), nil
 | 
			
		||||
		case jsonValueType:
 | 
			
		||||
			// Note, there are special cases for proto3 to json conversion that
 | 
			
		||||
			// expect the floating point value to be converted to a NaN,
 | 
			
		||||
			// Infinity, or -Infinity string values, but the jsonpb string
 | 
			
		||||
			// marshaling of the protobuf.Value will handle this conversion.
 | 
			
		||||
			return structpb.NewNumberValue(float64(d)), nil
 | 
			
		||||
		}
 | 
			
		||||
		switch typeDesc.Elem().Kind() {
 | 
			
		||||
		case reflect.Float32:
 | 
			
		||||
			v := float32(d)
 | 
			
		||||
			p := reflect.New(typeDesc.Elem())
 | 
			
		||||
			p.Elem().Set(reflect.ValueOf(v).Convert(typeDesc.Elem()))
 | 
			
		||||
			return p.Interface(), nil
 | 
			
		||||
		case reflect.Float64:
 | 
			
		||||
			v := float64(d)
 | 
			
		||||
			p := reflect.New(typeDesc.Elem())
 | 
			
		||||
			p.Elem().Set(reflect.ValueOf(v).Convert(typeDesc.Elem()))
 | 
			
		||||
			return p.Interface(), nil
 | 
			
		||||
		}
 | 
			
		||||
	case reflect.Interface:
 | 
			
		||||
		dv := d.Value()
 | 
			
		||||
		if reflect.TypeOf(dv).Implements(typeDesc) {
 | 
			
		||||
			return dv, nil
 | 
			
		||||
		}
 | 
			
		||||
		if reflect.TypeOf(d).Implements(typeDesc) {
 | 
			
		||||
			return d, nil
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
	return nil, fmt.Errorf("type conversion error from Double to '%v'", typeDesc)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// ConvertToType implements ref.Val.ConvertToType.
 | 
			
		||||
func (d Double) ConvertToType(typeVal ref.Type) ref.Val {
 | 
			
		||||
	switch typeVal {
 | 
			
		||||
	case IntType:
 | 
			
		||||
		i, err := doubleToInt64Checked(float64(d))
 | 
			
		||||
		if err != nil {
 | 
			
		||||
			return wrapErr(err)
 | 
			
		||||
		}
 | 
			
		||||
		return Int(i)
 | 
			
		||||
	case UintType:
 | 
			
		||||
		i, err := doubleToUint64Checked(float64(d))
 | 
			
		||||
		if err != nil {
 | 
			
		||||
			return wrapErr(err)
 | 
			
		||||
		}
 | 
			
		||||
		return Uint(i)
 | 
			
		||||
	case DoubleType:
 | 
			
		||||
		return d
 | 
			
		||||
	case StringType:
 | 
			
		||||
		return String(fmt.Sprintf("%g", float64(d)))
 | 
			
		||||
	case TypeType:
 | 
			
		||||
		return DoubleType
 | 
			
		||||
	}
 | 
			
		||||
	return NewErr("type conversion error from '%s' to '%s'", DoubleType, typeVal)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Divide implements traits.Divider.Divide.
 | 
			
		||||
func (d Double) Divide(other ref.Val) ref.Val {
 | 
			
		||||
	otherDouble, ok := other.(Double)
 | 
			
		||||
	if !ok {
 | 
			
		||||
		return MaybeNoSuchOverloadErr(other)
 | 
			
		||||
	}
 | 
			
		||||
	return d / otherDouble
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Equal implements ref.Val.Equal.
 | 
			
		||||
func (d Double) Equal(other ref.Val) ref.Val {
 | 
			
		||||
	otherDouble, ok := other.(Double)
 | 
			
		||||
	if !ok {
 | 
			
		||||
		return MaybeNoSuchOverloadErr(other)
 | 
			
		||||
	}
 | 
			
		||||
	// TODO: Handle NaNs properly.
 | 
			
		||||
	return Bool(d == otherDouble)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Multiply implements traits.Multiplier.Multiply.
 | 
			
		||||
func (d Double) Multiply(other ref.Val) ref.Val {
 | 
			
		||||
	otherDouble, ok := other.(Double)
 | 
			
		||||
	if !ok {
 | 
			
		||||
		return MaybeNoSuchOverloadErr(other)
 | 
			
		||||
	}
 | 
			
		||||
	return d * otherDouble
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Negate implements traits.Negater.Negate.
 | 
			
		||||
func (d Double) Negate() ref.Val {
 | 
			
		||||
	return -d
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Subtract implements traits.Subtractor.Subtract.
 | 
			
		||||
func (d Double) Subtract(subtrahend ref.Val) ref.Val {
 | 
			
		||||
	subtraDouble, ok := subtrahend.(Double)
 | 
			
		||||
	if !ok {
 | 
			
		||||
		return MaybeNoSuchOverloadErr(subtrahend)
 | 
			
		||||
	}
 | 
			
		||||
	return d - subtraDouble
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Type implements ref.Val.Type.
 | 
			
		||||
func (d Double) Type() ref.Type {
 | 
			
		||||
	return DoubleType
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Value implements ref.Val.Value.
 | 
			
		||||
func (d Double) Value() interface{} {
 | 
			
		||||
	return float64(d)
 | 
			
		||||
}
 | 
			
		||||
							
								
								
									
										202
									
								
								vendor/github.com/google/cel-go/common/types/duration.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										202
									
								
								vendor/github.com/google/cel-go/common/types/duration.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							@@ -0,0 +1,202 @@
 | 
			
		||||
// Copyright 2018 Google LLC
 | 
			
		||||
//
 | 
			
		||||
// Licensed under the Apache License, Version 2.0 (the "License");
 | 
			
		||||
// you may not use this file except in compliance with the License.
 | 
			
		||||
// You may obtain a copy of the License at
 | 
			
		||||
//
 | 
			
		||||
//      http://www.apache.org/licenses/LICENSE-2.0
 | 
			
		||||
//
 | 
			
		||||
// Unless required by applicable law or agreed to in writing, software
 | 
			
		||||
// distributed under the License is distributed on an "AS IS" BASIS,
 | 
			
		||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | 
			
		||||
// See the License for the specific language governing permissions and
 | 
			
		||||
// limitations under the License.
 | 
			
		||||
 | 
			
		||||
package types
 | 
			
		||||
 | 
			
		||||
import (
 | 
			
		||||
	"fmt"
 | 
			
		||||
	"reflect"
 | 
			
		||||
	"strconv"
 | 
			
		||||
	"time"
 | 
			
		||||
 | 
			
		||||
	"github.com/google/cel-go/common/overloads"
 | 
			
		||||
	"github.com/google/cel-go/common/types/ref"
 | 
			
		||||
	"github.com/google/cel-go/common/types/traits"
 | 
			
		||||
 | 
			
		||||
	anypb "google.golang.org/protobuf/types/known/anypb"
 | 
			
		||||
	dpb "google.golang.org/protobuf/types/known/durationpb"
 | 
			
		||||
	structpb "google.golang.org/protobuf/types/known/structpb"
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
// Duration type that implements ref.Val and supports add, compare, negate,
 | 
			
		||||
// and subtract operators. This type is also a receiver which means it can
 | 
			
		||||
// participate in dispatch to receiver functions.
 | 
			
		||||
type Duration struct {
 | 
			
		||||
	time.Duration
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func durationOf(d time.Duration) Duration {
 | 
			
		||||
	return Duration{Duration: d}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
var (
 | 
			
		||||
	// DurationType singleton.
 | 
			
		||||
	DurationType = NewTypeValue("google.protobuf.Duration",
 | 
			
		||||
		traits.AdderType,
 | 
			
		||||
		traits.ComparerType,
 | 
			
		||||
		traits.NegatorType,
 | 
			
		||||
		traits.ReceiverType,
 | 
			
		||||
		traits.SubtractorType)
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
// Add implements traits.Adder.Add.
 | 
			
		||||
func (d Duration) Add(other ref.Val) ref.Val {
 | 
			
		||||
	switch other.Type() {
 | 
			
		||||
	case DurationType:
 | 
			
		||||
		dur2 := other.(Duration)
 | 
			
		||||
		val, err := addDurationChecked(d.Duration, dur2.Duration)
 | 
			
		||||
		if err != nil {
 | 
			
		||||
			return wrapErr(err)
 | 
			
		||||
		}
 | 
			
		||||
		return durationOf(val)
 | 
			
		||||
	case TimestampType:
 | 
			
		||||
		ts := other.(Timestamp).Time
 | 
			
		||||
		val, err := addTimeDurationChecked(ts, d.Duration)
 | 
			
		||||
		if err != nil {
 | 
			
		||||
			return wrapErr(err)
 | 
			
		||||
		}
 | 
			
		||||
		return timestampOf(val)
 | 
			
		||||
	}
 | 
			
		||||
	return MaybeNoSuchOverloadErr(other)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Compare implements traits.Comparer.Compare.
 | 
			
		||||
func (d Duration) Compare(other ref.Val) ref.Val {
 | 
			
		||||
	otherDur, ok := other.(Duration)
 | 
			
		||||
	if !ok {
 | 
			
		||||
		return MaybeNoSuchOverloadErr(other)
 | 
			
		||||
	}
 | 
			
		||||
	d1 := d.Duration
 | 
			
		||||
	d2 := otherDur.Duration
 | 
			
		||||
	switch {
 | 
			
		||||
	case d1 < d2:
 | 
			
		||||
		return IntNegOne
 | 
			
		||||
	case d1 > d2:
 | 
			
		||||
		return IntOne
 | 
			
		||||
	default:
 | 
			
		||||
		return IntZero
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// ConvertToNative implements ref.Val.ConvertToNative.
 | 
			
		||||
func (d Duration) ConvertToNative(typeDesc reflect.Type) (interface{}, error) {
 | 
			
		||||
	// If the duration is already assignable to the desired type return it.
 | 
			
		||||
	if reflect.TypeOf(d.Duration).AssignableTo(typeDesc) {
 | 
			
		||||
		return d.Duration, nil
 | 
			
		||||
	}
 | 
			
		||||
	if reflect.TypeOf(d).AssignableTo(typeDesc) {
 | 
			
		||||
		return d, nil
 | 
			
		||||
	}
 | 
			
		||||
	switch typeDesc {
 | 
			
		||||
	case anyValueType:
 | 
			
		||||
		// Pack the duration as a dpb.Duration into an Any value.
 | 
			
		||||
		return anypb.New(dpb.New(d.Duration))
 | 
			
		||||
	case durationValueType:
 | 
			
		||||
		// Unwrap the CEL value to its underlying proto value.
 | 
			
		||||
		return dpb.New(d.Duration), nil
 | 
			
		||||
	case jsonValueType:
 | 
			
		||||
		// CEL follows the proto3 to JSON conversion.
 | 
			
		||||
		// Note, using jsonpb would wrap the result in extra double quotes.
 | 
			
		||||
		v := d.ConvertToType(StringType)
 | 
			
		||||
		if IsError(v) {
 | 
			
		||||
			return nil, v.(*Err)
 | 
			
		||||
		}
 | 
			
		||||
		return structpb.NewStringValue(string(v.(String))), nil
 | 
			
		||||
	}
 | 
			
		||||
	return nil, fmt.Errorf("type conversion error from 'Duration' to '%v'", typeDesc)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// ConvertToType implements ref.Val.ConvertToType.
 | 
			
		||||
func (d Duration) ConvertToType(typeVal ref.Type) ref.Val {
 | 
			
		||||
	switch typeVal {
 | 
			
		||||
	case StringType:
 | 
			
		||||
		return String(strconv.FormatFloat(d.Seconds(), 'f', -1, 64) + "s")
 | 
			
		||||
	case IntType:
 | 
			
		||||
		return Int(d.Duration)
 | 
			
		||||
	case DurationType:
 | 
			
		||||
		return d
 | 
			
		||||
	case TypeType:
 | 
			
		||||
		return DurationType
 | 
			
		||||
	}
 | 
			
		||||
	return NewErr("type conversion error from '%s' to '%s'", DurationType, typeVal)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Equal implements ref.Val.Equal.
 | 
			
		||||
func (d Duration) Equal(other ref.Val) ref.Val {
 | 
			
		||||
	otherDur, ok := other.(Duration)
 | 
			
		||||
	if !ok {
 | 
			
		||||
		return MaybeNoSuchOverloadErr(other)
 | 
			
		||||
	}
 | 
			
		||||
	return Bool(d.Duration == otherDur.Duration)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Negate implements traits.Negater.Negate.
 | 
			
		||||
func (d Duration) Negate() ref.Val {
 | 
			
		||||
	val, err := negateDurationChecked(d.Duration)
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		return wrapErr(err)
 | 
			
		||||
	}
 | 
			
		||||
	return durationOf(val)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Receive implements traits.Receiver.Receive.
 | 
			
		||||
func (d Duration) Receive(function string, overload string, args []ref.Val) ref.Val {
 | 
			
		||||
	if len(args) == 0 {
 | 
			
		||||
		if f, found := durationZeroArgOverloads[function]; found {
 | 
			
		||||
			return f(d.Duration)
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
	return NoSuchOverloadErr()
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Subtract implements traits.Subtractor.Subtract.
 | 
			
		||||
func (d Duration) Subtract(subtrahend ref.Val) ref.Val {
 | 
			
		||||
	subtraDur, ok := subtrahend.(Duration)
 | 
			
		||||
	if !ok {
 | 
			
		||||
		return MaybeNoSuchOverloadErr(subtrahend)
 | 
			
		||||
	}
 | 
			
		||||
	val, err := subtractDurationChecked(d.Duration, subtraDur.Duration)
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		return wrapErr(err)
 | 
			
		||||
	}
 | 
			
		||||
	return durationOf(val)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Type implements ref.Val.Type, identifying this value as a CEL duration.
func (d Duration) Type() ref.Type {
	return DurationType
}

// Value implements ref.Val.Value, exposing the embedded time.Duration as the
// native Go representation.
func (d Duration) Value() interface{} {
	return d.Duration
}
 | 
			
		||||
 | 
			
		||||
var (
	// durationValueType is the reflected type of the protobuf Duration message,
	// used when converting between native and CEL duration representations.
	durationValueType = reflect.TypeOf(&dpb.Duration{})

	// durationZeroArgOverloads maps zero-argument duration method names to
	// accessors over the underlying time.Duration. Note the float results of
	// Hours/Minutes/Seconds are truncated toward zero by the Int conversion.
	durationZeroArgOverloads = map[string]func(time.Duration) ref.Val{
		overloads.TimeGetHours: func(dur time.Duration) ref.Val {
			return Int(dur.Hours())
		},
		overloads.TimeGetMinutes: func(dur time.Duration) ref.Val {
			return Int(dur.Minutes())
		},
		overloads.TimeGetSeconds: func(dur time.Duration) ref.Val {
			return Int(dur.Seconds())
		},
		overloads.TimeGetMilliseconds: func(dur time.Duration) ref.Val {
			return Int(dur.Milliseconds())
		}}
)
 | 
			
		||||
							
								
								
									
										130
									
								
								vendor/github.com/google/cel-go/common/types/err.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										130
									
								
								vendor/github.com/google/cel-go/common/types/err.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							@@ -0,0 +1,130 @@
 | 
			
		||||
// Copyright 2018 Google LLC
 | 
			
		||||
//
 | 
			
		||||
// Licensed under the Apache License, Version 2.0 (the "License");
 | 
			
		||||
// you may not use this file except in compliance with the License.
 | 
			
		||||
// You may obtain a copy of the License at
 | 
			
		||||
//
 | 
			
		||||
//      http://www.apache.org/licenses/LICENSE-2.0
 | 
			
		||||
//
 | 
			
		||||
// Unless required by applicable law or agreed to in writing, software
 | 
			
		||||
// distributed under the License is distributed on an "AS IS" BASIS,
 | 
			
		||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | 
			
		||||
// See the License for the specific language governing permissions and
 | 
			
		||||
// limitations under the License.
 | 
			
		||||
 | 
			
		||||
package types
 | 
			
		||||
 | 
			
		||||
import (
 | 
			
		||||
	"errors"
 | 
			
		||||
	"fmt"
 | 
			
		||||
	"reflect"
 | 
			
		||||
 | 
			
		||||
	"github.com/google/cel-go/common/types/ref"
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
// Err type which extends the built-in go error and implements ref.Val.
// The embedded error carries the message; the wrapper makes it usable as a
// first-class CEL value that poisons downstream operations.
type Err struct {
	error
}

var (
	// ErrType singleton.
	ErrType = NewTypeValue("error")

	// errDivideByZero is an error indicating a division by zero of an integer value.
	errDivideByZero = errors.New("division by zero")
	// errModulusByZero is an error indicating a modulus by zero of an integer value.
	errModulusByZero = errors.New("modulus by zero")
	// errIntOverflow is an error representing integer overflow.
	errIntOverflow = errors.New("integer overflow")
	// errUintOverflow is an error representing unsigned integer overflow.
	errUintOverflow = errors.New("unsigned integer overflow")
	// errDurationOverflow is an error representing duration overflow.
	errDurationOverflow = errors.New("duration overflow")
	// errTimestampOverflow is an error representing timestamp overflow.
	errTimestampOverflow = errors.New("timestamp overflow")
	// celErrTimestampOverflow is the timestamp-overflow sentinel pre-wrapped as
	// a CEL value, so callers can return it without allocating a new Err.
	celErrTimestampOverflow = &Err{error: errTimestampOverflow}

	// celErrNoSuchOverload indicates that the call arguments did not match a supported method signature.
	celErrNoSuchOverload = NewErr("no such overload")
)
 | 
			
		||||
 | 
			
		||||
// NewErr creates a new Err described by the format string and args.
// TODO: Audit the use of this function and standardize the error messages and codes.
func NewErr(format string, args ...interface{}) ref.Val {
	return &Err{fmt.Errorf(format, args...)}
}

// NoSuchOverloadErr returns a new types.Err instance with a no such overload message.
// The returned value is the shared celErrNoSuchOverload singleton, so no
// allocation occurs per call.
func NoSuchOverloadErr() ref.Val {
	return celErrNoSuchOverload
}

// UnsupportedRefValConversionErr returns a types.NewErr instance with a no such conversion
// message that indicates that the native value could not be converted to a CEL ref.Val.
func UnsupportedRefValConversionErr(val interface{}) ref.Val {
	return NewErr("unsupported conversion to ref.Val: (%T)%v", val, val)
}

// MaybeNoSuchOverloadErr returns the error or unknown if the input ref.Val is one of these types,
// else a new no such overload error.
func MaybeNoSuchOverloadErr(val ref.Val) ref.Val {
	return ValOrErr(val, "no such overload")
}
 | 
			
		||||
 | 
			
		||||
// ValOrErr either returns the existing error or create a new one.
 | 
			
		||||
// TODO: Audit the use of this function and standardize the error messages and codes.
 | 
			
		||||
func ValOrErr(val ref.Val, format string, args ...interface{}) ref.Val {
 | 
			
		||||
	if val == nil {
 | 
			
		||||
		return NewErr(format, args...)
 | 
			
		||||
	}
 | 
			
		||||
	switch val.Type() {
 | 
			
		||||
	case ErrType, UnknownType:
 | 
			
		||||
		return val
 | 
			
		||||
	default:
 | 
			
		||||
		return NewErr(format, args...)
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// wrapErr wraps an existing Go error value into a CEL Err value without
// reformatting its message.
func wrapErr(err error) ref.Val {
	return &Err{error: err}
}
 | 
			
		||||
 | 
			
		||||
// ConvertToNative implements ref.Val.ConvertToNative. Errors have no native
// representation; the wrapped Go error is returned instead.
func (e *Err) ConvertToNative(typeDesc reflect.Type) (interface{}, error) {
	return nil, e.error
}

// ConvertToType implements ref.Val.ConvertToType.
func (e *Err) ConvertToType(typeVal ref.Type) ref.Val {
	// Errors are not convertible to other representations.
	return e
}

// Equal implements ref.Val.Equal. Errors propagate through comparisons rather
// than producing a boolean result.
func (e *Err) Equal(other ref.Val) ref.Val {
	// An error cannot be equal to any other value, so it returns itself.
	return e
}
 | 
			
		||||
 | 
			
		||||
// String implements fmt.Stringer, delegating to the wrapped error's message.
func (e *Err) String() string {
	return e.error.Error()
}

// Type implements ref.Val.Type.
func (e *Err) Type() ref.Type {
	return ErrType
}

// Value implements ref.Val.Value, exposing the wrapped Go error.
func (e *Err) Value() interface{} {
	return e.error
}

// IsError returns whether the input element ref.Type or ref.Val is equal to
// the ErrType singleton.
func IsError(val ref.Val) bool {
	return val.Type() == ErrType
}
 | 
			
		||||
							
								
								
									
										286
									
								
								vendor/github.com/google/cel-go/common/types/int.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										286
									
								
								vendor/github.com/google/cel-go/common/types/int.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							@@ -0,0 +1,286 @@
 | 
			
		||||
// Copyright 2018 Google LLC
 | 
			
		||||
//
 | 
			
		||||
// Licensed under the Apache License, Version 2.0 (the "License");
 | 
			
		||||
// you may not use this file except in compliance with the License.
 | 
			
		||||
// You may obtain a copy of the License at
 | 
			
		||||
//
 | 
			
		||||
//      http://www.apache.org/licenses/LICENSE-2.0
 | 
			
		||||
//
 | 
			
		||||
// Unless required by applicable law or agreed to in writing, software
 | 
			
		||||
// distributed under the License is distributed on an "AS IS" BASIS,
 | 
			
		||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | 
			
		||||
// See the License for the specific language governing permissions and
 | 
			
		||||
// limitations under the License.
 | 
			
		||||
 | 
			
		||||
package types
 | 
			
		||||
 | 
			
		||||
import (
 | 
			
		||||
	"fmt"
 | 
			
		||||
	"reflect"
 | 
			
		||||
	"strconv"
 | 
			
		||||
	"time"
 | 
			
		||||
 | 
			
		||||
	"github.com/google/cel-go/common/types/ref"
 | 
			
		||||
	"github.com/google/cel-go/common/types/traits"
 | 
			
		||||
 | 
			
		||||
	anypb "google.golang.org/protobuf/types/known/anypb"
 | 
			
		||||
	structpb "google.golang.org/protobuf/types/known/structpb"
 | 
			
		||||
	wrapperspb "google.golang.org/protobuf/types/known/wrapperspb"
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
// Int type that implements ref.Val as well as comparison and math operators.
// The underlying representation is a signed 64-bit integer.
type Int int64

// Int constants used for comparison results.
const (
	// IntZero is the zero-value for Int
	IntZero   = Int(0)
	IntOne    = Int(1)
	IntNegOne = Int(-1)
)
 | 
			
		||||
 | 
			
		||||
var (
	// IntType singleton. The trait list advertises every operator interface
	// that Int implements so the interpreter can dispatch without reflection.
	IntType = NewTypeValue("int",
		traits.AdderType,
		traits.ComparerType,
		traits.DividerType,
		traits.ModderType,
		traits.MultiplierType,
		traits.NegatorType,
		traits.SubtractorType)

	// int32WrapperType reflected type for protobuf int32 wrapper type.
	int32WrapperType = reflect.TypeOf(&wrapperspb.Int32Value{})

	// int64WrapperType reflected type for protobuf int64 wrapper type.
	int64WrapperType = reflect.TypeOf(&wrapperspb.Int64Value{})
)
 | 
			
		||||
 | 
			
		||||
// Add implements traits.Adder.Add.
 | 
			
		||||
func (i Int) Add(other ref.Val) ref.Val {
 | 
			
		||||
	otherInt, ok := other.(Int)
 | 
			
		||||
	if !ok {
 | 
			
		||||
		return MaybeNoSuchOverloadErr(other)
 | 
			
		||||
	}
 | 
			
		||||
	val, err := addInt64Checked(int64(i), int64(otherInt))
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		return wrapErr(err)
 | 
			
		||||
	}
 | 
			
		||||
	return Int(val)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Compare implements traits.Comparer.Compare.
 | 
			
		||||
func (i Int) Compare(other ref.Val) ref.Val {
 | 
			
		||||
	otherInt, ok := other.(Int)
 | 
			
		||||
	if !ok {
 | 
			
		||||
		return MaybeNoSuchOverloadErr(other)
 | 
			
		||||
	}
 | 
			
		||||
	if i < otherInt {
 | 
			
		||||
		return IntNegOne
 | 
			
		||||
	}
 | 
			
		||||
	if i > otherInt {
 | 
			
		||||
		return IntOne
 | 
			
		||||
	}
 | 
			
		||||
	return IntZero
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// ConvertToNative implements ref.Val.ConvertToNative.
//
// Supported targets are: native int kinds (int/int32 with overflow checks,
// int64 directly), protobuf wrapper/Any/JSON value pointers, pointers to
// int32/int64, and interfaces the value already satisfies. Any other target
// produces a conversion error.
func (i Int) ConvertToNative(typeDesc reflect.Type) (interface{}, error) {
	switch typeDesc.Kind() {
	case reflect.Int, reflect.Int32:
		// Enums are also mapped as int32 derivations.
		// Note, the code doesn't convert to the enum value directly since this is not known, but
		// the net effect with respect to proto-assignment is handled correctly by the reflection
		// Convert method.
		v, err := int64ToInt32Checked(int64(i))
		if err != nil {
			return nil, err
		}
		return reflect.ValueOf(v).Convert(typeDesc).Interface(), nil
	case reflect.Int64:
		return reflect.ValueOf(i).Convert(typeDesc).Interface(), nil
	case reflect.Ptr:
		// Well-known protobuf pointer types are matched by identity first.
		switch typeDesc {
		case anyValueType:
			// Primitives must be wrapped before being set on an Any field.
			return anypb.New(wrapperspb.Int64(int64(i)))
		case int32WrapperType:
			// Convert the value to a wrapperspb.Int32Value, error on overflow.
			v, err := int64ToInt32Checked(int64(i))
			if err != nil {
				return nil, err
			}
			return wrapperspb.Int32(v), nil
		case int64WrapperType:
			// Convert the value to a wrapperspb.Int64Value.
			return wrapperspb.Int64(int64(i)), nil
		case jsonValueType:
			// The proto-to-JSON conversion rules would convert all 64-bit integer values to JSON
			// decimal strings. Because CEL ints might come from the automatic widening of 32-bit
			// values in protos, the JSON type is chosen dynamically based on the value.
			//
			// - Integers -2^53-1 < n < 2^53-1 are encoded as JSON numbers.
			// - Integers outside this range are encoded as JSON strings.
			//
			// The integer to float range represents the largest interval where such a conversion
			// can round-trip accurately. Thus, conversions from a 32-bit source can expect a JSON
			// number as with protobuf. Those consuming JSON from a 64-bit source must be able to
			// handle either a JSON number or a JSON decimal string. To handle these cases safely
			// the string values must be explicitly converted to int() within a CEL expression;
			// however, it is best to simply stay within the JSON number range when building JSON
			// objects in CEL.
			if i.isJSONSafe() {
				return structpb.NewNumberValue(float64(i)), nil
			}
			// Proto3 to JSON conversion requires string-formatted int64 values
			// since the conversion to floating point would result in truncation.
			return structpb.NewStringValue(strconv.FormatInt(int64(i), 10)), nil
		}
		// Otherwise handle generic *int32 / *int64 targets via reflection.
		switch typeDesc.Elem().Kind() {
		case reflect.Int32:
			// Convert the value to a wrapperspb.Int32Value, error on overflow.
			v, err := int64ToInt32Checked(int64(i))
			if err != nil {
				return nil, err
			}
			p := reflect.New(typeDesc.Elem())
			p.Elem().Set(reflect.ValueOf(v).Convert(typeDesc.Elem()))
			return p.Interface(), nil
		case reflect.Int64:
			v := int64(i)
			p := reflect.New(typeDesc.Elem())
			p.Elem().Set(reflect.ValueOf(v).Convert(typeDesc.Elem()))
			return p.Interface(), nil
		}
	case reflect.Interface:
		// Prefer handing back the native int64; fall back to the Int wrapper.
		iv := i.Value()
		if reflect.TypeOf(iv).Implements(typeDesc) {
			return iv, nil
		}
		if reflect.TypeOf(i).Implements(typeDesc) {
			return i, nil
		}
	}
	return nil, fmt.Errorf("unsupported type conversion from 'int' to %v", typeDesc)
}
 | 
			
		||||
 | 
			
		||||
// ConvertToType implements ref.Val.ConvertToType.
 | 
			
		||||
func (i Int) ConvertToType(typeVal ref.Type) ref.Val {
 | 
			
		||||
	switch typeVal {
 | 
			
		||||
	case IntType:
 | 
			
		||||
		return i
 | 
			
		||||
	case UintType:
 | 
			
		||||
		u, err := int64ToUint64Checked(int64(i))
 | 
			
		||||
		if err != nil {
 | 
			
		||||
			return wrapErr(err)
 | 
			
		||||
		}
 | 
			
		||||
		return Uint(u)
 | 
			
		||||
	case DoubleType:
 | 
			
		||||
		return Double(i)
 | 
			
		||||
	case StringType:
 | 
			
		||||
		return String(fmt.Sprintf("%d", int64(i)))
 | 
			
		||||
	case TimestampType:
 | 
			
		||||
		// The maximum positive value that can be passed to time.Unix is math.MaxInt64 minus the number
 | 
			
		||||
		// of seconds between year 1 and year 1970. See comments on unixToInternal.
 | 
			
		||||
		if int64(i) < minUnixTime || int64(i) > maxUnixTime {
 | 
			
		||||
			return celErrTimestampOverflow
 | 
			
		||||
		}
 | 
			
		||||
		return timestampOf(time.Unix(int64(i), 0).UTC())
 | 
			
		||||
	case TypeType:
 | 
			
		||||
		return IntType
 | 
			
		||||
	}
 | 
			
		||||
	return NewErr("type conversion error from '%s' to '%s'", IntType, typeVal)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Divide implements traits.Divider.Divide.
 | 
			
		||||
func (i Int) Divide(other ref.Val) ref.Val {
 | 
			
		||||
	otherInt, ok := other.(Int)
 | 
			
		||||
	if !ok {
 | 
			
		||||
		return MaybeNoSuchOverloadErr(other)
 | 
			
		||||
	}
 | 
			
		||||
	val, err := divideInt64Checked(int64(i), int64(otherInt))
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		return wrapErr(err)
 | 
			
		||||
	}
 | 
			
		||||
	return Int(val)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Equal implements ref.Val.Equal.
 | 
			
		||||
func (i Int) Equal(other ref.Val) ref.Val {
 | 
			
		||||
	otherInt, ok := other.(Int)
 | 
			
		||||
	if !ok {
 | 
			
		||||
		return MaybeNoSuchOverloadErr(other)
 | 
			
		||||
	}
 | 
			
		||||
	return Bool(i == otherInt)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Modulo implements traits.Modder.Modulo.
 | 
			
		||||
func (i Int) Modulo(other ref.Val) ref.Val {
 | 
			
		||||
	otherInt, ok := other.(Int)
 | 
			
		||||
	if !ok {
 | 
			
		||||
		return MaybeNoSuchOverloadErr(other)
 | 
			
		||||
	}
 | 
			
		||||
	val, err := moduloInt64Checked(int64(i), int64(otherInt))
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		return wrapErr(err)
 | 
			
		||||
	}
 | 
			
		||||
	return Int(val)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Multiply implements traits.Multiplier.Multiply.
 | 
			
		||||
func (i Int) Multiply(other ref.Val) ref.Val {
 | 
			
		||||
	otherInt, ok := other.(Int)
 | 
			
		||||
	if !ok {
 | 
			
		||||
		return MaybeNoSuchOverloadErr(other)
 | 
			
		||||
	}
 | 
			
		||||
	val, err := multiplyInt64Checked(int64(i), int64(otherInt))
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		return wrapErr(err)
 | 
			
		||||
	}
 | 
			
		||||
	return Int(val)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Negate implements traits.Negater.Negate.
 | 
			
		||||
func (i Int) Negate() ref.Val {
 | 
			
		||||
	val, err := negateInt64Checked(int64(i))
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		return wrapErr(err)
 | 
			
		||||
	}
 | 
			
		||||
	return Int(val)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Subtract implements traits.Subtractor.Subtract.
 | 
			
		||||
func (i Int) Subtract(subtrahend ref.Val) ref.Val {
 | 
			
		||||
	subtraInt, ok := subtrahend.(Int)
 | 
			
		||||
	if !ok {
 | 
			
		||||
		return MaybeNoSuchOverloadErr(subtrahend)
 | 
			
		||||
	}
 | 
			
		||||
	val, err := subtractInt64Checked(int64(i), int64(subtraInt))
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		return wrapErr(err)
 | 
			
		||||
	}
 | 
			
		||||
	return Int(val)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Type implements ref.Val.Type, identifying this value as a CEL int.
func (i Int) Type() ref.Type {
	return IntType
}

// Value implements ref.Val.Value, exposing the native int64 representation.
func (i Int) Value() interface{} {
	return int64(i)
}

// isJSONSafe indicates whether the int is safely representable as a floating point value in JSON.
// Values inside [-(2^53-1), 2^53-1] round-trip through float64 without loss.
func (i Int) isJSONSafe() bool {
	return i >= minIntJSON && i <= maxIntJSON
}
 | 
			
		||||
 | 
			
		||||
const (
	// maxIntJSON is defined as the Number.MAX_SAFE_INTEGER value per EcmaScript 6.
	maxIntJSON = 1<<53 - 1
	// minIntJSON is defined as the Number.MIN_SAFE_INTEGER value per EcmaScript 6.
	minIntJSON = -maxIntJSON
)
 | 
			
		||||
							
								
								
									
										55
									
								
								vendor/github.com/google/cel-go/common/types/iterator.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										55
									
								
								vendor/github.com/google/cel-go/common/types/iterator.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							@@ -0,0 +1,55 @@
 | 
			
		||||
// Copyright 2018 Google LLC
 | 
			
		||||
//
 | 
			
		||||
// Licensed under the Apache License, Version 2.0 (the "License");
 | 
			
		||||
// you may not use this file except in compliance with the License.
 | 
			
		||||
// You may obtain a copy of the License at
 | 
			
		||||
//
 | 
			
		||||
//      http://www.apache.org/licenses/LICENSE-2.0
 | 
			
		||||
//
 | 
			
		||||
// Unless required by applicable law or agreed to in writing, software
 | 
			
		||||
// distributed under the License is distributed on an "AS IS" BASIS,
 | 
			
		||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | 
			
		||||
// See the License for the specific language governing permissions and
 | 
			
		||||
// limitations under the License.
 | 
			
		||||
 | 
			
		||||
package types
 | 
			
		||||
 | 
			
		||||
import (
 | 
			
		||||
	"fmt"
 | 
			
		||||
	"reflect"
 | 
			
		||||
 | 
			
		||||
	"github.com/google/cel-go/common/types/ref"
 | 
			
		||||
	"github.com/google/cel-go/common/types/traits"
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
var (
	// IteratorType singleton.
	IteratorType = NewTypeValue("iterator", traits.IteratorType)
)

// baseIterator is the basis for list, map, and object iterators.
//
// An iterator in and of itself should not be a valid value for comparison, but must implement the
// `ref.Val` methods in order to be well-supported within instruction arguments processed by the
// interpreter.
type baseIterator struct{}
 | 
			
		||||
 | 
			
		||||
func (*baseIterator) ConvertToNative(typeDesc reflect.Type) (interface{}, error) {
 | 
			
		||||
	return nil, fmt.Errorf("type conversion on iterators not supported")
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (*baseIterator) ConvertToType(typeVal ref.Type) ref.Val {
 | 
			
		||||
	return NewErr("no such overload")
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (*baseIterator) Equal(other ref.Val) ref.Val {
 | 
			
		||||
	return NewErr("no such overload")
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (*baseIterator) Type() ref.Type {
 | 
			
		||||
	return IteratorType
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (*baseIterator) Value() interface{} {
 | 
			
		||||
	return nil
 | 
			
		||||
}
 | 
			
		||||
							
								
								
									
										28
									
								
								vendor/github.com/google/cel-go/common/types/json_value.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										28
									
								
								vendor/github.com/google/cel-go/common/types/json_value.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							@@ -0,0 +1,28 @@
 | 
			
		||||
// Copyright 2018 Google LLC
 | 
			
		||||
//
 | 
			
		||||
// Licensed under the Apache License, Version 2.0 (the "License");
 | 
			
		||||
// you may not use this file except in compliance with the License.
 | 
			
		||||
// You may obtain a copy of the License at
 | 
			
		||||
//
 | 
			
		||||
//      http://www.apache.org/licenses/LICENSE-2.0
 | 
			
		||||
//
 | 
			
		||||
// Unless required by applicable law or agreed to in writing, software
 | 
			
		||||
// distributed under the License is distributed on an "AS IS" BASIS,
 | 
			
		||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | 
			
		||||
// See the License for the specific language governing permissions and
 | 
			
		||||
// limitations under the License.
 | 
			
		||||
 | 
			
		||||
package types
 | 
			
		||||
 | 
			
		||||
import (
 | 
			
		||||
	"reflect"
 | 
			
		||||
 | 
			
		||||
	structpb "google.golang.org/protobuf/types/known/structpb"
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
// JSON type constants representing the reflected types of protobuf JSON values.
var (
	jsonValueType     = reflect.TypeOf(&structpb.Value{})
	jsonListValueType = reflect.TypeOf(&structpb.ListValue{})
	jsonStructType    = reflect.TypeOf(&structpb.Struct{})
)
 | 
			
		||||
							
								
								
									
										447
									
								
								vendor/github.com/google/cel-go/common/types/list.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										447
									
								
								vendor/github.com/google/cel-go/common/types/list.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							@@ -0,0 +1,447 @@
 | 
			
		||||
// Copyright 2018 Google LLC
 | 
			
		||||
//
 | 
			
		||||
// Licensed under the Apache License, Version 2.0 (the "License");
 | 
			
		||||
// you may not use this file except in compliance with the License.
 | 
			
		||||
// You may obtain a copy of the License at
 | 
			
		||||
//
 | 
			
		||||
//      http://www.apache.org/licenses/LICENSE-2.0
 | 
			
		||||
//
 | 
			
		||||
// Unless required by applicable law or agreed to in writing, software
 | 
			
		||||
// distributed under the License is distributed on an "AS IS" BASIS,
 | 
			
		||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | 
			
		||||
// See the License for the specific language governing permissions and
 | 
			
		||||
// limitations under the License.
 | 
			
		||||
 | 
			
		||||
package types
 | 
			
		||||
 | 
			
		||||
import (
 | 
			
		||||
	"fmt"
 | 
			
		||||
	"reflect"
 | 
			
		||||
 | 
			
		||||
	"github.com/google/cel-go/common/types/ref"
 | 
			
		||||
	"github.com/google/cel-go/common/types/traits"
 | 
			
		||||
	"google.golang.org/protobuf/proto"
 | 
			
		||||
	"google.golang.org/protobuf/reflect/protoreflect"
 | 
			
		||||
 | 
			
		||||
	anypb "google.golang.org/protobuf/types/known/anypb"
 | 
			
		||||
	structpb "google.golang.org/protobuf/types/known/structpb"
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
var (
	// ListType singleton. The trait list advertises concatenation, membership
	// testing, indexing, iteration, and sizing support to the interpreter.
	ListType = NewTypeValue("list",
		traits.AdderType,
		traits.ContainerType,
		traits.IndexerType,
		traits.IterableType,
		traits.SizerType)
)
 | 
			
		||||
 | 
			
		||||
// NewDynamicList returns a traits.Lister with heterogenous elements.
 | 
			
		||||
// value should be an array of "native" types, i.e. any type that
 | 
			
		||||
// NativeToValue() can convert to a ref.Val.
 | 
			
		||||
func NewDynamicList(adapter ref.TypeAdapter, value interface{}) traits.Lister {
 | 
			
		||||
	refValue := reflect.ValueOf(value)
 | 
			
		||||
	return &baseList{
 | 
			
		||||
		TypeAdapter: adapter,
 | 
			
		||||
		value:       value,
 | 
			
		||||
		size:        refValue.Len(),
 | 
			
		||||
		get: func(i int) interface{} {
 | 
			
		||||
			return refValue.Index(i).Interface()
 | 
			
		||||
		},
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// NewStringList returns a traits.Lister containing only strings.
 | 
			
		||||
func NewStringList(adapter ref.TypeAdapter, elems []string) traits.Lister {
 | 
			
		||||
	return &baseList{
 | 
			
		||||
		TypeAdapter: adapter,
 | 
			
		||||
		value:       elems,
 | 
			
		||||
		size:        len(elems),
 | 
			
		||||
		get:         func(i int) interface{} { return elems[i] },
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// NewRefValList returns a traits.Lister with ref.Val elements.
 | 
			
		||||
//
 | 
			
		||||
// This type specialization is used with list literals within CEL expressions.
 | 
			
		||||
func NewRefValList(adapter ref.TypeAdapter, elems []ref.Val) traits.Lister {
 | 
			
		||||
	return &baseList{
 | 
			
		||||
		TypeAdapter: adapter,
 | 
			
		||||
		value:       elems,
 | 
			
		||||
		size:        len(elems),
 | 
			
		||||
		get:         func(i int) interface{} { return elems[i] },
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// NewProtoList returns a traits.Lister based on a pb.List instance.
 | 
			
		||||
func NewProtoList(adapter ref.TypeAdapter, list protoreflect.List) traits.Lister {
 | 
			
		||||
	return &baseList{
 | 
			
		||||
		TypeAdapter: adapter,
 | 
			
		||||
		value:       list,
 | 
			
		||||
		size:        list.Len(),
 | 
			
		||||
		get:         func(i int) interface{} { return list.Get(i).Interface() },
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// NewJSONList returns a traits.Lister based on structpb.ListValue instance.
 | 
			
		||||
func NewJSONList(adapter ref.TypeAdapter, l *structpb.ListValue) traits.Lister {
 | 
			
		||||
	vals := l.GetValues()
 | 
			
		||||
	return &baseList{
 | 
			
		||||
		TypeAdapter: adapter,
 | 
			
		||||
		value:       l,
 | 
			
		||||
		size:        len(vals),
 | 
			
		||||
		get:         func(i int) interface{} { return vals[i] },
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// baseList points to a list containing elements of any type.
// The `value` is an array of native values, and refValue is its reflection object.
// The `ref.TypeAdapter` enables native type to CEL type conversions.
type baseList struct {
	ref.TypeAdapter
	// value is the native backing representation (slice, protoreflect.List,
	// or structpb.ListValue depending on the constructor used).
	value interface{}

	// size indicates the number of elements within the list.
	// Since objects are immutable the size of a list is static.
	size int

	// get returns a value at the specified integer index.
	// The index is guaranteed to be checked against the list index range.
	get func(int) interface{}
}
 | 
			
		||||
 | 
			
		||||
// Add implements the traits.Adder interface method.
 | 
			
		||||
func (l *baseList) Add(other ref.Val) ref.Val {
 | 
			
		||||
	otherList, ok := other.(traits.Lister)
 | 
			
		||||
	if !ok {
 | 
			
		||||
		return MaybeNoSuchOverloadErr(other)
 | 
			
		||||
	}
 | 
			
		||||
	if l.Size() == IntZero {
 | 
			
		||||
		return other
 | 
			
		||||
	}
 | 
			
		||||
	if otherList.Size() == IntZero {
 | 
			
		||||
		return l
 | 
			
		||||
	}
 | 
			
		||||
	return &concatList{
 | 
			
		||||
		TypeAdapter: l.TypeAdapter,
 | 
			
		||||
		prevList:    l,
 | 
			
		||||
		nextList:    otherList}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Contains implements the traits.Container interface method.
//
// Scans every element; only the first comparison error is retained, and a
// later True match still wins over an earlier error.
func (l *baseList) Contains(elem ref.Val) ref.Val {
	// Errors and unknowns short-circuit without touching the list.
	if IsUnknownOrError(elem) {
		return elem
	}
	var err ref.Val
	for i := 0; i < l.size; i++ {
		val := l.NativeToValue(l.get(i))
		cmp := elem.Equal(val)
		b, ok := cmp.(Bool)
		// When there is an error on the contain check, this is not necessarily terminal.
		// The contains call could find the element and return True, just as though the user
		// had written a per-element comparison in an exists() macro or logical ||, e.g.
		//    list.exists(e, e == elem)
		if !ok && err == nil {
			err = ValOrErr(cmp, "no such overload")
		}
		if b == True {
			return True
		}
	}
	// No match: surface the first comparison error, if any, else False.
	if err != nil {
		return err
	}
	return False
}
 | 
			
		||||
 | 
			
		||||
// ConvertToNative implements the ref.Val interface method.
//
// Conversion preference order: direct assignability of the backing value or
// the wrapper itself, then the well-known protobuf JSON/Any types, and
// finally an element-by-element conversion into a native slice or array.
func (l *baseList) ConvertToNative(typeDesc reflect.Type) (interface{}, error) {
	// If the underlying list value is assignable to the reflected type return it.
	if reflect.TypeOf(l.value).AssignableTo(typeDesc) {
		return l.value, nil
	}
	// If the list wrapper is assignable to the desired type return it.
	if reflect.TypeOf(l).AssignableTo(typeDesc) {
		return l, nil
	}
	// Attempt to convert the list to a set of well known protobuf types.
	switch typeDesc {
	case anyValueType:
		// Any is produced by first converting to a JSON ListValue, then packing it.
		json, err := l.ConvertToNative(jsonListValueType)
		if err != nil {
			return nil, err
		}
		return anypb.New(json.(proto.Message))
	case jsonValueType, jsonListValueType:
		jsonValues, err :=
			l.ConvertToNative(reflect.TypeOf([]*structpb.Value{}))
		if err != nil {
			return nil, err
		}
		jsonList := &structpb.ListValue{Values: jsonValues.([]*structpb.Value)}
		if typeDesc == jsonListValueType {
			return jsonList, nil
		}
		return structpb.NewListValue(jsonList), nil
	}
	// Non-list conversion.
	if typeDesc.Kind() != reflect.Slice && typeDesc.Kind() != reflect.Array {
		return nil, fmt.Errorf("type conversion error from list to '%v'", typeDesc)
	}

	// List conversion.
	// Allow the element ConvertToNative() function to determine whether conversion is possible.
	otherElemType := typeDesc.Elem()
	elemCount := l.size
	nativeList := reflect.MakeSlice(typeDesc, elemCount, elemCount)
	for i := 0; i < elemCount; i++ {
		elem := l.NativeToValue(l.get(i))
		nativeElemVal, err := elem.ConvertToNative(otherElemType)
		if err != nil {
			// First failing element aborts the whole conversion.
			return nil, err
		}
		nativeList.Index(i).Set(reflect.ValueOf(nativeElemVal))
	}
	return nativeList.Interface(), nil
}
 | 
			
		||||
 | 
			
		||||
// ConvertToType implements the ref.Val interface method.
 | 
			
		||||
func (l *baseList) ConvertToType(typeVal ref.Type) ref.Val {
 | 
			
		||||
	switch typeVal {
 | 
			
		||||
	case ListType:
 | 
			
		||||
		return l
 | 
			
		||||
	case TypeType:
 | 
			
		||||
		return ListType
 | 
			
		||||
	}
 | 
			
		||||
	return NewErr("type conversion error from '%s' to '%s'", ListType, typeVal)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Equal implements the ref.Val interface method.
//
// Lists of different sizes are trivially unequal. Otherwise elements are
// compared pairwise: a definite False wins immediately, while the first
// unknown/error result is remembered and returned only if no mismatch occurs.
func (l *baseList) Equal(other ref.Val) ref.Val {
	otherList, ok := other.(traits.Lister)
	if !ok {
		return MaybeNoSuchOverloadErr(other)
	}
	if l.Size() != otherList.Size() {
		return False
	}
	var maybeErr ref.Val
	for i := IntZero; i < l.Size().(Int); i++ {
		thisElem := l.Get(i)
		otherElem := otherList.Get(i)
		elemEq := thisElem.Equal(otherElem)
		if elemEq == False {
			return False
		}
		if maybeErr == nil && IsUnknownOrError(elemEq) {
			maybeErr = elemEq
		}
	}
	if maybeErr != nil {
		return maybeErr
	}
	return True
}
 | 
			
		||||
 | 
			
		||||
// Get implements the traits.Indexer interface method.
 | 
			
		||||
func (l *baseList) Get(index ref.Val) ref.Val {
 | 
			
		||||
	i, ok := index.(Int)
 | 
			
		||||
	if !ok {
 | 
			
		||||
		return ValOrErr(index, "unsupported index type '%s' in list", index.Type())
 | 
			
		||||
	}
 | 
			
		||||
	iv := int(i)
 | 
			
		||||
	if iv < 0 || iv >= l.size {
 | 
			
		||||
		return NewErr("index '%d' out of range in list size '%d'", i, l.Size())
 | 
			
		||||
	}
 | 
			
		||||
	elem := l.get(iv)
 | 
			
		||||
	return l.NativeToValue(elem)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Iterator implements the traits.Iterable interface method.
// Each call returns a fresh iterator positioned before the first element.
func (l *baseList) Iterator() traits.Iterator {
	return newListIterator(l)
}
 | 
			
		||||
 | 
			
		||||
// Size implements the traits.Sizer interface method.
// The size is fixed at construction time.
func (l *baseList) Size() ref.Val {
	return Int(l.size)
}
 | 
			
		||||
 | 
			
		||||
// Type implements the ref.Val interface method.
// All baseList instances report the ListType singleton.
func (l *baseList) Type() ref.Type {
	return ListType
}
 | 
			
		||||
 | 
			
		||||
// Value implements the ref.Val interface method.
// Returns the native backing value supplied at construction, unconverted.
func (l *baseList) Value() interface{} {
	return l.value
}
 | 
			
		||||
 | 
			
		||||
// concatList combines two list implementations together into a view.
// The `ref.TypeAdapter` enables native type to CEL type conversions.
type concatList struct {
	ref.TypeAdapter
	// value caches the merged native representation; populated lazily by Value().
	value    interface{}
	prevList traits.Lister
	nextList traits.Lister
}
 | 
			
		||||
 | 
			
		||||
// Add implements the traits.Adder interface method.
 | 
			
		||||
func (l *concatList) Add(other ref.Val) ref.Val {
 | 
			
		||||
	otherList, ok := other.(traits.Lister)
 | 
			
		||||
	if !ok {
 | 
			
		||||
		return MaybeNoSuchOverloadErr(other)
 | 
			
		||||
	}
 | 
			
		||||
	if l.Size() == IntZero {
 | 
			
		||||
		return other
 | 
			
		||||
	}
 | 
			
		||||
	if otherList.Size() == IntZero {
 | 
			
		||||
		return l
 | 
			
		||||
	}
 | 
			
		||||
	return &concatList{
 | 
			
		||||
		TypeAdapter: l.TypeAdapter,
 | 
			
		||||
		prevList:    l,
 | 
			
		||||
		nextList:    otherList}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Contains implements the traits.Container interface method.
//
// A True from either sub-list wins; otherwise an error/unknown from the
// prev list takes precedence over whatever the next list produced.
func (l *concatList) Contains(elem ref.Val) ref.Val {
	// The concat list relies on the IsErrorOrUnknown checks against the input element to be
	// performed by the `prevList` and/or `nextList`.
	prev := l.prevList.Contains(elem)
	// Short-circuit the return if the elem was found in the prev list.
	if prev == True {
		return prev
	}
	// Return if the elem was found in the next list.
	next := l.nextList.Contains(elem)
	if next == True {
		return next
	}
	// Handle the case where an error or unknown was encountered before checking next.
	if IsUnknownOrError(prev) {
		return prev
	}
	// Otherwise, rely on the next value as the representative result.
	return next
}
 | 
			
		||||
 | 
			
		||||
// ConvertToNative implements the ref.Val interface method.
//
// Materializes the concatenation into a flat dynamic list (via Value()) and
// delegates the conversion to baseList.ConvertToNative.
func (l *concatList) ConvertToNative(typeDesc reflect.Type) (interface{}, error) {
	combined := NewDynamicList(l.TypeAdapter, l.Value().([]interface{}))
	return combined.ConvertToNative(typeDesc)
}
 | 
			
		||||
 | 
			
		||||
// ConvertToType implements the ref.Val interface method.
 | 
			
		||||
func (l *concatList) ConvertToType(typeVal ref.Type) ref.Val {
 | 
			
		||||
	switch typeVal {
 | 
			
		||||
	case ListType:
 | 
			
		||||
		return l
 | 
			
		||||
	case TypeType:
 | 
			
		||||
		return ListType
 | 
			
		||||
	}
 | 
			
		||||
	return NewErr("type conversion error from '%s' to '%s'", ListType, typeVal)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Equal implements the ref.Val interface method.
//
// Same semantics as baseList.Equal: size mismatch is False, a pairwise False
// wins immediately, and the first unknown/error is returned only if no
// definite mismatch is found.
func (l *concatList) Equal(other ref.Val) ref.Val {
	otherList, ok := other.(traits.Lister)
	if !ok {
		return MaybeNoSuchOverloadErr(other)
	}
	if l.Size() != otherList.Size() {
		return False
	}
	var maybeErr ref.Val
	for i := IntZero; i < l.Size().(Int); i++ {
		thisElem := l.Get(i)
		otherElem := otherList.Get(i)
		elemEq := thisElem.Equal(otherElem)
		if elemEq == False {
			return False
		}
		if maybeErr == nil && IsUnknownOrError(elemEq) {
			maybeErr = elemEq
		}
	}
	if maybeErr != nil {
		return maybeErr
	}
	return True
}
 | 
			
		||||
 | 
			
		||||
// Get implements the traits.Indexer interface method.
 | 
			
		||||
func (l *concatList) Get(index ref.Val) ref.Val {
 | 
			
		||||
	i, ok := index.(Int)
 | 
			
		||||
	if !ok {
 | 
			
		||||
		return MaybeNoSuchOverloadErr(index)
 | 
			
		||||
	}
 | 
			
		||||
	if i < l.prevList.Size().(Int) {
 | 
			
		||||
		return l.prevList.Get(i)
 | 
			
		||||
	}
 | 
			
		||||
	offset := i - l.prevList.Size().(Int)
 | 
			
		||||
	return l.nextList.Get(offset)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Iterator implements the traits.Iterable interface method.
// Iteration walks prev then next transparently via the indexed Get method.
func (l *concatList) Iterator() traits.Iterator {
	return newListIterator(l)
}
 | 
			
		||||
 | 
			
		||||
// Size implements the traits.Sizer interface method.
// The result is the sum of the two sub-list sizes, computed on each call.
func (l *concatList) Size() ref.Val {
	return l.prevList.Size().(Int).Add(l.nextList.Size())
}
 | 
			
		||||
 | 
			
		||||
// Type implements the ref.Val interface method.
// Concatenated lists report the same ListType as flat lists.
func (l *concatList) Type() ref.Type {
	return ListType
}
 | 
			
		||||
 | 
			
		||||
// Value implements the ref.Val interface method.
//
// The merged native slice is built on first use and memoized in l.value.
// NOTE(review): the memoization is not synchronized — concurrent first calls
// could race on l.value; confirm evaluation is single-threaded per list.
func (l *concatList) Value() interface{} {
	if l.value == nil {
		// Flatten prev followed by next into one slice of native values.
		merged := make([]interface{}, l.Size().(Int))
		prevLen := l.prevList.Size().(Int)
		for i := Int(0); i < prevLen; i++ {
			merged[i] = l.prevList.Get(i).Value()
		}
		nextLen := l.nextList.Size().(Int)
		for j := Int(0); j < nextLen; j++ {
			merged[prevLen+j] = l.nextList.Get(j).Value()
		}
		l.value = merged
	}
	return l.value
}
 | 
			
		||||
 | 
			
		||||
func newListIterator(listValue traits.Lister) traits.Iterator {
 | 
			
		||||
	return &listIterator{
 | 
			
		||||
		listValue: listValue,
 | 
			
		||||
		len:       listValue.Size().(Int),
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// listIterator iterates a traits.Lister by index.
type listIterator struct {
	*baseIterator
	listValue traits.Lister
	// cursor is the index of the next element to yield.
	cursor    Int
	// len is the list size captured at iterator construction.
	len       Int
}
 | 
			
		||||
 | 
			
		||||
// HasNext implements the traits.Iterator interface method.
// True while the cursor has not reached the captured list length.
func (it *listIterator) HasNext() ref.Val {
	return Bool(it.cursor < it.len)
}
 | 
			
		||||
 | 
			
		||||
// Next implements the traits.Iterator interface method.
 | 
			
		||||
func (it *listIterator) Next() ref.Val {
 | 
			
		||||
	if it.HasNext() == True {
 | 
			
		||||
		index := it.cursor
 | 
			
		||||
		it.cursor++
 | 
			
		||||
		return it.listValue.Get(index)
 | 
			
		||||
	}
 | 
			
		||||
	return nil
 | 
			
		||||
}
 | 
			
		||||
							
								
								
									
										809
									
								
								vendor/github.com/google/cel-go/common/types/map.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										809
									
								
								vendor/github.com/google/cel-go/common/types/map.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							@@ -0,0 +1,809 @@
 | 
			
		||||
// Copyright 2018 Google LLC
 | 
			
		||||
//
 | 
			
		||||
// Licensed under the Apache License, Version 2.0 (the "License");
 | 
			
		||||
// you may not use this file except in compliance with the License.
 | 
			
		||||
// You may obtain a copy of the License at
 | 
			
		||||
//
 | 
			
		||||
//      http://www.apache.org/licenses/LICENSE-2.0
 | 
			
		||||
//
 | 
			
		||||
// Unless required by applicable law or agreed to in writing, software
 | 
			
		||||
// distributed under the License is distributed on an "AS IS" BASIS,
 | 
			
		||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | 
			
		||||
// See the License for the specific language governing permissions and
 | 
			
		||||
// limitations under the License.
 | 
			
		||||
 | 
			
		||||
package types
 | 
			
		||||
 | 
			
		||||
import (
 | 
			
		||||
	"fmt"
 | 
			
		||||
	"reflect"
 | 
			
		||||
 | 
			
		||||
	"github.com/google/cel-go/common/types/pb"
 | 
			
		||||
	"github.com/google/cel-go/common/types/ref"
 | 
			
		||||
	"github.com/google/cel-go/common/types/traits"
 | 
			
		||||
	"github.com/stoewer/go-strcase"
 | 
			
		||||
	"google.golang.org/protobuf/proto"
 | 
			
		||||
	"google.golang.org/protobuf/reflect/protoreflect"
 | 
			
		||||
 | 
			
		||||
	anypb "google.golang.org/protobuf/types/known/anypb"
 | 
			
		||||
	structpb "google.golang.org/protobuf/types/known/structpb"
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
// NewDynamicMap returns a traits.Mapper value with dynamic key, value pairs.
 | 
			
		||||
func NewDynamicMap(adapter ref.TypeAdapter, value interface{}) traits.Mapper {
 | 
			
		||||
	refValue := reflect.ValueOf(value)
 | 
			
		||||
	return &baseMap{
 | 
			
		||||
		TypeAdapter: adapter,
 | 
			
		||||
		mapAccessor: newReflectMapAccessor(adapter, refValue),
 | 
			
		||||
		value:       value,
 | 
			
		||||
		size:        refValue.Len(),
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// NewJSONStruct creates a traits.Mapper implementation backed by a JSON struct that has been
 | 
			
		||||
// encoded in protocol buffer form.
 | 
			
		||||
//
 | 
			
		||||
// The `adapter` argument provides type adaptation capabilities from proto to CEL.
 | 
			
		||||
func NewJSONStruct(adapter ref.TypeAdapter, value *structpb.Struct) traits.Mapper {
 | 
			
		||||
	fields := value.GetFields()
 | 
			
		||||
	return &baseMap{
 | 
			
		||||
		TypeAdapter: adapter,
 | 
			
		||||
		mapAccessor: newJSONStructAccessor(adapter, fields),
 | 
			
		||||
		value:       value,
 | 
			
		||||
		size:        len(fields),
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// NewRefValMap returns a specialized traits.Mapper with CEL valued keys and values.
// The input map is used directly as the backing store; it is not copied.
func NewRefValMap(adapter ref.TypeAdapter, value map[ref.Val]ref.Val) traits.Mapper {
	return &baseMap{
		TypeAdapter: adapter,
		mapAccessor: newRefValMapAccessor(value),
		value:       value,
		size:        len(value),
	}
}
 | 
			
		||||
 | 
			
		||||
// NewStringInterfaceMap returns a specialized traits.Mapper with string keys and interface values.
// The input map is used directly as the backing store; it is not copied.
func NewStringInterfaceMap(adapter ref.TypeAdapter, value map[string]interface{}) traits.Mapper {
	return &baseMap{
		TypeAdapter: adapter,
		mapAccessor: newStringIfaceMapAccessor(adapter, value),
		value:       value,
		size:        len(value),
	}
}
 | 
			
		||||
 | 
			
		||||
// NewStringStringMap returns a specialized traits.Mapper with string keys and values.
// The input map is used directly as the backing store; it is not copied.
func NewStringStringMap(adapter ref.TypeAdapter, value map[string]string) traits.Mapper {
	return &baseMap{
		TypeAdapter: adapter,
		mapAccessor: newStringMapAccessor(value),
		value:       value,
		size:        len(value),
	}
}
 | 
			
		||||
 | 
			
		||||
// NewProtoMap returns a specialized traits.Mapper for handling protobuf map values.
// Unlike the other constructors, the size is not captured here; protoMap
// derives it from the underlying pb.Map.
func NewProtoMap(adapter ref.TypeAdapter, value *pb.Map) traits.Mapper {
	return &protoMap{
		TypeAdapter: adapter,
		value:       value,
	}
}
 | 
			
		||||
 | 
			
		||||
var (
	// MapType singleton declaring the CEL 'map' type with the container,
	// indexer, iterable, and sizer traits.
	MapType = NewTypeValue("map",
		traits.ContainerType,
		traits.IndexerType,
		traits.IterableType,
		traits.SizerType)
)
 | 
			
		||||
 | 
			
		||||
// mapAccessor is a private interface for finding values within a map and iterating over the keys.
// This interface implements portions of the API surface area required by the traits.Mapper
// interface.
type mapAccessor interface {
	// Find returns a value, if one exists, for the input key.
	//
	// If the key is not found the function returns (nil, false).
	// If the input key is not valid for the map, or is Err or Unknown the function returns
	// (Unknown|Err, false).
	Find(ref.Val) (ref.Val, bool)

	// Iterator returns an Iterator over the map key set.
	Iterator() traits.Iterator
}
 | 
			
		||||
 | 
			
		||||
// baseMap is a reflection based map implementation designed to handle a variety of map-like types.
//
// Since CEL is side-effect free, the base map represents an immutable object.
type baseMap struct {
	// TypeAdapter used to convert keys and values accessed within the map.
	ref.TypeAdapter

	// mapAccessor interface implementation used to find and iterate over map keys.
	mapAccessor

	// value is the native Go value upon which the map type operates.
	value interface{}

	// size is the number of entries in the map.
	size int
}
 | 
			
		||||
 | 
			
		||||
// Contains implements the traits.Container interface method.
 | 
			
		||||
func (m *baseMap) Contains(index ref.Val) ref.Val {
 | 
			
		||||
	val, found := m.Find(index)
 | 
			
		||||
	// When the index is not found and val is non-nil, this is an error or unknown value.
 | 
			
		||||
	if !found && val != nil && IsUnknownOrError(val) {
 | 
			
		||||
		return val
 | 
			
		||||
	}
 | 
			
		||||
	return Bool(found)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// ConvertToNative implements the ref.Val interface method.
//
// Conversion preference order: direct assignability of the backing value or
// the wrapper itself, then the well-known protobuf JSON/Any types, then a
// native map, and finally a (pointer-to-)struct built by reflection from
// camel-cased key names.
func (m *baseMap) ConvertToNative(typeDesc reflect.Type) (interface{}, error) {
	// If the map is already assignable to the desired type return it, e.g. interfaces and
	// maps with the same key value types.
	if reflect.TypeOf(m.value).AssignableTo(typeDesc) {
		return m.value, nil
	}
	if reflect.TypeOf(m).AssignableTo(typeDesc) {
		return m, nil
	}
	switch typeDesc {
	case anyValueType:
		// Any is produced by first converting to a JSON Struct, then packing it.
		json, err := m.ConvertToNative(jsonStructType)
		if err != nil {
			return nil, err
		}
		return anypb.New(json.(proto.Message))
	case jsonValueType, jsonStructType:
		jsonEntries, err :=
			m.ConvertToNative(reflect.TypeOf(map[string]*structpb.Value{}))
		if err != nil {
			return nil, err
		}
		jsonMap := &structpb.Struct{Fields: jsonEntries.(map[string]*structpb.Value)}
		if typeDesc == jsonStructType {
			return jsonMap, nil
		}
		return structpb.NewStructValue(jsonMap), nil
	}

	// Unwrap pointers, but track their use.
	isPtr := false
	if typeDesc.Kind() == reflect.Ptr {
		tk := typeDesc
		typeDesc = typeDesc.Elem()
		// Only a single level of indirection is supported.
		if typeDesc.Kind() == reflect.Ptr {
			return nil, fmt.Errorf("unsupported type conversion to '%v'", tk)
		}
		isPtr = true
	}
	switch typeDesc.Kind() {
	// Map conversion.
	case reflect.Map:
		otherKey := typeDesc.Key()
		otherElem := typeDesc.Elem()
		nativeMap := reflect.MakeMapWithSize(typeDesc, m.size)
		it := m.Iterator()
		for it.HasNext() == True {
			key := it.Next()
			refKeyValue, err := key.ConvertToNative(otherKey)
			if err != nil {
				return nil, err
			}
			refElemValue, err := m.Get(key).ConvertToNative(otherElem)
			if err != nil {
				return nil, err
			}
			nativeMap.SetMapIndex(reflect.ValueOf(refKeyValue), reflect.ValueOf(refElemValue))
		}
		return nativeMap.Interface(), nil
	case reflect.Struct:
		nativeStructPtr := reflect.New(typeDesc)
		nativeStruct := nativeStructPtr.Elem()
		it := m.Iterator()
		for it.HasNext() == True {
			key := it.Next()
			// Ensure the field name being referenced is exported.
			// Only exported (public) field names can be set by reflection, where the name
			// must be at least one character in length and start with an upper-case letter.
			fieldName := key.ConvertToType(StringType)
			if IsError(fieldName) {
				return nil, fieldName.(*Err)
			}
			name := string(fieldName.(String))
			name = strcase.UpperCamelCase(name)
			fieldRef := nativeStruct.FieldByName(name)
			if !fieldRef.IsValid() {
				return nil, fmt.Errorf("type conversion error, no such field '%s' in type '%v'", name, typeDesc)
			}
			fieldValue, err := m.Get(key).ConvertToNative(fieldRef.Type())
			if err != nil {
				return nil, err
			}
			fieldRef.Set(reflect.ValueOf(fieldValue))
		}
		// Honor the pointer indirection requested by the caller.
		if isPtr {
			return nativeStructPtr.Interface(), nil
		}
		return nativeStruct.Interface(), nil
	}
	return nil, fmt.Errorf("type conversion error from map to '%v'", typeDesc)
}
 | 
			
		||||
 | 
			
		||||
// ConvertToType implements the ref.Val interface method.
 | 
			
		||||
func (m *baseMap) ConvertToType(typeVal ref.Type) ref.Val {
 | 
			
		||||
	switch typeVal {
 | 
			
		||||
	case MapType:
 | 
			
		||||
		return m
 | 
			
		||||
	case TypeType:
 | 
			
		||||
		return MapType
 | 
			
		||||
	}
 | 
			
		||||
	return NewErr("type conversion error from '%s' to '%s'", MapType, typeVal)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Equal implements the ref.Val interface method.
//
// Maps of different sizes are trivially unequal. Otherwise entries are
// compared key by key: a definitely-missing key or a False value comparison
// wins immediately, while the first unknown/error is remembered and returned
// only when no definite mismatch is found.
func (m *baseMap) Equal(other ref.Val) ref.Val {
	otherMap, ok := other.(traits.Mapper)
	if !ok {
		return MaybeNoSuchOverloadErr(other)
	}
	if m.Size() != otherMap.Size() {
		return False
	}
	it := m.Iterator()
	var maybeErr ref.Val
	for it.HasNext() == True {
		key := it.Next()
		thisVal, _ := m.Find(key)
		otherVal, found := otherMap.Find(key)
		if !found {
			// A nil miss means the key is truly absent; a non-nil miss carries
			// an error/unknown which is deferred, not terminal.
			if otherVal == nil {
				return False
			}
			if maybeErr == nil {
				maybeErr = MaybeNoSuchOverloadErr(otherVal)
			}
			continue
		}
		valEq := thisVal.Equal(otherVal)
		if valEq == False {
			return False
		}
		if maybeErr == nil && IsUnknownOrError(valEq) {
			maybeErr = valEq
		}
	}
	if maybeErr != nil {
		return maybeErr
	}
	return True
}
 | 
			
		||||
 | 
			
		||||
// Get implements the traits.Indexer interface method.
 | 
			
		||||
func (m *baseMap) Get(key ref.Val) ref.Val {
 | 
			
		||||
	v, found := m.Find(key)
 | 
			
		||||
	if !found {
 | 
			
		||||
		return ValOrErr(v, "no such key: %v", key)
 | 
			
		||||
	}
 | 
			
		||||
	return v
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Size implements the traits.Sizer interface method.
// The size is fixed at construction time.
func (m *baseMap) Size() ref.Val {
	return Int(m.size)
}
 | 
			
		||||
 | 
			
		||||
// Type implements the ref.Val interface method.
// All baseMap instances report the MapType singleton.
func (m *baseMap) Type() ref.Type {
	return MapType
}
 | 
			
		||||
 | 
			
		||||
// Value implements the ref.Val interface method.
// Returns the native backing value supplied at construction, unconverted.
func (m *baseMap) Value() interface{} {
	return m.value
}
 | 
			
		||||
 | 
			
		||||
// newJSONStructAccessor returns a mapAccessor over the fields of a JSON
// struct, using the adapter to surface field values as CEL types.
func newJSONStructAccessor(adapter ref.TypeAdapter, st map[string]*structpb.Value) mapAccessor {
	return &jsonStructAccessor{
		TypeAdapter: adapter,
		st:          st,
	}
}
 | 
			
		||||
 | 
			
		||||
// jsonStructAccessor finds and iterates string-keyed protobuf Struct fields.
type jsonStructAccessor struct {
	ref.TypeAdapter
	// st is the field map of the backing structpb.Struct.
	st map[string]*structpb.Value
}
 | 
			
		||||
 | 
			
		||||
// Find searches the json struct field map for the input key value and returns (value, true) if
// found.
//
// If the key is not found the function returns (nil, false).
// If the input key is not a String, or is an Err or Unknown, the function returns
// (Unknown|Err, false).
func (a *jsonStructAccessor) Find(key ref.Val) (ref.Val, bool) {
	strKey, ok := key.(String)
	if !ok {
		return ValOrErr(key, "unsupported key type: %v", key.Type()), false
	}
	keyVal, found := a.st[string(strKey)]
	if !found {
		return nil, false
	}
	// Adapt the protobuf Value to its CEL equivalent on the way out.
	return a.NativeToValue(keyVal), true
}
 | 
			
		||||
 | 
			
		||||
// Iterator creates a new traits.Iterator from the set of JSON struct field names.
 | 
			
		||||
func (a *jsonStructAccessor) Iterator() traits.Iterator {
 | 
			
		||||
	// Copy the keys to make their order stable.
 | 
			
		||||
	mapKeys := make([]string, len(a.st))
 | 
			
		||||
	i := 0
 | 
			
		||||
	for k := range a.st {
 | 
			
		||||
		mapKeys[i] = k
 | 
			
		||||
		i++
 | 
			
		||||
	}
 | 
			
		||||
	return &stringKeyIterator{
 | 
			
		||||
		mapKeys: mapKeys,
 | 
			
		||||
		len:     len(mapKeys),
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// newReflectMapAccessor returns a mapAccessor over an arbitrary native Go map
// held as a reflect.Value, capturing the map's key type for key conversions.
func newReflectMapAccessor(adapter ref.TypeAdapter, value reflect.Value) mapAccessor {
	keyType := value.Type().Key()
	return &reflectMapAccessor{
		TypeAdapter: adapter,
		refValue:    value,
		keyType:     keyType,
	}
}
 | 
			
		||||
 | 
			
		||||
// reflectMapAccessor finds and iterates entries of a native Go map via reflection.
type reflectMapAccessor struct {
	ref.TypeAdapter
	// refValue is the reflect.Value of the backing Go map.
	refValue reflect.Value
	// keyType is the map's declared key type, used to convert CEL keys.
	keyType  reflect.Type
}
 | 
			
		||||
 | 
			
		||||
// Find converts the input key to a native Golang type and then uses reflection to find the key,
// returning (value, true) if present.
//
// If the key is not found the function returns (nil, false).
// If the input key cannot be converted to the map's key type, or is an Err or Unknown,
// the function returns (Unknown|Err, false).
func (a *reflectMapAccessor) Find(key ref.Val) (ref.Val, bool) {
	if IsUnknownOrError(key) {
		return MaybeNoSuchOverloadErr(key), false
	}
	if a.refValue.Len() == 0 {
		return nil, false
	}
	k, err := key.ConvertToNative(a.keyType)
	if err != nil {
		return NewErr("unsupported key type: %v", key.Type()), false
	}
	refKey := reflect.ValueOf(k)
	val := a.refValue.MapIndex(refKey)
	if val.IsValid() {
		return a.NativeToValue(val.Interface()), true
	}
	// Miss: distinguish a genuinely absent key from a key whose reflect.Kind
	// differs from every existing key's kind. A matching kind means the key
	// is simply absent; no matching kind is reported as an unsupported key.
	mapIt := a.refValue.MapRange()
	for mapIt.Next() {
		if refKey.Kind() == mapIt.Key().Kind() {
			return nil, false
		}
	}
	return NewErr("unsupported key type: %v", key.Type()), false
}
 | 
			
		||||
 | 
			
		||||
// Iterator creates a Golang reflection based traits.Iterator.
 | 
			
		||||
func (a *reflectMapAccessor) Iterator() traits.Iterator {
 | 
			
		||||
	return &mapIterator{
 | 
			
		||||
		TypeAdapter: a.TypeAdapter,
 | 
			
		||||
		mapKeys:     a.refValue.MapRange(),
 | 
			
		||||
		len:         a.refValue.Len(),
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func newRefValMapAccessor(mapVal map[ref.Val]ref.Val) mapAccessor {
 | 
			
		||||
	return &refValMapAccessor{mapVal: mapVal}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
type refValMapAccessor struct {
 | 
			
		||||
	mapVal map[ref.Val]ref.Val
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Find uses native map accesses to find the key, returning (value, true) if present.
//
// If the key is not found the function returns (nil, false).
// If the input key is an Err or Unknown, the function returns (Unknown|Err, false).
func (a *refValMapAccessor) Find(key ref.Val) (ref.Val, bool) {
	// Propagate unknowns and errors rather than treating them as misses.
	if IsUnknownOrError(key) {
		return key, false
	}
	if len(a.mapVal) == 0 {
		return nil, false
	}
	keyVal, found := a.mapVal[key]
	if found {
		return keyVal, true
	}
	// The key is absent. If any existing key shares the lookup key's CEL type
	// name, report a plain miss; otherwise treat the key type as unsupported.
	for k := range a.mapVal {
		if k.Type().TypeName() == key.Type().TypeName() {
			return nil, false
		}
	}
	// found is necessarily false here, so this returns (err, false).
	return NewErr("unsupported key type: %v", key.Type()), found
}
 | 
			
		||||
 | 
			
		||||
// Iterator produces a new traits.Iterator which iterates over the map keys via Golang reflection.
 | 
			
		||||
func (a *refValMapAccessor) Iterator() traits.Iterator {
 | 
			
		||||
	return &mapIterator{
 | 
			
		||||
		TypeAdapter: DefaultTypeAdapter,
 | 
			
		||||
		mapKeys:     reflect.ValueOf(a.mapVal).MapRange(),
 | 
			
		||||
		len:         len(a.mapVal),
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func newStringMapAccessor(strMap map[string]string) mapAccessor {
 | 
			
		||||
	return &stringMapAccessor{mapVal: strMap}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
type stringMapAccessor struct {
 | 
			
		||||
	mapVal map[string]string
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Find uses native map accesses to find the key, returning (value, true) if present.
 | 
			
		||||
//
 | 
			
		||||
// If the key is not found the function returns (nil, false).
 | 
			
		||||
// If the input key is not a String, or is an Err or Unknown, the function returns
 | 
			
		||||
// (Unknown|Err, false).
 | 
			
		||||
func (a *stringMapAccessor) Find(key ref.Val) (ref.Val, bool) {
 | 
			
		||||
	strKey, ok := key.(String)
 | 
			
		||||
	if !ok {
 | 
			
		||||
		return ValOrErr(key, "unsupported key type: %v", key.Type()), false
 | 
			
		||||
	}
 | 
			
		||||
	keyVal, found := a.mapVal[string(strKey)]
 | 
			
		||||
	if !found {
 | 
			
		||||
		return nil, false
 | 
			
		||||
	}
 | 
			
		||||
	return String(keyVal), true
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Iterator creates a new traits.Iterator from the string key set of the map.
 | 
			
		||||
func (a *stringMapAccessor) Iterator() traits.Iterator {
 | 
			
		||||
	// Copy the keys to make their order stable.
 | 
			
		||||
	mapKeys := make([]string, len(a.mapVal))
 | 
			
		||||
	i := 0
 | 
			
		||||
	for k := range a.mapVal {
 | 
			
		||||
		mapKeys[i] = k
 | 
			
		||||
		i++
 | 
			
		||||
	}
 | 
			
		||||
	return &stringKeyIterator{
 | 
			
		||||
		mapKeys: mapKeys,
 | 
			
		||||
		len:     len(mapKeys),
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func newStringIfaceMapAccessor(adapter ref.TypeAdapter, mapVal map[string]interface{}) mapAccessor {
 | 
			
		||||
	return &stringIfaceMapAccessor{
 | 
			
		||||
		TypeAdapter: adapter,
 | 
			
		||||
		mapVal:      mapVal,
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
type stringIfaceMapAccessor struct {
 | 
			
		||||
	ref.TypeAdapter
 | 
			
		||||
	mapVal map[string]interface{}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Find uses native map accesses to find the key, returning (value, true) if present.
 | 
			
		||||
//
 | 
			
		||||
// If the key is not found the function returns (nil, false).
 | 
			
		||||
// If the input key is not a String, or is an Err or Unknown, the function returns
 | 
			
		||||
// (Unknown|Err, false).
 | 
			
		||||
func (a *stringIfaceMapAccessor) Find(key ref.Val) (ref.Val, bool) {
 | 
			
		||||
	strKey, ok := key.(String)
 | 
			
		||||
	if !ok {
 | 
			
		||||
		return ValOrErr(key, "unsupported key type: %v", key.Type()), false
 | 
			
		||||
	}
 | 
			
		||||
	keyVal, found := a.mapVal[string(strKey)]
 | 
			
		||||
	if !found {
 | 
			
		||||
		return nil, false
 | 
			
		||||
	}
 | 
			
		||||
	return a.NativeToValue(keyVal), true
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Iterator creates a new traits.Iterator from the string key set of the map.
 | 
			
		||||
func (a *stringIfaceMapAccessor) Iterator() traits.Iterator {
 | 
			
		||||
	// Copy the keys to make their order stable.
 | 
			
		||||
	mapKeys := make([]string, len(a.mapVal))
 | 
			
		||||
	i := 0
 | 
			
		||||
	for k := range a.mapVal {
 | 
			
		||||
		mapKeys[i] = k
 | 
			
		||||
		i++
 | 
			
		||||
	}
 | 
			
		||||
	return &stringKeyIterator{
 | 
			
		||||
		mapKeys: mapKeys,
 | 
			
		||||
		len:     len(mapKeys),
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// protoMap is a specialized, separate implementation of the traits.Mapper interfaces tailored to
// accessing protoreflect.Map values.
type protoMap struct {
	ref.TypeAdapter
	value *pb.Map // the protobuf map plus its key/value type metadata
}
 | 
			
		||||
 | 
			
		||||
// Contains returns whether the map contains the given key.
 | 
			
		||||
func (m *protoMap) Contains(key ref.Val) ref.Val {
 | 
			
		||||
	val, found := m.Find(key)
 | 
			
		||||
	// When the index is not found and val is non-nil, this is an error or unknown value.
 | 
			
		||||
	if !found && val != nil && IsUnknownOrError(val) {
 | 
			
		||||
		return val
 | 
			
		||||
	}
 | 
			
		||||
	return Bool(found)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// ConvertToNative implements the ref.Val interface method.
//
// Note, assignment to Golang struct types is not yet supported.
func (m *protoMap) ConvertToNative(typeDesc reflect.Type) (interface{}, error) {
	// If the map is already assignable to the desired type return it, e.g. interfaces and
	// maps with the same key value types.
	switch typeDesc {
	case anyValueType:
		// Pack as google.protobuf.Any by first converting to a JSON Struct.
		json, err := m.ConvertToNative(jsonStructType)
		if err != nil {
			return nil, err
		}
		return anypb.New(json.(proto.Message))
	case jsonValueType, jsonStructType:
		// Convert entries to JSON values first, then wrap in a Struct.
		jsonEntries, err :=
			m.ConvertToNative(reflect.TypeOf(map[string]*structpb.Value{}))
		if err != nil {
			return nil, err
		}
		jsonMap := &structpb.Struct{
			Fields: jsonEntries.(map[string]*structpb.Value)}
		if typeDesc == jsonStructType {
			return jsonMap, nil
		}
		return structpb.NewStructValue(jsonMap), nil
	}
	// Identity-style conversions to the wrapped value or the protoMap itself.
	switch typeDesc.Kind() {
	case reflect.Struct, reflect.Ptr:
		if reflect.TypeOf(m.value).AssignableTo(typeDesc) {
			return m.value, nil
		}
		if reflect.TypeOf(m).AssignableTo(typeDesc) {
			return m, nil
		}
	}
	if typeDesc.Kind() != reflect.Map {
		return nil, fmt.Errorf("unsupported type conversion: %v to map", typeDesc)
	}

	keyType := m.value.KeyType.ReflectType()
	valType := m.value.ValueType.ReflectType()
	otherKeyType := typeDesc.Key()
	otherValType := typeDesc.Elem()
	mapVal := reflect.MakeMapWithSize(typeDesc, m.value.Len())
	// err is captured by the Range closure below; assignments inside the
	// closure surface conversion failures after Range returns.
	var err error
	m.value.Range(func(key protoreflect.MapKey, val protoreflect.Value) bool {
		ntvKey := key.Interface()
		ntvVal := val.Interface()
		// Unwrap message-typed values to their generated Go message form.
		switch ntvVal.(type) {
		case protoreflect.Message:
			ntvVal = ntvVal.(protoreflect.Message).Interface()
		}
		// Fast path: key and value types already match the target map type.
		if keyType == otherKeyType && valType == otherValType {
			mapVal.SetMapIndex(reflect.ValueOf(ntvKey), reflect.ValueOf(ntvVal))
			return true
		}
		// Slow path: round-trip through CEL values to convert each entry.
		celKey := m.NativeToValue(ntvKey)
		celVal := m.NativeToValue(ntvVal)
		ntvKey, err = celKey.ConvertToNative(otherKeyType)
		if err != nil {
			// early terminate the range loop.
			return false
		}
		ntvVal, err = celVal.ConvertToNative(otherValType)
		if err != nil {
			// early terminate the range loop.
			return false
		}
		mapVal.SetMapIndex(reflect.ValueOf(ntvKey), reflect.ValueOf(ntvVal))
		return true
	})
	if err != nil {
		return nil, err
	}
	return mapVal.Interface(), nil
}
 | 
			
		||||
 | 
			
		||||
// ConvertToType implements the ref.Val interface method.
 | 
			
		||||
func (m *protoMap) ConvertToType(typeVal ref.Type) ref.Val {
 | 
			
		||||
	switch typeVal {
 | 
			
		||||
	case MapType:
 | 
			
		||||
		return m
 | 
			
		||||
	case TypeType:
 | 
			
		||||
		return MapType
 | 
			
		||||
	}
 | 
			
		||||
	return NewErr("type conversion error from '%s' to '%s'", MapType, typeVal)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Equal implements the ref.Val interface method.
func (m *protoMap) Equal(other ref.Val) ref.Val {
	otherMap, ok := other.(traits.Mapper)
	if !ok {
		return MaybeNoSuchOverloadErr(other)
	}
	// Maps of different sizes cannot be equal.
	if m.value.Map.Len() != int(otherMap.Size().(Int)) {
		return False
	}
	// Compare entry-by-entry; returning false from the Range callback stops
	// iteration early once an answer other than True is known.
	var retVal ref.Val = True
	m.value.Range(func(key protoreflect.MapKey, val protoreflect.Value) bool {
		keyVal := m.NativeToValue(key.Interface())
		valVal := m.NativeToValue(val)
		otherVal, found := otherMap.Find(keyVal)
		if !found {
			if otherVal == nil {
				// Key missing from the other map: the maps differ.
				retVal = False
				return false
			}
			// Find surfaced an error or unknown; propagate it.
			retVal = MaybeNoSuchOverloadErr(otherVal)
			return false
		}
		valEq := valVal.Equal(otherVal)
		if valEq != True {
			// Values differ, or comparison produced an error/unknown.
			retVal = valEq
			return false
		}
		return true
	})
	return retVal
}
 | 
			
		||||
 | 
			
		||||
// Find returns whether the protoreflect.Map contains the input key.
//
// If the key is not found the function returns (nil, false).
// If the input key is not a supported proto map key type, or is an Err or Unknown,
// the function returns
// (Unknown|Err, false).
func (m *protoMap) Find(key ref.Val) (ref.Val, bool) {
	// Propagate unknowns and errors rather than treating them as misses.
	if IsUnknownOrError(key) {
		return key, false
	}
	// Convert the input key to the expected protobuf key type.
	ntvKey, err := key.ConvertToNative(m.value.KeyType.ReflectType())
	if err != nil {
		return NewErr("unsupported key type: %v", key.Type()), false
	}
	// Use protoreflection to get the key value.
	val := m.value.Get(protoreflect.ValueOf(ntvKey).MapKey())
	if !val.IsValid() {
		return nil, false
	}
	// Perform nominal type unwrapping from the input value.
	switch v := val.Interface().(type) {
	case protoreflect.List, protoreflect.Map:
		// Maps do not support list or map values
		return NewErr("unsupported map element type: (%T)%v", v, v), false
	default:
		return m.NativeToValue(v), true
	}
}
 | 
			
		||||
 | 
			
		||||
// Get implements the traits.Indexer interface method.
 | 
			
		||||
func (m *protoMap) Get(key ref.Val) ref.Val {
 | 
			
		||||
	v, found := m.Find(key)
 | 
			
		||||
	if !found {
 | 
			
		||||
		return ValOrErr(v, "no such key: %v", key)
 | 
			
		||||
	}
 | 
			
		||||
	return v
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Iterator implements the traits.Iterable interface method.
 | 
			
		||||
func (m *protoMap) Iterator() traits.Iterator {
 | 
			
		||||
	// Copy the keys to make their order stable.
 | 
			
		||||
	mapKeys := make([]protoreflect.MapKey, 0, m.value.Len())
 | 
			
		||||
	m.value.Range(func(k protoreflect.MapKey, v protoreflect.Value) bool {
 | 
			
		||||
		mapKeys = append(mapKeys, k)
 | 
			
		||||
		return true
 | 
			
		||||
	})
 | 
			
		||||
	return &protoMapIterator{
 | 
			
		||||
		TypeAdapter: m.TypeAdapter,
 | 
			
		||||
		mapKeys:     mapKeys,
 | 
			
		||||
		len:         m.value.Len(),
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Size returns the number of entries in the protoreflect.Map.
 | 
			
		||||
func (m *protoMap) Size() ref.Val {
 | 
			
		||||
	return Int(m.value.Len())
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Type implements the ref.Val interface method.
// All protoMap instances share the CEL map type.
func (m *protoMap) Type() ref.Type {
	return MapType
}
 | 
			
		||||
 | 
			
		||||
// Value implements the ref.Val interface method, exposing the wrapped pb.Map.
func (m *protoMap) Value() interface{} {
	return m.value
}
 | 
			
		||||
 | 
			
		||||
// mapIterator iterates over the keys of a native Go map via reflection,
// adapting each key to a CEL value.
type mapIterator struct {
	*baseIterator
	ref.TypeAdapter
	mapKeys *reflect.MapIter // live reflection iterator over the source map
	cursor  int              // number of entries consumed so far
	len     int              // total number of entries to produce
}
 | 
			
		||||
 | 
			
		||||
// HasNext implements the traits.Iterator interface method.
 | 
			
		||||
func (it *mapIterator) HasNext() ref.Val {
 | 
			
		||||
	return Bool(it.cursor < it.len)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Next implements the traits.Iterator interface method.
func (it *mapIterator) Next() ref.Val {
	// The order of the && matters: mapKeys.Next() consumes an entry from the
	// underlying MapIter, so it must only run while entries remain.
	if it.HasNext() == True && it.mapKeys.Next() {
		it.cursor++
		refKey := it.mapKeys.Key()
		return it.NativeToValue(refKey.Interface())
	}
	return nil
}
 | 
			
		||||
 | 
			
		||||
// protoMapIterator iterates over a snapshot of protoreflect.Map keys,
// adapting each key to a CEL value.
type protoMapIterator struct {
	*baseIterator
	ref.TypeAdapter
	mapKeys []protoreflect.MapKey // snapshot of the map's keys
	cursor  int                   // index of the next key to produce
	len     int                   // total number of keys
}
 | 
			
		||||
 | 
			
		||||
// HasNext implements the traits.Iterator interface method.
 | 
			
		||||
func (it *protoMapIterator) HasNext() ref.Val {
 | 
			
		||||
	return Bool(it.cursor < it.len)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Next implements the traits.Iterator interface method.
 | 
			
		||||
func (it *protoMapIterator) Next() ref.Val {
 | 
			
		||||
	if it.HasNext() == True {
 | 
			
		||||
		index := it.cursor
 | 
			
		||||
		it.cursor++
 | 
			
		||||
		refKey := it.mapKeys[index]
 | 
			
		||||
		return it.NativeToValue(refKey.Interface())
 | 
			
		||||
	}
 | 
			
		||||
	return nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// stringKeyIterator iterates over a snapshot of string map keys, yielding
// each as a CEL String.
type stringKeyIterator struct {
	*baseIterator
	mapKeys []string // snapshot of the map's keys
	cursor  int      // index of the next key to produce
	len     int      // total number of keys
}
 | 
			
		||||
 | 
			
		||||
// HasNext implements the traits.Iterator interface method.
 | 
			
		||||
func (it *stringKeyIterator) HasNext() ref.Val {
 | 
			
		||||
	return Bool(it.cursor < it.len)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Next implements the traits.Iterator interface method.
 | 
			
		||||
func (it *stringKeyIterator) Next() ref.Val {
 | 
			
		||||
	if it.HasNext() == True {
 | 
			
		||||
		index := it.cursor
 | 
			
		||||
		it.cursor++
 | 
			
		||||
		return String(it.mapKeys[index])
 | 
			
		||||
	}
 | 
			
		||||
	return nil
 | 
			
		||||
}
 | 
			
		||||
							
								
								
									
										100
									
								
								vendor/github.com/google/cel-go/common/types/null.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										100
									
								
								vendor/github.com/google/cel-go/common/types/null.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							@@ -0,0 +1,100 @@
 | 
			
		||||
// Copyright 2018 Google LLC
 | 
			
		||||
//
 | 
			
		||||
// Licensed under the Apache License, Version 2.0 (the "License");
 | 
			
		||||
// you may not use this file except in compliance with the License.
 | 
			
		||||
// You may obtain a copy of the License at
 | 
			
		||||
//
 | 
			
		||||
//      http://www.apache.org/licenses/LICENSE-2.0
 | 
			
		||||
//
 | 
			
		||||
// Unless required by applicable law or agreed to in writing, software
 | 
			
		||||
// distributed under the License is distributed on an "AS IS" BASIS,
 | 
			
		||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | 
			
		||||
// See the License for the specific language governing permissions and
 | 
			
		||||
// limitations under the License.
 | 
			
		||||
 | 
			
		||||
package types
 | 
			
		||||
 | 
			
		||||
import (
 | 
			
		||||
	"fmt"
 | 
			
		||||
	"reflect"
 | 
			
		||||
 | 
			
		||||
	"github.com/google/cel-go/common/types/ref"
 | 
			
		||||
	"google.golang.org/protobuf/proto"
 | 
			
		||||
 | 
			
		||||
	anypb "google.golang.org/protobuf/types/known/anypb"
 | 
			
		||||
	structpb "google.golang.org/protobuf/types/known/structpb"
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
// Null type implementation.
// Null wraps the protobuf NullValue enum so it can satisfy the ref.Val
// interface for CEL's null literal.
type Null structpb.NullValue
 | 
			
		||||
 | 
			
		||||
var (
	// NullType singleton.
	NullType = NewTypeValue("null_type")
	// NullValue singleton.
	NullValue = Null(structpb.NullValue_NULL_VALUE)

	// jsonNullType caches the reflect.Type of the protobuf JSON null enum.
	jsonNullType = reflect.TypeOf(structpb.NullValue_NULL_VALUE)
)
 | 
			
		||||
 | 
			
		||||
// ConvertToNative implements ref.Val.ConvertToNative.
func (n Null) ConvertToNative(typeDesc reflect.Type) (interface{}, error) {
	switch typeDesc.Kind() {
	case reflect.Int32:
		// The protobuf NullValue enum is an int32 under the hood.
		return reflect.ValueOf(n).Convert(typeDesc).Interface(), nil
	case reflect.Ptr:
		switch typeDesc {
		case anyValueType:
			// Convert to a JSON-null before packing to an Any field since the enum value for JSON
			// null cannot be packed directly.
			pb, err := n.ConvertToNative(jsonValueType)
			if err != nil {
				return nil, err
			}
			return anypb.New(pb.(proto.Message))
		case jsonValueType:
			return structpb.NewNullValue(), nil
		}
	case reflect.Interface:
		// Satisfy interface requests with either the unwrapped enum value or
		// the Null wrapper itself, whichever implements the interface.
		nv := n.Value()
		if reflect.TypeOf(nv).Implements(typeDesc) {
			return nv, nil
		}
		if reflect.TypeOf(n).Implements(typeDesc) {
			return n, nil
		}
	}
	// If the type conversion isn't supported return an error.
	return nil, fmt.Errorf("type conversion error from '%v' to '%v'", NullType, typeDesc)
}
 | 
			
		||||
 | 
			
		||||
// ConvertToType implements ref.Val.ConvertToType.
 | 
			
		||||
func (n Null) ConvertToType(typeVal ref.Type) ref.Val {
 | 
			
		||||
	switch typeVal {
 | 
			
		||||
	case StringType:
 | 
			
		||||
		return String("null")
 | 
			
		||||
	case NullType:
 | 
			
		||||
		return n
 | 
			
		||||
	case TypeType:
 | 
			
		||||
		return NullType
 | 
			
		||||
	}
 | 
			
		||||
	return NewErr("type conversion error from '%s' to '%s'", NullType, typeVal)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Equal implements ref.Val.Equal.
 | 
			
		||||
func (n Null) Equal(other ref.Val) ref.Val {
 | 
			
		||||
	if NullType != other.Type() {
 | 
			
		||||
		return ValOrErr(other, "no such overload")
 | 
			
		||||
	}
 | 
			
		||||
	return True
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Type implements ref.Val.Type, returning the null_type singleton.
func (n Null) Type() ref.Type {
	return NullType
}
 | 
			
		||||
 | 
			
		||||
// Value implements ref.Val.Value, returning the protobuf NullValue enum.
func (n Null) Value() interface{} {
	return structpb.NullValue_NULL_VALUE
}
 | 
			
		||||
							
								
								
									
										158
									
								
								vendor/github.com/google/cel-go/common/types/object.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										158
									
								
								vendor/github.com/google/cel-go/common/types/object.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							@@ -0,0 +1,158 @@
 | 
			
		||||
// Copyright 2018 Google LLC
 | 
			
		||||
//
 | 
			
		||||
// Licensed under the Apache License, Version 2.0 (the "License");
 | 
			
		||||
// you may not use this file except in compliance with the License.
 | 
			
		||||
// You may obtain a copy of the License at
 | 
			
		||||
//
 | 
			
		||||
//      http://www.apache.org/licenses/LICENSE-2.0
 | 
			
		||||
//
 | 
			
		||||
// Unless required by applicable law or agreed to in writing, software
 | 
			
		||||
// distributed under the License is distributed on an "AS IS" BASIS,
 | 
			
		||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | 
			
		||||
// See the License for the specific language governing permissions and
 | 
			
		||||
// limitations under the License.
 | 
			
		||||
 | 
			
		||||
package types
 | 
			
		||||
 | 
			
		||||
import (
 | 
			
		||||
	"fmt"
 | 
			
		||||
	"reflect"
 | 
			
		||||
 | 
			
		||||
	"github.com/google/cel-go/common/types/pb"
 | 
			
		||||
	"github.com/google/cel-go/common/types/ref"
 | 
			
		||||
	"google.golang.org/protobuf/encoding/protojson"
 | 
			
		||||
	"google.golang.org/protobuf/proto"
 | 
			
		||||
 | 
			
		||||
	anypb "google.golang.org/protobuf/types/known/anypb"
 | 
			
		||||
	structpb "google.golang.org/protobuf/types/known/structpb"
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
// protoObj adapts a proto.Message to the CEL object model, supporting field
// access and type conversion.
type protoObj struct {
	ref.TypeAdapter
	value     proto.Message       // the underlying protobuf message
	typeDesc  *pb.TypeDescription // field metadata for the message type
	typeValue *TypeValue          // the CEL type of the message
}
 | 
			
		||||
 | 
			
		||||
// NewObject returns an object based on a proto.Message value which handles
 | 
			
		||||
// conversion between protobuf type values and expression type values.
 | 
			
		||||
// Objects support indexing and iteration.
 | 
			
		||||
//
 | 
			
		||||
// Note: the type value is pulled from the list of registered types within the
 | 
			
		||||
// type provider. If the proto type is not registered within the type provider,
 | 
			
		||||
// then this will result in an error within the type adapter / provider.
 | 
			
		||||
func NewObject(adapter ref.TypeAdapter,
 | 
			
		||||
	typeDesc *pb.TypeDescription,
 | 
			
		||||
	typeValue *TypeValue,
 | 
			
		||||
	value proto.Message) ref.Val {
 | 
			
		||||
	return &protoObj{
 | 
			
		||||
		TypeAdapter: adapter,
 | 
			
		||||
		value:       value,
 | 
			
		||||
		typeDesc:    typeDesc,
 | 
			
		||||
		typeValue:   typeValue}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// ConvertToNative converts the wrapped proto.Message to the requested native
// Go type, supporting direct assignment, Any packing, JSON conversion, and
// merge-based copies to other message types.
func (o *protoObj) ConvertToNative(typeDesc reflect.Type) (interface{}, error) {
	pb := o.value
	// Identity conversions: the message or the wrapper itself.
	if reflect.TypeOf(pb).AssignableTo(typeDesc) {
		return pb, nil
	}
	if reflect.TypeOf(o).AssignableTo(typeDesc) {
		return o, nil
	}
	switch typeDesc {
	case anyValueType:
		// Avoid double-packing a message that is already an Any.
		_, isAny := pb.(*anypb.Any)
		if isAny {
			return pb, nil
		}
		return anypb.New(pb)
	case jsonValueType:
		// Marshal the proto to JSON first, and then rehydrate as protobuf.Value as there is no
		// support for direct conversion from proto.Message to protobuf.Value.
		bytes, err := protojson.Marshal(pb)
		if err != nil {
			return nil, err
		}
		json := &structpb.Value{}
		err = protojson.Unmarshal(bytes, json)
		if err != nil {
			return nil, err
		}
		return json, nil
	default:
		if typeDesc == o.typeDesc.ReflectType() {
			return o.value, nil
		}
		// For other pointer-to-message types, copy field-by-field via Merge.
		if typeDesc.Kind() == reflect.Ptr {
			val := reflect.New(typeDesc.Elem()).Interface()
			dstPB, ok := val.(proto.Message)
			if ok {
				proto.Merge(dstPB, pb)
				return dstPB, nil
			}
		}
	}
	return nil, fmt.Errorf("type conversion error from '%T' to '%v'", o.value, typeDesc)
}
 | 
			
		||||
 | 
			
		||||
func (o *protoObj) ConvertToType(typeVal ref.Type) ref.Val {
 | 
			
		||||
	switch typeVal {
 | 
			
		||||
	default:
 | 
			
		||||
		if o.Type().TypeName() == typeVal.TypeName() {
 | 
			
		||||
			return o
 | 
			
		||||
		}
 | 
			
		||||
	case TypeType:
 | 
			
		||||
		return o.typeValue
 | 
			
		||||
	}
 | 
			
		||||
	return NewErr("type conversion error from '%s' to '%s'", o.typeDesc.Name(), typeVal)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (o *protoObj) Equal(other ref.Val) ref.Val {
 | 
			
		||||
	if o.typeDesc.Name() != other.Type().TypeName() {
 | 
			
		||||
		return MaybeNoSuchOverloadErr(other)
 | 
			
		||||
	}
 | 
			
		||||
	return Bool(proto.Equal(o.value, other.Value().(proto.Message)))
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// IsSet tests whether a field which is defined is set to a non-default value.
 | 
			
		||||
func (o *protoObj) IsSet(field ref.Val) ref.Val {
 | 
			
		||||
	protoFieldName, ok := field.(String)
 | 
			
		||||
	if !ok {
 | 
			
		||||
		return MaybeNoSuchOverloadErr(field)
 | 
			
		||||
	}
 | 
			
		||||
	protoFieldStr := string(protoFieldName)
 | 
			
		||||
	fd, found := o.typeDesc.FieldByName(protoFieldStr)
 | 
			
		||||
	if !found {
 | 
			
		||||
		return NewErr("no such field '%s'", field)
 | 
			
		||||
	}
 | 
			
		||||
	if fd.IsSet(o.value) {
 | 
			
		||||
		return True
 | 
			
		||||
	}
 | 
			
		||||
	return False
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (o *protoObj) Get(index ref.Val) ref.Val {
 | 
			
		||||
	protoFieldName, ok := index.(String)
 | 
			
		||||
	if !ok {
 | 
			
		||||
		return MaybeNoSuchOverloadErr(index)
 | 
			
		||||
	}
 | 
			
		||||
	protoFieldStr := string(protoFieldName)
 | 
			
		||||
	fd, found := o.typeDesc.FieldByName(protoFieldStr)
 | 
			
		||||
	if !found {
 | 
			
		||||
		return NewErr("no such field '%s'", index)
 | 
			
		||||
	}
 | 
			
		||||
	fv, err := fd.GetFrom(o.value)
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		return NewErr(err.Error())
 | 
			
		||||
	}
 | 
			
		||||
	return o.NativeToValue(fv)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Type returns the registered CEL type of the wrapped message.
func (o *protoObj) Type() ref.Type {
	return o.typeValue
}
 | 
			
		||||
 | 
			
		||||
// Value returns the underlying proto.Message.
func (o *protoObj) Value() interface{} {
	return o.value
}
 | 
			
		||||
							
								
								
									
										357
									
								
								vendor/github.com/google/cel-go/common/types/overflow.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										357
									
								
								vendor/github.com/google/cel-go/common/types/overflow.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							@@ -0,0 +1,357 @@
 | 
			
		||||
// Copyright 2021 Google LLC
 | 
			
		||||
//
 | 
			
		||||
// Licensed under the Apache License, Version 2.0 (the "License");
 | 
			
		||||
// you may not use this file except in compliance with the License.
 | 
			
		||||
// You may obtain a copy of the License at
 | 
			
		||||
//
 | 
			
		||||
//      http://www.apache.org/licenses/LICENSE-2.0
 | 
			
		||||
//
 | 
			
		||||
// Unless required by applicable law or agreed to in writing, software
 | 
			
		||||
// distributed under the License is distributed on an "AS IS" BASIS,
 | 
			
		||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | 
			
		||||
// See the License for the specific language governing permissions and
 | 
			
		||||
// limitations under the License.
 | 
			
		||||
 | 
			
		||||
package types
 | 
			
		||||
 | 
			
		||||
import (
 | 
			
		||||
	"math"
 | 
			
		||||
	"time"
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
var (
	// doubleTwoTo64 is 2^64 as a float64, the smallest double that can no
	// longer be represented as a uint64.
	doubleTwoTo64 = math.Ldexp(1.0, 64)
)
 | 
			
		||||
 | 
			
		||||
// addInt64Checked performs addition with overflow detection of two int64 values.
 | 
			
		||||
//
 | 
			
		||||
// If the operation fails the error return value will be non-nil.
 | 
			
		||||
func addInt64Checked(x, y int64) (int64, error) {
 | 
			
		||||
	if (y > 0 && x > math.MaxInt64-y) || (y < 0 && x < math.MinInt64-y) {
 | 
			
		||||
		return 0, errIntOverflow
 | 
			
		||||
	}
 | 
			
		||||
	return x + y, nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// subtractInt64Checked performs subtraction with overflow detection of two int64 values.
 | 
			
		||||
//
 | 
			
		||||
// If the operation fails the error return value will be non-nil.
 | 
			
		||||
func subtractInt64Checked(x, y int64) (int64, error) {
 | 
			
		||||
	if (y < 0 && x > math.MaxInt64+y) || (y > 0 && x < math.MinInt64+y) {
 | 
			
		||||
		return 0, errIntOverflow
 | 
			
		||||
	}
 | 
			
		||||
	return x - y, nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// negateInt64Checked performs negation with overflow detection of an int64.
//
// If the operation fails the error return value will be non-nil.
func negateInt64Checked(x int64) (int64, error) {
	// In two's complement, negating MinInt64 would result in a value of
	// MaxInt64+1, which is not representable as an int64.
	if x == math.MinInt64 {
		return 0, errIntOverflow
	}
	return -x, nil
}
 | 
			
		||||
 | 
			
		||||
// multiplyInt64Checked performs multiplication with overflow detection of two int64 value.
//
// If the operation fails the error return value will be non-nil.
func multiplyInt64Checked(x, y int64) (int64, error) {
	// Detecting multiplication overflow is more complicated than the others. The first two detect
	// attempting to negate MinInt64, which would result in MaxInt64+1 (division by -1 below would
	// be undefined for that operand, so it is special-cased). The other four detect normal
	// overflow conditions by comparing one operand against the limit divided by the other;
	// integer division truncates toward zero, which is why each sign combination needs its
	// own comparison direction.
	if (x == -1 && y == math.MinInt64) || (y == -1 && x == math.MinInt64) ||
		// x is positive, y is positive
		(x > 0 && y > 0 && x > math.MaxInt64/y) ||
		// x is positive, y is negative
		(x > 0 && y < 0 && y < math.MinInt64/x) ||
		// x is negative, y is positive
		(x < 0 && y > 0 && x < math.MinInt64/y) ||
		// x is negative, y is negative
		(x < 0 && y < 0 && y < math.MaxInt64/x) {
		return 0, errIntOverflow
	}
	return x * y, nil
}
 | 
			
		||||
 | 
			
		||||
// divideInt64Checked performs division with overflow detection of two int64 values,
 | 
			
		||||
// as well as a division by zero check.
 | 
			
		||||
//
 | 
			
		||||
// If the operation fails the error return value will be non-nil.
 | 
			
		||||
func divideInt64Checked(x, y int64) (int64, error) {
 | 
			
		||||
	// Division by zero.
 | 
			
		||||
	if y == 0 {
 | 
			
		||||
		return 0, errDivideByZero
 | 
			
		||||
	}
 | 
			
		||||
	// In twos complement, negating MinInt64 would result in a valid of MaxInt64+1.
 | 
			
		||||
	if x == math.MinInt64 && y == -1 {
 | 
			
		||||
		return 0, errIntOverflow
 | 
			
		||||
	}
 | 
			
		||||
	return x / y, nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// moduloInt64Checked performs modulo with overflow detection of two int64 values
 | 
			
		||||
// as well as a modulus by zero check.
 | 
			
		||||
//
 | 
			
		||||
// If the operation fails the error return value will be non-nil.
 | 
			
		||||
func moduloInt64Checked(x, y int64) (int64, error) {
 | 
			
		||||
	// Modulus by zero.
 | 
			
		||||
	if y == 0 {
 | 
			
		||||
		return 0, errModulusByZero
 | 
			
		||||
	}
 | 
			
		||||
	// In twos complement, negating MinInt64 would result in a valid of MaxInt64+1.
 | 
			
		||||
	if x == math.MinInt64 && y == -1 {
 | 
			
		||||
		return 0, errIntOverflow
 | 
			
		||||
	}
 | 
			
		||||
	return x % y, nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// addUint64Checked performs addition with overflow detection of two uint64 values.
 | 
			
		||||
//
 | 
			
		||||
// If the operation fails due to overflow the error return value will be non-nil.
 | 
			
		||||
func addUint64Checked(x, y uint64) (uint64, error) {
 | 
			
		||||
	if y > 0 && x > math.MaxUint64-y {
 | 
			
		||||
		return 0, errUintOverflow
 | 
			
		||||
	}
 | 
			
		||||
	return x + y, nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// subtractUint64Checked performs subtraction with overflow detection of two uint64 values.
 | 
			
		||||
//
 | 
			
		||||
// If the operation fails due to overflow the error return value will be non-nil.
 | 
			
		||||
func subtractUint64Checked(x, y uint64) (uint64, error) {
 | 
			
		||||
	if y > x {
 | 
			
		||||
		return 0, errUintOverflow
 | 
			
		||||
	}
 | 
			
		||||
	return x - y, nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// multiplyUint64Checked performs multiplication with overflow detection of two uint64 values.
 | 
			
		||||
//
 | 
			
		||||
// If the operation fails due to overflow the error return value will be non-nil.
 | 
			
		||||
func multiplyUint64Checked(x, y uint64) (uint64, error) {
 | 
			
		||||
	if y != 0 && x > math.MaxUint64/y {
 | 
			
		||||
		return 0, errUintOverflow
 | 
			
		||||
	}
 | 
			
		||||
	return x * y, nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// divideUint64Checked performs division with a test for division by zero.
 | 
			
		||||
//
 | 
			
		||||
// If the operation fails the error return value will be non-nil.
 | 
			
		||||
func divideUint64Checked(x, y uint64) (uint64, error) {
 | 
			
		||||
	if y == 0 {
 | 
			
		||||
		return 0, errDivideByZero
 | 
			
		||||
	}
 | 
			
		||||
	return x / y, nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// moduloUint64Checked performs modulo with a test for modulus by zero.
 | 
			
		||||
//
 | 
			
		||||
// If the operation fails the error return value will be non-nil.
 | 
			
		||||
func moduloUint64Checked(x, y uint64) (uint64, error) {
 | 
			
		||||
	if y == 0 {
 | 
			
		||||
		return 0, errModulusByZero
 | 
			
		||||
	}
 | 
			
		||||
	return x % y, nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// addDurationChecked performs addition with overflow detection of two time.Durations.
 | 
			
		||||
//
 | 
			
		||||
// If the operation fails due to overflow the error return value will be non-nil.
 | 
			
		||||
func addDurationChecked(x, y time.Duration) (time.Duration, error) {
 | 
			
		||||
	val, err := addInt64Checked(int64(x), int64(y))
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		return time.Duration(0), err
 | 
			
		||||
	}
 | 
			
		||||
	return time.Duration(val), nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// subtractDurationChecked performs subtraction with overflow detection of two time.Durations.
 | 
			
		||||
//
 | 
			
		||||
// If the operation fails due to overflow the error return value will be non-nil.
 | 
			
		||||
func subtractDurationChecked(x, y time.Duration) (time.Duration, error) {
 | 
			
		||||
	val, err := subtractInt64Checked(int64(x), int64(y))
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		return time.Duration(0), err
 | 
			
		||||
	}
 | 
			
		||||
	return time.Duration(val), nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// negateDurationChecked performs negation with overflow detection of a time.Duration.
 | 
			
		||||
//
 | 
			
		||||
// If the operation fails due to overflow the error return value will be non-nil.
 | 
			
		||||
func negateDurationChecked(x time.Duration) (time.Duration, error) {
 | 
			
		||||
	val, err := negateInt64Checked(int64(x))
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		return time.Duration(0), err
 | 
			
		||||
	}
 | 
			
		||||
	return time.Duration(val), nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// addTimeDurationChecked performs addition with overflow detection of a time.Time and time.Duration.
//
// If the operation fails due to overflow the error return value will be non-nil.
func addTimeDurationChecked(x time.Time, y time.Duration) (time.Time, error) {
	// This is tricky. A time is represented as (int64, int32) where the first is seconds and second
	// is nanoseconds. A duration is int64 representing nanoseconds. We cannot normalize time to int64
	// as it could potentially overflow. The only way to proceed is to break time and duration into
	// second and nanosecond components.

	// First we break time into its components by truncating and subtracting.
	sec1 := x.Truncate(time.Second).Unix()                // Truncate to seconds.
	nsec1 := x.Sub(x.Truncate(time.Second)).Nanoseconds() // Get nanoseconds by truncating and subtracting.

	// Second we break duration into its components by dividing and modulo.
	sec2 := int64(y) / int64(time.Second)  // Truncate to seconds.
	nsec2 := int64(y) % int64(time.Second) // Get remainder.

	// Add seconds first, detecting any overflow.
	sec, err := addInt64Checked(sec1, sec2)
	if err != nil {
		return time.Time{}, err
	}
	// Nanoseconds cannot overflow as time.Time normalizes them to [0, 999999999].
	nsec := nsec1 + nsec2

	// We need to normalize nanoseconds to be positive and carry extra nanoseconds to seconds.
	// Adapted from time.Unix(int64, int64).
	if nsec < 0 || nsec >= int64(time.Second) {
		// Add seconds (the carry may be negative, in which case this borrows).
		sec, err = addInt64Checked(sec, nsec/int64(time.Second))
		if err != nil {
			return time.Time{}, err
		}

		nsec -= (nsec / int64(time.Second)) * int64(time.Second)
		if nsec < 0 {
			// Subtract an extra second so the nanosecond remainder becomes non-negative.
			sec, err = addInt64Checked(sec, -1)
			if err != nil {
				return time.Time{}, err
			}
			nsec += int64(time.Second)
		}
	}

	// Check if the number of seconds from Unix epoch is within our acceptable range.
	if sec < minUnixTime || sec > maxUnixTime {
		return time.Time{}, errTimestampOverflow
	}

	// Return resulting time and propagate time zone.
	return time.Unix(sec, nsec).In(x.Location()), nil
}
 | 
			
		||||
 | 
			
		||||
// subtractTimeChecked performs subtraction with overflow detection of two time.Time.
//
// If the operation fails due to overflow the error return value will be non-nil.
func subtractTimeChecked(x, y time.Time) (time.Duration, error) {
	// Similar to addTimeDurationChecked() above.

	// First we break time into its components by truncating and subtracting.
	sec1 := x.Truncate(time.Second).Unix()                // Truncate to seconds.
	nsec1 := x.Sub(x.Truncate(time.Second)).Nanoseconds() // Get nanoseconds by truncating and subtracting.

	// Second we break the other time into its components the same way.
	sec2 := y.Truncate(time.Second).Unix()                // Truncate to seconds.
	nsec2 := y.Sub(y.Truncate(time.Second)).Nanoseconds() // Get nanoseconds by truncating and subtracting.

	// Subtract seconds first, detecting any overflow.
	sec, err := subtractInt64Checked(sec1, sec2)
	if err != nil {
		return time.Duration(0), err
	}

	// Nanoseconds cannot overflow as time.Time normalizes them to [0, 999999999].
	nsec := nsec1 - nsec2

	// Scale seconds to nanoseconds detecting overflow.
	tsec, err := multiplyInt64Checked(sec, int64(time.Second))
	if err != nil {
		return time.Duration(0), err
	}

	// Lastly we need to add the two nanosecond components together.
	val, err := addInt64Checked(tsec, nsec)
	if err != nil {
		return time.Duration(0), err
	}

	return time.Duration(val), nil
}
 | 
			
		||||
 | 
			
		||||
// subtractTimeDurationChecked performs subtraction with overflow detection of a time.Time and
 | 
			
		||||
// time.Duration.
 | 
			
		||||
//
 | 
			
		||||
// If the operation fails due to overflow the error return value will be non-nil.
 | 
			
		||||
func subtractTimeDurationChecked(x time.Time, y time.Duration) (time.Time, error) {
 | 
			
		||||
	// The easiest way to implement this is to negate y and add them.
 | 
			
		||||
	// x - y = x + -y
 | 
			
		||||
	val, err := negateDurationChecked(y)
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		return time.Time{}, err
 | 
			
		||||
	}
 | 
			
		||||
	return addTimeDurationChecked(x, val)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// doubleToInt64Checked converts a double to an int64 value.
//
// If the conversion fails due to overflow the error return value will be non-nil.
func doubleToInt64Checked(v float64) (int64, error) {
	// NaN and ±Inf are rejected explicitly since the range comparisons below
	// would not classify them correctly. The bounds are exclusive:
	// float64(math.MaxInt64) rounds up to 2^63 (not representable), and
	// NOTE(review): v <= float64(math.MinInt64) also rejects exactly -2^63,
	// which IS representable as int64 — conservative by design; confirm
	// against upstream intent before tightening.
	if math.IsInf(v, 0) || math.IsNaN(v) || v <= float64(math.MinInt64) || v >= float64(math.MaxInt64) {
		return 0, errIntOverflow
	}
	return int64(v), nil
}
 | 
			
		||||
 | 
			
		||||
// doubleToInt64Checked converts a double to a uint64 value.
 | 
			
		||||
//
 | 
			
		||||
// If the conversion fails due to overflow the error return value will be non-nil.
 | 
			
		||||
func doubleToUint64Checked(v float64) (uint64, error) {
 | 
			
		||||
	if math.IsInf(v, 0) || math.IsNaN(v) || v < 0 || v >= doubleTwoTo64 {
 | 
			
		||||
		return 0, errUintOverflow
 | 
			
		||||
	}
 | 
			
		||||
	return uint64(v), nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// int64toUint64Checked converts an int64 to a uint64 value.
 | 
			
		||||
//
 | 
			
		||||
// If the conversion fails due to overflow the error return value will be non-nil.
 | 
			
		||||
func int64ToUint64Checked(v int64) (uint64, error) {
 | 
			
		||||
	if v < 0 {
 | 
			
		||||
		return 0, errUintOverflow
 | 
			
		||||
	}
 | 
			
		||||
	return uint64(v), nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// int64toInt32Checked converts an int64 to an int32 value.
 | 
			
		||||
//
 | 
			
		||||
// If the conversion fails due to overflow the error return value will be non-nil.
 | 
			
		||||
func int64ToInt32Checked(v int64) (int32, error) {
 | 
			
		||||
	if v < math.MinInt32 || v > math.MaxInt32 {
 | 
			
		||||
		return 0, errIntOverflow
 | 
			
		||||
	}
 | 
			
		||||
	return int32(v), nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// uint64toUint32Checked converts a uint64 to a uint32 value.
 | 
			
		||||
//
 | 
			
		||||
// If the conversion fails due to overflow the error return value will be non-nil.
 | 
			
		||||
func uint64ToUint32Checked(v uint64) (uint32, error) {
 | 
			
		||||
	if v > math.MaxUint32 {
 | 
			
		||||
		return 0, errUintOverflow
 | 
			
		||||
	}
 | 
			
		||||
	return uint32(v), nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// uint64toInt64Checked converts a uint64 to an int64 value.
 | 
			
		||||
//
 | 
			
		||||
// If the conversion fails due to overflow the error return value will be non-nil.
 | 
			
		||||
func uint64ToInt64Checked(v uint64) (int64, error) {
 | 
			
		||||
	if v > math.MaxInt64 {
 | 
			
		||||
		return 0, errIntOverflow
 | 
			
		||||
	}
 | 
			
		||||
	return int64(v), nil
 | 
			
		||||
}
 | 
			
		||||
							
								
								
									
										50
									
								
								vendor/github.com/google/cel-go/common/types/pb/BUILD.bazel
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										50
									
								
								vendor/github.com/google/cel-go/common/types/pb/BUILD.bazel
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							@@ -0,0 +1,50 @@
 | 
			
		||||
# Bazel build definitions for the cel-go common/types/pb package.
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")

package(
    default_visibility = ["//visibility:public"],
    licenses = ["notice"],  # Apache 2.0
)

# Library containing the protobuf type-description helpers for CEL types.
go_library(
    name = "go_default_library",
    srcs = [
        "checked.go",
        "enum.go",
        "file.go",
        "pb.go",
        "type.go",
    ],
    importpath = "github.com/google/cel-go/common/types/pb",
    deps = [
        "@org_golang_google_genproto//googleapis/api/expr/v1alpha1:go_default_library",
        "@org_golang_google_protobuf//proto:go_default_library",
        "@org_golang_google_protobuf//reflect/protoreflect:go_default_library",
        "@org_golang_google_protobuf//reflect/protoregistry:go_default_library",
        "@org_golang_google_protobuf//types/dynamicpb:go_default_library",
        "@org_golang_google_protobuf//types/known/anypb:go_default_library",
        "@org_golang_google_protobuf//types/known/durationpb:go_default_library",
        "@org_golang_google_protobuf//types/known/emptypb:go_default_library",
        "@org_golang_google_protobuf//types/known/structpb:go_default_library",
        "@org_golang_google_protobuf//types/known/timestamppb:go_default_library",
        "@org_golang_google_protobuf//types/known/wrapperspb:go_default_library",
    ],
)

# Unit tests embedded into the library above so they may exercise unexported names.
go_test(
    name = "go_default_test",
    size = "small",
    srcs = [
        "file_test.go",
        "pb_test.go",
        "type_test.go",
    ],
    embed = [":go_default_library"],
    deps = [
        "//checker/decls:go_default_library",
        "//test/proto2pb:test_all_types_go_proto",
        "//test/proto3pb:test_all_types_go_proto",
        "@org_golang_google_protobuf//reflect/protodesc:go_default_library",
        "@org_golang_google_protobuf//reflect/protoreflect:go_default_library",
        "@org_golang_google_protobuf//types/descriptorpb:go_default_library",
    ],
)
 | 
			
		||||
							
								
								
									
										93
									
								
								vendor/github.com/google/cel-go/common/types/pb/checked.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										93
									
								
								vendor/github.com/google/cel-go/common/types/pb/checked.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							@@ -0,0 +1,93 @@
 | 
			
		||||
// Copyright 2018 Google LLC
 | 
			
		||||
//
 | 
			
		||||
// Licensed under the Apache License, Version 2.0 (the "License");
 | 
			
		||||
// you may not use this file except in compliance with the License.
 | 
			
		||||
// You may obtain a copy of the License at
 | 
			
		||||
//
 | 
			
		||||
//      http://www.apache.org/licenses/LICENSE-2.0
 | 
			
		||||
//
 | 
			
		||||
// Unless required by applicable law or agreed to in writing, software
 | 
			
		||||
// distributed under the License is distributed on an "AS IS" BASIS,
 | 
			
		||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | 
			
		||||
// See the License for the specific language governing permissions and
 | 
			
		||||
// limitations under the License.
 | 
			
		||||
 | 
			
		||||
package pb
 | 
			
		||||
 | 
			
		||||
import (
 | 
			
		||||
	"google.golang.org/protobuf/reflect/protoreflect"
 | 
			
		||||
 | 
			
		||||
	exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
 | 
			
		||||
	emptypb "google.golang.org/protobuf/types/known/emptypb"
 | 
			
		||||
	structpb "google.golang.org/protobuf/types/known/structpb"
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
var (
	// CheckedPrimitives map from proto field descriptor type to expr.Type.
	// Note that float fields widen to the CEL double type and all signed
	// integer kinds collapse to the CEL int type.
	CheckedPrimitives = map[protoreflect.Kind]*exprpb.Type{
		protoreflect.BoolKind:     checkedBool,
		protoreflect.BytesKind:    checkedBytes,
		protoreflect.DoubleKind:   checkedDouble,
		protoreflect.FloatKind:    checkedDouble,
		protoreflect.Int32Kind:    checkedInt,
		protoreflect.Int64Kind:    checkedInt,
		protoreflect.Sint32Kind:   checkedInt,
		protoreflect.Sint64Kind:   checkedInt,
		protoreflect.Uint32Kind:   checkedUint,
		protoreflect.Uint64Kind:   checkedUint,
		protoreflect.Fixed32Kind:  checkedUint,
		protoreflect.Fixed64Kind:  checkedUint,
		protoreflect.Sfixed32Kind: checkedInt,
		protoreflect.Sfixed64Kind: checkedInt,
		protoreflect.StringKind:   checkedString}

	// CheckedWellKnowns map from qualified proto type name to expr.Type for
	// well-known proto types.
	CheckedWellKnowns = map[string]*exprpb.Type{
		// Wrapper types map to the wrapped primitive's nullable form.
		"google.protobuf.BoolValue":   checkedWrap(checkedBool),
		"google.protobuf.BytesValue":  checkedWrap(checkedBytes),
		"google.protobuf.DoubleValue": checkedWrap(checkedDouble),
		"google.protobuf.FloatValue":  checkedWrap(checkedDouble),
		"google.protobuf.Int64Value":  checkedWrap(checkedInt),
		"google.protobuf.Int32Value":  checkedWrap(checkedInt),
		"google.protobuf.UInt64Value": checkedWrap(checkedUint),
		"google.protobuf.UInt32Value": checkedWrap(checkedUint),
		"google.protobuf.StringValue": checkedWrap(checkedString),
		// Well-known types.
		"google.protobuf.Any":       checkedAny,
		"google.protobuf.Duration":  checkedDuration,
		"google.protobuf.Timestamp": checkedTimestamp,
		// Json types.
		"google.protobuf.ListValue": checkedListDyn,
		"google.protobuf.NullValue": checkedNull,
		"google.protobuf.Struct":    checkedMapStringDyn,
		"google.protobuf.Value":     checkedDyn,
	}

	// Common dynamic type used throughout the tables above.
	checkedDyn = &exprpb.Type{TypeKind: &exprpb.Type_Dyn{Dyn: &emptypb.Empty{}}}
	// Wrapper and primitive types.
	checkedBool   = checkedPrimitive(exprpb.Type_BOOL)
	checkedBytes  = checkedPrimitive(exprpb.Type_BYTES)
	checkedDouble = checkedPrimitive(exprpb.Type_DOUBLE)
	checkedInt    = checkedPrimitive(exprpb.Type_INT64)
	checkedString = checkedPrimitive(exprpb.Type_STRING)
	checkedUint   = checkedPrimitive(exprpb.Type_UINT64)
	// Well-known type equivalents.
	checkedAny       = checkedWellKnown(exprpb.Type_ANY)
	checkedDuration  = checkedWellKnown(exprpb.Type_DURATION)
	checkedTimestamp = checkedWellKnown(exprpb.Type_TIMESTAMP)
	// Json-based type equivalents.
	checkedNull = &exprpb.Type{
		TypeKind: &exprpb.Type_Null{
			Null: structpb.NullValue_NULL_VALUE}}
	checkedListDyn = &exprpb.Type{
		TypeKind: &exprpb.Type_ListType_{
			ListType: &exprpb.Type_ListType{ElemType: checkedDyn}}}
	checkedMapStringDyn = &exprpb.Type{
		TypeKind: &exprpb.Type_MapType_{
			MapType: &exprpb.Type_MapType{
				KeyType:   checkedString,
				ValueType: checkedDyn}}}
)
 | 
			
		||||
							
								
								
									
										44
									
								
								vendor/github.com/google/cel-go/common/types/pb/enum.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										44
									
								
								vendor/github.com/google/cel-go/common/types/pb/enum.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							@@ -0,0 +1,44 @@
 | 
			
		||||
// Copyright 2018 Google LLC
 | 
			
		||||
//
 | 
			
		||||
// Licensed under the Apache License, Version 2.0 (the "License");
 | 
			
		||||
// you may not use this file except in compliance with the License.
 | 
			
		||||
// You may obtain a copy of the License at
 | 
			
		||||
//
 | 
			
		||||
//      http://www.apache.org/licenses/LICENSE-2.0
 | 
			
		||||
//
 | 
			
		||||
// Unless required by applicable law or agreed to in writing, software
 | 
			
		||||
// distributed under the License is distributed on an "AS IS" BASIS,
 | 
			
		||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | 
			
		||||
// See the License for the specific language governing permissions and
 | 
			
		||||
// limitations under the License.
 | 
			
		||||
 | 
			
		||||
package pb
 | 
			
		||||
 | 
			
		||||
import (
 | 
			
		||||
	"google.golang.org/protobuf/reflect/protoreflect"
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
// NewEnumValueDescription produces an enum value description with the fully qualified enum value
// name and the enum value descriptor.
//
// The name is stored as supplied; no validation against the descriptor occurs here.
func NewEnumValueDescription(name string, desc protoreflect.EnumValueDescriptor) *EnumValueDescription {
	return &EnumValueDescription{
		enumValueName: name,
		desc:          desc,
	}
}
 | 
			
		||||
 | 
			
		||||
// EnumValueDescription maps a fully-qualified enum value name to its numeric value.
type EnumValueDescription struct {
	// enumValueName is the fully-qualified identifier of the enum value.
	enumValueName string
	// desc is the protobuf descriptor from which the numeric value is read.
	desc protoreflect.EnumValueDescriptor
}
 | 
			
		||||
 | 
			
		||||
// Name returns the fully-qualified identifier name for the enum value,
// as supplied at construction time.
func (ed *EnumValueDescription) Name() string {
	return ed.enumValueName
}
 | 
			
		||||
 | 
			
		||||
// Value returns the (numeric) value of the enum.
func (ed *EnumValueDescription) Value() int32 {
	// desc.Number() yields a protoreflect.EnumNumber, converted here to a plain int32.
	return int32(ed.desc.Number())
}
 | 
			
		||||
Some files were not shown because too many files have changed in this diff Show More
		Reference in New Issue
	
	Block a user