Java 类org.antlr.v4.runtime.misc.IntervalSet 实例源码

项目:codebuff    文件:LL1OptionalBlockSingleAlt.java   
/**
 * Code-model object for an LL(1) optional block {@code (x)?} with a single
 * alternative: precomputes the enter/bypass lookahead sets for this decision
 * and builds the inline lookahead tests plus the no-viable-alt error.
 */
public LL1OptionalBlockSingleAlt(OutputModelFactory factory,
                                     GrammarAST blkAST,
                                     List<CodeBlockForAlt> alts)
    {
        super(factory, blkAST, alts);
        this.decision = ((DecisionState)blkAST.atnState).decision;

        /** Lookahead for each alt 1..n */
//      IntervalSet[] altLookSets = LinearApproximator.getLL1LookaheadSets(dfa);
        IntervalSet[] altLookSets = factory.getGrammar().decisionLOOK.get(decision);
        altLook = getAltLookaheadAsStringLists(altLookSets);
        // Index 0 drives entering the optional block; index 1 appears to be the
        // bypass/follow lookahead — TODO confirm index semantics against decisionLOOK.
        IntervalSet look = altLookSets[0];
        IntervalSet followLook = altLookSets[1];

        // On error we report the union of both paths as "expected".
        IntervalSet expecting = look.or(followLook);
        this.error = getThrowNoViableAlt(factory, blkAST, expecting);

        expr = addCodeForLookaheadTempVar(look);
        followExpr = factory.getLL1Test(followLook, blkAST);
    }
项目:codebuff    文件:TestSetInline.java   
/**
 * Packs the token types of {@code set} (ascending order from toArray) into
 * word-sized Bitset groups for inline set-membership tests.  A new group is
 * started whenever the next token type no longer fits in the current word.
 *
 * @param useZeroOffset when true, a first group covering small token types
 *        (0 .. wordSize-2) is anchored at shift 0 instead of at the first
 *        token type, which can let more types share one word
 */
private static Bitset[] createBitsets(OutputModelFactory factory,
                                      IntervalSet set,
                                      int wordSize,
                                      boolean useZeroOffset) {
    List<Bitset> bitsetList = new ArrayList<Bitset>();
    for (int ttype : set.toArray()) {
        // Always try to extend the most recently started group.
        Bitset current = !bitsetList.isEmpty() ? bitsetList.get(bitsetList.size() - 1) : null;
        if (current == null || ttype > (current.shift + wordSize-1)) {
            current = new Bitset();
            if (useZeroOffset && ttype >= 0 && ttype < wordSize-1) {
                current.shift = 0;
            }
            else {
                current.shift = ttype;
            }

            bitsetList.add(current);
        }

        current.ttypes.add(factory.getGenerator().getTarget().getTokenTypeAsTargetLabel(factory.getGrammar(), ttype));
    }

    return bitsetList.toArray(new Bitset[bitsetList.size()]);
}
项目:goworks    文件:GoCompletionQuery.java   
/**
 * Walks up the invocation chain of {@code context} looking for a rule in
 * {@code values}.  When {@code checkTop} is set, the starting context itself
 * may match (returned directly); otherwise the parent of the context whose
 * invoking state belongs to a matching rule is returned, or {@code null}
 * when the chain is exhausted.
 */
public static RuleContext getTopContext(Parser parser, RuleContext context, IntervalSet values, boolean checkTop) {
    // Only the outermost call is allowed to match the context itself.
    if (checkTop && context instanceof ParserRuleContext
            && values.contains(context.getRuleIndex())) {
        return context;
    }

    RuleContext current = context;
    while (!current.isEmpty()) {
        int invokingRule = parser.getATN().states.get(current.invokingState).ruleIndex;
        if (values.contains(invokingRule)) {
            return current.parent;
        }
        current = current.parent;
    }
    return null;
}
项目:Scratch-ApuC    文件:DefaultErrorStrategy.java   
/**
 * Single-token deletion inline error recovery.  If the token after the
 * current (mismatched) one is in the expected set, the current token is
 * reported as unwanted and consumed ("deleted"), the now-matching token is
 * returned, and {@link #reportMatch} signals the successful match.  If the
 * second lookahead token does not match, nothing is changed and {@code null}
 * is returned.
 *
 * @param recognizer the parser instance
 * @return the successfully matched {@link Token} when deletion recovers,
 *         otherwise {@code null}
 */
@Nullable
protected Token singleTokenDeletion(@NotNull Parser recognizer) {
    IntervalSet expected = getExpectedTokens(recognizer);
    int secondLookahead = recognizer.getInputStream().LA(2);
    if (!expected.contains(secondLookahead)) {
        return null;
    }

    reportUnwantedToken(recognizer);
    recognizer.consume(); // simply delete the extra token
    // Return the token we are actually matching now.
    Token matched = recognizer.getCurrentToken();
    reportMatch(recognizer); // we know the current token is correct
    return matched;
}
项目:Scratch-ApuC    文件:DefaultErrorStrategy.java   
/**
 * Conjure up a missing token during error recovery.
 *
 * The recognizer recovers from a single missing symbol, but actions may
 * refer to that symbol (e.g. {@code x=ID {f($x);}}), so some token must be
 * returned in its place.  This method fabricates one via the parser's token
 * factory, using any element of the expected set as the type and the current
 * token (or the previous one when sitting at EOF) for position information.
 * Override to customize the tokens created for missing symbols.
 */
@NotNull
protected Token getMissingSymbol(@NotNull Parser recognizer) {
    IntervalSet expected = getExpectedTokens(recognizer);
    int missingType = expected.getMinElement(); // any expected element will do
    String text = missingType == Token.EOF
            ? "<missing EOF>"
            : "<missing " + recognizer.getTokenNames()[missingType] + ">";

    // Anchor line/column on the previous token when the cursor is at EOF.
    Token anchor = recognizer.getCurrentToken();
    Token previous = recognizer.getInputStream().LT(-1);
    if (anchor.getType() == Token.EOF && previous != null) {
        anchor = previous;
    }

    Pair<TokenSource, CharStream> source = new Pair<TokenSource, CharStream>(
            anchor.getTokenSource(), anchor.getTokenSource().getInputStream());
    return recognizer.getTokenFactory().create(source, missingType, text,
            Token.DEFAULT_CHANNEL,
            -1, -1,
            anchor.getLine(), anchor.getCharPositionInLine());
}
项目:Scratch-ApuC    文件:ParserATNSimulator.java   
/**
 * Returns a configuration set containing only the configurations from
 * {@code configs} that are in a {@link RuleStopState}.  When every
 * configuration is already in a rule stop state the input set is returned
 * unchanged.
 *
 * <p>With {@code lookToEndOfRule} set, configurations that can reach a rule
 * stop state via epsilon-only transitions (detected by EPSILON in
 * {@link ATN#nextTokens}) are mapped to that stop state and kept too.</p>
 *
 * @param configs the configuration set to filter
 * @param lookToEndOfRule whether to chase epsilon-only paths to rule stops
 * @return {@code configs} itself, or a new filtered set
 */
@NotNull
protected ATNConfigSet removeAllConfigsNotInRuleStopState(@NotNull ATNConfigSet configs, boolean lookToEndOfRule) {
    // Fast path: nothing needs filtering.
    if (PredictionMode.allConfigsInRuleStopStates(configs)) {
        return configs;
    }

    ATNConfigSet filtered = new ATNConfigSet(configs.fullCtx);
    for (ATNConfig c : configs) {
        if (c.state instanceof RuleStopState) {
            filtered.add(c, mergeCache);
        }
        else if (lookToEndOfRule && c.state.onlyHasEpsilonTransitions()) {
            // EPSILON in the next-token set means a rule stop is reachable
            // through epsilon transitions only.
            if (atn.nextTokens(c.state).contains(Token.EPSILON)) {
                ATNState stop = atn.ruleToStopState[c.state.ruleIndex];
                filtered.add(new ATNConfig(c, stop), mergeCache);
            }
        }
    }

    return filtered;
}
项目:Scratch-ApuC    文件:LL1Analyzer.java   
/**
 * Calculates the SLL(1) expected lookahead set for each outgoing transition
 * of an {@link ATNState}.  The returned array has one element per outgoing
 * transition of {@code s}; element <em>i</em> is {@code null} when the
 * closure from transition <em>i</em> hits a semantic predicate before
 * matching a symbol, or matches nothing at all.
 *
 * @param s the ATN state (may be {@code null})
 * @return the expected symbols per transition, or {@code null} when
 *         {@code s} is {@code null}
 */
    @Nullable
    public IntervalSet[] getDecisionLookahead(@Nullable ATNState s) {
        if (s == null) {
            return null;
        }

        int numTransitions = s.getNumberOfTransitions();
        IntervalSet[] result = new IntervalSet[numTransitions];
        for (int i = 0; i < numTransitions; i++) {
            result[i] = new IntervalSet();
            // seeThruPreds=false: hitting a predicate poisons this alt's set.
            _LOOK(s.transition(i).target, null, PredictionContext.EMPTY,
                  result[i], new HashSet<ATNConfig>(), new BitSet(),
                  false, false);
            // Wipe the set when nothing was found or a predicate was hit.
            if (result[i].size() == 0 || result[i].contains(HIT_PRED)) {
                result[i] = null;
            }
        }
        return result;
    }
项目:jStyleParser    文件:CSSErrorStrategy.java   
/**
 * Consumes tokens until the lexer state is function-balanced and a token
 * from the follow set is matched.  Greedy variant: the matched token itself
 * is also consumed before the loop terminates.
 */
protected void consumeUntilGreedy(Parser recognizer, IntervalSet set, CSSLexerState.RecoveryMode mode) {
    CSSToken t;
    do {
        Token next = recognizer.getInputStream().LT(1);
        if (next instanceof CSSToken) {
            t = (CSSToken) recognizer.getInputStream().LT(1);
            if (t.getType() == Token.EOF) {
                // Never consume past end of input.
                logger.trace("token eof ");
                break;
            }
        } else
            break; /* not a CSSToken, probably EOF */
        logger.trace("Skipped greedy: {}", t.getText());
        // consume token even if it will match
        recognizer.consume();
    }
    // Stop only once the lexer state is balanced AND the consumed token
    // was in the follow set.
    while (!(t.getLexerState().isBalanced(mode, null, t) && set.contains(t.getType())));
}
项目:jStyleParser    文件:CSSErrorStrategy.java   
/**
 * Consumes tokens until the lexer state is function-balanced and a token
 * from the follow set is matched.  Unlike the greedy variant, the matching
 * token is left in the stream (not consumed).
 */
public void consumeUntil(Parser recognizer, IntervalSet follow, CSSLexerState.RecoveryMode mode, CSSLexerState ls) {
    CSSToken t;
    boolean finish;
    TokenStream input = recognizer.getInputStream();
    do {
        Token next = input.LT(1);
        if (next instanceof CSSToken) {
            t = (CSSToken) input.LT(1);
            if (t.getType() == Token.EOF) {
                // Never consume past end of input.
                logger.trace("token eof ");
                break;
            }
        } else
            break; /* not a CSSToken, probably EOF */
        // consume token if does not match
        finish = (t.getLexerState().isBalanced(mode, ls, t) && follow.contains(t.getType()));
        if (!finish) {
            logger.trace("Skipped: {}", t);
            input.consume();
        }
    } while (!finish);
}
项目:intellij-plugin-v4    文件:ANTLRv4ExternalAnnotator.java   
/**
 * Returns the parser rules defined in {@code g} that are never referenced
 * anywhere in the grammar AST, keyed by rule name.  Returns {@code null}
 * for lexer grammars or when no AST is available.
 */
public static Map<String,GrammarAST> getUnusedParserRules(Grammar g) {
    if (g.ast == null || g.isLexer()) {
        return null;
    }

    // Walk the AST ourselves: in case of errors ANTLR's Grammar object
    // might have bailed on rule definitions etc.
    List<GrammarAST> ruleNodes = g.ast.getNodesWithTypePreorderDFS(IntervalSet.of(ANTLRParser.RULE_REF));
    Map<String,GrammarAST> definitions = new HashMap<String,GrammarAST>();
    Set<String> references = new HashSet<String>();
    for (GrammarAST node : ruleNodes) {
        if (node.getParent().getType() == ANTLRParser.RULE) {
            definitions.put(node.getText(), node);
        }
        else if (node instanceof RuleRefAST) {
            references.add(((RuleRefAST) node).getText());
        }
    }

    // Whatever is defined but never referenced is unused.
    definitions.keySet().removeAll(references);
    return definitions;
}
项目:beetl2.0    文件:BeetlAntlrErrorStrategy.java   
/**
 * Reports an extraneous (unwanted) token by throwing a BeetlParserException
 * instead of attempting recovery.  No-op while already in error recovery.
 * The message text is user-facing Chinese: "extraneous input ... expecting ...".
 */
protected void reportUnwantedToken(@NotNull Parser recognizer)
{
    if (inErrorRecoveryMode(recognizer))
    {
        return;
    }

    beginErrorCondition(recognizer);

    Token t = recognizer.getCurrentToken();
    String tokenName = getTokenErrorDisplay(t);
    IntervalSet expecting = getExpectedTokens(recognizer);
    String msg = "多余输入 " + tokenName + " 期望 " + expecting.toString(recognizer.getTokenNames());
    BeetlException exception = new BeetlParserException(BeetlException.PARSER_MISS_ERROR, msg);
    //      exception.token = this.getGrammarToken(t);
    exception.pushToken(this.getGrammarToken(t));
    throw exception;
}
项目:digraph-parser    文件:GraphParser.java   
/** Reports a missing token by throwing immediately rather than recovering. */
@Override
public void reportMissingToken(Parser recognizer) {
    beginErrorCondition(recognizer);
    Token offending = recognizer.getCurrentToken();
    IntervalSet expected = getExpectedTokens(recognizer);
    String message = "missing " + expected.toString(recognizer.getTokenNames())
            + " at " + getTokenErrorDisplay(offending);
    throw new RecognitionException(message, recognizer, recognizer.getInputStream(), recognizer.getContext());
}
项目:arma-dialog-creator    文件:ExpressionInterpreter.java   
/** Fails fast on a missing token: builds the message and throws at once. */
@Override
public void reportMissingToken(Parser recognizer) {
    beginErrorCondition(recognizer);
    Token current = recognizer.getCurrentToken();
    IntervalSet expectedSet = getExpectedTokens(recognizer);
    String description = "missing " + expectedSet.toString(recognizer.getTokenNames())
            + " at " + getTokenErrorDisplay(current);
    throw new RecognitionException(description, recognizer, recognizer.getInputStream(), recognizer.getContext());
}
项目:ftc    文件:CursorContextListener.java   
/**
 * Records the offending token and the names of the expected tokens for
 * later inspection, then triggers error debugging.
 */
@Override
public void notifyOnError(Token offendingToken, Token missingToken, IntervalSet tokensExpected) {
    super.notifyOnError(offendingToken, missingToken, tokensExpected);

    if (offendingToken != null) {
        offendingSymbol = offendingToken;
    }
    if (tokensExpected.size() > 0) {
        expectedSymbols = getTokenNames(tokensExpected);
    }

    debugOnError();
}
项目:codebuff    文件:GrammarAST.java   
/**
 * Collects, in breadth-first order, every node in this subtree whose token
 * type is contained in {@code types}.  A {@code null} set matches all nodes.
 */
public List<GrammarAST> getNodesWithType(IntervalSet types) {
    List<GrammarAST> matches = new ArrayList<GrammarAST>();
    List<GrammarAST> pending = new LinkedList<GrammarAST>();
    pending.add(this);
    while (!pending.isEmpty()) {
        GrammarAST node = pending.remove(0);
        if (types == null || types.contains(node.getType())) {
            matches.add(node);
        }
        if (node.children != null) {
            // Enqueue children so siblings are visited before descendants.
            pending.addAll(Arrays.asList(node.getChildrenAsArray()));
        }
    }
    return matches;
}
项目:codebuff    文件:GrammarAST.java   
/**
 * Preorder depth-first accumulation of nodes whose token type is in
 * {@code types}; this node is tested before its children.
 */
public void getNodesWithTypePreorderDFS_(List<GrammarAST> nodes, IntervalSet types) {
    if (types.contains(this.getType())) {
        nodes.add(this);
    }
    // Recurse into every child of this node.
    for (int childIndex = 0; childIndex < getChildCount(); childIndex++) {
        ((GrammarAST) getChild(childIndex)).getNodesWithTypePreorderDFS_(nodes, types);
    }
}
项目:codebuff    文件:Grammar.java   
/** Return a set of all possible token or char types for this grammar. */
public IntSet getTokenTypes() {
    // Lexers range over char values; parsers over user token types.
    return isLexer()
            ? getAllCharValues()
            : IntervalSet.of(Token.MIN_USER_TOKEN_TYPE, getMaxTokenType());
}
项目:codebuff    文件:Grammar.java   
/**
 * Builds a map from ATN state number to the token-index region of the
 * grammar AST node that created the state, for nodes whose type is in
 * {@code grammarTokenTypes}.  RULE/BLOCK/CLOSURE nodes of left-recursive
 * rules are mapped back to the original (pre-transformation) rule's region.
 */
public static Map<Integer, Interval> getStateToGrammarRegionMap(GrammarRootAST ast, IntervalSet grammarTokenTypes) {
    Map<Integer, Interval> stateToGrammarRegionMap = new HashMap<Integer, Interval>();
    if ( ast==null ) return stateToGrammarRegionMap;

    List<GrammarAST> nodes = ast.getNodesWithType(grammarTokenTypes);
    for (GrammarAST n : nodes) {
        if (n.atnState != null) {
            // Default region: the node's own token span.
            Interval tokenRegion = Interval.of(n.getTokenStartIndex(), n.getTokenStopIndex());
            org.antlr.runtime.tree.Tree ruleNode = null;
            // RULEs, BLOCKs of transformed recursive rules point to original token interval
            switch ( n.getType() ) {
                case ANTLRParser.RULE :
                    ruleNode = n;
                    break;
                case ANTLRParser.BLOCK :
                case ANTLRParser.CLOSURE :
                    ruleNode = n.getAncestor(ANTLRParser.RULE);
                    break;
            }
            if ( ruleNode instanceof RuleAST ) {
                String ruleName = ((RuleAST) ruleNode).getRuleName();
                Rule r = ast.g.getRule(ruleName);
                if ( r instanceof LeftRecursiveRule ) {
                    // Use the untransformed rule's span so positions map to
                    // what the user actually wrote.
                    RuleAST originalAST = ((LeftRecursiveRule) r).getOriginalAST();
                    tokenRegion = Interval.of(originalAST.getTokenStartIndex(), originalAST.getTokenStopIndex());
                }
            }
            stateToGrammarRegionMap.put(n.atnState.stateNumber, tokenRegion);
        }
    }
    return stateToGrammarRegionMap;
}
项目:codebuff    文件:LexerATNFactory.java   
/** [Aa\t \u1234a-z\]\-] char sets */
@Override
public Handle charSetLiteral(GrammarAST charSetAST) {
    ATNState start = newState(charSetAST);
    ATNState stop = newState(charSetAST);
    IntervalSet charSet = getSetFromCharSetLiteral(charSetAST);
    // One set transition matches any character of the literal.
    start.addTransition(new SetTransition(stop, charSet));
    charSetAST.atnState = start;
    return new Handle(start, stop);
}
项目:codebuff    文件:LexerATNFactory.java   
/**
 * Converts a char-set literal AST node like {@code [Aa\t\u1234a-z\]\-]}
 * into an IntervalSet: escapes are unescaped, {@code x-y} becomes the
 * inclusive range [x, y], and an escaped dash stays a literal '-'.
 *
 * NOTE(review): an inverted range (x > y) is silently dropped with no
 * error reported — confirm whether that is intentional upstream.
 */
public IntervalSet getSetFromCharSetLiteral(GrammarAST charSetAST) {
    String chars = charSetAST.getText();
    // Strip the surrounding [ ] of the literal.
    chars = chars.substring(1, chars.length()-1);
    String cset = '"'+ chars +'"';
    IntervalSet set = new IntervalSet();

    // unescape all valid escape char like \n, leaving escaped dashes as '\-'
    // so we can avoid seeing them as '-' range ops.
    chars = CharSupport.getStringFromGrammarStringLiteral(cset);
    // now make x-y become set of char
    int n = chars.length();
    for (int i=0; i< n; i++) {
        int c = chars.charAt(i);
        if ( c=='\\' && (i+1)<n && chars.charAt(i+1)=='-' ) { // \-
            set.add('-');
            i++;
        }
        else if ( (i+2)<n && chars.charAt(i+1)=='-' ) { // range x-y
            int x = c;
            int y = chars.charAt(i+2);
            if ( x<=y ) set.add(x,y);
            i+=2;
        }
        else {
            set.add(c);
        }
    }
    return set;
}
项目:codebuff    文件:AnalysisPipeline.java   
/**
 * Verifies that every non-fragment lexer rule matches at least one symbol;
 * reports EPSILON_TOKEN for rules that can match the empty string.
 */
protected void processLexer() {
    for (Rule rule : g.rules.values()) {
        // Fragments may legitimately match nothing on their own.
        if (rule.isFragment()) {
            continue;
        }

        // EPSILON in LOOK(start) means the rule can complete without
        // consuming any input.
        LL1Analyzer analyzer = new LL1Analyzer(g.atn);
        IntervalSet lookahead = analyzer.LOOK(g.atn.ruleToStartState[rule.index], null);
        if (lookahead.contains(Token.EPSILON)) {
            g.tool.errMgr.grammarError(ErrorType.EPSILON_TOKEN, g.fileName,
                    ((GrammarAST) rule.ast.getChild(0)).getToken(), rule.name);
        }
    }
}
项目:codebuff    文件:AnalysisPipeline.java   
/** Return whether lookahead sets are disjoint; no lookahead ⇒ not disjoint */
public static boolean disjoint(IntervalSet[] altLook) {
    if (altLook == null) {
        return false;
    }

    IntervalSet seen = new IntervalSet();
    for (IntervalSet look : altLook) {
        // A null per-alt set means lookahead computation failed.
        if (look == null) {
            return false;
        }
        // Any intersection with an earlier alt means a collision.
        if (!look.and(seen).isNil()) {
            return false;
        }
        seen.addAll(look);
    }
    return true;
}
项目:codebuff    文件:LeftRecursiveRuleAnalyzer.java   
/**
 * Tags the rightmost recursive rule reference in alternative {@code t}
 * with precedence option {@code prec}; all other refs are left untouched.
 * Returns {@code t} (or {@code null} for a null input).
 */
public AltAST addPrecedenceArgToRules(AltAST t, int prec) {
    if (t == null) {
        return null;
    }

    // All top-level rule refs of this alternative, in preorder.
    List<GrammarAST> ruleRefs = t.getNodesWithTypePreorderDFS(IntervalSet.of(RULE_REF));
    for (GrammarAST node : ruleRefs) {
        RuleRefAST ref = (RuleRefAST) node;
        boolean isRecursive = ref.getText().equals(ruleName);
        boolean isRightmost = ref == ruleRefs.get(ruleRefs.size() - 1);
        if (isRecursive && isRightmost) {
            GrammarAST precedenceNode = new GrammarAST(new CommonToken(ANTLRParser.INT, "" + prec));
            ref.setOption(LeftRecursiveRuleTransformer.PRECEDENCE_OPTION_NAME, precedenceNode);
        }
    }
    return t;
}
项目:codebuff    文件:LL1PlusBlockSingleAlt.java   
/**
 * Code-model object for a {@code (...)+} block with a single LL(1)
 * alternative: records the loop-back decision/state numbers and builds the
 * loop-continuation lookahead test.
 */
public LL1PlusBlockSingleAlt(OutputModelFactory factory, GrammarAST plusRoot, List<CodeBlockForAlt> alts) {
    super(factory, plusRoot, alts);

    BlockAST blockAST = (BlockAST) plusRoot.getChild(0);
    PlusBlockStartState startState = (PlusBlockStartState) blockAST.atnState;

    stateNumber = startState.loopBackState.stateNumber;
    blockStartStateNumber = startState.stateNumber;
    this.decision = startState.loopBackState.decision;

    // altLookSets[0] is the lookahead that re-enters the loop.
    IntervalSet[] altLookSets = factory.getGrammar().decisionLOOK.get(decision);
    loopExpr = addCodeForLoopLookaheadTempVar(altLookSets[0]);
}
项目:codebuff    文件:TestSetInline.java   
/**
 * Builds the inline token-type membership test for {@code set}, trying both
 * bitset encodings and keeping whichever needs fewer words.
 */
public TestSetInline(OutputModelFactory factory, GrammarAST ast, IntervalSet set, int wordSize) {
    super(factory, ast);
    bitsetWordSize = wordSize;
    Bitset[] zeroBased = createBitsets(factory, set, wordSize, true);
    Bitset[] shifted = createBitsets(factory, set, wordSize, false);
    // Prefer the zero-offset encoding on ties.
    this.bitsets = zeroBased.length <= shifted.length ? zeroBased : shifted;
    this.varName = "_la";
}
项目:codebuff    文件:Choice.java   
/** Maps each alternative's lookahead set to target-language token labels. */
public List<String[]> getAltLookaheadAsStringLists(IntervalSet[] altLookSets) {
    List<String[]> labelsPerAlt = new ArrayList<String[]>();
    for (IntervalSet lookSet : altLookSets) {
        String[] labels = factory.getGenerator().getTarget()
                .getTokenTypesAsTargetLabels(factory.getGrammar(), lookSet.toArray());
        labelsPerAlt.add(labels);
    }
    return labelsPerAlt;
}
项目:codebuff    文件:Choice.java   
/**
 * Emits the LL(1) lookahead test for {@code look}; when an inline test is
 * produced, also declares its temp token-type variable in the current rule
 * function and captures the next token type in the preamble.
 *
 * @return the inline test op, or {@code null} when none was generated
 */
public TestSetInline addCodeForLookaheadTempVar(IntervalSet look) {
    List<SrcOp> testOps = factory.getLL1Test(look, ast);
    TestSetInline inlineTest = Utils.find(testOps, TestSetInline.class);
    if (inlineTest == null) {
        return null;
    }
    factory.getCurrentRuleFunction().addLocalDecl(new TokenTypeDecl(factory, inlineTest.varName));
    addPreambleOp(new CaptureNextTokenType(factory, inlineTest.varName));
    return inlineTest;
}
项目:codebuff    文件:ThrowRecognitionException.java   
/**
 * Code-model op that throws a RecognitionException; records the grammar
 * file/position of the AST node that caused it.
 */
public ThrowRecognitionException(OutputModelFactory factory, GrammarAST ast, IntervalSet expecting) {
        super(factory, ast);
        //this.decision = ((BlockStartState)ast.ATNState).decision;
        // NOTE(review): the second assignment below overwrites grammarLine
        // with the char position, so the line number is lost — this looks
        // like it was meant to target a separate column field; confirm
        // against the field declarations / upstream ANTLR source.
        grammarLine = ast.getLine();
        grammarLine = ast.getCharPositionInLine();
        grammarFile = factory.getGrammar().fileName;
        //this.expecting = factory.createExpectingBitSet(ast, decision, expecting, "error");
//      factory.defineBitSet(this.expecting);
    }
项目:codebuff    文件:LL1AltBlock.java   
/**
 * Code-model object for an LL(1) alternative block: precomputes the
 * per-alt lookahead labels and the combined expected set for errors.
 */
public LL1AltBlock(OutputModelFactory factory, GrammarAST blkAST, List<CodeBlockForAlt> alts) {
    super(factory, blkAST, alts);
    this.decision = ((DecisionState) blkAST.atnState).decision;

    // Lookahead for each alt 1..n.
    IntervalSet[] altLookSets = factory.getGrammar().decisionLOOK.get(decision);
    altLook = getAltLookaheadAsStringLists(altLookSets);

    // The union of all alt lookaheads is what we "expect" on a no-viable-alt.
    IntervalSet expecting = IntervalSet.or(altLookSets);
    this.error = getThrowNoViableAlt(factory, blkAST, expecting);
}
项目:codebuff    文件:LL1Loop.java   
/**
 * Like {@link #addCodeForLookaheadTempVar} but additionally re-captures the
 * next token type at the end of every loop iteration.
 */
public SrcOp addCodeForLoopLookaheadTempVar(IntervalSet look) {
    TestSetInline inlineTest = addCodeForLookaheadTempVar(look);
    if (inlineTest != null) {
        addIterationOp(new CaptureNextTokenType(factory, inlineTest.varName));
    }
    return inlineTest;
}
项目:codebuff    文件:Sync.java   
/**
 * Code-model op for a sync point at {@code decision}.  The
 * {@code expecting} set and {@code position} parameters are currently
 * unused — the bitset machinery that consumed them is commented out below.
 */
public Sync(OutputModelFactory factory,
                GrammarAST blkOrEbnfRootAST,
                IntervalSet expecting,
                int decision,
                String position)
    {
        super(factory, blkOrEbnfRootAST);
        this.decision = decision;
//      this.expecting = factory.createExpectingBitSet(ast, decision, expecting, position);
//      factory.defineBitSet(this.expecting);
    }
项目:dcos-commons    文件:RangeUtils.java   
/**
 * Removes the range intervals listed in {@code subtrahend} from
 * {@code minuend} and returns the remaining ranges.
 */
public static List<Range> subtractRanges(List<Range> minuend, List<Range> subtrahend) {
    IntervalSet left = intervalsToIntervalSet(rangesToIntervals(minuend));
    IntervalSet right = intervalsToIntervalSet(rangesToIntervals(subtrahend));
    return intervalSetToRanges(IntervalSet.subtract(left, right));
}
项目:dcos-commons    文件:RangeUtils.java   
/** Folds a list of inclusive intervals into a single IntervalSet. */
private static IntervalSet intervalsToIntervalSet(List<Interval> intervals) {
    IntervalSet result = new IntervalSet();
    for (Interval range : intervals) {
        result.add(range.a, range.b);
    }
    return result;
}
项目:ksql    文件:KsqlParserErrorStrategy.java   
/**
 * Reports an extraneous token to the error listeners (no exception is
 * thrown); does nothing while already recovering from an earlier error.
 */
protected void reportUnwantedToken(Parser recognizer) {
  if (this.inErrorRecoveryMode(recognizer)) {
    return;
  }
  this.beginErrorCondition(recognizer);
  Token offending = recognizer.getCurrentToken();
  String display = this.getTokenErrorDisplay(offending);
  IntervalSet expected = this.getExpectedTokens(recognizer);
  String msg =
      "extraneous input " + display + " expecting "
      + expected.toString(recognizer.getVocabulary());
  recognizer.notifyErrorListeners(offending, msg, (RecognitionException) null);
}
项目:ksql    文件:KsqlParserErrorStrategy.java   
/**
 * Reports a missing token to the error listeners (no exception is thrown);
 * does nothing while already recovering from an earlier error.
 */
protected void reportMissingToken(Parser recognizer) {
  if (this.inErrorRecoveryMode(recognizer)) {
    return;
  }
  this.beginErrorCondition(recognizer);
  Token offending = recognizer.getCurrentToken();
  IntervalSet expected = this.getExpectedTokens(recognizer);
  String msg =
      "missing " + expected.toString(recognizer.getVocabulary()) + " at " + this
          .getTokenErrorDisplay(offending);
  recognizer.notifyErrorListeners(offending, msg, (RecognitionException) null);
}
项目:jetbrains    文件:ErrorStrategyAdaptor.java   
@Override
protected void consumeUntil(Parser recognizer, IntervalSet set) {
    Token o = recognizer.getCurrentToken();
    if ( o.getType()==Token.EOF ) {
        recognizer.getRuleContext().addErrorNode(o);
    }
    super.consumeUntil(recognizer, set);
}
项目:KIARA    文件:ParserExceptionErrorStrategyImpl.java   
/**
 * Throws a RecognitionException describing the missing token together with
 * the source file and line where it was detected.
 */
@Override
public void reportMissingToken(Parser recognizer) {
    beginErrorCondition(recognizer);
    Token offending = recognizer.getCurrentToken();
    IntervalSet expected = getExpectedTokens(recognizer);
    String msg = "In file " + recognizer.getSourceName()
            + " at line " + recognizer.getContext().start.getLine() + ": "
            + "Missing " + expected.toString(recognizer.getTokenNames())
            + " at " + getTokenErrorDisplay(offending) + ";";
    throw new RecognitionException(msg, recognizer, recognizer.getInputStream(), recognizer.getContext());
}
项目:vzome-core    文件:ZomicASTTest.java   
/**
 * Exploratory test: lexes "red -7" and checks that the ATN reports a
 * non-empty expected-token set for state 0.  Any exception fails the test
 * with its message via assertNull.
 */
public void testOK_ZomicLexer() {
    try {
        CharStream inputStream = new ANTLRInputStream("red -7");
        ZomicLexer lexer = new ZomicLexer(inputStream);
        ATN atn = lexer.getATN();
        int stateNumber = 0;
        IntervalSet intervalSet = atn.getExpectedTokens(stateNumber, RuleContext.EMPTY);
        // TODO: just playing around to see what's available here...
        assertTrue(intervalSet.size() > 0);
    }
    catch(Exception ex) {
        // assertNull with a non-null ex always fails, surfacing ex's text.
        assertNull(ex.toString(), ex);
    }
}
项目:goworks    文件:GroupSetElementsHintParserTask.java   
/**
 * Finds contiguous runs of non-ignored alternatives that could be grouped
 * into a set, and records their source character ranges in _rewriteRanges.
 * Bails out when fewer than two alternatives qualify or when every
 * alternative qualifies (nothing to single out).
 */
private void processAlternatives(List<? extends AbstractGrammarParser.AlternativeContext> alternatives) {
    if (alternatives.size() <= 1) {
        return;
    }

    // Indices of alternatives that are candidates for set-grouping.
    IntervalSet setlikeAlts = new IntervalSet();
    for (int i = 0; i < alternatives.size(); i++) {
        if (!isIgnored(alternatives.get(i))) {
            setlikeAlts.add(i);
        }
    }

    if (setlikeAlts.size() <= 1 || setlikeAlts.size() == alternatives.size()) {
        return;
    }

    // Only contiguous runs of 2+ candidate alternatives become hints.
    for (Interval interval : setlikeAlts.getIntervals()) {
        if (interval.length() > 1) {
            TerminalNode firstNode = ParseTrees.getStartNode(alternatives.get(interval.a));
            TerminalNode lastNode = ParseTrees.getStopNode(alternatives.get(interval.b));
            if (firstNode == null || lastNode == null) {
                continue;
            }

            int startIndex = firstNode.getSymbol().getStartIndex();
            int stopIndex = lastNode.getSymbol().getStopIndex();
            _rewriteRanges.add(startIndex, stopIndex);
        }
    }
}
项目:goworks    文件:GroupHighlighterLexer.java   
/**
 * Builds the most specific transition for {@code set}: an atom transition
 * for a single character, a range transition for one contiguous interval,
 * otherwise a general set transition.
 */
private static Transition createSetTransition(ATNState target, IntervalSet set) {
    if (set.getIntervals().size() != 1) {
        return new SetTransition(target, set);
    }
    Interval only = set.getIntervals().get(0);
    return only.a == only.b
            ? new AtomTransition(target, only.a)
            : new RangeTransition(target, only.a, only.b);
}