Java class org.antlr.v4.runtime.Vocabulary: example usages from open-source projects

Project: rainbow    File: ReservedIdentifiers.java
private static Set<String> possibleIdentifiers()
{
    ImmutableSet.Builder<String> names = ImmutableSet.builder();
    Vocabulary vocabulary = SqlBaseLexer.VOCABULARY;
    for (int i = 0; i <= vocabulary.getMaxTokenType(); i++) {
        String name = nullToEmpty(vocabulary.getLiteralName(i));
        Matcher matcher = IDENTIFIER.matcher(name);
        if (matcher.matches()) {
            names.add(matcher.group(1));
        }
    }
    return names.build();
}
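For reference, Vocabulary distinguishes three kinds of names per token type: getLiteralName(t) returns the quoted literal from the grammar (e.g. "'select'") or null, getSymbolicName(t) returns the declared token name (e.g. "SELECT") or null, and getDisplayName(t) always returns something, preferring the literal, then the symbolic name, then the numeric type. A minimal sketch that dumps all three for every token type; SqlBaseLexer stands in for any ANTLR-generated lexer, all of which expose a static VOCABULARY field:

import org.antlr.v4.runtime.Vocabulary;

static void dumpVocabulary() {
    Vocabulary vocabulary = SqlBaseLexer.VOCABULARY;
    // Token types run from 0 up to getMaxTokenType(), inclusive (EOF is -1).
    for (int type = 0; type <= vocabulary.getMaxTokenType(); type++) {
        System.out.printf("%3d literal=%-14s symbolic=%-14s display=%s%n",
                          type,
                          vocabulary.getLiteralName(type),   // null unless the grammar defines a literal
                          vocabulary.getSymbolicName(type),  // null unless the token has a name
                          vocabulary.getDisplayName(type));  // never null
    }
}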
Project: codebuff    File: Trainer.java
public static TerminalNode getMatchingLeftSymbol(Corpus corpus,
                                                 InputDocument doc,
                                                 TerminalNode node)
{
    ParserRuleContext parent = (ParserRuleContext)node.getParent();
    int curTokensParentRuleIndex = parent.getRuleIndex();
    Token curToken = node.getSymbol();
    if (corpus.ruleToPairsBag != null) {
        String ruleName = doc.parser.getRuleNames()[curTokensParentRuleIndex];
        RuleAltKey ruleAltKey = new RuleAltKey(ruleName, parent.getAltNumber());
        List<Pair<Integer, Integer>> pairs = corpus.ruleToPairsBag.get(ruleAltKey);
        if ( pairs!=null ) {
            // Find the appropriate pair for the current token.
            // If more than one pair (a,b) has b == current token, pick the first one,
            // but give preference to common pairs like ('{','}'); and if b is
            // punctuation, prefer an a that is also punctuation.
            List<Integer> viableMatchingLeftTokenTypes = viableLeftTokenTypes(parent, curToken, pairs);
            Vocabulary vocab = doc.parser.getVocabulary();
            if ( !viableMatchingLeftTokenTypes.isEmpty() ) {
                int matchingLeftTokenType =
                    CollectTokenPairs.getMatchingLeftTokenType(curToken, viableMatchingLeftTokenTypes, vocab);
                List<TerminalNode> matchingLeftNodes = parent.getTokens(matchingLeftTokenType);
                // get matching left node by getting last node to left of current token
                List<TerminalNode> nodesToLeftOfCurrentToken =
                    filter(matchingLeftNodes, n -> n.getSymbol().getTokenIndex()<curToken.getTokenIndex());
                if ( nodesToLeftOfCurrentToken.isEmpty() ) {
                    // guard: get(size()-1) on an empty list would throw, so report and bail out
                    System.err.println("can't find matching node for "+node.getSymbol());
                    return null;
                }
                return nodesToLeftOfCurrentToken.get(nodesToLeftOfCurrentToken.size()-1);
            }
        }
    }
    return null;
}
Project: codebuff    File: GrammarParserInterpreter.java
public GrammarParserInterpreter(Grammar g,
                                String grammarFileName,
                                Vocabulary vocabulary,
                                Collection<String> ruleNames,
                                ATN atn,
                                TokenStream input) {
    super(grammarFileName, vocabulary, ruleNames, atn, input);
    this.g = g;
}
Project: goworks    File: ParserDebuggerTokensTaskTaggerSnapshot.java
@Override
protected TokenSourceWithStateV4<SimpleLexerState> createLexer(CharStream input, SimpleLexerState startState) {
    ATN atn = new ATNDeserializer().deserialize(lexerInterpreterData.serializedAtn.toCharArray());
    Vocabulary vocabulary = lexerInterpreterData.vocabulary;
    String grammarFileName = lexerInterpreterData.grammarFileName;
    List<String> ruleNames = lexerInterpreterData.ruleNames;
    List<String> modeNames = lexerInterpreterData.modeNames;
    ParserDebuggerLexerWrapper lexer = new ParserDebuggerLexerWrapper(grammarFileName, vocabulary, ruleNames, modeNames, atn, input);
    startState.apply(lexer);
    return lexer;
}
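Stripped of the debugger plumbing, the pattern above is the standard ANTLR recipe for building a lexer at runtime from a serialized ATN. A standalone sketch, assuming serializedAtn, vocabulary, ruleNames, and modeNames were all produced from the same lexer grammar (the grammar file name is a placeholder; this LexerInterpreter overload is deprecated in newer runtimes in favor of one that also takes channel names):

import org.antlr.v4.runtime.*;
import org.antlr.v4.runtime.atn.ATN;
import org.antlr.v4.runtime.atn.ATNDeserializer;
import java.util.List;

static CommonTokenStream tokenize(String serializedAtn, Vocabulary vocabulary,
                                  List<String> ruleNames, List<String> modeNames,
                                  String text) {
    ATN atn = new ATNDeserializer().deserialize(serializedAtn.toCharArray());
    LexerInterpreter lexer = new LexerInterpreter(
            "MyLexer.g4", vocabulary, ruleNames, modeNames, atn,
            CharStreams.fromString(text)); // CharStreams requires ANTLR >= 4.7
    CommonTokenStream tokens = new CommonTokenStream(lexer);
    tokens.fill(); // tokenize the whole input eagerly
    return tokens;
}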
Project: goworks    File: ParserDebuggerTokensTaskTaggerSnapshot.java
@Override
protected TokenSource getEffectiveTokenSource(TokenSourceWithStateV4<SimpleLexerState> lexer) {
    ATN atn = new ATNDeserializer().deserialize(lexerInterpreterData.serializedAtn.toCharArray());
    Vocabulary vocabulary = lexerInterpreterData.vocabulary;
    String grammarFileName = lexerInterpreterData.grammarFileName;
    List<String> ruleNames = lexerInterpreterData.ruleNames;
    List<String> modeNames = lexerInterpreterData.modeNames;
    return new ParserDebuggerLexerWrapper(grammarFileName, vocabulary, ruleNames, modeNames, atn, lexer.getInputStream());
}
Project: antlrworks2    File: ParserDebuggerTokensTaskTaggerSnapshot.java
@Override
protected TokenSourceWithStateV4<SimpleLexerState> createLexer(CharStream input, SimpleLexerState startState) {
    ATN atn = new ATNDeserializer().deserialize(lexerInterpreterData.serializedAtn.toCharArray());
    Vocabulary vocabulary = lexerInterpreterData.vocabulary;
    String grammarFileName = lexerInterpreterData.grammarFileName;
    List<String> ruleNames = lexerInterpreterData.ruleNames;
    List<String> modeNames = lexerInterpreterData.modeNames;
    ParserDebuggerLexerWrapper lexer = new ParserDebuggerLexerWrapper(grammarFileName, vocabulary, ruleNames, modeNames, atn, input);
    startState.apply(lexer);
    return lexer;
}
Project: antlrworks2    File: ParserDebuggerTokensTaskTaggerSnapshot.java
@Override
protected TokenSource getEffectiveTokenSource(TokenSourceWithStateV4<SimpleLexerState> lexer) {
    ATN atn = new ATNDeserializer().deserialize(lexerInterpreterData.serializedAtn.toCharArray());
    Vocabulary vocabulary = lexerInterpreterData.vocabulary;
    String grammarFileName = lexerInterpreterData.grammarFileName;
    List<String> ruleNames = lexerInterpreterData.ruleNames;
    List<String> modeNames = lexerInterpreterData.modeNames;
    return new ParserDebuggerLexerWrapper(grammarFileName, vocabulary, ruleNames, modeNames, atn, lexer.getInputStream());
}
Project: rapidminer    File: FunctionExpressionLexer.java
@Override
public Vocabulary getVocabulary() {
    return VOCABULARY;
}
Project: Expr3    File: ExprParser.java
@Override
public Vocabulary getVocabulary() {
    return VOCABULARY;
}
Project: codebuff    File: Trainer.java
public static String _toString(FeatureMetaData[] FEATURES, InputDocument doc, int[] features,
                               boolean showInfo) {
    Vocabulary v = doc.parser.getVocabulary();
    String[] ruleNames = doc.parser.getRuleNames();
    StringBuilder buf = new StringBuilder();
    for (int i=0; i<FEATURES.length; i++) {
        if ( FEATURES[i].type.equals(UNUSED) ) continue;
        if ( i>0 ) buf.append(" ");
        if ( i==INDEX_CUR_TOKEN_TYPE ) {
            buf.append("| "); // separate prev from current tokens
        }
        int displayWidth = FEATURES[i].type.displayWidth;
        switch ( FEATURES[i].type ) {
            case TOKEN :
                String tokenName = v.getDisplayName(features[i]);
                String abbrev = StringUtils.abbreviateMiddle(tokenName, "*", displayWidth);
                String centered = StringUtils.center(abbrev, displayWidth);
                buf.append(String.format("%"+displayWidth+"s", centered));
                break;
            case RULE :
                if ( features[i]>=0 ) {
                    String ruleName = ruleNames[unrulealt(features[i])[0]];
                    int ruleAltNum = unrulealt(features[i])[1];
                    ruleName += ":"+ruleAltNum;
                    abbrev = StringUtils.abbreviateMiddle(ruleName, "*", displayWidth);
                    buf.append(String.format("%"+displayWidth+"s", abbrev));
                }
                else {
                    buf.append(Tool.sequence(displayWidth, " "));
                }
                break;
            case INT :
            case INFO_LINE:
            case INFO_CHARPOS:
                if ( showInfo ) {
                    if ( features[i]>=0 ) {
                        buf.append(String.format("%"+displayWidth+"s", StringUtils.center(String.valueOf(features[i]), displayWidth)));
                    }
                    else {
                        buf.append(Tool.sequence(displayWidth, " "));
                    }
                }
                break;
            case INFO_FILE:
                if ( showInfo ) {
                    String fname = new File(doc.fileName).getName();
                    fname = StringUtils.abbreviate(fname, displayWidth);
                    buf.append(String.format("%"+displayWidth+"s", fname));
                }
                break;
            case BOOL :
                if ( features[i]!=-1 ) {
                    buf.append(features[i] == 1 ? "true " : "false");
                }
                else {
                    buf.append(Tool.sequence(displayWidth, " "));
                }
                break;
            default :
                System.err.println("NO STRING FOR FEATURE TYPE: "+ FEATURES[i].type);
        }
    }
    return buf.toString();
}
Project: codebuff    File: CollectTokenPairs.java
public CollectTokenPairs(Vocabulary vocab, String[] ruleNames) {
    this.vocab = vocab;
    this.ruleNames = ruleNames;
}
Project: codebuff    File: Grammar.java
/**
 * Gets a {@link Vocabulary} instance describing the vocabulary used by the
 * grammar.
 */
public Vocabulary getVocabulary() {
    return new VocabularyImpl(getTokenLiteralNames(), getTokenSymbolicNames());
}
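As a sketch of what this builds: VocabularyImpl pairs two parallel arrays indexed by token type, literal names and symbolic names, and getDisplayName prefers the literal when both exist. The names below are made up for illustration:

import org.antlr.v4.runtime.Vocabulary;
import org.antlr.v4.runtime.VocabularyImpl;

// Index 0 is unused; token types start at 1. null means "no such name".
String[] literalNames  = { null, "'+'", "'-'", null };
String[] symbolicNames = { null, "PLUS", "MINUS", "ID" };
Vocabulary vocab = new VocabularyImpl(literalNames, symbolicNames);

vocab.getDisplayName(1); // "'+'"  -- the literal wins when both names exist
vocab.getDisplayName(3); // "ID"   -- falls back to the symbolic name
vocab.getMaxTokenType(); // 3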
Project: netlist-graph    File: Verilog2001Parser.java
@Override
public Vocabulary getVocabulary() {
    return VOCABULARY;
}
Project: netlist-graph    File: Verilog2001Lexer.java
@Override
public Vocabulary getVocabulary() {
    return VOCABULARY;
}
Project: KeepTry    File: HelloLexer.java
@Override
public Vocabulary getVocabulary() {
    return VOCABULARY;
}
Project: KeepTry    File: HelloParser.java
@Override
public Vocabulary getVocabulary() {
    return VOCABULARY;
}
Project: rapidminer-studio    File: FunctionExpressionLexer.java
@Override
public Vocabulary getVocabulary() {
    return VOCABULARY;
}
Project: UMLS-Terminology-Server    File: ExpressionConstraintParser.java
@Override
public Vocabulary getVocabulary() {
    return VOCABULARY;
}
Project: UMLS-Terminology-Server    File: ExpressionConstraintLexer.java
@Override
public Vocabulary getVocabulary() {
  return VOCABULARY;
}
Project: netbeans-editorconfig-editor    File: EditorConfigLexer.java
@Override
public Vocabulary getVocabulary() {
    return VOCABULARY;
}
Project: netbeans-editorconfig-editor    File: EditorConfigParser.java
@Override
public Vocabulary getVocabulary() {
    return VOCABULARY;
}
Project: SkinnyAssembler    File: SkinnyParser.java
@Override
public Vocabulary getVocabulary() {
    return VOCABULARY;
}
Project: SkinnyAssembler    File: SkinnyLexer.java
@Override
public Vocabulary getVocabulary() {
    return VOCABULARY;
}
Project: components    File: SoqlParser.java
@Override
public Vocabulary getVocabulary() {
    return VOCABULARY;
}
Project: goworks    File: ParserDebuggerReferenceAnchorsParserTask.java
@Override
public void parse(ParserTaskManager taskManager, ParseContext context, DocumentSnapshot snapshot, Collection<? extends ParserDataDefinition<?>> requestedData, ParserResultHandler results)
    throws InterruptedException, ExecutionException {

    //ParserDebuggerEditorKit.LEX
    synchronized (lock) {
        ParserData<FileParseResult> fileParseResultData = taskManager.getData(snapshot, ParserDebuggerParserDataDefinitions.FILE_PARSE_RESULT, EnumSet.of(ParserDataOptions.NO_UPDATE)).get();
        ParserData<ParserRuleContext> parseTreeResult = taskManager.getData(snapshot, ParserDebuggerParserDataDefinitions.REFERENCE_PARSE_TREE, EnumSet.of(ParserDataOptions.NO_UPDATE)).get();
        if (fileParseResultData == null || parseTreeResult == null) {
            Future<ParserData<Tagger<TokenTag<Token>>>> futureTokensData = taskManager.getData(snapshot, ParserDebuggerParserDataDefinitions.LEXER_TOKENS);
            Tagger<TokenTag<Token>> tagger = futureTokensData.get().getData();
            TaggerTokenSource tokenSource = new TaggerTokenSource(tagger, snapshot);
            InterruptableTokenStream tokenStream = new InterruptableTokenStream(tokenSource);
            ParserRuleContext parseResult;

            ParserInterpreterData parserInterpreterData = (ParserInterpreterData)snapshot.getVersionedDocument().getDocument().getProperty(ParserDebuggerEditorKit.PROP_PARSER_INTERP_DATA);
            String grammarFileName = parserInterpreterData.grammarFileName;
            Vocabulary vocabulary = parserInterpreterData.vocabulary;
            List<String> ruleNames = parserInterpreterData.ruleNames;
            ATN atn = new ATNDeserializer().deserialize(parserInterpreterData.serializedAtn.toCharArray());
            TracingParserInterpreter parser = new TracingParserInterpreter(grammarFileName, vocabulary, ruleNames, atn, tokenStream);

            long startTime = System.nanoTime();
            parser.setInterpreter(new StatisticsParserATNSimulator(parser, atn));
            parser.getInterpreter().optimize_ll1 = false;
            parser.getInterpreter().reportAmbiguities = true;
            parser.getInterpreter().setPredictionMode(PredictionMode.LL_EXACT_AMBIG_DETECTION);
            parser.removeErrorListeners();
            parser.addErrorListener(DescriptiveErrorListener.INSTANCE);
            parser.addErrorListener(new StatisticsParserErrorListener());
            SyntaxErrorListener syntaxErrorListener = new SyntaxErrorListener(snapshot);
            parser.addErrorListener(syntaxErrorListener);
            parser.setBuildParseTree(true);
            parser.setErrorHandler(new DefaultErrorStrategy());
            parseResult = parser.parse(parserInterpreterData.startRuleIndex);

            // NOTE: 'document' is presumably a VersionedDocument field of the enclosing task class
            String sourceName = (String)document.getDocument().getProperty(Document.TitleProperty);
            FileParseResult fileParseResult = new FileParseResult(sourceName, 0, parseResult, syntaxErrorListener.getSyntaxErrors(), tokenStream.size(), startTime, null, parser);
            fileParseResultData = new BaseParserData<>(context, ParserDebuggerParserDataDefinitions.FILE_PARSE_RESULT, snapshot, fileParseResult);
            parseTreeResult = new BaseParserData<>(context, ParserDebuggerParserDataDefinitions.REFERENCE_PARSE_TREE, snapshot, parseResult);
        }

        results.addResult(fileParseResultData);
        results.addResult(parseTreeResult);
    }
}
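Beneath the editor integration, the snippet reduces to the stock ANTLR pattern: deserialize the parser ATN, wrap it in a ParserInterpreter, attach error listeners, and invoke the start rule. A minimal sketch with placeholder inputs that must all come from the same grammar:

import org.antlr.v4.runtime.*;
import org.antlr.v4.runtime.atn.ATN;
import org.antlr.v4.runtime.atn.ATNDeserializer;
import java.util.List;

static ParserRuleContext parseWithInterpreter(String serializedAtn, Vocabulary vocabulary,
                                              List<String> ruleNames, TokenStream tokens,
                                              int startRuleIndex) {
    ATN atn = new ATNDeserializer().deserialize(serializedAtn.toCharArray());
    ParserInterpreter parser = new ParserInterpreter(
            "MyGrammar.g4", vocabulary, ruleNames, atn, tokens);
    parser.removeErrorListeners();
    parser.addErrorListener(new DiagnosticErrorListener()); // report ambiguities to stderr
    return parser.parse(startRuleIndex); // index of the grammar's start rule
}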
Project: goworks    File: ParserDebuggerReferenceAnchorsParserTask.java
public TracingParserInterpreter(String grammarFileName, Vocabulary vocabulary, Collection<String> ruleNames, ATN atn, TokenStream input) {
    super(grammarFileName, vocabulary, ruleNames, atn, input);
}
Project: goworks    File: ParserDebuggerTokensTaskTaggerSnapshot.java
public ParserDebuggerLexerWrapper(String grammarFileName, Vocabulary vocabulary, Collection<String> ruleNames, Collection<String> modeNames, ATN atn, CharStream input) {
    super(grammarFileName, vocabulary, ruleNames, modeNames, atn, input);
}
Project: org.pshdl    File: MemoryModelLexer.java
@Override
public Vocabulary getVocabulary() {
    return VOCABULARY;
}
Project: org.pshdl    File: MemoryModelParser.java
@Override
public Vocabulary getVocabulary() {
    return VOCABULARY;
}
Project: org.pshdl    File: PSHDLLangLexer.java
@Override
public Vocabulary getVocabulary() {
    return VOCABULARY;
}
Project: org.pshdl    File: PSHDLLang.java
@Override
public Vocabulary getVocabulary() {
    return VOCABULARY;
}
Project: editorconfig-netbeans    File: EditorConfigLexer.java
@Override
public Vocabulary getVocabulary() {
  return VOCABULARY;
}
Project: editorconfig-netbeans    File: EditorConfigParser.java
@Override
public Vocabulary getVocabulary() {
  return VOCABULARY;
}
Project: antlrworks2    File: ParserDebuggerReferenceAnchorsParserTask.java
@Override
public void parse(ParserTaskManager taskManager, ParseContext context, DocumentSnapshot snapshot, Collection<? extends ParserDataDefinition<?>> requestedData, ParserResultHandler results)
    throws InterruptedException, ExecutionException {

    //ParserDebuggerEditorKit.LEX
    synchronized (lock) {
        ParserData<FileParseResult> fileParseResultData = taskManager.getData(snapshot, ParserDebuggerParserDataDefinitions.FILE_PARSE_RESULT, EnumSet.of(ParserDataOptions.NO_UPDATE)).get();
        ParserData<ParserRuleContext> parseTreeResult = taskManager.getData(snapshot, ParserDebuggerParserDataDefinitions.REFERENCE_PARSE_TREE, EnumSet.of(ParserDataOptions.NO_UPDATE)).get();
        if (fileParseResultData == null || parseTreeResult == null) {
            Future<ParserData<Tagger<TokenTag<Token>>>> futureTokensData = taskManager.getData(snapshot, ParserDebuggerParserDataDefinitions.LEXER_TOKENS);
            Tagger<TokenTag<Token>> tagger = futureTokensData.get().getData();
            TaggerTokenSource tokenSource = new TaggerTokenSource(tagger, snapshot);
            InterruptableTokenStream tokenStream = new InterruptableTokenStream(tokenSource);
            ParserRuleContext parseResult;

            ParserInterpreterData parserInterpreterData = (ParserInterpreterData)snapshot.getVersionedDocument().getDocument().getProperty(ParserDebuggerEditorKit.PROP_PARSER_INTERP_DATA);
            String grammarFileName = parserInterpreterData.grammarFileName;
            Vocabulary vocabulary = parserInterpreterData.vocabulary;
            List<String> ruleNames = parserInterpreterData.ruleNames;
            ATN atn = new ATNDeserializer().deserialize(parserInterpreterData.serializedAtn.toCharArray());
            TracingParserInterpreter parser = new TracingParserInterpreter(grammarFileName, vocabulary, ruleNames, atn, tokenStream);

            long startTime = System.nanoTime();
            parser.setInterpreter(new StatisticsParserATNSimulator(parser, atn));
            parser.getInterpreter().optimize_ll1 = false;
            parser.getInterpreter().reportAmbiguities = true;
            parser.getInterpreter().setPredictionMode(PredictionMode.LL_EXACT_AMBIG_DETECTION);
            parser.removeErrorListeners();
            parser.addErrorListener(DescriptiveErrorListener.INSTANCE);
            parser.addErrorListener(new StatisticsParserErrorListener());
            SyntaxErrorListener syntaxErrorListener = new SyntaxErrorListener(snapshot);
            parser.addErrorListener(syntaxErrorListener);
            parser.setBuildParseTree(true);
            parser.setErrorHandler(new DefaultErrorStrategy());
            parseResult = parser.parse(parserInterpreterData.startRuleIndex);

            // NOTE: 'document' is presumably a VersionedDocument field of the enclosing task class
            String sourceName = (String)document.getDocument().getProperty(Document.TitleProperty);
            FileParseResult fileParseResult = new FileParseResult(sourceName, 0, parseResult, syntaxErrorListener.getSyntaxErrors(), tokenStream.size(), startTime, null, parser);
            fileParseResultData = new BaseParserData<>(context, ParserDebuggerParserDataDefinitions.FILE_PARSE_RESULT, snapshot, fileParseResult);
            parseTreeResult = new BaseParserData<>(context, ParserDebuggerParserDataDefinitions.REFERENCE_PARSE_TREE, snapshot, parseResult);
        }

        results.addResult(fileParseResultData);
        results.addResult(parseTreeResult);
    }
}
Project: antlrworks2    File: ParserDebuggerReferenceAnchorsParserTask.java
public TracingParserInterpreter(String grammarFileName, Vocabulary vocabulary, Collection<String> ruleNames, ATN atn, TokenStream input) {
    super(grammarFileName, vocabulary, ruleNames, atn, input);
}
Project: antlrworks2    File: ParserDebuggerTokensTaskTaggerSnapshot.java
public ParserDebuggerLexerWrapper(String grammarFileName, Vocabulary vocabulary, Collection<String> ruleNames, Collection<String> modeNames, ATN atn, CharStream input) {
    super(grammarFileName, vocabulary, ruleNames, modeNames, atn, input);
}