Example source code for the Java class org.apache.commons.lang3.text.StrTokenizer
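
StrTokenizer splits a string into tokens driven by configurable delimiter, quote, ignored, and trimmer matchers. Before the project examples, here is a minimal self-contained sketch (not taken from any of the projects below) of the core delimiter/quote behavior:

import org.apache.commons.lang3.text.StrTokenizer;

public class StrTokenizerDemo {
    public static void main(String[] args) {
        // Comma-delimited, double-quote quoted; empty tokens are kept and mapped to null.
        StrTokenizer tokenizer = new StrTokenizer("a,\"b,c\",,d", ',', '"');
        tokenizer.setEmptyTokenAsNull(true);
        tokenizer.setIgnoreEmptyTokens(false);
        for (String token : tokenizer.getTokenList()) {
            System.out.println(token); // prints a, then b,c, then null, then d
        }
    }
}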

Project: dremio-oss    File: DirectWriterCommand.java
private Writer getWriter(OptionManager options, SchemaConfig.SchemaInfoProvider infoProvider) throws IOException{
  final String storeTablePath = options.getOption(QUERY_RESULTS_STORE_TABLE.getOptionName()).string_val;
  final List<String> storeTable = new StrTokenizer(storeTablePath, '.', ParserConfig.QUOTING.string.charAt(0))
      .setIgnoreEmptyTokens(true).getTokenList();

  // store query results as the system user
  final SchemaPlus systemUserSchema = context.getRootSchema(
      SchemaConfig
          .newBuilder(SystemUser.SYSTEM_USERNAME)
          .setProvider(infoProvider)
          .build());
  final AbstractSchema schema = SchemaUtilities.resolveToMutableSchemaInstance(systemUserSchema,
      Util.skipLast(storeTable), true, MutationType.TABLE);

  // Query results are stored in arrow format. If need arises, we can change
  // this to a configuration option.
  final Map<String, Object> storageOptions = ImmutableMap.<String, Object> of("type",
      ArrowFormatPlugin.ARROW_DEFAULT_NAME);

  final CreateTableEntry createTableEntry = schema.createNewTable(Util.last(storeTable), WriterOptions.DEFAULT, storageOptions);
  return createTableEntry.getWriter(null);
}
Project: ASCIIGenome    File: Utils.java
/** Split string x into tokens. Effectively just a friendly wrapper around StrTokenizer.
 * Use *single* quotes to avoid splitting.
 */
public static ArrayList<String> tokenize(String x, String delimiterString){

    if(x == null){
        return null;
    }

    // This is a hack to allow empty tokens to be passed at the command line:
    // an empty token given as '' is converted to a quoted space, which is
    // trimmed back to an empty string after tokenization.
    x = x.replace("''", "' '");

    // See also http://stackoverflow.com/questions/38161437/inconsistent-behaviour-of-strtokenizer-to-split-string
    StrTokenizer str = new StrTokenizer(x);
    str.setTrimmerMatcher(StrMatcher.spaceMatcher());
    str.setDelimiterString(delimiterString);
    str.setQuoteChar('\'');
    // str.setIgnoreEmptyTokens(false);
    ArrayList<String> tokens = (ArrayList<String>) str.getTokenList();
    for(int i = 0; i < tokens.size(); i++){
        String tok = tokens.get(i).trim();
        tokens.set(i, tok);
    }
    return tokens;

}
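
A hypothetical call to the method above, showing the single-quote grouping it documents (input and output are made up for illustration):

// Hypothetical usage; not part of the ASCIIGenome sources.
ArrayList<String> tokens = Utils.tokenize("-F 1024 'chr1 100' x", " ");
// tokens -> [-F, 1024, chr1 100, x]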
Project: owsi-core-parent    File: FieldPath.java
public static final FieldPath fromString(String string) {
    if (StringUtils.isBlank(string)) {
        return null;
    } else {
        List<FieldPathComponent> components = Lists.newLinkedList();
        StrTokenizer tokenizer = new StrTokenizer(string, DELIMITER_MATCHER);
        for (String token : tokenizer.getTokenList()) {
            if (ITEM_TOKEN.equals(token)) {
                components.add(FieldPathComponent.ITEM);
            } else {
                components.add(new FieldPathPropertyComponent(token));
            }
        }
        return new FieldPath(components);
    }
}
Project: dwca-io    File: StrTokenizerPerformance.java
private long test(StrTokenizer tokenizer, File source) throws IOException {
  FileInputStream fis = new FileInputStream(source);
  InputStreamReader reader = new InputStreamReader(fis, "utf8");
  BufferedReader br = new BufferedReader(reader);

  // keep track of time while iterating
  long start = System.currentTimeMillis();
  String row = br.readLine();
  while (row != null) {
    tokenizer.reset(row);
    String[] columns = tokenizer.getTokenArray();
    row = br.readLine();
  }
  long dur = System.currentTimeMillis() - start;
  br.close();
  return dur;
}
Project: dwca-io    File: StrTokenizerTest.java
@Test
public void testCsvUnquoted() throws IOException {
  StrTokenizer tokenizer = new StrTokenizer();
  tokenizer.setDelimiterString(",");
  tokenizer.setEmptyTokenAsNull(true);
  tokenizer.setIgnoreEmptyTokens(false);

  tokenizer.reset("121,432423, 9099053,Frieda karla L.,DC.,Ahrens");
  String[] columns = tokenizer.getTokenArray();
  assertEquals("121", columns[0]);
  assertEquals("432423", columns[1]);
  assertEquals(" 9099053", columns[2]);
  assertEquals("Frieda karla L.", columns[3]);
  assertEquals("DC.", columns[4]);
  assertEquals("Ahrens", columns[5]);

  tokenizer.reset(",,,,zzz  ");
  columns = tokenizer.getTokenArray();
  assertNull(columns[0]);
  assertNull(columns[1]);
  assertNull(columns[2]);
  assertNull(columns[3]);
  assertEquals("zzz  ", columns[4]);
}
Project: openshift-deployer-plugin    File: DeployApplication.java
private Map<String, String> parseEnvironmentVariables(final BuildListener listener) throws AbortException {
    Map<String, String> mapOfEnvironmentVariables = new HashMap<String, String>();

    for (String environmentVariable :
            new StrTokenizer(environmentVariables, spaceMatcher(), quoteMatcher()).getTokenList()) {

        if (environmentVariable.contains("=")) {
            String[] parts = environmentVariable.split("=", 2);
            mapOfEnvironmentVariables.put(parts[0], parts[1]);
        } else {
            abort(listener, "Invalid environment variable: " + environmentVariable);
        }
    }

    return mapOfEnvironmentVariables;
}
Project: stratos    File: StratosApplication.java
@Override
protected int executeCommand(String line) {
    String[] tokens = new StrTokenizer(line).getTokenArray();
    String action = tokens[0];
    String[] actionArgs = Arrays.copyOfRange(tokens, 1, tokens.length);
    if (logger.isDebugEnabled()) {
        logger.debug("Executing command action: {}, Tokens: {}", action, tokens.length);
    }
    Command<StratosCommandContext> command = commands.get(action);
    if (command == null) {
        System.out.println(action + ": command not found.");
        return CliConstants.COMMAND_FAILED;
    }
    try {
        return command.execute(context, actionArgs, new Option[0]);
    } catch (CommandException e) {
        if (logger.isErrorEnabled()) {
            logger.error("Error executing command: " + action, e);
        }
        return CliConstants.ERROR_CODE;
    }
}
Project: Machine-Learning-End-to-Endguide-for-Java-developers    File: App.java
public static void apacheCommonsTokenizer(String text){
    StrTokenizer tokenizer = new StrTokenizer(text,",");
    while (tokenizer.hasNext()) {
        out.println(tokenizer.next());
    }

}
Project: Java-Data-Science-Made-Easy    File: App.java
public static void apacheCommonsTokenizer(String text){
    StrTokenizer tokenizer = new StrTokenizer(text,",");
    while (tokenizer.hasNext()) {
        out.println(tokenizer.next());
    }

}
Project: Java-for-Data-Science    File: App.java
public static void apacheCommonsTokenizer(String text){
    StrTokenizer tokenizer = new StrTokenizer(text,",");
    while (tokenizer.hasNext()) {
        out.println(tokenizer.next());
    }

}
Project: dremio-oss    File: SqlHandlerUtil.java
/**
 * When enabled, add a writer rel on top of the given rel to catch the output and write it to the configured store table.
 * @param inputRel
 * @return
 */
public static Rel storeQueryResultsIfNeeded(final SqlParser.Config config, final QueryContext context,
                                            final Rel inputRel) {
  final OptionManager options = context.getOptions();
  final boolean storeResults = options.getOption(STORE_QUERY_RESULTS.getOptionName()) != null ?
      options.getOption(STORE_QUERY_RESULTS.getOptionName()).bool_val : false;

  if (!storeResults) {
    return inputRel;
  }

  // store query results as the system user
  final SchemaPlus systemUserSchema = context.getRootSchema(
      SchemaConfig.newBuilder(SystemUser.SYSTEM_USERNAME)
          .setProvider(context.getSchemaInfoProvider())
          .build());
  final String storeTablePath = options.getOption(QUERY_RESULTS_STORE_TABLE.getOptionName()).string_val;
  final List<String> storeTable =
      new StrTokenizer(storeTablePath, '.', config.quoting().string.charAt(0))
          .setIgnoreEmptyTokens(true)
          .getTokenList();

  final AbstractSchema schema = SchemaUtilities.resolveToMutableSchemaInstance(systemUserSchema,
      Util.skipLast(storeTable), true, MutationType.TABLE);

  // Query results are stored in arrow format. If need arises, we can change this to a configuration option.
  final Map<String, Object> storageOptions = ImmutableMap.<String, Object>of("type", ArrowFormatPlugin.ARROW_DEFAULT_NAME);

  final CreateTableEntry createTableEntry = schema.createNewTable(Util.last(storeTable), WriterOptions.DEFAULT, storageOptions);

  final RelTraitSet traits = inputRel.getCluster().traitSet().plus(Rel.LOGICAL);
  return new WriterRel(inputRel.getCluster(), traits, inputRel, createTableEntry, inputRel.getRowType());
}
Project: dremio-oss    File: PathUtils.java
/**
 * Convert fs path to list of strings.
 * /a/b/c -> [a,b,c]
 * @param fsPath a string
 * @return list of path components
 */
public static List<String> toPathComponents(String fsPath) {
  if (fsPath == null ) {
    return EMPTY_SCHEMA_PATHS;
  }

  final StrTokenizer tokenizer = new StrTokenizer(fsPath, SLASH_CHAR, SqlUtils.QUOTE).setIgnoreEmptyTokens(true);
  return tokenizer.getTokenList();
}
Project: rsterminology    File: ParsedMedicationBuilder.java
static String[] _tokenizeString(String string) {
    final StrTokenizer _tokenizer = new StrTokenizer().
            setDelimiterMatcher(StrMatcher.trimMatcher()).
            setQuoteMatcher(StrMatcher.quoteMatcher()).
            setTrimmerMatcher(StrMatcher.trimMatcher()).
            setIgnoredMatcher(StrMatcher.quoteMatcher());
    _tokenizer.reset(string.toLowerCase());
    return _tokenizer.getTokenArray();
}
Project: gvnix1    File: FinderCommands.java
@CliCommand(value = "finder list", help = "List all finders for a given target (must be an entity)")
public SortedSet<String> listFinders(
        @CliOption(key = "class", mandatory = false, unspecifiedDefaultValue = "*", optionContext = UPDATE_PROJECT, help = "The controller or entity for which the finders are generated") final JavaType typeName,
        @CliOption(key = { "", "depth" }, mandatory = false, unspecifiedDefaultValue = "1", specifiedDefaultValue = "1", help = "The depth of attribute combinations to be generated for the finders") final Integer depth,
        @CliOption(key = "filter", mandatory = false, help = "A comma separated list of strings that must be present in a filter to be included") final String filter) {

    Validate.isTrue(depth >= 1, "Depth must be at least 1");
    Validate.isTrue(depth <= 3, "Depth must not be greater than 3");

    final SortedSet<String> finders = finderOperations.listFindersFor(
            typeName, depth);
    if (StringUtils.isBlank(filter)) {
        return finders;
    }

    final Set<String> requiredEntries = new HashSet<String>();
    final String[] filterTokens = new StrTokenizer(filter, ",")
            .getTokenArray();
    for (final String requiredString : filterTokens) {
        requiredEntries.add(requiredString.toLowerCase());
    }
    if (requiredEntries.isEmpty()) {
        return finders;
    }

    final SortedSet<String> result = new TreeSet<String>();
    for (final String finder : finders) {
        required: for (final String requiredEntry : requiredEntries) {
            if (finder.toLowerCase().contains(requiredEntry)) {
                result.add(finder);
                break required;
            }
        }
    }
    return result;
}
Project: gvnix1    File: AntPathMatcher.java
/**
 * Given a pattern and a full path, determine the pattern-mapped part.
 * <p>
 * For example:
 * <ul>
 * <li>'<code>/docs/cvs/commit.html</code>' and '
 * <code>/docs/cvs/commit.html</code> -> ''</li>
 * <li>'<code>/docs/*</code>' and '<code>/docs/cvs/commit</code> -> '
 * <code>cvs/commit</code>'</li>
 * <li>'<code>/docs/cvs/*.html</code>' and '
 * <code>/docs/cvs/commit.html</code> -> '<code>commit.html</code>'</li>
 * <li>'<code>/docs/**</code>' and '<code>/docs/cvs/commit</code> -> '
 * <code>cvs/commit</code>'</li>
 * <li>'<code>/docs/**\/*.html</code>' and '
 * <code>/docs/cvs/commit.html</code> -> '<code>cvs/commit.html</code>'</li>
 * <li>'<code>/*.html</code>' and '<code>/docs/cvs/commit.html</code> -> '
 * <code>docs/cvs/commit.html</code>'</li>
 * <li>'<code>*.html</code>' and '<code>/docs/cvs/commit.html</code> -> '
 * <code>/docs/cvs/commit.html</code>'</li>
 * <li>'<code>*</code>' and '<code>/docs/cvs/commit.html</code> -> '
 * <code>/docs/cvs/commit.html</code>'</li>
 * </ul>
 * <p>
 * Assumes that {@link #match} returns <code>true</code> for '
 * <code>pattern</code>' and '<code>path</code>', but does
 * <strong>not</strong> enforce this.
 */
public String extractPathWithinPattern(final String pattern,
        final String path) {
    final String[] patternParts = new StrTokenizer(pattern, pathSeparator)
            .setIgnoreEmptyTokens(true).getTokenArray();
    final String[] pathParts = new StrTokenizer(path, pathSeparator)
            .setIgnoreEmptyTokens(true).getTokenArray();

    final StringBuilder builder = new StringBuilder();

    // Add any path parts that have a wildcarded pattern part.
    int puts = 0;
    for (int i = 0; i < patternParts.length; i++) {
        final String patternPart = patternParts[i];
        if ((patternPart.indexOf('*') > -1 || patternPart.indexOf('?') > -1)
                && pathParts.length >= i + 1) {
            if (puts > 0 || i == 0 && !pattern.startsWith(pathSeparator)) {
                builder.append(pathSeparator);
            }
            builder.append(pathParts[i]);
            puts++;
        }
    }

    // Append any trailing path parts.
    for (int i = patternParts.length; i < pathParts.length; i++) {
        if (puts > 0 || i > 0) {
            builder.append(pathSeparator);
        }
        builder.append(pathParts[i]);
    }

    return builder.toString();
}
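
A usage sketch matching the javadoc examples above, assuming a default-constructed matcher whose pathSeparator is "/":

// Illustrative only; the default pathSeparator is assumed to be "/".
AntPathMatcher matcher = new AntPathMatcher();
matcher.extractPathWithinPattern("/docs/*", "/docs/cvs/commit");               // -> "cvs/commit"
matcher.extractPathWithinPattern("/docs/cvs/*.html", "/docs/cvs/commit.html"); // -> "commit.html"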
Project: atsd-jdbc    File: AtsdConnectionInfo.java
private List<String> splitTablePatterns() {
    final String value = rawTables();
    if (value == null) {
        return Collections.emptyList();
    } else {
        return new StrTokenizer(value, ',', '"').getTokenList();
    }
}
Project: tstconfig    File: Config.java
public void parseKeyValue(String line) {
    int keySeparatorIndex = line.indexOf(keySeparator);
    String key;
    String valueString;
    if (keySeparatorIndex < 0) {
        if (keySeparatorOptional) {
            key = line.trim();
            valueString = "";
        } else {
            return;
        }
    } else {
        key = line.substring(0, keySeparatorIndex).trim();
        valueString = line.substring(
                keySeparatorIndex + keySeparator.length()
        ).trim();
    }

    String[] values;
    if (separator == null) {
        values = new String[]{valueString};
    } else {
        StrTokenizer tokenizer = createStrTokenizer(valueString);
        values = tokenizer.getTokenArray();
    }

    String[] result = new String[values.length + 1];
    result[0] = key;
    System.arraycopy(values, 0, result, 1, values.length);

    storeLine(result);
}
Project: dwca-io    File: StrTokenizerTest.java
@Test
public void testCsvQuoted() throws IOException {
  StrTokenizer tokenizer = new StrTokenizer();
  tokenizer.setDelimiterString(",");
  tokenizer.setQuoteChar('"');
  tokenizer.setEmptyTokenAsNull(true);
  tokenizer.setIgnoreEmptyTokens(false);

  tokenizer.reset("121,432423, 9099053,\"Frieda karla L.,DC.\",Ahrens");
  String[] columns = tokenizer.getTokenArray();
  assertEquals("121", columns[0]);
  assertEquals("432423", columns[1]);
  assertEquals(" 9099053", columns[2]);
  assertEquals("Frieda karla L.,DC.", columns[3]);
  assertEquals("Ahrens", columns[4]);

  tokenizer.reset("   ,4321");
  columns = tokenizer.getTokenArray();
  assertEquals("   ", columns[0]);
  assertEquals("4321", columns[1]);

  tokenizer.reset(" ,,,,zzz  ");
  columns = tokenizer.getTokenArray();
  assertEquals(" ", columns[0]);
  assertNull(columns[1]);
  assertNull(columns[2]);
  assertNull(columns[3]);
  assertEquals("zzz  ", columns[4]);

  tokenizer.reset(",,,,zzz  ");
  columns = tokenizer.getTokenArray();
  assertNull(columns[0]);
  assertNull(columns[1]);
  assertNull(columns[2]);
  assertNull(columns[3]);
  assertEquals("zzz  ", columns[4]);
}
Project: dwca-io    File: StrTokenizerTest.java
@Test
public void testPipes() throws IOException {
  StrTokenizer tokenizer = new StrTokenizer();
  tokenizer.setDelimiterChar('|');
  tokenizer.setQuoteChar('"');
  tokenizer.setEmptyTokenAsNull(true);
  tokenizer.setIgnoreEmptyTokens(false);

  tokenizer.reset("121|432423| 9099053|\"Frieda karla L.|DC.\"|Ahrens");
  String[] columns = tokenizer.getTokenArray();
  assertEquals("121", columns[0]);
  assertEquals("432423", columns[1]);
  assertEquals(" 9099053", columns[2]);
  assertEquals("Frieda karla L.|DC.", columns[3]);
  assertEquals("Ahrens", columns[4]);

  tokenizer.reset("   |4321");
  columns = tokenizer.getTokenArray();
  assertEquals("   ", columns[0]);
  assertEquals("4321", columns[1]);

  tokenizer.reset(" ||||zzz  ");
  columns = tokenizer.getTokenArray();
  assertEquals(" ", columns[0]);
  assertNull(columns[1]);
  assertNull(columns[2]);
  assertNull(columns[3]);
  assertEquals("zzz  ", columns[4]);

  tokenizer.reset("||||zzz  ");
  columns = tokenizer.getTokenArray();
  assertNull(columns[0]);
  assertNull(columns[1]);
  assertNull(columns[2]);
  assertNull(columns[3]);
  assertEquals("zzz  ", columns[4]);
}
Project: dwca-io    File: StrTokenizerTest.java
@Test
public void testTabQuoted() throws IOException {
  StrTokenizer tokenizer = new StrTokenizer();
  tokenizer.setDelimiterString("\t");
  tokenizer.setQuoteChar('"');
  tokenizer.setEmptyTokenAsNull(true);
  tokenizer.setIgnoreEmptyTokens(false);

  tokenizer.reset("121\t432423\t 9099053\t\"Frieda karla L.,DC.\"\tAhrens");
  String[] columns = tokenizer.getTokenArray();
  assertEquals("121", columns[0]);
  assertEquals("432423", columns[1]);
  assertEquals(" 9099053", columns[2]);
  assertEquals("Frieda karla L.,DC.", columns[3]);
  assertEquals("Ahrens", columns[4]);

  tokenizer.reset("   \t4321");
  columns = tokenizer.getTokenArray();
  assertEquals("   ", columns[0]);
  assertEquals("4321", columns[1]);

  tokenizer.reset(" \t\t\t\tzzz  ");
  columns = tokenizer.getTokenArray();
  assertEquals(" ", columns[0]);
  assertNull(columns[1]);
  assertNull(columns[2]);
  assertNull(columns[3]);
  assertEquals("zzz  ", columns[4]);

  tokenizer.reset("\t\t\t\tzzz  ");
  columns = tokenizer.getTokenArray();
  assertNull(columns[0]);
  assertNull(columns[1]);
  assertNull(columns[2]);
  assertNull(columns[3]);
  assertEquals("zzz  ", columns[4]);
}
Project: dwca-io    File: StrTokenizerTest.java
@Test
public void testTabUnquoted() throws IOException {
  StrTokenizer tokenizer = new StrTokenizer();
  tokenizer.setDelimiterString("\t");
  tokenizer.setEmptyTokenAsNull(true);
  tokenizer.setIgnoreEmptyTokens(false);

  tokenizer.reset("121\t432423\t 9099053\t\"Frieda karla L.,DC.\"\tAhrens");
  String[] columns = tokenizer.getTokenArray();
  assertEquals("121", columns[0]);
  assertEquals("432423", columns[1]);
  assertEquals(" 9099053", columns[2]);
  assertEquals("\"Frieda karla L.,DC.\"", columns[3]);
  assertEquals("Ahrens", columns[4]);

  tokenizer.reset("   \t4321");
  columns = tokenizer.getTokenArray();
  assertEquals("   ", columns[0]);
  assertEquals("4321", columns[1]);

  tokenizer.reset(" \t\t\t\tzzz  ");
  columns = tokenizer.getTokenArray();
  assertEquals(" ", columns[0]);
  assertNull(columns[1]);
  assertNull(columns[2]);
  assertNull(columns[3]);
  assertEquals("zzz  ", columns[4]);

  tokenizer.reset("\t\t\t\tzzz  ");
  columns = tokenizer.getTokenArray();
  assertNull(columns[0]);
  assertNull(columns[1]);
  assertNull(columns[2]);
  assertNull(columns[3]);
  assertEquals("zzz  ", columns[4]);
}
Project: cs-actions    File: ProcessExecutor.java
private List<String> processCommand(String commandLine) {
    List<String> command = new ArrayList<>(arguments);
    if(!isEmpty(commandLine)) {
        command.addAll(new StrTokenizer(commandLine, ',', '"').getTokenList());
    }

    return command;
}
Project: jfunk    File: CsvDataSource.java
/**
 * Loads the CSV file from the file system.
 */
public void load() throws IOException {
    lines = Lists.newLinkedList();
    headers = null;

    StrTokenizer st = StrTokenizer.getCSVInstance();
    st.setDelimiterChar(';');

    // Default encoding is used (--> UTF-8).
    BufferedReader br = null;
    try {
        br = new BufferedReader(new FileReader(fileName));
        for (String line = null; (line = br.readLine()) != null;) {
            String trimmedLine = StringUtils.trimToNull(line);
            if (trimmedLine == null || trimmedLine.startsWith("#")) {
                continue;
            }
            st.reset(line);
            ArrayList<String> tokens = Lists.newArrayList(st.getTokenArray());
            if (headers == null) {
                headers = tokens;
            } else {
                lines.add(tokens);
            }
        }
    } finally {
        IOUtils.closeQuietly(br);
    }
}
Project: jfunk    File: CsvDataProcessor.java
private List<Column> initColumns(final StrTokenizer st, final String headerLine) {
    st.reset(headerLine);

    String[] headers = st.getTokenArray();
    List<Column> columns = newArrayListWithCapacity(headers.length);
    for (String header : headers) {
        columns.add(new Column(header));
    }
    return columns;
}
Project: perfload-perfalyzer    File: EmailReporter.java
private List<? extends List<String>> loadData(final File file) throws IOException {
    try (BufferedReader br = newReader(file, Charsets.UTF_8)) {
        List<List<String>> rows = newArrayList();
        StrTokenizer tokenizer = StrTokenizer.getCSVInstance();
        tokenizer.setDelimiterChar(DELIMITER);

        for (String line; (line = br.readLine()) != null; ) {
            tokenizer.reset(line);
            List<String> tokenList = tokenizer.getTokenList();
            rows.add(tokenList);
        }

        return rows;
    }
}
Project: perfload-perfalyzer    File: PerfAlyzer.java
private void extractFilesForMarkers() {
    if (!markers.isEmpty()) {
        listPerfAlyzerFiles(normalizedDir).stream().filter(perfAlyzerFile -> {
            // GC logs cannot be split up here; their markers need to be
            // handled explicitly later.
            // Load profiles contain the markers themselves and thus need
            // to be filtered out as well.
            String fileName = perfAlyzerFile.getFile().getName();
            return !fileName.contains("gclog") && !fileName.contains("[loadprofile]");
        }).forEach(perfAlyzerFile -> markers.forEach(marker -> {
            PerfAlyzerFile markerFile = perfAlyzerFile.copy();
            markerFile.setMarker(marker.getName());
            Path destPath = normalizedDir.toPath().resolve(markerFile.getFile().toPath());

            try (WritableByteChannel destChannel = newByteChannel(destPath, CREATE, WRITE)) {
                Path srcPath = normalizedDir.toPath().resolve(perfAlyzerFile.getFile().toPath());
                StrTokenizer tokenizer = StrTokenizer.getCSVInstance();
                tokenizer.setDelimiterChar(';');
                try (Stream<String> lines = Files.lines(srcPath, UTF_8)) {
                    lines.filter(line -> {
                        try {
                            tokenizer.reset(line);
                            String timestampString = tokenizer.nextToken();
                            long timestamp = Long.parseLong(timestampString);
                            return marker.getLeftMillis() <= timestamp && marker.getRightMillis() > timestamp;
                        } catch (NumberFormatException ex) {
                            LOG.error("Invalid data line: {}", line);
                            return false;
                        }
                    }).forEach(line -> writeLineToChannel(destChannel, line, UTF_8));
                }
            } catch (IOException e) {
                throw new UncheckedIOException(e);
            }
        }));
    }
}
Project: perfload-perfalyzer    File: PerfAlyzerUtils.java
public static Map<String, String> readAggregatedMap(final File executionsFile, final Charset charset) throws IOException {
    final StrTokenizer tokenizer = StrTokenizer.getCSVInstance();
    tokenizer.setDelimiterChar(';');

    Map<String, String> result = newHashMapWithExpectedSize(11);

    List<String> lines = Files.readLines(executionsFile, charset);
    String[] headers = null;

    for (String line : lines) {
        tokenizer.reset(line);
        String[] tokens = tokenizer.getTokenArray();

        if (headers == null) {
            headers = tokens;
        } else {
            String operation = tokens[0];
            for (int i = 1; i < headers.length; ++i) {
                result.put(operation + "." + headers[i], tokens[i]);
            }
        }
    }

    return result;
}
Project: perfload-core    File: XmlConfigReader.java
private ListMultimap<ProcessKey, LoadProfileEvent> readLoadProfileEvents(final Element testplan) throws IOException {
    ListMultimap<ProcessKey, LoadProfileEvent> eventsByProcess = ArrayListMultimap.create();
    String loadProfile = testplan.elementTextTrim("loadProfile");

    // relative to testplan
    File loadProfileConfigFile = new File(new File(testplanFile.getParentFile(), "loadprofiles"), loadProfile);

    try (BufferedReader br = new BufferedReader(new InputStreamReader(new FileInputStream(loadProfileConfigFile), "UTF-8"))) {
        StrTokenizer st = StrTokenizer.getCSVInstance();
        st.setDelimiterChar(';');

        for (String line = null; (line = br.readLine()) != null;) {
            // ignore lines that are blank, commented out, or represent markers
            if (isBlank(line) || startsWith(line, "#") || MARKER_PATTERN.matcher(line).matches()) {
                continue;
            }

            st.reset(line);
            String[] tokens = st.getTokenArray();

            long startTime = Long.parseLong(tokens[0]);
            String operation = tokens[1];
            String target = tokens[2];
            int daemonId = Integer.parseInt(tokens[3]);
            int processId = Integer.parseInt(tokens[4]);

            eventsByProcess.put(new ProcessKey(daemonId, processId), new LoadProfileEvent(startTime, operation, target,
                    daemonId, processId));
        }
    }

    return eventsByProcess;
}
Project: dremio-oss    File: SubSchemaWrapper.java
/**
 * Parse the top-level schema name back into a subschema path.
 * @param topLevelSchemaName
 * @return
 */
public static List<String> toSubSchemaPath(String topLevelSchemaName) {
  return new StrTokenizer(topLevelSchemaName, '.', '\'')
      .setIgnoreEmptyTokens(true)
      .getTokenList();
}
Project: dremio-oss    File: SqlUtils.java
/**
 * Parse the schema path into a list of schema entries.
 * @param schemaPath
 * @return
 */
public static List<String> parseSchemaPath(String schemaPath) {
  return new StrTokenizer(schemaPath, '.', SqlUtils.QUOTE)
      .setIgnoreEmptyTokens(true)
      .getTokenList();
}
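
Both helpers above rely on the quote character to keep delimiters inside a single path element; for example, assuming SqlUtils.QUOTE is '"':

// Illustrative only; the quote constant is assumed.
SqlUtils.parseSchemaPath("a.b.c");        // -> [a, b, c]
SqlUtils.parseSchemaPath("a.\"b.c\".d");  // -> [a, b.c, d]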
Project: gvnix1    File: ControllerCommands.java
@CliCommand(value = "web mvc scaffold", help = "Create a new scaffold Controller (ie where Roo maintains CRUD functionality automatically)")
public void webMvcScaffold(
        @CliOption(key = { "class", "" }, mandatory = true, help = "The path and name of the controller object to be created") final JavaType controller,
        @CliOption(key = "backingType", mandatory = false, optionContext = PROJECT, unspecifiedDefaultValue = "*", help = "The name of the form backing type which the controller exposes to the web tier") final JavaType backingType,
        @CliOption(key = "path", mandatory = false, help = "The base path under which the controller listens for RESTful requests (defaults to the simple name of the form backing object)") String path,
        @CliOption(key = "disallowedOperations", mandatory = false, help = "A comma separated list of operations (only create, update, delete allowed) that should not be generated in the controller") final String disallowedOperations) {

    final ClassOrInterfaceTypeDetails cid = typeLocationService
            .getTypeDetails(backingType);
    if (cid == null) {
        LOGGER.warning("The specified entity can not be resolved to a type in your project");
        return;
    }

    if (controller.getSimpleTypeName().equalsIgnoreCase(
            backingType.getSimpleTypeName())) {
        LOGGER.warning("Controller class name needs to be different from the class name of the form backing object (suggestion: '"
                + backingType.getSimpleTypeName() + "Controller')");
        return;
    }

    final Set<String> disallowedOperationSet = new HashSet<String>();
    if (!"".equals(disallowedOperations)) {
        final String[] disallowedOperationsTokens = new StrTokenizer(
                disallowedOperations, ",").getTokenArray();
        for (final String operation : disallowedOperationsTokens) {
            if (!("create".equals(operation) || "update".equals(operation) || "delete"
                    .equals(operation))) {
                LOGGER.warning("-disallowedOperations options can only contain 'create', 'update', 'delete': -disallowedOperations update,delete");
                return;
            }
            disallowedOperationSet.add(operation.toLowerCase());
        }
    }

    if (StringUtils.isBlank(path)) {
        final LogicalPath targetPath = PhysicalTypeIdentifier.getPath(cid
                .getDeclaredByMetadataId());
        final PluralMetadata pluralMetadata = (PluralMetadata) metadataService
                .get(PluralMetadata.createIdentifier(backingType,
                        targetPath));
        Validate.notNull(pluralMetadata,
                "Could not determine plural for '%s'",
                backingType.getSimpleTypeName());
        path = pluralMetadata.getPlural().toLowerCase();
    }
    else if (path.equals("/") || path.equals("/*")) {
        LOGGER.warning("Your application already contains a mapping to '/' or '/*' by default. Please provide a different path.");
        return;
    }
    else if (path.startsWith("/")) {
        path = path.substring(1);
    }

    controllerOperations.createAutomaticController(controller, backingType,
            disallowedOperationSet, path);
}
Project: tstconfig    File: Config.java
public void parseTokenized(String line) {
    StrTokenizer tokenizer = createStrTokenizer(line);
    String[] tokens = tokenizer.getTokenArray();
    storeLine(tokens);
}
Project: tstconfig    File: Config.java
public StrTokenizer createStrTokenizer(String valueString) {
    StrTokenizer tokenizer = new StrTokenizer(valueString, separator);
    tokenizer.setIgnoreEmptyTokens(ignoreEmptyTokens);
    tokenizer.setQuoteChar(quoteChar);
    return tokenizer;
}
Project: stratos    File: CommandCompleter.java
@Override
public int complete(String buffer, int cursor, List<CharSequence> candidates) {

    if (buffer.contains(CliConstants.RESOURCE_PATH_LONG_OPTION)) {
        return fileNameCompleter.complete(buffer, cursor, candidates);
    }
    if (logger.isTraceEnabled()) {
        logger.trace("Buffer: {}, cursor: {}", buffer, cursor);
        logger.trace("Candidates {}", candidates);
    }
    if (StringUtils.isNotBlank(buffer)) {
        // User is typing a command
        StrTokenizer strTokenizer = new StrTokenizer(buffer);
        String action = strTokenizer.next();
        Collection<String> arguments = argumentMap.get(action);
        if (arguments != null) {
            if (logger.isTraceEnabled()) {
                logger.trace("Arguments found for {}, Tokens: {}", action, strTokenizer.getTokenList());
                logger.trace("Arguments for {}: {}", action, arguments);
            }
            List<String> args = new ArrayList<String>(arguments);
            List<Completer> completers = new ArrayList<Completer>();
            for (String token : strTokenizer.getTokenList()) {
                boolean argContains = arguments.contains(token);
                if (token.startsWith("-") && !argContains) {
                    continue;
                }
                if (argContains) {
                    if (logger.isTraceEnabled()) {
                        logger.trace("Removing argument {}", token);
                    }
                    args.remove(token);
                }
                completers.add(new StringsCompleter(token));
            }
            completers.add(new StringsCompleter(args));
            Completer completer = new ArgumentCompleter(completers);
            return completer.complete(buffer, cursor, candidates);
        } else if (CliConstants.HELP_ACTION.equals(action)) {
            // For help action, we need to display available commands as arguments
            return helpCommandCompleter.complete(buffer, cursor, candidates);
        }
    }
    if (logger.isTraceEnabled()) {
        logger.trace("Using Default Completer...");
    }
    return defaultCommandCompleter.complete(buffer, cursor, candidates);
}
Project: jfunk    File: CsvDataProcessor.java
/**
 * Processes the specified CSV file. For every line but the header line (which is required), the
 * specified command is executed.
 * 
 * @param reader
 *            the reader for loading the CSV data
 * @param delimiter
 *            the column separator
 * @param quoteChar
 *            the quote character ('\0' for no quoting)
 * @param command
 *            the command (i.e. a Groovy closure if used in a Groovy script) to be executed for
 *            every processed line
 */
public void processFile(final Reader reader, final String delimiter, final char quoteChar, final Runnable command) {
    try {
        List<String> inputLines = CharStreams.readLines(reader);

        StrTokenizer st = StrTokenizer.getCSVInstance();
        st.setDelimiterString(delimiter);
        if (quoteChar != '\0') {
            st.setQuoteChar(quoteChar);
        } else {
            st.setQuoteMatcher(StrMatcher.noneMatcher());
        }

        // extract header
        String headerLine = inputLines.remove(0);
        List<Column> columns = initColumns(st, headerLine);
        for (String line : inputLines) {
            st.reset(line);
            String[] colArray = st.getTokenArray();
            int len = colArray.length;
            checkState(len == columns.size(), "Mismatch between number of header columns and number of line columns.");

            DataSource dataSource = dataSourceProvider.get();
            Configuration config = configProvider.get();
            for (int i = 0; i < len; ++i) {
                String value = StringUtils.trimToEmpty(colArray[i]);

                String dataSetKey = columns.get(i).dataSetKey;
                String key = columns.get(i).key;
                if (dataSetKey != null) {
                    if ("<auto>".equals(value)) {
                        dataSource.resetFixedValue(dataSetKey, key);
                    } else {
                        log.debug("Setting data set entry for " + this + " to value=" + value);
                        dataSource.setFixedValue(dataSetKey, key, value);
                    }
                } else {
                    log.debug("Setting property for " + this + " to value=" + value);
                    config.put(key, value);
                }
            }

            command.run();
        }
    } catch (IOException ex) {
        throw new JFunkException("Error processing CSV data", ex);
    }
}
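
A hypothetical invocation of processFile (the CSV content, the injected processor instance, and the column keys are made up):

// Hypothetical usage; csvDataProcessor is assumed to be an injected CsvDataProcessor.
Reader reader = new StringReader("username;password\nalice;secret\n");
csvDataProcessor.processFile(reader, ";", '\0', new Runnable() {
    @Override
    public void run() {
        // runs once per data line, after the column values have been applied
    }
});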
Project: crfsuite4j    File: CrfSuiteTraining.java
@Override
protected void appendTrainingData(CrfSuiteTrainer trainer) throws IOException {
    BufferedReader inReader = trainingDataReader instanceof BufferedReader
            ? (BufferedReader) trainingDataReader
            : new BufferedReader(trainingDataReader);
    // parse training data
    log.info("Parsing training data...");
    String line;
    int lineNumber = 0;
    List<List<Attribute>> items = newList();
    List<String> labels = newList();
    int instancesCounter = 0;
    while ((line = inReader.readLine()) != null) {
        lineNumber++;
        if (line.isEmpty()) {
            if (items.size() != labels.size()) {
                throw new IllegalStateException();
            }
            if (items.isEmpty()) {
                log.warn("Empty instance at line {}", lineNumber);
            } else {
                trainer.append(items, labels, 0);
                instancesCounter++;
            }
            items = newList();
            labels = newList();
        } else {
            StrTokenizer fSplitter = getFeatureSplitter(line);
            if (!fSplitter.hasNext()) {
                log.warn("Empty item at line {}", lineNumber);
                continue;
            }
            String label = fSplitter.next();
            List<Attribute> features = toAttributes(fSplitter, lineNumber);
            labels.add(label);
            items.add(features);
        }
    }
    // add last instance if any
    if (items.size() != labels.size()) {
        throw new IllegalStateException();
    }
    if (!items.isEmpty()) {
        trainer.append(items, labels, 0);
        instancesCounter++;
        items = null;
        labels = null;
    }
    // report
    log.info("{} instances have been read", instancesCounter);
}
Project: crfsuite4j    File: CrfSuiteTraining.java
private static StrTokenizer getFeatureSplitter(String src) {
    StrTokenizer result = new StrTokenizer(src);
    result.setDelimiterChar('\t');
    result.setIgnoreEmptyTokens(true);
    return result;
}
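
The expected training-data format is therefore one item per line, with the first tab-separated token being the label and the rest the attributes, and a blank line ending each instance. A made-up two-instance sample (labels and feature names are invented):

B-LOC	w[0]=New	w[1]=York
I-LOC	w[-1]=New	w[0]=York

O	w[0]=hello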
Project: perfload-perfalyzer    File: BinnedFilesMerger.java
public void mergeFiles() throws IOException {
    if (!inputDir.isDirectory()) {
        throw new IllegalArgumentException("The input File must be a directory");
    }

    StrTokenizer tokenizer = StrTokenizer.getCSVInstance();
    tokenizer.setDelimiterChar(DELIMITER);
    Map<String, FileChannel> destChannels = newHashMap();
    List<OutputStream> outputStreams = newArrayList();
    File[] filesInInputDirectory = inputDir.listFiles();

    try {
        for (File file : filesInInputDirectory) {
            FileInputStream fis = null;
            try {
                fis = new FileInputStream(file);
                for (Scanner scanner = new Scanner(fis, Charsets.UTF_8.name()); scanner.hasNext();) {
                    String line = scanner.nextLine();
                    tokenizer.reset(line);

                    List<String> tokenList = tokenizer.getTokenList();
                    String key = tokenList.get(sortCriteriaColumn);
                    FileChannel destChannel = destChannels.get(key);
                    if (destChannel == null) {
                        FileOutputStream fos = new FileOutputStream(new File(outputDir, FILE_TYPE + "_" + key + ".out"));
                        outputStreams.add(fos);
                        destChannel = fos.getChannel();
                        destChannels.put(key, destChannel);

                        // Write the header (has to be improved)
                        IoUtilities.writeLineToChannel(destChannel, getHeader(), Charsets.UTF_8);
                    }

                    StrBuilder outputLine = new StrBuilder();
                    for (String s : tokenList) {
                        StrBuilderUtils.appendEscapedAndQuoted(outputLine, DELIMITER, s);
                    }
                    IoUtilities.writeLineToChannel(destChannel, outputLine.toString(), Charsets.UTF_8);
                }
            } finally {
                closeQuietly(fis);
            }
        }
    } finally {
        outputStreams.forEach(IOUtils::closeQuietly);
    }

}
Project: perfload-perfalyzer    File: PerfAlyzerUtils.java
/**
 * Reads a semicolon-delimited CSV file into a list of {@link SeriesPoint} objects. The file
 * is expected to have two numeric columns which are parsed using the specified number
 * format; additional points are inserted so the data plots as a histogram.
 * 
 * @param file
 *            the file
 * @param charset
 *            the character set to read the file
 * @param numberFormat
 *            the number format for parsing the column values
 * @return the immutable result list
 */
public static List<SeriesPoint> readDataFile(final File file, final Charset charset, final NumberFormat numberFormat)
        throws IOException {
    final StrTokenizer tokenizer = StrTokenizer.getCSVInstance();
    tokenizer.setDelimiterChar(';');

    try (BufferedReader br = newReader(file, charset)) {
        boolean headerLine = true;
        List<SeriesPoint> result = newArrayListWithExpectedSize(200);

        for (String line; (line = br.readLine()) != null;) {
            try {
                if (headerLine) {
                    headerLine = false;
                } else {
                    tokenizer.reset(line);
                    String[] tokens = tokenizer.getTokenArray();
                    double x = numberFormat.parse(tokens[0]).doubleValue();
                    double y = numberFormat.parse(tokens[1]).doubleValue();

                    if (!result.isEmpty()) {
                        // additional point for histogram
                        SeriesPoint previousPoint = getLast(result);
                        result.add(new SeriesPoint(x, previousPoint.getY()));
                    }
                    tokenizer.reset(line);
                    result.add(new SeriesPoint(x, y));
                }
            } catch (ParseException ex) {
                throw new IOException("Error parsing number in file: " + file, ex);
            }
        }

        int size = result.size();
        if (size > 2) {
            // additional point at end for histogram
            SeriesPoint nextToLast = result.get(size - 3);
            SeriesPoint last = result.get(size - 1);
            double dX = last.getX().doubleValue() - nextToLast.getX().doubleValue();
            result.add(new SeriesPoint(last.getX().doubleValue() + dX, last.getY()));
        }
        return ImmutableList.copyOf(result);
    }
}
Project: perfload-perfalyzer    File: PerfAlyzerUtils.java
/**
 * Reads a semicolon-delimited CSV file into a map of series-value lists. Values for each
 * column are returned as lists in the map, keyed by the column header.
 * 
 * @param file
 *            the file
 * @param charset
 *            the character set to read the file
 * @param numberFormat
 *            the number format for parsing the column values
 * @param columnNames
 *            the columns to consider
 * 
 * @return an immutable map of lists of series values
 */
public static Map<String, List<SeriesPoint>> readDataFile(final File file, final Charset charset,
        final NumberFormat numberFormat,
        final Set<String> columnNames) throws IOException {
    final StrTokenizer tokenizer = StrTokenizer.getCSVInstance();
    tokenizer.setDelimiterChar(';');

    return readLines(file, charset, new LineProcessor<Map<String, List<SeriesPoint>>>() {
        private String[] headers;
        private final Map<String, List<SeriesPoint>> result = newHashMapWithExpectedSize(4);
        private int colCount;

        @Override
        public boolean processLine(final String line) throws IOException {
            try {
                tokenizer.reset(line);
                String[] tokens = tokenizer.getTokenArray();

                if (headers == null) {
                    headers = tokens;
                    colCount = tokens.length;
                } else {
                    Integer counter = Integer.valueOf(tokens[0]);
                    for (int i = 1; i < colCount; ++i) {
                        String header = headers[i];
                        if (columnNames.contains(header)) {
                            List<SeriesPoint> colValues = result.get(header);
                            if (colValues == null) {
                                colValues = newArrayListWithExpectedSize(50);
                                result.put(header, colValues);
                            }
                            colValues.add(new SeriesPoint(counter, numberFormat.parse(tokens[i])));
                        }
                    }
                }
                return true;
            } catch (ParseException ex) {
                throw new IOException("Error parsing number in file: " + file, ex);
            }
        }

        @Override
        public Map<String, List<SeriesPoint>> getResult() {
            return ImmutableMap.copyOf(result);
        }
    });
}