Java 类com.fasterxml.jackson.databind.SequenceWriter 实例源码

项目:csvsum    文件:CSVUpload.java   
/**
 * Dumps every row of the given table as CSV to {@code output}.
 *
 * @param tableName the table to dump; quoted into the SQL statement, so it
 *            must not come from untrusted input
 * @param output receives the CSV text; not closed by this method
 * @param conn an open JDBC connection; not closed by this method
 * @throws IOException if CSV writing fails
 * @throws SQLException if the query or result traversal fails
 */
static void dumpTable(String tableName, Writer output, Connection conn) throws IOException, SQLException {
    final String sql = "SELECT * FROM \"" + tableName + "\"";
    try (final Statement dumpStatement = conn.createStatement();
            final ResultSet results = dumpStatement.executeQuery(sql);) {
        final ResultSetMetaData metadata = results.getMetaData();
        final int columnCount = metadata.getColumnCount();
        final List<String> columnNames = new ArrayList<>(columnCount);
        for (int i = 1; i <= columnCount; i++) {
            columnNames.add(metadata.getColumnLabel(i));
        }
        // Close the SequenceWriter so its buffered CSV output is flushed to
        // the caller's Writer; the original leaked it and could lose output.
        try (final SequenceWriter csvWriter = CSVStream.newCSVWriter(output, columnNames)) {
            // Reused row buffer: cleared and refilled for every result row.
            final List<String> nextResult = new ArrayList<>(columnCount);
            while (results.next()) {
                for (int i = 1; i <= columnCount; i++) {
                    nextResult.add(results.getString(i));
                }
                csvWriter.write(nextResult);
                nextResult.clear();
            }
        }
    }
}
项目:csvsum    文件:AccessMapper.java   
/**
 * Prints debugging details for one Access table (columns, indexes, and a few
 * sample rows) and records each column as a mapping stub in {@code columnCsv}.
 */
private static void debugTable(Table table, SequenceWriter columnCsv) throws IOException {

    System.out.println("\tTable columns for " + table.getName());

    try {
        for (Column column : table.getColumns()) {
            System.out.println("\t\t" + column.getName());
            columnCsv.write(Arrays.asList(table.getName() + "." + column.getName(),
                    table.getName() + "." + column.getName(), "", ""));
        }

        Index primaryKey = table.getPrimaryKeyIndex();
        System.out.println(
                "\tFound primary key index for table: " + table.getName() + " named " + primaryKey.getName());
        // Only the primary key index contributes rows to the CSV output;
        // secondary indexes below are traced to stdout only (csvWriter = null).
        debugIndex(primaryKey, new HashSet<>(), columnCsv);

        for (Index index : table.getIndexes()) {
            if (index.getName().equals(primaryKey.getName())) {
                continue;
            }
            System.out.println("\tFound non-primary key index for table: " + table.getName() + " named "
                    + index.getName());
            debugIndex(index, new HashSet<>(), null);
        }
    } catch (IllegalArgumentException e) {
        // NOTE(review): presumably thrown by getPrimaryKeyIndex() when the
        // table has no primary key — confirm against the Jackcess API.
        System.out.println("No primary key index found for table: " + table.getName());
    }

    // Echo at most the first five rows as a quick visual sanity check.
    Cursor cursor = table.getDefaultCursor();
    int shown = 0;
    while (cursor.moveToNextRow()) {
        if (shown >= 5) {
            break;
        }
        System.out.println(cursor.getCurrentRow().toString());
        shown++;
    }
}
项目:csvsum    文件:AccessMapper.java   
/**
 * Prints one index's columns, optionally records them in {@code csvWriter},
 * and follows referenced indexes recursively. {@code visited} guards against
 * cycles between mutually referencing indexes.
 */
private static void debugIndex(Index index, Set<Index> visited, SequenceWriter csvWriter) throws IOException {
    visited.add(index);
    System.out.println("\t\tIndex columns:");
    StringBuilder joinedColumns = new StringBuilder();
    for (Index.Column column : index.getColumns()) {
        System.out.print("\t\t\t" + column.getName());

        if (joinedColumns.length() > 0) {
            joinedColumns.append(",");
        }
        joinedColumns.append(index.getTable().getName()).append(".").append(column.getName());
    }
    if (csvWriter != null) {
        String columns = joinedColumns.toString();
        csvWriter.write(Arrays.asList(columns, columns, "Access", columns));
    }

    System.out.println("");
    Index referenced = index.getReferencedIndex();
    if (referenced != null) {
        System.out.println("\t" + index.getName() + " references another index: " + referenced.getName());
        if (!visited.contains(referenced)) {
            visited.add(referenced);
            // Referenced indexes are traced for debugging only — never
            // written to the CSV (csvWriter deliberately null here).
            debugIndex(referenced, visited, null);
        }

    }

}
项目:csvsum    文件:CSVUtilTest.java   
@Test
public final void testWriteFullCode()
    throws Exception
{
    final List<String> headers = Arrays.asList("TestHeader1", "TestHeader2");
    final List<List<String>> dataSource = Arrays.asList();
    // Or alternatively,
    // List<List<String>> dataSource = Arrays.asList(Arrays.asList("TestValue1", "TestValue2"));
    final java.io.Writer writer = new StringWriter();

    // Build a schema with one column per header, headers enabled.
    CsvSchema.Builder builder = CsvSchema.builder();
    for (String header : headers) {
        builder = builder.addColumn(header);
    }
    final CsvSchema schema = builder.setUseHeader(true).build();

    try (SequenceWriter csvWriter = new CsvMapper().writerWithDefaultPrettyPrinter().with(schema).forType(
            List.class).writeValues(writer);)
    {
        for (List<String> row : dataSource) {
            csvWriter.write(row);
        }
        // An empty data source would otherwise emit nothing at all, so write
        // a single empty row to force the header line out.
        if (dataSource.isEmpty()) {
            csvWriter.write(Arrays.asList());
        }
    }
    System.out.println(writer.toString());
}
项目:asakusafw-compiler    文件:Persistent.java   
/**
 * Serializes {@code elements} as a sequence of JSON values of {@code type}
 * to {@code output}, omitting absent properties.
 */
static <T> void write(Class<T> type, Iterable<? extends T> elements, OutputStream output) throws IOException {
    final ObjectMapper mapper =
            new ObjectMapper().setSerializationInclusion(JsonInclude.Include.NON_ABSENT);
    try (SequenceWriter sequenceWriter = mapper.writerFor(type).writeValues(output).init(false)) {
        sequenceWriter.writeAll(elements);
    }
}
项目:teavm-libgdx    文件:AssetsCopier.java   
/**
 * Writes the root file descriptor's children as a JSON array to
 * {@code output}, using the instance's configured Jackson writer for the
 * element values and raw bytes for the array punctuation.
 *
 * @param output target stream; not closed by this method
 * @throws IOException if writing fails
 */
private void writeJsonFS(OutputStream output) throws IOException {
    SequenceWriter seqWriter = writer.writeValues(output);
    boolean first = true;
    output.write('[');
    for (FileDescriptor desc : rootFileDescriptor.getChildFiles()) {
        if (!first) {
            // Flush the SequenceWriter's internal buffer before writing the
            // separator directly to the stream, otherwise the ',' can appear
            // ahead of the previously written (still buffered) value.
            seqWriter.flush();
            output.write(',');
        }
        first = false;
        seqWriter.write(desc);
    }
    // Flush BEFORE the closing bracket — the original wrote ']' first, which
    // could place it ahead of the last buffered element.
    seqWriter.flush();
    output.write(']');
}
项目:handycapper    文件:Splits.java   
/**
 * Renders one race result's starters (with their split times) as a CSV
 * string, one row per starter.
 */
public static String createCSV(final CsvMapper csvMapper,
        final RaceResult raceResult) throws IOException {
    final CsvSchema schema =
            CsvSchema.builder().addColumns(getColumns(raceResult)).build().withHeader();

    final List<Map<String, Object>> rows = new ArrayList<>();

    final List<Starter> starters = raceResult.getStarters();
    if (starters != null) {
        for (final Starter starter : starters) {
            final Map<String, Object> row = new LinkedHashMap<>();
            row.put("date", raceResult.getRaceDate().toString());
            row.put("track", raceResult.getTrack().getCode());
            row.put("raceNumber", raceResult.getRaceNumber());
            row.put("name", starter.getHorse().getName());
            row.put("pp", starter.getPostPosition());
            row.put("weight", starter.getWeight().getWeightCarried());
            row.put("odds", TWO_DECIMALS.format(starter.getOdds()));
            row.put("position", starter.getOfficialPosition());

            // One extra column per split that actually carries a timed value,
            // keyed by the split's display text, in seconds.
            for (final FractionalPoint.Split split : starter.getSplits()) {
                if (split.hasFractionalValue()) {
                    row.put(split.getText(),
                            THREE_DECIMALS.format(split.getMillis() / 1000.0));
                }
            }

            rows.add(row);
        }
    }

    try (StringWriter stringWriter = new StringWriter()) {
        try (SequenceWriter sequenceWriter =
                     csvMapper.writer(schema).writeValues(stringWriter).writeAll(rows)) {
            sequenceWriter.flush();
        }
        return stringWriter.toString();
    }
}
项目:handycapper    文件:Summary.java   
/**
 * Renders a list of race results as a summary CSV string, one row per race.
 * Returns {@code null} when the input is null or empty (schema columns are
 * derived from the first result, so there is nothing to build without one).
 */
public static String createCSV(final CsvMapper csvMapper,
        final List<RaceResult> raceResults) throws IOException {
    if (raceResults == null || raceResults.isEmpty()) {
        return null;
    }

    final RaceResult first = raceResults.get(0);
    final CsvSchema schema =
            CsvSchema.builder().addColumns(getColumns(first)).build().withHeader();

    final List<Map<String, Object>> rows = new ArrayList<>();

    for (final RaceResult raceResult : raceResults) {
        final Map<String, Object> row = new LinkedHashMap<>();
        row.put("date", raceResult.getRaceDate().toString());
        row.put("track", raceResult.getTrack().getCode());
        row.put("raceNumber", raceResult.getRaceNumber());

        final RaceTypeNameBlackTypeBreed typeBreedName =
                raceResult.getRaceConditions().getRaceTypeNameBlackTypeBreed();
        row.put("breed", typeBreedName.getBreed());
        row.put("type", typeBreedName.getType());
        row.put("name", typeBreedName.getName());

        // 660 feet per furlong: distance is reported in furlongs.
        row.put("distance", TWO_DECIMALS.format((double) raceResult
                .getDistanceSurfaceTrackRecord().getRaceDistance().getFeet() / 660));
        row.put("surface", raceResult.getDistanceSurfaceTrackRecord().getSurface());
        row.put("trackCondition",
                raceResult.getDistanceSurfaceTrackRecord().getTrackCondition());
        row.put("runners", raceResult.getNumberOfRunners());

        final Starter winner = raceResult.getWinners().get(0);
        row.put("winner", winner.getHorse().getName());
        row.put("pp", winner.getPostPosition());
        row.put("jockey", winner.getJockey().getName());
        row.put("trainer", winner.getTrainer().getName());
        row.put("odds", TWO_DECIMALS.format(winner.getOdds()));

        row.put("time", raceResult.getWinningTime());

        rows.add(row);
    }

    try (StringWriter stringWriter = new StringWriter()) {
        try (SequenceWriter sequenceWriter =
                     csvMapper.writer(schema).writeValues(stringWriter).writeAll(rows)) {
            sequenceWriter.flush();
        }
        return stringWriter.toString();
    }
}
项目:libraries    文件:JsonObjectUtilities.java   
/**
 * Creates a lazily-initialized consumer that converts each accepted value
 * with {@code converter} and writes the converted values to
 * {@code outputStream} as a JSON value sequence wrapped in an array.
 *
 * <p>The generator and writer are created on the first {@code consume} call;
 * {@code initializer} and {@code closer} run against the generator at
 * initialization and close time respectively. {@code consume} returns
 * {@code false} (and writes nothing) when the converter yields {@code null}.
 */
public static <I, O> ICloseableConsumer<I, Boolean, IOException> consumer(
    final OutputStream outputStream,
    final Class<O> clazz,
    final IProcedure<JsonGenerator, IOException> initializer,
    final IConverter<I, O, IOException> converter,
    final IProcedure<JsonGenerator, IOException> closer) {
  return new ICloseableConsumer<I, Boolean, IOException>() {

    private boolean isClosed = false;
    private JsonGenerator generator;
    private SequenceWriter writer;

    @Override
    public void close() throws IOException {
      check();
      this.isClosed = true;
      // Close in reverse order of creation (closer hook, writer, generator),
      // chaining any exceptions so every close is attempted and the first
      // failure is still reported.
      IOException exception = JsonObjectUtilities.close(() -> closer.execute(this.generator), null);
      exception = JsonObjectUtilities.close(this.writer, exception);
      exception = JsonObjectUtilities.close(this.generator, exception);
      if (exception != null) {
        throw exception;
      }
    }

    @Override
    public Boolean consume(final I object) throws IOException {
      check();
      // Lazy init: no output is produced at all unless at least one value
      // is consumed.
      if (this.generator == null) {
        initialize();
      }
      final O value = converter.convert(object);
      if (value == null) {
        return false;
      }
      this.writer.write(value);
      return true;
    }

    private void check() throws IOException {
      // Guard: all operations are invalid once close() has run.
      if (this.isClosed) {
        throw new IOException("consumer is closed"); //$NON-NLS-1$
      }
    }

    private void initialize() throws IOException {
      // AUTO_CLOSE_TARGET is disabled so closing the generator does not
      // close the caller-owned outputStream.
      this.generator = new JsonFactory().createGenerator(outputStream).configure(Feature.AUTO_CLOSE_TARGET, false);
      initializer.execute(this.generator);
      // FLUSH_AFTER_WRITE_VALUE pushes each value to the stream as soon as
      // it is written, keeping output visible between consume() calls.
      this.writer = new ObjectMapper()
          .writerFor(clazz)
          .with(SerializationFeature.FLUSH_AFTER_WRITE_VALUE)
          .writeValuesAsArray(this.generator);
    }
  };
}
项目:csvsum    文件:CSVMapper.java   
/**
 * Streams CSV from {@code input}, applies the given value mappings to every
 * line, and writes the mapped lines as CSV to {@code output}. Progress is
 * printed every 1000 input lines.
 */
private static void runMapper(Reader input, List<ValueMapping> map, Writer output, boolean writeHeaders,
        List<String> outputHeaders, List<String> overrideHeaders, int headerLineCount, CsvMapper inputMapper,
        CsvSchema inputSchema) throws ScriptException, IOException {

    final Map<String, String> defaultValues = ValueMapping.getDefaultValuesFromList(map);
    final CsvSchema schema = CSVStream.buildSchema(outputHeaders, writeHeaders);
    final Writer writer = output;

    try (final SequenceWriter csvWriter = CSVStream.newCSVWriter(writer, schema);) {
        final List<String> inputHeaders = new ArrayList<>();
        // Reused buffers exposing the previously consumed (raw and mapped)
        // line to the mapping context; cleared and refilled on each write.
        final List<String> previousLine = new ArrayList<>();
        final List<String> previousMappedLine = new ArrayList<>();
        final JDefaultDict<String, Set<String>> primaryKeys = new JDefaultDict<>(k -> new HashSet<>());
        final JDefaultDict<String, JDefaultDict<String, AtomicInteger>> valueCounts = new JDefaultDict<>(
                k -> new JDefaultDict<>(v -> new AtomicInteger(0)));
        final AtomicInteger lineNumber = new AtomicInteger(0);
        final AtomicInteger filteredLineNumber = new AtomicInteger(0);
        final long startTime = System.currentTimeMillis();
        // Shared write path: records the line as "previous" state, then
        // emits the mapped line to the CSV writer.
        final BiConsumer<List<String>, List<String>> mapLineConsumer = Unchecked.biConsumer((l, m) -> {
            previousLine.clear();
            previousLine.addAll(l);
            previousMappedLine.clear();
            previousMappedLine.addAll(m);
            csvWriter.write(m);
        });
        CSVStream.parse(input, h -> inputHeaders.addAll(h), (h, l) -> {
            final int nextLineNumber = lineNumber.incrementAndGet();
            if (nextLineNumber % 1000 == 0) {
                double secondsSinceStart = (System.currentTimeMillis() - startTime) / 1000.0d;
                System.out.printf("%d\tSeconds since start: %f\tRecords per second: %f%n", nextLineNumber,
                        secondsSinceStart, nextLineNumber / secondsSinceStart);
            }
            // Optimistically count the line as unfiltered; rolled back below
            // if the mapping filters it out.
            final int nextFilteredLineNumber = filteredLineNumber.incrementAndGet();
            try {
                List<String> mapLine = ValueMapping.mapLine(new ValueMappingContext(inputHeaders, l, previousLine,
                        previousMappedLine, map, primaryKeys, valueCounts, nextLineNumber, nextFilteredLineNumber,
                        mapLineConsumer, outputHeaders, defaultValues, Optional.empty()));
                mapLineConsumer.accept(l, mapLine);
            } catch (final LineFilteredException e) {
                // Swallow line filtered exception and return null below to
                // eliminate it
                // We expect streamCSV to operate in sequential order, print
                // a warning if it doesn't
                boolean success = filteredLineNumber.compareAndSet(nextFilteredLineNumber,
                        nextFilteredLineNumber - 1);
                if (!success) {
                    System.out.println("Line numbers may not be consistent");
                }
            }
            return null;
        }, l -> {
        }, overrideHeaders, Collections.emptyList(), headerLineCount, inputMapper, inputSchema);
    }
}
项目:csvsum    文件:JSONMapper.java   
/**
 * Streams JSON from {@code input}, extracts one logical line per node under
 * {@code basePath} using per-field JSON pointers, applies the value mappings,
 * and writes the mapped lines as CSV to {@code output}. Progress is printed
 * every 1000 input records.
 */
private static void runMapper(Reader input, List<ValueMapping> map, Writer output, JsonPointer basePath,
        ObjectMapper jsonMapper, boolean writeHeaders) throws ScriptException, IOException {

    final List<String> inputHeaders = ValueMapping.getInputFieldsFromList(map);
    final List<String> outputHeaders = ValueMapping.getOutputFieldsFromList(map);
    final Map<String, String> defaultValues = ValueMapping.getDefaultValuesFromList(map);
    // Output field -> optional JSON pointer; empty input fields map to
    // Optional.empty() (no path to resolve, default value applies).
    final Map<String, Optional<JsonPointer>> fieldRelativePaths = map.stream()
            .collect(Collectors.toMap(ValueMapping::getOutputField,
                    nextMapping -> nextMapping.getInputField().trim().isEmpty() ? Optional.empty()
                            : Optional.of(JsonPointer.compile(nextMapping.getInputField()))));
    final CsvSchema schema = CSVStream.buildSchema(outputHeaders, writeHeaders);
    final Writer writer = output;

    try (final SequenceWriter csvWriter = CSVStream.newCSVWriter(writer, schema);) {
        // Reused buffers exposing the previously consumed (raw and mapped)
        // line to the mapping context; cleared and refilled on each write.
        final List<String> previousLine = new ArrayList<>();
        final List<String> previousMappedLine = new ArrayList<>();
        final JDefaultDict<String, Set<String>> primaryKeys = new JDefaultDict<>(k -> new HashSet<>());
        final JDefaultDict<String, JDefaultDict<String, AtomicInteger>> valueCounts = new JDefaultDict<>(
                k -> new JDefaultDict<>(v -> new AtomicInteger(0)));
        final AtomicInteger lineNumber = new AtomicInteger(0);
        final AtomicInteger filteredLineNumber = new AtomicInteger(0);
        final long startTime = System.currentTimeMillis();
        // Shared write path: records the line as "previous" state, then
        // emits the mapped line to the CSV writer.
        final BiConsumer<List<String>, List<String>> mapLineConsumer = Unchecked.biConsumer((l, m) -> {
            previousLine.clear();
            previousLine.addAll(l);
            previousMappedLine.clear();
            previousMappedLine.addAll(m);
            csvWriter.write(m);
        });
        JSONStream.parse(input, h -> {
        }, (node, headers, line) -> {
            final int nextLineNumber = lineNumber.incrementAndGet();
            if (nextLineNumber % 1000 == 0) {
                double secondsSinceStart = (System.currentTimeMillis() - startTime) / 1000.0d;
                System.out.printf("%d\tSeconds since start: %f\tRecords per second: %f%n", nextLineNumber,
                        secondsSinceStart, nextLineNumber / secondsSinceStart);
            }
            // Optimistically count the record as unfiltered; rolled back
            // below if the mapping filters it out.
            final int nextFilteredLineNumber = filteredLineNumber.incrementAndGet();
            try {
                List<String> mapLine = ValueMapping.mapLine(new ValueMappingContext(inputHeaders, line,
                        previousLine, previousMappedLine, map, primaryKeys, valueCounts, nextLineNumber,
                        nextFilteredLineNumber, mapLineConsumer, outputHeaders, defaultValues, Optional.of(node)));
                mapLineConsumer.accept(line, mapLine);
            } catch (final LineFilteredException e) {
                // Swallow line filtered exception and return null below to
                // eliminate it
                // We expect streamCSV to operate in sequential order, print
                // a warning if it doesn't
                boolean success = filteredLineNumber.compareAndSet(nextFilteredLineNumber,
                        nextFilteredLineNumber - 1);
                if (!success) {
                    System.out.println("Line numbers may not be consistent");
                }
            }
            return null;
        }, l -> {
        }, basePath, fieldRelativePaths, defaultValues, jsonMapper);
    }
}
项目:csvsum    文件:AccessMapper.java   
/**
 * Extracts every table in the given Access database to a CSV file in
 * {@code outputDir}, and writes an "AutoMapping-Columns.csv" mapping stub
 * listing every column of every table.
 *
 * @param input source Access database stream; copied to a temp file first
 *            (the temp file is always deleted afterwards)
 * @param outputDir directory receiving the generated CSV files
 * @param csvPrefix prefix prepended to every generated CSV file name
 * @param debug when true, prints table/index details to stdout via debugTable
 * @param addTableNamePrefix when true, CSV headers are "table.column" instead
 *            of bare column names
 * @throws IOException if the database cannot be read or a CSV written
 */
private static void dumpToCSVs(InputStream input, Path outputDir, String csvPrefix, boolean debug,
        boolean addTableNamePrefix) throws IOException {
    Path tempFile = Files.createTempFile("Source-accessdb", ".accdb");
    Files.copy(input, tempFile, StandardCopyOption.REPLACE_EXISTING);

    try {
        final CsvSchema schema = CSVStream
                .buildSchema(Arrays.asList("OldField", "NewField", "Language", "Mapping"));
        // Files.newBufferedWriter already buffers; the original wrapped it in
        // a second, never-flushed BufferedWriter for no benefit.
        try (final Database db = DatabaseBuilder.open(tempFile.toFile());
                final Writer columnCsv = Files
                        .newBufferedWriter(outputDir.resolve(csvPrefix + "AutoMapping-Columns.csv"));
                final SequenceWriter columnCsvWriter = CSVStream.newCSVWriter(columnCsv, schema);) {
            for (String tableName : db.getTableNames()) {
                Table table = db.getTable(tableName);

                if (debug) {
                    debugTable(table, columnCsvWriter);
                }

                System.out.println("");
                String csvName = csvPrefix + tableName + ".csv";
                Path csvPath = outputDir.resolve(csvName);
                System.out.println("Converting " + tableName + " to CSV: " + csvPath.toAbsolutePath().toString());

                // tempArray is reused: first filled with the header labels,
                // then overwritten with each row's values.
                String[] tempArray = new String[table.getColumnCount()];
                int x = 0;
                for (Column nextColumn : table.getColumns()) {
                    if (addTableNamePrefix) {
                        tempArray[x++] = tableName + "." + nextColumn.getName();
                    } else {
                        tempArray[x++] = nextColumn.getName();
                    }
                }

                final CsvSchema fullFileSchema = CSVStream.buildSchema(Arrays.asList(tempArray));
                try (final Writer fullFileCsv = Files.newBufferedWriter(csvPath);
                        final SequenceWriter fullFileCsvWriter = CSVStream
                                .newCSVWriter(fullFileCsv, fullFileSchema);) {
                    int rows = 0;
                    for (Row nextRow : table) {
                        int i = 0;
                        for (Object nextValue : nextRow.values()) {
                            if (nextValue == null) {
                                tempArray[i++] = null;
                            } else if (nextValue instanceof Date) {
                                // Access stores legacy java.util.Date values;
                                // normalize them to ISO-8601 local date-time.
                                tempArray[i++] = CSVUtil.oldDateToISO8601LocalDateTime((Date) nextValue);
                            } else {
                                tempArray[i++] = nextValue.toString();
                            }
                        }
                        fullFileCsvWriter.write(Arrays.asList(tempArray));
                        rows++;
                    }
                    System.out.println("Converted " + rows + " rows from table " + tableName);
                }
                System.out.println("");
                System.out.println("----------------------------");
            }
        }
    } finally {
        Files.deleteIfExists(tempFile);
    }
}