Java Class org.apache.avro.generic.GenericContainer Code Examples

Project: orizuru-java    File: Message.java
/**
 * Encode the message data provided.
 * 
 * @param <O> The type of the data to encode.
 * @param data The message data.
 * @throws EncodeMessageContentException Exception thrown if encoding the message content fails.
 */
public <O extends GenericContainer> void encode(O data) throws EncodeMessageContentException {

    try {

        ByteArrayOutputStream baos = new ByteArrayOutputStream();

        this.schema = data.getSchema();
        DatumWriter<O> outputDatumWriter = new SpecificDatumWriter<O>(this.schema);
        BinaryEncoder encoder = EncoderFactory.get().binaryEncoder(baos, null);
        outputDatumWriter.write(data, encoder);
        encoder.flush();

        this.data = baos.toByteArray();

    } catch (Exception ex) {
        throw new EncodeMessageContentException(ex);
    }

}
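For orientation, a minimal usage sketch of the encode method above; the generated Avro class Account, its name field, and the no-argument Message construction are hypothetical stand-ins, not taken from orizuru-java:

// "Account" is a hypothetical Avro-generated SpecificRecord class (assumption).
Account account = Account.newBuilder()
        .setName("anyName")
        .build();

Message message = new Message(); // constructor shown without args for brevity (assumption)
message.encode(account);         // captures the record's schema and stores the Avro binary payload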
Project: wherehowsX    File: AbstractKafkaAvroSerDe.java
protected Schema getSchema(Object object) {
  if (object == null) {
    return primitiveSchemas.get("Null");
  } else if (object instanceof Boolean) {
    return primitiveSchemas.get("Boolean");
  } else if (object instanceof Integer) {
    return primitiveSchemas.get("Integer");
  } else if (object instanceof Long) {
    return primitiveSchemas.get("Long");
  } else if (object instanceof Float) {
    return primitiveSchemas.get("Float");
  } else if (object instanceof Double) {
    return primitiveSchemas.get("Double");
  } else if (object instanceof CharSequence) {
    return primitiveSchemas.get("String");
  } else if (object instanceof byte[]) {
    return primitiveSchemas.get("Bytes");
  } else if (object instanceof GenericContainer) {
    return ((GenericContainer) object).getSchema();
  } else {
    throw new IllegalArgumentException(
        "Unsupported Avro type. Supported types are null, Boolean, Integer, Long, "
            + "Float, Double, String, byte[] and GenericContainer");
  }
}
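A plausible initializer for the primitiveSchemas lookup used above, built with Avro's Schema.create factory (a sketch; the actual map in wherehowsX may be built differently):

private static final Map<String, Schema> primitiveSchemas = new HashMap<>();
static {
  primitiveSchemas.put("Null", Schema.create(Schema.Type.NULL));
  primitiveSchemas.put("Boolean", Schema.create(Schema.Type.BOOLEAN));
  primitiveSchemas.put("Integer", Schema.create(Schema.Type.INT));
  primitiveSchemas.put("Long", Schema.create(Schema.Type.LONG));
  primitiveSchemas.put("Float", Schema.create(Schema.Type.FLOAT));
  primitiveSchemas.put("Double", Schema.create(Schema.Type.DOUBLE));
  primitiveSchemas.put("String", Schema.create(Schema.Type.STRING));
  primitiveSchemas.put("Bytes", Schema.create(Schema.Type.BYTES));
}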
Project: orizuru-java    File: Message.java
/**
 * Decode the message from the transport.
 * 
 * @param input The FinancialForce Orizuru Avro Transport message from which to decode the message.
 * @throws OrizuruConsumerException Exception thrown if decoding the message fails.
 */
public void decodeFromTransport(Transport input) throws OrizuruConsumerException {

    try {

        String messageSchemaName = input.getMessageSchemaName().toString();

        Class<?> avroClass = Class.forName(messageSchemaName);
        Constructor<?> constructor = avroClass.getConstructor();
        GenericContainer container = (GenericContainer) constructor.newInstance();
        this.schema = container.getSchema();

        ByteBuffer messageBuffer = input.getMessageBuffer();
        this.data = messageBuffer.array();

    } catch (Exception ex) {
        throw new DecodeMessageException(ex);
    }

}
Project: orizuru-java    File: AbstractConsumerTest.java
@Test
public void consume_callsThePublishMethodIfAPublisherIsDefined() throws Exception {

    // given
    IPublisher<GenericContainer> publisher = mock(IPublisher.class);
    Consumer consumer = new Consumer(QUEUE_NAME);
    consumer.setPublisher(publisher);

    byte[] body = VALID_MESSAGE.getBytes();

    // when
    consumer.consume(body);

    // then
    verify(publisher, times(1)).publish(any(), any());

}
Project: schema-evolution-samples    File: AvroCodec.java
private Schema  getSchema(Object payload){
    Schema schema = null;
    logger.debug("Obtaining schema for class {}", payload.getClass());
    if(GenericContainer.class.isAssignableFrom(payload.getClass())) {
        schema = ((GenericContainer) payload).getSchema();
        logger.debug("Avro type detected, using schema from object");
    }else{
        Integer id = localSchemaMap.get(payload.getClass().getName());
        if(id == null){
            if(!properties.isDynamicSchemaGenerationEnabled()) {
                throw new SchemaNotFoundException(String.format("No schema found on local cache for %s", payload.getClass()));
            }
            else{
                Schema localSchema = ReflectData.get().getSchema(payload.getClass());
                id = schemaRegistryClient.register(localSchema);
            }

        }
        schema = schemaRegistryClient.fetch(id);
    }

    return schema;
}
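The dynamic-generation branch above leans on Avro reflection. A standalone sketch of what ReflectData.get().getSchema(...) infers for a plain POJO (the Order class is hypothetical):

import org.apache.avro.Schema;
import org.apache.avro.reflect.ReflectData;

public class ReflectSchemaDemo {
  // Hypothetical POJO; ReflectData infers a record schema from its fields.
  public static class Order {
    int id;
    String customer;
  }

  public static void main(String[] args) {
    Schema schema = ReflectData.get().getSchema(Order.class);
    System.out.println(schema.toString(true)); // pretty-prints the inferred record schema as JSON
  }
}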
Project: SPADE    File: Kafka.java
@Override
public boolean putEdge(AbstractEdge edge){
    try{
        List<GenericContainer> recordsToPublish = new ArrayList<GenericContainer>();
        Edge.Builder edgeBuilder = Edge.newBuilder();
        edgeBuilder.setAnnotations(edge.getAnnotations());
        edgeBuilder.setChildVertexHash(String.valueOf(edge.getChildVertex().hashCode()));
        edgeBuilder.setParentVertexHash(String.valueOf(edge.getParentVertex().hashCode()));
        edgeBuilder.setHash(String.valueOf(edge.hashCode()));
        Edge kafkaEdge = edgeBuilder.build();
        recordsToPublish.add(GraphElement.newBuilder().setElement(kafkaEdge).build());
        return publishRecords(recordsToPublish) > 0;    
    }catch(Exception e){
        logger.log(Level.SEVERE, "Failed to publish edge : " + edge);
        return false;
    }
}
Project: SPADE    File: Kafka.java
protected int publishRecords(List<GenericContainer> genericContainers) {
    int recordCount = 0;
    if(genericContainers != null){
        for (GenericContainer genericContainer : genericContainers) {
            //            logger.log(Level.INFO, "Attempting to publish record {0}", genericContainer.toString());
            for(DataWriter dataWriter : dataWriters){
                try {
                    dataWriter.writeRecord(genericContainer);
                    recordCount += 1;
                    //                    logger.log(Level.INFO, "Sent record: ({0})", recordCount);
                } catch (Exception exception) {
                    logger.log(Level.INFO, "Failed to publish record {0}", genericContainer.toString());
                    logger.log(Level.WARNING, "{0}", exception);
                } 
            }                
        }
    }
    // Guard against an empty writer list to avoid division by zero.
    return dataWriters.isEmpty() ? 0 : (recordCount / dataWriters.size());
}
Project: SPADE    File: ServerWriter.java
public void writeRecord(GenericContainer genericContainer) throws Exception{
    /**
     * Publish the records in Kafka. Note how the serialization framework doesn't care about
     * the record type (any type from the union schema may be sent)
     */
    ProducerRecord<String, GenericContainer> record = new ProducerRecord<>(kafkaTopic, genericContainer);
    serverWriter.send(record, sendCallback); //asynchronous send

    if(reportingEnabled){
        recordCount++;
        long currentTime = System.currentTimeMillis();
        if((currentTime - lastReportedTime) >= reportEveryMs){
            printStats();
            lastReportedTime = currentTime;
            lastReportedRecordCount = recordCount;
        }
    }
}
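For context, one way a producer of ProducerRecord<String, GenericContainer> such as serverWriter could be configured; the serializer class and registry URL below are assumptions, not taken from SPADE:

Properties props = new Properties();
props.put("bootstrap.servers", "localhost:9092");               // assumption
props.put("key.serializer",
    "org.apache.kafka.common.serialization.StringSerializer");
props.put("value.serializer",
    "io.confluent.kafka.serializers.KafkaAvroSerializer");      // assumption: Confluent's Avro serializer
props.put("schema.registry.url", "http://localhost:8081");      // assumption
KafkaProducer<String, GenericContainer> serverWriter = new KafkaProducer<>(props);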
Project: support-metrics-client    File: AvroDeserializer.java
/**
 * Deserializes the bytes as an array of GenericContainer records.
 *
 * <p>The bytes include a standard Avro header that contains a magic byte, the
 * record's Avro schema (and so on), followed by the byte representation of the records.
 *
 * <p>Implementation detail: this method uses Avro's {@code DataFileStream}.
 *
 * @param schema Schema associated with this container
 * @return An array of GenericContainer records, or null if the input is null
 */
public GenericContainer[] deserialize(Schema schema, byte[] container) throws IOException {
  GenericContainer ret = null;
  List<GenericContainer> retList = new ArrayList<>();
  if (container != null) {
    DatumReader<GenericContainer> datumReader = new GenericDatumReader<>(schema);
    ByteArrayInputStream in = new ByteArrayInputStream(container);
    DataFileStream<GenericContainer> reader =
        new DataFileStream<GenericContainer>(in, datumReader);
    while (reader.hasNext()) {
      ret = reader.next(ret);
      retList.add(ret);
    }
    return retList.toArray(new GenericContainer[retList.size()]);
  } else {
    return null;
  }
}
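The byte layout this method expects (an Avro container header with the embedded schema, followed by data blocks) is what Avro's DataFileWriter produces. A minimal sketch of the matching serializer side, assuming any GenericContainer record; the project's actual AvroSerializer may differ:

public byte[] serialize(GenericContainer record) throws IOException {
  ByteArrayOutputStream out = new ByteArrayOutputStream();
  DatumWriter<GenericContainer> datumWriter = new GenericDatumWriter<>(record.getSchema());
  try (DataFileWriter<GenericContainer> writer = new DataFileWriter<>(datumWriter)) {
    writer.create(record.getSchema(), out); // writes the header, embedding the schema
    writer.append(record);
  }                                         // close() flushes the final data block
  return out.toByteArray();
}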
Project: support-metrics-client    File: AvroDeserializer.java
/**
 * Deserializes the bytes as an array of GenericContainer records. Assumes the
 * schema is embedded in the bytes.
 *
 * <p>The bytes include a standard Avro header that contains a magic byte, the
 * record's Avro schema (and so on), followed by the byte representation of the records.
 *
 * <p>Implementation detail: this method uses Avro's {@code DataFileStream}.
 *
 * @return An array of GenericContainer records, or null if the input is null
 */
public GenericContainer[] deserialize(byte[] container) throws IOException {
  GenericContainer ret = null;
  List<GenericContainer> retList = new ArrayList<>();
  if (container != null) {
    DatumReader<GenericContainer> datumReader = new GenericDatumReader<>();
    ByteArrayInputStream in = new ByteArrayInputStream(container);
    DataFileStream<GenericContainer> reader =
        new DataFileStream<GenericContainer>(in, datumReader);
    while (reader.hasNext()) {
      ret = reader.next(ret);
      retList.add(ret);
    }
    return retList.toArray(new GenericContainer[retList.size()]);
  } else {
    return null;
  }
}
Project: support-metrics-client    File: BasicCollectorTest.java
@Test
public void testCollectMetrics() {
  // Given
  TimeUtils time = new TimeUtils();
  Uuid uuid = new Uuid();
  long unixTimeAtTestStart = time.nowInUnixTime();
  Collector metricsCollector = new BasicCollector(mockServer, time, uuid);

  // When
  GenericContainer metricsRecord = metricsCollector.collectMetrics();

  // Then
  assertThat(metricsRecord).isInstanceOf(SupportKafkaMetricsBasic.class);
  assertThat(metricsRecord.getSchema()).isEqualTo(SupportKafkaMetricsBasic.getClassSchema());
  SupportKafkaMetricsBasic basicRecord = (SupportKafkaMetricsBasic) metricsRecord;
  assertThat(basicRecord.getTimestamp()).isBetween(unixTimeAtTestStart, time.nowInUnixTime());
  assertThat(basicRecord.getKafkaVersion()).isEqualTo(AppInfoParser.getVersion());
  assertThat(basicRecord.getConfluentPlatformVersion()).isEqualTo(Version.getVersion());
  assertThat(basicRecord.getCollectorState()).isEqualTo(metricsCollector.getRuntimeState().stateId());
  assertThat(basicRecord.getBrokerProcessUUID()).isEqualTo(uuid.toString());
}
Project: support-metrics-client    File: AvroSerializerTest.java
@Test
public void testSerializedDataIncludesAvroSchema() throws IOException {
  // Given
  GenericContainer anyValidRecord = new User("anyName");
  AvroDeserializer decoder = new AvroDeserializer();
  AvroSerializer encoder = new AvroSerializer();

  // When
  byte[] serializedRecord = encoder.serialize(anyValidRecord);

  // Then
  GenericContainer[] decodedRecords =
      decoder.deserialize(anyValidRecord.getSchema(), serializedRecord);
  assertThat(decodedRecords.length).isEqualTo(1);
  assertThat(anyValidRecord.getSchema()).isEqualTo(decodedRecords[0].getSchema());
}
Project: infinispan-avro    File: AbstractMarshaller.java
@Override
public byte[] objectToByteBuffer(Object o) throws IOException{
   ByteArrayOutputStream baos = new ByteArrayOutputStream();
   ObjectOutputStream oos = new ObjectOutputStream(baos);
   if (o instanceof GenericContainer) {
      oos.write(RECORD);
      GenericContainer container = (GenericContainer) o;
      oos.writeObject((container.getSchema().getFullName()));
      DatumWriter<GenericContainer> datumWriter = new GenericDatumWriter<>(container.getSchema());
      BinaryEncoder encoder = EncoderFactory.get().directBinaryEncoder(baos, null);
      datumWriter.write(container,encoder);
      encoder.flush();
   } else if (o instanceof Schema) {
      oos.write(SCHEMA);
      oos.writeObject(o.toString());
   } else {
      oos.write(OTHER);
      oos.writeObject(o);
   }
   return baos.toByteArray();
}
Project: support-metrics-common    File: AvroDeserializer.java
/**
 * Deserializes the bytes as an array of GenericContainer records.
 *
 * <p>The bytes include a standard Avro header that contains a magic byte, the
 * record's Avro schema (and so on), followed by the byte representation of the records.
 *
 * <p>Implementation detail: this method uses Avro's {@code DataFileStream}.
 *
 * @param schema Schema associated with this container
 * @return An array of GenericContainer records, or null if the input is null
 */
public GenericContainer[] deserialize(Schema schema, byte[] container) throws IOException {
  GenericContainer ret = null;
  List<GenericContainer> retList = new ArrayList<>();
  if (container != null) {
    DatumReader<GenericContainer> datumReader = new GenericDatumReader<>(schema);
    ByteArrayInputStream in = new ByteArrayInputStream(container);
    DataFileStream<GenericContainer> reader = new DataFileStream<GenericContainer>(
        in,
        datumReader
    );
    while (reader.hasNext()) {
      ret = reader.next(ret);
      retList.add(ret);
    }
    return retList.toArray(new GenericContainer[retList.size()]);
  } else {
    return null;
  }
}
Project: support-metrics-common    File: AvroDeserializer.java
/**
 * Deserializes the bytes as an array of GenericContainer records. Assumes the
 * schema is embedded in the bytes.
 *
 * <p>The bytes include a standard Avro header that contains a magic byte, the
 * record's Avro schema (and so on), followed by the byte representation of the records.
 *
 * <p>Implementation detail: this method uses Avro's {@code DataFileStream}.
 *
 * @return An array of GenericContainer records, or null if the input is null
 */
public GenericContainer[] deserialize(byte[] container) throws IOException {
  GenericContainer ret = null;
  List<GenericContainer> retList = new ArrayList<>();
  if (container != null) {
    DatumReader<GenericContainer> datumReader = new GenericDatumReader<>();
    ByteArrayInputStream in = new ByteArrayInputStream(container);
    DataFileStream<GenericContainer> reader = new DataFileStream<GenericContainer>(
        in,
        datumReader
    );
    while (reader.hasNext()) {
      ret = reader.next(ret);
      retList.add(ret);
    }
    return retList.toArray(new GenericContainer[retList.size()]);
  } else {
    return null;
  }
}
Project: support-metrics-common    File: AvroSerializerTest.java
@Test
public void testSerializedDataIncludesAvroSchema() throws IOException {
  // Given
  GenericContainer anyValidRecord = new User("anyName");
  AvroDeserializer decoder = new AvroDeserializer();
  AvroSerializer encoder = new AvroSerializer();

  // When
  byte[] serializedRecord = encoder.serialize(anyValidRecord);

  // Then
  GenericContainer[] decodedRecords =
      decoder.deserialize(anyValidRecord.getSchema(), serializedRecord);
  assertThat(decodedRecords.length).isEqualTo(1);
  assertThat(anyValidRecord.getSchema()).isEqualTo(decodedRecords[0].getSchema());
}
Project: iis    File: ProcessWrapper.java
private static void createOutputsIfDontExist(
        Map<String, PortType> outputPortsSpecification, 
        Map<String, Path> outputPortBindings, Configuration conf) throws IOException{
    FileSystem fs = FileSystem.get(conf);
    for(Map.Entry<String, Path> entry: outputPortBindings.entrySet()){
        Path path = entry.getValue();
        if(!fs.exists(path) || isEmptyDirectory(fs, path)){
            PortType rawType = outputPortsSpecification.get(entry.getKey());
            if(!(rawType instanceof AvroPortType)){
                throw new RuntimeException("The port \""+entry.getKey()+
                        "\" is not of Avro type and only Avro types are "+
                        "supported");
            }
            AvroPortType type = (AvroPortType) rawType;
            FileSystemPath fsPath = new FileSystemPath(fs, path);
            DataFileWriter<GenericContainer> writer = 
                    DataStore.create(fsPath, type.getSchema());
            writer.close();
        }
    }
}
Project: kaa    File: GenericAvroConverter.java
/**
 * Convert binary data using schema to Json.
 *
 * @param rawData    the encoded data
 * @param dataSchema the encoded data schema
 * @return the string
 */
public static String toJson(byte[] rawData, String dataSchema) {
  Schema schema = new Schema.Parser().parse(dataSchema);
  GenericAvroConverter<GenericContainer> converter = new
          GenericAvroConverter<GenericContainer>(schema);

  String json;

  try {
    GenericContainer record = converter.decodeBinary(rawData);
    json = converter.encodeToJson(record);
  } catch (IOException ex) {
    LOG.warn("Can't parse json data", ex);
    throw new RuntimeException(ex); //NOSONAR
  }
  return json;
}
Project: kaa    File: GenericAvroConverter.java
/**
 * Convert json string using schema to binary data.
 *
 * @param json       the json string
 * @param dataSchema the encoded data schema
 * @return the byte[]
 */
public static byte[] toRawData(String json, String dataSchema) {
  Schema schema = new Schema.Parser().parse(dataSchema);
  GenericAvroConverter<GenericContainer> converter = new
          GenericAvroConverter<GenericContainer>(schema);

  byte[] rawData;

  try {
    GenericContainer record = converter.decodeJson(json);
    rawData = converter.encode(record);
  } catch (IOException ex) {
    LOG.warn("Can't parse json data", ex);
    throw new RuntimeException(ex); //NOSONAR
  }
  return rawData;
}
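Taken together, the two converters above give a JSON/binary round trip. A short usage sketch with a minimal inline schema:

String schemaJson =
    "{\"type\":\"record\",\"name\":\"Log\",\"fields\":"
    + "[{\"name\":\"level\",\"type\":\"string\"}]}";

byte[] binary = GenericAvroConverter.toRawData("{\"level\": \"INFO\"}", schemaJson);
String json = GenericAvroConverter.toJson(binary, schemaJson); // back to the JSON form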
Project: kaa    File: DefaultDeltaCalculationAlgorithm.java
/**
 * Fill delta without merge.
 *
 * @param delta the delta
 * @param root  the root
 * @throws DeltaCalculatorException the delta calculator exception
 */
private void fillDeltaWithoutMerge(GenericRecord delta, GenericRecord root)
        throws DeltaCalculatorException {
  Schema rootSchema = root.getSchema();
  for (Field field : rootSchema.getFields()) {
    Object value = root.get(field.name());
    if (value instanceof List) {
      List<Object> values = (List<Object>) value;
      Schema arraySchema = getArraySchema(delta, field.name());
      GenericArray deltaArray = new GenericData.Array(values.size(), arraySchema);
      for (Object item : values) {
        if (item instanceof GenericContainer) {
          GenericContainer record = (GenericContainer) item;
          addComplexItemToArray(record, deltaArray);
        } else {
          deltaArray.add(item);
        }
      }
      delta.put(field.name(), deltaArray);
    } else if (value instanceof GenericContainer) {
      processComplexField(delta, field.name(), (GenericContainer) value, null, null);
    } else {
      delta.put(field.name(), value);
    }
  }
}
Project: cdk    File: ReadAvroBuilder.java
@Override
protected boolean doProcess(Record inputRecord, InputStream in) throws IOException {
  Record template = inputRecord.copy();
  removeAttachments(template);
  template.put(Fields.ATTACHMENT_MIME_TYPE, ReadAvroBuilder.AVRO_MEMORY_MIME_TYPE);
  Decoder decoder = prepare(in);
  try {
    while (true) {
      GenericContainer datum = datumReader.read(null, decoder);
      if (!extract(datum, template)) {
        return false;
      }
    }
  } catch (EOFException e) { 
    // end of stream reached; ignore
  } finally {
    in.close();
  }
  return true;
}
Project: cdk    File: ReadAvroBuilder.java
private Decoder prepare(InputStream in) throws IOException {
  Decoder decoder;
  if (isJson) {
    if (jsonDecoder == null) {
      jsonDecoder = DecoderFactory.get().jsonDecoder(writerSchema, in);
    } else {
      jsonDecoder.configure(in); // reuse for performance
    }
    decoder = jsonDecoder;
  } else {        
    binaryDecoder = DecoderFactory.get().binaryDecoder(in, binaryDecoder); // reuse for performance
    decoder = binaryDecoder;
  }

  if (datumReader == null) { // reuse for performance
    Schema readSchema = readerSchema != null ? readerSchema : writerSchema;
    datumReader = new FastGenericDatumReader<GenericContainer>(writerSchema, readSchema);  
    datumReader.setResolver(createResolver(writerSchema, readSchema));
  }
  return decoder;
}
Project: cdk    File: ExtractAvroPathsBuilder.java
@Override
    protected boolean doProcess(Record inputRecord) {
//      Preconditions.checkState(ReadAvroBuilder.AVRO_MEMORY_MIME_TYPE.equals(inputRecord.getFirstValue(Fields.ATTACHMENT_MIME_TYPE)));
      GenericContainer datum = (GenericContainer) inputRecord.getFirstValue(Fields.ATTACHMENT_BODY);
      Preconditions.checkNotNull(datum);
      Preconditions.checkNotNull(datum.getSchema());      
      Record outputRecord = inputRecord.copy();

      for (Map.Entry<String, Collection<String>> entry : stepMap.entrySet()) {
        String fieldName = entry.getKey();
        List<String> steps = (List<String>) entry.getValue();
        extractPath(datum, datum.getSchema(), fieldName, steps, outputRecord, 0);
      }

      // pass record to next command in chain:
      return getChild().process(outputRecord);
    }
Project: cdk    File: WriteAvroToByteArrayBuilder.java
private void writeContainerless(Record src, OutputStream dst) {
  try {
    GenericDatumWriter datumWriter = new GenericDatumWriter();
    Encoder encoder = null;
    Schema schema = null;
    for (Object attachment : src.get(Fields.ATTACHMENT_BODY)) {
      Preconditions.checkNotNull(attachment);
      GenericContainer datum = (GenericContainer) attachment;
      schema = getSchema(datum, schema);
      datumWriter.setSchema(schema);
      if (encoder == null) { // init
        if (format == Format.containerlessJSON) {
          encoder = EncoderFactory.get().jsonEncoder(schema, dst);
        } else {
          encoder = EncoderFactory.get().binaryEncoder(dst, null);
        }          
      } 
      datumWriter.write(datum, encoder);
    }
    encoder.flush();
  } catch (IOException e) {
    throw new MorphlineRuntimeException(e);
  }
}
Project: kiji-rest    File: AvroToJsonStringSerializer.java
/**
 * Returns an encoded JSON string for the given Avro object.
 *
 * @param record is the record to encode
 * @return the JSON string representing this Avro object.
 *
 * @throws IOException if there is an error.
 */
public static String getJsonString(GenericContainer record) throws IOException {
  ByteArrayOutputStream os = new ByteArrayOutputStream();
  JsonEncoder encoder = EncoderFactory.get().jsonEncoder(record.getSchema(), os);
  DatumWriter<GenericContainer> writer = new GenericDatumWriter<GenericContainer>();
  if (record instanceof SpecificRecord) {
    writer = new SpecificDatumWriter<GenericContainer>();
  }

  writer.setSchema(record.getSchema());
  writer.write(record, encoder);
  encoder.flush();
  String jsonString = new String(os.toByteArray(), Charset.forName("UTF-8"));
  os.close();
  return jsonString;
}
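The inverse direction (a JSON string back to a record) pairs Avro's JsonDecoder with a generic reader; a minimal sketch, not part of kiji-rest:

public static GenericRecord fromJsonString(String json, Schema schema) throws IOException {
  Decoder decoder = DecoderFactory.get().jsonDecoder(schema, json);
  DatumReader<GenericRecord> reader = new GenericDatumReader<>(schema);
  return reader.read(null, decoder);
}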
Project: wherehowsX    File: AbstractKafkaAvroSerDe.java
/**
 * Get the subject name used by the old Encoder interface, which relies only on the value type rather than the topic.
 */
protected static String getOldSubjectName(Object value) {
  if (value instanceof GenericContainer) {
    return ((GenericContainer) value).getSchema().getName() + "-value";
  } else {
    throw new SerializationException("Primitive types are not supported yet");
  }
}
Project: orizuru-java    File: Message.java
/**
 * Decode the message content.
 * 
 * @param <I> The type of the data that is decoded.
 * @return The message data.
 * @throws DecodeMessageContentException Exception thrown if decoding the message content fails.
 */
public <I extends GenericContainer> I decode() throws DecodeMessageContentException {

    try {

        DatumReader<I> messageDatumReader = new SpecificDatumReader<I>(schema);
        BinaryDecoder decoder = DecoderFactory.get().binaryDecoder(data, null);
        return messageDatumReader.read(null, decoder);

    } catch (Exception ex) {
        throw new DecodeMessageContentException(ex);
    }

}
Project: QDrill    File: AvroRecordReader.java
@Override
public void setup(final OperatorContext context, final OutputMutator output) throws ExecutionSetupException {
  operatorContext = context;
  writer = new VectorContainerWriter(output);

  try {
    reader = new DataFileReader<>(new FsInput(hadoop, fs.getConf()), new GenericDatumReader<GenericContainer>());
    logger.debug("Processing file : {}, start position : {}, end position : {} ", hadoop, start, end);
    reader.sync(this.start);
  } catch (IOException e) {
    throw new ExecutionSetupException(e);
  }
}
Project: QDrill    File: AvroRecordReader.java
@Override
public int next() {
  final Stopwatch watch = new Stopwatch().start();

  if (reader == null) {
    throw new IllegalStateException("Avro reader is not open.");
  }
  if (!reader.hasNext()) {
    return 0;
  }

  int recordCount = 0;
  writer.allocate();
  writer.reset();

  try {

    // XXX - Implement batch size

    for (GenericContainer container = null; reader.hasNext() && !reader.pastSync(end); recordCount++) {
      writer.setPosition(recordCount);
      container = reader.next(container);
      processRecord(container, container.getSchema());
    }

    writer.setValueCount(recordCount);

  } catch (IOException e) {
    throw new DrillRuntimeException(e);
  }

  logger.debug("Read {} records in {} ms", recordCount, watch.elapsed(TimeUnit.MILLISECONDS));
  return recordCount;
}
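Here sync(start) seeks to the first sync marker at or after the split start, and pastSync(end) reports whether the reader has crossed the split boundary; together they let several readers partition one Avro container file. A standalone sketch (the file path and split offsets are the caller's):

public static long countRecordsInSplit(File file, long start, long end) throws IOException {
  long count = 0;
  try (DataFileReader<GenericRecord> reader =
           new DataFileReader<>(file, new GenericDatumReader<GenericRecord>())) {
    reader.sync(start);                                 // seek to the first sync point >= start
    while (reader.hasNext() && !reader.pastSync(end)) { // stop once past the split boundary
      reader.next();
      count++;
    }
  }
  return count;
}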
Project: QDrill    File: AvroRecordReader.java
private void processRecord(final GenericContainer container, final Schema schema) {

    final Schema.Type type = schema.getType();

    switch (type) {
      case RECORD:
        process(container, schema, null, new MapOrListWriter(writer.rootAsMap()));
        break;
      default:
        throw new DrillRuntimeException("Root object must be record type. Found: " + type);
    }
  }
Project: dremio-oss    File: AvroRecordReader.java
@Override
public void setup(final OutputMutator output) throws ExecutionSetupException {
  writer = new VectorContainerWriter(output);

  try {
    reader = new DataFileReader<>(new FsInput(hadoop, fsConf), new GenericDatumReader<GenericContainer>());
    logger.debug("Processing file : {}, start position : {}, end position : {} ", hadoop, start, end);
    reader.sync(this.start);
  } catch (IOException e) {
    throw new ExecutionSetupException(e);
  }
}
Project: dremio-oss    File: AvroRecordReader.java
@Override
public int next() {
  final Stopwatch watch = Stopwatch.createStarted();

  if (reader == null) {
    throw new IllegalStateException("Avro reader is not open.");
  }
  if (!reader.hasNext()) {
    return 0;
  }

  int recordCount = 0;
  writer.allocate();
  writer.reset();

  try {
    for (GenericContainer container = null;
         recordCount < numRowsPerBatch && reader.hasNext() && !reader.pastSync(end);
         recordCount++) {
      writer.setPosition(recordCount);
      container = reader.next(container);
      processRecord(container, container.getSchema());
    }

    writer.setValueCount(recordCount);

  } catch (IOException e) {
    throw new RuntimeException(e);
  }

  logger.debug("Read {} records in {} ms", recordCount, watch.elapsed(TimeUnit.MILLISECONDS));
  return recordCount;
}
Project: dremio-oss    File: AvroRecordReader.java
private void processRecord(final GenericContainer container, final Schema schema) {

    final Schema.Type type = schema.getType();

    switch (type) {
      case RECORD:
        process(container, schema, null, new MapOrListWriterImpl(writer.rootAsMap()), fieldSelection);
        break;
      default:
        throw new RuntimeException("Root object must be record type. Found: " + type);
    }
  }
Project: registry    File: AvroUtils.java
public static Schema computeSchema(Object input) {
    Schema schema = null;
    if (input instanceof GenericContainer) {
        schema = ((GenericContainer) input).getSchema();
    } else {
        schema = AvroUtils.getSchemaForPrimitives(input);
    }
    return schema;
}
Project: drill    File: AvroDrillTable.java
public AvroDrillTable(String storageEngineName,
                     FileSystemPlugin plugin,
                     String userName,
                     FormatSelection selection) {
  super(storageEngineName, plugin, userName, selection);
  List<String> asFiles = selection.getAsFiles();
  Path path = new Path(asFiles.get(0));
  try {
    reader = new DataFileReader<>(new FsInput(path, plugin.getFsConf()), new GenericDatumReader<GenericContainer>());
  } catch (IOException e) {
    throw UserException.dataReadError(e).build(logger);
  }
}
Project: drill    File: AvroRecordReader.java
private DataFileReader<GenericContainer> getReader(final Path hadoop, final FileSystem fs) throws ExecutionSetupException {
  try {
    final UserGroupInformation ugi = ImpersonationUtil.createProxyUgi(this.opUserName, this.queryUserName);
    return ugi.doAs(new PrivilegedExceptionAction<DataFileReader<GenericContainer>>() {
      @Override
      public DataFileReader<GenericContainer> run() throws Exception {
        return new DataFileReader<>(new FsInput(hadoop, fs.getConf()), new GenericDatumReader<GenericContainer>());
      }
    });
  } catch (IOException | InterruptedException e) {
    throw new ExecutionSetupException(
      String.format("Error in creating avro reader for file: %s", hadoop), e);
  }
}
Project: drill    File: AvroRecordReader.java
@Override
public int next() {
  final Stopwatch watch = Stopwatch.createStarted();

  if (reader == null) {
    throw new IllegalStateException("Avro reader is not open.");
  }
  if (!reader.hasNext()) {
    return 0;
  }

  int recordCount = 0;
  writer.allocate();
  writer.reset();

  try {
    for (GenericContainer container = null;
         recordCount < DEFAULT_BATCH_SIZE && reader.hasNext() && !reader.pastSync(end);
         recordCount++) {
      writer.setPosition(recordCount);
      container = reader.next(container);
      processRecord(container, container.getSchema());
    }

    writer.setValueCount(recordCount);

  } catch (IOException e) {
    throw new DrillRuntimeException(e);
  }

  logger.debug("Read {} records in {} ms", recordCount, watch.elapsed(TimeUnit.MILLISECONDS));
  return recordCount;
}
Project: drill    File: AvroRecordReader.java
private void processRecord(final GenericContainer container, final Schema schema) {

    final Schema.Type type = schema.getType();

    switch (type) {
      case RECORD:
        process(container, schema, null, new MapOrListWriterImpl(writer.rootAsMap()), fieldSelection);
        break;
      default:
        throw new DrillRuntimeException("Root object must be record type. Found: " + type);
    }
  }
Project: ksql    File: BasicCollector.java
@Override
public GenericContainer collectMetrics() {
  KsqlVersionMetrics metricsRecord = new KsqlVersionMetrics();
  metricsRecord.setTimestamp(time);
  metricsRecord.setConfluentPlatformVersion(Version.getVersion());
  metricsRecord.setKsqlComponentType(moduleType.name());
  return metricsRecord;
}
Project: SPADE    File: Kafka.java
@Override
public boolean putVertex(AbstractVertex vertex){
    try{
        List<GenericContainer> recordsToPublish = new ArrayList<GenericContainer>();
        Vertex.Builder vertexBuilder = Vertex.newBuilder();
        vertexBuilder.setAnnotations(vertex.getAnnotations());
        vertexBuilder.setHash(String.valueOf(vertex.hashCode()));
        Vertex kafkaVertex = vertexBuilder.build();
        recordsToPublish.add(GraphElement.newBuilder().setElement(kafkaVertex).build());
        return publishRecords(recordsToPublish) > 0;
    }catch(Exception e){
        logger.log(Level.SEVERE, "Failed to publish vertex : " + vertex);
        return false;
    }
}