Java class org.apache.commons.io.input.BoundedInputStream — example source code
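Each snippet below wraps an existing InputStream in org.apache.commons.io.input.BoundedInputStream (Apache Commons IO) to cap how many bytes downstream code may consume. A minimal sketch of the core behavior (the demo class is illustrative, not taken from any project listed):

import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;
import org.apache.commons.io.IOUtils;
import org.apache.commons.io.input.BoundedInputStream;

public class BoundedInputStreamDemo {
    public static void main(String[] args) throws IOException {
        InputStream raw = new ByteArrayInputStream("0123456789".getBytes("UTF-8"));
        // Cap reads at 4 bytes; the wrapper reports EOF once the bound is reached.
        try (InputStream bounded = new BoundedInputStream(raw, 4)) {
            System.out.println(IOUtils.toString(bounded, "UTF-8")); // prints "0123"
        }
    }
}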

Project: trellis    File: FileBasedBinaryService.java
@Override
public Optional<InputStream> getContent(final IRI identifier, final List<Range<Integer>> ranges) {
    requireNonNull(ranges, "Byte ranges may not be null");
    return getFileFromIdentifier(identifier).map(file -> {
        try {
            if (ranges.isEmpty()) {
                return new FileInputStream(file);
            } else {
                final List<InputStream> iss = new ArrayList<>();
                for (final Range<Integer> r : ranges) {
                    final InputStream input = new FileInputStream(file);
                    final long skipped = input.skip(r.getMinimum());
                    LOGGER.debug("Skipped {} bytes", skipped);
                    iss.add(new BoundedInputStream(input, r.getMaximum() - r.getMinimum()));
                }
                return new SequenceInputStream(asEnumeration(iss.iterator()));
            }
        } catch (final IOException ex) {
            throw new UncheckedIOException(ex);
        }
    });
}
Project: mobile-store    File: BluetoothDownloader.java
@Override
protected InputStream getDownloadersInputStream() throws IOException {
    Request request = Request.createGET(sourcePath, connection);
    Response response = request.send();
    fileDetails = response.toFileDetails();

    // TODO: Manage the dependency which includes this class better?
    // Right now, I only needed the one class from apache commons.
    // There are countless classes online which provide this functionality,
    // including some which are available from the Android SDK - the only
    // problem is that they have a funky API which doesn't just wrap a
    // plain old InputStream (the class is ContentLengthInputStream -
    // whereas this BoundedInputStream is much more generic and useful
    // to us).
    BoundedInputStream stream = new BoundedInputStream(response.toContentStream(), fileDetails.getFileSize());
    stream.setPropagateClose(false);

    return stream;
}
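Note the setPropagateClose(false) call above: the caller can close the bounded wrapper without tearing down the underlying connection stream. A small sketch of what that flag changes (stream names are illustrative):

import java.io.ByteArrayInputStream;
import java.io.IOException;
import org.apache.commons.io.input.BoundedInputStream;

public class PropagateCloseDemo {
    public static void main(String[] args) throws IOException {
        ByteArrayInputStream underlying = new ByteArrayInputStream(new byte[16]);
        BoundedInputStream bounded = new BoundedInputStream(underlying, 8);
        // With propagateClose=false, closing the wrapper leaves `underlying` open,
        // so the connection behind it can be reused for further reads.
        bounded.setPropagateClose(false);
        bounded.close();
        System.out.println(underlying.read()); // still readable: prints 0, not -1
    }
}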
Project: S3Mock    File: FileStore.java
private void copyPart(final String bucket,
    final String key,
    final int from,
    final int to,
    final boolean useV4Signing,
    final File partFile) throws IOException {
  final int len = to - from + 1;
  final S3Object s3Object = resolveS3Object(bucket, key);

  try (InputStream sourceStream =
      wrapStream(FileUtils.openInputStream(s3Object.getDataFile()), useV4Signing);
      OutputStream targetStream = new FileOutputStream(partFile)) {
    sourceStream.skip(from);
    IOUtils.copy(new BoundedInputStream(sourceStream, len), targetStream);
  }
}
Project: fdroid    File: BluetoothDownloader.java
@Override
protected InputStream getDownloadersInputStream() throws IOException {
    Request request = Request.createGET(sourcePath, connection);
    Response response = request.send();
    fileDetails = response.toFileDetails();

    // TODO: Manage the dependency which includes this class better?
    // Right now, I only needed the one class from apache commons.
    // There are countless classes online which provide this functionality,
    // including some which are available from the Android SDK - the only
    // problem is that they have a funky API which doesn't just wrap a
    // plain old InputStream (the class is ContentLengthInputStream -
    // whereas this BoundedInputStream is much more generic and useful
    // to us).
    BoundedInputStream stream = new BoundedInputStream(response.toContentStream(), fileDetails.getFileSize());
    stream.setPropagateClose(false);

    return stream;
}
Project: AppHub    File: BluetoothDownloader.java
@Override
protected InputStream getDownloadersInputStream() throws IOException {
    Request request = Request.createGET(sourcePath, connection);
    Response response = request.send();
    fileDetails = response.toFileDetails();

    // TODO: Manage the dependency which includes this class better?
    // Right now, I only needed the one class from apache commons.
    // There are countless classes online which provide this functionality,
    // including some which are available from the Android SDK - the only
    // problem is that they have a funky API which doesn't just wrap a
    // plain old InputStream (the class is ContentLengthInputStream -
    // whereas this BoundedInputStream is much more generic and useful
    // to us).
    BoundedInputStream stream = new BoundedInputStream(response.toContentStream(), fileDetails.getFileSize());
    stream.setPropagateClose(false);

    return stream;
}
Project: hadoop-plus    File: BufferUtils.java
public static int readFileToByteBuffer(File file, long offset,
        long readSize, ByteBuffer buffer) throws IOException {
    final long fileSize = Math.max(file.length(), 0L);
    offset = Math.max(offset, 0L);
    offset = Math.min(offset, fileSize);

    if (0 >= readSize)
    {
        readSize = fileSize - offset;
    }
    readSize = Math.min(readSize, fileSize - offset);
    readSize = Math.min(readSize, buffer.remaining());

    int positionBefore = buffer.position();
    if (0 < readSize && file.isFile()) {
        // Ensure the file stream is closed even if the copy fails.
        try (FileInputStream inFileStr = FileUtils.openInputStream(file)) {
            IOUtils.skip(inFileStr, offset);
            final BoundedInputStream inStr = new BoundedInputStream(inFileStr, readSize);
            final OutputStream outStr = new ByteBufferBackedOutputStream(buffer);
            IOUtils.copy(inStr, outStr);
        }
    }

    return buffer.position() - positionBefore;
}
Project: fdroidclient    File: BluetoothDownloader.java
@Override
protected InputStream getDownloadersInputStream() throws IOException {
    Request request = Request.createGET(sourcePath, connection);
    Response response = request.send();
    fileDetails = response.toFileDetails();

    // TODO: Manage the dependency which includes this class better?
    // Right now, I only needed the one class from apache commons.
    // There are countless classes online which provide this functionality,
    // including some which are available from the Android SDK - the only
    // problem is that they have a funky API which doesn't just wrap a
    // plain old InputStream (the class is ContentLengthInputStream -
    // whereas this BoundedInputStream is much more generic and useful
    // to us).
    BoundedInputStream stream = new BoundedInputStream(response.toContentStream(), fileDetails.getFileSize());
    stream.setPropagateClose(false);

    return stream;
}
Project: oxAuth    File: CRLCertificateVerifier.java
public X509CRL requestCRL(String url) throws IOException, MalformedURLException, CertificateException, CRLException {
    HttpURLConnection con = (HttpURLConnection) new URL(url).openConnection();
    try {
        con.setUseCaches(false);

        InputStream in = new BoundedInputStream(con.getInputStream(), maxCrlSize);
        try {
            CertificateFactory certificateFactory = CertificateFactory.getInstance("X.509");
            X509CRL crl = (X509CRL) certificateFactory.generateCRL(in);
            log.debug("CRL size: " + crl.getEncoded().length + " bytes");

            return crl;
        } finally {
            IOUtils.closeQuietly(in);
        }
    } catch (IOException ex) {
        log.error("Failed to download CRL from '" + url + "'", ex);
    } finally {
        if (con != null) {
            con.disconnect();
        }
    }

    return null;
}
Project: cyberduck    File: B2LargeUploadService.java
private Future<B2UploadPartResponse> submit(final ThreadPool pool, final Path file, final Local local,
                                            final BandwidthThrottle throttle, final StreamListener listener,
                                            final TransferStatus overall,
                                            final int partNumber,
                                            final Long offset, final Long length, final ConnectionCallback callback) {
    if(log.isInfoEnabled()) {
        log.info(String.format("Submit part %d of %s to queue with offset %d and length %d", partNumber, file, offset, length));
    }
    return pool.execute(new DefaultRetryCallable<B2UploadPartResponse>(new BackgroundExceptionCallable<B2UploadPartResponse>() {
        @Override
        public B2UploadPartResponse call() throws BackgroundException {
            if(overall.isCanceled()) {
                throw new ConnectionCanceledException();
            }
            final TransferStatus status = new TransferStatus()
                    .length(length)
                    .skip(offset);
            status.setHeader(overall.getHeader());
            status.setNonces(overall.getNonces());
            status.setChecksum(writer.checksum(file).compute(
                    StreamCopier.skip(new BoundedInputStream(local.getInputStream(), offset + length), offset),
                    status));
            status.setSegment(true);
            status.setPart(partNumber);
            return (B2UploadPartResponse) B2LargeUploadService.super.upload(file, local, throttle, listener, status, overall, new StreamProgress() {
                @Override
                public void progress(final long bytes) {
                    status.progress(bytes);
                    // Discard sent bytes in overall progress if there is an error reply for segment.
                    overall.progress(bytes);
                }

                @Override
                public void setComplete() {
                    status.setComplete();
                }
            }, callback);
        }
    }, overall));
}
Project: cyberduck    File: SwiftLargeObjectUploadFeature.java
private Future<StorageObject> submit(final ThreadPool pool, final Path segment, final Local local,
                                     final BandwidthThrottle throttle, final StreamListener listener,
                                     final TransferStatus overall, final Long offset, final Long length, final ConnectionCallback callback) {
    return pool.execute(new DefaultRetryCallable<StorageObject>(new BackgroundExceptionCallable<StorageObject>() {
        @Override
        public StorageObject call() throws BackgroundException {
            if(overall.isCanceled()) {
                throw new ConnectionCanceledException();
            }
            final TransferStatus status = new TransferStatus()
                    .length(length)
                    .skip(offset);
            status.setHeader(overall.getHeader());
            status.setNonces(overall.getNonces());
            status.setChecksum(writer.checksum(segment).compute(
                    StreamCopier.skip(new BoundedInputStream(local.getInputStream(), offset + length), offset), status));
            status.setSegment(true);
            return SwiftLargeObjectUploadFeature.super.upload(
                    segment, local, throttle, listener, status, overall, new StreamProgress() {
                        @Override
                        public void progress(final long bytes) {
                            status.progress(bytes);
                            // Discard sent bytes in overall progress if there is an error reply for segment.
                            overall.progress(bytes);
                        }

                        @Override
                        public void setComplete() {
                            status.setComplete();
                        }
                    }, callback);
        }
    }, overall));
}
Project: S3Mock    File: FileStoreController.java
/**
 * Supports range requests with different range ends. E.g. if the content has 100 bytes, the
 * range request could be bytes=10-100, 10--1 or 10-200.
 *
 * see: http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectGET.html
 *
 * @param response {@link HttpServletResponse}
 * @param range {@link String}
 * @param s3Object {@link S3Object}
 * @throws IOException if the range request value is invalid
 */
private void getObjectWithRange(final HttpServletResponse response, final Range range,
    final S3Object s3Object)
    throws IOException {
  final long fileSize = s3Object.getDataFile().length();
  final long bytesToRead = Math.min(fileSize - 1, range.getEnd()) - range.getStart() + 1;

  if (bytesToRead < 0 || fileSize < range.getStart()) {
    response.setStatus(HttpStatus.REQUESTED_RANGE_NOT_SATISFIABLE.value());
    response.flushBuffer();
    return;
  }

  response.setStatus(HttpStatus.PARTIAL_CONTENT.value());
  response.setHeader(HttpHeaders.ACCEPT_RANGES, RANGES_BYTES);
  response.setHeader(HttpHeaders.CONTENT_RANGE,
      String.format("bytes %s-%s", range.getStart(), bytesToRead + range.getStart() - 1));
  response.setHeader(HttpHeaders.ETAG, "\"" + s3Object.getMd5() + "\"");
  response.setDateHeader(HttpHeaders.LAST_MODIFIED, s3Object.getLastModified());

  response.setContentType(s3Object.getContentType());
  response.setContentLengthLong(bytesToRead);

  try (OutputStream outputStream = response.getOutputStream()) {
    try (FileInputStream fis = new FileInputStream(s3Object.getDataFile())) {
      fis.skip(range.getStart());
      IOUtils.copy(new BoundedInputStream(fis, bytesToRead), outputStream);
    }
  }
}
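The arithmetic above tolerates range ends past the end of the object: for a 100-byte file, bytes=10-200 clamps to offsets 10..99 and serves 90 bytes. A standalone check of that math (a sketch, not S3Mock code):

public class RangeMathDemo {
    public static void main(String[] args) {
        long fileSize = 100, start = 10, end = 200;
        // Clamp the inclusive end to the last valid offset, then count the bytes.
        long bytesToRead = Math.min(fileSize - 1, end) - start + 1;
        System.out.println(bytesToRead);                                        // 90
        System.out.println("bytes " + start + "-" + (bytesToRead + start - 1)); // bytes 10-99
    }
}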
Project: hadoop-manta    File: MantaFileSystem.java
/**
 * Gets the checksum of a file, from the beginning of the file up to the
 * specified length. Warning: this operation is slow because the entire
 * file must be downloaded in order to calculate the checksum.
 *
 * @param mantaPath The file path
 * @param length The length of the file range for checksum calculation
 * @return the checksum of an arbitrary amount of bytes from the start of a file
 * @throws IOException thrown when unable to compute the checksum
 */
FileChecksum getFileChecksumLocally(final String mantaPath, final long length) throws IOException {
    LOG.debug("Calculating checksum for file {} locally by downloading all content",
            mantaPath);

    try (InputStream in = client.getAsInputStream(mantaPath);
         BoundedInputStream bin = new BoundedInputStream(in, length)) {
        byte[] bytes = DigestUtils.md5(bin);
        return new MantaChecksum(bytes);
    }
}
Project: lavaplayer    File: MessageInput.java
/**
 * @return Data input for the next message. Note that it does not automatically skip over the last message if it was
 *         not fully read; for that purpose, skipRemainingBytes() should be called explicitly after reading every
 *         message. A null return value indicates the position where MessageOutput#finish() wrote the end marker.
 * @throws IOException On IO error
 */
public DataInput nextMessage() throws IOException {
  int value = dataInputStream.readInt();
  messageFlags = (int) ((value & 0xC0000000L) >> 30L);
  messageSize = value & 0x3FFFFFFF;

  if (messageSize == 0) {
    return null;
  }

  return new DataInputStream(new BoundedInputStream(countingInputStream, messageSize));
}
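The header int above carries two flag bits in the top of the word and a 30-bit message size in the remainder; masking with 0xC0000000L before shifting avoids sign-extension artifacts on negative values. A worked round-trip (pack() is an illustrative helper, not part of lavaplayer):

public class MessageHeaderDemo {
    static int pack(int flags, int size) {
        return (flags << 30) | (size & 0x3FFFFFFF);
    }

    public static void main(String[] args) {
        int value = pack(2, 1000); // sign bit set: value is negative as an int
        // Widen to long before masking so the shift cannot smear the sign bit.
        int flags = (int) ((value & 0xC0000000L) >> 30);
        int size = value & 0x3FFFFFFF;
        System.out.println(flags + " " + size); // prints "2 1000"
    }
}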
Project: hadoop-plus    File: AggregatedLogFormat.java
/**
 * Writes all logs for a single container to the provided writer.
 * @param valueStream
 * @param writer
 * @throws IOException
 */
public static void readAcontainerLogs(DataInputStream valueStream,
    Writer writer) throws IOException {
  int bufferSize = 65536;
  char[] cbuf = new char[bufferSize];
  String fileType;
  String fileLengthStr;
  long fileLength;

  while (true) {
    try {
      fileType = valueStream.readUTF();
    } catch (EOFException e) {
      // EndOfFile
      return;
    }
    fileLengthStr = valueStream.readUTF();
    fileLength = Long.parseLong(fileLengthStr);
    writer.write("\n\nLogType:");
    writer.write(fileType);
    writer.write("\nLogLength:");
    writer.write(fileLengthStr);
    writer.write("\nLog Contents:\n");
    // ByteLevel
    BoundedInputStream bis =
        new BoundedInputStream(valueStream, fileLength);
    InputStreamReader reader = new InputStreamReader(bis);
    int currentRead = 0;
    int totalRead = 0;
    while ((currentRead = reader.read(cbuf, 0, bufferSize)) != -1) {
      writer.write(cbuf, 0, currentRead);
      totalRead += currentRead;
    }
  }
}
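The BoundedInputStream here is what stops the InputStreamReader from consuming bytes that belong to the next length-prefixed log entry. A minimal sketch of that record-framing idea (the format and names are illustrative, not the aggregated-log wire format):

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import org.apache.commons.io.IOUtils;
import org.apache.commons.io.input.BoundedInputStream;

public class LengthPrefixedDemo {
    public static void main(String[] args) throws IOException {
        // Write two records, each a UTF length header followed by a raw payload.
        ByteArrayOutputStream buf = new ByteArrayOutputStream();
        DataOutputStream out = new DataOutputStream(buf);
        for (String payload : new String[]{"first", "second"}) {
            out.writeUTF(String.valueOf(payload.length()));
            out.write(payload.getBytes("UTF-8"));
        }
        DataInputStream in = new DataInputStream(new ByteArrayInputStream(buf.toByteArray()));
        for (int i = 0; i < 2; i++) {
            long len = Long.parseLong(in.readUTF());
            // Bound the read so the next record stays intact for the next pass.
            System.out.println(IOUtils.toString(new BoundedInputStream(in, len), "UTF-8"));
        }
    }
}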
Project: hadoop-plus    File: BufferUtils.java
public static ByteBuffer readFileToByteBuffer(File file, long offset,
        long readSize, boolean isDirect) throws IOException {
    final long fileSize = Math.max(file.length(), 0L);
    offset = Math.max(offset, 0L);
    offset = Math.min(offset, fileSize);

    if (0 >= readSize)
    {
        readSize = fileSize - offset;
    }
    readSize = Math.min(readSize, fileSize - offset);
    readSize = Math.min(readSize, Integer.MAX_VALUE);

    if (0 < readSize && file.isFile()) {
        try (FileInputStream inFileStr = FileUtils.openInputStream(file)) {
            IOUtils.skip(inFileStr, offset);
            final BoundedInputStream inStr = new BoundedInputStream(inFileStr, readSize);
            // Allocate the target buffer once; the bounded stream fills exactly readSize bytes.
            final ByteBuffer buffer = allocateBuffer((int) readSize, isDirect);
            final OutputStream outStr = new ByteBufferBackedOutputStream(buffer);
            IOUtils.copy(inStr, outStr);
            return buffer;
        }
    }
    return null;
}
Project: FOXopen    File: KryoManager.java
@Override
public Object read(Kryo kryo, Input input, Class type) {

  //First item to read is an integer representing the length of the written byte array
  int lDOMLength = input.readInt();

  //Restrict the InputStream to be read to the length of the serialised DOM, so the DOM constructor doesn't deplete the raw Kryo InputStream
  BoundedInputStream lBoundedInputStream = new BoundedInputStream(input, lDOMLength);
  //Don't allow close calls from the XML deserialiser to propagate to the wrapped InputStream
  lBoundedInputStream.setPropagateClose(false);

  return DOM.createDocument(lBoundedInputStream, false);
}
Project: hopsworks    File: LogReader.java
/**
 * Writes all logs for a single container to the provided writer.
 *
 * @param valueStream
 * @param writer
 * @throws IOException
 */
public static void readAcontainerLogs(DataInputStream valueStream,
        Writer writer) throws IOException {
  int bufferSize = 65536;
  char[] cbuf = new char[bufferSize];
  String fileType;
  String fileLengthStr;
  long fileLength;

  while (true) {
    try {
      fileType = valueStream.readUTF();
    } catch (EOFException e) {
      // EndOfFile
      return;
    }
    fileLengthStr = valueStream.readUTF();
    fileLength = Long.parseLong(fileLengthStr);
    writer.write("\n\nLogType:");
    writer.write(fileType);
    writer.write("\nLogLength:");
    writer.write(fileLengthStr);
    writer.write("\nLog Contents:\n");
    // ByteLevel
    BoundedInputStream bis = new BoundedInputStream(valueStream, fileLength);
    InputStreamReader reader = new InputStreamReader(bis);
    int currentRead = 0;
    int totalRead = 0;
    while ((currentRead = reader.read(cbuf, 0, bufferSize)) != -1) {
      writer.write(cbuf, 0, currentRead);
      totalRead += currentRead;
    }
  }
}
Project: datacollector    File: SyncPreviewer.java
@Override
public RawPreview getRawSource(int maxLength, MultivaluedMap<String, String> previewParams) throws PipelineException {
  changeState(PreviewStatus.RUNNING, null);
  int bytesToRead = configuration.get(MAX_SOURCE_PREVIEW_SIZE_KEY, MAX_SOURCE_PREVIEW_SIZE_DEFAULT);
  bytesToRead = Math.min(bytesToRead, maxLength);

  PipelineConfiguration pipelineConf = pipelineStore.load(name, rev);
  if(pipelineConf.getStages().isEmpty()) {
    throw new PipelineRuntimeException(ContainerError.CONTAINER_0159, name);
  }

  //find the source stage in the pipeline configuration
  StageDefinition sourceStageDef = getSourceStageDef(pipelineConf);

  RawSourcePreviewer rawSourcePreviewer = createRawSourcePreviewer(sourceStageDef, previewParams);
  RawPreview rawPreview;
  ClassLoader classLoader = sourceStageDef.getStageClassLoader();
  ClassLoader contextClassLoader = Thread.currentThread().getContextClassLoader();
  try {
    Thread.currentThread().setContextClassLoader(classLoader);
    try(BoundedInputStream bIn = new BoundedInputStream(rawSourcePreviewer.preview(bytesToRead), bytesToRead)) {
      rawPreview = new RawPreviewImpl(IOUtils.toString(bIn), rawSourcePreviewer.getMimeType());
    }
  } catch (IOException ex) {
    throw new PipelineRuntimeException(PreviewError.PREVIEW_0003, ex.toString(), ex);
  } finally {
    Thread.currentThread().setContextClassLoader(contextClassLoader);
  }
  changeState(PreviewStatus.FINISHED, null);
  return rawPreview;
}
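Bounding the previewer's stream before IOUtils.toString caps how much an oversized or misbehaving source can pull into memory. The same defensive pattern in isolation (a sketch; previewOf and the UTF-8 choice are assumptions, not datacollector API):

import java.io.IOException;
import java.io.InputStream;
import org.apache.commons.io.IOUtils;
import org.apache.commons.io.input.BoundedInputStream;

public class SafePreview {
    /** Reads at most maxBytes from an untrusted stream into a String. */
    static String previewOf(InputStream untrusted, int maxBytes) throws IOException {
        try (BoundedInputStream bin = new BoundedInputStream(untrusted, maxBytes)) {
            return IOUtils.toString(bin, "UTF-8");
        }
    }
}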
Project: bamboo    File: TextExtractor.java
private void extractHtml(InputStream record, Document doc) throws TextExtractionException {
    try {
        BoundedInputStream in = new BoundedInputStream(record, maxDocSize);
        TextDocument textDoc = new BoilerpipeSAXInput(new InputSource(in)).getTextDocument();
        doc.setTitle(textDoc.getTitle());
        doc.setText(textDoc.getText(true, true).replace("\uFFFF", ""));
        if (boilingEnabled) {
            DefaultExtractor.INSTANCE.process(textDoc);
            doc.setBoiled(textDoc.getContent().replace("\uFFFF", ""));
        }
    } catch (SAXException | BoilerpipeProcessingException | IllegalArgumentException | ArrayIndexOutOfBoundsException e) {
        throw new TextExtractionException(e);
    }
}
Project: hadoop-TCP    File: AggregatedLogFormat.java
/**
 * Writes all logs for a single container to the provided writer.
 * @param valueStream
 * @param writer
 * @throws IOException
 */
public static void readAcontainerLogs(DataInputStream valueStream,
    Writer writer) throws IOException {
  int bufferSize = 65536;
  char[] cbuf = new char[bufferSize];
  String fileType;
  String fileLengthStr;
  long fileLength;

  while (true) {
    try {
      fileType = valueStream.readUTF();
    } catch (EOFException e) {
      // EndOfFile
      return;
    }
    fileLengthStr = valueStream.readUTF();
    fileLength = Long.parseLong(fileLengthStr);
    writer.write("\n\nLogType:");
    writer.write(fileType);
    writer.write("\nLogLength:");
    writer.write(fileLengthStr);
    writer.write("\nLog Contents:\n");
    // ByteLevel
    BoundedInputStream bis =
        new BoundedInputStream(valueStream, fileLength);
    InputStreamReader reader = new InputStreamReader(bis);
    int currentRead = 0;
    int totalRead = 0;
    while ((currentRead = reader.read(cbuf, 0, bufferSize)) != -1) {
      writer.write(cbuf, 0, currentRead);
      totalRead += currentRead;
    }
  }
}
Project: bouncestorage    File: WriteBackPolicyTest.java
private void putLargeObject(String blobName, long size) throws Exception {
    /* running 5GB through transient blob store is asking for trouble */
    skipIfTransient(policy);

    Blob blob = policy.blobBuilder(blobName)
            .payload(new ByteSourcePayload(new ByteSource() {
                /* ByteSource by default keeps reading the stream to figure out the size */
                @Override
                public long size() throws IOException {
                    return size;
                }

                @Override
                public InputStream openStream() throws IOException {
                    return new BoundedInputStream(new ZeroInputStream(), size);
                }
            }))
            .contentLength(size)
            .build();

    logger.info("putting large object {}", blobName);
    policy.putBlob(containerName, blob, new PutOptions().multipart());
    logger.info("done putting large object {}", blobName);
    assertThat(policy.getDestination().blobExists(containerName, blobName)).isFalse();
    assertThat(policy.getSource().blobExists(containerName, blobName)).isTrue();
    BlobMetadata nearBlob = policy.getSource().blobMetadata(containerName, blobName);
    assertThat(nearBlob.getContentMetadata().getContentLength()).isEqualTo(size);
}
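The test above fakes a huge payload by bounding an endless stream of zeros, so no buffer of that size ever has to exist. A self-contained sketch of the same trick (ZeroStream stands in for the test's ZeroInputStream; the 5 MiB size just keeps the demo quick):

import java.io.IOException;
import java.io.InputStream;
import org.apache.commons.io.input.BoundedInputStream;

public class SizedZeroStreamDemo {
    /** An endless stream of zero bytes; it never reports EOF on its own. */
    static class ZeroStream extends InputStream {
        @Override
        public int read() {
            return 0;
        }
    }

    public static void main(String[] args) throws IOException {
        long size = 5L * 1024 * 1024;
        InputStream payload = new BoundedInputStream(new ZeroStream(), size);
        long count = 0;
        while (payload.read() != -1) {
            count++;
        }
        System.out.println(count == size); // true: the bound supplies the EOF
    }
}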
Project: hardfs    File: AggregatedLogFormat.java
/**
 * Writes all logs for a single container to the provided writer.
 * @param valueStream
 * @param writer
 * @throws IOException
 */
public static void readAcontainerLogs(DataInputStream valueStream,
    Writer writer) throws IOException {
  int bufferSize = 65536;
  char[] cbuf = new char[bufferSize];
  String fileType;
  String fileLengthStr;
  long fileLength;

  while (true) {
    try {
      fileType = valueStream.readUTF();
    } catch (EOFException e) {
      // EndOfFile
      return;
    }
    fileLengthStr = valueStream.readUTF();
    fileLength = Long.parseLong(fileLengthStr);
    writer.write("\n\nLogType:");
    writer.write(fileType);
    writer.write("\nLogLength:");
    writer.write(fileLengthStr);
    writer.write("\nLog Contents:\n");
    // ByteLevel
    BoundedInputStream bis =
        new BoundedInputStream(valueStream, fileLength);
    InputStreamReader reader = new InputStreamReader(bis);
    int currentRead = 0;
    int totalRead = 0;
    while ((currentRead = reader.read(cbuf, 0, bufferSize)) != -1) {
      writer.write(cbuf, 0, currentRead);
      totalRead += currentRead;
    }
  }
}
Project: hadoop-on-lustre2    File: AggregatedLogFormat.java
/**
 * Writes all logs for a single container to the provided writer.
 * @param valueStream
 * @param writer
 * @throws IOException
 */
public static void readAcontainerLogs(DataInputStream valueStream,
    Writer writer) throws IOException {
  int bufferSize = 65536;
  char[] cbuf = new char[bufferSize];
  String fileType;
  String fileLengthStr;
  long fileLength;

  while (true) {
    try {
      fileType = valueStream.readUTF();
    } catch (EOFException e) {
      // EndOfFile
      return;
    }
    fileLengthStr = valueStream.readUTF();
    fileLength = Long.parseLong(fileLengthStr);
    writer.write("\n\nLogType:");
    writer.write(fileType);
    writer.write("\nLogLength:");
    writer.write(fileLengthStr);
    writer.write("\nLog Contents:\n");
    // ByteLevel
    BoundedInputStream bis =
        new BoundedInputStream(valueStream, fileLength);
    InputStreamReader reader = new InputStreamReader(bis);
    int currentRead = 0;
    int totalRead = 0;
    while ((currentRead = reader.read(cbuf, 0, bufferSize)) != -1) {
      writer.write(cbuf, 0, currentRead);
      totalRead += currentRead;
    }
  }
}
Project: James    File: NettyImapRequestLineReader.java
/**
 * Return a {@link ChannelBufferInputStream} if the wrapped
 * {@link ChannelBuffer} contains enough data. If not, it will throw a
 * {@link NotEnoughDataException}.
 */
public InputStream read(int size, boolean extraCRLF) throws DecodingException {
    int crlf = 0;
    if (extraCRLF) {
        crlf = 2;
    }

    if (maxLiteralSize > 0 && size > maxLiteralSize) {
        throw new DecodingException(HumanReadableText.FAILED, "Specified literal is greater than the allowed size");
    }
    // Check if we have enough data
    if (size + crlf > buffer.readableBytes()) {
        // ok let us throw an exception which tells the decoder how many more
        // bytes we need
        throw new NotEnoughDataException(size + read + crlf);
    }

    // Unset the next char.
    nextSeen = false;
    nextChar = 0;

    // limit the size via commons-io as ChannelBufferInputStream size limiting is buggy
    InputStream in = new BoundedInputStream(new ChannelBufferInputStream(buffer), size); 
    if (extraCRLF) {
        return new EolInputStream(this, in);
    } else {
        return in;
    }
}
Project: disunity    File: BundleReader.java
private InputStream dataInputStream(long offset, long size) throws IOException {
    InputStream is;

    // use LZMA stream if the bundle is compressed
    if (bundle.header().compressed()) {
        // create initial input stream if required
        if (lzma == null) {
            lzma = lzmaInputStream();
        }

        // recreate stream if the offset is behind
        long lzmaOffset = lzma.getByteCount();
        if (lzmaOffset > offset) {
            lzma.close();
            lzma = lzmaInputStream();
        }

        // skip forward if required
        if (lzmaOffset < offset) {
            lzma.skip(offset - lzmaOffset);
        }

        is = lzma;
    } else {
        in.position(bundle.header().headerSize() + offset);
        is = in.stream();
    }

    return new BoundedInputStream(is, size);
}
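Because the LZMA stream only moves forward, the reader above recreates it whenever the requested offset lies behind the bytes already consumed, then skips ahead and bounds the view. A condensed sketch of that pattern (ForwardOnlyReader and newCompressedStream() are illustrative names, not disunity API):

import java.io.IOException;
import java.io.InputStream;
import org.apache.commons.io.IOUtils;
import org.apache.commons.io.input.BoundedInputStream;
import org.apache.commons.io.input.CountingInputStream;

abstract class ForwardOnlyReader {
    private CountingInputStream stream;

    /** Opens a fresh decompressing stream positioned at byte 0. */
    protected abstract InputStream newCompressedStream() throws IOException;

    InputStream dataAt(long offset, long size) throws IOException {
        // The compressed stream cannot rewind: if the target offset is behind
        // what has already been consumed, discard the stream and start over.
        if (stream == null || stream.getByteCount() > offset) {
            if (stream != null) {
                stream.close();
            }
            stream = new CountingInputStream(newCompressedStream());
        }
        IOUtils.skipFully(stream, offset - stream.getByteCount());
        BoundedInputStream bounded = new BoundedInputStream(stream, size);
        bounded.setPropagateClose(false); // keep the cached stream alive for later calls
        return bounded;
    }
}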
Project: cyberduck    File: S3MultipartUploadService.java
private Future<MultipartPart> submit(final ThreadPool pool, final Path file, final Local local,
                                     final BandwidthThrottle throttle, final StreamListener listener,
                                     final TransferStatus overall, final MultipartUpload multipart,
                                     final int partNumber, final long offset, final long length, final ConnectionCallback callback) throws BackgroundException {
    if(log.isInfoEnabled()) {
        log.info(String.format("Submit part %d of %s to queue with offset %d and length %d", partNumber, file, offset, length));
    }
    return pool.execute(new DefaultRetryCallable<MultipartPart>(new BackgroundExceptionCallable<MultipartPart>() {
        @Override
        public MultipartPart call() throws BackgroundException {
            if(overall.isCanceled()) {
                throw new ConnectionCanceledException();
            }
            final Map<String, String> requestParameters = new HashMap<String, String>();
            requestParameters.put("uploadId", multipart.getUploadId());
            requestParameters.put("partNumber", String.valueOf(partNumber));
            final TransferStatus status = new TransferStatus()
                    .length(length)
                    .skip(offset)
                    .withParameters(requestParameters);
            status.setHeader(overall.getHeader());
            status.setNonces(overall.getNonces());
            switch(session.getSignatureVersion()) {
                case AWS4HMACSHA256:
                    status.setChecksum(writer.checksum(file)
                            .compute(StreamCopier.skip(new BoundedInputStream(local.getInputStream(), offset + length), offset), status)
                    );
                    break;
            }
            status.setSegment(true);
            final StorageObject part = S3MultipartUploadService.super.upload(
                    file, local, throttle, listener, status, overall, new StreamProgress() {
                        @Override
                        public void progress(final long bytes) {
                            status.progress(bytes);
                            // Discard sent bytes in overall progress if there is an error reply for segment.
                            overall.progress(bytes);
                        }

                        @Override
                        public void setComplete() {
                            status.setComplete();
                        }
                    }, callback);
            if(log.isInfoEnabled()) {
                log.info(String.format("Received response %s for part number %d", part, partNumber));
            }
            // Populate part with response data that is accessible via the object's metadata
            return new MultipartPart(partNumber,
                    null == part.getLastModifiedDate() ? new Date(System.currentTimeMillis()) : part.getLastModifiedDate(),
                    null == part.getETag() ? StringUtils.EMPTY : part.getETag(),
                    part.getContentLength());

        }
    }, overall));
}
Project: fastcatsearch3    File: DocumentInflateTest.java
@Test
    public void inflaterFileTest() throws IOException {

        byte[] tmpBuf = new byte[10];
        byte[] workingBuffer = new byte[10];

        File f = new File("/tmp/doc.2");
        InputStream is = new BufferedFileInput(f);
        long len = f.length();
        BoundedInputStream fis = new BoundedInputStream(is, len);
        Inflater decompressor = new Inflater();
        decompressor.reset();
        try {
            while (!decompressor.finished()) {

                int count = 0;
                while((count = decompressor.inflate(workingBuffer)) == 0) {

                    if (decompressor.finished() || decompressor.needsDictionary()) {
                        // reached EOF error!
                        System.out.println("error while read document.");
                        break;
                    }
                    while (decompressor.needsInput()) {
                        System.out.println("needsInput!");
                        int n = fis.read(tmpBuf, 0, tmpBuf.length);
                        if (n == -1) {
                            throw new EOFException("Unexpected end of ZLIB input stream");
                        }
                        decompressor.setInput(tmpBuf, 0, n);
                    }
                }

                System.out.println("count = "+ count);
//              if(count > 0) {
//                  inflaterOutput.write(workingBuffer, 0, count);
//              }

            }
        } catch (DataFormatException e) {
            e.printStackTrace();
        } finally {
            decompressor.end();
            fis.close();
        }
    }
Project: java-triton    File: CloudApiResponseHandler.java
/**
 * Builds a new response exception.
 *
 * @param responseContentStream entity {@link InputStream} with error details
 * @param statusLine HTTP response status object
 * @param response HTTP response object
 * @return a new exception instance
 */
protected CloudApiResponseException buildResponseException(
        final InputStream responseContentStream, final StatusLine statusLine,
        final HttpResponse response) {
    // Error case because we didn't get the expected status code
    ErrorDetail detail;
    String msg;
    String entityText = null;

    // Handle cases where we have an error but it isn't in JSON
    try {
        detail = mapper.readValue(responseContentStream, ErrorDetail.class);
        msg = null;
    } catch (IOException e) {
        detail = null;
        msg = "Unexpected response body - unable to parse as JSON:\n" + e.getMessage();

        if (response.getEntity().isRepeatable()) {
            final HttpEntity entity = response.getEntity();
            final int maxDebugEntitySize = 2048;

            try (InputStream ein = entity.getContent();
                 InputStream bounded = new BoundedInputStream(ein, maxDebugEntitySize)) {
                entityText = IOUtils.toString(bounded);
            } catch (IOException ioe) {
                logger.error("Error parsing entity text", ioe);
            }
        }

        logger.warn("Problem parsing error response", e);
    }

    final CloudApiResponseException exception = new CloudApiResponseException(msg, operationName, statusLine,
            detail, extractRequestId(response));

    if (entityText != null) {
        exception.setContextValue("entityText", entityText);
    }

    exception.setContextValue("operationName", operationName);
    exception.setContextValue("deserializationMode", deserializationMode);
    exception.setContextValue("responseHeaders", CloudApiUtils.asString(response.getAllHeaders()));

    return exception;
}
Project: InflatableDonkey    File: ChunkListDecrypter.java
BoundedInputStream boundedInputStream(InputStream inputStream, int length) {
    BoundedInputStream bis = new BoundedInputStream(inputStream, length); // no close() required; close is not propagated
    bis.setPropagateClose(false);
    return bis;
}
Project: swift-explorer    File: FastSegmentationPlanFile.java
@Override
protected InputStream createSegment() throws IOException {
    BoundedInputStream res = new BoundedInputStream(Channels.newInputStream(
            this.randomAccessFile.getChannel().position(currentSegment * segmentationSize)), segmentationSize);
    res.setPropagateClose(false);
    return res;
}
Project: community-edition-old    File: SolrInformationServer.java
private void addContentPropertyToDocUsingAlfrescoRepository(SolrInputDocument doc,
            QName propertyQName, long dbId, String locale) 
                        throws AuthenticationException, IOException, UnsupportedEncodingException
{
    long start = System.nanoTime();

    // Expensive call to be done with ContentTracker
    GetTextContentResponse response = repositoryClient.getTextContent(dbId, propertyQName, null);

    addContentPropertyMetadata(doc, propertyQName, AlfrescoSolrDataModel.ContentFieldType.TRANSFORMATION_STATUS,
            response);
    addContentPropertyMetadata(doc, propertyQName, AlfrescoSolrDataModel.ContentFieldType.TRANSFORMATION_EXCEPTION,
            response);
    addContentPropertyMetadata(doc, propertyQName, AlfrescoSolrDataModel.ContentFieldType.TRANSFORMATION_TIME,
            response);

    InputStream ris = response.getContent();
    String textContent = "";
    try
    {
        if (ris != null)
        {
            // Get and copy content
            byte[] bytes = FileCopyUtils.copyToByteArray(new BoundedInputStream(ris, contentStreamLimit));
            textContent = new String(bytes, "UTF8");
        }
    }
    finally
    {
        // release the response only when the content has been read
        response.release();
    }

    long end = System.nanoTime();
    this.getTrackerStats().addDocTransformationTime(end - start);

    StringBuilder builder = new StringBuilder(textContent.length() + 16);
    builder.append("\u0000").append(locale).append("\u0000");
    builder.append(textContent);
    String localisedText = builder.toString();

    for (FieldInstance field : AlfrescoSolrDataModel.getInstance().getIndexedFieldNamesForProperty(propertyQName).getFields())
    {
        doc.removeField(field.getField());
        if(field.isLocalised())
        {
            doc.addField(field.getField(), localisedText);
        }
        else
        {
            doc.addField(field.getField(), textContent);
        }
        addFieldIfNotSet(doc, field);
    }
}
Project: fastcatsearch    File: DocumentInflateTest.java
@Test
    public void inflaterFileTest() throws IOException {

        byte[] tmpBuf = new byte[10];
        byte[] workingBuffer = new byte[10];

        File f = new File("/tmp/doc.2");
        InputStream is = new BufferedFileInput(f);
        long len = f.length();
        BoundedInputStream fis = new BoundedInputStream(is, len);
        Inflater decompressor = new Inflater();
        decompressor.reset();
        try {
            while (!decompressor.finished()) {

                int count = 0;
                while((count = decompressor.inflate(workingBuffer)) == 0) {

                    if (decompressor.finished() || decompressor.needsDictionary()) {
                        // reached EOF error!
                        System.out.println("error while read document.");
                        break;
                    }
                    while (decompressor.needsInput()) {
                        System.out.println("needsInput!");
                        int n = fis.read(tmpBuf, 0, tmpBuf.length);
                        if (n == -1) {
                            throw new EOFException("Unexpected end of ZLIB input stream");
                        }
                        decompressor.setInput(tmpBuf, 0, n);
                    }
                }

                System.out.println("count = "+ count);
//              if(count > 0) {
//                  inflaterOutput.write(workingBuffer, 0, count);
//              }

            }
        } catch (DataFormatException e) {
            e.printStackTrace();
        } finally {
            decompressor.end();
            fis.close();
        }
    }
Project: tez    File: LocalDiskFetchedInput.java
@Override
public InputStream getInputStream() throws IOException {
  FSDataInputStream inputStream = localFS.open(inputFile);
  inputStream.seek(startOffset);
  return new BoundedInputStream(inputStream, getSize());
}