Java class com.google.common.io.NullOutputStream: example source code
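com.google.common.io.NullOutputStream is an OutputStream that discards every byte written to it. The examples on this page use it for two purposes: measuring the size of a byte stream without storing it anywhere (wrap it in a DataOutputStream and read size() afterwards), and silencing output that a test does not care about. Later Guava releases removed this class in favor of ByteStreams.nullOutputStream(), and Java 11 added the equivalent OutputStream.nullOutputStream().

As a minimal, self-contained sketch of the size-measuring idiom the FixedFileTrailer examples below rely on (the class name is illustrative; Java 11+ is assumed so no Guava dependency is needed):

import java.io.DataOutputStream;
import java.io.IOException;
import java.io.OutputStream;

public class SerializedSizeSketch {
  public static void main(String[] args) throws IOException {
    // Write through a DataOutputStream into a sink that discards everything;
    // dos.size() still counts every byte that passed through.
    DataOutputStream dos = new DataOutputStream(OutputStream.nullOutputStream());
    dos.writeInt(42);        // 4 bytes
    dos.writeUTF("trailer"); // 2-byte length prefix + 7 bytes
    System.out.println("serialized size: " + dos.size()); // prints 13
  }
}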

Project: LCIndex-HBase-0.94.16    File: FixedFileTrailer.java
private static int[] computeTrailerSizeByVersion() {
  int versionToSize[] = new int[HFile.MAX_FORMAT_VERSION + 1];
  for (int version = MIN_FORMAT_VERSION;
       version <= MAX_FORMAT_VERSION;
       ++version) {
    FixedFileTrailer fft = new FixedFileTrailer(version, 
                                 HFileBlock.MINOR_VERSION_NO_CHECKSUM);
    DataOutputStream dos = new DataOutputStream(new NullOutputStream());
    try {
      fft.serialize(dos);
    } catch (IOException ex) {
      // The above has no reason to fail.
      throw new RuntimeException(ex);
    }
    versionToSize[version] = dos.size();
  }
  return versionToSize;
}
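Note the recurring idiom in this and the near-identical forks below: each supported trailer version is serialized into a DataOutputStream backed by a NullOutputStream, and dos.size() then yields the exact encoded length of that version's trailer, with no buffer allocated and no bytes retained.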
Project: IRIndex    File: FixedFileTrailer.java
private static int[] computeTrailerSizeByVersion() {
  int versionToSize[] = new int[HFile.MAX_FORMAT_VERSION + 1];
  for (int version = MIN_FORMAT_VERSION;
       version <= MAX_FORMAT_VERSION;
       ++version) {
    FixedFileTrailer fft = new FixedFileTrailer(version, 
                                 HFileBlock.MINOR_VERSION_NO_CHECKSUM);
    DataOutputStream dos = new DataOutputStream(new NullOutputStream());
    try {
      fft.serialize(dos);
    } catch (IOException ex) {
      // The above has no reason to fail.
      throw new RuntimeException(ex);
    }
    versionToSize[version] = dos.size();
  }
  return versionToSize;
}
Project: RStore    File: FixedFileTrailer.java
private static int[] computeTrailerSizeByVersion() {
  int versionToSize[] = new int[HFile.MAX_FORMAT_VERSION + 1];
  for (int version = MIN_FORMAT_VERSION;
       version <= MAX_FORMAT_VERSION;
       ++version) {
    FixedFileTrailer fft = new FixedFileTrailer(version);
    DataOutputStream dos = new DataOutputStream(new NullOutputStream());
    try {
      fft.serialize(dos);
    } catch (IOException ex) {
      // The above has no reason to fail.
      throw new RuntimeException(ex);
    }
    versionToSize[version] = dos.size();
  }
  return versionToSize;
}
Project: HBase-Research    File: FixedFileTrailer.java
private static int[] computeTrailerSizeByVersion() {
  int versionToSize[] = new int[HFile.MAX_FORMAT_VERSION + 1];
  for (int version = MIN_FORMAT_VERSION;
       version <= MAX_FORMAT_VERSION;
       ++version) {
    FixedFileTrailer fft = new FixedFileTrailer(version, 
                                 HFileBlock.MINOR_VERSION_NO_CHECKSUM);
    DataOutputStream dos = new DataOutputStream(new NullOutputStream());
    try {
      fft.serialize(dos);
    } catch (IOException ex) {
      // The above has no reason to fail.
      throw new RuntimeException(ex);
    }
    versionToSize[version] = dos.size();
  }
  return versionToSize;
}
Project: hbase-0.94.8-qod    File: FixedFileTrailer.java
private static int[] computeTrailerSizeByVersion() {
  int versionToSize[] = new int[HFile.MAX_FORMAT_VERSION + 1];
  for (int version = MIN_FORMAT_VERSION;
       version <= MAX_FORMAT_VERSION;
       ++version) {
    FixedFileTrailer fft = new FixedFileTrailer(version, 
                                 HFileBlock.MINOR_VERSION_NO_CHECKSUM);
    DataOutputStream dos = new DataOutputStream(new NullOutputStream());
    try {
      fft.serialize(dos);
    } catch (IOException ex) {
      // The above has no reason to fail.
      throw new RuntimeException(ex);
    }
    versionToSize[version] = dos.size();
  }
  return versionToSize;
}
Project: DominoHBase    File: FixedFileTrailer.java
private static int[] computeTrailerSizeByVersion() {
  int versionToSize[] = new int[HFile.MAX_FORMAT_VERSION + 1];
  for (int version = HFile.MIN_FORMAT_VERSION;
       version <= HFile.MAX_FORMAT_VERSION;
       ++version) {
    FixedFileTrailer fft = new FixedFileTrailer(version, HFileBlock.MINOR_VERSION_NO_CHECKSUM);
    DataOutputStream dos = new DataOutputStream(new NullOutputStream());
    try {
      fft.serialize(dos);
    } catch (IOException ex) {
      // The above has no reason to fail.
      throw new RuntimeException(ex);
    }
    versionToSize[version] = dos.size();
  }
  return versionToSize;
}
Project: DominoHBase    File: EncodedDataBlock.java
/**
 * Find the size of the compressed data, assuming the buffer is compressed
 * using the given algorithm.
 * @param algo compression algorithm
 * @param compressor compressor already requested from the codec
 * @param inputBuffer array to be compressed
 * @param offset offset of the beginning of the data
 * @param length length of the data to be compressed
 * @return size of the compressed data in bytes
 * @throws IOException if the compression stream cannot be written
 */
public static int getCompressedSize(Algorithm algo, Compressor compressor,
    byte[] inputBuffer, int offset, int length) throws IOException {
  DataOutputStream compressedStream = new DataOutputStream(
      new NullOutputStream());
  if (compressor != null) {
    compressor.reset();
  }
  OutputStream compressingStream = algo.createCompressionStream(
      compressedStream, compressor, 0);

  compressingStream.write(inputBuffer, offset, length);
  compressingStream.flush();
  compressingStream.close();

  return compressedStream.size();
}
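The same null-sink trick measures compressed size here: the bytes flow through the compression stream into a counting DataOutputStream that discards them. A hedged sketch of the pattern using the JDK's DeflaterOutputStream in place of Hadoop's Algorithm/Compressor pair (those types belong to the project above; the class and method names here are illustrative, and Java 11+ is assumed):

import java.io.DataOutputStream;
import java.io.IOException;
import java.io.OutputStream;
import java.util.zip.DeflaterOutputStream;

public class CompressedSizeSketch {
  // Compress into a counting null sink and report the compressed byte count.
  public static int deflatedSize(byte[] input, int offset, int length) throws IOException {
    DataOutputStream counter = new DataOutputStream(OutputStream.nullOutputStream());
    DeflaterOutputStream deflating = new DeflaterOutputStream(counter);
    deflating.write(input, offset, length);
    deflating.finish(); // flush the final compressed block without closing the sink
    return counter.size();
  }

  public static void main(String[] args) throws IOException {
    byte[] data = new byte[4096]; // all zeros, so it compresses very well
    System.out.println("compressed size: " + deflatedSize(data, 0, data.length));
  }
}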
Project: hindex    File: FixedFileTrailer.java
private static int[] computeTrailerSizeByVersion() {
  int versionToSize[] = new int[HFile.MAX_FORMAT_VERSION + 1];
  for (int version = MIN_FORMAT_VERSION;
       version <= MAX_FORMAT_VERSION;
       ++version) {
    FixedFileTrailer fft = new FixedFileTrailer(version, 
                                 HFileBlock.MINOR_VERSION_NO_CHECKSUM);
    DataOutputStream dos = new DataOutputStream(new NullOutputStream());
    try {
      fft.serialize(dos);
    } catch (IOException ex) {
      // The above has no reason to fail.
      throw new RuntimeException(ex);
    }
    versionToSize[version] = dos.size();
  }
  return versionToSize;
}
Project: hadoop-2.6.0-cdh5.4.3    File: TestDataTransferKeepalive.java
@Test(timeout=30000)
public void testManyClosedSocketsInCache() throws Exception {
  // Make a small file
  Configuration clientConf = new Configuration(conf);
  clientConf.set(DFS_CLIENT_CONTEXT, "testManyClosedSocketsInCache");
  DistributedFileSystem fs =
      (DistributedFileSystem)FileSystem.get(cluster.getURI(),
          clientConf);
  PeerCache peerCache = ClientContext.getFromConf(clientConf).getPeerCache();
  DFSTestUtil.createFile(fs, TEST_FILE, 1L, (short)1, 0L);

  // Insert a bunch of dead sockets in the cache, by opening
  // many streams concurrently, reading all of the data,
  // and then closing them.
  InputStream[] stms = new InputStream[5];
  try {
    for (int i = 0; i < stms.length; i++) {
      stms[i] = fs.open(TEST_FILE);
    }
    for (InputStream stm : stms) {
      IOUtils.copyBytes(stm, new NullOutputStream(), 1024);
    }
  } finally {
    IOUtils.cleanup(null, stms);
  }

  assertEquals(5, peerCache.size());

  // Let all the xceivers timeout
  Thread.sleep(1500);
  assertXceiverCount(0);

  // Client side still has the sockets cached
  assertEquals(5, peerCache.size());

  // Reading should not throw an exception.
  DFSTestUtil.readFile(fs, TEST_FILE);
}
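In these HDFS tests, IOUtils.copyBytes(stm, new NullOutputStream(), 1024) simply drains each stream to EOF while discarding the data, so the datanode finishes its transfer and the socket can be cached. A dependency-free sketch of the same drain (class name illustrative; transferTo needs Java 9+, the null stream Java 11+):

import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;

public class DrainSketch {
  // Read a stream to EOF, discarding the bytes; returns how many were drained.
  public static long drain(InputStream in) throws IOException {
    return in.transferTo(OutputStream.nullOutputStream());
  }

  public static void main(String[] args) throws IOException {
    try (InputStream in = new ByteArrayInputStream(new byte[1024])) {
      System.out.println("drained " + drain(in) + " bytes"); // drained 1024 bytes
    }
  }
}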
Project: hadoop-plus    File: TestDataTransferKeepalive.java
@Test(timeout=30000)
public void testManyClosedSocketsInCache() throws Exception {
  // Make a small file
  DFSTestUtil.createFile(fs, TEST_FILE, 1L, (short)1, 0L);

  // Insert a bunch of dead sockets in the cache, by opening
  // many streams concurrently, reading all of the data,
  // and then closing them.
  InputStream[] stms = new InputStream[5];
  try {
    for (int i = 0; i < stms.length; i++) {
      stms[i] = fs.open(TEST_FILE);
    }
    for (InputStream stm : stms) {
      IOUtils.copyBytes(stm, new NullOutputStream(), 1024);
    }
  } finally {
    IOUtils.cleanup(null, stms);
  }

  DFSClient client = ((DistributedFileSystem)fs).dfs;
  assertEquals(5, client.peerCache.size());

  // Let all the xceivers timeout
  Thread.sleep(1500);
  assertXceiverCount(0);

  // Client side still has the sockets cached
  assertEquals(5, client.peerCache.size());

  // Reading should not throw an exception.
  DFSTestUtil.readFile(fs, TEST_FILE);
}
Project: FlexMap    File: TestDataTransferKeepalive.java
@Test(timeout=30000)
public void testManyClosedSocketsInCache() throws Exception {
  // Make a small file
  Configuration clientConf = new Configuration(conf);
  clientConf.set(DFS_CLIENT_CONTEXT, "testManyClosedSocketsInCache");
  DistributedFileSystem fs =
      (DistributedFileSystem)FileSystem.get(cluster.getURI(),
          clientConf);
  PeerCache peerCache = ClientContext.getFromConf(clientConf).getPeerCache();
  DFSTestUtil.createFile(fs, TEST_FILE, 1L, (short)1, 0L);

  // Insert a bunch of dead sockets in the cache, by opening
  // many streams concurrently, reading all of the data,
  // and then closing them.
  InputStream[] stms = new InputStream[5];
  try {
    for (int i = 0; i < stms.length; i++) {
      stms[i] = fs.open(TEST_FILE);
    }
    for (InputStream stm : stms) {
      IOUtils.copyBytes(stm, new NullOutputStream(), 1024);
    }
  } finally {
    IOUtils.cleanup(null, stms);
  }

  assertEquals(5, peerCache.size());

  // Let all the xceivers timeout
  Thread.sleep(1500);
  assertXceiverCount(0);

  // Client side still has the sockets cached
  assertEquals(5, peerCache.size());

  // Reading should not throw an exception.
  DFSTestUtil.readFile(fs, TEST_FILE);
}
Project: tajo    File: TestTajoCli.java
@Test
public void testResultRowNumWhenSelectingOnPartitionedTable() throws Exception {
  try (TajoCli cli2 = new TajoCli(cluster.getConfiguration(), new String[]{}, null, System.in,
      new NullOutputStream(), new NullOutputStream())) {
    cli2.executeScript("create table region_part (r_regionkey int8, r_name text) " +
        "partition by column (r_comment text) as select * from region");

    setVar(tajoCli, SessionVars.CLI_FORMATTER_CLASS, TajoCliOutputTestFormatter.class.getName());
    tajoCli.executeScript("select r_comment from region_part where r_comment = 'hs use ironic, even requests. s'");
    String consoleResult = new String(out.toByteArray());
    assertOutputResult(consoleResult);
  } finally {
    tajoCli.executeScript("drop table region_part purge");
  }
}
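Here the two NullOutputStream arguments silence the CLI's stdout and stderr so the test can assert on its own captured formatter output instead. A standalone sketch of that wiring with the JDK null sink (TajoCli itself is not needed to show the idea; class name illustrative, Java 11+ assumed):

import java.io.OutputStream;
import java.io.PrintStream;

public class QuietConsoleSketch {
  public static void main(String[] args) {
    // Anything printed to this stream vanishes into the null sink.
    PrintStream quiet = new PrintStream(OutputStream.nullOutputStream());
    quiet.println("this goes nowhere");
    System.out.println("only this line reaches the terminal");
  }
}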
Project: hadoop-TCP    File: TestDataTransferKeepalive.java
@Test(timeout=30000)
public void testManyClosedSocketsInCache() throws Exception {
  // Make a small file
  DFSTestUtil.createFile(fs, TEST_FILE, 1L, (short)1, 0L);

  // Insert a bunch of dead sockets in the cache, by opening
  // many streams concurrently, reading all of the data,
  // and then closing them.
  InputStream[] stms = new InputStream[5];
  try {
    for (int i = 0; i < stms.length; i++) {
      stms[i] = fs.open(TEST_FILE);
    }
    for (InputStream stm : stms) {
      IOUtils.copyBytes(stm, new NullOutputStream(), 1024);
    }
  } finally {
    IOUtils.cleanup(null, stms);
  }

  DFSClient client = ((DistributedFileSystem)fs).dfs;
  assertEquals(5, client.peerCache.size());

  // Let all the xceivers timeout
  Thread.sleep(1500);
  assertXceiverCount(0);

  // Client side still has the sockets cached
  assertEquals(5, client.peerCache.size());

  // Reading should not throw an exception.
  DFSTestUtil.readFile(fs, TEST_FILE);
}
Project: hardfs    File: TestDataTransferKeepalive.java
@Test(timeout=30000)
public void testManyClosedSocketsInCache() throws Exception {
  // Make a small file
  DFSTestUtil.createFile(fs, TEST_FILE, 1L, (short)1, 0L);

  // Insert a bunch of dead sockets in the cache, by opening
  // many streams concurrently, reading all of the data,
  // and then closing them.
  InputStream[] stms = new InputStream[5];
  try {
    for (int i = 0; i < stms.length; i++) {
      stms[i] = fs.open(TEST_FILE);
    }
    for (InputStream stm : stms) {
      IOUtils.copyBytes(stm, new NullOutputStream(), 1024);
    }
  } finally {
    IOUtils.cleanup(null, stms);
  }

  DFSClient client = ((DistributedFileSystem)fs).dfs;
  assertEquals(5, client.peerCache.size());

  // Let all the xceivers timeout
  Thread.sleep(1500);
  assertXceiverCount(0);

  // Client side still has the sockets cached
  assertEquals(5, client.peerCache.size());

  // Reading should not throw an exception.
  DFSTestUtil.readFile(fs, TEST_FILE);
}
Project: hadoop-on-lustre2    File: TestDataTransferKeepalive.java
@Test(timeout=30000)
public void testManyClosedSocketsInCache() throws Exception {
  // Make a small file
  Configuration clientConf = new Configuration(conf);
  clientConf.set(DFS_CLIENT_CONTEXT, "testManyClosedSocketsInCache");
  DistributedFileSystem fs =
      (DistributedFileSystem)FileSystem.get(cluster.getURI(),
          clientConf);
  PeerCache peerCache = ClientContext.getFromConf(clientConf).getPeerCache();
  DFSTestUtil.createFile(fs, TEST_FILE, 1L, (short)1, 0L);

  // Insert a bunch of dead sockets in the cache, by opening
  // many streams concurrently, reading all of the data,
  // and then closing them.
  InputStream[] stms = new InputStream[5];
  try {
    for (int i = 0; i < stms.length; i++) {
      stms[i] = fs.open(TEST_FILE);
    }
    for (InputStream stm : stms) {
      IOUtils.copyBytes(stm, new NullOutputStream(), 1024);
    }
  } finally {
    IOUtils.cleanup(null, stms);
  }

  assertEquals(5, peerCache.size());

  // Let all the xceivers timeout
  Thread.sleep(1500);
  assertXceiverCount(0);

  // Client side still has the sockets cached
  assertEquals(5, peerCache.size());

  // Reading should not throw an exception.
  DFSTestUtil.readFile(fs, TEST_FILE);
}
Project: accumulo-mojo    File: AbstractServerTestRunner.java
public OutputRedirector(InputStream inputStream, OutputStream outputStream) {
    this.inputStream = new BufferedInputStream(inputStream);

    if (outputStream != null) {
        this.outputStream = outputStream;
    } else {
        this.outputStream = new NullOutputStream();
    }
}
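Falling back to a NullOutputStream when the caller supplies no destination is the null-object pattern: downstream code can write unconditionally instead of guarding every write with a null check.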
Project: openshift-deployer-plugin    File: Logger.java
public OutputStream getOutputStream() {
    return new NullOutputStream();
}