Java class org.apache.hadoop.fs.FSDataInputStream example source code

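The snippets below are collected from real projects. As a general orientation only (a minimal sketch, not taken from any of the projects listed; the path /tmp/example.txt and class name are illustrative assumptions), the usual pattern is to obtain an FSDataInputStream from FileSystem.open(), use its Seekable/PositionedReadable methods, and close it when done:

import java.io.IOException;
import java.nio.charset.StandardCharsets;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class FSDataInputStreamExample {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    Path path = new Path("/tmp/example.txt");   // illustrative path, not from the examples below
    FileSystem fs = path.getFileSystem(conf);

    // FSDataInputStream wraps the filesystem's raw stream and adds Seekable and
    // PositionedReadable on top of java.io.DataInputStream.
    try (FSDataInputStream in = fs.open(path)) {
      int len = (int) fs.getFileStatus(path).getLen();
      byte[] buf = new byte[len];
      in.readFully(0, buf);                     // positioned read of the whole file
      in.seek(0);                               // rewind via Seekable
      System.out.println(new String(buf, StandardCharsets.UTF_8));
    }
  }
}
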
Project: dremio-oss    File: PseudoDistributedFileSystem.java
@Override
public FSDataInputStream open(Path f, int bufferSize) throws IOException {
  Path absolutePath = toAbsolutePath(f);
  checkPath(absolutePath);

  // Handle root
  if (absolutePath.isRoot()) {
    throw new AccessControlException("Cannot open " + f);
  }

  try {
    RemotePath remotePath = getRemotePath(absolutePath);

    FileSystem delegate = getDelegateFileSystem(remotePath.address);
    return delegate.open(remotePath.path, bufferSize);
  } catch (IllegalArgumentException e) {
    throw (FileNotFoundException) (new FileNotFoundException("No file " + absolutePath).initCause(e));
  }
}
Project: mmsns    File: HadoopUtil.java
/**
 * Download a file from Hadoop (HDFS) to the local filesystem.
 *
 * @param taskName    task name used to look up the HDFS URI and model path in the property config
 * @param filePath    local destination path
 * @param existDelete whether to delete the local file first if it already exists
 */
public static void download(String taskName, String filePath, boolean existDelete) {
    File file = new File(filePath);
    if (file.exists()) {
        if (existDelete) {
            file.delete();
        } else {
            return;
        }
    }
    String hadoopAddress = propertyConfig.getProperty("sqoop.task." + taskName + ".tolink.linkConfig.uri");
    String itemmodels = propertyConfig.getProperty("sqoop.task." + taskName + ".recommend.itemmodels");
    try {
        DistributedFileSystem distributedFileSystem = distributedFileSystem(hadoopAddress);
        FSDataInputStream fsDataInputStream = distributedFileSystem.open(new Path(itemmodels));
        // available() on a DFS stream reports the remaining bytes of the file;
        // readFully avoids short reads that plain read() allows
        byte[] bs = new byte[fsDataInputStream.available()];
        fsDataInputStream.readFully(bs);
        IOUtils.closeQuietly(fsDataInputStream);
        log.info(new String(bs));

        FileOutputStream fileOutputStream = new FileOutputStream(new File(filePath));
        IOUtils.write(bs, fileOutputStream);
        IOUtils.closeQuietly(fileOutputStream);
    } catch (IOException e) {
        log.error(e);
    }
}
Project: angel    File: ModelLoader.java
private static void loadSparseDoublePartition(SparseDoubleModel model, FSDataInputStream input,
    ModelPartitionMeta partMeta) throws IOException {
  int rowNum = input.readInt();
  int rowId = 0;
  int nnz = 0;
  int totalNNZ = 0;
  Int2DoubleOpenHashMap row = null;
  for (int i = 0; i < rowNum; i++) {
    rowId = input.readInt();
    nnz = input.readInt();
    totalNNZ = (int) (nnz * (model.col) / (partMeta.getEndCol() - partMeta.getStartCol()));
    row = model.getRow(rowId, partMeta.getPartId(), totalNNZ);
    for (int j = 0; j < nnz; j++) {
      row.put(input.readInt(), input.readDouble());
    }
  }
}
Project: hadoop    File: TestMerger.java
private void readOnDiskMapOutput(Configuration conf, FileSystem fs, Path path,
    List<String> keys, List<String> values) throws IOException {
  FSDataInputStream in = CryptoUtils.wrapIfNecessary(conf, fs.open(path));

  IFile.Reader<Text, Text> reader = new IFile.Reader<Text, Text>(conf, in,
      fs.getFileStatus(path).getLen(), null, null);
  DataInputBuffer keyBuff = new DataInputBuffer();
  DataInputBuffer valueBuff = new DataInputBuffer();
  Text key = new Text();
  Text value = new Text();
  while (reader.nextRawKey(keyBuff)) {
    key.readFields(keyBuff);
    keys.add(key.toString());
    reader.nextRawValue(valueBuff);
    value.readFields(valueBuff);
    values.add(value.toString());
  }
}
Project: dremio-oss    File: TestPDFSProtocol.java
@Test
public void testOnMessageEOF() throws IOException {
  InputStream mis = mock(InputStream.class, withSettings().extraInterfaces(Seekable.class, PositionedReadable.class));
  doReturn(-1).when(mis).read(any(byte[].class), anyInt(), anyInt());

  FSDataInputStream fdis = new FSDataInputStream(mis);
  Response response = getResponse(7L, 4096, fdis);

  InOrder inOrder = Mockito.inOrder(mis);

  inOrder.verify((Seekable) mis).seek(7);
  inOrder.verify(mis).read(any(byte[].class), anyInt(), anyInt());

  assertEquals(-1, ((DFS.GetFileDataResponse) response.pBody).getRead());
  assertEquals(0, response.dBodies.length);
}
Project: hadoop    File: TestS3InMemoryFileSystem.java
public void testBasicReadWriteIO() throws IOException {
  FSDataOutputStream writeStream = fs.create(new Path(TEST_PATH));
  writeStream.write(TEST_DATA.getBytes());
  writeStream.flush();
  writeStream.close();

  FSDataInputStream readStream = fs.open(new Path(TEST_PATH));
  BufferedReader br = new BufferedReader(new InputStreamReader(readStream));
  String line = "";
  StringBuffer stringBuffer = new StringBuffer();
  while ((line = br.readLine()) != null) {
      stringBuffer.append(line);
  }
  br.close();

  assert(TEST_DATA.equals(stringBuffer.toString()));
}
Project: hadoop    File: JobHistoryCopyService.java
public static FSDataInputStream getPreviousJobHistoryFileStream(
    Configuration conf, ApplicationAttemptId applicationAttemptId)
    throws IOException {
  FSDataInputStream in = null;
  Path historyFile = null;
  String jobId =
      TypeConverter.fromYarn(applicationAttemptId.getApplicationId())
        .toString();
  String jobhistoryDir =
      JobHistoryUtils.getConfiguredHistoryStagingDirPrefix(conf, jobId);
  Path histDirPath =
      FileContext.getFileContext(conf).makeQualified(new Path(jobhistoryDir));
  FileContext fc = FileContext.getFileContext(histDirPath.toUri(), conf);
  // read the previous history file
  historyFile =
      fc.makeQualified(JobHistoryUtils.getStagingJobHistoryFile(histDirPath,
        jobId, (applicationAttemptId.getAttemptId() - 1)));
  LOG.info("History file is at " + historyFile);
  in = fc.open(historyFile);
  return in;
}
Project: hadoop    File: TestWebHDFS.java
/** test seek */
static void verifySeek(FileSystem fs, Path p, long offset, long length,
    byte[] buf, byte[] expected) throws IOException { 
  long remaining = length - offset;
  long checked = 0;
  LOG.info("XXX SEEK: offset=" + offset + ", remaining=" + remaining);

  final Ticker t = new Ticker("SEEK", "offset=%d, remaining=%d",
      offset, remaining);
  final FSDataInputStream in = fs.open(p, 64 << 10);
  in.seek(offset);
  for(; remaining > 0; ) {
    t.tick(checked, "offset=%d, remaining=%d", offset, remaining);
    final int n = (int)Math.min(remaining, buf.length);
    in.readFully(buf, 0, n);
    checkData(offset, remaining, n, buf, expected);

    offset += n;
    remaining -= n;
    checked += n;
  }
  in.close();
  t.end(checked);
}
Project: ditb    File: TestHFileBlockPositionalRead.java
@Test
public void testPositionalReadPrematureEOF() throws IOException {
  long position = 0;
  int bufOffset = 0;
  int necessaryLen = 10;
  int extraLen = 0;
  int totalLen = necessaryLen + extraLen;
  byte[] buf = new byte[totalLen];
  FSDataInputStream in = mock(FSDataInputStream.class);
  // first read returns a partial result, the retry then hits EOF
  when(in.read(position, buf, bufOffset, totalLen)).thenReturn(9).thenReturn(-1);
  exception.expect(IOException.class);
  exception.expectMessage("EOF");
  HFileBlock.positionalReadWithExtra(in, position, buf, bufOffset,
      necessaryLen, extraLen);
}
Project: hadoop    File: TestSmallBlock.java
private void checkFile(FileSystem fileSys, Path name) throws IOException {
  BlockLocation[] locations = fileSys.getFileBlockLocations(
      fileSys.getFileStatus(name), 0, fileSize);
  assertEquals("Number of blocks", fileSize, locations.length);
  FSDataInputStream stm = fileSys.open(name);
  byte[] expected = new byte[fileSize];
  if (simulatedStorage) {
    for (int i = 0; i < expected.length; ++i) {  
      expected[i] = SimulatedFSDataset.DEFAULT_DATABYTE;
    }
  } else {
    Random rand = new Random(seed);
    rand.nextBytes(expected);
  }
  // do a sanity check. Read the file
  byte[] actual = new byte[fileSize];
  stm.readFully(0, actual);
  checkAndEraseData(actual, 0, expected, "Read Sanity Test");
  stm.close();
}
Project: hadoop-oss    File: TFile.java
/**
 * Constructor
 * 
 * @param fsdis
 *          FS input stream of the TFile.
 * @param fileLength
 *          The length of TFile. This is required because we have no easy
 *          way of knowing the actual size of the input file through the
 *          File input stream.
 * @param conf
 * @throws IOException
 */
public Reader(FSDataInputStream fsdis, long fileLength, Configuration conf)
    throws IOException {
  readerBCF = new BCFile.Reader(fsdis, fileLength, conf);

  // first, read TFile meta
  BlockReader brMeta = readerBCF.getMetaBlock(TFileMeta.BLOCK_NAME);
  try {
    tfileMeta = new TFileMeta(brMeta);
  } finally {
    brMeta.close();
  }

  comparator = tfileMeta.getComparator();
  // Set begin and end locations.
  begin = new Location(0, 0);
  end = new Location(readerBCF.getBlockCount(), 0);
}
Project: hadoop-oss    File: SecureIOUtils.java
/**
 * Same as openFSDataInputStream except that it will run even if security is
 * off. This is used by unit tests.
 */
@VisibleForTesting
protected static FSDataInputStream forceSecureOpenFSDataInputStream(
    File file,
    String expectedOwner, String expectedGroup) throws IOException {
  final FSDataInputStream in =
      rawFilesystem.open(new Path(file.getAbsolutePath()));
  boolean success = false;
  try {
    Stat stat = NativeIO.POSIX.getFstat(in.getFileDescriptor());
    checkStat(file, stat.getOwner(), stat.getGroup(), expectedOwner,
        expectedGroup);
    success = true;
    return in;
  } finally {
    if (!success) {
      in.close();
    }
  }
}
Project: hadoop-oss    File: TestSFTPFileSystem.java
/**
 * Test writing to a file and reading its value.
 *
 * @throws Exception
 */
@Test
public void testReadFile() throws Exception {
  byte[] data = "yaks".getBytes();
  Path file = touch(localFs, name.getMethodName().toLowerCase(), data);
  FSDataInputStream is = null;
  try {
    is = sftpFs.open(file);
    byte[] b = new byte[data.length];
    is.read(b);
    assertArrayEquals(data, b);
  } finally {
    if (is != null) {
      is.close();
    }
  }
  assertTrue(sftpFs.delete(file, false));
}
Project: hadoop-oss    File: ContractTestUtils.java
/**
 * Read the file and convert to a byte dataset.
 * This implements readfully internally, so that it will read
 * in the file without ever having to seek()
 * @param fs filesystem
 * @param path path to read from
 * @param len length of data to read
 * @return the bytes
 * @throws IOException IO problems
 */
public static byte[] readDataset(FileSystem fs, Path path, int len)
    throws IOException {
  FSDataInputStream in = fs.open(path);
  byte[] dest = new byte[len];
  int offset =0;
  int nread = 0;
  try {
    while (nread < len) {
      int nbytes = in.read(dest, offset + nread, len - nread);
      if (nbytes < 0) {
        throw new EOFException("End of file reached before reading fully.");
      }
      nread += nbytes;
    }
  } finally {
    in.close();
  }
  return dest;
}
Project: hadoop    File: TestSeekBug.java
/**
 * Test (expected to throw IOE) for negative
 * <code>FSDataInputStream#seek</code> argument
 */
@Test (expected=IOException.class)
public void testNegativeSeek() throws IOException {
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  FileSystem fs = cluster.getFileSystem();
  try {
    Path seekFile = new Path("seekboundaries.dat");
    DFSTestUtil.createFile(
      fs,
      seekFile,
      ONEMB,
      fs.getDefaultReplication(seekFile),
      seed);
    FSDataInputStream stream = fs.open(seekFile);
    // Perform "safe seek" (expected to pass)
    stream.seek(65536);
    assertEquals(65536, stream.getPos());
    // expect IOE for this call
    stream.seek(-73);
  } finally {
    fs.close();
    cluster.shutdown();
  }
}
Project: hadoop    File: TestFiDataTransferProtocol.java
/**
 * 1. create files with dfs
 * 2. write 1 byte
 * 3. close file
 * 4. open the same file
 * 5. read the 1 byte and compare results
 */
static void write1byte(String methodName) throws IOException {
  final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf
      ).numDataNodes(REPLICATION + 1).build();
  final FileSystem dfs = cluster.getFileSystem();
  try {
    final Path p = new Path("/" + methodName + "/foo");
    final FSDataOutputStream out = createFile(dfs, p);
    out.write(1);
    out.close();

    final FSDataInputStream in = dfs.open(p);
    final int b = in.read();
    in.close();
    Assert.assertEquals(1, b);
  }
  finally {
    dfs.close();
    cluster.shutdown();
  }
}
Project: hadoop    File: TestLeaseRecovery2.java
private void verifyFile(FileSystem dfs, Path filepath, byte[] actual,
    int size) throws IOException {
  AppendTestUtil.LOG.info("Lease for file " +  filepath + " is recovered. "
      + "Validating its contents now...");

  // verify that file-size matches
  assertTrue("File should be " + size + " bytes, but is actually " +
             " found to be " + dfs.getFileStatus(filepath).getLen() +
             " bytes",
             dfs.getFileStatus(filepath).getLen() == size);

  // verify that there is enough data to read.
  System.out.println("File size is good. Now validating sizes from datanodes...");
  FSDataInputStream stmin = dfs.open(filepath);
  stmin.readFully(0, actual, 0, size);
  stmin.close();
}
Project: ditb    File: TestHFile.java
private void metablocks(final String compress) throws Exception {
  if (cacheConf == null) cacheConf = new CacheConfig(conf);
  Path mFile = new Path(ROOT_DIR, "meta.hfile");
  FSDataOutputStream fout = createFSOutput(mFile);
  HFileContext meta = new HFileContextBuilder()
                      .withCompression(AbstractHFileWriter.compressionByName(compress))
                      .withBlockSize(minBlockSize).build();
  Writer writer = HFile.getWriterFactory(conf, cacheConf)
      .withOutputStream(fout)
      .withFileContext(meta)
      .create();
  someTestingWithMetaBlock(writer);
  writer.close();
  fout.close();
  FSDataInputStream fin = fs.open(mFile);
  Reader reader = HFile.createReaderFromStream(mFile, fs.open(mFile),
      this.fs.getFileStatus(mFile).getLen(), cacheConf, conf);
  reader.loadFileInfo();
  // No data -- this should return false.
  assertFalse(reader.getScanner(false, false).seekTo());
  someReadingWithMetaBlock(reader);
  fs.delete(mFile, true);
  reader.close();
  fin.close();
}
Project: hadoop-oss    File: TestVLong.java
@Test
public void testVLongByte() throws IOException {
  FSDataOutputStream out = fs.create(path);
  for (int i = Byte.MIN_VALUE; i <= Byte.MAX_VALUE; ++i) {
    Utils.writeVLong(out, i);
  }
  out.close();
  Assert.assertEquals("Incorrect encoded size", (1 << Byte.SIZE) + 96, fs
      .getFileStatus(
      path).getLen());

  FSDataInputStream in = fs.open(path);
  for (int i = Byte.MIN_VALUE; i <= Byte.MAX_VALUE; ++i) {
    long n = Utils.readVLong(in);
    Assert.assertEquals(n, i);
  }
  in.close();
  fs.delete(path, false);
}
Project: hadoop    File: SecureIOUtils.java
/**
 * Same as openFSDataInputStream except that it will run even if security is
 * off. This is used by unit tests.
 */
@VisibleForTesting
protected static FSDataInputStream forceSecureOpenFSDataInputStream(
    File file,
    String expectedOwner, String expectedGroup) throws IOException {
  final FSDataInputStream in =
      rawFilesystem.open(new Path(file.getAbsolutePath()));
  boolean success = false;
  try {
    Stat stat = NativeIO.POSIX.getFstat(in.getFileDescriptor());
    checkStat(file, stat.getOwner(), stat.getGroup(), expectedOwner,
        expectedGroup);
    success = true;
    return in;
  } finally {
    if (!success) {
      in.close();
    }
  }
}
Project: ditb    File: SnapshotDescriptionUtils.java
/**
 * Read in the {@link org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription} stored for the snapshot in the passed directory
 * @param fs filesystem where the snapshot was taken
 * @param snapshotDir directory where the snapshot was stored
 * @return the stored snapshot description
 * @throws CorruptedSnapshotException if the
 * snapshot cannot be read
 */
public static SnapshotDescription readSnapshotInfo(FileSystem fs, Path snapshotDir)
    throws CorruptedSnapshotException {
  Path snapshotInfo = new Path(snapshotDir, SNAPSHOTINFO_FILE);
  try {
    FSDataInputStream in = null;
    try {
      in = fs.open(snapshotInfo);
      SnapshotDescription desc = SnapshotDescription.parseFrom(in);
      return desc;
    } finally {
      if (in != null) in.close();
    }
  } catch (IOException e) {
    throw new CorruptedSnapshotException("Couldn't read snapshot info from:" + snapshotInfo, e);
  }
}
Project: hadoop    File: TestDataJoin.java
private static void confirmOutput(Path out, JobConf job, int srcs)
    throws IOException {
  FileSystem fs = out.getFileSystem(job);
  FileStatus[] outlist = fs.listStatus(out);
  assertEquals(1, outlist.length);
  assertTrue(0 < outlist[0].getLen());
  FSDataInputStream in = fs.open(outlist[0].getPath());
  LineRecordReader rr = new LineRecordReader(in, 0, Integer.MAX_VALUE, job);
  LongWritable k = new LongWritable();
  Text v = new Text();
  int count = 0;
  while (rr.next(k, v)) {
    String[] vals = v.toString().split("\t");
    assertEquals(srcs + 1, vals.length);
    int[] ivals = new int[vals.length];
    for (int i = 0; i < vals.length; ++i)
      ivals[i] = Integer.parseInt(vals[i]);
    assertEquals(0, ivals[0] % (srcs * srcs));
    for (int i = 1; i < vals.length; ++i) {
      assertEquals((ivals[i] - (i - 1)) * srcs, 10 * ivals[0]);
    }
    ++count;
  }
  assertEquals(4, count);
}
Project: hadoop    File: AbstractContractOpenTest.java
@Test
public void testOpenFileTwice() throws Throwable {
  describe("verify that two opened file streams are independent");
  Path path = path("testopenfiletwice.txt");
  byte[] block = dataset(TEST_FILE_LEN, 0, 255);
  //this file now has a simple rule: offset => value
  createFile(getFileSystem(), path, false, block);
  //open first
  FSDataInputStream instream1 = getFileSystem().open(path);
  int c = instream1.read();
  assertEquals(0,c);
  FSDataInputStream instream2 = null;
  try {
    instream2 = getFileSystem().open(path);
    assertEquals("first read of instream 2", 0, instream2.read());
    assertEquals("second read of instream 1", 1, instream1.read());
    instream1.close();
    assertEquals("second read of instream 2", 1, instream2.read());
    //close instream1 again
    instream1.close();
  } finally {
    IOUtils.closeStream(instream1);
    IOUtils.closeStream(instream2);
  }
}
Project: flume-release-1.7.0    File: AvroEventSerializer.java
private Schema loadFromUrl(String schemaUrl) throws IOException {
  Configuration conf = new Configuration();
  Schema.Parser parser = new Schema.Parser();
  if (schemaUrl.toLowerCase(Locale.ENGLISH).startsWith("hdfs://")) {
    FileSystem fs = FileSystem.get(conf);
    FSDataInputStream input = null;
    try {
      input = fs.open(new Path(schemaUrl));
      return parser.parse(input);
    } finally {
      if (input != null) {
        input.close();
      }
    }
  } else {
    InputStream is = null;
    try {
      is = new URL(schemaUrl).openStream();
      return parser.parse(is);
    } finally {
      if (is != null) {
        is.close();
      }
    }
  }
}
Project: QDrill    File: BasicFormatMatcher.java
public boolean matches(DrillFileSystem fs, FileStatus status) throws IOException{
  if (ranges.isEmpty()) {
    return false;
  }
  final Range<Long> fileRange = Range.closedOpen( 0L, status.getLen());

  try (FSDataInputStream is = fs.open(status.getPath())) {
    for(RangeMagics rMagic : ranges) {
      Range<Long> r = rMagic.range;
      if (!fileRange.encloses(r)) {
        continue;
      }
      int len = (int) (r.upperEndpoint() - r.lowerEndpoint());
      byte[] bytes = new byte[len];
      is.readFully(r.lowerEndpoint(), bytes);
      for (byte[] magic : rMagic.magics) {
        if (Arrays.equals(magic, bytes)) {
          return true;
        }
      }
    }
  }
  return false;
}
Project: hadoop    File: TestPread.java
private void datanodeRestartTest(MiniDFSCluster cluster, FileSystem fileSys,
    Path name) throws IOException {
  // skip this test if using simulated storage since simulated blocks
  // don't survive datanode restarts.
  if (simulatedStorage) {
    return;
  }
  int numBlocks = 1;
  assertTrue(numBlocks <= DFSConfigKeys.DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_DEFAULT);
  byte[] expected = new byte[numBlocks * blockSize];
  Random rand = new Random(seed);
  rand.nextBytes(expected);
  byte[] actual = new byte[numBlocks * blockSize];
  FSDataInputStream stm = fileSys.open(name);
  // read a block and get block locations cached as a result
  stm.readFully(0, actual);
  checkAndEraseData(actual, 0, expected, "Pread Datanode Restart Setup");
  // restart all datanodes. it is expected that they will
  // restart on different ports, hence, cached block locations
  // will no longer work.
  assertTrue(cluster.restartDataNodes());
  cluster.waitActive();
  // verify the block can be read again using the same InputStream 
  // (via re-fetching of block locations from namenode). there is a 
  // 3 sec sleep in chooseDataNode(), which can be shortened for 
  // this test if configurable.
  stm.readFully(0, actual);
  checkAndEraseData(actual, 0, expected, "Pread Datanode Restart Test");
}
Project: monarch    File: ADataReader.java
static DiskRangeList readDiskRanges(FSDataInputStream file, HadoopShims.ZeroCopyReaderShim zcr,
    long base, DiskRangeList range, boolean doForceDirect) throws IOException {
  if (range == null)
    return null;
  DiskRangeList prev = range.prev;
  if (prev == null) {
    prev = new DiskRangeList.MutateHelper(range);
  }
  while (range != null) {
    if (range.hasData()) {
      range = range.next;
      continue;
    }
    int len = (int) (range.getEnd() - range.getOffset());
    long off = range.getOffset();
    ByteBuffer bb = null;
    if (file.getWrappedStream() instanceof ADataInputStream) {
      ADataInputStream ads = (ADataInputStream) file.getWrappedStream();
      bb = ByteBuffer.wrap(ads.getBuffer(), (int) (base + off), len);
    } else {
      // Don't use HDFS ByteBuffer API because it has no readFully, and is buggy and pointless.
      byte[] buffer = new byte[len];
      file.readFully((base + off), buffer, 0, buffer.length);
      if (doForceDirect) {
        bb = ByteBuffer.allocateDirect(len);
        bb.put(buffer);
        bb.position(0);
        bb.limit(len);
      } else {
        bb = ByteBuffer.wrap(buffer);
      }
    }
    range = range.replaceSelfWith(new BufferChunk(bb, range.getOffset()));
    range = range.next;
  }
  return prev.next;
}
Project: hadoop    File: StatePool.java
private void reloadState(Path stateFile, Configuration conf) 
throws Exception {
  FileSystem fs = stateFile.getFileSystem(conf);
  if (fs.exists(stateFile)) {
    System.out.println("Reading state from " + stateFile.toString());
    FSDataInputStream in = fs.open(stateFile);

    read(in);
    in.close();
  } else {
    System.out.println("No state information found for " + stateFile);
  }
}
Project: hadoop    File: HadoopArchives.java
public void copyData(Path input, FSDataInputStream fsin, 
    FSDataOutputStream fout, Reporter reporter) throws IOException {
  try {
    for (int cbread=0; (cbread = fsin.read(buffer))>= 0;) {
      fout.write(buffer, 0,cbread);
      reporter.progress();
    }
  } finally {
    fsin.close();
  }
}
Project: hadoop    File: TestPread.java
private void doPread(FSDataInputStream stm, long position, byte[] buffer,
                     int offset, int length) throws IOException {
  int nread = 0;
  long totalRead = 0;
  DFSInputStream dfstm = null;

  if (stm.getWrappedStream() instanceof DFSInputStream) {
    dfstm = (DFSInputStream) (stm.getWrappedStream());
    totalRead = dfstm.getReadStatistics().getTotalBytesRead();
  }

  while (nread < length) {
    int nbytes =
        stm.read(position + nread, buffer, offset + nread, length - nread);
    assertTrue("Error in pread", nbytes > 0);
    nread += nbytes;
  }

  if (dfstm != null) {
    if (isHedgedRead) {
      assertTrue("Expected read statistic to be incremented", length <= dfstm
          .getReadStatistics().getTotalBytesRead() - totalRead);
    } else {
      assertEquals("Expected read statistic to be incremented", length, dfstm
          .getReadStatistics().getTotalBytesRead() - totalRead);
    }
  }
}
Project: sstable-adaptor    File: ChannelProxy.java
public ChannelProxy sharedCopy()
{
    try {
        FSDataInputStream inputStream = HadoopFileUtils.buildInputStream(this.fs, this.filePath,
                                                                         this.bufferSize);
        Cleanup cleanup = new Cleanup(this.filePath(), inputStream);
        return new ChannelProxy(cleanup, this.fs, inputStream, this.filePath, this.bufferSize, this.conf);
    } catch (IOException e) {
        logger.error(e.getMessage());
        throw new RuntimeException((e.getCause()));
    }
}
Project: hadoop    File: TestCopyFiles.java
private static boolean checkFiles(FileSystem fs, String topdir, MyFile[] files,
    boolean existingOnly) throws IOException {
  Path root = new Path(topdir);

  for (int idx = 0; idx < files.length; idx++) {
    Path fPath = new Path(root, files[idx].getName());
    try {
      fs.getFileStatus(fPath);
      FSDataInputStream in = fs.open(fPath);
      byte[] toRead = new byte[files[idx].getSize()];
      byte[] toCompare = new byte[files[idx].getSize()];
      Random rb = new Random(files[idx].getSeed());
      rb.nextBytes(toCompare);
      assertEquals("Cannnot read file.", toRead.length, in.read(toRead));
      in.close();
      for (int i = 0; i < toRead.length; i++) {
        if (toRead[i] != toCompare[i]) {
          return false;
        }
      }
      toRead = null;
      toCompare = null;
    }
    catch(FileNotFoundException fnfe) {
      if (!existingOnly) {
        throw fnfe;
      }
    }
  }

  return true;
}
Project: hadoop    File: WebHdfsFileSystem.java
@Override
public FSDataInputStream open(final Path f, final int buffersize
    ) throws IOException {
  statistics.incrementReadOps(1);
  final HttpOpParam.Op op = GetOpParam.Op.OPEN;
  // use a runner so the open can recover from an invalid token
  FsPathConnectionRunner runner =
      new FsPathConnectionRunner(op, f, new BufferSizeParam(buffersize));
  return new FSDataInputStream(new OffsetUrlInputStream(
      new UnresolvedUrlOpener(runner), new OffsetUrlOpener(null)));
}
Project: hadoop    File: SpillRecord.java
public SpillRecord(Path indexFileName, JobConf job, Checksum crc,
                   String expectedIndexOwner)
    throws IOException {

  final FileSystem rfs = FileSystem.getLocal(job).getRaw();
  final FSDataInputStream in =
      SecureIOUtils.openFSDataInputStream(new File(indexFileName.toUri()
          .getRawPath()), expectedIndexOwner, null);
  try {
    final long length = rfs.getFileStatus(indexFileName).getLen();
    final int partitions = (int) length / MAP_OUTPUT_INDEX_RECORD_LENGTH;
    final int size = partitions * MAP_OUTPUT_INDEX_RECORD_LENGTH;
    buf = ByteBuffer.allocate(size);
    if (crc != null) {
      crc.reset();
      CheckedInputStream chk = new CheckedInputStream(in, crc);
      IOUtils.readFully(chk, buf.array(), 0, size);

      if (chk.getChecksum().getValue() != in.readLong()) {
        throw new ChecksumException("Checksum error reading spill index: " +
                              indexFileName, -1);
      }
    } else {
      IOUtils.readFully(in, buf.array(), 0, size);
    }
    entries = buf.asLongBuffer();
  } finally {
    in.close();
  }
}
Project: hadoop    File: TestHftpFileSystem.java
/**
 * Test file creation and access with file names that need encoding.
 */
@Test
public void testFileNameEncoding() throws IOException, URISyntaxException {
  for (Path p : TEST_PATHS) {
    // Create and access the path (data and streamFile servlets)
    FSDataOutputStream out = hdfs.create(p, true);
    out.writeBytes("0123456789");
    out.close();
    FSDataInputStream in = hftpFs.open(p);
    assertEquals('0', in.read());
    in.close();

    // Check the file status matches the path. Hftp returns a FileStatus
    // with the entire URI, extract the path part.
    assertEquals(p, new Path(hftpFs.getFileStatus(p).getPath().toUri()
        .getPath()));

    // Test list status (listPath servlet)
    assertEquals(1, hftpFs.listStatus(p).length);

    // Test content summary (contentSummary servlet)
    assertNotNull("No content summary", hftpFs.getContentSummary(p));

    // Test checksums (fileChecksum and getFileChecksum servlets)
    assertNotNull("No file checksum", hftpFs.getFileChecksum(p));
  }
}
Project: dremio-oss    File: FileSystemWrapper.java
/**
 * If OperatorStats are provided, return an instrumented {@link org.apache.hadoop.fs.FSDataInputStream}.
 */
@Override
public FSDataInputStream open(Path f, int bufferSize) throws IOException {
  try {
    return newFSDataInputStreamWrapper(f, underlyingFs.open(f, bufferSize));
  } catch(FSError e) {
    throw propagateFSError(e);
  }
}
Project: angel    File: ModelLoader.java
private static void loadDenseFloatPartition(DenseFloatModel model, FSDataInputStream input,
    ModelPartitionMeta partMeta) throws IOException {
  int rowNum = input.readInt();
  int startCol = (int) partMeta.getStartCol();
  int endCol = (int) partMeta.getEndCol();
  int rowId = 0;
  float[] row = null;
  for (int i = 0; i < rowNum; i++) {
    rowId = input.readInt();
    row = model.getRow(rowId);
    for (int j = startCol; j < endCol; j++) {
      row[j] = input.readFloat();
    }
  }
}
Project: angel    File: ModelLoader.java
public static float[] loadDenseFloatRowFromPartition(FSDataInputStream input,
    ModelPartitionMeta partMeta, int rowId)
    throws IOException {
  RowOffset rowOffset = partMeta.getRowMetas().get(rowId);
  input.seek(rowOffset.getOffset());
  Preconditions.checkState (input.readInt() == rowId);
  int num = (int) (partMeta.getEndCol() - partMeta.getStartCol());
  float[] row = new float[num];
  for (int i = 0; i < num; i++) {
    row[i] = input.readFloat();
  }
  return row;
}
Project: hadoop    File: DFSClientCache.java
FSDataInputStream getDfsInputStream(String userName, String inodePath) {
  DFSInputStreamCaheKey k = new DFSInputStreamCaheKey(userName, inodePath);
  FSDataInputStream s = null;
  try {
    s = inputstreamCache.get(k);
  } catch (ExecutionException e) {
    LOG.warn("Failed to create DFSInputStream for user:" + userName
        + " Cause:" + e);
  }
  return s;
}
Project: dremio-oss    File: ColumnChunkIncReadStore.java
public void close() throws IOException {
  for (FSDataInputStream stream : streams) {
    stream.close();
  }
  for (ColumnChunkIncPageReader reader : columns.values()) {
    reader.close();
  }
}