Usage examples for the Java class org.apache.hadoop.hbase.io.FileLink
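
FileLink is HBase's abstraction for a file that may live at any one of several alternative locations (typically the original store-file path plus its archive path): open() resolves to the first location that exists and keeps reading even if the file is renamed or archived underneath the reader. The following is a minimal sketch of that usage pattern, not taken from the projects below; the Configuration and the two candidate paths are hypothetical placeholders.

import java.util.Arrays;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.io.FileLink;

public class FileLinkReadSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();   // assumed to point at a reachable filesystem
    FileSystem fs = FileSystem.get(conf);
    // Hypothetical candidate locations; FileLink tries them in order and keeps
    // reading even if the file is moved from the first location to the second.
    Path original = new Path("/hbase/data/default/t1/1588230740/cf/0906db948c48");
    Path archived = new Path("/hbase/archive/data/default/t1/1588230740/cf/0906db948c48");

    FileLink link = new FileLink(Arrays.asList(original, archived));
    try (FSDataInputStream in = link.open(fs)) {
      byte[] buf = new byte[8192];
      int n = in.read(buf);
      System.out.println("Read " + n + " bytes from " + link.getAvailablePath(fs));
    }
  }
}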

Project: ditb    File: TestUpgradeTo96.java
@Test
public void testHFileLink() throws Exception {
  // pass a link, and verify that correct paths are returned.
  Path rootDir = FSUtils.getRootDir(TEST_UTIL.getConfiguration());
  Path aFileLink = new Path(rootDir, "table/2086db948c48/cf/table=21212abcdc33-0906db948c48");
  Path preNamespaceTablePath = new Path(rootDir, "table/21212abcdc33/cf/0906db948c48");
  Path preNamespaceArchivePath =
    new Path(rootDir, ".archive/table/21212abcdc33/cf/0906db948c48");
  Path preNamespaceTempPath = new Path(rootDir, ".tmp/table/21212abcdc33/cf/0906db948c48");
  boolean preNSTablePathExists = false;
  boolean preNSArchivePathExists = false;
  boolean preNSTempPathExists = false;
  assertTrue(HFileLink.isHFileLink(aFileLink));
  HFileLink hFileLink = 
    HFileLink.buildFromHFileLinkPattern(TEST_UTIL.getConfiguration(), aFileLink);
  assertTrue(hFileLink.getArchivePath().toString().startsWith(rootDir.toString()));

  HFileV1Detector t = new HFileV1Detector();
  t.setConf(TEST_UTIL.getConfiguration());
  FileLink fileLink = t.getFileLinkWithPreNSPath(aFileLink);
  // Assert it has 6 paths to check (2 NS, 2 pre-NS, and 2 .tmp).
  assertTrue(fileLink.getLocations().length == 6);
  for (Path p : fileLink.getLocations()) {
    if (p.equals(preNamespaceArchivePath)) preNSArchivePathExists = true;
    if (p.equals(preNamespaceTablePath)) preNSTablePathExists = true;
    if (p.equals(preNamespaceTempPath)) preNSTempPathExists = true;
  }
  assertTrue(preNSArchivePathExists && preNSTablePathExists && preNSTempPathExists);
}
Project: pbase    File: FSDataInputStreamWrapper.java
private FSDataInputStreamWrapper(FileSystem fs, FileLink link, Path path) throws IOException {
  assert (path == null) != (link == null);
  this.path = path;
  this.link = link;
  this.doCloseStreams = true;
  // If the fs is not an instance of HFileSystem, then create an instance of HFileSystem
  // that wraps over the specified fs. In this case, we will not be able to avoid
  // checksumming inside the filesystem.
  this.hfs = (fs instanceof HFileSystem) ? (HFileSystem)fs : new HFileSystem(fs);

  // Initially we are going to read the tail block. Open the reader w/FS checksum.
  this.useHBaseChecksumConfigured = this.useHBaseChecksum = false;
  this.stream = (link != null) ? link.open(hfs) : hfs.open(path);
}
Project: pbase    File: TestUpgradeTo96.java
@Test
public void testHFileLink() throws Exception {
  // pass a link, and verify that correct paths are returned.
  Path rootDir = FSUtils.getRootDir(TEST_UTIL.getConfiguration());
  Path aFileLink = new Path(rootDir, "table/2086db948c48/cf/table=21212abcdc33-0906db948c48");
  Path preNamespaceTablePath = new Path(rootDir, "table/21212abcdc33/cf/0906db948c48");
  Path preNamespaceArchivePath =
    new Path(rootDir, ".archive/table/21212abcdc33/cf/0906db948c48");
  Path preNamespaceTempPath = new Path(rootDir, ".tmp/table/21212abcdc33/cf/0906db948c48");
  boolean preNSTablePathExists = false;
  boolean preNSArchivePathExists = false;
  boolean preNSTempPathExists = false;
  assertTrue(HFileLink.isHFileLink(aFileLink));
  HFileLink hFileLink = new HFileLink(TEST_UTIL.getConfiguration(), aFileLink);
  assertTrue(hFileLink.getArchivePath().toString().startsWith(rootDir.toString()));

  HFileV1Detector t = new HFileV1Detector();
  t.setConf(TEST_UTIL.getConfiguration());
  FileLink fileLink = t.getFileLinkWithPreNSPath(aFileLink);
  // Assert it has 6 paths to check (2 NS, 2 pre-NS, and 2 .tmp).
  assertTrue(fileLink.getLocations().length == 6);
  for (Path p : fileLink.getLocations()) {
    if (p.equals(preNamespaceArchivePath)) preNSArchivePathExists = true;
    if (p.equals(preNamespaceTablePath)) preNSTablePathExists = true;
    if (p.equals(preNamespaceTempPath)) preNSTempPathExists = true;
  }
  assertTrue(preNSArchivePathExists && preNSTablePathExists && preNSTempPathExists);
}
Project: HIndex    File: FSDataInputStreamWrapper.java
private FSDataInputStreamWrapper(FileSystem fs, FileLink link, Path path) throws IOException {
  assert (path == null) != (link == null);
  this.path = path;
  this.link = link;
  this.doCloseStreams = true;
  // If the fs is not an instance of HFileSystem, then create an instance of HFileSystem
  // that wraps over the specified fs. In this case, we will not be able to avoid
  // checksumming inside the filesystem.
  this.hfs = (fs instanceof HFileSystem) ? (HFileSystem)fs : new HFileSystem(fs);

  // Initially we are going to read the tail block. Open the reader w/FS checksum.
  this.useHBaseChecksumConfigured = this.useHBaseChecksum = false;
  this.stream = (link != null) ? link.open(hfs) : hfs.open(path);
}
Project: HIndex    File: TestUpgradeTo96.java
@Test
public void testHFileLink() throws Exception {
  // pass a link, and verify that correct paths are returned.
  Path rootDir = FSUtils.getRootDir(TEST_UTIL.getConfiguration());
  Path aFileLink = new Path(rootDir, "table/2086db948c48/cf/table=21212abcdc33-0906db948c48");
  Path preNamespaceTablePath = new Path(rootDir, "table/21212abcdc33/cf/0906db948c48");
  Path preNamespaceArchivePath =
    new Path(rootDir, ".archive/table/21212abcdc33/cf/0906db948c48");
  Path preNamespaceTempPath = new Path(rootDir, ".tmp/table/21212abcdc33/cf/0906db948c48");
  boolean preNSTablePathExists = false;
  boolean preNSArchivePathExists = false;
  boolean preNSTempPathExists = false;
  assertTrue(HFileLink.isHFileLink(aFileLink));
  HFileLink hFileLink = new HFileLink(TEST_UTIL.getConfiguration(), aFileLink);
  assertTrue(hFileLink.getArchivePath().toString().startsWith(rootDir.toString()));

  HFileV1Detector t = new HFileV1Detector();
  t.setConf(TEST_UTIL.getConfiguration());
  FileLink fileLink = t.getFileLinkWithPreNSPath(aFileLink);
  // Assert it has 6 paths to check (2 NS, 2 pre-NS, and 2 .tmp).
  assertTrue(fileLink.getLocations().length == 6);
  for (Path p : fileLink.getLocations()) {
    if (p.equals(preNamespaceArchivePath)) preNSArchivePathExists = true;
    if (p.equals(preNamespaceTablePath)) preNSTablePathExists = true;
    if (p.equals(preNamespaceTempPath)) preNSTempPathExists = true;
  }
  assertTrue(preNSArchivePathExists && preNSTablePathExists && preNSTempPathExists);
}
Project: hbase    File: ExportSnapshot.java
private FileLink getFileLink(Path path, Configuration conf) throws IOException{
  String regionName = HFileLink.getReferencedRegionName(path.getName());
  TableName tableName = HFileLink.getReferencedTableName(path.getName());
  if(MobUtils.getMobRegionInfo(tableName).getEncodedName().equals(regionName)) {
    return HFileLink.buildFromHFileLinkPattern(MobUtils.getQualifiedMobRootDir(conf),
            HFileArchiveUtil.getArchivePath(conf), path);
  }
  return HFileLink.buildFromHFileLinkPattern(inputRoot, inputArchive, path);
}
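
The getFileLink() helper above chooses between the MOB root and the snapshot's input root/archive before resolving the link. As a smaller, hedged companion sketch (reusing the link-pattern path from the TestUpgradeTo96 snippets above, and assuming a default hbase configuration with hbase.rootdir set), buildFromHFileLinkPattern() can be driven directly to inspect the candidate locations a link will try:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.io.HFileLink;
import org.apache.hadoop.hbase.util.FSUtils;

public class HFileLinkResolveSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    Path rootDir = FSUtils.getRootDir(conf);
    // Link-pattern name: referenced table "table", region 21212abcdc33, hfile 0906db948c48
    // (the same example path used by the TestUpgradeTo96 tests above).
    Path linkPath = new Path(rootDir, "table/2086db948c48/cf/table=21212abcdc33-0906db948c48");

    HFileLink link = HFileLink.buildFromHFileLinkPattern(conf, linkPath);
    System.out.println("archive path: " + link.getArchivePath());
    for (Path candidate : link.getLocations()) {
      System.out.println("candidate:    " + candidate);
    }
  }
}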
Project: PyroDB    File: FSDataInputStreamWrapper.java
private FSDataInputStreamWrapper(FileSystem fs, FileLink link, Path path) throws IOException {
  assert (path == null) != (link == null);
  this.path = path;
  this.link = link;
  this.doCloseStreams = true;
  // If the fs is not an instance of HFileSystem, then create an instance of HFileSystem
  // that wraps over the specified fs. In this case, we will not be able to avoid
  // checksumming inside the filesystem.
  this.hfs = (fs instanceof HFileSystem) ? (HFileSystem)fs : new HFileSystem(fs);

  // Initially we are going to read the tail block. Open the reader w/FS checksum.
  this.useHBaseChecksumConfigured = this.useHBaseChecksum = false;
  this.stream = (link != null) ? link.open(hfs) : hfs.open(path);
}
Project: PyroDB    File: TestUpgradeTo96.java
@Test
public void testHFileLink() throws Exception {
  // pass a link, and verify that correct paths are returned.
  Path rootDir = FSUtils.getRootDir(TEST_UTIL.getConfiguration());
  Path aFileLink = new Path(rootDir, "table/2086db948c48/cf/table=21212abcdc33-0906db948c48");
  Path preNamespaceTablePath = new Path(rootDir, "table/21212abcdc33/cf/0906db948c48");
  Path preNamespaceArchivePath =
    new Path(rootDir, ".archive/table/21212abcdc33/cf/0906db948c48");
  Path preNamespaceTempPath = new Path(rootDir, ".tmp/table/21212abcdc33/cf/0906db948c48");
  boolean preNSTablePathExists = false;
  boolean preNSArchivePathExists = false;
  boolean preNSTempPathExists = false;
  assertTrue(HFileLink.isHFileLink(aFileLink));
  HFileLink hFileLink = new HFileLink(TEST_UTIL.getConfiguration(), aFileLink);
  assertTrue(hFileLink.getArchivePath().toString().startsWith(rootDir.toString()));

  HFileV1Detector t = new HFileV1Detector();
  t.setConf(TEST_UTIL.getConfiguration());
  FileLink fileLink = t.getFileLinkWithPreNSPath(aFileLink);
  // Assert it has 6 paths to check (2 NS, 2 pre-NS, and 2 .tmp).
  assertTrue(fileLink.getLocations().length == 6);
  for (Path p : fileLink.getLocations()) {
    if (p.equals(preNamespaceArchivePath)) preNSArchivePathExists = true;
    if (p.equals(preNamespaceTablePath)) preNSTablePathExists = true;
    if (p.equals(preNamespaceTempPath)) preNSTempPathExists = true;
  }
  assertTrue(preNSArchivePathExists && preNSTablePathExists && preNSTempPathExists);
}
Project: c5    File: FSDataInputStreamWrapper.java
private FSDataInputStreamWrapper(FileSystem fs, FileLink link, Path path) throws IOException {
  assert (path == null) != (link == null);
  this.path = path;
  this.link = link;
  this.doCloseStreams = true;
  // If the fs is not an instance of HFileSystem, then create an instance of HFileSystem
  // that wraps over the specified fs. In this case, we will not be able to avoid
  // checksumming inside the filesystem.
  this.hfs = (fs instanceof HFileSystem) ? (HFileSystem)fs : new HFileSystem(fs);

  // Initially we are going to read the tail block. Open the reader w/FS checksum.
  this.useHBaseChecksumConfigured = this.useHBaseChecksum = false;
  this.stream = (link != null) ? link.open(hfs) : hfs.open(path);
}
Project: c5    File: TestUpgradeTo96.java
@Test
public void testHFileLink() throws Exception {
  // pass a link, and verify that correct paths are returned.
  Path rootDir = FSUtils.getRootDir(TEST_UTIL.getConfiguration());
  Path aFileLink = new Path(rootDir, "table/2086db948c48/cf/table=21212abcdc33-0906db948c48");
  Path preNamespaceTablePath = new Path(rootDir, "table/21212abcdc33/cf/0906db948c48");
  Path preNamespaceArchivePath =
    new Path(rootDir, ".archive/table/21212abcdc33/cf/0906db948c48");
  Path preNamespaceTempPath = new Path(rootDir, ".tmp/table/21212abcdc33/cf/0906db948c48");
  boolean preNSTablePathExists = false;
  boolean preNSArchivePathExists = false;
  boolean preNSTempPathExists = false;
  assertTrue(HFileLink.isHFileLink(aFileLink));
  HFileLink hFileLink = new HFileLink(TEST_UTIL.getConfiguration(), aFileLink);
  assertTrue(hFileLink.getArchivePath().toString().startsWith(rootDir.toString()));

  HFileV1Detector t = new HFileV1Detector();
  t.setConf(TEST_UTIL.getConfiguration());
  FileLink fileLink = t.getFileLinkWithPreNSPath(aFileLink);
  // Assert it has 6 paths to check (2 NS, 2 pre-NS, and 2 .tmp).
  assertTrue(fileLink.getLocations().length == 6);
  for (Path p : fileLink.getLocations()) {
    if (p.equals(preNamespaceArchivePath)) preNSArchivePathExists = true;
    if (p.equals(preNamespaceTablePath)) preNSTablePathExists = true;
    if (p.equals(preNamespaceTempPath)) preNSTempPathExists = true;
  }
  assertTrue(preNSArchivePathExists && preNSTablePathExists && preNSTempPathExists);
}
Project: ditb    File: HFileV1Detector.java
/**
 * Each region is processed by a separate handler. If an HRegion has an hfileV1, its path is
 * returned as the future result; otherwise, a null value is returned.
 * @param regionDir Region to process.
 * @return corresponding Future object.
 */
private Future<Path> processRegion(final Path regionDir) {
  LOG.debug("processing region: " + regionDir);
  Callable<Path> regionCallable = new Callable<Path>() {
    @Override
    public Path call() throws Exception {
      for (Path familyDir : FSUtils.getFamilyDirs(fs, regionDir)) {
        FileStatus[] storeFiles = FSUtils.listStatus(fs, familyDir);
        if (storeFiles == null || storeFiles.length == 0) continue;
        for (FileStatus storeFile : storeFiles) {
          Path storeFilePath = storeFile.getPath();
          FSDataInputStream fsdis = null;
          long lenToRead = 0;
          try {
            // check whether this path is a reference.
            if (StoreFileInfo.isReference(storeFilePath)) continue;
            // check whether this path is a HFileLink.
            else if (HFileLink.isHFileLink(storeFilePath)) {
              FileLink fLink = getFileLinkWithPreNSPath(storeFilePath);
              fsdis = fLink.open(fs);
              lenToRead = fLink.getFileStatus(fs).getLen();
            } else {
              // a regular hfile
              fsdis = fs.open(storeFilePath);
              lenToRead = storeFile.getLen();
            }
            int majorVersion = computeMajorVersion(fsdis, lenToRead);
            if (majorVersion == 1) {
              hFileV1Set.add(storeFilePath);
              // return this region path, as it needs to be compacted.
              return regionDir;
            }
            if (majorVersion > 2 || majorVersion < 1) throw new IllegalArgumentException(
                "Incorrect major version: " + majorVersion);
          } catch (Exception iae) {
            corruptedHFiles.add(storeFilePath);
            LOG.error("Got exception while reading trailer for file: "+ storeFilePath, iae);
          } finally {
            if (fsdis != null) fsdis.close();
          }
        }
      }
      return null;
    }

    private int computeMajorVersion(FSDataInputStream istream, long fileSize)
     throws IOException {
      //read up the last int of the file. Major version is in the last 3 bytes.
      long seekPoint = fileSize - Bytes.SIZEOF_INT;
      if (seekPoint < 0)
        throw new IllegalArgumentException("File too small, no major version found");

      // Read the version from the last int of the file.
      istream.seek(seekPoint);
      int version = istream.readInt();
      // Extract and return the major version
      return version & 0x00ffffff;
    }
  };
  Future<Path> f = exec.submit(regionCallable);
  return f;
}
Project: LCIndex-HBase-0.94.16    File: TestFileLink.java
/**
 * Test that link is still readable even when the current file gets renamed.
 */
private void testLinkReadDuringRename(FileSystem fs, Path rootDir) throws IOException {
  Path originalPath = new Path(rootDir, "test.file");
  Path archivedPath = new Path(rootDir, "archived.file");

  writeSomeData(fs, originalPath, 256 << 20, (byte)2);

  List<Path> files = new ArrayList<Path>();
  files.add(originalPath);
  files.add(archivedPath);

  FileLink link = new FileLink(files);
  FSDataInputStream in = link.open(fs);
  try {
    byte[] data = new byte[8192];
    long size = 0;

    // Read from origin
    int n = in.read(data);
    dataVerify(data, n, (byte)2);
    size += n;

    // Move origin to archive
    assertFalse(fs.exists(archivedPath));
    fs.rename(originalPath, archivedPath);
    assertFalse(fs.exists(originalPath));
    assertTrue(fs.exists(archivedPath));

    // Try to read to the end
    while ((n = in.read(data)) > 0) {
      dataVerify(data, n, (byte)2);
      size += n;
    }

    assertEquals(256 << 20, size);
  } finally {
    in.close();
    if (fs.exists(originalPath)) fs.delete(originalPath);
    if (fs.exists(archivedPath)) fs.delete(archivedPath);
  }
}
Project: LCIndex-HBase-0.94.16    File: TestFileLink.java
/**
 * Test that link is still readable even when the current file gets deleted.
 *
 * NOTE: This test is valid only on HDFS.
 * When a file is deleted from a local file-system, it is simply 'unlinked'.
 * The inode, which contains the file's data, is not deleted until all
 * processes have finished with it.
 * In HDFS, when a request exceeds the cached block locations,
 * a query to the namenode is performed using the filename,
 * and the deleted file no longer exists (FileNotFoundException).
 */
@Test
public void testHDFSLinkReadDuringDelete() throws Exception {
  HBaseTestingUtility testUtil = new HBaseTestingUtility();
  Configuration conf = testUtil.getConfiguration();
  conf.setInt("dfs.blocksize", 1024 * 1024);
  conf.setInt("dfs.client.read.prefetch.size", 2 * 1024 * 1024);

  testUtil.startMiniDFSCluster(1);
  MiniDFSCluster cluster = testUtil.getDFSCluster();
  FileSystem fs = cluster.getFileSystem();
  assertEquals("hdfs", fs.getUri().getScheme());

  try {
    List<Path> files = new ArrayList<Path>();
    for (int i = 0; i < 3; i++) {
      Path path = new Path(String.format("test-data-%d", i));
      writeSomeData(fs, path, 1 << 20, (byte)i);
      files.add(path);
    }

    FileLink link = new FileLink(files);
    FSDataInputStream in = link.open(fs);
    try {
      byte[] data = new byte[8192];
      int n;

      // Switch to file 1
      n = in.read(data);
      dataVerify(data, n, (byte)0);
      fs.delete(files.get(0));
      skipBuffer(in, (byte)0);

      // Switch to file 2
      n = in.read(data);
      dataVerify(data, n, (byte)1);
      fs.delete(files.get(1));
      skipBuffer(in, (byte)1);

      // Switch to file 3
      n = in.read(data);
      dataVerify(data, n, (byte)2);
      fs.delete(files.get(2));
      skipBuffer(in, (byte)2);

      // No more files available
      try {
        n = in.read(data);
        assert(n <= 0);
      } catch (FileNotFoundException e) {
        assertTrue(true);
      }
    } finally {
      in.close();
    }
  } finally {
    testUtil.shutdownMiniCluster();
  }
}
Project: pbase    File: HFileV1Detector.java
/**
 * Each region is processed by a separate handler. If an HRegion has an hfileV1, its path is
 * returned as the future result; otherwise, a null value is returned.
 * @param regionDir Region to process.
 * @return corresponding Future object.
 */
private Future<Path> processRegion(final Path regionDir) {
  LOG.debug("processing region: " + regionDir);
  Callable<Path> regionCallable = new Callable<Path>() {
    @Override
    public Path call() throws Exception {
      for (Path familyDir : FSUtils.getFamilyDirs(fs, regionDir)) {
        FileStatus[] storeFiles = FSUtils.listStatus(fs, familyDir);
        if (storeFiles == null || storeFiles.length == 0) continue;
        for (FileStatus storeFile : storeFiles) {
          Path storeFilePath = storeFile.getPath();
          FSDataInputStream fsdis = null;
          long lenToRead = 0;
          try {
            // check whether this path is a reference.
            if (StoreFileInfo.isReference(storeFilePath)) continue;
            // check whether this path is a HFileLink.
            else if (HFileLink.isHFileLink(storeFilePath)) {
              FileLink fLink = getFileLinkWithPreNSPath(storeFilePath);
              fsdis = fLink.open(fs);
              lenToRead = fLink.getFileStatus(fs).getLen();
            } else {
              // a regular hfile
              fsdis = fs.open(storeFilePath);
              lenToRead = storeFile.getLen();
            }
            int majorVersion = computeMajorVersion(fsdis, lenToRead);
            if (majorVersion == 1) {
              hFileV1Set.add(storeFilePath);
              // return this region path, as it needs to be compacted.
              return regionDir;
            }
            if (majorVersion > 2 || majorVersion < 1) throw new IllegalArgumentException(
                "Incorrect major version: " + majorVersion);
          } catch (Exception iae) {
            corruptedHFiles.add(storeFilePath);
            LOG.error("Got exception while reading trailer for file: "+ storeFilePath, iae);
          } finally {
            if (fsdis != null) fsdis.close();
          }
        }
      }
      return null;
    }

    private int computeMajorVersion(FSDataInputStream istream, long fileSize)
     throws IOException {
      //read up the last int of the file. Major version is in the last 3 bytes.
      long seekPoint = fileSize - Bytes.SIZEOF_INT;
      if (seekPoint < 0)
        throw new IllegalArgumentException("File too small, no major version found");

      // Read the version from the last int of the file.
      istream.seek(seekPoint);
      int version = istream.readInt();
      // Extract and return the major version
      return version & 0x00ffffff;
    }
  };
  Future<Path> f = exec.submit(regionCallable);
  return f;
}
Project: pbase    File: FSDataInputStreamWrapper.java
public FSDataInputStreamWrapper(FileSystem fs, FileLink link) throws IOException {
  this(fs, link, null);
}
Project: HIndex    File: HFileV1Detector.java
/**
 * Each region is processed by a separate handler. If an HRegion has an hfileV1, its path is
 * returned as the future result; otherwise, a null value is returned.
 * @param regionDir Region to process.
 * @return corresponding Future object.
 */
private Future<Path> processRegion(final Path regionDir) {
  LOG.debug("processing region: " + regionDir);
  Callable<Path> regionCallable = new Callable<Path>() {
    @Override
    public Path call() throws Exception {
      for (Path familyDir : FSUtils.getFamilyDirs(fs, regionDir)) {
        FileStatus[] storeFiles = FSUtils.listStatus(fs, familyDir);
        if (storeFiles == null || storeFiles.length == 0) continue;
        for (FileStatus storeFile : storeFiles) {
          Path storeFilePath = storeFile.getPath();
          FSDataInputStream fsdis = null;
          long lenToRead = 0;
          try {
            // check whether this path is a reference.
            if (StoreFileInfo.isReference(storeFilePath)) continue;
            // check whether this path is a HFileLink.
            else if (HFileLink.isHFileLink(storeFilePath)) {
              FileLink fLink = getFileLinkWithPreNSPath(storeFilePath);
              fsdis = fLink.open(fs);
              lenToRead = fLink.getFileStatus(fs).getLen();
            } else {
              // a regular hfile
              fsdis = fs.open(storeFilePath);
              lenToRead = storeFile.getLen();
            }
            int majorVersion = computeMajorVersion(fsdis, lenToRead);
            if (majorVersion == 1) {
              hFileV1Set.add(storeFilePath);
              // return this region path, as it needs to be compacted.
              return regionDir;
            }
            if (majorVersion > 2 || majorVersion < 1) throw new IllegalArgumentException(
                "Incorrect major version: " + majorVersion);
          } catch (Exception iae) {
            corruptedHFiles.add(storeFilePath);
            LOG.error("Got exception while reading trailer for file: "+ storeFilePath, iae);
          } finally {
            if (fsdis != null) fsdis.close();
          }
        }
      }
      return null;
    }

    private int computeMajorVersion(FSDataInputStream istream, long fileSize)
     throws IOException {
      //read up the last int of the file. Major version is in the last 3 bytes.
      long seekPoint = fileSize - Bytes.SIZEOF_INT;
      if (seekPoint < 0)
        throw new IllegalArgumentException("File too small, no major version found");

      // Read the version from the last int of the file.
      istream.seek(seekPoint);
      int version = istream.readInt();
      // Extract and return the major version
      return version & 0x00ffffff;
    }
  };
  Future<Path> f = exec.submit(regionCallable);
  return f;
}
Project: HIndex    File: FSDataInputStreamWrapper.java
public FSDataInputStreamWrapper(FileSystem fs, FileLink link) throws IOException {
  this(fs, link, null);
}
Project: IRIndex    File: TestFileLink.java
/**
 * Test that link is still readable even when the current file gets renamed.
 */
private void testLinkReadDuringRename(FileSystem fs, Path rootDir) throws IOException {
  Path originalPath = new Path(rootDir, "test.file");
  Path archivedPath = new Path(rootDir, "archived.file");

  writeSomeData(fs, originalPath, 256 << 20, (byte)2);

  List<Path> files = new ArrayList<Path>();
  files.add(originalPath);
  files.add(archivedPath);

  FileLink link = new FileLink(files);
  FSDataInputStream in = link.open(fs);
  try {
    byte[] data = new byte[8192];
    long size = 0;

    // Read from origin
    int n = in.read(data);
    dataVerify(data, n, (byte)2);
    size += n;

    // Move origin to archive
    assertFalse(fs.exists(archivedPath));
    fs.rename(originalPath, archivedPath);
    assertFalse(fs.exists(originalPath));
    assertTrue(fs.exists(archivedPath));

    // Try to read to the end
    while ((n = in.read(data)) > 0) {
      dataVerify(data, n, (byte)2);
      size += n;
    }

    assertEquals(256 << 20, size);
  } finally {
    in.close();
    if (fs.exists(originalPath)) fs.delete(originalPath);
    if (fs.exists(archivedPath)) fs.delete(archivedPath);
  }
}
Project: IRIndex    File: TestFileLink.java
/**
 * Test that link is still readable even when the current file gets deleted.
 *
 * NOTE: This test is valid only on HDFS.
 * When a file is deleted from a local file-system, it is simply 'unlinked'.
 * The inode, which contains the file's data, is not deleted until all
 * processes have finished with it.
 * In HDFS, when a request exceeds the cached block locations,
 * a query to the namenode is performed using the filename,
 * and the deleted file no longer exists (FileNotFoundException).
 */
@Test
public void testHDFSLinkReadDuringDelete() throws Exception {
  HBaseTestingUtility testUtil = new HBaseTestingUtility();
  Configuration conf = testUtil.getConfiguration();
  conf.setInt("dfs.blocksize", 1024 * 1024);
  conf.setInt("dfs.client.read.prefetch.size", 2 * 1024 * 1024);

  testUtil.startMiniDFSCluster(1);
  MiniDFSCluster cluster = testUtil.getDFSCluster();
  FileSystem fs = cluster.getFileSystem();
  assertEquals("hdfs", fs.getUri().getScheme());

  try {
    List<Path> files = new ArrayList<Path>();
    for (int i = 0; i < 3; i++) {
      Path path = new Path(String.format("test-data-%d", i));
      writeSomeData(fs, path, 1 << 20, (byte)i);
      files.add(path);
    }

    FileLink link = new FileLink(files);
    FSDataInputStream in = link.open(fs);
    try {
      byte[] data = new byte[8192];
      int n;

      // Switch to file 1
      n = in.read(data);
      dataVerify(data, n, (byte)0);
      fs.delete(files.get(0));
      skipBuffer(in, (byte)0);

      // Switch to file 2
      n = in.read(data);
      dataVerify(data, n, (byte)1);
      fs.delete(files.get(1));
      skipBuffer(in, (byte)1);

      // Switch to file 3
      n = in.read(data);
      dataVerify(data, n, (byte)2);
      fs.delete(files.get(2));
      skipBuffer(in, (byte)2);

      // No more files available
      try {
        n = in.read(data);
        assert(n <= 0);
      } catch (FileNotFoundException e) {
        assertTrue(true);
      }
    } finally {
      in.close();
    }
  } finally {
    testUtil.shutdownMiniCluster();
  }
}
Project: hbase    File: HBaseFsck.java
/**
 * Scan all the store file names to find any lingering HFileLink files,
 * which refer to non-existing files. If the "fix" option is enabled,
 * any lingering HFileLink file found will be sidelined.
 */
private void offlineHLinkFileRepair() throws IOException, InterruptedException {
  Configuration conf = getConf();
  Path hbaseRoot = FSUtils.getRootDir(conf);
  FileSystem fs = hbaseRoot.getFileSystem(conf);
  LOG.info("Computing mapping of all link files");
  Map<String, Path> allFiles = FSUtils
      .getTableStoreFilePathMap(fs, hbaseRoot, new FSUtils.HFileLinkFilter(), executor, errors);
  errors.print("");

  LOG.info("Validating mapping using HDFS state");
  for (Path path : allFiles.values()) {
    // building HFileLink object to gather locations
    HFileLink actualLink = HFileLink.buildFromHFileLinkPattern(conf, path);
    if (actualLink.exists(fs)) continue; // good, expected

    // Found a lingering HFileLink
    errors.reportError(ERROR_CODE.LINGERING_HFILELINK, "Found lingering HFileLink " + path);
    if (!shouldFixHFileLinks()) continue;

    // Now, trying to fix it since requested
    setShouldRerun();

    // An HFileLink path should be like
    // ${hbase.rootdir}/data/namespace/table_name/region_id/family_name/linkedtable=linkedregionname-linkedhfilename
    // sidelining will happen in the ${hbase.rootdir}/${sidelinedir} directory with the same folder structure.
    boolean success = sidelineFile(fs, hbaseRoot, path);

    if (!success) {
      LOG.error("Failed to sideline HFileLink file " + path);
    }

    // An HFileLink backreference path should be like
    // ${hbase.rootdir}/archive/data/namespace/table_name/region_id/family_name/.links-linkedhfilename
    // sidelining will happen in the ${hbase.rootdir}/${sidelinedir} directory with the same folder structure.
    Path backRefPath = FileLink.getBackReferencesDir(HFileArchiveUtil
            .getStoreArchivePath(conf, HFileLink.getReferencedTableName(path.getName().toString()),
                HFileLink.getReferencedRegionName(path.getName().toString()),
                path.getParent().getName()),
        HFileLink.getReferencedHFileName(path.getName().toString()));
    success = sidelineFile(fs, hbaseRoot, backRefPath);

    if (!success) {
      LOG.error("Failed to sideline HFileLink backreference file " + path);
    }
  }
}
Project: PyroDB    File: HFileV1Detector.java
/**
 * Each region is processed by a separate handler. If an HRegion has an hfileV1, its path is
 * returned as the future result; otherwise, a null value is returned.
 * @param regionDir Region to process.
 * @return corresponding Future object.
 */
private Future<Path> processRegion(final Path regionDir) {
  LOG.debug("processing region: " + regionDir);
  Callable<Path> regionCallable = new Callable<Path>() {
    @Override
    public Path call() throws Exception {
      for (Path familyDir : FSUtils.getFamilyDirs(fs, regionDir)) {
        FileStatus[] storeFiles = FSUtils.listStatus(fs, familyDir);
        if (storeFiles == null || storeFiles.length == 0) continue;
        for (FileStatus storeFile : storeFiles) {
          Path storeFilePath = storeFile.getPath();
          FSDataInputStream fsdis = null;
          long lenToRead = 0;
          try {
            // check whether this path is a reference.
            if (StoreFileInfo.isReference(storeFilePath)) continue;
            // check whether this path is a HFileLink.
            else if (HFileLink.isHFileLink(storeFilePath)) {
              FileLink fLink = getFileLinkWithPreNSPath(storeFilePath);
              fsdis = fLink.open(fs);
              lenToRead = fLink.getFileStatus(fs).getLen();
            } else {
              // a regular hfile
              fsdis = fs.open(storeFilePath);
              lenToRead = storeFile.getLen();
            }
            int majorVersion = computeMajorVersion(fsdis, lenToRead);
            if (majorVersion == 1) {
              hFileV1Set.add(storeFilePath);
              // return this region path, as it needs to be compacted.
              return regionDir;
            }
            if (majorVersion > 2 || majorVersion < 1) throw new IllegalArgumentException(
                "Incorrect major version: " + majorVersion);
          } catch (Exception iae) {
            corruptedHFiles.add(storeFilePath);
            LOG.error("Got exception while reading trailer for file: "+ storeFilePath, iae);
          } finally {
            if (fsdis != null) fsdis.close();
          }
        }
      }
      return null;
    }

    private int computeMajorVersion(FSDataInputStream istream, long fileSize)
     throws IOException {
      //read up the last int of the file. Major version is in the last 3 bytes.
      long seekPoint = fileSize - Bytes.SIZEOF_INT;
      if (seekPoint < 0)
        throw new IllegalArgumentException("File too small, no major version found");

      // Read the version from the last int of the file.
      istream.seek(seekPoint);
      int version = istream.readInt();
      // Extract and return the major version
      return version & 0x00ffffff;
    }
  };
  Future<Path> f = exec.submit(regionCallable);
  return f;
}
Project: PyroDB    File: FSDataInputStreamWrapper.java
public FSDataInputStreamWrapper(FileSystem fs, FileLink link) throws IOException {
  this(fs, link, null);
}
Project: c5    File: HFileV1Detector.java
/**
 * Each region is processed by a separate handler. If an HRegion has an hfileV1, its path is
 * returned as the future result; otherwise, a null value is returned.
 * @param regionDir Region to process.
 * @return corresponding Future object.
 */
private Future<Path> processRegion(final Path regionDir) {
  LOG.debug("processing region: " + regionDir);
  Callable<Path> regionCallable = new Callable<Path>() {
    @Override
    public Path call() throws Exception {
      for (Path familyDir : FSUtils.getFamilyDirs(fs, regionDir)) {
        FileStatus[] storeFiles = FSUtils.listStatus(fs, familyDir);
        if (storeFiles == null || storeFiles.length == 0) continue;
        for (FileStatus storeFile : storeFiles) {
          Path storeFilePath = storeFile.getPath();
          FSDataInputStream fsdis = null;
          long lenToRead = 0;
          try {
            // check whether this path is a reference.
            if (StoreFileInfo.isReference(storeFilePath)) continue;
            // check whether this path is a HFileLink.
            else if (HFileLink.isHFileLink(storeFilePath)) {
              FileLink fLink = getFileLinkWithPreNSPath(storeFilePath);
              fsdis = fLink.open(fs);
              lenToRead = fLink.getFileStatus(fs).getLen();
            } else {
              // a regular hfile
              fsdis = fs.open(storeFilePath);
              lenToRead = storeFile.getLen();
            }
            int majorVersion = computeMajorVersion(fsdis, lenToRead);
            if (majorVersion == 1) {
              hFileV1Set.add(storeFilePath);
              // return this region path, as it needs to be compacted.
              return regionDir;
            }
            if (majorVersion > 2 || majorVersion < 1) throw new IllegalArgumentException(
                "Incorrect major version: " + majorVersion);
          } catch (Exception iae) {
            corruptedHFiles.add(storeFilePath);
            LOG.error("Got exception while reading trailer for file: "+ storeFilePath, iae);
          } finally {
            if (fsdis != null) fsdis.close();
          }
        }
      }
      return null;
    }

    private int computeMajorVersion(FSDataInputStream istream, long fileSize)
     throws IOException {
      //read up the last int of the file. Major version is in the last 3 bytes.
      long seekPoint = fileSize - Bytes.SIZEOF_INT;
      if (seekPoint < 0)
        throw new IllegalArgumentException("File too small, no major version found");

      // Read the version from the last int of the file.
      istream.seek(seekPoint);
      int version = istream.readInt();
      // Extract and return the major version
      return version & 0x00ffffff;
    }
  };
  Future<Path> f = exec.submit(regionCallable);
  return f;
}
Project: c5    File: FSDataInputStreamWrapper.java
public FSDataInputStreamWrapper(FileSystem fs, FileLink link) throws IOException {
  this(fs, link, null);
}
Project: HBase-Research    File: TestFileLink.java
/**
 * Test that link is still readable even when the current file gets renamed.
 */
private void testLinkReadDuringRename(FileSystem fs, Path rootDir) throws IOException {
  Path originalPath = new Path(rootDir, "test.file");
  Path archivedPath = new Path(rootDir, "archived.file");

  writeSomeData(fs, originalPath, 256 << 20, (byte)2);

  List<Path> files = new ArrayList<Path>();
  files.add(originalPath);
  files.add(archivedPath);

  FileLink link = new FileLink(files);
  FSDataInputStream in = link.open(fs);
  try {
    byte[] data = new byte[8192];
    long size = 0;

    // Read from origin
    int n = in.read(data);
    dataVerify(data, n, (byte)2);
    size += n;

    // Move origin to archive
    assertFalse(fs.exists(archivedPath));
    fs.rename(originalPath, archivedPath);
    assertFalse(fs.exists(originalPath));
    assertTrue(fs.exists(archivedPath));

    // Try to read to the end
    while ((n = in.read(data)) > 0) {
      dataVerify(data, n, (byte)2);
      size += n;
    }

    assertEquals(256 << 20, size);
  } finally {
    in.close();
    if (fs.exists(originalPath)) fs.delete(originalPath);
    if (fs.exists(archivedPath)) fs.delete(archivedPath);
  }
}
Project: HBase-Research    File: TestFileLink.java
/**
 * Test that link is still readable even when the current file gets deleted.
 *
 * NOTE: This test is valid only on HDFS.
 * When a file is deleted from a local file-system, it is simply 'unlinked'.
 * The inode, which contains the file's data, is not deleted until all
 * processes have finished with it.
 * In HDFS, when a request exceeds the cached block locations,
 * a query to the namenode is performed using the filename,
 * and the deleted file no longer exists (FileNotFoundException).
 */
@Test
public void testHDFSLinkReadDuringDelete() throws Exception {
  HBaseTestingUtility testUtil = new HBaseTestingUtility();
  Configuration conf = testUtil.getConfiguration();
  conf.setInt("dfs.blocksize", 1024 * 1024);
  conf.setInt("dfs.client.read.prefetch.size", 2 * 1024 * 1024);

  testUtil.startMiniDFSCluster(1);
  MiniDFSCluster cluster = testUtil.getDFSCluster();
  FileSystem fs = cluster.getFileSystem();
  assertEquals("hdfs", fs.getUri().getScheme());

  try {
    List<Path> files = new ArrayList<Path>();
    for (int i = 0; i < 3; i++) {
      Path path = new Path(String.format("test-data-%d", i));
      writeSomeData(fs, path, 1 << 20, (byte)i);
      files.add(path);
    }

    FileLink link = new FileLink(files);
    FSDataInputStream in = link.open(fs);
    try {
      byte[] data = new byte[8192];
      int n;

      // Switch to file 1
      n = in.read(data);
      dataVerify(data, n, (byte)0);
      fs.delete(files.get(0));
      skipBuffer(in, (byte)0);

      // Switch to file 2
      n = in.read(data);
      dataVerify(data, n, (byte)1);
      fs.delete(files.get(1));
      skipBuffer(in, (byte)1);

      // Switch to file 3
      n = in.read(data);
      dataVerify(data, n, (byte)2);
      fs.delete(files.get(2));
      skipBuffer(in, (byte)2);

      // No more files available
      try {
        n = in.read(data);
        assert(n <= 0);
      } catch (FileNotFoundException e) {
        assertTrue(true);
      }
    } finally {
      in.close();
    }
  } finally {
    testUtil.shutdownMiniCluster();
  }
}
Project: hbase-0.94.8-qod    File: TestFileLink.java
/**
 * Test that link is still readable even when the current file gets renamed.
 */
private void testLinkReadDuringRename(FileSystem fs, Path rootDir) throws IOException {
  Path originalPath = new Path(rootDir, "test.file");
  Path archivedPath = new Path(rootDir, "archived.file");

  writeSomeData(fs, originalPath, 256 << 20, (byte)2);

  List<Path> files = new ArrayList<Path>();
  files.add(originalPath);
  files.add(archivedPath);

  FileLink link = new FileLink(files);
  FSDataInputStream in = link.open(fs);
  try {
    byte[] data = new byte[8192];
    long size = 0;

    // Read from origin
    int n = in.read(data);
    dataVerify(data, n, (byte)2);
    size += n;

    // Move origin to archive
    assertFalse(fs.exists(archivedPath));
    fs.rename(originalPath, archivedPath);
    assertFalse(fs.exists(originalPath));
    assertTrue(fs.exists(archivedPath));

    // Try to read to the end
    while ((n = in.read(data)) > 0) {
      dataVerify(data, n, (byte)2);
      size += n;
    }

    assertEquals(256 << 20, size);
  } finally {
    in.close();
    if (fs.exists(originalPath)) fs.delete(originalPath);
    if (fs.exists(archivedPath)) fs.delete(archivedPath);
  }
}
Project: hbase-0.94.8-qod    File: TestFileLink.java
/**
 * Test that link is still readable even when the current file gets deleted.
 *
 * NOTE: This test is valid only on HDFS.
 * When a file is deleted from a local file-system, it is simply 'unlinked'.
 * The inode, which contains the file's data, is not deleted until all
 * processes have finished with it.
 * In HDFS, when a request exceeds the cached block locations,
 * a query to the namenode is performed using the filename,
 * and the deleted file no longer exists (FileNotFoundException).
 */
@Test
public void testHDFSLinkReadDuringDelete() throws Exception {
  HBaseTestingUtility testUtil = new HBaseTestingUtility();
  Configuration conf = testUtil.getConfiguration();
  conf.setInt("dfs.blocksize", 1024 * 1024);
  conf.setInt("dfs.client.read.prefetch.size", 2 * 1024 * 1024);

  testUtil.startMiniDFSCluster(1);
  MiniDFSCluster cluster = testUtil.getDFSCluster();
  FileSystem fs = cluster.getFileSystem();
  assertEquals("hdfs", fs.getUri().getScheme());

  try {
    List<Path> files = new ArrayList<Path>();
    for (int i = 0; i < 3; i++) {
      Path path = new Path(String.format("test-data-%d", i));
      writeSomeData(fs, path, 1 << 20, (byte)i);
      files.add(path);
    }

    FileLink link = new FileLink(files);
    FSDataInputStream in = link.open(fs);
    try {
      byte[] data = new byte[8192];
      int n;

      // Switch to file 1
      n = in.read(data);
      dataVerify(data, n, (byte)0);
      fs.delete(files.get(0));
      skipBuffer(in, (byte)0);

      // Switch to file 2
      n = in.read(data);
      dataVerify(data, n, (byte)1);
      fs.delete(files.get(1));
      skipBuffer(in, (byte)1);

      // Switch to file 3
      n = in.read(data);
      dataVerify(data, n, (byte)2);
      fs.delete(files.get(2));
      skipBuffer(in, (byte)2);

      // No more files available
      try {
        n = in.read(data);
        assert(n <= 0);
      } catch (FileNotFoundException e) {
        assertTrue(true);
      }
    } finally {
      in.close();
    }
  } finally {
    testUtil.shutdownMiniCluster();
  }
}
Project: DominoHBase    File: TestFileLink.java
/**
 * Test that link is still readable even when the current file gets renamed.
 */
private void testLinkReadDuringRename(FileSystem fs, Path rootDir) throws IOException {
  Path originalPath = new Path(rootDir, "test.file");
  Path archivedPath = new Path(rootDir, "archived.file");

  writeSomeData(fs, originalPath, 256 << 20, (byte)2);

  List<Path> files = new ArrayList<Path>();
  files.add(originalPath);
  files.add(archivedPath);

  FileLink link = new FileLink(files);
  FSDataInputStream in = link.open(fs);
  try {
    byte[] data = new byte[8192];
    long size = 0;

    // Read from origin
    int n = in.read(data);
    dataVerify(data, n, (byte)2);
    size += n;

    // Move origin to archive
    assertFalse(fs.exists(archivedPath));
    fs.rename(originalPath, archivedPath);
    assertFalse(fs.exists(originalPath));
    assertTrue(fs.exists(archivedPath));

    // Try to read to the end
    while ((n = in.read(data)) > 0) {
      dataVerify(data, n, (byte)2);
      size += n;
    }

    assertEquals(256 << 20, size);
  } finally {
    in.close();
    if (fs.exists(originalPath)) fs.delete(originalPath);
    if (fs.exists(archivedPath)) fs.delete(archivedPath);
  }
}
Project: DominoHBase    File: TestFileLink.java
/**
 * Test that link is still readable even when the current file gets deleted.
 *
 * NOTE: This test is valid only on HDFS.
 * When a file is deleted from a local file-system, it is simply 'unlinked'.
 * The inode, which contains the file's data, is not deleted until all
 * processes have finished with it.
 * In HDFS, when a request exceeds the cached block locations,
 * a query to the namenode is performed using the filename,
 * and the deleted file no longer exists (FileNotFoundException).
 */
@Test
public void testHDFSLinkReadDuringDelete() throws Exception {
  HBaseTestingUtility testUtil = new HBaseTestingUtility();
  Configuration conf = testUtil.getConfiguration();
  conf.setInt("dfs.blocksize", 1024 * 1024);
  conf.setInt("dfs.client.read.prefetch.size", 2 * 1024 * 1024);

  testUtil.startMiniDFSCluster(1);
  MiniDFSCluster cluster = testUtil.getDFSCluster();
  FileSystem fs = cluster.getFileSystem();
  assertEquals("hdfs", fs.getUri().getScheme());

  try {
    List<Path> files = new ArrayList<Path>();
    for (int i = 0; i < 3; i++) {
      Path path = new Path(String.format("test-data-%d", i));
      writeSomeData(fs, path, 1 << 20, (byte)i);
      files.add(path);
    }

    FileLink link = new FileLink(files);
    FSDataInputStream in = link.open(fs);
    try {
      byte[] data = new byte[8192];
      int n;

      // Switch to file 1
      n = in.read(data);
      dataVerify(data, n, (byte)0);
      fs.delete(files.get(0));
      skipBuffer(in, (byte)0);

      // Switch to file 2
      n = in.read(data);
      dataVerify(data, n, (byte)1);
      fs.delete(files.get(1));
      skipBuffer(in, (byte)1);

      // Switch to file 3
      n = in.read(data);
      dataVerify(data, n, (byte)2);
      fs.delete(files.get(2));
      skipBuffer(in, (byte)2);

      // No more files available
      try {
        n = in.read(data);
        assert(n <= 0);
      } catch (FileNotFoundException e) {
        assertTrue(true);
      }
    } finally {
      in.close();
    }
  } finally {
    testUtil.shutdownMiniCluster();
  }
}
Project: hindex    File: TestFileLink.java
/**
 * Test that link is still readable even when the current file gets renamed.
 */
private void testLinkReadDuringRename(FileSystem fs, Path rootDir) throws IOException {
  Path originalPath = new Path(rootDir, "test.file");
  Path archivedPath = new Path(rootDir, "archived.file");

  writeSomeData(fs, originalPath, 256 << 20, (byte)2);

  List<Path> files = new ArrayList<Path>();
  files.add(originalPath);
  files.add(archivedPath);

  FileLink link = new FileLink(files);
  FSDataInputStream in = link.open(fs);
  try {
    byte[] data = new byte[8192];
    long size = 0;

    // Read from origin
    int n = in.read(data);
    dataVerify(data, n, (byte)2);
    size += n;

    // Move origin to archive
    assertFalse(fs.exists(archivedPath));
    fs.rename(originalPath, archivedPath);
    assertFalse(fs.exists(originalPath));
    assertTrue(fs.exists(archivedPath));

    // Try to read to the end
    while ((n = in.read(data)) > 0) {
      dataVerify(data, n, (byte)2);
      size += n;
    }

    assertEquals(256 << 20, size);
  } finally {
    in.close();
    if (fs.exists(originalPath)) fs.delete(originalPath);
    if (fs.exists(archivedPath)) fs.delete(archivedPath);
  }
}
Project: hindex    File: TestFileLink.java
/**
 * Test that link is still readable even when the current file gets deleted.
 *
 * NOTE: This test is valid only on HDFS.
 * When a file is deleted from a local file-system, it is simply 'unlinked'.
 * The inode, which contains the file's data, is not deleted until all
 * processes have finished with it.
 * In HDFS, when a request exceeds the cached block locations,
 * a query to the namenode is performed using the filename,
 * and the deleted file no longer exists (FileNotFoundException).
 */
@Test
public void testHDFSLinkReadDuringDelete() throws Exception {
  HBaseTestingUtility testUtil = new HBaseTestingUtility();
  Configuration conf = testUtil.getConfiguration();
  conf.setInt("dfs.blocksize", 1024 * 1024);
  conf.setInt("dfs.client.read.prefetch.size", 2 * 1024 * 1024);

  testUtil.startMiniDFSCluster(1);
  MiniDFSCluster cluster = testUtil.getDFSCluster();
  FileSystem fs = cluster.getFileSystem();
  assertEquals("hdfs", fs.getUri().getScheme());

  try {
    List<Path> files = new ArrayList<Path>();
    for (int i = 0; i < 3; i++) {
      Path path = new Path(String.format("test-data-%d", i));
      writeSomeData(fs, path, 1 << 20, (byte)i);
      files.add(path);
    }

    FileLink link = new FileLink(files);
    FSDataInputStream in = link.open(fs);
    try {
      byte[] data = new byte[8192];
      int n;

      // Switch to file 1
      n = in.read(data);
      dataVerify(data, n, (byte)0);
      fs.delete(files.get(0));
      skipBuffer(in, (byte)0);

      // Switch to file 2
      n = in.read(data);
      dataVerify(data, n, (byte)1);
      fs.delete(files.get(1));
      skipBuffer(in, (byte)1);

      // Switch to file 3
      n = in.read(data);
      dataVerify(data, n, (byte)2);
      fs.delete(files.get(2));
      skipBuffer(in, (byte)2);

      // No more files available
      try {
        n = in.read(data);
        assert(n <= 0);
      } catch (FileNotFoundException e) {
        assertTrue(true);
      }
    } finally {
      in.close();
    }
  } finally {
    testUtil.shutdownMiniCluster();
  }
}
Project: ditb    File: HFileV1Detector.java
/**
 * Creates a FileLink which adds pre-namespace paths to its list of available paths. This is used
 * when reading a snapshot file in a pre-namespace file layout, for example, while upgrading.
 * @param storeFilePath
 * @return a FileLink which could read from pre-namespace paths.
 * @throws IOException
 */
public FileLink getFileLinkWithPreNSPath(Path storeFilePath) throws IOException {
  HFileLink link = HFileLink.buildFromHFileLinkPattern(getConf(), storeFilePath);
  List<Path> pathsToProcess = getPreNSPathsForHFileLink(link);
  pathsToProcess.addAll(Arrays.asList(link.getLocations()));
  return new FileLink(pathsToProcess);
}
Project: pbase    File: HFileV1Detector.java
/**
 * Creates a FileLink which adds pre-namespace paths to its list of available paths. This is used
 * when reading a snapshot file in a pre-namespace file layout, for example, while upgrading.
 * @param storeFilePath
 * @return a FileLink which could read from pre-namespace paths.
 * @throws IOException
 */
public FileLink getFileLinkWithPreNSPath(Path storeFilePath) throws IOException {
  HFileLink link = new HFileLink(getConf(), storeFilePath);
  List<Path> pathsToProcess = getPreNSPathsForHFileLink(link);
  pathsToProcess.addAll(Arrays.asList(link.getLocations()));
  return new FileLink(pathsToProcess);
}
Project: HIndex    File: HFileV1Detector.java
/**
 * Creates a FileLink which adds pre-namespace paths to its list of available paths. This is used
 * when reading a snapshot file in a pre-namespace file layout, for example, while upgrading.
 * @param storeFilePath
 * @return a FileLink which could read from pre-namespace paths.
 * @throws IOException
 */
public FileLink getFileLinkWithPreNSPath(Path storeFilePath) throws IOException {
  HFileLink link = new HFileLink(getConf(), storeFilePath);
  List<Path> pathsToProcess = getPreNSPathsForHFileLink(link);
  pathsToProcess.addAll(Arrays.asList(link.getLocations()));
  return new FileLink(pathsToProcess);
}
Project: PyroDB    File: HFileV1Detector.java
/**
 * Creates a FileLink which adds pre-namespace paths to its list of available paths. This is used
 * when reading a snapshot file in a pre-namespace file layout, for example, while upgrading.
 * @param storeFilePath
 * @return a FileLink which could read from pre-namespace paths.
 * @throws IOException
 */
public FileLink getFileLinkWithPreNSPath(Path storeFilePath) throws IOException {
  HFileLink link = new HFileLink(getConf(), storeFilePath);
  List<Path> pathsToProcess = getPreNSPathsForHFileLink(link);
  pathsToProcess.addAll(Arrays.asList(link.getLocations()));
  return new FileLink(pathsToProcess);
}
Project: c5    File: HFileV1Detector.java
/**
 * Creates a FileLink which adds pre-namespace paths to its list of available paths. This is used
 * when reading a snapshot file in a pre-namespace file layout, for example, while upgrading.
 * @param storeFilePath
 * @return a FileLink which could read from pre-namespace paths.
 * @throws IOException
 */
public FileLink getFileLinkWithPreNSPath(Path storeFilePath) throws IOException {
  HFileLink link = new HFileLink(getConf(), storeFilePath);
  List<Path> pathsToProcess = getPreNSPathsForHFileLink(link);
  pathsToProcess.addAll(Arrays.asList(link.getLocations()));
  return new FileLink(pathsToProcess);
}