Example source code for the Java class org.apache.hadoop.fs.FileStatus
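The examples below are collected from open-source projects and show org.apache.hadoop.fs.FileStatus used in context. As a quick orientation, here is a minimal, self-contained sketch (not taken from any project below; the path is illustrative) of the FileStatus accessors these snippets rely on most often:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class FileStatusBasics {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    Path path = new Path("/tmp/example.txt"); // illustrative path
    FileSystem fs = path.getFileSystem(conf);

    // getFileStatus throws FileNotFoundException if the path does not exist
    FileStatus status = fs.getFileStatus(path);
    System.out.println("path        = " + status.getPath());
    System.out.println("isDirectory = " + status.isDirectory());
    System.out.println("length      = " + status.getLen());
    System.out.println("replication = " + status.getReplication());
    System.out.println("blockSize   = " + status.getBlockSize());
    System.out.println("modified    = " + status.getModificationTime());
    System.out.println("permission  = " + status.getPermission());
    System.out.println("owner:group = " + status.getOwner() + ":" + status.getGroup());
  }
}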

Project: dremio-oss    File: PseudoDistributedFileSystem.java
private FileStatus fixFileStatus(String endpoint, FileStatus status) throws IOException {
  final Path remotePath = Path.getPathWithoutSchemeAndAuthority(status.getPath());

  if (status.isDirectory()) {
    return new PDFSFileStatus(makeQualified(remotePath), status);
  }

  String basename = remotePath.getName();
  boolean hidden = isHidden(basename);

  StringBuilder sb = new StringBuilder();
  if (hidden) {
    sb.append(basename.charAt(0));
  }
  sb.append(endpoint).append('@');
  sb.append(hidden ? basename.substring(1) : basename);

  return new PDFSFileStatus(makeQualified(new Path(remotePath.getParent(), sb.toString())), status);
}
Project: hadoop    File: TestFileOutputCommitter.java
private void validateMapFileOutputContent(
    FileSystem fs, Path dir) throws IOException {
  // map output is a directory with index and data files
  Path expectedMapDir = new Path(dir, partFile);
  assert(fs.getFileStatus(expectedMapDir).isDirectory());    
  FileStatus[] files = fs.listStatus(expectedMapDir);
  int fileCount = 0;
  boolean dataFileFound = false; 
  boolean indexFileFound = false; 
  for (FileStatus f : files) {
    if (f.isFile()) {
      ++fileCount;
      if (f.getPath().getName().equals(MapFile.INDEX_FILE_NAME)) {
        indexFileFound = true;
      }
      else if (f.getPath().getName().equals(MapFile.DATA_FILE_NAME)) {
        dataFileFound = true;
      }
    }
  }
  assert(fileCount > 0);
  assert(dataFileFound && indexFileFound);
}
Project: ditb    File: IndexFile.java
/**
 * Returns all files belonging to the given region directory. Could return an
 * empty list.
 *
 * @param fs  The file system reference.
 * @param regionDir  The region directory to scan.
 * @return The list of files found.
 * @throws IOException When scanning the files fails.
 */
static List<Path> getStoreFiles(FileSystem fs, Path regionDir)
throws IOException {
  List<Path> res = new ArrayList<Path>();
  PathFilter dirFilter = new FSUtils.DirFilter(fs);
  FileStatus[] familyDirs = fs.listStatus(regionDir, dirFilter);
  for(FileStatus dir : familyDirs) {
    FileStatus[] files = fs.listStatus(dir.getPath());
    for (FileStatus file : files) {
      if (!file.isDir()) {
        res.add(file.getPath());
      }
    }
  }
  return res;
}
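The loop above relies on the long-deprecated FileStatus.isDir(). On current Hadoop releases the same scan can be written with FileSystem.listFiles and FileStatus.isFile(); the following is a hedged sketch, not code from the ditb project, and assumes org.apache.hadoop.fs.LocatedFileStatus and org.apache.hadoop.fs.RemoteIterator are imported alongside the classes already in use:

// Collect every regular file under regionDir, recursing into the family directories.
static List<Path> listStoreFilePaths(FileSystem fs, Path regionDir) throws IOException {
  List<Path> result = new ArrayList<Path>();
  RemoteIterator<LocatedFileStatus> it = fs.listFiles(regionDir, true); // recursive listing
  while (it.hasNext()) {
    LocatedFileStatus status = it.next();
    if (status.isFile()) {
      result.add(status.getPath());
    }
  }
  return result;
}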
Project: ditb    File: SecureBulkLoadEndpoint.java
@Override
public void start(CoprocessorEnvironment env) {
  this.env = (RegionCoprocessorEnvironment)env;
  random = new SecureRandom();
  conf = env.getConfiguration();
  baseStagingDir = SecureBulkLoadUtil.getBaseStagingDir(conf);
  this.userProvider = UserProvider.instantiate(conf);

  try {
    fs = FileSystem.get(conf);
    fs.mkdirs(baseStagingDir, PERM_HIDDEN);
    fs.setPermission(baseStagingDir, PERM_HIDDEN);
    //no sticky bit in hadoop-1.0, making directory nonempty so it never gets erased
    fs.mkdirs(new Path(baseStagingDir,"DONOTERASE"), PERM_HIDDEN);
    FileStatus status = fs.getFileStatus(baseStagingDir);
    if(status == null) {
      throw new IllegalStateException("Failed to create staging directory");
    }
    if(!status.getPermission().equals(PERM_HIDDEN)) {
      throw new IllegalStateException(
          "Directory already exists but permissions aren't set to '-rwx--x--x' ");
    }
  } catch (IOException e) {
    throw new IllegalStateException("Failed to get FileSystem instance",e);
  }
}
Project: oryx2    File: DeleteOldDataFn.java
@Override
public void call(T ignored) throws IOException {
  Path dataDirPath = new Path(dataDirString + "/*");
  FileSystem fs = FileSystem.get(dataDirPath.toUri(), hadoopConf);
  FileStatus[] inputPathStatuses = fs.globStatus(dataDirPath);
  if (inputPathStatuses != null) {
    long oldestTimeAllowed =
        System.currentTimeMillis() - TimeUnit.MILLISECONDS.convert(maxAgeHours, TimeUnit.HOURS);
    Arrays.stream(inputPathStatuses).filter(FileStatus::isDirectory).map(FileStatus::getPath).
        filter(subdir -> {
          Matcher m = dirTimestampPattern.matcher(subdir.getName());
          return m.find() && Long.parseLong(m.group(1)) < oldestTimeAllowed;
        }).forEach(subdir -> {
          log.info("Deleting old data at {}", subdir);
          try {
            fs.delete(subdir, true);
          } catch (IOException e) {
            log.warn("Unable to delete {}; continuing", subdir, e);
          }
        });
  }
}
Project: dremio-oss    File: PseudoDistributedFileSystem.java
@Override
public FileStatus getFileStatus(Path f) throws IOException {
  Path absolutePath = toAbsolutePath(f);
  checkPath(absolutePath);

  // if the path is not a remote file path
  if (!isRemoteFile(absolutePath)) {
    return new GetFileStatusTask(absolutePath).get();
  }

  // Parse top level directory
  try {
    RemotePath remotePath = getRemotePath(absolutePath);

    FileSystem delegate = getDelegateFileSystem(remotePath.address);
    FileStatus status = delegate.getFileStatus(remotePath.path);

    return fixFileStatus(remotePath.address, status);
  } catch (IllegalArgumentException e) {
    throw (FileNotFoundException) (new FileNotFoundException("No file " + absolutePath).initCause(e));
  }
}
Project: circus-train    File: SimpleCopyListing.java
private void writeToFileListing(
    SequenceFile.Writer fileListWriter,
    CopyListingFileStatus fileStatus,
    Path sourcePathRoot,
    S3MapReduceCpOptions options)
  throws IOException {
  LOG.debug("REL PATH: {}, FULL PATH: {}", PathUtil.getRelativePath(sourcePathRoot, fileStatus.getPath()),
      fileStatus.getPath());

  FileStatus status = fileStatus;

  if (!shouldCopy(fileStatus.getPath(), options)) {
    return;
  }

  fileListWriter.append(new Text(PathUtil.getRelativePath(sourcePathRoot, fileStatus.getPath())), status);
  fileListWriter.sync();

  if (!fileStatus.isDirectory()) {
    totalBytesToCopy += fileStatus.getLen();
  }
  totalPaths++;
}
Project: hadoop-oss    File: FTPFileSystem.java
@Override
public FSDataInputStream open(Path file, int bufferSize) throws IOException {
  FTPClient client = connect();
  Path workDir = new Path(client.printWorkingDirectory());
  Path absolute = makeAbsolute(workDir, file);
  FileStatus fileStat = getFileStatus(client, absolute);
  if (fileStat.isDirectory()) {
    disconnect(client);
    throw new FileNotFoundException("Path " + file + " is a directory.");
  }
  client.allocate(bufferSize);
  Path parent = absolute.getParent();
  // Change to parent directory on the server. Only then can we read the file
  // on the server by opening up an InputStream. As a side effect the working
  // directory on the server is changed to the parent directory of the file.
  // The FTP client connection is closed when close() is called on the
  // FSDataInputStream.
  client.changeWorkingDirectory(parent.toUri().getPath());
  InputStream is = client.retrieveFileStream(file.getName());
  FSDataInputStream fis = new FSDataInputStream(new FTPInputStream(is,
      client, statistics));
  if (!FTPReply.isPositivePreliminary(client.getReplyCode())) {
    // The ftpClient is in an inconsistent state. Must close the stream,
    // which in turn will log out and disconnect from the FTP server.
    fis.close();
    throw new IOException("Unable to open file: " + file + ", Aborting");
  }
  return fis;
}
Project: hadoop    File: LocatedFileStatusFetcher.java
@Override
public Result call() throws Exception {
  Result result = new Result();
  FileSystem fs = path.getFileSystem(conf);
  result.fs = fs;
  FileStatus[] matches = fs.globStatus(path, inputFilter);
  if (matches == null) {
    result.addError(new IOException("Input path does not exist: " + path));
  } else if (matches.length == 0) {
    result.addError(new IOException("Input Pattern " + path
        + " matches 0 files"));
  } else {
    result.matchedFileStatuses = matches;
  }
  return result;
}
Project: dremio-oss    File: TestRemoteNodeFileSystem.java
@Test
public void testToProtobuFileStatusWithDefault() throws IOException {
  FileStatus status = new FileStatus();

  DFS.FileStatus result = RemoteNodeFileSystem.toProtoFileStatus(status);
  assertFalse(result.hasPath());
  assertEquals(0, result.getLength());
  assertFalse(result.getIsDirectory());
  assertEquals(0, result.getBlockReplication());
  assertEquals(0, result.getBlockSize());
  assertEquals(0, result.getAccessTime());
  assertEquals(0, result.getModificationTime());
  assertEquals(FsPermission.getFileDefault().toExtendedShort(), result.getPermission());
  assertEquals("", result.getOwner());
  assertEquals("", result.getGroup());
  assertFalse(result.hasSymlink());
}
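The assertions above exercise the no-argument FileStatus constructor, whose fields all default to zero or empty values. For contrast, a fully populated status can be built with the long-form constructor; this is a hedged sketch with made-up values, not part of the dremio-oss test:

FileStatus populated = new FileStatus(
    1024L,                          // length in bytes
    false,                          // isdir
    3,                              // block replication
    128L * 1024 * 1024,             // block size
    System.currentTimeMillis(),     // modification time
    System.currentTimeMillis(),     // access time
    FsPermission.getFileDefault(),  // permission
    "alice",                        // owner (illustrative)
    "users",                        // group (illustrative)
    new Path("/tmp/populated.txt"));// path (illustrative)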
Project: ViraPipe    File: InterleaveMulti.java
private static void splitFastq(FileStatus fst, String fqPath, String splitDir, int splitlen, JavaSparkContext sc) throws IOException {
  Path fqpath = new Path(fqPath);
  String fqname = fqpath.getName();
  String[] ns = fqname.split("\\.");
  //TODO: Handle also compressed files
  List<FileSplit> nlif = NLineInputFormat.getSplitsForFile(fst, sc.hadoopConfiguration(), splitlen);

  JavaRDD<FileSplit> splitRDD = sc.parallelize(nlif);

  splitRDD.foreach( split ->  {

    FastqRecordReader fqreader = new FastqRecordReader(new Configuration(), split);
    writeFastqFile(fqreader, new Configuration(), splitDir + "/split_" + split.getStart() + "." + ns[1]);

   });
}
Project: hadoop    File: NativeAzureFileSystemBaseTest.java
@Test
public void testUriEncodingMoreComplexCharacters() throws Exception {
  // Create a file name with URI reserved characters, plus the percent
  String fileName = "!#$'()*;=[]%";
  String directoryName = "*;=[]%!#$'()";
  fs.create(new Path(directoryName, fileName)).close();
  FileStatus[] listing = fs.listStatus(new Path(directoryName));
  assertEquals(1, listing.length);
  assertEquals(fileName, listing[0].getPath().getName());
  FileStatus status = fs.getFileStatus(new Path(directoryName, fileName));
  assertEquals(fileName, status.getPath().getName());
  InputStream stream = fs.open(new Path(directoryName, fileName));
  assertNotNull(stream);
  stream.close();
  assertTrue(fs.delete(new Path(directoryName, fileName), true));
  assertTrue(fs.delete(new Path(directoryName), true));
}
Project: ditb    File: TestCatalogJanitor.java
private FileStatus[] addMockStoreFiles(int count, MasterServices services, Path storedir)
    throws IOException {
  // get the existing store files
  FileSystem fs = services.getMasterFileSystem().getFileSystem();
  fs.mkdirs(storedir);
  // create the store files in the parent
  for (int i = 0; i < count; i++) {
    Path storeFile = new Path(storedir, "_store" + i);
    FSDataOutputStream dos = fs.create(storeFile, true);
    dos.writeBytes("Some data: " + i);
    dos.close();
  }
  LOG.debug("Adding " + count + " store files to the storedir:" + storedir);
  // make sure the mock store files are there
  FileStatus[] storeFiles = fs.listStatus(storedir);
  assertEquals("Didn't have expected store files", count, storeFiles.length);
  return storeFiles;
}
Project: dremio-oss    File: PseudoDistributedFileSystem.java
@Override
protected Callable<FileStatus[]> newMapTask(final String address) throws IOException {
  return new Callable<FileStatus[]>() {
    @Override
    public FileStatus[] call() throws Exception {
      // Only directories should be listed with a fork/join task
      final FileSystem fs = getDelegateFileSystem(address);
      FileStatus status = fs.getFileStatus(path);
      if (status.isFile()) {
        throw new FileNotFoundException("Directory not found: " + path);
      }
      FileStatus[] remoteStatuses = fs.listStatus(path);

      FileStatus[] statuses = new FileStatus[remoteStatuses.length];
      for (int i = 0; i < statuses.length; i++) {
        statuses[i] = fixFileStatus(address, remoteStatuses[i]);
      }

      return statuses;
    }
  };
}
Project: circus-train    File: CircusTrainCopyListing.java
@Override
public void doBuildListing(Path pathToListFile, DistCpOptions options) throws IOException {
  try (Writer writer = newWriter(pathToListFile)) {

    Path sourceRootPath = getRootPath(getConf());

    for (Path sourcePath : options.getSourcePaths()) {

      FileSystem fileSystem = sourcePath.getFileSystem(getConf());
      FileStatus directory = fileSystem.getFileStatus(sourcePath);

      Map<String, CopyListingFileStatus> children = new FileStatusTreeTraverser(fileSystem)
          .preOrderTraversal(directory)
          .transform(new CopyListingFileStatusFunction(fileSystem, options))
          .uniqueIndex(new RelativePathFunction(sourceRootPath));

      for (Entry<String, CopyListingFileStatus> entry : children.entrySet()) {
        LOG.debug("Adding '{}' with relative path '{}'", entry.getValue().getPath(), entry.getKey());
        writer.append(new Text(entry.getKey()), entry.getValue());
        writer.sync();
      }
    }
  }
}
Project: hadoop-oss    File: TestCredentialProviderFactory.java
public void checkPermissionRetention(Configuration conf, String ourUrl,
    Path path) throws Exception {
  CredentialProvider provider = CredentialProviderFactory.getProviders(conf).get(0);
  // let's add a new credential and flush and check that permissions are still set to 777
  char[] cred = new char[32];
  for(int i =0; i < cred.length; ++i) {
    cred[i] = (char) i;
  }
  // create a new key
  try {
    provider.createCredentialEntry("key5", cred);
  } catch (Exception e) {
    e.printStackTrace();
    throw e;
  }
  provider.flush();
  // get a new instance of the provider to ensure it was saved correctly
  provider = CredentialProviderFactory.getProviders(conf).get(0);
  assertArrayEquals(cred, provider.getCredentialEntry("key5").getCredential());

  FileSystem fs = path.getFileSystem(conf);
  FileStatus s = fs.getFileStatus(path);
  assertTrue("Permissions should have been retained from the preexisting " +
        "keystore.", s.getPermission().toString().equals("rwxrwxrwx"));
}
Project: hadoop    File: FTPFileSystem.java
/**
 * Convenience method, so that we don't open a new connection when using this
 * method from within another method. Otherwise every API invocation incurs
 * the overhead of opening/closing a TCP connection.
 */
private FileStatus getFileStatus(FTPClient client, Path file)
    throws IOException {
  FileStatus fileStat = null;
  Path workDir = new Path(client.printWorkingDirectory());
  Path absolute = makeAbsolute(workDir, file);
  Path parentPath = absolute.getParent();
  if (parentPath == null) { // root dir
    long length = -1; // Length of root dir on server not known
    boolean isDir = true;
    int blockReplication = 1;
    long blockSize = DEFAULT_BLOCK_SIZE; // Block Size not known.
    long modTime = -1; // Modification time of root dir not known.
    Path root = new Path("/");
    return new FileStatus(length, isDir, blockReplication, blockSize,
        modTime, root.makeQualified(this));
  }
  String pathName = parentPath.toUri().getPath();
  FTPFile[] ftpFiles = client.listFiles(pathName);
  if (ftpFiles != null) {
    for (FTPFile ftpFile : ftpFiles) {
      if (ftpFile.getName().equals(file.getName())) { // file found in dir
        fileStat = getFileStatus(ftpFile, parentPath);
        break;
      }
    }
    if (fileStat == null) {
      throw new FileNotFoundException("File " + file + " does not exist.");
    }
  } else {
    throw new FileNotFoundException("File " + file + " does not exist.");
  }
  return fileStat;
}
Project: hadoop    File: AggregatedLogDeletionService.java
@Override
public void run() {
  long cutoffMillis = System.currentTimeMillis() - retentionMillis;
  LOG.info("aggregated log deletion started.");
  try {
    FileSystem fs = remoteRootLogDir.getFileSystem(conf);
    for(FileStatus userDir : fs.listStatus(remoteRootLogDir)) {
      if(userDir.isDirectory()) {
        Path userDirPath = new Path(userDir.getPath(), suffix);
        deleteOldLogDirsFrom(userDirPath, cutoffMillis, fs, rmClient);
      }
    }
  } catch (IOException e) {
    logIOException("Error reading root log dir this deletion " +
            "attempt is being aborted", e);
  }
  LOG.info("aggregated log deletion finished.");
}
Project: hadoop    File: NativeAzureFileSystemBaseTest.java
@Test
public void testListDirectory() throws Exception {
  Path rootFolder = new Path("testingList");
  assertTrue(fs.mkdirs(rootFolder));
  FileStatus[] listed = fs.listStatus(rootFolder);
  assertEquals(0, listed.length);
  Path innerFolder = new Path(rootFolder, "inner");
  assertTrue(fs.mkdirs(innerFolder));
  listed = fs.listStatus(rootFolder);
  assertEquals(1, listed.length);
  assertTrue(listed[0].isDirectory());
  Path innerFile = new Path(innerFolder, "innerFile");
  writeString(innerFile, "testing");
  listed = fs.listStatus(rootFolder);
  assertEquals(1, listed.length);
  assertTrue(listed[0].isDirectory());
  listed = fs.listStatus(innerFolder);
  assertEquals(1, listed.length);
  assertFalse(listed[0].isDirectory());
  assertTrue(fs.delete(rootFolder, true));
}
Project: hadoop    File: FSDownload.java
private static boolean checkPublicPermsForAll(FileSystem fs, 
      FileStatus status, FsAction dir, FsAction file) 
  throws IOException {
  FsPermission perms = status.getPermission();
  FsAction otherAction = perms.getOtherAction();
  if (status.isDirectory()) {
    if (!otherAction.implies(dir)) {
      return false;
    }

    for (FileStatus child : fs.listStatus(status.getPath())) {
      if(!checkPublicPermsForAll(fs, child, dir, file)) {
        return false;
      }
    }
    return true;
  }
  return (otherAction.implies(file));
}
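FSDownload applies this recursive check, presumably to decide whether a localized resource tree is world-accessible. As a hedged, standalone illustration of the same FsPermission/FsAction calls on a single status (the path is illustrative):

FileStatus status = fs.getFileStatus(new Path("/apps/shared/resource.jar")); // illustrative path
FsAction other = status.getPermission().getOtherAction();
boolean worldReadable   = other.implies(FsAction.READ);
boolean worldExecutable = other.implies(FsAction.EXECUTE);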
Project: ditb    File: TestLogsCleaner.java
@Test(timeout=5000)
public void testZnodeCversionChange() throws Exception {
  Configuration conf = TEST_UTIL.getConfiguration();
  ReplicationLogCleaner cleaner = new ReplicationLogCleaner();
  cleaner.setConf(conf);

  ReplicationQueuesClient rqcMock = Mockito.mock(ReplicationQueuesClient.class);
  Mockito.when(rqcMock.getQueuesZNodeCversion()).thenReturn(1, 2, 3, 4);

  Field rqc = ReplicationLogCleaner.class.getDeclaredField("replicationQueues");
  rqc.setAccessible(true);

  rqc.set(cleaner, rqcMock);

  // This should return eventually when cversion stabilizes
  cleaner.getDeletableFiles(new LinkedList<FileStatus>());
}
Project: hadoop    File: NativeAzureFileSystemBaseTest.java
@Test
public void testRenameImplicitFolder() throws Exception {
  Path testFile = new Path("deep/file/rename/test");
  FsPermission permission = FsPermission.createImmutable((short) 644);
  createEmptyFile(testFile, permission);
  boolean renameResult = fs.rename(new Path("deep/file"), new Path("deep/renamed"));
  assertTrue(renameResult);
  assertFalse(fs.exists(testFile));
  FileStatus newStatus = fs.getFileStatus(new Path("deep/renamed/rename/test"));
  assertNotNull(newStatus);
  assertEqualsIgnoreStickyBit(permission, newStatus.getPermission());
  assertTrue(fs.delete(new Path("deep"), true));
}
Project: ditb    File: TestHFile.java
public static void truncateFile(FileSystem fs, Path src, Path dst) throws IOException {
  FileStatus fst = fs.getFileStatus(src);
  long len = fst.getLen();
  len = len / 2 ;

  // create a truncated hfile
  FSDataOutputStream fdos = fs.create(dst);
  byte[] buf = new byte[(int)len];
  FSDataInputStream fdis = fs.open(src);
  fdis.read(buf);
  fdos.write(buf);
  fdis.close();
  fdos.close();
}
Project: hadoop    File: TruncateOp.java
private void waitForRecovery(FileSystem fs, Path fn, long newLength)
    throws IOException {
  LOG.info("Waiting on truncate file recovery for " + fn);
  for(;;) {
    FileStatus stat = fs.getFileStatus(fn);
    if(stat.getLen() == newLength) break;
    try {Thread.sleep(1000);} catch(InterruptedException ignored) {}
  }
}
Project: hadoop    File: TestCombineFileInputFormat.java
static FileStatus writeGzipFile(Configuration conf, Path name,
    short replication, int numBlocks)
    throws IOException, TimeoutException, InterruptedException {
  FileSystem fileSys = FileSystem.get(conf);

  GZIPOutputStream out = new GZIPOutputStream(fileSys.create(name, true, conf
      .getInt("io.file.buffer.size", 4096), replication, (long) BLOCKSIZE));
  writeDataAndSetReplication(fileSys, name, out, replication, numBlocks);
  return fileSys.getFileStatus(name);
}
Project: hadoop    File: FTPFileSystem.java
@Override
public FileStatus[] listStatus(Path file) throws IOException {
  FTPClient client = connect();
  try {
    FileStatus[] stats = listStatus(client, file);
    return stats;
  } finally {
    disconnect(client);
  }
}
Project: hadoop    File: FTPFileSystem.java
@Override
public FSDataInputStream open(Path file, int bufferSize) throws IOException {
  FTPClient client = connect();
  Path workDir = new Path(client.printWorkingDirectory());
  Path absolute = makeAbsolute(workDir, file);
  FileStatus fileStat = getFileStatus(client, absolute);
  if (fileStat.isDirectory()) {
    disconnect(client);
    throw new FileNotFoundException("Path " + file + " is a directory.");
  }
  client.allocate(bufferSize);
  Path parent = absolute.getParent();
  // Change to parent directory on the server. Only then can we read the file
  // on the server by opening up an InputStream. As a side effect the working
  // directory on the server is changed to the parent directory of the file.
  // The FTP client connection is closed when close() is called on the
  // FSDataInputStream.
  client.changeWorkingDirectory(parent.toUri().getPath());
  InputStream is = client.retrieveFileStream(file.getName());
  FSDataInputStream fis = new FSDataInputStream(new FTPInputStream(is,
      client, statistics));
  if (!FTPReply.isPositivePreliminary(client.getReplyCode())) {
    // The ftpClient is in an inconsistent state. Must close the stream,
    // which in turn will log out and disconnect from the FTP server.
    fis.close();
    throw new IOException("Unable to open file: " + file + ", Aborting");
  }
  return fis;
}
Project: hadoop    File: TestRenameWithSnapshots.java
/**
 * Rename a single file across snapshottable dirs.
 */
@Test (timeout=60000)
public void testRenameFileAcrossSnapshottableDirs() throws Exception {
  final Path sdir1 = new Path("/dir1");
  final Path sdir2 = new Path("/dir2");
  hdfs.mkdirs(sdir1);
  hdfs.mkdirs(sdir2);
  final Path foo = new Path(sdir2, "foo");
  DFSTestUtil.createFile(hdfs, foo, BLOCKSIZE, REPL, SEED);

  SnapshotTestHelper.createSnapshot(hdfs, sdir1, "s1");
  SnapshotTestHelper.createSnapshot(hdfs, sdir2, "s2");
  hdfs.createSnapshot(sdir1, "s3");

  final Path newfoo = new Path(sdir1, "foo");
  hdfs.rename(foo, newfoo);

  // change the replication factor of foo
  hdfs.setReplication(newfoo, REPL_1);

  // /dir2/.snapshot/s2/foo should still work
  final Path foo_s2 = SnapshotTestHelper.getSnapshotPath(sdir2, "s2",
      "foo");
  assertTrue(hdfs.exists(foo_s2));
  FileStatus status = hdfs.getFileStatus(foo_s2);
  assertEquals(REPL, status.getReplication());

  final Path foo_s3 = SnapshotTestHelper.getSnapshotPath(sdir1, "s3",
      "foo");
  assertFalse(hdfs.exists(foo_s3));
  INodeDirectory sdir2Node = fsdir.getINode(sdir2.toString()).asDirectory();
  Snapshot s2 = sdir2Node.getSnapshot(DFSUtil.string2Bytes("s2"));
  INodeFile sfoo = fsdir.getINode(newfoo.toString()).asFile();
  assertEquals(s2.getId(), sfoo.getDiffs().getLastSnapshotId());
}
Project: ditb    File: WALProcedureStore.java
@Override
public void recoverLease() throws IOException {
  lock.lock();
  try {
    LOG.info("Starting WAL Procedure Store lease recovery");
    FileStatus[] oldLogs = getLogFiles();
    while (isRunning()) {
      // Get Log-MaxID and recover lease on old logs
      flushLogId = initOldLogs(oldLogs);

      // Create new state-log
      if (!rollWriter(flushLogId + 1)) {
        // someone else has already created this log
        LOG.debug("someone else has already created log " + flushLogId);
        continue;
      }

      // We have the lease on the log
      oldLogs = getLogFiles();
      if (getMaxLogId(oldLogs) > flushLogId) {
        if (LOG.isDebugEnabled()) {
          LOG.debug("Someone else created new logs. Expected maxLogId < " + flushLogId);
        }
        logs.getLast().removeFile();
        continue;
      }

      LOG.info("Lease acquired for flushLogId: " + flushLogId);
      break;
    }
  } finally {
    lock.unlock();
  }
}
Project: hadoop    File: TestHDFSConcat.java
/**
 * Test that the concat operation is properly persisted in the
 * edit log, and properly replayed on restart.
 */
@Test
public void testConcatInEditLog() throws Exception {
  final Path TEST_DIR = new Path("/testConcatInEditLog");
  final long FILE_LEN = blockSize;

  // 1. Concat some files
  Path[] srcFiles = new Path[3];
  for (int i = 0; i < srcFiles.length; i++) {
    Path path = new Path(TEST_DIR, "src-" + i);
    DFSTestUtil.createFile(dfs, path, FILE_LEN, REPL_FACTOR, 1);
    srcFiles[i] = path;
  }    
  Path targetFile = new Path(TEST_DIR, "target");
  DFSTestUtil.createFile(dfs, targetFile, FILE_LEN, REPL_FACTOR, 1);

  dfs.concat(targetFile, srcFiles);

  // 2. Verify the concat operation basically worked, and record
  // file status.
  assertTrue(dfs.exists(targetFile));
  FileStatus origStatus = dfs.getFileStatus(targetFile);

  // 3. Restart NN to force replay from edit log
  cluster.restartNameNode(true);

  // 4. Verify concat operation was replayed correctly and file status
  // did not change.
  assertTrue(dfs.exists(targetFile));
  assertFalse(dfs.exists(srcFiles[0]));

  FileStatus statusAfterRestart = dfs.getFileStatus(targetFile);

  assertEquals(origStatus.getModificationTime(),
      statusAfterRestart.getModificationTime());
}
Project: hadoop    File: TopAuditLogger.java
@Override
public void logAuditEvent(boolean succeeded, String userName,
    InetAddress addr, String cmd, String src, String dst, FileStatus status) {
  try {
    topMetrics.report(succeeded, userName, addr, cmd, src, dst, status);
  } catch (Throwable t) {
    LOG.error("An error occurred while reflecting the event in top service, "
        + "event: (cmd={},userName={})", cmd, userName);
  }

  if (LOG.isDebugEnabled()) {
    final StringBuilder sb = new StringBuilder();
    sb.append("allowed=").append(succeeded).append("\t");
    sb.append("ugi=").append(userName).append("\t");
    sb.append("ip=").append(addr).append("\t");
    sb.append("cmd=").append(cmd).append("\t");
    sb.append("src=").append(src).append("\t");
    sb.append("dst=").append(dst).append("\t");
    if (null == status) {
      sb.append("perm=null");
    } else {
      sb.append("perm=");
      sb.append(status.getOwner()).append(":");
      sb.append(status.getGroup()).append(":");
      sb.append(status.getPermission());
    }
    LOG.debug("------------------- logged event for top service: " + sb);
  }
}
Project: hadoop    File: DistCpV1.java
/**
 * Copies a file and validates the copy by checking the checksums.
 * If validation fails, retries (max number of tries is distcp.file.retries)
 * to copy the file.
 */
void copyWithRetries(FileStatus srcstat, Path relativedst,
                     OutputCollector<WritableComparable<?>, Text> out,
                     Reporter reporter) throws IOException {

  // max tries to copy when validation of copy fails
  final int maxRetries = job.getInt(FILE_RETRIES_LABEL, DEFAULT_FILE_RETRIES);
  // save update flag for later copies within the same map task
  final boolean saveUpdate = update;

  int retryCnt = 1;
  for (; retryCnt <= maxRetries; retryCnt++) {
    try {
      //copy the file and validate copy
      copy(srcstat, relativedst, out, reporter);
      break;// copy successful
    } catch (IOException e) {
      LOG.warn("Copy of " + srcstat.getPath() + " failed.", e);
      if (retryCnt < maxRetries) {// copy failed and need to retry
        LOG.info("Retrying copy of file " + srcstat.getPath());
        update = true; // set update flag for retries
      }
      else {// no more retries... Give up
        update = saveUpdate;
        throw new IOException("Copy of file failed even with " + retryCnt
                              + " tries.", e);
      }
    }
  }
}
Project: QDrill    File: Metadata.java
/**
 * Get the parquet metadata for the parquet files in a directory
 * @param path the path of the directory
 * @return the parquet table metadata for the parquet files under the given path
 * @throws IOException if the file statuses or metadata cannot be read
 */
private ParquetTableMetadata_v1 getParquetTableMetadata(String path) throws IOException {
  Path p = new Path(path);
  FileStatus fileStatus = fs.getFileStatus(p);
  Stopwatch watch = new Stopwatch();
  watch.start();
  List<FileStatus> fileStatuses = getFileStatuses(fileStatus);
  logger.info("Took {} ms to get file statuses", watch.elapsed(TimeUnit.MILLISECONDS));
  return getParquetTableMetadata(fileStatuses);
}
Project: wherehowsX    File: JSONFileAnalyzer.java
@Override
public DatasetJsonRecord getSchema(Path targetFilePath) throws IOException {
    StringBuilder JsonObjectList = new StringBuilder();
    DatasetJsonRecord datasetJsonRecord = null;
    try {
        for (String realName : this.json2Array(getJsonObject(targetFilePath), "schema")) {
            if (realName.charAt(0) == '$') {
                JsonObjectList.append("{\"name\": \"" + realName.substring(1, realName.length()) + "\", \"type\": \"int\"},");
            } else {
                JsonObjectList.append("{\"name\": \"" + realName + "\", \"type\": \"string\"},");
            }
        }
        JsonObjectList.deleteCharAt(JsonObjectList.length() - 1);
        String schemaString = "{\"fields\":[" + JsonObjectList + "],\"name\": \"Result\", \"namespace\": \"com.tencent.thomas\", \"type\": \"record\"}";
        String codec = "json.codec";
        String storage = STORAGE_TYPE;
        String abstractPath = targetFilePath.toUri().getPath();
        FileStatus fstat = fs.getFileLinkStatus(targetFilePath);

        datasetJsonRecord =
                new DatasetJsonRecord(schemaString, abstractPath, fstat.getModificationTime(), fstat.getOwner(), fstat.getGroup(),
                        fstat.getPermission().toString(), codec, storage, "");
    } catch (Exception e) {
        LOG.error("path : {} content " + " is not JSON File format content  ",targetFilePath.toUri().getPath());
        LOG.info(e.getStackTrace().toString());
    }

    return datasetJsonRecord;
}
Project: dremio-oss    File: GlobalDictionaryBuilder.java
public static Map<String, Path> listDictionaryFiles(FileSystem fs, Path dictRootDir) throws IOException {
  final Map<String, Path> files = Maps.newHashMap();
  for (FileStatus fileStatus : fs.listStatus(dictRootDir, DICTIONARY_FILES_FILTER)) {
    files.put(getColumnFullPath(fileStatus.getPath().getName()), fileStatus.getPath());
  }
  return files;
}
Project: ditb    File: NamespaceUpgrade.java
public void migrateSnapshots() throws IOException {
  //migrate snapshot dir
  Path oldSnapshotDir = new Path(rootDir, HConstants.OLD_SNAPSHOT_DIR_NAME);
  Path newSnapshotDir = new Path(rootDir, HConstants.SNAPSHOT_DIR_NAME);
  if (fs.exists(oldSnapshotDir)) {
    boolean foundOldSnapshotDir = false;
    // Logic to verify old snapshot dir culled from SnapshotManager
    // ignore all the snapshots in progress
    FileStatus[] snapshots = fs.listStatus(oldSnapshotDir,
      new SnapshotDescriptionUtils.CompletedSnaphotDirectoriesFilter(fs));
    // loop through all the completed snapshots
    for (FileStatus snapshot : snapshots) {
      Path info = new Path(snapshot.getPath(), SnapshotDescriptionUtils.SNAPSHOTINFO_FILE);
      // if the snapshot is bad
      if (fs.exists(info)) {
        foundOldSnapshotDir = true;
        break;
      }
    }
    if(foundOldSnapshotDir) {
      LOG.info("Migrating snapshot dir");
      if (!fs.rename(oldSnapshotDir, newSnapshotDir)) {
        throw new IOException("Failed to move old snapshot dir "+
            oldSnapshotDir+" to new "+newSnapshotDir);
      }
    }
  }
}
Project: hadoop    File: HttpFSFileSystem.java
public static FILE_TYPE getType(FileStatus fileStatus) {
  if (fileStatus.isFile()) {
    return FILE;
  }
  if (fileStatus.isDirectory()) {
    return DIRECTORY;
  }
  if (fileStatus.isSymlink()) {
    return SYMLINK;
  }
  throw new IllegalArgumentException("Could not determine filetype for: " +
                                     fileStatus.getPath());
}
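A hedged usage sketch for the helper above; it assumes getType(...) lives on the FILE_TYPE enum as in HttpFSFileSystem, that a FileSystem handle fs is available, and that the path exists (all names illustrative):

FileStatus status = fs.getFileStatus(new Path("/user/alice/data")); // illustrative path
FILE_TYPE type = FILE_TYPE.getType(status); // FILE, DIRECTORY, or SYMLINK
System.out.println("type = " + type);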
Project: ditb    File: FSUtils.java
public static List<Path> getReferenceFilePaths(final FileSystem fs, final Path familyDir) throws IOException {
  FileStatus[] fds = fs.listStatus(familyDir, new ReferenceFileFilter(fs));
  List<Path> referenceFiles = new ArrayList<Path>(fds.length);
  for (FileStatus fdfs: fds) {
    Path fdPath = fdfs.getPath();
    referenceFiles.add(fdPath);
  }
  return referenceFiles;
}
Project: ditb    File: WALInputFormat.java
private List<FileStatus> getFiles(FileSystem fs, Path dir, long startTime, long endTime)
    throws IOException {
  List<FileStatus> result = new ArrayList<FileStatus>();
  LOG.debug("Scanning " + dir.toString() + " for WAL files");

  FileStatus[] files = fs.listStatus(dir);
  if (files == null) return Collections.emptyList();
  for (FileStatus file : files) {
    if (file.isDirectory()) {
      // recurse into sub directories
      result.addAll(getFiles(fs, file.getPath(), startTime, endTime));
    } else {
      String name = file.getPath().toString();
      int idx = name.lastIndexOf('.');
      if (idx > 0) {
        try {
          long fileStartTime = Long.parseLong(name.substring(idx+1));
          if (fileStartTime <= endTime) {
            LOG.info("Found: " + name);
            result.add(file);
          }
        } catch (NumberFormatException x) {
          idx = 0;
        }
      }
      if (idx == 0) {
        LOG.warn("File " + name + " does not appear to be an WAL file. Skipping...");
      }
    }
  }
  return result;
}
Project: hadoop    File: TestAuditLogs.java
/** test that allowed stat puts proper entry in audit log */
@Test
public void testAuditAllowedStat() throws Exception {
  final Path file = new Path(fnames[0]);
  FileSystem userfs = DFSTestUtil.getFileSystemAs(userGroupInfo, conf);

  setupAuditLogs();
  FileStatus st = userfs.getFileStatus(file);
  verifyAuditLogs(true);
  assertTrue("failed to stat file", st != null && st.isFile());
}