Java class org.apache.hadoop.fs.DirectoryListingStartAfterNotFoundException usage examples
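DirectoryListingStartAfterNotFoundException is thrown by the NameNode when the startAfter cookie passed to a paged directory listing no longer names an existing entry, typically because that entry was deleted between pages. The examples below either catch it and restart the listing from the first entry (the NFS gateway's listPaths), throw it when a reserved /.reserved/.inodes startAfter path cannot be resolved (getListingInt), or assert that it is raised for a deleted startAfter token (TestINodeFile). The sketch below only distills the shared retry pattern; it is not taken from any one of these projects, and it assumes an already-initialized DFSClient plus a placeholder directory path argument.

import java.io.IOException;
import java.nio.charset.StandardCharsets;

import org.apache.hadoop.fs.DirectoryListingStartAfterNotFoundException;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.protocol.DirectoryListing;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.ipc.RemoteException;

public class ListingRestartSketch {
  /**
   * Lists one page of a directory, falling back to the first page when the
   * resume cookie no longer exists. "dirPath" is a placeholder argument.
   */
  static DirectoryListing listPage(DFSClient dfsClient, String dirPath,
      byte[] startAfter) throws IOException {
    try {
      return dfsClient.listPaths(dirPath, startAfter);
    } catch (RemoteException e) {
      IOException io = e.unwrapRemoteException();
      if (!(io instanceof DirectoryListingStartAfterNotFoundException)) {
        throw io;
      }
      // The startAfter entry was deleted; restart the listing from the beginning.
      System.err.println("startAfter not found: "
          + new String(startAfter, StandardCharsets.UTF_8));
      return dfsClient.listPaths(dirPath, HdfsFileStatus.EMPTY_NAME);
    }
  }
}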

Project: hadoop    File: RpcProgramNfs3.java
/**
 * Used by readdir and readdirplus to get dirents. It retries the listing if
 * the startAfter can't be found anymore.
 */
private DirectoryListing listPaths(DFSClient dfsClient, String dirFileIdPath,
    byte[] startAfter) throws IOException {
  DirectoryListing dlisting;
  try {
    dlisting = dfsClient.listPaths(dirFileIdPath, startAfter);
  } catch (RemoteException e) {
    IOException io = e.unwrapRemoteException();
    if (!(io instanceof DirectoryListingStartAfterNotFoundException)) {
      throw io;
    }
    // This happens when startAfter was just deleted
    LOG.info("Cookie couldn't be found: "
        + new String(startAfter, Charset.forName("UTF-8"))
        + ", do listing from beginning");
    dlisting = dfsClient
        .listPaths(dirFileIdPath, HdfsFileStatus.EMPTY_NAME);
  }
  return dlisting;
}
Project: aliyun-oss-hadoop-fs    File: RpcProgramNfs3.java
/**
 * Used by readdir and readdirplus to get dirents. It retries the listing if
 * the startAfter can't be found anymore.
 */
private DirectoryListing listPaths(DFSClient dfsClient, String dirFileIdPath,
    byte[] startAfter) throws IOException {
  DirectoryListing dlisting;
  try {
    dlisting = dfsClient.listPaths(dirFileIdPath, startAfter);
  } catch (RemoteException e) {
    IOException io = e.unwrapRemoteException();
    if (!(io instanceof DirectoryListingStartAfterNotFoundException)) {
      throw io;
    }
    // This happens when startAfter was just deleted
    LOG.info("Cookie couldn't be found: "
        + new String(startAfter, Charset.forName("UTF-8"))
        + ", do listing from beginning");
    dlisting = dfsClient
        .listPaths(dirFileIdPath, HdfsFileStatus.EMPTY_NAME);
  }
  return dlisting;
}
Project: big-c    File: RpcProgramNfs3.java
/**
 * Used by readdir and readdirplus to get dirents. It retries the listing if
 * the startAfter can't be found anymore.
 */
private DirectoryListing listPaths(DFSClient dfsClient, String dirFileIdPath,
    byte[] startAfter) throws IOException {
  DirectoryListing dlisting;
  try {
    dlisting = dfsClient.listPaths(dirFileIdPath, startAfter);
  } catch (RemoteException e) {
    IOException io = e.unwrapRemoteException();
    if (!(io instanceof DirectoryListingStartAfterNotFoundException)) {
      throw io;
    }
    // This happens when startAfter was just deleted
    LOG.info("Cookie couldn't be found: "
        + new String(startAfter, Charset.forName("UTF-8"))
        + ", do listing from beginning");
    dlisting = dfsClient
        .listPaths(dirFileIdPath, HdfsFileStatus.EMPTY_NAME);
  }
  return dlisting;
}
Project: hadoop-2.6.0-cdh5.4.3    File: RpcProgramNfs3.java
/**
 * Used by readdir and readdirplus to get dirents. It retries the listing if
 * the startAfter can't be found anymore.
 */
private DirectoryListing listPaths(DFSClient dfsClient, String dirFileIdPath,
    byte[] startAfter) throws IOException {
  DirectoryListing dlisting;
  try {
    dlisting = dfsClient.listPaths(dirFileIdPath, startAfter);
  } catch (RemoteException e) {
    IOException io = e.unwrapRemoteException();
    if (!(io instanceof DirectoryListingStartAfterNotFoundException)) {
      throw io;
    }
    // This happens when startAfter was just deleted
    LOG.info("Cookie couldn't be found: " + new String(startAfter)
        + ", do listing from beginning");
    dlisting = dfsClient
        .listPaths(dirFileIdPath, HdfsFileStatus.EMPTY_NAME);
  }
  return dlisting;
}
Project: hops    File: RpcProgramNfs3.java
/**
 * Used by readdir and readdirplus to get dirents. It retries the listing if
 * the startAfter can't be found anymore.
 */
private DirectoryListing listPaths(DFSClient dfsClient, String dirFileIdPath,
    byte[] startAfter) throws IOException {
  DirectoryListing dlisting = null;
  try {
    dlisting = dfsClient.listPaths(dirFileIdPath, startAfter);
  } catch (RemoteException e) {
    IOException io = e.unwrapRemoteException();
    if (!(io instanceof DirectoryListingStartAfterNotFoundException)) {
      throw io;
    }
    // This happens when startAfter was just deleted
    LOG.info("Cookie cound't be found: " + new String(startAfter) +
        ", do listing from beginning");
    dlisting = dfsClient.listPaths(dirFileIdPath, HdfsFileStatus.EMPTY_NAME);
  }
  return dlisting;
}
Project: hadoop-on-lustre2    File: RpcProgramNfs3.java
/**
 * Used by readdir and readdirplus to get dirents. It retries the listing if
 * the startAfter can't be found anymore.
 */
private DirectoryListing listPaths(DFSClient dfsClient, String dirFileIdPath,
    byte[] startAfter) throws IOException {
  DirectoryListing dlisting = null;
  try {
    dlisting = dfsClient.listPaths(dirFileIdPath, startAfter);
  } catch (RemoteException e) {
    IOException io = e.unwrapRemoteException();
    if (!(io instanceof DirectoryListingStartAfterNotFoundException)) {
      throw io;
    }
    // This happens when startAfter was just deleted
    LOG.info("Cookie cound't be found: " + new String(startAfter)
        + ", do listing from beginning");
    dlisting = dfsClient
        .listPaths(dirFileIdPath, HdfsFileStatus.EMPTY_NAME);
  }
  return dlisting;
}
Project: hadoop    File: FSDirStatAndListingOp.java
static DirectoryListing getListingInt(FSDirectory fsd, final String srcArg,
    byte[] startAfter, boolean needLocation) throws IOException {
  FSPermissionChecker pc = fsd.getPermissionChecker();
  byte[][] pathComponents = FSDirectory
      .getPathComponentsForReservedPath(srcArg);
  final String startAfterString = new String(startAfter, Charsets.UTF_8);
  final String src = fsd.resolvePath(pc, srcArg, pathComponents);
  final INodesInPath iip = fsd.getINodesInPath(src, true);

  // Get file name when startAfter is an INodePath
  if (FSDirectory.isReservedName(startAfterString)) {
    byte[][] startAfterComponents = FSDirectory
        .getPathComponentsForReservedPath(startAfterString);
    try {
      String tmp = FSDirectory.resolvePath(src, startAfterComponents, fsd);
      byte[][] regularPath = INode.getPathComponents(tmp);
      startAfter = regularPath[regularPath.length - 1];
    } catch (IOException e) {
      // Possibly the inode is deleted
      throw new DirectoryListingStartAfterNotFoundException(
          "Can't find startAfter " + startAfterString);
    }
  }

  boolean isSuperUser = true;
  if (fsd.isPermissionEnabled()) {
    if (iip.getLastINode() != null && iip.getLastINode().isDirectory()) {
      fsd.checkPathAccess(pc, iip, FsAction.READ_EXECUTE);
    } else {
      fsd.checkTraverse(pc, iip);
    }
    isSuperUser = pc.isSuperUser();
  }
  return getListing(fsd, iip, src, startAfter, needLocation, isSuperUser);
}
Project: big-c    File: FSDirStatAndListingOp.java
static DirectoryListing getListingInt(FSDirectory fsd, final String srcArg,
    byte[] startAfter, boolean needLocation) throws IOException {
  FSPermissionChecker pc = fsd.getPermissionChecker();
  byte[][] pathComponents = FSDirectory
      .getPathComponentsForReservedPath(srcArg);
  final String startAfterString = new String(startAfter, Charsets.UTF_8);
  final String src = fsd.resolvePath(pc, srcArg, pathComponents);
  final INodesInPath iip = fsd.getINodesInPath(src, true);

  // Get file name when startAfter is an INodePath
  if (FSDirectory.isReservedName(startAfterString)) {
    byte[][] startAfterComponents = FSDirectory
        .getPathComponentsForReservedPath(startAfterString);
    try {
      String tmp = FSDirectory.resolvePath(src, startAfterComponents, fsd);
      byte[][] regularPath = INode.getPathComponents(tmp);
      startAfter = regularPath[regularPath.length - 1];
    } catch (IOException e) {
      // Possibly the inode is deleted
      throw new DirectoryListingStartAfterNotFoundException(
          "Can't find startAfter " + startAfterString);
    }
  }

  boolean isSuperUser = true;
  if (fsd.isPermissionEnabled()) {
    if (iip.getLastINode() != null && iip.getLastINode().isDirectory()) {
      fsd.checkPathAccess(pc, iip, FsAction.READ_EXECUTE);
    } else {
      fsd.checkTraverse(pc, iip);
    }
    isSuperUser = pc.isSuperUser();
  }
  return getListing(fsd, iip, src, startAfter, needLocation, isSuperUser);
}
Project: hadoop    File: TestINodeFile.java
@Test
public void testFilesInGetListingOps() throws Exception {
  final Configuration conf = new Configuration();
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitActive();
    final DistributedFileSystem hdfs = cluster.getFileSystem();
    final FSDirectory fsdir = cluster.getNamesystem().getFSDirectory();

    hdfs.mkdirs(new Path("/tmp"));
    DFSTestUtil.createFile(hdfs, new Path("/tmp/f1"), 0, (short) 1, 0);
    DFSTestUtil.createFile(hdfs, new Path("/tmp/f2"), 0, (short) 1, 0);
    DFSTestUtil.createFile(hdfs, new Path("/tmp/f3"), 0, (short) 1, 0);

    DirectoryListing dl = cluster.getNameNodeRpc().getListing("/tmp",
        HdfsFileStatus.EMPTY_NAME, false);
    assertTrue(dl.getPartialListing().length == 3);

    String f2 = new String("f2");
    dl = cluster.getNameNodeRpc().getListing("/tmp", f2.getBytes(), false);
    assertTrue(dl.getPartialListing().length == 1);

    INode f2INode = fsdir.getINode("/tmp/f2");
    String f2InodePath = "/.reserved/.inodes/" + f2INode.getId();
    dl = cluster.getNameNodeRpc().getListing("/tmp", f2InodePath.getBytes(),
        false);
    assertTrue(dl.getPartialListing().length == 1);

    // Test the deleted startAfter file
    hdfs.delete(new Path("/tmp/f2"), false);
    try {
      dl = cluster.getNameNodeRpc().getListing("/tmp",
          f2InodePath.getBytes(), false);
      fail("Didn't get exception for the deleted startAfter token.");
    } catch (IOException e) {
      assertTrue(e instanceof DirectoryListingStartAfterNotFoundException);
    }

  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
Project: aliyun-oss-hadoop-fs    File: TestINodeFile.java
@Test
public void testFilesInGetListingOps() throws Exception {
  final Configuration conf = new Configuration();
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitActive();
    final DistributedFileSystem hdfs = cluster.getFileSystem();
    final FSDirectory fsdir = cluster.getNamesystem().getFSDirectory();

    hdfs.mkdirs(new Path("/tmp"));
    DFSTestUtil.createFile(hdfs, new Path("/tmp/f1"), 0, (short) 1, 0);
    DFSTestUtil.createFile(hdfs, new Path("/tmp/f2"), 0, (short) 1, 0);
    DFSTestUtil.createFile(hdfs, new Path("/tmp/f3"), 0, (short) 1, 0);

    DirectoryListing dl = cluster.getNameNodeRpc().getListing("/tmp",
        HdfsFileStatus.EMPTY_NAME, false);
    assertTrue(dl.getPartialListing().length == 3);

    String f2 = new String("f2");
    dl = cluster.getNameNodeRpc().getListing("/tmp", f2.getBytes(), false);
    assertTrue(dl.getPartialListing().length == 1);

    INode f2INode = fsdir.getINode("/tmp/f2");
    String f2InodePath = "/.reserved/.inodes/" + f2INode.getId();
    dl = cluster.getNameNodeRpc().getListing("/tmp", f2InodePath.getBytes(),
        false);
    assertTrue(dl.getPartialListing().length == 1);

    // Test the deleted startAfter file
    hdfs.delete(new Path("/tmp/f2"), false);
    try {
      dl = cluster.getNameNodeRpc().getListing("/tmp",
          f2InodePath.getBytes(), false);
      fail("Didn't get exception for the deleted startAfter token.");
    } catch (IOException e) {
      assertTrue(e instanceof DirectoryListingStartAfterNotFoundException);
    }

  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
Project: big-c    File: TestINodeFile.java
@Test
public void testFilesInGetListingOps() throws Exception {
  final Configuration conf = new Configuration();
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitActive();
    final DistributedFileSystem hdfs = cluster.getFileSystem();
    final FSDirectory fsdir = cluster.getNamesystem().getFSDirectory();

    hdfs.mkdirs(new Path("/tmp"));
    DFSTestUtil.createFile(hdfs, new Path("/tmp/f1"), 0, (short) 1, 0);
    DFSTestUtil.createFile(hdfs, new Path("/tmp/f2"), 0, (short) 1, 0);
    DFSTestUtil.createFile(hdfs, new Path("/tmp/f3"), 0, (short) 1, 0);

    DirectoryListing dl = cluster.getNameNodeRpc().getListing("/tmp",
        HdfsFileStatus.EMPTY_NAME, false);
    assertTrue(dl.getPartialListing().length == 3);

    String f2 = new String("f2");
    dl = cluster.getNameNodeRpc().getListing("/tmp", f2.getBytes(), false);
    assertTrue(dl.getPartialListing().length == 1);

    INode f2INode = fsdir.getINode("/tmp/f2");
    String f2InodePath = "/.reserved/.inodes/" + f2INode.getId();
    dl = cluster.getNameNodeRpc().getListing("/tmp", f2InodePath.getBytes(),
        false);
    assertTrue(dl.getPartialListing().length == 1);

    // Test the deleted startAfter file
    hdfs.delete(new Path("/tmp/f2"), false);
    try {
      dl = cluster.getNameNodeRpc().getListing("/tmp",
          f2InodePath.getBytes(), false);
      fail("Didn't get exception for the deleted startAfter token.");
    } catch (IOException e) {
      assertTrue(e instanceof DirectoryListingStartAfterNotFoundException);
    }

  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
Project: hadoop-2.6.0-cdh5.4.3    File: FSNamesystem.java
private DirectoryListing getListingInt(final String srcArg, byte[] startAfter,
    boolean needLocation)
  throws AccessControlException, UnresolvedLinkException, IOException {
  String src = srcArg;
  DirectoryListing dl;
  FSPermissionChecker pc = getPermissionChecker();
  checkOperation(OperationCategory.READ);
  byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
  String startAfterString = new String(startAfter);
  readLock();
  try {
    checkOperation(OperationCategory.READ);
    src = resolvePath(src, pathComponents);

    // Get file name when startAfter is an INodePath
    if (FSDirectory.isReservedName(startAfterString)) {
      byte[][] startAfterComponents = FSDirectory
          .getPathComponentsForReservedPath(startAfterString);
      try {
        String tmp = FSDirectory.resolvePath(src, startAfterComponents, dir);
        byte[][] regularPath = INode.getPathComponents(tmp);
        startAfter = regularPath[regularPath.length - 1];
      } catch (IOException e) {
        // Possibly the inode is deleted
        throw new DirectoryListingStartAfterNotFoundException(
            "Can't find startAfter " + startAfterString);
      }
    }

    boolean isSuperUser = true;
    if (isPermissionEnabled) {
      if (dir.isDir(src)) {
        checkPathAccess(pc, src, FsAction.READ_EXECUTE);
      } else {
        checkTraverse(pc, src);
      }
      isSuperUser = pc.isSuperUser();
    }
    logAuditEvent(true, "listStatus", srcArg);
    dl = dir.getListing(src, startAfter, needLocation, isSuperUser);
  } finally {
    readUnlock();
  }
  return dl;
}
Project: hadoop-2.6.0-cdh5.4.3    File: TestINodeFile.java
@Test
public void testFilesInGetListingOps() throws Exception {
  final Configuration conf = new Configuration();
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitActive();
    final DistributedFileSystem hdfs = cluster.getFileSystem();
    final FSDirectory fsdir = cluster.getNamesystem().getFSDirectory();

    hdfs.mkdirs(new Path("/tmp"));
    DFSTestUtil.createFile(hdfs, new Path("/tmp/f1"), 0, (short) 1, 0);
    DFSTestUtil.createFile(hdfs, new Path("/tmp/f2"), 0, (short) 1, 0);
    DFSTestUtil.createFile(hdfs, new Path("/tmp/f3"), 0, (short) 1, 0);

    DirectoryListing dl = cluster.getNameNodeRpc().getListing("/tmp",
        HdfsFileStatus.EMPTY_NAME, false);
    assertTrue(dl.getPartialListing().length == 3);

    String f2 = new String("f2");
    dl = cluster.getNameNodeRpc().getListing("/tmp", f2.getBytes(), false);
    assertTrue(dl.getPartialListing().length == 1);

    INode f2INode = fsdir.getINode("/tmp/f2");
    String f2InodePath = "/.reserved/.inodes/" + f2INode.getId();
    dl = cluster.getNameNodeRpc().getListing("/tmp", f2InodePath.getBytes(),
        false);
    assertTrue(dl.getPartialListing().length == 1);

    // Test the deleted startAfter file
    hdfs.delete(new Path("/tmp/f2"), false);
    try {
      dl = cluster.getNameNodeRpc().getListing("/tmp",
          f2InodePath.getBytes(), false);
      fail("Didn't get exception for the deleted startAfter token.");
    } catch (IOException e) {
      assertTrue(e instanceof DirectoryListingStartAfterNotFoundException);
    }

  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
Project: FlexMap    File: FSNamesystem.java
private DirectoryListing getListingInt(final String srcArg, byte[] startAfter,
    boolean needLocation)
  throws AccessControlException, UnresolvedLinkException, IOException {
  String src = srcArg;
  DirectoryListing dl;
  FSPermissionChecker pc = getPermissionChecker();
  checkOperation(OperationCategory.READ);
  byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
  String startAfterString = new String(startAfter);
  readLock();
  try {
    checkOperation(OperationCategory.READ);
    src = resolvePath(src, pathComponents);

    // Get file name when startAfter is an INodePath
    if (FSDirectory.isReservedName(startAfterString)) {
      byte[][] startAfterComponents = FSDirectory
          .getPathComponentsForReservedPath(startAfterString);
      try {
        String tmp = FSDirectory.resolvePath(src, startAfterComponents, dir);
        byte[][] regularPath = INode.getPathComponents(tmp);
        startAfter = regularPath[regularPath.length - 1];
      } catch (IOException e) {
        // Possibly the inode is deleted
        throw new DirectoryListingStartAfterNotFoundException(
            "Can't find startAfter " + startAfterString);
      }
    }

    boolean isSuperUser = true;
    if (isPermissionEnabled) {
      if (dir.isDir(src)) {
        checkPathAccess(pc, src, FsAction.READ_EXECUTE);
      } else {
        checkTraverse(pc, src);
      }
      isSuperUser = pc.isSuperUser();
    }
    logAuditEvent(true, "listStatus", srcArg);
    dl = dir.getListing(src, startAfter, needLocation, isSuperUser);
  } finally {
    readUnlock();
  }
  return dl;
}
Project: FlexMap    File: TestINodeFile.java
@Test
public void testFilesInGetListingOps() throws Exception {
  final Configuration conf = new Configuration();
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitActive();
    final DistributedFileSystem hdfs = cluster.getFileSystem();
    final FSDirectory fsdir = cluster.getNamesystem().getFSDirectory();

    hdfs.mkdirs(new Path("/tmp"));
    DFSTestUtil.createFile(hdfs, new Path("/tmp/f1"), 0, (short) 1, 0);
    DFSTestUtil.createFile(hdfs, new Path("/tmp/f2"), 0, (short) 1, 0);
    DFSTestUtil.createFile(hdfs, new Path("/tmp/f3"), 0, (short) 1, 0);

    DirectoryListing dl = cluster.getNameNodeRpc().getListing("/tmp",
        HdfsFileStatus.EMPTY_NAME, false);
    assertTrue(dl.getPartialListing().length == 3);

    String f2 = new String("f2");
    dl = cluster.getNameNodeRpc().getListing("/tmp", f2.getBytes(), false);
    assertTrue(dl.getPartialListing().length == 1);

    INode f2INode = fsdir.getINode("/tmp/f2");
    String f2InodePath = "/.reserved/.inodes/" + f2INode.getId();
    dl = cluster.getNameNodeRpc().getListing("/tmp", f2InodePath.getBytes(),
        false);
    assertTrue(dl.getPartialListing().length == 1);

    // Test the deleted startAfter file
    hdfs.delete(new Path("/tmp/f2"), false);
    try {
      dl = cluster.getNameNodeRpc().getListing("/tmp",
          f2InodePath.getBytes(), false);
      fail("Didn't get exception for the deleted startAfter token.");
    } catch (IOException e) {
      assertTrue(e instanceof DirectoryListingStartAfterNotFoundException);
    }

  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
Project: hadoop-on-lustre2    File: FSNamesystem.java
private DirectoryListing getListingInt(String src, byte[] startAfter,
    boolean needLocation) 
  throws AccessControlException, UnresolvedLinkException, IOException {
  DirectoryListing dl;
  FSPermissionChecker pc = getPermissionChecker();
  checkOperation(OperationCategory.READ);
  byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
  String startAfterString = new String(startAfter);
  readLock();
  try {
    checkOperation(OperationCategory.READ);
    src = FSDirectory.resolvePath(src, pathComponents, dir);

    // Get file name when startAfter is an INodePath
    if (FSDirectory.isReservedName(startAfterString)) {
      byte[][] startAfterComponents = FSDirectory
          .getPathComponentsForReservedPath(startAfterString);
      try {
        String tmp = FSDirectory.resolvePath(src, startAfterComponents, dir);
        byte[][] regularPath = INode.getPathComponents(tmp);
        startAfter = regularPath[regularPath.length - 1];
      } catch (IOException e) {
        // Possibly the inode is deleted
        throw new DirectoryListingStartAfterNotFoundException(
            "Can't find startAfter " + startAfterString);
      }
    }

    if (isPermissionEnabled) {
      if (dir.isDir(src)) {
        checkPathAccess(pc, src, FsAction.READ_EXECUTE);
      } else {
        checkTraverse(pc, src);
      }
    }
    logAuditEvent(true, "listStatus", src);
    dl = dir.getListing(src, startAfter, needLocation);
  } finally {
    readUnlock();
  }
  return dl;
}
Project: hadoop-on-lustre2    File: TestINodeFile.java
@Test
public void testFilesInGetListingOps() throws Exception {
  final Configuration conf = new Configuration();
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitActive();
    final DistributedFileSystem hdfs = cluster.getFileSystem();
    final FSDirectory fsdir = cluster.getNamesystem().getFSDirectory();

    hdfs.mkdirs(new Path("/tmp"));
    DFSTestUtil.createFile(hdfs, new Path("/tmp/f1"), 0, (short) 1, 0);
    DFSTestUtil.createFile(hdfs, new Path("/tmp/f2"), 0, (short) 1, 0);
    DFSTestUtil.createFile(hdfs, new Path("/tmp/f3"), 0, (short) 1, 0);

    DirectoryListing dl = cluster.getNameNodeRpc().getListing("/tmp",
        HdfsFileStatus.EMPTY_NAME, false);
    assertTrue(dl.getPartialListing().length == 3);

    String f2 = new String("f2");
    dl = cluster.getNameNodeRpc().getListing("/tmp", f2.getBytes(), false);
    assertTrue(dl.getPartialListing().length == 1);

    INode f2INode = fsdir.getINode("/tmp/f2");
    String f2InodePath = "/.reserved/.inodes/" + f2INode.getId();
    dl = cluster.getNameNodeRpc().getListing("/tmp", f2InodePath.getBytes(),
        false);
    assertTrue(dl.getPartialListing().length == 1);

    // Test the deleted startAfter file
    hdfs.delete(new Path("/tmp/f2"), false);
    try {
      dl = cluster.getNameNodeRpc().getListing("/tmp",
          f2InodePath.getBytes(), false);
      fail("Didn't get exception for the deleted startAfter token.");
    } catch (IOException e) {
      assertTrue(e instanceof DirectoryListingStartAfterNotFoundException);
    }

  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}