Java Class org.apache.hadoop.hdfs.server.namenode.LeaseManager Code Examples

Project: hadoop    File: DFSTestUtil.java
public static void setNameNodeLogLevel(Level level) {
  GenericTestUtils.setLogLevel(FSNamesystem.LOG, level);
  GenericTestUtils.setLogLevel(BlockManager.LOG, level);
  GenericTestUtils.setLogLevel(LeaseManager.LOG, level);
  GenericTestUtils.setLogLevel(NameNode.LOG, level);
  GenericTestUtils.setLogLevel(NameNode.stateChangeLog, level);
  GenericTestUtils.setLogLevel(NameNode.blockStateChangeLog, level);
}
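
A minimal usage sketch (an editorial addition, not from the project source): a test can raise NameNode-side logging, including LeaseManager.LOG, while debugging lease behavior, then restore a quieter level. The Level constants come from org.apache.log4j, which these utilities already use; the INFO default is an assumption.

// Hypothetical test fragment; setNameNodeLogLevel is the helper shown above.
DFSTestUtil.setNameNodeLogLevel(Level.ALL);
try {
  // ... run the lease-related assertions ...
} finally {
  DFSTestUtil.setNameNodeLogLevel(Level.INFO);  // assumed default for the suite
}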
Project: hadoop    File: SnapshotTestHelper.java
/** Disable the logs that are not very useful for snapshot related tests. */
public static void disableLogs() {
  final String[] lognames = {
      "org.apache.hadoop.hdfs.server.datanode.BlockPoolSliceScanner",
      "org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl",
      "org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetAsyncDiskService",
  };
  for(String n : lognames) {
    GenericTestUtils.disableLog(LogFactory.getLog(n));
  }

  GenericTestUtils.disableLog(LogFactory.getLog(UserGroupInformation.class));
  GenericTestUtils.disableLog(LogFactory.getLog(BlockManager.class));
  GenericTestUtils.disableLog(LogFactory.getLog(FSNamesystem.class));
  GenericTestUtils.disableLog(LogFactory.getLog(DirectoryScanner.class));
  GenericTestUtils.disableLog(LogFactory.getLog(MetricsSystemImpl.class));

  GenericTestUtils.disableLog(BlockScanner.LOG);
  GenericTestUtils.disableLog(HttpServer2.LOG);
  GenericTestUtils.disableLog(DataNode.LOG);
  GenericTestUtils.disableLog(BlockPoolSliceStorage.LOG);
  GenericTestUtils.disableLog(LeaseManager.LOG);
  GenericTestUtils.disableLog(NameNode.stateChangeLog);
  GenericTestUtils.disableLog(NameNode.blockStateChangeLog);
  GenericTestUtils.disableLog(DFSClient.LOG);
  GenericTestUtils.disableLog(Server.LOG);
}
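
A hedged example of calling this helper; the JUnit @BeforeClass anchor is an assumption for illustration, not taken from the project source.

// Hypothetical fixture: silence the noisy daemon logs once before any
// snapshot test in the class runs (requires org.junit.BeforeClass).
@BeforeClass
public static void quietLogs() {
  SnapshotTestHelper.disableLogs();
}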
Project: aliyun-oss-hadoop-fs    File: DFSTestUtil.java
public static void setNameNodeLogLevel(Level level) {
  GenericTestUtils.setLogLevel(FSNamesystem.LOG, level);
  GenericTestUtils.setLogLevel(BlockManager.LOG, level);
  GenericTestUtils.setLogLevel(LeaseManager.LOG, level);
  GenericTestUtils.setLogLevel(NameNode.LOG, level);
  GenericTestUtils.setLogLevel(NameNode.stateChangeLog, level);
  GenericTestUtils.setLogLevel(NameNode.blockStateChangeLog, level);
}
Project: aliyun-oss-hadoop-fs    File: SnapshotTestHelper.java
/** Disable the logs that are not very useful for snapshot related tests. */
public static void disableLogs() {
  final String[] lognames = {
      "org.apache.hadoop.hdfs.server.datanode.BlockPoolSliceScanner",
      "org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl",
      "org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetAsyncDiskService",
  };
  for(String n : lognames) {
    GenericTestUtils.disableLog(LogFactory.getLog(n));
  }

  GenericTestUtils.disableLog(LogFactory.getLog(UserGroupInformation.class));
  GenericTestUtils.disableLog(LogFactory.getLog(BlockManager.class));
  GenericTestUtils.disableLog(LogFactory.getLog(FSNamesystem.class));
  GenericTestUtils.disableLog(LogFactory.getLog(DirectoryScanner.class));
  GenericTestUtils.disableLog(LogFactory.getLog(MetricsSystemImpl.class));

  GenericTestUtils.disableLog(BlockScanner.LOG);
  GenericTestUtils.disableLog(HttpServer2.LOG);
  GenericTestUtils.disableLog(DataNode.LOG);
  GenericTestUtils.disableLog(BlockPoolSliceStorage.LOG);
  GenericTestUtils.disableLog(LeaseManager.LOG);
  GenericTestUtils.disableLog(NameNode.stateChangeLog);
  GenericTestUtils.disableLog(NameNode.blockStateChangeLog);
  GenericTestUtils.disableLog(DFSClient.LOG);
  GenericTestUtils.disableLog(Server.LOG);
}
Project: big-c    File: DFSTestUtil.java
public static void setNameNodeLogLevel(Level level) {
  GenericTestUtils.setLogLevel(FSNamesystem.LOG, level);
  GenericTestUtils.setLogLevel(BlockManager.LOG, level);
  GenericTestUtils.setLogLevel(LeaseManager.LOG, level);
  GenericTestUtils.setLogLevel(NameNode.LOG, level);
  GenericTestUtils.setLogLevel(NameNode.stateChangeLog, level);
  GenericTestUtils.setLogLevel(NameNode.blockStateChangeLog, level);
}
Project: big-c    File: SnapshotTestHelper.java
/** Disable the logs that are not very useful for snapshot related tests. */
public static void disableLogs() {
  final String[] lognames = {
      "org.apache.hadoop.hdfs.server.datanode.BlockPoolSliceScanner",
      "org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl",
      "org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetAsyncDiskService",
  };
  for(String n : lognames) {
    GenericTestUtils.disableLog(LogFactory.getLog(n));
  }

  GenericTestUtils.disableLog(LogFactory.getLog(UserGroupInformation.class));
  GenericTestUtils.disableLog(LogFactory.getLog(BlockManager.class));
  GenericTestUtils.disableLog(LogFactory.getLog(FSNamesystem.class));
  GenericTestUtils.disableLog(LogFactory.getLog(DirectoryScanner.class));
  GenericTestUtils.disableLog(LogFactory.getLog(MetricsSystemImpl.class));

  GenericTestUtils.disableLog(BlockScanner.LOG);
  GenericTestUtils.disableLog(HttpServer2.LOG);
  GenericTestUtils.disableLog(DataNode.LOG);
  GenericTestUtils.disableLog(BlockPoolSliceStorage.LOG);
  GenericTestUtils.disableLog(LeaseManager.LOG);
  GenericTestUtils.disableLog(NameNode.stateChangeLog);
  GenericTestUtils.disableLog(NameNode.blockStateChangeLog);
  GenericTestUtils.disableLog(DFSClient.LOG);
  GenericTestUtils.disableLog(Server.LOG);
}
Project: hadoop-2.6.0-cdh5.4.3    File: DFSTestUtil.java
public static void setNameNodeLogLevel(Level level) {
  GenericTestUtils.setLogLevel(FSNamesystem.LOG, level);
  GenericTestUtils.setLogLevel(BlockManager.LOG, level);
  GenericTestUtils.setLogLevel(LeaseManager.LOG, level);
  GenericTestUtils.setLogLevel(NameNode.LOG, level);
  GenericTestUtils.setLogLevel(NameNode.stateChangeLog, level);
  GenericTestUtils.setLogLevel(NameNode.blockStateChangeLog, level);
}
Project: hadoop-2.6.0-cdh5.4.3    File: SnapshotTestHelper.java
/** Disable the logs that are not very useful for snapshot related tests. */
public static void disableLogs() {
  final String[] lognames = {
      "org.apache.hadoop.hdfs.server.datanode.BlockPoolSliceScanner",
      "org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl",
      "org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetAsyncDiskService",
  };
  for(String n : lognames) {
    GenericTestUtils.disableLog(LogFactory.getLog(n));
  }

  GenericTestUtils.disableLog(LogFactory.getLog(UserGroupInformation.class));
  GenericTestUtils.disableLog(LogFactory.getLog(BlockManager.class));
  GenericTestUtils.disableLog(LogFactory.getLog(FSNamesystem.class));
  GenericTestUtils.disableLog(LogFactory.getLog(DirectoryScanner.class));
  GenericTestUtils.disableLog(LogFactory.getLog(MetricsSystemImpl.class));

  GenericTestUtils.disableLog(BlockScanner.LOG);
  GenericTestUtils.disableLog(HttpServer2.LOG);
  GenericTestUtils.disableLog(DataNode.LOG);
  GenericTestUtils.disableLog(BlockPoolSliceStorage.LOG);
  GenericTestUtils.disableLog(LeaseManager.LOG);
  GenericTestUtils.disableLog(NameNode.stateChangeLog);
  GenericTestUtils.disableLog(NameNode.blockStateChangeLog);
  GenericTestUtils.disableLog(DFSClient.LOG);
  GenericTestUtils.disableLog(Server.LOG);
}
Project: hadoop-EAR    File: NNThroughputBenchmark.java
static void turnOffNameNodeLogging() {
    // Change log level to ERROR for the NameNode, NetworkTopology,
    // FSNamesystem, audit, and LeaseManager loggers.
    ((Log4JLogger) NameNode.LOG).getLogger().setLevel(Level.ERROR);
    ((Log4JLogger) NameNode.stateChangeLog).getLogger().setLevel(
            Level.ERROR);
    ((Log4JLogger) NetworkTopology.LOG).getLogger().setLevel(Level.ERROR);
    ((Log4JLogger) FSNamesystem.LOG).getLogger().setLevel(Level.ERROR);
    ((Log4JLogger) FSNamesystem.auditLog).getLogger().setLevel(Level.ERROR);
    ((Log4JLogger) LeaseManager.LOG).getLogger().setLevel(Level.ERROR);
}
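
The casts above lower the levels for the remainder of the JVM. A small sketch (an editorial addition, using the same Log4j 1.x API) that saves and restores a level around the noisy phase instead:

// Capture the current level, silence the logger, then restore it so later
// test output is unaffected.
org.apache.log4j.Logger nnLog = ((Log4JLogger) NameNode.LOG).getLogger();
org.apache.log4j.Level saved = nnLog.getLevel();  // may be null if inherited
nnLog.setLevel(Level.ERROR);
try {
  // ... run the benchmark phase ...
} finally {
  nnLog.setLevel(saved);  // null re-enables inheritance from the parent logger
}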
Project: hadoop-EAR    File: TestStandbySafeMode.java
@Test
public void testLeaseExpiry() throws Exception {
  setUp(true, "testLeaseExpiry");
  h.setIgnoreDatanodes(false);
  LeaseManager leaseManager = cluster.getStandbyAvatar(0).avatar.namesystem.leaseManager;
  // Set low lease periods.
  leaseManager.setLeasePeriod(LEASE_PERIOD, LEASE_PERIOD);
  String src = "/testLeaseExpiry";

  // Create some data.
  FSDataOutputStream out = fs.create(new Path(src));
  byte[] buffer = new byte[BLOCK_SIZE * 2];
  random.nextBytes(buffer);
  out.write(buffer);
  out.sync();

  // Wait for the hard lease time to expire.
  Thread.sleep(LEASE_PERIOD * 2);

  cluster.failOver();
  LOG.info("Failover done");

  // Renew lease.
  String clientName = ((DistributedFileSystem)fs).getClient().getClientName();
  cluster.getPrimaryAvatar(0).avatar.renewLease(clientName);
  LOG.info("Lease renewal done");

  // Wait to see whether lease expires.
  long start = System.currentTimeMillis();
  while (System.currentTimeMillis() - start < MAX_WAIT_TIME
      && leaseManager.getLeaseByPath(src) != null) {
    DFSTestUtil.waitSecond();
  }
  LOG.info("Wait for lease done");

  // Now try to write to the file.
  out.write(buffer);
  out.sync();
}
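
For reference, LeaseManager.setLeasePeriod takes the soft limit first and the hard limit second; the test above collapses both to LEASE_PERIOD. A sketch with distinct, hypothetical values:

// Hypothetical test values: after SOFT_MS another client may force lease
// recovery on the path; after HARD_MS the NameNode reclaims the lease
// itself, which is the expiry testLeaseExpiry waits for.
final long SOFT_MS = 1000;
final long HARD_MS = 2000;
leaseManager.setLeasePeriod(SOFT_MS, HARD_MS);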
Project: hadoop-plus    File: TestWebHdfsWithMultipleNameNodes.java
static private void setLogLevel() {
  ((Log4JLogger)LOG).getLogger().setLevel(Level.ALL);
  ((Log4JLogger)NamenodeWebHdfsMethods.LOG).getLogger().setLevel(Level.ALL);

  ((Log4JLogger)NameNode.stateChangeLog).getLogger().setLevel(Level.OFF);
  ((Log4JLogger)LeaseManager.LOG).getLogger().setLevel(Level.OFF);
  ((Log4JLogger)LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.OFF);
}
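
On branches that ship GenericTestUtils.setLogLevel (used by the DFSTestUtil snippets earlier on this page), the same silencing can be written without the Log4JLogger casts; a sketch under that assumption:

// Cast-free equivalent, assuming this branch's GenericTestUtils exposes
// setLogLevel(Log, Level) as the hadoop snippets above do.
GenericTestUtils.setLogLevel(NameNode.stateChangeLog, Level.OFF);
GenericTestUtils.setLogLevel(LeaseManager.LOG, Level.OFF);
GenericTestUtils.setLogLevel(LogFactory.getLog(FSNamesystem.class), Level.OFF);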
Project: hadoop-plus    File: SnapshotTestHelper.java
/** Disable the logs that are not very useful for snapshot related tests. */
public static void disableLogs() {
  final String[] lognames = {
      "org.apache.hadoop.hdfs.server.datanode.BlockPoolSliceScanner",
      "org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl",
      "org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetAsyncDiskService",
  };
  for(String n : lognames) {
    setLevel2OFF(LogFactory.getLog(n));
  }

  setLevel2OFF(LogFactory.getLog(UserGroupInformation.class));
  setLevel2OFF(LogFactory.getLog(BlockManager.class));
  setLevel2OFF(LogFactory.getLog(FSNamesystem.class));
  setLevel2OFF(LogFactory.getLog(DirectoryScanner.class));
  setLevel2OFF(LogFactory.getLog(MetricsSystemImpl.class));

  setLevel2OFF(DataBlockScanner.LOG);
  setLevel2OFF(HttpServer.LOG);
  setLevel2OFF(DataNode.LOG);
  setLevel2OFF(BlockPoolSliceStorage.LOG);
  setLevel2OFF(LeaseManager.LOG);
  setLevel2OFF(NameNode.stateChangeLog);
  setLevel2OFF(NameNode.blockStateChangeLog);
  setLevel2OFF(DFSClient.LOG);
  setLevel2OFF(Server.LOG);
}
Project: FlexMap    File: TestWebHdfsWithMultipleNameNodes.java
static private void setLogLevel() {
  ((Log4JLogger)LOG).getLogger().setLevel(Level.ALL);
  ((Log4JLogger)NamenodeWebHdfsMethods.LOG).getLogger().setLevel(Level.ALL);

  ((Log4JLogger)NameNode.stateChangeLog).getLogger().setLevel(Level.OFF);
  ((Log4JLogger)LeaseManager.LOG).getLogger().setLevel(Level.OFF);
  ((Log4JLogger)LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.OFF);
}
Project: FlexMap    File: SnapshotTestHelper.java
/** Disable the logs that are not very useful for snapshot related tests. */
public static void disableLogs() {
  final String[] lognames = {
      "org.apache.hadoop.hdfs.server.datanode.BlockPoolSliceScanner",
      "org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl",
      "org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetAsyncDiskService",
  };
  for(String n : lognames) {
    setLevel2OFF(LogFactory.getLog(n));
  }

  setLevel2OFF(LogFactory.getLog(UserGroupInformation.class));
  setLevel2OFF(LogFactory.getLog(BlockManager.class));
  setLevel2OFF(LogFactory.getLog(FSNamesystem.class));
  setLevel2OFF(LogFactory.getLog(DirectoryScanner.class));
  setLevel2OFF(LogFactory.getLog(MetricsSystemImpl.class));

  setLevel2OFF(DataBlockScanner.LOG);
  setLevel2OFF(HttpServer2.LOG);
  setLevel2OFF(DataNode.LOG);
  setLevel2OFF(BlockPoolSliceStorage.LOG);
  setLevel2OFF(LeaseManager.LOG);
  setLevel2OFF(NameNode.stateChangeLog);
  setLevel2OFF(NameNode.blockStateChangeLog);
  setLevel2OFF(DFSClient.LOG);
  setLevel2OFF(Server.LOG);
}
Project: hops    File: TestWebHdfsWithMultipleNameNodes.java
static private void setLogLevel() {
  ((Log4JLogger) LOG).getLogger().setLevel(Level.ALL);
  ((Log4JLogger) NamenodeWebHdfsMethods.LOG).getLogger().setLevel(Level.ALL);

  ((Log4JLogger) NameNode.stateChangeLog).getLogger().setLevel(Level.OFF);
  ((Log4JLogger) LeaseManager.LOG).getLogger().setLevel(Level.OFF);
  ((Log4JLogger) LogFactory.getLog(FSNamesystem.class)).getLogger()
      .setLevel(Level.OFF);
}
Project: hadoop-TCP    File: TestWebHdfsWithMultipleNameNodes.java
static private void setLogLevel() {
  ((Log4JLogger)LOG).getLogger().setLevel(Level.ALL);
  ((Log4JLogger)NamenodeWebHdfsMethods.LOG).getLogger().setLevel(Level.ALL);

  ((Log4JLogger)NameNode.stateChangeLog).getLogger().setLevel(Level.OFF);
  ((Log4JLogger)LeaseManager.LOG).getLogger().setLevel(Level.OFF);
  ((Log4JLogger)LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.OFF);
}
Project: hadoop-TCP    File: SnapshotTestHelper.java
/** Disable the logs that are not very useful for snapshot related tests. */
public static void disableLogs() {
  final String[] lognames = {
      "org.apache.hadoop.hdfs.server.datanode.BlockPoolSliceScanner",
      "org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl",
      "org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetAsyncDiskService",
  };
  for(String n : lognames) {
    setLevel2OFF(LogFactory.getLog(n));
  }

  setLevel2OFF(LogFactory.getLog(UserGroupInformation.class));
  setLevel2OFF(LogFactory.getLog(BlockManager.class));
  setLevel2OFF(LogFactory.getLog(FSNamesystem.class));
  setLevel2OFF(LogFactory.getLog(DirectoryScanner.class));
  setLevel2OFF(LogFactory.getLog(MetricsSystemImpl.class));

  setLevel2OFF(DataBlockScanner.LOG);
  setLevel2OFF(HttpServer.LOG);
  setLevel2OFF(DataNode.LOG);
  setLevel2OFF(BlockPoolSliceStorage.LOG);
  setLevel2OFF(LeaseManager.LOG);
  setLevel2OFF(NameNode.stateChangeLog);
  setLevel2OFF(NameNode.blockStateChangeLog);
  setLevel2OFF(DFSClient.LOG);
  setLevel2OFF(Server.LOG);
}
Project: hardfs    File: TestWebHdfsWithMultipleNameNodes.java
static private void setLogLevel() {
  ((Log4JLogger)LOG).getLogger().setLevel(Level.ALL);
  ((Log4JLogger)NamenodeWebHdfsMethods.LOG).getLogger().setLevel(Level.ALL);

  ((Log4JLogger)NameNode.stateChangeLog).getLogger().setLevel(Level.OFF);
  ((Log4JLogger)LeaseManager.LOG).getLogger().setLevel(Level.OFF);
  ((Log4JLogger)LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.OFF);
}
Project: hardfs    File: SnapshotTestHelper.java
/** Disable the logs that are not very useful for snapshot related tests. */
public static void disableLogs() {
  final String[] lognames = {
      "org.apache.hadoop.hdfs.server.datanode.BlockPoolSliceScanner",
      "org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl",
      "org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetAsyncDiskService",
  };
  for(String n : lognames) {
    setLevel2OFF(LogFactory.getLog(n));
  }

  setLevel2OFF(LogFactory.getLog(UserGroupInformation.class));
  setLevel2OFF(LogFactory.getLog(BlockManager.class));
  setLevel2OFF(LogFactory.getLog(FSNamesystem.class));
  setLevel2OFF(LogFactory.getLog(DirectoryScanner.class));
  setLevel2OFF(LogFactory.getLog(MetricsSystemImpl.class));

  setLevel2OFF(DataBlockScanner.LOG);
  setLevel2OFF(HttpServer.LOG);
  setLevel2OFF(DataNode.LOG);
  setLevel2OFF(BlockPoolSliceStorage.LOG);
  setLevel2OFF(LeaseManager.LOG);
  setLevel2OFF(NameNode.stateChangeLog);
  setLevel2OFF(NameNode.blockStateChangeLog);
  setLevel2OFF(DFSClient.LOG);
  setLevel2OFF(Server.LOG);
}
Project: hadoop-on-lustre2    File: TestWebHdfsWithMultipleNameNodes.java
static private void setLogLevel() {
  ((Log4JLogger)LOG).getLogger().setLevel(Level.ALL);
  ((Log4JLogger)NamenodeWebHdfsMethods.LOG).getLogger().setLevel(Level.ALL);

  ((Log4JLogger)NameNode.stateChangeLog).getLogger().setLevel(Level.OFF);
  ((Log4JLogger)LeaseManager.LOG).getLogger().setLevel(Level.OFF);
  ((Log4JLogger)LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.OFF);
}
Project: hadoop-on-lustre2    File: SnapshotTestHelper.java
/** Disable the logs that are not very useful for snapshot related tests. */
public static void disableLogs() {
  final String[] lognames = {
      "org.apache.hadoop.hdfs.server.datanode.BlockPoolSliceScanner",
      "org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl",
      "org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetAsyncDiskService",
  };
  for(String n : lognames) {
    setLevel2OFF(LogFactory.getLog(n));
  }

  setLevel2OFF(LogFactory.getLog(UserGroupInformation.class));
  setLevel2OFF(LogFactory.getLog(BlockManager.class));
  setLevel2OFF(LogFactory.getLog(FSNamesystem.class));
  setLevel2OFF(LogFactory.getLog(DirectoryScanner.class));
  setLevel2OFF(LogFactory.getLog(MetricsSystemImpl.class));

  setLevel2OFF(DataBlockScanner.LOG);
  setLevel2OFF(HttpServer2.LOG);
  setLevel2OFF(DataNode.LOG);
  setLevel2OFF(BlockPoolSliceStorage.LOG);
  setLevel2OFF(LeaseManager.LOG);
  setLevel2OFF(NameNode.stateChangeLog);
  setLevel2OFF(NameNode.blockStateChangeLog);
  setLevel2OFF(DFSClient.LOG);
  setLevel2OFF(Server.LOG);
}
Project: RDFS    File: NNThroughputBenchmark.java
static void turnOffNameNodeLogging() {
    // Change log level to ERROR for the NameNode, NetworkTopology,
    // FSNamesystem, audit, and LeaseManager loggers.
    ((Log4JLogger) NameNode.LOG).getLogger().setLevel(Level.ERROR);
    ((Log4JLogger) NameNode.stateChangeLog).getLogger().setLevel(
            Level.ERROR);
    ((Log4JLogger) NetworkTopology.LOG).getLogger().setLevel(Level.ERROR);
    ((Log4JLogger) FSNamesystem.LOG).getLogger().setLevel(Level.ERROR);
    ((Log4JLogger) FSNamesystem.auditLog).getLogger().setLevel(Level.ERROR);
    ((Log4JLogger) LeaseManager.LOG).getLogger().setLevel(Level.ERROR);
}
Project: RDFS    File: TestStandbySafeMode.java
@Test
public void testLeaseExpiry() throws Exception {
  setUp(true);
  h.setIgnoreDatanodes(false);
  LeaseManager leaseManager = cluster.getStandbyAvatar(0).avatar.namesystem.leaseManager;
  // Set low lease periods.
  leaseManager.setLeasePeriod(LEASE_PERIOD, LEASE_PERIOD);
  String src = "/testLeaseExpiry";

  // Create some data.
  FSDataOutputStream out = fs.create(new Path(src));
  byte[] buffer = new byte[BLOCK_SIZE * 2];
  random.nextBytes(buffer);
  out.write(buffer);
  out.sync();

  // Wait for the hard lease time to expire.
  Thread.sleep(LEASE_PERIOD * 2);

  cluster.failOver();
  LOG.info("Failover done");

  // Renew lease.
  ((DistributedFileSystem)fs).getClient().leasechecker.renew();
  LOG.info("Lease renewal done");

  // Wait to see whether lease expires.
  long start = System.currentTimeMillis();
  while (System.currentTimeMillis() - start < MAX_WAIT_TIME
      && leaseManager.getLeaseByPath(src) != null) {
    Thread.sleep(1000);
  }
  LOG.info("Wait for lease done");

  // Now try to write to the file.
  out.write(buffer);
  out.sync();
}
Project: aliyun-oss-hadoop-fs    File: TestLeaseRecovery.java
/**
 * The following test first creates a file with a few blocks.
 * It randomly truncates the replica of the last block stored in each datanode.
 * Finally, it triggers block synchronization to synchronize all stored blocks.
 */
@Test
public void testBlockSynchronization() throws Exception {
  final int ORG_FILE_SIZE = 3000; 
  Configuration conf = new HdfsConfiguration();
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(5).build();
  cluster.waitActive();

  //create a file
  DistributedFileSystem dfs = cluster.getFileSystem();
  String filestr = "/foo";
  Path filepath = new Path(filestr);
  DFSTestUtil.createFile(dfs, filepath, ORG_FILE_SIZE, REPLICATION_NUM, 0L);
  assertTrue(dfs.exists(filepath));
  DFSTestUtil.waitReplication(dfs, filepath, REPLICATION_NUM);

  //get block info for the last block
  LocatedBlock locatedblock = TestInterDatanodeProtocol.getLastLocatedBlock(
      dfs.dfs.getNamenode(), filestr);
  DatanodeInfo[] datanodeinfos = locatedblock.getLocations();
  assertEquals(REPLICATION_NUM, datanodeinfos.length);

  //connect to data nodes
  DataNode[] datanodes = new DataNode[REPLICATION_NUM];
  for(int i = 0; i < REPLICATION_NUM; i++) {
    datanodes[i] = cluster.getDataNode(datanodeinfos[i].getIpcPort());
    assertTrue(datanodes[i] != null);
  }

  //verify Block Info
  ExtendedBlock lastblock = locatedblock.getBlock();
  DataNode.LOG.info("newblocks=" + lastblock);
  for(int i = 0; i < REPLICATION_NUM; i++) {
    checkMetaInfo(lastblock, datanodes[i]);
  }

  DataNode.LOG.info("dfs.dfs.clientName=" + dfs.dfs.clientName);
  cluster.getNameNodeRpc().append(filestr, dfs.dfs.clientName,
      new EnumSetWritable<>(EnumSet.of(CreateFlag.APPEND)));

  // expire lease to trigger block recovery.
  waitLeaseRecovery(cluster);

  Block[] updatedmetainfo = new Block[REPLICATION_NUM];
  long oldSize = lastblock.getNumBytes();
  lastblock = TestInterDatanodeProtocol.getLastLocatedBlock(
      dfs.dfs.getNamenode(), filestr).getBlock();
  long currentGS = lastblock.getGenerationStamp();
  for(int i = 0; i < REPLICATION_NUM; i++) {
    updatedmetainfo[i] = DataNodeTestUtils.getFSDataset(datanodes[i]).getStoredBlock(
        lastblock.getBlockPoolId(), lastblock.getBlockId());
    assertEquals(lastblock.getBlockId(), updatedmetainfo[i].getBlockId());
    assertEquals(oldSize, updatedmetainfo[i].getNumBytes());
    assertEquals(currentGS, updatedmetainfo[i].getGenerationStamp());
  }

  // verify that lease recovery does not occur when namenode is in safemode
  System.out.println("Testing that lease recovery cannot happen during safemode.");
  filestr = "/foo.safemode";
  filepath = new Path(filestr);
  dfs.create(filepath, (short)1);
  cluster.getNameNodeRpc().setSafeMode(
      HdfsConstants.SafeModeAction.SAFEMODE_ENTER, false);
  assertTrue(dfs.dfs.exists(filestr));
  DFSTestUtil.waitReplication(dfs, filepath, (short)1);
  waitLeaseRecovery(cluster);
  // verify that we still cannot recover the lease
  LeaseManager lm = NameNodeAdapter.getLeaseManager(cluster.getNamesystem());
  assertTrue("Found " + lm.countLease() + " lease, expected 1", lm.countLease() == 1);
  cluster.getNameNodeRpc().setSafeMode(
      HdfsConstants.SafeModeAction.SAFEMODE_LEAVE, false);
}
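
waitLeaseRecovery is defined in the surrounding test class and not shown here; a rough sketch of the same idea, polling the LeaseManager until the NameNode holds no leases (the helper name and timeout are hypothetical):

// Hypothetical polling helper built only from methods shown on this page.
static void waitForNoLeases(MiniDFSCluster cluster, long timeoutMs)
    throws InterruptedException {
  LeaseManager lm = NameNodeAdapter.getLeaseManager(cluster.getNamesystem());
  long deadline = System.currentTimeMillis() + timeoutMs;
  while (lm.countLease() > 0 && System.currentTimeMillis() < deadline) {
    Thread.sleep(100);
  }
}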
Project: big-c    File: TestLeaseRecovery.java
/**
 * The following test first creates a file with a few blocks.
 * It randomly truncates the replica of the last block stored in each datanode.
 * Finally, it triggers block synchronization to synchronize all stored blocks.
 */
@Test
public void testBlockSynchronization() throws Exception {
  final int ORG_FILE_SIZE = 3000; 
  Configuration conf = new HdfsConfiguration();
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(5).build();
  cluster.waitActive();

  //create a file
  DistributedFileSystem dfs = cluster.getFileSystem();
  String filestr = "/foo";
  Path filepath = new Path(filestr);
  DFSTestUtil.createFile(dfs, filepath, ORG_FILE_SIZE, REPLICATION_NUM, 0L);
  assertTrue(dfs.exists(filepath));
  DFSTestUtil.waitReplication(dfs, filepath, REPLICATION_NUM);

  //get block info for the last block
  LocatedBlock locatedblock = TestInterDatanodeProtocol.getLastLocatedBlock(
      dfs.dfs.getNamenode(), filestr);
  DatanodeInfo[] datanodeinfos = locatedblock.getLocations();
  assertEquals(REPLICATION_NUM, datanodeinfos.length);

  //connect to data nodes
  DataNode[] datanodes = new DataNode[REPLICATION_NUM];
  for(int i = 0; i < REPLICATION_NUM; i++) {
    datanodes[i] = cluster.getDataNode(datanodeinfos[i].getIpcPort());
    assertTrue(datanodes[i] != null);
  }

  //verify Block Info
  ExtendedBlock lastblock = locatedblock.getBlock();
  DataNode.LOG.info("newblocks=" + lastblock);
  for(int i = 0; i < REPLICATION_NUM; i++) {
    checkMetaInfo(lastblock, datanodes[i]);
  }

  DataNode.LOG.info("dfs.dfs.clientName=" + dfs.dfs.clientName);
  cluster.getNameNodeRpc().append(filestr, dfs.dfs.clientName,
      new EnumSetWritable<>(EnumSet.of(CreateFlag.APPEND)));

  // expire lease to trigger block recovery.
  waitLeaseRecovery(cluster);

  Block[] updatedmetainfo = new Block[REPLICATION_NUM];
  long oldSize = lastblock.getNumBytes();
  lastblock = TestInterDatanodeProtocol.getLastLocatedBlock(
      dfs.dfs.getNamenode(), filestr).getBlock();
  long currentGS = lastblock.getGenerationStamp();
  for(int i = 0; i < REPLICATION_NUM; i++) {
    updatedmetainfo[i] = DataNodeTestUtils.getFSDataset(datanodes[i]).getStoredBlock(
        lastblock.getBlockPoolId(), lastblock.getBlockId());
    assertEquals(lastblock.getBlockId(), updatedmetainfo[i].getBlockId());
    assertEquals(oldSize, updatedmetainfo[i].getNumBytes());
    assertEquals(currentGS, updatedmetainfo[i].getGenerationStamp());
  }

  // verify that lease recovery does not occur when namenode is in safemode
  System.out.println("Testing that lease recovery cannot happen during safemode.");
  filestr = "/foo.safemode";
  filepath = new Path(filestr);
  dfs.create(filepath, (short)1);
  cluster.getNameNodeRpc().setSafeMode(
      HdfsConstants.SafeModeAction.SAFEMODE_ENTER, false);
  assertTrue(dfs.dfs.exists(filestr));
  DFSTestUtil.waitReplication(dfs, filepath, (short)1);
  waitLeaseRecovery(cluster);
  // verify that we still cannot recover the lease
  LeaseManager lm = NameNodeAdapter.getLeaseManager(cluster.getNamesystem());
  assertTrue("Found " + lm.countLease() + " lease, expected 1", lm.countLease() == 1);
  cluster.getNameNodeRpc().setSafeMode(
      HdfsConstants.SafeModeAction.SAFEMODE_LEAVE, false);
}
Project: hadoop-2.6.0-cdh5.4.3    File: TestLeaseRecovery.java
/**
 * The following test first creates a file with a few blocks.
 * It randomly truncates the replica of the last block stored in each datanode.
 * Finally, it triggers block synchronization to synchronize all stored blocks.
 */
@Test
public void testBlockSynchronization() throws Exception {
  final int ORG_FILE_SIZE = 3000; 
  Configuration conf = new HdfsConfiguration();
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(5).build();
  cluster.waitActive();

  //create a file
  DistributedFileSystem dfs = cluster.getFileSystem();
  String filestr = "/foo";
  Path filepath = new Path(filestr);
  DFSTestUtil.createFile(dfs, filepath, ORG_FILE_SIZE, REPLICATION_NUM, 0L);
  assertTrue(dfs.exists(filepath));
  DFSTestUtil.waitReplication(dfs, filepath, REPLICATION_NUM);

  //get block info for the last block
  LocatedBlock locatedblock = TestInterDatanodeProtocol.getLastLocatedBlock(
      dfs.dfs.getNamenode(), filestr);
  DatanodeInfo[] datanodeinfos = locatedblock.getLocations();
  assertEquals(REPLICATION_NUM, datanodeinfos.length);

  //connect to data nodes
  DataNode[] datanodes = new DataNode[REPLICATION_NUM];
  for(int i = 0; i < REPLICATION_NUM; i++) {
    datanodes[i] = cluster.getDataNode(datanodeinfos[i].getIpcPort());
    assertTrue(datanodes[i] != null);
  }

  //verify Block Info
  ExtendedBlock lastblock = locatedblock.getBlock();
  DataNode.LOG.info("newblocks=" + lastblock);
  for(int i = 0; i < REPLICATION_NUM; i++) {
    checkMetaInfo(lastblock, datanodes[i]);
  }

  DataNode.LOG.info("dfs.dfs.clientName=" + dfs.dfs.clientName);
  cluster.getNameNodeRpc().append(filestr, dfs.dfs.clientName);

  // expire lease to trigger block recovery.
  waitLeaseRecovery(cluster);

  Block[] updatedmetainfo = new Block[REPLICATION_NUM];
  long oldSize = lastblock.getNumBytes();
  lastblock = TestInterDatanodeProtocol.getLastLocatedBlock(
      dfs.dfs.getNamenode(), filestr).getBlock();
  long currentGS = lastblock.getGenerationStamp();
  for(int i = 0; i < REPLICATION_NUM; i++) {
    updatedmetainfo[i] = DataNodeTestUtils.getFSDataset(datanodes[i]).getStoredBlock(
        lastblock.getBlockPoolId(), lastblock.getBlockId());
    assertEquals(lastblock.getBlockId(), updatedmetainfo[i].getBlockId());
    assertEquals(oldSize, updatedmetainfo[i].getNumBytes());
    assertEquals(currentGS, updatedmetainfo[i].getGenerationStamp());
  }

  // verify that lease recovery does not occur when namenode is in safemode
  System.out.println("Testing that lease recovery cannot happen during safemode.");
  filestr = "/foo.safemode";
  filepath = new Path(filestr);
  dfs.create(filepath, (short)1);
  cluster.getNameNodeRpc().setSafeMode(
      HdfsConstants.SafeModeAction.SAFEMODE_ENTER, false);
  assertTrue(dfs.dfs.exists(filestr));
  DFSTestUtil.waitReplication(dfs, filepath, (short)1);
  waitLeaseRecovery(cluster);
  // verify that we still cannot recover the lease
  LeaseManager lm = NameNodeAdapter.getLeaseManager(cluster.getNamesystem());
  assertTrue("Found " + lm.countLease() + " lease, expected 1", lm.countLease() == 1);
  cluster.getNameNodeRpc().setSafeMode(
      HdfsConstants.SafeModeAction.SAFEMODE_LEAVE, false);
}
Project: FlexMap    File: TestLeaseRecovery.java
/**
 * The following test first creates a file with a few blocks.
 * It randomly truncates the replica of the last block stored in each datanode.
 * Finally, it triggers block synchronization to synchronize all stored blocks.
 */
@Test
public void testBlockSynchronization() throws Exception {
  final int ORG_FILE_SIZE = 3000; 
  Configuration conf = new HdfsConfiguration();
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(5).build();
  cluster.waitActive();

  //create a file
  DistributedFileSystem dfs = cluster.getFileSystem();
  String filestr = "/foo";
  Path filepath = new Path(filestr);
  DFSTestUtil.createFile(dfs, filepath, ORG_FILE_SIZE, REPLICATION_NUM, 0L);
  assertTrue(dfs.exists(filepath));
  DFSTestUtil.waitReplication(dfs, filepath, REPLICATION_NUM);

  //get block info for the last block
  LocatedBlock locatedblock = TestInterDatanodeProtocol.getLastLocatedBlock(
      dfs.dfs.getNamenode(), filestr);
  DatanodeInfo[] datanodeinfos = locatedblock.getLocations();
  assertEquals(REPLICATION_NUM, datanodeinfos.length);

  //connect to data nodes
  DataNode[] datanodes = new DataNode[REPLICATION_NUM];
  for(int i = 0; i < REPLICATION_NUM; i++) {
    datanodes[i] = cluster.getDataNode(datanodeinfos[i].getIpcPort());
    assertTrue(datanodes[i] != null);
  }

  //verify Block Info
  ExtendedBlock lastblock = locatedblock.getBlock();
  DataNode.LOG.info("newblocks=" + lastblock);
  for(int i = 0; i < REPLICATION_NUM; i++) {
    checkMetaInfo(lastblock, datanodes[i]);
  }

  DataNode.LOG.info("dfs.dfs.clientName=" + dfs.dfs.clientName);
  cluster.getNameNodeRpc().append(filestr, dfs.dfs.clientName);

  // expire lease to trigger block recovery.
  waitLeaseRecovery(cluster);

  Block[] updatedmetainfo = new Block[REPLICATION_NUM];
  long oldSize = lastblock.getNumBytes();
  lastblock = TestInterDatanodeProtocol.getLastLocatedBlock(
      dfs.dfs.getNamenode(), filestr).getBlock();
  long currentGS = lastblock.getGenerationStamp();
  for(int i = 0; i < REPLICATION_NUM; i++) {
    updatedmetainfo[i] = DataNodeTestUtils.getFSDataset(datanodes[i]).getStoredBlock(
        lastblock.getBlockPoolId(), lastblock.getBlockId());
    assertEquals(lastblock.getBlockId(), updatedmetainfo[i].getBlockId());
    assertEquals(oldSize, updatedmetainfo[i].getNumBytes());
    assertEquals(currentGS, updatedmetainfo[i].getGenerationStamp());
  }

  // verify that lease recovery does not occur when namenode is in safemode
  System.out.println("Testing that lease recovery cannot happen during safemode.");
  filestr = "/foo.safemode";
  filepath = new Path(filestr);
  dfs.create(filepath, (short)1);
  cluster.getNameNodeRpc().setSafeMode(
      HdfsConstants.SafeModeAction.SAFEMODE_ENTER, false);
  assertTrue(dfs.dfs.exists(filestr));
  DFSTestUtil.waitReplication(dfs, filepath, (short)1);
  waitLeaseRecovery(cluster);
  // verify that we still cannot recover the lease
  LeaseManager lm = NameNodeAdapter.getLeaseManager(cluster.getNamesystem());
  assertTrue("Found " + lm.countLease() + " lease, expected 1", lm.countLease() == 1);
  cluster.getNameNodeRpc().setSafeMode(
      HdfsConstants.SafeModeAction.SAFEMODE_LEAVE, false);
}
Project: hadoop-on-lustre    File: TestShortCircuitLocalRead.java
/**
 * Test to run benchmarks between shortcircuit read vs regular read with
 * specified number of threads simultaneously reading.
 * <br>
 * Run this using the following command:
 * bin/hadoop --config confdir \
 * org.apache.hadoop.hdfs.TestShortCircuitLocalRead \
 * <shortcircuit on?> <checksum on?> <number of threads>
 */
public static void main(String[] args) throws Exception {
  ((Log4JLogger)LeaseManager.LOG).getLogger().setLevel(Level.INFO);
  ((Log4JLogger)FSNamesystem.LOG).getLogger().setLevel(Level.INFO);
  ((Log4JLogger)DFSClient.LOG).getLogger().setLevel(Level.INFO);

  if (args.length != 3) {
    System.out.println("Usage: test shortcircuit checksum threadCount");
    System.exit(1);
  }
  boolean shortcircuit = Boolean.valueOf(args[0]);
  boolean checksum = Boolean.valueOf(args[1]);
  int threadCount = Integer.valueOf(args[2]);

  // Set up the configuration and create a file
  Configuration conf = new Configuration();
  conf.setBoolean(DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_KEY, shortcircuit);
  conf.setBoolean(DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_SKIP_CHECKSUM_KEY,
      checksum);

  //Override fileSize and DATA_TO_WRITE to much larger values for benchmark test
  int fileSize = 1000 * blockSize + 100; // File with 1000 blocks
  final byte [] dataToWrite = AppendTestUtil.randomBytes(seed, fileSize);

  // Create a new file in the home directory and write the benchmark data.
  final Path file1 = new Path("filelocal.dat");
  final FileSystem fs = FileSystem.get(conf);
  FSDataOutputStream stm = createFile(fs, file1, 1);

  stm.write(dataToWrite);
  stm.close();

  long start = System.currentTimeMillis();
  final int iteration = 20;
  Thread[] threads = new Thread[threadCount];
  for (int i = 0; i < threadCount; i++) {
    threads[i] = new Thread() {
      public void run() {
        for (int i = 0; i < iteration; i++) {
          try {
            checkFileContent(fs, file1, dataToWrite, 0);
          } catch (IOException e) {
            e.printStackTrace();
          }
        }
      }
    };
  }
  for (int i = 0; i < threadCount; i++) {
    threads[i].start();
  }
  for (int i = 0; i < threadCount; i++) {
    threads[i].join();
  }
  long end = System.currentTimeMillis();
  System.out.println("Iteration " + iteration + " took " + (end - start));
  fs.delete(file1, false);
}
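
Following the Javadoc above, a concrete invocation might look like this; the config path and argument values are hypothetical, and the elapsed time printed at the end is in milliseconds:

bin/hadoop --config /etc/hadoop/conf org.apache.hadoop.hdfs.TestShortCircuitLocalRead true true 8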
Project: hortonworks-extension    File: TestShortCircuitLocalRead.java
/**
 * Test to run benchmarks between shortcircuit read vs regular read with
 * specified number of threads simultaneously reading.
 * <br>
 * Run this using the following command:
 * bin/hadoop --config confdir \
 * org.apache.hadoop.hdfs.TestShortCircuitLocalRead \
 * <shortcircuit on?> <checksum on?> <number of threads>
 */
public static void main(String[] args) throws Exception {
  ((Log4JLogger)LeaseManager.LOG).getLogger().setLevel(Level.INFO);
  ((Log4JLogger)FSNamesystem.LOG).getLogger().setLevel(Level.INFO);
  ((Log4JLogger)DFSClient.LOG).getLogger().setLevel(Level.INFO);

  if (args.length != 3) {
    System.out.println("Usage: test shortcircuit checksum threadCount");
    System.exit(1);
  }
  boolean shortcircuit = Boolean.valueOf(args[0]);
  boolean checksum = Boolean.valueOf(args[1]);
  int threadCount = Integer.valueOf(args[2]);

  // Set up the configuration and create a file
  Configuration conf = new Configuration();
  conf.setBoolean(DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_KEY, shortcircuit);
  conf.setBoolean(DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_SKIP_CHECKSUM_KEY,
      checksum);

  //Override fileSize and DATA_TO_WRITE to much larger values for benchmark test
  int fileSize = 1000 * blockSize + 100; // File with 1000 blocks
  final byte [] dataToWrite = AppendTestUtil.randomBytes(seed, fileSize);

  // Create a new file in the home directory and write the benchmark data.
  final Path file1 = new Path("filelocal.dat");
  final FileSystem fs = FileSystem.get(conf);
  FSDataOutputStream stm = createFile(fs, file1, 1);

  stm.write(dataToWrite);
  stm.close();

  long start = System.currentTimeMillis();
  final int iteration = 20;
  Thread[] threads = new Thread[threadCount];
  for (int i = 0; i < threadCount; i++) {
    threads[i] = new Thread() {
      public void run() {
        for (int i = 0; i < iteration; i++) {
          try {
            checkFileContent(fs, file1, dataToWrite, 0);
          } catch (IOException e) {
            e.printStackTrace();
          }
        }
      }
    };
  }
  for (int i = 0; i < threadCount; i++) {
    threads[i].start();
  }
  for (int i = 0; i < threadCount; i++) {
    threads[i].join();
  }
  long end = System.currentTimeMillis();
  System.out.println("Iteration " + iteration + " took " + (end - start));
  fs.delete(file1, false);
}
Project: hortonworks-extension    File: TestShortCircuitLocalRead.java
/**
 * Test to run benchmarks between shortcircuit read vs regular read with
 * specified number of threads simultaneously reading.
 * <br>
 * Run this using the following command:
 * bin/hadoop --config confdir \
 * org.apache.hadoop.hdfs.TestShortCircuitLocalRead \
 * <shortcircuit on?> <checksum on?> <number of threads>
 */
public static void main(String[] args) throws Exception {
  ((Log4JLogger)LeaseManager.LOG).getLogger().setLevel(Level.INFO);
  ((Log4JLogger)FSNamesystem.LOG).getLogger().setLevel(Level.INFO);
  ((Log4JLogger)DFSClient.LOG).getLogger().setLevel(Level.INFO);

  if (args.length != 3) {
    System.out.println("Usage: test shortcircuit checksum threadCount");
    System.exit(1);
  }
  boolean shortcircuit = Boolean.valueOf(args[0]);
  boolean checksum = Boolean.valueOf(args[1]);
  int threadCount = Integer.valueOf(args[2]);

  // Set up the configuration and create a file
  Configuration conf = new Configuration();
  conf.setBoolean(DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_KEY, shortcircuit);
  conf.setBoolean(DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_SKIP_CHECKSUM_KEY,
      checksum);

  //Override fileSize and DATA_TO_WRITE to much larger values for benchmark test
  int fileSize = 1000 * blockSize + 100; // File with 1000 blocks
  final byte [] dataToWrite = AppendTestUtil.randomBytes(seed, fileSize);

  // Create a new file in the home directory and write the benchmark data.
  final Path file1 = new Path("filelocal.dat");
  final FileSystem fs = FileSystem.get(conf);
  FSDataOutputStream stm = createFile(fs, file1, 1);

  stm.write(dataToWrite);
  stm.close();

  long start = System.currentTimeMillis();
  final int iteration = 20;
  Thread[] threads = new Thread[threadCount];
  for (int i = 0; i < threadCount; i++) {
    threads[i] = new Thread() {
      public void run() {
        for (int i = 0; i < iteration; i++) {
          try {
            checkFileContent(fs, file1, dataToWrite, 0);
          } catch (IOException e) {
            e.printStackTrace();
          }
        }
      }
    };
  }
  for (int i = 0; i < threadCount; i++) {
    threads[i].start();
  }
  for (int i = 0; i < threadCount; i++) {
    threads[i].join();
  }
  long end = System.currentTimeMillis();
  System.out.println("Iteration " + iteration + " took " + (end - start));
  fs.delete(file1, false);
}