Example source snippets for the Java class org.apache.hadoop.hdfs.server.datanode.DataBlockScanner (collected from several Hadoop forks)

项目:FlexMap    文件:TestFsDatasetImpl.java   
@Before
public void setUp() throws IOException {
  // Build an FsDatasetImpl backed entirely by mocks so volume and block-pool
  // behavior can be exercised without a real DataNode or on-disk storage.
  final DataNode datanode = Mockito.mock(DataNode.class);
  storage = Mockito.mock(DataStorage.class);
  scanner = Mockito.mock(DataBlockScanner.class);
  this.conf = new Configuration();
  final DNConf dnConf = new DNConf(conf);

  // The dataset pulls configuration and the block scanner from the DataNode,
  // so wire those accessors up on the mock before constructing it.
  when(datanode.getConf()).thenReturn(conf);
  when(datanode.getDnConf()).thenReturn(dnConf);
  when(datanode.getBlockScanner()).thenReturn(scanner);

  // Register NUM_INIT_VOLUMES storage directories on the mock storage first;
  // the FsDatasetImpl constructor reads them to build its volume list.
  createStorageDirs(storage, conf, NUM_INIT_VOLUMES);
  dataset = new FsDatasetImpl(datanode, storage, conf);
  for (String bpid : BLOCK_POOL_IDS) {
    dataset.addBlockPool(bpid, conf);
  }

  // Sanity check: every volume registered and none reported as failed.
  assertEquals(NUM_INIT_VOLUMES, dataset.getVolumes().size());
  assertEquals(0, dataset.getNumFailedVolumes());
}
项目:hadoop-plus    文件:RollingLogsImpl.java   
@Override
public String next() {
  // Return the line buffered by the previous advance, then pre-fetch the
  // following one so the iterator stays one line ahead.
  final String result = line;
  try {
    readNext();
  } catch (IOException ioe) {
    // Best effort: log the failure and still hand back the buffered line.
    DataBlockScanner.LOG.warn("Failed to read next line.", ioe);
  }
  return result;
}
项目:hadoop-plus    文件:SnapshotTestHelper.java   
/** Disable the logs that are not very useful for snapshot related tests. */
public static void disableLogs() {
  // Datanode-side implementation classes are silenced via their logger names.
  final String[] noisy = {
      "org.apache.hadoop.hdfs.server.datanode.BlockPoolSliceScanner",
      "org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl",
      "org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetAsyncDiskService",
  };
  for (int i = 0; i < noisy.length; i++) {
    setLevel2OFF(LogFactory.getLog(noisy[i]));
  }

  // Loggers resolved from Class literals.
  for (Class<?> clazz : new Class<?>[] {
      UserGroupInformation.class, BlockManager.class, FSNamesystem.class,
      DirectoryScanner.class, MetricsSystemImpl.class }) {
    setLevel2OFF(LogFactory.getLog(clazz));
  }

  // Loggers exposed directly as static fields.
  setLevel2OFF(DataBlockScanner.LOG);
  setLevel2OFF(HttpServer.LOG);
  setLevel2OFF(DataNode.LOG);
  setLevel2OFF(BlockPoolSliceStorage.LOG);
  setLevel2OFF(LeaseManager.LOG);
  setLevel2OFF(NameNode.stateChangeLog);
  setLevel2OFF(NameNode.blockStateChangeLog);
  setLevel2OFF(DFSClient.LOG);
  setLevel2OFF(Server.LOG);
}
项目:FlexMap    文件:RollingLogsImpl.java   
@Override
public String next() {
  // Hand back the previously buffered line while advancing the read cursor.
  final String pending = line;
  try {
    // Remember which file the upcoming read comes from, then pre-fetch.
    lastReadFile = file;
    readNext();
  } catch (IOException ioe) {
    // Best effort: warn and return the line we already have.
    DataBlockScanner.LOG.warn("Failed to read next line.", ioe);
  }
  return pending;
}
项目:FlexMap    文件:SnapshotTestHelper.java   
/** Disable the logs that are not very useful for snapshot related tests. */
public static void disableLogs() {
  // Implementation classes referenced by logger name (string) rather than
  // by Class literal.
  final String[] verboseLoggers = {
      "org.apache.hadoop.hdfs.server.datanode.BlockPoolSliceScanner",
      "org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl",
      "org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetAsyncDiskService",
  };
  for (final String name : verboseLoggers) {
    setLevel2OFF(LogFactory.getLog(name));
  }

  // Loggers resolved from Class literals.
  for (final Class<?> c : new Class<?>[] {
      UserGroupInformation.class, BlockManager.class, FSNamesystem.class,
      DirectoryScanner.class, MetricsSystemImpl.class }) {
    setLevel2OFF(LogFactory.getLog(c));
  }

  // Loggers exposed as static fields on their owning classes.
  setLevel2OFF(DataBlockScanner.LOG);
  setLevel2OFF(HttpServer2.LOG);
  setLevel2OFF(DataNode.LOG);
  setLevel2OFF(BlockPoolSliceStorage.LOG);
  setLevel2OFF(LeaseManager.LOG);
  setLevel2OFF(NameNode.stateChangeLog);
  setLevel2OFF(NameNode.blockStateChangeLog);
  setLevel2OFF(DFSClient.LOG);
  setLevel2OFF(Server.LOG);
}
项目:hops    文件:RollingLogsImpl.java   
@Override
public String next() {
  // The iterator is one line ahead: return the buffered line and refill.
  final String current = line;
  try {
    readNext();
  } catch (IOException e) {
    // Log and keep going; the buffered line is still valid to return.
    DataBlockScanner.LOG.warn("Failed to read next line.", e);
  }
  return current;
}
项目:hadoop-TCP    文件:RollingLogsImpl.java   
@Override
public String next() {
  // Return the line fetched on the previous call, then advance.
  final String buffered = line;
  try {
    lastReadFile = file; // track the file the next read will come from
    readNext();
  } catch (IOException ioe) {
    // Non-fatal: warn and fall through to return the buffered line.
    DataBlockScanner.LOG.warn("Failed to read next line.", ioe);
  }
  return buffered;
}
项目:hadoop-TCP    文件:SnapshotTestHelper.java   
/** Disable the logs that are not very useful for snapshot related tests. */
public static void disableLogs() {
  // Silence datanode implementation classes addressed by logger name.
  final String[] byName = {
      "org.apache.hadoop.hdfs.server.datanode.BlockPoolSliceScanner",
      "org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl",
      "org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetAsyncDiskService",
  };
  for (int i = 0; i < byName.length; i++) {
    setLevel2OFF(LogFactory.getLog(byName[i]));
  }

  // Silence loggers addressed by Class literal.
  for (Class<?> cls : new Class<?>[] {
      UserGroupInformation.class, BlockManager.class, FSNamesystem.class,
      DirectoryScanner.class, MetricsSystemImpl.class }) {
    setLevel2OFF(LogFactory.getLog(cls));
  }

  // Silence loggers published as static fields.
  setLevel2OFF(DataBlockScanner.LOG);
  setLevel2OFF(HttpServer.LOG);
  setLevel2OFF(DataNode.LOG);
  setLevel2OFF(BlockPoolSliceStorage.LOG);
  setLevel2OFF(LeaseManager.LOG);
  setLevel2OFF(NameNode.stateChangeLog);
  setLevel2OFF(NameNode.blockStateChangeLog);
  setLevel2OFF(DFSClient.LOG);
  setLevel2OFF(Server.LOG);
}
项目:hadoop-on-lustre    文件:TestInterDatanodeProtocol.java   
public static void checkMetaInfo(Block b, InterDatanodeProtocol idp,
    DataBlockScanner scanner) throws IOException {
  // Fetch the datanode's view of the block and compare it field by field.
  final BlockMetaDataInfo info = idp.getBlockMetaDataInfo(b);
  assertEquals(b.getBlockId(), info.getBlockId());
  assertEquals(b.getNumBytes(), info.getNumBytes());
  if (scanner == null) {
    return; // no scanner supplied: skip the scan-time comparison
  }
  assertEquals(scanner.getLastScanTime(b), info.getLastScanTime());
}
项目:hardfs    文件:RollingLogsImpl.java   
@Override
public String next() {
  // Serve the line read during the previous advance.
  final String result = line;
  try {
    // Note the source file before pre-fetching the next line.
    lastReadFile = file;
    readNext();
  } catch (IOException e) {
    // Swallow after logging; the caller still gets the buffered line.
    DataBlockScanner.LOG.warn("Failed to read next line.", e);
  }
  return result;
}
项目:hardfs    文件:SnapshotTestHelper.java   
/** Disable the logs that are not very useful for snapshot related tests. */
public static void disableLogs() {
  // Datanode-side classes silenced through their string logger names.
  final String[] loggerNames = {
      "org.apache.hadoop.hdfs.server.datanode.BlockPoolSliceScanner",
      "org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl",
      "org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetAsyncDiskService",
  };
  for (final String logger : loggerNames) {
    setLevel2OFF(LogFactory.getLog(logger));
  }

  // Classes silenced through Class literals.
  for (final Class<?> type : new Class<?>[] {
      UserGroupInformation.class, BlockManager.class, FSNamesystem.class,
      DirectoryScanner.class, MetricsSystemImpl.class }) {
    setLevel2OFF(LogFactory.getLog(type));
  }

  // Loggers reachable as static fields.
  setLevel2OFF(DataBlockScanner.LOG);
  setLevel2OFF(HttpServer.LOG);
  setLevel2OFF(DataNode.LOG);
  setLevel2OFF(BlockPoolSliceStorage.LOG);
  setLevel2OFF(LeaseManager.LOG);
  setLevel2OFF(NameNode.stateChangeLog);
  setLevel2OFF(NameNode.blockStateChangeLog);
  setLevel2OFF(DFSClient.LOG);
  setLevel2OFF(Server.LOG);
}
项目:hadoop-on-lustre2    文件:RollingLogsImpl.java   
@Override
public String next() {
  // Look-ahead iterator: emit the buffered line, then read one more.
  final String emitted = line;
  try {
    lastReadFile = file; // record where the upcoming read originates
    readNext();
  } catch (IOException ioe) {
    // Warn only; the buffered line is still returned to the caller.
    DataBlockScanner.LOG.warn("Failed to read next line.", ioe);
  }
  return emitted;
}
项目:hadoop-on-lustre2    文件:SnapshotTestHelper.java   
/** Disable the logs that are not very useful for snapshot related tests. */
public static void disableLogs() {
  // Turn off chatty datanode implementation loggers, addressed by name.
  final String[] named = {
      "org.apache.hadoop.hdfs.server.datanode.BlockPoolSliceScanner",
      "org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl",
      "org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetAsyncDiskService",
  };
  for (int idx = 0; idx < named.length; idx++) {
    setLevel2OFF(LogFactory.getLog(named[idx]));
  }

  // Turn off loggers addressed by Class literal.
  for (Class<?> k : new Class<?>[] {
      UserGroupInformation.class, BlockManager.class, FSNamesystem.class,
      DirectoryScanner.class, MetricsSystemImpl.class }) {
    setLevel2OFF(LogFactory.getLog(k));
  }

  // Turn off loggers exposed as static fields.
  setLevel2OFF(DataBlockScanner.LOG);
  setLevel2OFF(HttpServer2.LOG);
  setLevel2OFF(DataNode.LOG);
  setLevel2OFF(BlockPoolSliceStorage.LOG);
  setLevel2OFF(LeaseManager.LOG);
  setLevel2OFF(NameNode.stateChangeLog);
  setLevel2OFF(NameNode.blockStateChangeLog);
  setLevel2OFF(DFSClient.LOG);
  setLevel2OFF(Server.LOG);
}
项目:hadoop-0.20    文件:TestInterDatanodeProtocol.java   
public static void checkMetaInfo(Block b, InterDatanodeProtocol idp,
    DataBlockScanner scanner) throws IOException {
  // Ask the remote datanode for its metadata on this block.
  final BlockMetaDataInfo meta = idp.getBlockMetaDataInfo(b);
  // Identity and length must match the expected block.
  assertEquals(b.getBlockId(), meta.getBlockId());
  assertEquals(b.getNumBytes(), meta.getNumBytes());
  // Scan time is only comparable when a scanner was provided.
  if (scanner != null) {
    assertEquals(scanner.getLastScanTime(b), meta.getLastScanTime());
  }
}
项目:hortonworks-extension    文件:TestInterDatanodeProtocol.java   
public static void checkMetaInfo(Block b, InterDatanodeProtocol idp,
    DataBlockScanner scanner) throws IOException {
  // Compare the datanode-reported metadata against the expected block.
  final BlockMetaDataInfo reported = idp.getBlockMetaDataInfo(b);
  assertEquals(b.getBlockId(), reported.getBlockId());
  assertEquals(b.getNumBytes(), reported.getNumBytes());
  if (scanner == null) {
    return; // without a scanner there is no scan time to verify
  }
  assertEquals(scanner.getLastScanTime(b), reported.getLastScanTime());
}
项目:hortonworks-extension    文件:TestInterDatanodeProtocol.java   
public static void checkMetaInfo(Block b, InterDatanodeProtocol idp,
    DataBlockScanner scanner) throws IOException {
  // Fetch remote metadata once and verify each field against the block.
  final BlockMetaDataInfo info = idp.getBlockMetaDataInfo(b);
  assertEquals(b.getBlockId(), info.getBlockId());
  assertEquals(b.getNumBytes(), info.getNumBytes());
  // The last-scan-time check is optional: it requires a scanner instance.
  if (scanner != null) {
    final long expectedScanTime = scanner.getLastScanTime(b);
    assertEquals(expectedScanTime, info.getLastScanTime());
  }
}
项目:hadoop-gpu    文件:TestInterDatanodeProtocol.java   
public static void checkMetaInfo(Block b, InterDatanodeProtocol idp,
    DataBlockScanner scanner) throws IOException {
  // Pull the block's metadata over the inter-datanode protocol.
  final BlockMetaDataInfo remote = idp.getBlockMetaDataInfo(b);
  // Verify identity, then size.
  assertEquals(b.getBlockId(), remote.getBlockId());
  assertEquals(b.getNumBytes(), remote.getNumBytes());
  if (scanner == null) {
    return; // scan-time comparison needs a scanner
  }
  assertEquals(scanner.getLastScanTime(b), remote.getLastScanTime());
}