Java 类org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType 实例源码

项目:hadoop    文件:TestDFSStartupVersions.java   
/**
 * Writes an INFO log message containing the parameters.
 */
void log(String label, NodeType nodeType, Integer testCase,
    StorageData sd) {
  // Assemble the whole message first, then emit separator + message.
  StringBuilder msg = new StringBuilder("***TEST*** ").append(label).append(":");
  if (testCase != null) {
    msg.append(" testCase=").append(testCase);
  }
  msg.append(" nodeType=").append(nodeType)
     .append(" layoutVersion=").append(sd.storageInfo.getLayoutVersion())
     .append(" namespaceID=").append(sd.storageInfo.getNamespaceID())
     .append(" fsscTime=").append(sd.storageInfo.getCTime())
     .append(" clusterID=").append(sd.storageInfo.getClusterID())
     .append(" BlockPoolID=").append(sd.blockPoolId);
  LOG.info("============================================================");
  LOG.info(msg.toString());
}
项目:hadoop    文件:TestPBHelper.java   
@Test
public void testConvertNamenodeRegistration() {
  // Round-trip a NamenodeRegistration through its protobuf form and
  // verify that every field survives the conversion unchanged.
  StorageInfo storageInfo = getStorageInfo(NodeType.NAME_NODE);
  NamenodeRegistration original = new NamenodeRegistration("address:999",
      "http:1000", storageInfo, NamenodeRole.NAMENODE);
  NamenodeRegistrationProto proto = PBHelper.convert(original);
  NamenodeRegistration roundTripped = PBHelper.convert(proto);
  assertEquals(original.getAddress(), roundTripped.getAddress());
  assertEquals(original.getClusterID(), roundTripped.getClusterID());
  assertEquals(original.getCTime(), roundTripped.getCTime());
  assertEquals(original.getHttpAddress(), roundTripped.getHttpAddress());
  assertEquals(original.getLayoutVersion(), roundTripped.getLayoutVersion());
  assertEquals(original.getNamespaceID(), roundTripped.getNamespaceID());
  assertEquals(original.getRegistrationID(), roundTripped.getRegistrationID());
  assertEquals(original.getRole(), roundTripped.getRole());
  assertEquals(original.getVersion(), roundTripped.getVersion());
}
项目:hadoop    文件:TestDFSRollback.java   
/**
 * Verify that the new current directory is the old previous.
 * It is assumed that the server has recovered and rolled back.
 */
void checkResult(NodeType nodeType, String[] baseDirs) throws Exception {
  List<File> currentDirs = Lists.newArrayList();
  for (String base : baseDirs) {
    File current = new File(base, "current");
    currentDirs.add(current);
    switch (nodeType) {
    case NAME_NODE:
      FSImageTestUtil.assertReasonableNameCurrentDir(current);
      break;
    case DATA_NODE:
      // Rolled-back datanode content must match the pristine master copy.
      assertEquals(
          UpgradeUtilities.checksumContents(nodeType, current, false),
          UpgradeUtilities.checksumMasterDataNodeContents());
      break;
    }
  }

  // Every "current" directory must hold identical content.
  FSImageTestUtil.assertParallelFilesAreIdentical(
      currentDirs, Collections.<String>emptySet());

  // Rollback must have removed each "previous" directory.
  for (String base : baseDirs) {
    assertFalse(new File(base, "previous").isDirectory());
  }
}
项目:aliyun-oss-hadoop-fs    文件:TestDFSStartupVersions.java   
/**
 * Writes an INFO log message containing the parameters.
 */
void log(String label, NodeType nodeType, Integer testCase,
    StorageData sd) {
  // Assemble the whole message first, then emit separator + message.
  StringBuilder msg = new StringBuilder("***TEST*** ").append(label).append(":");
  if (testCase != null) {
    msg.append(" testCase=").append(testCase);
  }
  msg.append(" nodeType=").append(nodeType)
     .append(" layoutVersion=").append(sd.storageInfo.getLayoutVersion())
     .append(" namespaceID=").append(sd.storageInfo.getNamespaceID())
     .append(" fsscTime=").append(sd.storageInfo.getCTime())
     .append(" clusterID=").append(sd.storageInfo.getClusterID())
     .append(" BlockPoolID=").append(sd.blockPoolId);
  LOG.info("============================================================");
  LOG.info(msg.toString());
}
项目:aliyun-oss-hadoop-fs    文件:TestPBHelper.java   
@Test
public void testConvertNamenodeRegistration() {
  // Round-trip a NamenodeRegistration through its protobuf form and
  // verify that every field survives the conversion unchanged.
  StorageInfo storageInfo = getStorageInfo(NodeType.NAME_NODE);
  NamenodeRegistration original = new NamenodeRegistration("address:999",
      "http:1000", storageInfo, NamenodeRole.NAMENODE);
  NamenodeRegistrationProto proto = PBHelper.convert(original);
  NamenodeRegistration roundTripped = PBHelper.convert(proto);
  assertEquals(original.getAddress(), roundTripped.getAddress());
  assertEquals(original.getClusterID(), roundTripped.getClusterID());
  assertEquals(original.getCTime(), roundTripped.getCTime());
  assertEquals(original.getHttpAddress(), roundTripped.getHttpAddress());
  assertEquals(original.getLayoutVersion(), roundTripped.getLayoutVersion());
  assertEquals(original.getNamespaceID(), roundTripped.getNamespaceID());
  assertEquals(original.getRegistrationID(), roundTripped.getRegistrationID());
  assertEquals(original.getRole(), roundTripped.getRole());
  assertEquals(original.getVersion(), roundTripped.getVersion());
}
项目:aliyun-oss-hadoop-fs    文件:TestDFSRollback.java   
/**
 * Verify that the new current directory is the old previous.
 * It is assumed that the server has recovered and rolled back.
 */
void checkResult(NodeType nodeType, String[] baseDirs) throws Exception {
  List<File> currentDirs = Lists.newArrayList();
  for (String base : baseDirs) {
    File current = new File(base, "current");
    currentDirs.add(current);
    switch (nodeType) {
    case NAME_NODE:
      FSImageTestUtil.assertReasonableNameCurrentDir(current);
      break;
    case DATA_NODE:
      // Rolled-back datanode content must match the pristine master copy.
      assertEquals(
          UpgradeUtilities.checksumContents(nodeType, current, false),
          UpgradeUtilities.checksumMasterDataNodeContents());
      break;
    }
  }

  // Every "current" directory must hold identical content.
  FSImageTestUtil.assertParallelFilesAreIdentical(
      currentDirs, Collections.<String>emptySet());

  // Rollback must have removed each "previous" directory.
  for (String base : baseDirs) {
    assertFalse(new File(base, "previous").isDirectory());
  }
}
项目:big-c    文件:TestDFSStartupVersions.java   
/**
 * Writes an INFO log message containing the parameters.
 */
void log(String label, NodeType nodeType, Integer testCase,
    StorageData sd) {
  // Assemble the whole message first, then emit separator + message.
  StringBuilder msg = new StringBuilder("***TEST*** ").append(label).append(":");
  if (testCase != null) {
    msg.append(" testCase=").append(testCase);
  }
  msg.append(" nodeType=").append(nodeType)
     .append(" layoutVersion=").append(sd.storageInfo.getLayoutVersion())
     .append(" namespaceID=").append(sd.storageInfo.getNamespaceID())
     .append(" fsscTime=").append(sd.storageInfo.getCTime())
     .append(" clusterID=").append(sd.storageInfo.getClusterID())
     .append(" BlockPoolID=").append(sd.blockPoolId);
  LOG.info("============================================================");
  LOG.info(msg.toString());
}
项目:big-c    文件:TestPBHelper.java   
@Test
public void testConvertNamenodeRegistration() {
  // Round-trip a NamenodeRegistration through its protobuf form and
  // verify that every field survives the conversion unchanged.
  StorageInfo storageInfo = getStorageInfo(NodeType.NAME_NODE);
  NamenodeRegistration original = new NamenodeRegistration("address:999",
      "http:1000", storageInfo, NamenodeRole.NAMENODE);
  NamenodeRegistrationProto proto = PBHelper.convert(original);
  NamenodeRegistration roundTripped = PBHelper.convert(proto);
  assertEquals(original.getAddress(), roundTripped.getAddress());
  assertEquals(original.getClusterID(), roundTripped.getClusterID());
  assertEquals(original.getCTime(), roundTripped.getCTime());
  assertEquals(original.getHttpAddress(), roundTripped.getHttpAddress());
  assertEquals(original.getLayoutVersion(), roundTripped.getLayoutVersion());
  assertEquals(original.getNamespaceID(), roundTripped.getNamespaceID());
  assertEquals(original.getRegistrationID(), roundTripped.getRegistrationID());
  assertEquals(original.getRole(), roundTripped.getRole());
  assertEquals(original.getVersion(), roundTripped.getVersion());
}
项目:big-c    文件:TestDFSRollback.java   
/**
 * Verify that the new current directory is the old previous.
 * It is assumed that the server has recovered and rolled back.
 */
void checkResult(NodeType nodeType, String[] baseDirs) throws Exception {
  List<File> currentDirs = Lists.newArrayList();
  for (String base : baseDirs) {
    File current = new File(base, "current");
    currentDirs.add(current);
    switch (nodeType) {
    case NAME_NODE:
      FSImageTestUtil.assertReasonableNameCurrentDir(current);
      break;
    case DATA_NODE:
      // Rolled-back datanode content must match the pristine master copy.
      assertEquals(
          UpgradeUtilities.checksumContents(nodeType, current, false),
          UpgradeUtilities.checksumMasterDataNodeContents());
      break;
    }
  }

  // Every "current" directory must hold identical content.
  FSImageTestUtil.assertParallelFilesAreIdentical(
      currentDirs, Collections.<String>emptySet());

  // Rollback must have removed each "previous" directory.
  for (String base : baseDirs) {
    assertFalse(new File(base, "previous").isDirectory());
  }
}
项目:hadoop-2.6.0-cdh5.4.3    文件:DataNode.java   
/**
 * Create a DatanodeRegistration for a specific block pool.
 * @param nsInfo the namespace info from the first part of the NN handshake
 */
DatanodeRegistration createBPRegistration(NamespaceInfo nsInfo) {
  StorageInfo bpStorageInfo = storage.getBPStorage(nsInfo.getBlockPoolID());
  if (bpStorageInfo == null) {
    // it's null in the case of SimulatedDataSet; synthesize one from the
    // namespace info at the current datanode layout version.
    bpStorageInfo = new StorageInfo(
        DataNodeLayoutVersion.CURRENT_LAYOUT_VERSION,
        nsInfo.getNamespaceID(), nsInfo.clusterID, nsInfo.getCTime(),
        NodeType.DATA_NODE);
  }

  DatanodeID datanodeId = new DatanodeID(
      streamingAddr.getAddress().getHostAddress(), hostName,
      storage.getDatanodeUuid(), getXferPort(), getInfoPort(),
      infoSecurePort, getIpcPort());
  return new DatanodeRegistration(datanodeId, bpStorageInfo,
      new ExportedBlockKeys(), VersionInfo.getVersion());
}
项目:hadoop-2.6.0-cdh5.4.3    文件:TestDFSStartupVersions.java   
/**
 * Writes an INFO log message containing the parameters.
 */
void log(String label, NodeType nodeType, Integer testCase,
    StorageData sd) {
  // Assemble the whole message first, then emit separator + message.
  StringBuilder msg = new StringBuilder("***TEST*** ").append(label).append(":");
  if (testCase != null) {
    msg.append(" testCase=").append(testCase);
  }
  msg.append(" nodeType=").append(nodeType)
     .append(" layoutVersion=").append(sd.storageInfo.getLayoutVersion())
     .append(" namespaceID=").append(sd.storageInfo.getNamespaceID())
     .append(" fsscTime=").append(sd.storageInfo.getCTime())
     .append(" clusterID=").append(sd.storageInfo.getClusterID())
     .append(" BlockPoolID=").append(sd.blockPoolId);
  LOG.info("============================================================");
  LOG.info(msg.toString());
}
项目:hadoop-2.6.0-cdh5.4.3    文件:TestPBHelper.java   
@Test
public void testConvertNamenodeRegistration() {
  // Round-trip a NamenodeRegistration through its protobuf form and
  // verify that every field survives the conversion unchanged.
  StorageInfo storageInfo = getStorageInfo(NodeType.NAME_NODE);
  NamenodeRegistration original = new NamenodeRegistration("address:999",
      "http:1000", storageInfo, NamenodeRole.NAMENODE);
  NamenodeRegistrationProto proto = PBHelper.convert(original);
  NamenodeRegistration roundTripped = PBHelper.convert(proto);
  assertEquals(original.getAddress(), roundTripped.getAddress());
  assertEquals(original.getClusterID(), roundTripped.getClusterID());
  assertEquals(original.getCTime(), roundTripped.getCTime());
  assertEquals(original.getHttpAddress(), roundTripped.getHttpAddress());
  assertEquals(original.getLayoutVersion(), roundTripped.getLayoutVersion());
  assertEquals(original.getNamespaceID(), roundTripped.getNamespaceID());
  assertEquals(original.getRegistrationID(), roundTripped.getRegistrationID());
  assertEquals(original.getRole(), roundTripped.getRole());
  assertEquals(original.getVersion(), roundTripped.getVersion());
}
项目:hadoop-2.6.0-cdh5.4.3    文件:TestDFSRollback.java   
/**
 * Verify that the new current directory is the old previous.
 * It is assumed that the server has recovered and rolled back.
 */
void checkResult(NodeType nodeType, String[] baseDirs) throws Exception {
  List<File> currentDirs = Lists.newArrayList();
  for (String base : baseDirs) {
    File current = new File(base, "current");
    currentDirs.add(current);
    switch (nodeType) {
    case NAME_NODE:
      FSImageTestUtil.assertReasonableNameCurrentDir(current);
      break;
    case DATA_NODE:
      // Rolled-back datanode content must match the pristine master copy.
      assertEquals(
          UpgradeUtilities.checksumContents(nodeType, current, false),
          UpgradeUtilities.checksumMasterDataNodeContents());
      break;
    }
  }

  // Every "current" directory must hold identical content.
  FSImageTestUtil.assertParallelFilesAreIdentical(
      currentDirs, Collections.<String>emptySet());

  // Rollback must have removed each "previous" directory.
  for (String base : baseDirs) {
    assertFalse(new File(base, "previous").isDirectory());
  }
}
项目:hadoop-plus    文件:TestDFSStartupVersions.java   
/**
 * Writes an INFO log message containing the parameters.
 */
void log(String label, NodeType nodeType, Integer testCase,
    StorageData sd) {
  // Assemble the whole message first, then emit separator + message.
  StringBuilder msg = new StringBuilder("***TEST*** ").append(label).append(":");
  if (testCase != null) {
    msg.append(" testCase=").append(testCase);
  }
  msg.append(" nodeType=").append(nodeType)
     .append(" layoutVersion=").append(sd.storageInfo.getLayoutVersion())
     .append(" namespaceID=").append(sd.storageInfo.getNamespaceID())
     .append(" fsscTime=").append(sd.storageInfo.getCTime())
     .append(" clusterID=").append(sd.storageInfo.getClusterID())
     .append(" BlockPoolID=").append(sd.blockPoolId);
  LOG.info("============================================================");
  LOG.info(msg.toString());
}
项目:hadoop-plus    文件:TestDFSRollback.java   
/**
 * Verify that the new current directory is the old previous.
 * It is assumed that the server has recovered and rolled back.
 */
void checkResult(NodeType nodeType, String[] baseDirs) throws Exception {
  List<File> currentDirs = Lists.newArrayList();
  for (String base : baseDirs) {
    File current = new File(base, "current");
    currentDirs.add(current);
    switch (nodeType) {
    case NAME_NODE:
      FSImageTestUtil.assertReasonableNameCurrentDir(current);
      break;
    case DATA_NODE:
      // Rolled-back datanode content must match the pristine master copy.
      assertEquals(
          UpgradeUtilities.checksumContents(nodeType, current),
          UpgradeUtilities.checksumMasterDataNodeContents());
      break;
    }
  }

  // Every "current" directory must hold identical content.
  FSImageTestUtil.assertParallelFilesAreIdentical(
      currentDirs, Collections.<String>emptySet());

  // Rollback must have removed each "previous" directory.
  for (String base : baseDirs) {
    assertFalse(new File(base, "previous").isDirectory());
  }
}
项目:FlexMap    文件:DataNode.java   
/**
 * Create a DatanodeRegistration for a specific block pool.
 * @param nsInfo the namespace info from the first part of the NN handshake
 */
DatanodeRegistration createBPRegistration(NamespaceInfo nsInfo) {
  StorageInfo bpStorageInfo = storage.getBPStorage(nsInfo.getBlockPoolID());
  if (bpStorageInfo == null) {
    // it's null in the case of SimulatedDataSet; synthesize one from the
    // namespace info at the current datanode layout version.
    bpStorageInfo = new StorageInfo(
        DataNodeLayoutVersion.CURRENT_LAYOUT_VERSION,
        nsInfo.getNamespaceID(), nsInfo.clusterID, nsInfo.getCTime(),
        NodeType.DATA_NODE);
  }

  DatanodeID datanodeId = new DatanodeID(
      streamingAddr.getAddress().getHostAddress(), hostName,
      storage.getDatanodeUuid(), getXferPort(), getInfoPort(),
      infoSecurePort, getIpcPort());
  return new DatanodeRegistration(datanodeId, bpStorageInfo,
      new ExportedBlockKeys(), VersionInfo.getVersion());
}
项目:FlexMap    文件:TestDFSStartupVersions.java   
/**
 * Writes an INFO log message containing the parameters.
 */
void log(String label, NodeType nodeType, Integer testCase,
    StorageData sd) {
  // Assemble the whole message first, then emit separator + message.
  StringBuilder msg = new StringBuilder("***TEST*** ").append(label).append(":");
  if (testCase != null) {
    msg.append(" testCase=").append(testCase);
  }
  msg.append(" nodeType=").append(nodeType)
     .append(" layoutVersion=").append(sd.storageInfo.getLayoutVersion())
     .append(" namespaceID=").append(sd.storageInfo.getNamespaceID())
     .append(" fsscTime=").append(sd.storageInfo.getCTime())
     .append(" clusterID=").append(sd.storageInfo.getClusterID())
     .append(" BlockPoolID=").append(sd.blockPoolId);
  LOG.info("============================================================");
  LOG.info(msg.toString());
}
项目:FlexMap    文件:TestPBHelper.java   
@Test
public void testConvertNamenodeRegistration() {
  // Round-trip a NamenodeRegistration through its protobuf form and
  // verify that every field survives the conversion unchanged.
  StorageInfo storageInfo = getStorageInfo(NodeType.NAME_NODE);
  NamenodeRegistration original = new NamenodeRegistration("address:999",
      "http:1000", storageInfo, NamenodeRole.NAMENODE);
  NamenodeRegistrationProto proto = PBHelper.convert(original);
  NamenodeRegistration roundTripped = PBHelper.convert(proto);
  assertEquals(original.getAddress(), roundTripped.getAddress());
  assertEquals(original.getClusterID(), roundTripped.getClusterID());
  assertEquals(original.getCTime(), roundTripped.getCTime());
  assertEquals(original.getHttpAddress(), roundTripped.getHttpAddress());
  assertEquals(original.getLayoutVersion(), roundTripped.getLayoutVersion());
  assertEquals(original.getNamespaceID(), roundTripped.getNamespaceID());
  assertEquals(original.getRegistrationID(), roundTripped.getRegistrationID());
  assertEquals(original.getRole(), roundTripped.getRole());
  assertEquals(original.getVersion(), roundTripped.getVersion());
}
项目:FlexMap    文件:TestDFSRollback.java   
/**
 * Verify that the new current directory is the old previous.
 * It is assumed that the server has recovered and rolled back.
 */
void checkResult(NodeType nodeType, String[] baseDirs) throws Exception {
  List<File> currentDirs = Lists.newArrayList();
  for (String base : baseDirs) {
    File current = new File(base, "current");
    currentDirs.add(current);
    switch (nodeType) {
    case NAME_NODE:
      FSImageTestUtil.assertReasonableNameCurrentDir(current);
      break;
    case DATA_NODE:
      // Rolled-back datanode content must match the pristine master copy.
      assertEquals(
          UpgradeUtilities.checksumContents(nodeType, current, false),
          UpgradeUtilities.checksumMasterDataNodeContents());
      break;
    }
  }

  // Every "current" directory must hold identical content.
  FSImageTestUtil.assertParallelFilesAreIdentical(
      currentDirs, Collections.<String>emptySet());

  // Rollback must have removed each "previous" directory.
  for (String base : baseDirs) {
    assertFalse(new File(base, "previous").isDirectory());
  }
}
项目:hadoop-TCP    文件:TestDFSStartupVersions.java   
/**
 * Writes an INFO log message containing the parameters.
 */
void log(String label, NodeType nodeType, Integer testCase,
    StorageData sd) {
  // Assemble the whole message first, then emit separator + message.
  StringBuilder msg = new StringBuilder("***TEST*** ").append(label).append(":");
  if (testCase != null) {
    msg.append(" testCase=").append(testCase);
  }
  msg.append(" nodeType=").append(nodeType)
     .append(" layoutVersion=").append(sd.storageInfo.getLayoutVersion())
     .append(" namespaceID=").append(sd.storageInfo.getNamespaceID())
     .append(" fsscTime=").append(sd.storageInfo.getCTime())
     .append(" clusterID=").append(sd.storageInfo.getClusterID())
     .append(" BlockPoolID=").append(sd.blockPoolId);
  LOG.info("============================================================");
  LOG.info(msg.toString());
}
项目:hadoop-TCP    文件:TestDFSRollback.java   
/**
 * Verify that the new current directory is the old previous.
 * It is assumed that the server has recovered and rolled back.
 */
void checkResult(NodeType nodeType, String[] baseDirs) throws Exception {
  List<File> currentDirs = Lists.newArrayList();
  for (String base : baseDirs) {
    File current = new File(base, "current");
    currentDirs.add(current);
    switch (nodeType) {
    case NAME_NODE:
      FSImageTestUtil.assertReasonableNameCurrentDir(current);
      break;
    case DATA_NODE:
      // Rolled-back datanode content must match the pristine master copy.
      assertEquals(
          UpgradeUtilities.checksumContents(nodeType, current),
          UpgradeUtilities.checksumMasterDataNodeContents());
      break;
    }
  }

  // Every "current" directory must hold identical content.
  FSImageTestUtil.assertParallelFilesAreIdentical(
      currentDirs, Collections.<String>emptySet());

  // Rollback must have removed each "previous" directory.
  for (String base : baseDirs) {
    assertFalse(new File(base, "previous").isDirectory());
  }
}
项目:hardfs    文件:TestDFSStartupVersions.java   
/**
 * Writes an INFO log message containing the parameters.
 */
void log(String label, NodeType nodeType, Integer testCase,
    StorageData sd) {
  // Assemble the whole message first, then emit separator + message.
  StringBuilder msg = new StringBuilder("***TEST*** ").append(label).append(":");
  if (testCase != null) {
    msg.append(" testCase=").append(testCase);
  }
  msg.append(" nodeType=").append(nodeType)
     .append(" layoutVersion=").append(sd.storageInfo.getLayoutVersion())
     .append(" namespaceID=").append(sd.storageInfo.getNamespaceID())
     .append(" fsscTime=").append(sd.storageInfo.getCTime())
     .append(" clusterID=").append(sd.storageInfo.getClusterID())
     .append(" BlockPoolID=").append(sd.blockPoolId);
  LOG.info("============================================================");
  LOG.info(msg.toString());
}
项目:hardfs    文件:TestDFSRollback.java   
/**
 * Verify that the new current directory is the old previous.
 * It is assumed that the server has recovered and rolled back.
 */
void checkResult(NodeType nodeType, String[] baseDirs) throws Exception {
  List<File> currentDirs = Lists.newArrayList();
  for (String base : baseDirs) {
    File current = new File(base, "current");
    currentDirs.add(current);
    switch (nodeType) {
    case NAME_NODE:
      FSImageTestUtil.assertReasonableNameCurrentDir(current);
      break;
    case DATA_NODE:
      // Rolled-back datanode content must match the pristine master copy.
      assertEquals(
          UpgradeUtilities.checksumContents(nodeType, current),
          UpgradeUtilities.checksumMasterDataNodeContents());
      break;
    }
  }

  // Every "current" directory must hold identical content.
  FSImageTestUtil.assertParallelFilesAreIdentical(
      currentDirs, Collections.<String>emptySet());

  // Rollback must have removed each "previous" directory.
  for (String base : baseDirs) {
    assertFalse(new File(base, "previous").isDirectory());
  }
}
项目:hadoop-on-lustre2    文件:DataNode.java   
/**
 * Create a DatanodeRegistration for a specific block pool.
 * @param nsInfo the namespace info from the first part of the NN handshake
 */
DatanodeRegistration createBPRegistration(NamespaceInfo nsInfo) {
  StorageInfo bpStorageInfo = storage.getBPStorage(nsInfo.getBlockPoolID());
  if (bpStorageInfo == null) {
    // it's null in the case of SimulatedDataSet; synthesize one from the
    // namespace info at the current datanode layout version.
    bpStorageInfo = new StorageInfo(
        DataNodeLayoutVersion.CURRENT_LAYOUT_VERSION,
        nsInfo.getNamespaceID(), nsInfo.clusterID, nsInfo.getCTime(),
        NodeType.DATA_NODE);
  }

  DatanodeID datanodeId = new DatanodeID(
      streamingAddr.getAddress().getHostAddress(), hostName,
      storage.getDatanodeUuid(), getXferPort(), getInfoPort(),
      infoSecurePort, getIpcPort());
  return new DatanodeRegistration(datanodeId, bpStorageInfo,
      new ExportedBlockKeys(), VersionInfo.getVersion());
}
项目:hadoop-on-lustre2    文件:TestDFSStartupVersions.java   
/**
 * Writes an INFO log message containing the parameters.
 */
void log(String label, NodeType nodeType, Integer testCase,
    StorageData sd) {
  // Assemble the whole message first, then emit separator + message.
  StringBuilder msg = new StringBuilder("***TEST*** ").append(label).append(":");
  if (testCase != null) {
    msg.append(" testCase=").append(testCase);
  }
  msg.append(" nodeType=").append(nodeType)
     .append(" layoutVersion=").append(sd.storageInfo.getLayoutVersion())
     .append(" namespaceID=").append(sd.storageInfo.getNamespaceID())
     .append(" fsscTime=").append(sd.storageInfo.getCTime())
     .append(" clusterID=").append(sd.storageInfo.getClusterID())
     .append(" BlockPoolID=").append(sd.blockPoolId);
  LOG.info("============================================================");
  LOG.info(msg.toString());
}
项目:hadoop-on-lustre2    文件:TestPBHelper.java   
@Test
public void testConvertNamenodeRegistration() {
  // Round-trip a NamenodeRegistration through its protobuf form and
  // verify that every field survives the conversion unchanged.
  StorageInfo storageInfo = getStorageInfo(NodeType.NAME_NODE);
  NamenodeRegistration original = new NamenodeRegistration("address:999",
      "http:1000", storageInfo, NamenodeRole.NAMENODE);
  NamenodeRegistrationProto proto = PBHelper.convert(original);
  NamenodeRegistration roundTripped = PBHelper.convert(proto);
  assertEquals(original.getAddress(), roundTripped.getAddress());
  assertEquals(original.getClusterID(), roundTripped.getClusterID());
  assertEquals(original.getCTime(), roundTripped.getCTime());
  assertEquals(original.getHttpAddress(), roundTripped.getHttpAddress());
  assertEquals(original.getLayoutVersion(), roundTripped.getLayoutVersion());
  assertEquals(original.getNamespaceID(), roundTripped.getNamespaceID());
  assertEquals(original.getRegistrationID(), roundTripped.getRegistrationID());
  assertEquals(original.getRole(), roundTripped.getRole());
  assertEquals(original.getVersion(), roundTripped.getVersion());
}
项目:hadoop-on-lustre2    文件:TestDFSRollback.java   
/**
 * Verify that the new current directory is the old previous.
 * It is assumed that the server has recovered and rolled back.
 */
void checkResult(NodeType nodeType, String[] baseDirs) throws Exception {
  List<File> currentDirs = Lists.newArrayList();
  for (String base : baseDirs) {
    File current = new File(base, "current");
    currentDirs.add(current);
    switch (nodeType) {
    case NAME_NODE:
      FSImageTestUtil.assertReasonableNameCurrentDir(current);
      break;
    case DATA_NODE:
      // Rolled-back datanode content must match the pristine master copy.
      assertEquals(
          UpgradeUtilities.checksumContents(nodeType, current),
          UpgradeUtilities.checksumMasterDataNodeContents());
      break;
    }
  }

  // Every "current" directory must hold identical content.
  FSImageTestUtil.assertParallelFilesAreIdentical(
      currentDirs, Collections.<String>emptySet());

  // Rollback must have removed each "previous" directory.
  for (String base : baseDirs) {
    assertFalse(new File(base, "previous").isDirectory());
  }
}
项目:hadoop    文件:NNStorage.java   
/**
 * Construct the NNStorage.
 * @param conf Namenode configuration.
 * @param imageDirs Directories the image can be stored in.
 * @param editsDirs Directories the editlog can be stored in.
 * @throws IOException if any directories are inaccessible.
 */
public NNStorage(Configuration conf, 
                 Collection<URI> imageDirs, Collection<URI> editsDirs) 
    throws IOException {
  // Tag this storage as namenode storage in the parent class.
  super(NodeType.NAME_NODE);

  // Must be initialized before setStorageDirectories populates it.
  // CopyOnWriteArrayList allows concurrent iteration while dirs change.
  storageDirs = new CopyOnWriteArrayList<StorageDirectory>();

  // this may modify the editsDirs, so copy before passing in
  setStorageDirectories(imageDirs, 
                        Lists.newArrayList(editsDirs),
                        FSNamesystem.getSharedEditsDirs(conf));
}
项目:hadoop    文件:NamespaceInfo.java   
/**
 * Builds a NamespaceInfo (namenode storage info plus block-pool /
 * version metadata) at the current namenode layout version.
 */
public NamespaceInfo(int nsID, String clusterID, String bpID,
    long cT, String buildVersion, String softwareVersion,
    long capabilities) {
  super(HdfsConstants.NAMENODE_LAYOUT_VERSION, nsID, clusterID, cT,
      NodeType.NAME_NODE);
  this.blockPoolID = bpID;
  this.capabilities = capabilities;
  this.softwareVersion = softwareVersion;
  this.buildVersion = buildVersion;
}
项目:hadoop    文件:BlockPoolSliceStorage.java   
/**
 * Builds datanode-typed block-pool slice storage for the given
 * namespace/block-pool identity.
 */
BlockPoolSliceStorage(int namespaceID, String bpID, long cTime,
    String clusterId) {
  super(NodeType.DATA_NODE);
  this.clusterID = clusterId;
  this.cTime = cTime;
  this.blockpoolID = bpID;
  this.namespaceID = namespaceID;
  // Concurrent sets tracking which storages carry a rolling-upgrade marker.
  storagesWithoutRollingUpgradeMarker = Collections.newSetFromMap(
      new ConcurrentHashMap<String, Boolean>());
  storagesWithRollingUpgradeMarker = Collections.newSetFromMap(
      new ConcurrentHashMap<String, Boolean>());
}
项目:hadoop    文件:BlockPoolSliceStorage.java   
/** Bare constructor: datanode-typed storage with empty marker sets. */
private BlockPoolSliceStorage() {
  super(NodeType.DATA_NODE);
  // Concurrent sets tracking which storages carry a rolling-upgrade marker.
  storagesWithoutRollingUpgradeMarker = Collections.newSetFromMap(
      new ConcurrentHashMap<String, Boolean>());
  storagesWithRollingUpgradeMarker = Collections.newSetFromMap(
      new ConcurrentHashMap<String, Boolean>());
}
项目:hadoop    文件:StorageInfo.java   
/**
 * Builds storage info from its individual fields.
 * @param layoutV layout version
 * @param nsID namespace id
 * @param cid cluster id
 * @param cT creation time
 * @param type owning node type (name node / data node / journal node)
 */
public StorageInfo(int layoutV, int nsID, String cid, long cT, NodeType type) {
  this.storageType = type;
  this.cTime = cT;
  this.namespaceID = nsID;
  this.clusterID = cid;
  this.layoutVersion = layoutV;
}
项目:hadoop    文件:StorageInfo.java   
/** Validate and set storage type from {@link Properties}*/
protected void checkStorageType(Properties props, StorageDirectory sd)
    throws InconsistentFSStateException {
  // A null expected type means this storage accepts any node type.
  if (storageType == null) {
    return;
  }
  NodeType found = NodeType.valueOf(getProperty(props, sd, "storageType"));
  // Enum constants are singletons, so identity comparison is safe here.
  if (storageType != found) {
    throw new InconsistentFSStateException(sd.root,
        "Incompatible node types: storageType=" + storageType
        + " but StorageDirectory type=" + found);
  }
}
项目:hadoop    文件:JNStorage.java   
/**
 * @param conf Configuration object
 * @param logDir the path to the directory in which data will be stored
 * @param errorReporter a callback to report errors
 * @throws IOException 
 */
protected JNStorage(Configuration conf, File logDir, StartupOption startOpt,
    StorageErrorReporter errorReporter) throws IOException {
  // Tag this storage as journal-node storage in the parent class.
  super(NodeType.JOURNAL_NODE);

  // Register the single log directory, then build the journal manager
  // over it before analyzing/recovering existing state.
  sd = new StorageDirectory(logDir);
  this.addStorageDir(sd);
  this.fjm = new FileJournalManager(conf, sd, errorReporter);

  analyzeAndRecoverStorage(startOpt);
}
项目:hadoop    文件:QJournalProtocolServerSideTranslatorPB.java   
@Override
public DoUpgradeResponseProto doUpgrade(RpcController controller,
    DoUpgradeRequestProto request) throws ServiceException {
  // Unpack the journal-node-typed storage info and delegate; IO failures
  // are surfaced to the RPC layer as ServiceException.
  StorageInfo storageInfo =
      PBHelper.convert(request.getSInfo(), NodeType.JOURNAL_NODE);
  try {
    impl.doUpgrade(convert(request.getJid()), storageInfo);
    return DoUpgradeResponseProto.getDefaultInstance();
  } catch (IOException e) {
    throw new ServiceException(e);
  }
}
项目:hadoop    文件:QJournalProtocolServerSideTranslatorPB.java   
@Override
public CanRollBackResponseProto canRollBack(RpcController controller,
    CanRollBackRequestProto request) throws ServiceException {
  try {
    // Decode both storage descriptors (journal-node typed) up front,
    // then ask the implementation whether rollback is possible.
    StorageInfo currentStorage =
        PBHelper.convert(request.getStorage(), NodeType.JOURNAL_NODE);
    StorageInfo previousStorage =
        PBHelper.convert(request.getPrevStorage(), NodeType.JOURNAL_NODE);
    Boolean rollBackPossible = impl.canRollBack(convert(request.getJid()),
        currentStorage, previousStorage, request.getTargetLayoutVersion());
    return CanRollBackResponseProto.newBuilder()
        .setCanRollBack(rollBackPossible)
        .build();
  } catch (IOException e) {
    throw new ServiceException(e);
  }
}
项目:hadoop    文件:TestPBHelper.java   
@Test
// NOTE(review): method name has a typo ("Storag" -> "Storage"); left as-is
// to avoid changing the test's public identifier.
public void testConvertStoragInfo() {
  // Round-trip a namenode StorageInfo through its protobuf form and
  // verify each field survives the conversion.
  StorageInfo info = getStorageInfo(NodeType.NAME_NODE);
  StorageInfoProto infoProto = PBHelper.convert(info);
  StorageInfo info2 = PBHelper.convert(infoProto, NodeType.NAME_NODE);
  assertEquals(info.getClusterID(), info2.getClusterID());
  assertEquals(info.getCTime(), info2.getCTime());
  assertEquals(info.getLayoutVersion(), info2.getLayoutVersion());
  assertEquals(info.getNamespaceID(), info2.getNamespaceID());
}
项目:hadoop    文件:TestPBHelper.java   
@Test
public void testConvertCheckpointSignature() {
  // Round-trip a CheckpointSignature through its protobuf form and
  // verify that every field survives the conversion unchanged.
  CheckpointSignature original = new CheckpointSignature(
      getStorageInfo(NodeType.NAME_NODE), "bpid", 100, 1);
  CheckpointSignatureProto proto = PBHelper.convert(original);
  CheckpointSignature roundTripped = PBHelper.convert(proto);
  assertEquals(original.getBlockpoolID(), roundTripped.getBlockpoolID());
  assertEquals(original.getClusterID(), roundTripped.getClusterID());
  assertEquals(original.getCTime(), roundTripped.getCTime());
  assertEquals(original.getCurSegmentTxId(), roundTripped.getCurSegmentTxId());
  assertEquals(original.getLayoutVersion(), roundTripped.getLayoutVersion());
  assertEquals(original.getMostRecentCheckpointTxId(),
      roundTripped.getMostRecentCheckpointTxId());
  assertEquals(original.getNamespaceID(), roundTripped.getNamespaceID());
}
项目:hadoop    文件:TestPBHelper.java   
@Test
public void testConvertDatanodeRegistration() {
  // Round-trip a DatanodeRegistration (with exported block keys) through
  // its protobuf form and verify the pieces survive the conversion.
  DatanodeID datanodeId = DFSTestUtil.getLocalDatanodeID();
  BlockKey[] blockKeys = new BlockKey[] { getBlockKey(2), getBlockKey(3) };
  ExportedBlockKeys exportedKeys = new ExportedBlockKeys(true, 9, 10,
      getBlockKey(1), blockKeys);
  DatanodeRegistration original = new DatanodeRegistration(datanodeId,
      new StorageInfo(NodeType.DATA_NODE), exportedKeys, "3.0.0");
  DatanodeRegistrationProto proto = PBHelper.convert(original);
  DatanodeRegistration roundTripped = PBHelper.convert(proto);
  compare(original.getStorageInfo(), roundTripped.getStorageInfo());
  compare(original.getExportedKeys(), roundTripped.getExportedKeys());
  compare(original, roundTripped);
  assertEquals(original.getSoftwareVersion(), roundTripped.getSoftwareVersion());
}
项目:aliyun-oss-hadoop-fs    文件:NNStorage.java   
/**
 * Construct the NNStorage.
 * @param conf Namenode configuration.
 * @param imageDirs Directories the image can be stored in.
 * @param editsDirs Directories the editlog can be stored in.
 * @throws IOException if any directories are inaccessible.
 */
public NNStorage(Configuration conf, 
                 Collection<URI> imageDirs, Collection<URI> editsDirs) 
    throws IOException {
  // Tag this storage as namenode storage in the parent class.
  super(NodeType.NAME_NODE);

  // Must be initialized before setStorageDirectories populates it.
  // CopyOnWriteArrayList allows concurrent iteration while dirs change.
  storageDirs = new CopyOnWriteArrayList<StorageDirectory>();

  // this may modify the editsDirs, so copy before passing in
  setStorageDirectories(imageDirs, 
                        Lists.newArrayList(editsDirs),
                        FSNamesystem.getSharedEditsDirs(conf));
  //Update NameDirSize metric value after NN start
  updateNameDirSize();
}