Example source code for the Java class org.apache.hadoop.hdfs.server.protocol.DatanodeStorage.State

Project: hadoop    File: PBHelper.java
public static NNHAStatusHeartbeatProto convert(NNHAStatusHeartbeat hb) {
  if (hb == null) return null;
  NNHAStatusHeartbeatProto.Builder builder =
    NNHAStatusHeartbeatProto.newBuilder();
  switch (hb.getState()) {
    case ACTIVE:
      builder.setState(NNHAStatusHeartbeatProto.State.ACTIVE);
      break;
    case STANDBY:
      builder.setState(NNHAStatusHeartbeatProto.State.STANDBY);
      break;
    default:
      throw new IllegalArgumentException("Unexpected NNHAStatusHeartbeat.State:" +
          hb.getState());
  }
  builder.setTxid(hb.getTxId());
  return builder.build();
}
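Every PBHelper fork below maps the heartbeat state with this same switch-and-rejecting-default shape: each representable value is translated explicitly, and anything without a wire representation fails fast. A minimal self-contained sketch of the pattern (HaState and HaStateProto are hypothetical stand-ins, not Hadoop types):

public class EnumMappingSketch {
  // Hypothetical stand-ins for NNHAStatusHeartbeat's state and its proto enum.
  enum HaState { ACTIVE, STANDBY, INITIALIZING }
  enum HaStateProto { ACTIVE, STANDBY }

  static HaStateProto toProto(HaState s) {
    switch (s) {
      case ACTIVE:  return HaStateProto.ACTIVE;
      case STANDBY: return HaStateProto.STANDBY;
      default:
        // INITIALIZING has no wire form: fail fast, as convert() above does.
        throw new IllegalArgumentException("Unexpected state: " + s);
    }
  }

  public static void main(String[] args) {
    System.out.println(toProto(HaState.ACTIVE));   // ACTIVE
    System.out.println(toProto(HaState.STANDBY));  // STANDBY
  }
}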
Project: aliyun-oss-hadoop-fs    File: BlockManager.java
/**
 * Adds block to list of blocks which will be invalidated on all its
 * datanodes.
 */
private void addToInvalidates(BlockInfo storedBlock) {
  if (!isPopulatingReplQueues()) {
    return;
  }
  StringBuilder datanodes = new StringBuilder();
  for(DatanodeStorageInfo storage : blocksMap.getStorages(storedBlock,
      State.NORMAL)) {
    final DatanodeDescriptor node = storage.getDatanodeDescriptor();
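    // Resolve the block actually stored on this storage (for a striped block
    // group each storage may hold a different internal block); null if none.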
    final Block b = getBlockOnStorage(storedBlock, storage);
    if (b != null) {
      invalidateBlocks.add(b, node, false);
      datanodes.append(node).append(" ");
    }
  }
  if (datanodes.length() != 0) {
    blockLog.debug("BLOCK* addToInvalidates: {} {}", storedBlock, datanodes);
  }
}
Project: aliyun-oss-hadoop-fs    File: DatanodeDescriptor.java
/**
 * Find whether the datanode contains good storage of given type to
 * place block of size <code>blockSize</code>.
 *
 * <p>Currently the datanode only cares about the storage type; the first
 * storage of the given type that we see is returned.
 *
 * @param t requested storage type
 * @param blockSize requested block size
 */
public DatanodeStorageInfo chooseStorage4Block(StorageType t,
    long blockSize) {
  final long requiredSize =
      blockSize * HdfsServerConstants.MIN_BLOCKS_FOR_WRITE;
  final long scheduledSize = blockSize * getBlocksScheduled(t);
  long remaining = 0;
  DatanodeStorageInfo storage = null;
  for (DatanodeStorageInfo s : getStorageInfos()) {
    if (s.getState() == State.NORMAL && s.getStorageType() == t) {
      if (storage == null) {
        storage = s;
      }
      long r = s.getRemaining();
      if (r >= requiredSize) {
        remaining += r;
      }
    }
  }
  if (requiredSize > remaining - scheduledSize) {
    return null;
  }
  return storage;
}
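Note that the acceptance test is aggregate, not per-storage: the method returns the first NORMAL storage of the requested type, but only if the summed free space of storages holding at least requiredSize, minus bytes already scheduled for that storage type, still covers requiredSize. A self-contained sketch of the arithmetic with illustrative numbers (MIN_BLOCKS_FOR_WRITE is 1 in mainline Hadoop, but treat that as an assumption for any particular fork):

public class ChooseStorageSketch {
  public static void main(String[] args) {
    long blockSize = 128L << 20;                          // 128 MB
    long requiredSize = blockSize * 1;                    // MIN_BLOCKS_FOR_WRITE == 1
    long[] remainingPerStorage = {200L << 20, 90L << 20};
    long scheduledSize = blockSize;                       // one block already scheduled

    long remaining = 0;
    for (long r : remainingPerStorage) {
      if (r >= requiredSize) {
        remaining += r;                                   // the 90 MB storage is skipped
      }
    }
    // 200 MB - 128 MB scheduled = 72 MB, less than the 128 MB required: reject.
    System.out.println(requiredSize > remaining - scheduledSize
        ? "no storage chosen" : "first NORMAL storage of the type chosen");
  }
}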
Project: big-c / hadoop-2.6.0-cdh5.4.3 / hadoop-plus / FlexMap / hadoop-TCP / hardfs / hadoop-on-lustre2    File: PBHelper.java
These seven forks carry a convert(NNHAStatusHeartbeat) method that is verbatim identical to the hadoop snippet above.
Project: FlexMap    File: BlockManager.java
/**
 * Adds block to list of blocks which will be invalidated on all its
 * datanodes.
 */
private void addToInvalidates(Block b) {
  if (!namesystem.isPopulatingReplQueues()) {
    return;
  }
  StringBuilder datanodes = new StringBuilder();
  for(DatanodeStorageInfo storage : blocksMap.getStorages(b, State.NORMAL)) {
    final DatanodeDescriptor node = storage.getDatanodeDescriptor();
    invalidateBlocks.add(b, node, false);
    datanodes.append(node).append(" ");
  }
  if (datanodes.length() != 0) {
    blockLog.info("BLOCK* addToInvalidates: " + b + " "
        + datanodes);
  }
}
Project: hadoop    File: BlockManager.java
/**
 * Adds block to list of blocks which will be invalidated on all its
 * datanodes.
 */
private void addToInvalidates(Block b) {
  if (!namesystem.isPopulatingReplQueues()) {
    return;
  }
  StringBuilder datanodes = new StringBuilder();
  for(DatanodeStorageInfo storage : blocksMap.getStorages(b, State.NORMAL)) {
    final DatanodeDescriptor node = storage.getDatanodeDescriptor();
    invalidateBlocks.add(b, node, false);
    datanodes.append(node).append(" ");
  }
  if (datanodes.length() != 0) {
    blockLog.info("BLOCK* addToInvalidates: {} {}", b, datanodes.toString());
  }
}
Project: hadoop    File: BlockManager.java
/**
 * Return the number of nodes hosting a given block, grouped
 * by the state of those replicas.
 */
public NumberReplicas countNodes(Block b) {
  int decommissioned = 0;
  int live = 0;
  int corrupt = 0;
  int excess = 0;
  int stale = 0;
  Collection<DatanodeDescriptor> nodesCorrupt = corruptReplicas.getNodes(b);
  for(DatanodeStorageInfo storage : blocksMap.getStorages(b, State.NORMAL)) {
    final DatanodeDescriptor node = storage.getDatanodeDescriptor();
    if ((nodesCorrupt != null) && (nodesCorrupt.contains(node))) {
      corrupt++;
    } else if (node.isDecommissionInProgress() || node.isDecommissioned()) {
      decommissioned++;
    } else {
      LightWeightLinkedSet<Block> blocksExcess = excessReplicateMap.get(node
          .getDatanodeUuid());
      if (blocksExcess != null && blocksExcess.contains(b)) {
        excess++;
      } else {
        live++;
      }
    }
    if (storage.areBlockContentsStale()) {
      stale++;
    }
  }
  return new NumberReplicas(live, decommissioned, corrupt, excess, stale);
}
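The counts come back bundled in the returned NumberReplicas; liveReplicas() is the accessor exercised by countLiveNodes below. A hedged sketch of a typical call site (b and expectedReplication are placeholder names, not from this page):

NumberReplicas num = countNodes(b);
if (num.liveReplicas() < expectedReplication) {
  // under-replicated: a replication monitor would queue this block for copying
}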
Project: hadoop    File: BlockManager.java
/** 
 * Simpler, faster form of {@link #countNodes(Block)} that only returns the number
 * of live nodes.  If in startup safemode (or its 30-sec extension period),
 * then it gains speed by ignoring issues of excess replicas or nodes
 * that are decommissioned or in process of becoming decommissioned.
 * If not in startup, then it calls {@link #countNodes(Block)} instead.
 * 
 * @param b - the block being tested
 * @return count of live nodes for this block
 */
int countLiveNodes(BlockInfoContiguous b) {
  if (!namesystem.isInStartupSafeMode()) {
    return countNodes(b).liveReplicas();
  }
  // else proceed with fast case
  int live = 0;
  Collection<DatanodeDescriptor> nodesCorrupt = corruptReplicas.getNodes(b);
  for(DatanodeStorageInfo storage : blocksMap.getStorages(b, State.NORMAL)) {
    final DatanodeDescriptor node = storage.getDatanodeDescriptor();
    if ((nodesCorrupt == null) || (!nodesCorrupt.contains(node)))
      live++;
  }
  return live;
}
Project: hadoop    File: DatanodeDescriptor.java
private void updateFailedStorage(
    Set<DatanodeStorageInfo> failedStorageInfos) {
  for (DatanodeStorageInfo storageInfo : failedStorageInfos) {
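    // Skip storages already marked FAILED so the transition is logged only
    // once, even if successive heartbeats keep reporting the same failure.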
    if (storageInfo.getState() != DatanodeStorage.State.FAILED) {
      LOG.info(storageInfo + " failed.");
      storageInfo.setState(DatanodeStorage.State.FAILED);
    }
  }
}
Project: hadoop    File: DatanodeDescriptor.java
/**
 * Return the sum of remaining spaces of the specified type. If the remaining
 * space of a storage is less than minSize, it won't be counted toward the
 * sum.
 *
 * @param t The storage type. If null, the type is ignored.
 * @param minSize The minimum free space required.
 * @return the sum of remaining spaces that are bigger than minSize.
 */
public long getRemaining(StorageType t, long minSize) {
  long remaining = 0;
  for (DatanodeStorageInfo s : getStorageInfos()) {
    if (s.getState() == State.NORMAL &&
        (t == null || s.getStorageType() == t)) {
      long r = s.getRemaining();
      if (r >= minSize) {
        remaining += r;
      }
    }
  }
  return remaining;
}
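A hedged example of a call site (dn and blockSize are placeholders; StorageType.DISK is a real Hadoop constant):

// Does this datanode still have a block's worth of free space on a healthy
// (NORMAL) DISK storage, counting only storages with at least blockSize free?
long free = dn.getRemaining(StorageType.DISK, blockSize);
boolean hasRoom = free >= blockSize;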
Project: hadoop    File: PBHelper.java
private static StorageState convertState(State state) {
  switch(state) {
  case READ_ONLY_SHARED:
    return StorageState.READ_ONLY_SHARED;
  case NORMAL:
  default:
    return StorageState.NORMAL;
  }
}
Project: hadoop    File: PBHelper.java
private static State convertState(StorageState state) {
  switch(state) {
  case READ_ONLY_SHARED:
    return DatanodeStorage.State.READ_ONLY_SHARED;
  case NORMAL:
  default:
    return DatanodeStorage.State.NORMAL;
  }
}
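One asymmetry worth noting: DatanodeStorage.State also defines FAILED (see updateFailedStorage above), and the default branch folds it into StorageState.NORMAL, so a failed state does not survive the proto conversion. A sketch of the consequence, assuming call access to the two private overloads:

State failed = DatanodeStorage.State.FAILED;
StorageState onWire = convertState(failed);   // hits the default branch: NORMAL
State back = convertState(onWire);            // NORMAL, not FAILED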
Project: aliyun-oss-hadoop-fs    File: PBHelperClient.java
private static State convertState(StorageState state) {
  switch(state) {
  case READ_ONLY_SHARED:
    return State.READ_ONLY_SHARED;
  case NORMAL:
  default:
    return State.NORMAL;
  }
}
Project: aliyun-oss-hadoop-fs    File: PBHelperClient.java
private static StorageState convertState(State state) {
  switch(state) {
  case READ_ONLY_SHARED:
    return StorageState.READ_ONLY_SHARED;
  case NORMAL:
  default:
    return StorageState.NORMAL;
  }
}
Project: aliyun-oss-hadoop-fs    File: BlockManager.java
/**
 * Return the number of nodes hosting a given block, grouped
 * by the state of those replicas.
 * For a striped block, this includes nodes storing blocks belonging to the
 * striped block group.
 */
public NumberReplicas countNodes(Block b) {
  int decommissioned = 0;
  int decommissioning = 0;
  int live = 0;
  int readonly = 0;
  int corrupt = 0;
  int excess = 0;
  int stale = 0;
  Collection<DatanodeDescriptor> nodesCorrupt = corruptReplicas.getNodes(b);
  for(DatanodeStorageInfo storage : blocksMap.getStorages(b)) {
    if (storage.getState() == State.FAILED) {
      continue;
    } else if (storage.getState() == State.READ_ONLY_SHARED) {
      readonly++;
      continue;
    }
    final DatanodeDescriptor node = storage.getDatanodeDescriptor();
    if ((nodesCorrupt != null) && (nodesCorrupt.contains(node))) {
      corrupt++;
    } else if (node.isDecommissionInProgress()) {
      decommissioning++;
    } else if (node.isDecommissioned()) {
      decommissioned++;
    } else {
      LightWeightHashSet<BlockInfo> blocksExcess = excessReplicateMap.get(
          node.getDatanodeUuid());
      if (blocksExcess != null && blocksExcess.contains(b)) {
        excess++;
      } else {
        live++;
      }
    }
    if (storage.areBlockContentsStale()) {
      stale++;
    }
  }
  return new NumberReplicas(live, readonly, decommissioned, decommissioning,
      corrupt, excess, stale);
}
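Compared with the 2.x countNodes above, this variant iterates over all storages instead of pre-filtering to NORMAL: FAILED storages are skipped outright, READ_ONLY_SHARED replicas land in their own readonly bucket, and decommissioning nodes are counted separately from decommissioned ones.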
Project: aliyun-oss-hadoop-fs    File: BlockManager.java
/** 
 * Simpler, faster form of {@link #countNodes} that only returns the number
 * of live nodes.  If in startup safemode (or its 30-sec extension period),
 * then it gains speed by ignoring issues of excess replicas or nodes
 * that are decommissioned or in process of becoming decommissioned.
 * If not in startup, then it calls {@link #countNodes} instead.
 *
 * @param b - the block being tested
 * @return count of live nodes for this block
 */
int countLiveNodes(BlockInfo b) {
  if (!namesystem.isInStartupSafeMode()) {
    return countNodes(b).liveReplicas();
  }
  // else proceed with fast case
  int live = 0;
  Collection<DatanodeDescriptor> nodesCorrupt = corruptReplicas.getNodes(b);
  for(DatanodeStorageInfo storage : blocksMap.getStorages(b, State.NORMAL)) {
    final DatanodeDescriptor node = storage.getDatanodeDescriptor();
    if ((nodesCorrupt == null) || (!nodesCorrupt.contains(node)))
      live++;
  }
  return live;
}
Project: aliyun-oss-hadoop-fs    File: DatanodeDescriptor.java
private void updateFailedStorage(
    Set<DatanodeStorageInfo> failedStorageInfos) {
  for (DatanodeStorageInfo storageInfo : failedStorageInfos) {
    if (storageInfo.getState() != DatanodeStorage.State.FAILED) {
      LOG.info("{} failed.", storageInfo);
      storageInfo.setState(DatanodeStorage.State.FAILED);
    }
  }
}
Project: big-c    File: BlockManager.java / PBHelper.java
big-c's addToInvalidates(Block), countNodes(Block), countLiveNodes(BlockInfoContiguous), and convertState pair are verbatim identical to the hadoop snippets above.
Project: hadoop-2.6.0-cdh5.4.3    File: BlockManager.java / PBHelper.java
Identical to the hadoop snippets above, except that countLiveNodes takes a BlockInfo parameter instead of BlockInfoContiguous.
Project: hadoop-plus    File: PBHelper.java
private static StorageState convert(State state) {
  switch(state) {
  case READ_ONLY:
    return StorageState.READ_ONLY;
  case NORMAL:
  default:
    return StorageState.NORMAL;
  }
}
Project: hadoop-plus    File: PBHelper.java
private static State convert(StorageState state) {
  switch(state) {
  case READ_ONLY:
    return DatanodeStorage.State.READ_ONLY;
  case NORMAL:
  default:
    return DatanodeStorage.State.NORMAL;
  }
}
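The hadoop-plus pair above (and the hops snippet below) predates the rename of this value: the newer forks shown earlier call the same shared, read-only state READ_ONLY_SHARED, while these older trees still use READ_ONLY in both the domain enum and the proto.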
Project: FlexMap    File: BlockManager.java / PBHelper.java
FlexMap's countNodes, countLiveNodes (the BlockInfo variant, as in the CDH fork), and convertState pair likewise match the hadoop snippets above.
Project: hops    File: PBHelper.java
private static StorageState convert(State state) {
  switch (state) {
    case READ_ONLY:
      return StorageState.READ_ONLY;
    case NORMAL:
    default:
      return StorageState.NORMAL;
  }
}