Example source code for the Java class org.apache.hadoop.fs.InvalidRequestException

Project: hadoop    File: CachePoolInfo.java
public static void validate(CachePoolInfo info) throws IOException {
  if (info == null) {
    throw new InvalidRequestException("CachePoolInfo is null");
  }
  if ((info.getLimit() != null) && (info.getLimit() < 0)) {
    throw new InvalidRequestException("Limit is negative.");
  }
  if (info.getMaxRelativeExpiryMs() != null) {
    long maxRelativeExpiryMs = info.getMaxRelativeExpiryMs();
    if (maxRelativeExpiryMs < 0l) {
      throw new InvalidRequestException("Max relative expiry is negative.");
    }
    if (maxRelativeExpiryMs > Expiration.MAX_RELATIVE_EXPIRY_MS) {
      throw new InvalidRequestException("Max relative expiry is too big.");
    }
  }
  validateName(info.poolName);
}
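
The validate() method above runs on the NameNode when a cache pool is created or modified. Below is a minimal client-side sketch, assuming a reachable HDFS cluster and an illustrative pool name, of how a request that fails one of these checks surfaces as an InvalidRequestException.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.InvalidRequestException;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.CachePoolInfo;

public class CachePoolValidationSketch {
  public static void main(String[] args) throws Exception {
    // Assumes fs.defaultFS points at an HDFS cluster; cluster details are
    // not taken from the snippet above.
    Configuration conf = new Configuration();
    DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(conf);

    // A negative limit fails the validate() check shown above.
    CachePoolInfo badPool = new CachePoolInfo("example-pool").setLimit(-1L);
    try {
      dfs.addCachePool(badPool);
    } catch (InvalidRequestException e) {
      // The NameNode rejects the request with "Limit is negative."; on some
      // client versions the exception may arrive wrapped in a RemoteException.
      System.err.println("Pool rejected: " + e.getMessage());
    }
  }
}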
Project: hadoop    File: CacheManager.java
/**
 * Throws an exception if the CachePool does not have enough capacity to
 * cache the given path at the replication factor.
 *
 * @param pool CachePool where the path is being cached
 * @param path Path that is being cached
 * @param replication Replication factor of the path
 * @throws InvalidRequestException if the pool does not have enough capacity
 */
private void checkLimit(CachePool pool, String path,
    short replication) throws InvalidRequestException {
  CacheDirectiveStats stats = computeNeeded(path, replication);
  if (pool.getLimit() == CachePoolInfo.LIMIT_UNLIMITED) {
    return;
  }
  if (pool.getBytesNeeded() + (stats.getBytesNeeded() * replication) > pool
      .getLimit()) {
    throw new InvalidRequestException("Caching path " + path + " of size "
        + stats.getBytesNeeded() / replication + " bytes at replication "
        + replication + " would exceed pool " + pool.getPoolName()
        + "'s remaining capacity of "
        + (pool.getLimit() - pool.getBytesNeeded()) + " bytes.");
  }
}
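
The Javadoc above states the capacity rule in prose. The following sketch restates the check in simplified form with hypothetical numbers (the pool limit, reserved bytes, per-replica size, and replication factor are all made up) to show when the exception fires.

public class PoolCapacitySketch {
  public static void main(String[] args) {
    // Hypothetical figures, not taken from any snippet on this page.
    long poolLimit = 1024L * 1024 * 1024;       // 1 GiB pool limit
    long poolBytesNeeded = 768L * 1024 * 1024;  // bytes already reserved
    long bytesPerReplica = 128L * 1024 * 1024;  // size of the new path
    short replication = 3;

    // Simplified form of the checkLimit() condition: the directive is
    // rejected when the bytes it would add push the pool past its limit.
    long bytesToAdd = bytesPerReplica * replication;
    if (poolBytesNeeded + bytesToAdd > poolLimit) {
      System.out.println("Rejected: would exceed remaining capacity of "
          + (poolLimit - poolBytesNeeded) + " bytes.");
    } else {
      System.out.println("Accepted: directive fits in the pool.");
    }
  }
}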
Project: hadoop    File: CacheManager.java
private void removeInternal(CacheDirective directive)
    throws InvalidRequestException {
  assert namesystem.hasWriteLock();
  // Remove the corresponding entry in directivesByPath.
  String path = directive.getPath();
  List<CacheDirective> directives = directivesByPath.get(path);
  if (directives == null || !directives.remove(directive)) {
    throw new InvalidRequestException("Failed to locate entry " +
        directive.getId() + " by path " + directive.getPath());
  }
  if (directives.size() == 0) {
    directivesByPath.remove(path);
  }
  // Fix up the stats from removing the pool
  final CachePool pool = directive.getPool();
  directive.addBytesNeeded(-directive.getBytesNeeded());
  directive.addFilesNeeded(-directive.getFilesNeeded());

  directivesById.remove(directive.getId());
  pool.getDirectiveList().remove(directive);
  assert directive.getPool() == null;

  setNeedsRescan();
}
Project: hadoop    File: ShortCircuitRegistry.java
public synchronized void unregisterSlot(SlotId slotId)
    throws InvalidRequestException {
  if (!enabled) {
    if (LOG.isTraceEnabled()) {
      LOG.trace("unregisterSlot: ShortCircuitRegistry is " +
          "not enabled.");
    }
    throw new UnsupportedOperationException();
  }
  ShmId shmId = slotId.getShmId();
  RegisteredShm shm = segments.get(shmId);
  if (shm == null) {
    throw new InvalidRequestException("there is no shared memory segment " +
        "registered with shmId " + shmId);
  }
  Slot slot = shm.getSlot(slotId.getSlotIdx());
  slot.makeInvalid();
  shm.unregisterSlot(slotId.getSlotIdx());
  slots.remove(slot.getBlockId(), slot);
}
Project: aliyun-oss-hadoop-fs    File: CachePoolInfo.java
public static void validate(CachePoolInfo info) throws IOException {
  if (info == null) {
    throw new InvalidRequestException("CachePoolInfo is null");
  }
  if ((info.getLimit() != null) && (info.getLimit() < 0)) {
    throw new InvalidRequestException("Limit is negative.");
  }
  if (info.getMaxRelativeExpiryMs() != null) {
    long maxRelativeExpiryMs = info.getMaxRelativeExpiryMs();
    if (maxRelativeExpiryMs < 0l) {
      throw new InvalidRequestException("Max relative expiry is negative.");
    }
    if (maxRelativeExpiryMs > Expiration.MAX_RELATIVE_EXPIRY_MS) {
      throw new InvalidRequestException("Max relative expiry is too big.");
    }
  }
  validateName(info.poolName);
}
Project: aliyun-oss-hadoop-fs    File: CacheManager.java
/**
 * Throws an exception if the CachePool does not have enough capacity to
 * cache the given path at the replication factor.
 *
 * @param pool CachePool where the path is being cached
 * @param path Path that is being cached
 * @param replication Replication factor of the path
 * @throws InvalidRequestException if the pool does not have enough capacity
 */
private void checkLimit(CachePool pool, String path,
    short replication) throws InvalidRequestException {
  CacheDirectiveStats stats = computeNeeded(path, replication);
  if (pool.getLimit() == CachePoolInfo.LIMIT_UNLIMITED) {
    return;
  }
  if (pool.getBytesNeeded() + (stats.getBytesNeeded() * replication) > pool
      .getLimit()) {
    throw new InvalidRequestException("Caching path " + path + " of size "
        + stats.getBytesNeeded() / replication + " bytes at replication "
        + replication + " would exceed pool " + pool.getPoolName()
        + "'s remaining capacity of "
        + (pool.getLimit() - pool.getBytesNeeded()) + " bytes.");
  }
}
Project: aliyun-oss-hadoop-fs    File: CacheManager.java
private void removeInternal(CacheDirective directive)
    throws InvalidRequestException {
  assert namesystem.hasWriteLock();
  // Remove the corresponding entry in directivesByPath.
  String path = directive.getPath();
  List<CacheDirective> directives = directivesByPath.get(path);
  if (directives == null || !directives.remove(directive)) {
    throw new InvalidRequestException("Failed to locate entry " +
        directive.getId() + " by path " + directive.getPath());
  }
  if (directives.size() == 0) {
    directivesByPath.remove(path);
  }
  // Fix up the stats from removing the pool
  final CachePool pool = directive.getPool();
  directive.addBytesNeeded(-directive.getBytesNeeded());
  directive.addFilesNeeded(-directive.getFilesNeeded());

  directivesById.remove(directive.getId());
  pool.getDirectiveList().remove(directive);
  assert directive.getPool() == null;

  setNeedsRescan();
}
Project: aliyun-oss-hadoop-fs    File: ShortCircuitRegistry.java
public synchronized void unregisterSlot(SlotId slotId)
    throws InvalidRequestException {
  if (!enabled) {
    if (LOG.isTraceEnabled()) {
      LOG.trace("unregisterSlot: ShortCircuitRegistry is " +
          "not enabled.");
    }
    throw new UnsupportedOperationException();
  }
  ShmId shmId = slotId.getShmId();
  RegisteredShm shm = segments.get(shmId);
  if (shm == null) {
    throw new InvalidRequestException("there is no shared memory segment " +
        "registered with shmId " + shmId);
  }
  Slot slot = shm.getSlot(slotId.getSlotIdx());
  slot.makeInvalid();
  shm.unregisterSlot(slotId.getSlotIdx());
  slots.remove(slot.getBlockId(), slot);
}
Project: big-c    File: CachePoolInfo.java
public static void validate(CachePoolInfo info) throws IOException {
  if (info == null) {
    throw new InvalidRequestException("CachePoolInfo is null");
  }
  if ((info.getLimit() != null) && (info.getLimit() < 0)) {
    throw new InvalidRequestException("Limit is negative.");
  }
  if (info.getMaxRelativeExpiryMs() != null) {
    long maxRelativeExpiryMs = info.getMaxRelativeExpiryMs();
    if (maxRelativeExpiryMs < 0l) {
      throw new InvalidRequestException("Max relative expiry is negative.");
    }
    if (maxRelativeExpiryMs > Expiration.MAX_RELATIVE_EXPIRY_MS) {
      throw new InvalidRequestException("Max relative expiry is too big.");
    }
  }
  validateName(info.poolName);
}
Project: big-c    File: CacheManager.java
/**
 * Throws an exception if the CachePool does not have enough capacity to
 * cache the given path at the replication factor.
 *
 * @param pool CachePool where the path is being cached
 * @param path Path that is being cached
 * @param replication Replication factor of the path
 * @throws InvalidRequestException if the pool does not have enough capacity
 */
private void checkLimit(CachePool pool, String path,
    short replication) throws InvalidRequestException {
  CacheDirectiveStats stats = computeNeeded(path, replication);
  if (pool.getLimit() == CachePoolInfo.LIMIT_UNLIMITED) {
    return;
  }
  if (pool.getBytesNeeded() + (stats.getBytesNeeded() * replication) > pool
      .getLimit()) {
    throw new InvalidRequestException("Caching path " + path + " of size "
        + stats.getBytesNeeded() / replication + " bytes at replication "
        + replication + " would exceed pool " + pool.getPoolName()
        + "'s remaining capacity of "
        + (pool.getLimit() - pool.getBytesNeeded()) + " bytes.");
  }
}
Project: big-c    File: CacheManager.java
private void removeInternal(CacheDirective directive)
    throws InvalidRequestException {
  assert namesystem.hasWriteLock();
  // Remove the corresponding entry in directivesByPath.
  String path = directive.getPath();
  List<CacheDirective> directives = directivesByPath.get(path);
  if (directives == null || !directives.remove(directive)) {
    throw new InvalidRequestException("Failed to locate entry " +
        directive.getId() + " by path " + directive.getPath());
  }
  if (directives.size() == 0) {
    directivesByPath.remove(path);
  }
  // Fix up the stats from removing the pool
  final CachePool pool = directive.getPool();
  directive.addBytesNeeded(-directive.getBytesNeeded());
  directive.addFilesNeeded(-directive.getFilesNeeded());

  directivesById.remove(directive.getId());
  pool.getDirectiveList().remove(directive);
  assert directive.getPool() == null;

  setNeedsRescan();
}
Project: big-c    File: ShortCircuitRegistry.java
public synchronized void unregisterSlot(SlotId slotId)
    throws InvalidRequestException {
  if (!enabled) {
    if (LOG.isTraceEnabled()) {
      LOG.trace("unregisterSlot: ShortCircuitRegistry is " +
          "not enabled.");
    }
    throw new UnsupportedOperationException();
  }
  ShmId shmId = slotId.getShmId();
  RegisteredShm shm = segments.get(shmId);
  if (shm == null) {
    throw new InvalidRequestException("there is no shared memory segment " +
        "registered with shmId " + shmId);
  }
  Slot slot = shm.getSlot(slotId.getSlotIdx());
  slot.makeInvalid();
  shm.unregisterSlot(slotId.getSlotIdx());
  slots.remove(slot.getBlockId(), slot);
}
Project: hadoop-2.6.0-cdh5.4.3    File: CachePoolInfo.java
public static void validate(CachePoolInfo info) throws IOException {
  if (info == null) {
    throw new InvalidRequestException("CachePoolInfo is null");
  }
  if ((info.getLimit() != null) && (info.getLimit() < 0)) {
    throw new InvalidRequestException("Limit is negative.");
  }
  if (info.getMaxRelativeExpiryMs() != null) {
    long maxRelativeExpiryMs = info.getMaxRelativeExpiryMs();
    if (maxRelativeExpiryMs < 0l) {
      throw new InvalidRequestException("Max relative expiry is negative.");
    }
    if (maxRelativeExpiryMs > Expiration.MAX_RELATIVE_EXPIRY_MS) {
      throw new InvalidRequestException("Max relative expiry is too big.");
    }
  }
  validateName(info.poolName);
}
Project: hadoop-2.6.0-cdh5.4.3    File: CacheManager.java
/**
 * Throws an exception if the CachePool does not have enough capacity to
 * cache the given path at the replication factor.
 *
 * @param pool CachePool where the path is being cached
 * @param path Path that is being cached
 * @param replication Replication factor of the path
 * @throws InvalidRequestException if the pool does not have enough capacity
 */
private void checkLimit(CachePool pool, String path,
    short replication) throws InvalidRequestException {
  CacheDirectiveStats stats = computeNeeded(path, replication);
  if (pool.getLimit() == CachePoolInfo.LIMIT_UNLIMITED) {
    return;
  }
  if (pool.getBytesNeeded() + (stats.getBytesNeeded() * replication) > pool
      .getLimit()) {
    throw new InvalidRequestException("Caching path " + path + " of size "
        + stats.getBytesNeeded() / replication + " bytes at replication "
        + replication + " would exceed pool " + pool.getPoolName()
        + "'s remaining capacity of "
        + (pool.getLimit() - pool.getBytesNeeded()) + " bytes.");
  }
}
Project: hadoop-2.6.0-cdh5.4.3    File: CacheManager.java
private void removeInternal(CacheDirective directive)
    throws InvalidRequestException {
  assert namesystem.hasWriteLock();
  // Remove the corresponding entry in directivesByPath.
  String path = directive.getPath();
  List<CacheDirective> directives = directivesByPath.get(path);
  if (directives == null || !directives.remove(directive)) {
    throw new InvalidRequestException("Failed to locate entry " +
        directive.getId() + " by path " + directive.getPath());
  }
  if (directives.size() == 0) {
    directivesByPath.remove(path);
  }
  // Fix up the stats from removing the pool
  final CachePool pool = directive.getPool();
  directive.addBytesNeeded(-directive.getBytesNeeded());
  directive.addFilesNeeded(-directive.getFilesNeeded());

  directivesById.remove(directive.getId());
  pool.getDirectiveList().remove(directive);
  assert directive.getPool() == null;

  setNeedsRescan();
}
Project: hadoop-2.6.0-cdh5.4.3    File: ShortCircuitRegistry.java
public synchronized void unregisterSlot(SlotId slotId)
    throws InvalidRequestException {
  if (!enabled) {
    if (LOG.isTraceEnabled()) {
      LOG.trace("unregisterSlot: ShortCircuitRegistry is " +
          "not enabled.");
    }
    throw new UnsupportedOperationException();
  }
  ShmId shmId = slotId.getShmId();
  RegisteredShm shm = segments.get(shmId);
  if (shm == null) {
    throw new InvalidRequestException("there is no shared memory segment " +
        "registered with shmId " + shmId);
  }
  Slot slot = shm.getSlot(slotId.getSlotIdx());
  slot.makeInvalid();
  shm.unregisterSlot(slotId.getSlotIdx());
  slots.remove(slot.getBlockId(), slot);
}
Project: FlexMap    File: CachePoolInfo.java
public static void validate(CachePoolInfo info) throws IOException {
  if (info == null) {
    throw new InvalidRequestException("CachePoolInfo is null");
  }
  if ((info.getLimit() != null) && (info.getLimit() < 0)) {
    throw new InvalidRequestException("Limit is negative.");
  }
  if (info.getMaxRelativeExpiryMs() != null) {
    long maxRelativeExpiryMs = info.getMaxRelativeExpiryMs();
    if (maxRelativeExpiryMs < 0l) {
      throw new InvalidRequestException("Max relative expiry is negative.");
    }
    if (maxRelativeExpiryMs > Expiration.MAX_RELATIVE_EXPIRY_MS) {
      throw new InvalidRequestException("Max relative expiry is too big.");
    }
  }
  validateName(info.poolName);
}
Project: FlexMap    File: CacheManager.java
/**
 * Throws an exception if the CachePool does not have enough capacity to
 * cache the given path at the replication factor.
 *
 * @param pool CachePool where the path is being cached
 * @param path Path that is being cached
 * @param replication Replication factor of the path
 * @throws InvalidRequestException if the pool does not have enough capacity
 */
private void checkLimit(CachePool pool, String path,
    short replication) throws InvalidRequestException {
  CacheDirectiveStats stats = computeNeeded(path, replication);
  if (pool.getLimit() == CachePoolInfo.LIMIT_UNLIMITED) {
    return;
  }
  if (pool.getBytesNeeded() + (stats.getBytesNeeded() * replication) > pool
      .getLimit()) {
    throw new InvalidRequestException("Caching path " + path + " of size "
        + stats.getBytesNeeded() / replication + " bytes at replication "
        + replication + " would exceed pool " + pool.getPoolName()
        + "'s remaining capacity of "
        + (pool.getLimit() - pool.getBytesNeeded()) + " bytes.");
  }
}
Project: FlexMap    File: CacheManager.java
private void removeInternal(CacheDirective directive)
    throws InvalidRequestException {
  assert namesystem.hasWriteLock();
  // Remove the corresponding entry in directivesByPath.
  String path = directive.getPath();
  List<CacheDirective> directives = directivesByPath.get(path);
  if (directives == null || !directives.remove(directive)) {
    throw new InvalidRequestException("Failed to locate entry " +
        directive.getId() + " by path " + directive.getPath());
  }
  if (directives.size() == 0) {
    directivesByPath.remove(path);
  }
  // Fix up the stats from removing the pool
  final CachePool pool = directive.getPool();
  directive.addBytesNeeded(-directive.getBytesNeeded());
  directive.addFilesNeeded(-directive.getFilesNeeded());

  directivesById.remove(directive.getId());
  pool.getDirectiveList().remove(directive);
  assert directive.getPool() == null;

  setNeedsRescan();
}
Project: FlexMap    File: ShortCircuitRegistry.java
public synchronized void unregisterSlot(SlotId slotId)
    throws InvalidRequestException {
  if (!enabled) {
    if (LOG.isTraceEnabled()) {
      LOG.trace("unregisterSlot: ShortCircuitRegistry is " +
          "not enabled.");
    }
    throw new UnsupportedOperationException();
  }
  ShmId shmId = slotId.getShmId();
  RegisteredShm shm = segments.get(shmId);
  if (shm == null) {
    throw new InvalidRequestException("there is no shared memory segment " +
        "registered with shmId " + shmId);
  }
  Slot slot = shm.getSlot(slotId.getSlotIdx());
  slot.makeInvalid();
  shm.unregisterSlot(slotId.getSlotIdx());
  slots.remove(slot.getBlockId(), slot);
}
Project: hadoop-on-lustre2    File: CachePoolInfo.java
public static void validate(CachePoolInfo info) throws IOException {
  if (info == null) {
    throw new InvalidRequestException("CachePoolInfo is null");
  }
  if ((info.getLimit() != null) && (info.getLimit() < 0)) {
    throw new InvalidRequestException("Limit is negative.");
  }
  if (info.getMaxRelativeExpiryMs() != null) {
    long maxRelativeExpiryMs = info.getMaxRelativeExpiryMs();
    if (maxRelativeExpiryMs < 0l) {
      throw new InvalidRequestException("Max relative expiry is negative.");
    }
    if (maxRelativeExpiryMs > Expiration.MAX_RELATIVE_EXPIRY_MS) {
      throw new InvalidRequestException("Max relative expiry is too big.");
    }
  }
  validateName(info.poolName);
}
Project: hadoop-on-lustre2    File: CacheManager.java
/**
 * Throws an exception if the CachePool does not have enough capacity to
 * cache the given path at the replication factor.
 *
 * @param pool CachePool where the path is being cached
 * @param path Path that is being cached
 * @param replication Replication factor of the path
 * @throws InvalidRequestException if the pool does not have enough capacity
 */
private void checkLimit(CachePool pool, String path,
    short replication) throws InvalidRequestException {
  CacheDirectiveStats stats = computeNeeded(path, replication);
  if (pool.getLimit() == CachePoolInfo.LIMIT_UNLIMITED) {
    return;
  }
  if (pool.getBytesNeeded() + (stats.getBytesNeeded() * replication) > pool
      .getLimit()) {
    throw new InvalidRequestException("Caching path " + path + " of size "
        + stats.getBytesNeeded() / replication + " bytes at replication "
        + replication + " would exceed pool " + pool.getPoolName()
        + "'s remaining capacity of "
        + (pool.getLimit() - pool.getBytesNeeded()) + " bytes.");
  }
}
Project: hadoop-on-lustre2    File: CacheManager.java
private void removeInternal(CacheDirective directive)
    throws InvalidRequestException {
  assert namesystem.hasWriteLock();
  // Remove the corresponding entry in directivesByPath.
  String path = directive.getPath();
  List<CacheDirective> directives = directivesByPath.get(path);
  if (directives == null || !directives.remove(directive)) {
    throw new InvalidRequestException("Failed to locate entry " +
        directive.getId() + " by path " + directive.getPath());
  }
  if (directives.size() == 0) {
    directivesByPath.remove(path);
  }
  // Fix up the stats from removing the pool
  final CachePool pool = directive.getPool();
  directive.addBytesNeeded(-directive.getBytesNeeded());
  directive.addFilesNeeded(-directive.getFilesNeeded());

  directivesById.remove(directive.getId());
  pool.getDirectiveList().remove(directive);
  assert directive.getPool() == null;

  setNeedsRescan();
}
Project: hadoop-on-lustre2    File: ShortCircuitRegistry.java
public synchronized void unregisterSlot(SlotId slotId)
    throws InvalidRequestException {
  if (!enabled) {
    if (LOG.isTraceEnabled()) {
      LOG.trace("unregisterSlot: ShortCircuitRegistry is " +
          "not enabled.");
    }
    throw new UnsupportedOperationException();
  }
  ShmId shmId = slotId.getShmId();
  RegisteredShm shm = segments.get(shmId);
  if (shm == null) {
    throw new InvalidRequestException("there is no shared memory segment " +
        "registered with shmId " + shmId);
  }
  Slot slot = shm.getSlot(slotId.getSlotIdx());
  slot.makeInvalid();
  shm.unregisterSlot(slotId.getSlotIdx());
  slots.remove(slot.getBlockId(), slot);
}
Project: hadoop    File: CacheDirectiveIterator.java
@Override
public BatchedEntries<CacheDirectiveEntry> makeRequest(Long prevKey)
    throws IOException {
  BatchedEntries<CacheDirectiveEntry> entries = null;
  TraceScope scope = Trace.startSpan("listCacheDirectives", traceSampler);
  try {
    entries = namenode.listCacheDirectives(prevKey, filter);
  } catch (IOException e) {
    if (e.getMessage().contains("Filtering by ID is unsupported")) {
      // Retry case for old servers, do the filtering client-side
      long id = filter.getId();
      filter = removeIdFromFilter(filter);
      // Using id - 1 as prevId should get us a window containing the id
      // This is somewhat brittle, since it depends on directives being
      // returned in order of ascending ID.
      entries = namenode.listCacheDirectives(id - 1, filter);
      for (int i=0; i<entries.size(); i++) {
        CacheDirectiveEntry entry = entries.get(i);
        if (entry.getInfo().getId().equals((Long)id)) {
          return new SingleEntry(entry);
        }
      }
      throw new RemoteException(InvalidRequestException.class.getName(),
          "Did not find requested id " + id);
    }
    throw e;
  } finally {
    scope.close();
  }
  Preconditions.checkNotNull(entries);
  return entries;
}
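
makeRequest() above backs the batched iterator returned to clients; the inline comments describe the client-side fallback when an old NameNode cannot filter by ID. A hedged usage sketch, assuming a reachable cluster and an illustrative pool-name filter, of driving that iterator through DistributedFileSystem.listCacheDirectives:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;

public class ListCacheDirectivesSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(conf);

    // Filter by pool name; "example-pool" is illustrative.
    CacheDirectiveInfo filter = new CacheDirectiveInfo.Builder()
        .setPool("example-pool")
        .build();

    // The iterator fetches results in batches via makeRequest(); filtering
    // by ID against an old server triggers the client-side fallback above.
    RemoteIterator<CacheDirectiveEntry> it = dfs.listCacheDirectives(filter);
    while (it.hasNext()) {
      CacheDirectiveEntry entry = it.next();
      System.out.println(entry.getInfo().getId() + "\t"
          + entry.getInfo().getPath() + "\t"
          + entry.getInfo().getPool());
    }
  }
}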
Project: hadoop    File: ShortCircuitShm.java
synchronized public final Slot getSlot(int slotIdx)
    throws InvalidRequestException {
  if (!allocatedSlots.get(slotIdx)) {
    throw new InvalidRequestException(this + ": slot " + slotIdx +
        " does not exist.");
  }
  return slots[slotIdx];
}
Project: hadoop    File: ShortCircuitShm.java
/**
 * Register a slot.
 *
 * This function looks at a slot which has already been initialized (by
 * another process), and registers it with us.  Then, it returns the 
 * relevant Slot object.
 *
 * @return    The slot.
 *
 * @throws InvalidRequestException
 *            If the slot index we're trying to allocate has not been
 *            initialized, or is already in use.
 */
synchronized public final Slot registerSlot(int slotIdx,
    ExtendedBlockId blockId) throws InvalidRequestException {
  if (slotIdx < 0) {
    throw new InvalidRequestException(this + ": invalid negative slot " +
        "index " + slotIdx);
  }
  if (slotIdx >= slots.length) {
    throw new InvalidRequestException(this + ": invalid slot " +
        "index " + slotIdx);
  }
  if (allocatedSlots.get(slotIdx)) {
    throw new InvalidRequestException(this + ": slot " + slotIdx +
        " is already in use.");
  }
  Slot slot = new Slot(calculateSlotAddress(slotIdx), blockId);
  if (!slot.isValid()) {
    throw new InvalidRequestException(this + ": slot " + slotIdx +
        " is not marked as valid.");
  }
  slots[slotIdx] = slot;
  allocatedSlots.set(slotIdx, true);
  if (LOG.isTraceEnabled()) {
    LOG.trace(this + ": registerSlot " + slotIdx + ": allocatedSlots=" + allocatedSlots +
                StringUtils.getStackTrace(Thread.currentThread()));
  }
  return slot;
}
Project: hadoop    File: CacheManager.java
private static String validatePoolName(CacheDirectiveInfo directive)
    throws InvalidRequestException {
  String pool = directive.getPool();
  if (pool == null) {
    throw new InvalidRequestException("No pool specified.");
  }
  if (pool.isEmpty()) {
    throw new InvalidRequestException("Invalid empty pool name.");
  }
  return pool;
}
Project: hadoop    File: CacheManager.java
private static String validatePath(CacheDirectiveInfo directive)
    throws InvalidRequestException {
  if (directive.getPath() == null) {
    throw new InvalidRequestException("No path specified.");
  }
  String path = directive.getPath().toUri().getPath();
  if (!DFSUtil.isValidName(path)) {
    throw new InvalidRequestException("Invalid path '" + path + "'.");
  }
  return path;
}
Project: hadoop    File: CacheManager.java
private static short validateReplication(CacheDirectiveInfo directive,
    short defaultValue) throws InvalidRequestException {
  short repl = (directive.getReplication() != null)
      ? directive.getReplication() : defaultValue;
  if (repl <= 0) {
    throw new InvalidRequestException("Invalid replication factor " + repl
        + " <= 0");
  }
  return repl;
}
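
The three helpers above check the pool, path, and replication of an incoming directive. A minimal sketch, assuming a reachable cluster and using an illustrative pool name and path, of building a directive that passes all three checks:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.InvalidRequestException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;

public class AddCacheDirectiveSketch {
  public static void main(String[] args) throws Exception {
    DistributedFileSystem dfs =
        (DistributedFileSystem) FileSystem.get(new Configuration());

    // Non-empty pool, absolute HDFS path, replication > 0: satisfies the
    // validatePoolName/validatePath/validateReplication checks above.
    CacheDirectiveInfo directive = new CacheDirectiveInfo.Builder()
        .setPool("example-pool")          // illustrative pool name
        .setPath(new Path("/data/warm"))  // illustrative path
        .setReplication((short) 2)
        .build();

    try {
      long id = dfs.addCacheDirective(directive);
      System.out.println("Added cache directive with ID " + id);
    } catch (InvalidRequestException e) {
      // Thrown (possibly wrapped) when any of the checks above fails.
      System.err.println("Directive rejected: " + e.getMessage());
    }
  }
}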
Project: hadoop    File: CacheManager.java
/**
 * Calculates the absolute expiry time of the directive from the
 * {@link CacheDirectiveInfo.Expiration}. This converts a relative Expiration
 * into an absolute time based on the local clock.
 * 
 * @param info to validate.
 * @param maxRelativeExpiryTime of the info's pool.
 * @return the expiration time, or the pool's max absolute expiration if the
 *         info's expiration was not set.
 * @throws InvalidRequestException if the info's Expiration is invalid.
 */
private static long validateExpiryTime(CacheDirectiveInfo info,
    long maxRelativeExpiryTime) throws InvalidRequestException {
  LOG.trace("Validating directive {} pool maxRelativeExpiryTime {}", info,
      maxRelativeExpiryTime);
  final long now = new Date().getTime();
  final long maxAbsoluteExpiryTime = now + maxRelativeExpiryTime;
  if (info == null || info.getExpiration() == null) {
    return maxAbsoluteExpiryTime;
  }
  Expiration expiry = info.getExpiration();
  if (expiry.getMillis() < 0l) {
    throw new InvalidRequestException("Cannot set a negative expiration: "
        + expiry.getMillis());
  }
  long relExpiryTime, absExpiryTime;
  if (expiry.isRelative()) {
    relExpiryTime = expiry.getMillis();
    absExpiryTime = now + relExpiryTime;
  } else {
    absExpiryTime = expiry.getMillis();
    relExpiryTime = absExpiryTime - now;
  }
  // Need to cap the expiry so we don't overflow a long when doing math
  if (relExpiryTime > Expiration.MAX_RELATIVE_EXPIRY_MS) {
    throw new InvalidRequestException("Expiration "
        + expiry.toString() + " is too far in the future!");
  }
  // Fail if the requested expiry is greater than the max
  if (relExpiryTime > maxRelativeExpiryTime) {
    throw new InvalidRequestException("Expiration " + expiry.toString()
        + " exceeds the max relative expiration time of "
        + maxRelativeExpiryTime + " ms.");
  }
  return absExpiryTime;
}
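
validateExpiryTime() turns a relative Expiration into an absolute timestamp against the local clock (absExpiryTime = now + relExpiryTime) and checks the relative distance against the pool's maximum. The sketch below shows both Expiration forms; the one-hour window is arbitrary.

import java.util.Date;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo.Expiration;

public class ExpirationSketch {
  public static void main(String[] args) {
    long now = new Date().getTime();
    long oneHourMs = 60L * 60 * 1000;  // arbitrary window

    // Relative form: "expire one hour from now"; the NameNode converts it
    // to absExpiryTime = now + getMillis(), as in validateExpiryTime().
    Expiration relative = Expiration.newRelative(oneHourMs);

    // Absolute form: an explicit wall-clock instant; the relative distance
    // checked against the pool limit is getMillis() - now.
    Expiration absolute = Expiration.newAbsolute(now + oneHourMs);

    System.out.println("relative? " + relative.isRelative()
        + ", millis=" + relative.getMillis());
    System.out.println("relative? " + absolute.isRelative()
        + ", absolute millis=" + absolute.getMillis());
    // Either form is rejected if its relative distance exceeds
    // Expiration.MAX_RELATIVE_EXPIRY_MS or the pool's own maximum.
  }
}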
Project: hadoop    File: CacheManager.java
/**
 * Get a CacheDirective by ID, validating the ID and that the directive
 * exists.
 */
private CacheDirective getById(long id) throws InvalidRequestException {
  // Check for invalid IDs.
  if (id <= 0) {
    throw new InvalidRequestException("Invalid negative ID.");
  }
  // Find the directive.
  CacheDirective directive = directivesById.get(id);
  if (directive == null) {
    throw new InvalidRequestException("No directive with ID " + id
        + " found.");
  }
  return directive;
}
Project: hadoop    File: CacheManager.java
/**
 * Get a CachePool by name, validating that it exists.
 */
private CachePool getCachePool(String poolName)
    throws InvalidRequestException {
  CachePool pool = cachePools.get(poolName);
  if (pool == null) {
    throw new InvalidRequestException("Unknown pool " + poolName);
  }
  return pool;
}
Project: hadoop    File: CacheManager.java
/**
 * Adds a directive, skipping most error checking. This should only be called
 * internally in special scenarios like edit log replay.
 */
CacheDirectiveInfo addDirectiveFromEditLog(CacheDirectiveInfo directive)
    throws InvalidRequestException {
  long id = directive.getId();
  CacheDirective entry = new CacheDirective(directive);
  CachePool pool = cachePools.get(directive.getPool());
  addInternal(entry, pool);
  if (nextDirectiveId <= id) {
    nextDirectiveId = id + 1;
  }
  return entry.toInfo();
}
Project: hadoop    File: CacheManager.java
/**
 * Modifies a directive, skipping most error checking. This is for careful
 * internal use only. modifyDirective can be non-deterministic since its error
 * checking depends on current system time, which poses a problem for edit log
 * replay.
 */
void modifyDirectiveFromEditLog(CacheDirectiveInfo info)
    throws InvalidRequestException {
  // Check for invalid IDs.
  Long id = info.getId();
  if (id == null) {
    throw new InvalidRequestException("Must supply an ID.");
  }
  CacheDirective prevEntry = getById(id);
  CacheDirectiveInfo newInfo = createFromInfoAndDefaults(info, prevEntry);
  removeInternal(prevEntry);
  addInternal(new CacheDirective(newInfo), getCachePool(newInfo.getPool()));
}
Project: hadoop    File: ShortCircuitRegistry.java
public synchronized void registerSlot(ExtendedBlockId blockId, SlotId slotId,
    boolean isCached) throws InvalidRequestException {
  if (!enabled) {
    if (LOG.isTraceEnabled()) {
      LOG.trace(this + " can't register a slot because the " +
          "ShortCircuitRegistry is not enabled.");
    }
    throw new UnsupportedOperationException();
  }
  ShmId shmId = slotId.getShmId();
  RegisteredShm shm = segments.get(shmId);
  if (shm == null) {
    throw new InvalidRequestException("there is no shared memory segment " +
        "registered with shmId " + shmId);
  }
  Slot slot = shm.registerSlot(slotId.getSlotIdx(), blockId);
  if (isCached) {
    slot.makeAnchorable();
  } else {
    slot.makeUnanchorable();
  }
  boolean added = slots.put(blockId, slot);
  Preconditions.checkState(added);
  if (LOG.isTraceEnabled()) {
    LOG.trace(this + ": registered " + blockId + " with slot " +
      slotId + " (isCached=" + isCached + ")");
  }
}
Project: aliyun-oss-hadoop-fs    File: CacheDirectiveIterator.java
@Override
public BatchedEntries<CacheDirectiveEntry> makeRequest(Long prevKey)
    throws IOException {
  BatchedEntries<CacheDirectiveEntry> entries;
  try (TraceScope ignored = tracer.newScope("listCacheDirectives")) {
    entries = namenode.listCacheDirectives(prevKey, filter);
  } catch (IOException e) {
    if (e.getMessage().contains("Filtering by ID is unsupported")) {
      // Retry case for old servers, do the filtering client-side
      long id = filter.getId();
      filter = removeIdFromFilter(filter);
      // Using id - 1 as prevId should get us a window containing the id
      // This is somewhat brittle, since it depends on directives being
      // returned in order of ascending ID.
      entries = namenode.listCacheDirectives(id - 1, filter);
      for (int i = 0; i < entries.size(); i++) {
        CacheDirectiveEntry entry = entries.get(i);
        if (entry.getInfo().getId().equals(id)) {
          return new SingleEntry(entry);
        }
      }
      throw new RemoteException(InvalidRequestException.class.getName(),
          "Did not find requested id " + id);
    }
    throw e;
  }
  Preconditions.checkNotNull(entries);
  return entries;
}
Project: aliyun-oss-hadoop-fs    File: ShortCircuitShm.java
synchronized public final Slot getSlot(int slotIdx)
    throws InvalidRequestException {
  if (!allocatedSlots.get(slotIdx)) {
    throw new InvalidRequestException(this + ": slot " + slotIdx +
        " does not exist.");
  }
  return slots[slotIdx];
}
Project: aliyun-oss-hadoop-fs    File: ShortCircuitShm.java
/**
 * Register a slot.
 *
 * This function looks at a slot which has already been initialized (by
 * another process), and registers it with us.  Then, it returns the
 * relevant Slot object.
 *
 * @return    The slot.
 *
 * @throws InvalidRequestException
 *            If the slot index we're trying to allocate has not been
 *            initialized, or is already in use.
 */
synchronized public final Slot registerSlot(int slotIdx,
    ExtendedBlockId blockId) throws InvalidRequestException {
  if (slotIdx < 0) {
    throw new InvalidRequestException(this + ": invalid negative slot " +
        "index " + slotIdx);
  }
  if (slotIdx >= slots.length) {
    throw new InvalidRequestException(this + ": invalid slot " +
        "index " + slotIdx);
  }
  if (allocatedSlots.get(slotIdx)) {
    throw new InvalidRequestException(this + ": slot " + slotIdx +
        " is already in use.");
  }
  Slot slot = new Slot(calculateSlotAddress(slotIdx), blockId);
  if (!slot.isValid()) {
    throw new InvalidRequestException(this + ": slot " + slotIdx +
        " is not marked as valid.");
  }
  slots[slotIdx] = slot;
  allocatedSlots.set(slotIdx, true);
  if (LOG.isTraceEnabled()) {
    LOG.trace(this + ": registerSlot " + slotIdx + ": allocatedSlots=" + allocatedSlots +
                StringUtils.getStackTrace(Thread.currentThread()));
  }
  return slot;
}
Project: aliyun-oss-hadoop-fs    File: CacheManager.java
private static String validatePoolName(CacheDirectiveInfo directive)
    throws InvalidRequestException {
  String pool = directive.getPool();
  if (pool == null) {
    throw new InvalidRequestException("No pool specified.");
  }
  if (pool.isEmpty()) {
    throw new InvalidRequestException("Invalid empty pool name.");
  }
  return pool;
}